Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6: (31 commits)
  firewire: fw-sbp2: fix DMA mapping of management ORBs
  firewire: fw-sbp2: fix DMA mapping of command ORBs
  firewire: fw-sbp2: fix DMA mapping of S/G tables
  firewire: fw-sbp2: add a boundary check
  firewire: fw-sbp2: correctly align page tables
  firewire: fw-sbp2: memset wants string.h
  firewire: fw-sbp2: use correct speed in sbp2_agent_reset
  firewire: fw-sbp2: correctly dereference by container_of
  firewire: Document userspace ioctl interface.
  firewire: fw-sbp2: implement nonexclusive login
  firewire: fw-sbp2: let SCSI shutdown commands through before logout
  firewire: fw-sbp2: implement max sectors limit for some old bridges
  firewire: simplify a struct type
  firewire: support S100B...S400B and link slower than PHY
  firewire: optimize gap count with 1394b leaf nodes
  firewire: remove unused macro
  firewire: missing newline in printk
  firewire: fw-sbp2: remove unused struct member
  ieee1394: remove old isochronous ABI
  ieee1394: sbp2: change some module parameters from int to bool
  ...
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index af50f9b..4d880b3 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1014,49 +1014,6 @@
 
 	mga=		[HW,DRM]
 
-	migration_cost=
-			[KNL,SMP] debug: override scheduler migration costs
-			Format: <level-1-usecs>,<level-2-usecs>,...
-			This debugging option can be used to override the
-			default scheduler migration cost matrix. The numbers
-			are indexed by 'CPU domain distance'.
-			E.g. migration_cost=1000,2000,3000 on an SMT NUMA
-			box will set up an intra-core migration cost of
-			1 msec, an inter-core migration cost of 2 msecs,
-			and an inter-node migration cost of 3 msecs.
-
-			WARNING: using the wrong values here can break
-			scheduler performance, so it's only for scheduler
-			development purposes, not production environments.
-
-	migration_debug=
-			[KNL,SMP] migration cost auto-detect verbosity
-			Format=<0|1|2>
-			If a system's migration matrix reported at bootup
-			seems erroneous then this option can be used to
-			increase verbosity of the detection process.
-			We default to 0 (no extra messages), 1 will print
-			some more information, and 2 will be really
-			verbose (probably only useful if you also have a
-			serial console attached to the system).
-
-	migration_factor=
-			[KNL,SMP] multiply/divide migration costs by a factor
-			Format=<percent>
-			This debug option can be used to proportionally
-			increase or decrease the auto-detected migration
-			costs for all entries of the migration matrix.
-			E.g. migration_factor=150 will increase migration
-			costs by 50%. (and thus the scheduler will be less
-			eager migrating cache-hot tasks)
-			migration_factor=80 will decrease migration costs
-			by 20%. (thus the scheduler will be more eager to
-			migrate tasks)
-
-			WARNING: using the wrong values here can break
-			scheduler performance, so it's only for scheduler
-			development purposes, not production environments.
-
 	mousedev.tap_time=
 			[MOUSE] Maximum time between finger touching and
 			leaving touchpad surface for touch to be considered
diff --git a/Documentation/networking/spider_net.txt b/Documentation/networking/spider_net.txt
new file mode 100644
index 0000000..4b4adb8
--- /dev/null
+++ b/Documentation/networking/spider_net.txt
@@ -0,0 +1,204 @@
+
+            The Spidernet Device Driver
+            ===========================
+
+Written by Linas Vepstas <linas@austin.ibm.com>
+
+Version of 7 June 2007
+
+Abstract
+========
+This document sketches the structure of portions of the spidernet
+device driver in the Linux kernel tree. The spidernet is a gigabit
+ethernet device built into the Toshiba southbridge commonly used
+in the SONY Playstation 3 and the IBM QS20 Cell blade.
+
+The Structure of the RX Ring.
+=============================
+The receive (RX) ring is a circular linked list of RX descriptors,
+together with three pointers into the ring that are used to manage its
+contents.
+
+The elements of the ring are called "descriptors" or "descrs"; they
+describe the received data. This includes a pointer to a buffer
+containing the received data, the buffer size, and various status bits.
+
+There are three primary states that a descriptor can be in: "empty",
+"full" and "not-in-use".  An "empty" or "ready" descriptor is ready
+to receive data from the hardware. A "full" descriptor has data in it,
+and is waiting to be emptied and processed by the OS. A "not-in-use"
+descriptor is neither empty nor full; it is simply not ready. It may
+not even have a data buffer, or may be otherwise unusable.
+
+During normal operation, on device startup, the OS (specifically, the
+spidernet device driver) allocates a set of RX descriptors and RX
+buffers. These are all marked "empty", ready to receive data. This
+ring is handed off to the hardware, which sequentially fills in the
+buffers, and marks them "full". The OS follows up, taking the full
+buffers, processing them, and re-marking them empty.
+
+This filling and emptying is managed by three pointers, the "head"
+and "tail" pointers, managed by the OS, and a hardware current
+descriptor pointer (GDACTDPA). The GDACTDPA points at the descr
+currently being filled. When this descr is filled, the hardware
+marks it full, and advances the GDACTDPA by one.  Thus, when there is
+flowing RX traffic, every descr behind it should be marked "full",
+and everything in front of it should be "empty".  If the hardware
+discovers that the current descr is not empty, it will signal an
+interrupt, and halt processing.
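+
+As a rough illustration, the hardware's side of this scheme can be
+modeled as below. This is a minimal user-space sketch of the behavior
+described above, not actual driver or hardware code; all of the names
+except GDACTDPA are invented for the example.
+
+    #include <string.h>
+
+    enum descr_state { DESCR_EMPTY, DESCR_FULL, DESCR_NOT_IN_USE };
+
+    struct descr {
+            enum descr_state state;
+            void *buf;              /* receive buffer, if any */
+            struct descr *next;     /* circular list */
+    };
+
+    /* One step of the (modeled) hardware: fill the current descr. */
+    int hw_rx_step(struct descr **gdactdpa, const void *frame, size_t len)
+    {
+            struct descr *d = *gdactdpa;
+
+            if (d->state != DESCR_EMPTY)
+                    return -1;      /* not empty: signal interrupt, halt */
+
+            memcpy(d->buf, frame, len);
+            d->state = DESCR_FULL;  /* hand the descr to the OS */
+            *gdactdpa = d->next;    /* advance the hardware pointer */
+            return 0;
+    }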
+
+The tail pointer tails or trails the hardware pointer. When the
+hardware is ahead, the tail pointer will be pointing at a "full"
+descr. The OS will process this descr, and then mark it "not-in-use",
+and advance the tail pointer.  Thus, when there is flowing RX traffic,
+all of the descrs in front of the tail pointer should be "full", and
+all of those behind it should be "not-in-use". When RX traffic is not
+flowing, then the tail pointer can catch up to the hardware pointer.
+The OS will then note that the current tail is "empty", and halt
+processing.
+
+The head pointer (somewhat mis-named) follows after the tail pointer.
+When traffic is flowing, then the head pointer will be pointing at
+a "not-in-use" descr. The OS will perform various housekeeping duties
+on this descr. This includes allocating a new data buffer and
+dma-mapping it so as to make it visible to the hardware. The OS will
+then mark the descr as "empty", ready to receive data. Thus, when there
+is flowing RX traffic, everything in front of the head pointer should
+be "not-in-use", and everything behind it should be "empty". If no
+RX traffic is flowing, then the head pointer can catch up to the tail
+pointer, at which point the OS will notice that the head descr is
+"empty", and it will halt processing.
+
+Thus, in an idle system, the GDACTDPA, tail and head pointers will
+all be pointing at the same descr, which should be "empty". All of the
+other descrs in the ring should be "empty" as well.
+
+The show_rx_chain() routine will print out the locations of the
+GDACTDPA, tail and head pointers. It will also summarize the contents
+of the ring, starting at the tail pointer, and listing the status
+of the descrs that follow.
+
+A typical example of the output, for a nearly idle system, might be
+
+net eth1: Total number of descrs=256
+net eth1: Chain tail located at descr=20
+net eth1: Chain head is at 20
+net eth1: HW curr desc (GDACTDPA) is at 21
+net eth1: Have 1 descrs with stat=x40800101
+net eth1: HW next desc (GDACNEXTDA) is at 22
+net eth1: Last 255 descrs with stat=xa0800000
+
+In the above, the hardware has filled in one descr, number 20. Both
+head and tail are pointing at 20, because it has not yet been emptied.
+Meanwhile, hw is pointing at 21, which is free.
+
+The "Have nnn decrs" refers to the descr starting at the tail: in this
+case, nnn=1 descr, starting at descr 20. The "Last nnn descrs" refers
+to all of the rest of the descrs, from the last status change. The "nnn"
+is a count of how many descrs have exactly the same status.
+
+The status x4... corresponds to "full" and status xa... corresponds
+to "empty". The actual value printed is RXCOMST_A.
+
+In the device driver source code, a different set of names is
+used for these same concepts, so that
+
+"empty" == SPIDER_NET_DESCR_CARDOWNED == 0xa
+"full"  == SPIDER_NET_DESCR_FRAME_END == 0x4
+"not in use" == SPIDER_NET_DESCR_NOT_IN_USE == 0xf
+
+
+The RX RAM full bug/feature
+===========================
+
+As long as the OS can empty out the RX buffers at a rate faster than
+the hardware can fill them, there is no problem. If, for some reason,
+the OS fails to empty the RX ring fast enough, the hardware GDACTDPA
+pointer will catch up to the head, notice the not-empty condition,
+and stop. However, RX packets may still continue arriving on the wire.
+The spidernet chip can save some limited number of these in local RAM.
+When this local RAM fills up, the spider chip will issue an interrupt
+indicating this (GHIINT0STS will show ERRINT, and the GRMFLLINT bit
+will be set in GHIINT1STS).  When the RX RAM full condition occurs,
+a certain bug/feature is triggered that has to be specially handled.
+This section describes the special handling for this condition.
+
+When the OS finally has a chance to run, it will empty out the RX ring.
+In particular, it will clear the descriptor on which the hardware had
+stopped. However, once the hardware has decided that a certain
+descriptor is invalid, it will not restart at that descriptor; instead
+it will restart at the next descr. This potentially will lead to a
+deadlock condition, as the tail pointer will be pointing at this descr,
+which, from the OS point of view, is empty; the OS will be waiting for
+this descr to be filled. However, the hardware has skipped this descr,
+and is filling the next descrs. Since the OS doesn't see this, there
+is a potential deadlock, with the OS waiting for one descr to fill,
+while the hardware is waiting for a different set of descrs to become
+empty.
+
+A call to show_rx_chain() at this point indicates the nature of the
+problem. A typical print when the network is hung shows the following:
+
+net eth1: Spider RX RAM full, incoming packets might be discarded!
+net eth1: Total number of descrs=256
+net eth1: Chain tail located at descr=255
+net eth1: Chain head is at 255
+net eth1: HW curr desc (GDACTDPA) is at 0
+net eth1: Have 1 descrs with stat=xa0800000
+net eth1: HW next desc (GDACNEXTDA) is at 1
+net eth1: Have 127 descrs with stat=x40800101
+net eth1: Have 1 descrs with stat=x40800001
+net eth1: Have 126 descrs with stat=x40800101
+net eth1: Last 1 descrs with stat=xa0800000
+
+Both the tail and head pointers are pointing at descr 255, which is
+marked xa... which is "empty". Thus, from the OS point of view, there
+is nothing to be done. In particular, there is the implicit assumption
+that everything in front of the "empty" descr must surely also be empty,
+as explained in the last section. The OS is waiting for descr 255 to
+become non-empty, which, in this case, will never happen.
+
+The HW pointer is at descr 0. This descr is marked 0x4.. or "full".
+Since it's already full, the hardware can do nothing more, and thus has
+halted processing. Notice that descrs 0 through 253 are all marked
+"full", while descrs 254 and 255 are empty. (The "Last 1 descrs" is
+descr 254, since tail was at 255.) Thus, the system is deadlocked,
+and there can be no forward progress; the OS thinks there's nothing
+to do, and the hardware has nowhere to put incoming data.
+
+This bug/feature is worked around with the spider_net_resync_head_ptr()
+routine. When the driver receives RX interrupts, but an examination
+of the RX chain seems to show it is empty, then it is probable that
+the hardware has skipped a descr or two (sometimes dozens under heavy
+network conditions). The spider_net_resync_head_ptr() subroutine will
+search the ring for the next full descr, and the driver will resume
+operations there.  Since this will leave "holes" in the ring, there
+is also a spider_net_resync_tail_ptr() that will skip over such holes.
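+
+In terms of the earlier sketch, a head resync amounts to a bounded
+search around the ring for the next full descr. The following is only
+an illustration of the idea, not the actual spider_net_resync_head_ptr()
+code:
+
+    /* Skip descrs the hardware has silently abandoned and resume
+     * at the next one it actually filled. */
+    struct descr *resync_head(struct descr *head, int ring_size)
+    {
+            int i;
+
+            for (i = 0; i < ring_size; i++, head = head->next)
+                    if (head->state == DESCR_FULL)
+                            return head;    /* resume processing here */
+
+            return NULL;                    /* the ring really is empty */
+    }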
+
+As of this writing, the spider_net_resync() strategy seems to work very
+well, even under heavy network loads.
+
+
+The TX ring
+===========
+The TX ring uses a low-watermark interrupt scheme to make sure that
+the TX queue is appropriately serviced for large packet sizes.
+
+For packet sizes greater than about 1 KByte, the kernel can fill
+the TX ring quicker than the device can drain it. Once the ring
+is full, the netdev is stopped. When there is room in the ring,
+the netdev needs to be reawakened, so that more TX packets are placed
+in the ring. The hardware can empty the ring about four times per jiffy,
+so it's not appropriate to wait for the poll routine to refill, since
+the poll routine runs only once per jiffy.  The low-watermark mechanism
+marks a descr about 1/4th of the way from the bottom of the queue, so
+that an interrupt is generated when the descr is processed. This
+interrupt wakes up the netdev, which can then refill the queue.
+For large packets, this mechanism generates a relatively small number
+of interrupts, about 1K/sec. For smaller packets, this will drop to zero
+interrupts, as the hardware can empty the queue faster than the kernel
+can fill it.
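+
+A sketch of how such a watermark might be placed when packets are
+queued (illustrative only: TX_RING_SIZE and the irq_on_completion
+field are invented names, and the real driver arms the interrupt via
+a flag in the descr):
+
+    #define TX_RING_SIZE 256
+
+    struct tx_descr {
+            int irq_on_completion;
+            /* data pointer, length, status bits, ... */
+    };
+
+    /* Ask for a TX-completion interrupt on the descr sitting 1/4th of
+     * the way up from the oldest queued packet, so the netdev is woken
+     * well before the ring fully drains. */
+    void set_low_watermark(struct tx_descr *ring, int tx_tail, int queued)
+    {
+            int mark = (tx_tail + queued / 4) % TX_RING_SIZE;
+
+            ring[mark].irq_on_completion = 1;
+    }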
+
+
+ ======= END OF DOCUMENT ========
+
diff --git a/Documentation/sched-design-CFS.txt b/Documentation/sched-design-CFS.txt
new file mode 100644
index 0000000..16feebb
--- /dev/null
+++ b/Documentation/sched-design-CFS.txt
@@ -0,0 +1,119 @@
+
+This is the CFS scheduler.
+
+80% of CFS's design can be summed up in a single sentence: CFS basically
+models an "ideal, precise multi-tasking CPU" on real hardware.
+
+"Ideal multi-tasking CPU" is a (non-existent  :-))  CPU that has 100%
+physical power and which can run each task at precise equal speed, in
+parallel, each at 1/nr_running speed. For example: if there are 2 tasks
+running then it runs each at 50% physical power - totally in parallel.
+
+On real hardware, we can run only a single task at once, so while that
+one task runs, the other tasks that are waiting for the CPU are at a
+disadvantage - the current task gets an unfair amount of CPU time. In
+CFS this fairness imbalance is expressed and tracked via the per-task
+p->wait_runtime (nanosec-unit) value. "wait_runtime" is the amount of
+time the task should now run on the CPU for it to become completely fair
+and balanced.
+
+( small detail: on 'ideal' hardware, the p->wait_runtime value would
+  always be zero - no task would ever get 'out of balance' from the
+  'ideal' share of CPU time. )
+
+CFS's task picking logic is based on this p->wait_runtime value and it
+is thus very simple: it always tries to run the task with the largest
+p->wait_runtime value. In other words, CFS tries to run the task with
+the 'gravest need' for more CPU time. So CFS always tries to split up
+CPU time between runnable tasks as close to 'ideal multitasking
+hardware' as possible.
+
+Most of the rest of CFS's design just falls out of this really simple
+concept, with a few add-on embellishments like nice levels,
+multiprocessing and various algorithm variants to recognize sleepers.
+
+In practice it works like this: the system runs a task a bit, and when
+the task schedules (or a scheduler tick happens) the task's CPU usage is
+'accounted for': the (small) time it just spent using the physical CPU
+is deducted from p->wait_runtime. [minus the 'fair share' it would have
+gotten anyway]. Once p->wait_runtime gets low enough so that another
+task becomes the 'leftmost task' of the time-ordered rbtree it maintains
+(plus a small amount of 'granularity' distance relative to the leftmost
+task so that we do not over-schedule tasks and thrash the cache) then the
+new leftmost task is picked and the current task is preempted.
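+
+( In pseudo-C, the per-tick accounting just described boils down to
+  roughly the following - a sketch of the idea only, not kernel code,
+  where 'delta' is the CPU time the current task has consumed since
+  the last update:
+
+      /* charge the running task for 'delta' nsecs of CPU time,
+       * minus the 1/nr_running share it was entitled to anyway */
+      p->wait_runtime -= delta - delta / rq->nr_running;
+
+  The fair share owed to all waiting tasks in the meantime is tracked
+  globally via rq->fair_clock, described next. )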
+
+The rq->fair_clock value tracks the 'CPU time a runnable task would have
+fairly gotten, had it been runnable during that time'. So by using
+rq->fair_clock values we can accurately timestamp and measure the
+'expected CPU time' a task should have gotten. All runnable tasks are
+sorted in the rbtree by the "rq->fair_clock - p->wait_runtime" key, and
+CFS picks the 'leftmost' task and sticks to it. As the system progresses
+forwards, newly woken tasks are put into the tree more and more to the
+right - slowly but surely giving a chance for every task to become the
+'leftmost task' and thus get on the CPU within a deterministic amount of
+time.
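+
+A sketch of the resulting sort-and-pick logic (illustrative only: the
+field and function names here are simplified relative to the real
+implementation):
+
+      /* tasks sit in the rbtree ordered by this key; a smaller key
+       * means more owed CPU time, i.e. further to the left */
+      key = rq->fair_clock - p->wait_runtime;
+
+      /* picking the next task to run is then a leftmost lookup */
+      next = rb_entry(rb_first(&rq->tasks_timeline),
+                      struct task_struct, run_node);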
+
+Some implementation details:
+
+ - the introduction of Scheduling Classes: an extensible hierarchy of
+   scheduler modules. These modules encapsulate scheduling policy
+   details and are handled by the scheduler core without the core
+   code assuming too much about them.
+
+ - sched_fair.c implements the 'CFS desktop scheduler': it is a
+   replacement for the vanilla scheduler's SCHED_OTHER interactivity
+   code.
+
+   I'd like to give credit to Con Kolivas for the general approach here:
+   he has proven via RSDL/SD that 'fair scheduling' is possible and that
+   it results in better desktop scheduling. Kudos Con!
+
+   The CFS patch uses a completely different approach and implementation
+   from RSDL/SD. My goal was to make CFS's interactivity quality exceed
+   that of RSDL/SD, which is a high standard to meet :-) Testing
+   feedback is welcome to decide this one way or another. [ and, in any
+   case, all of SD's logic could be added via a kernel/sched_sd.c module
+   as well, if Con is interested in such an approach. ]
+
+   CFS's design is quite radical: it does not use runqueues, it uses a
+   time-ordered rbtree to build a 'timeline' of future task execution,
+   and thus has no 'array switch' artifacts (by which both the vanilla
+   scheduler and RSDL/SD are affected).
+
+   CFS uses nanosecond granularity accounting and does not rely on any
+   jiffies or other HZ detail. Thus the CFS scheduler has no notion of
+   'timeslices' and has no heuristics whatsoever. There is only one
+   central tunable:
+
+         /proc/sys/kernel/sched_granularity_ns
+
+   which can be used to tune the scheduler from 'desktop' (low
+   latencies) to 'server' (good batching) workloads. It defaults to a
+   setting suitable for desktop workloads. SCHED_BATCH is handled by the
+   CFS scheduler module too.
+
+   Due to its design, the CFS scheduler is not prone to any of the
+   'attacks' that exist today against the heuristics of the stock
+   scheduler: fiftyp.c, thud.c, chew.c, ring-test.c, massive_intr.c all
+   work fine, do not impact interactivity, and produce the expected
+   behavior.
+
+   the CFS scheduler has a much stronger handling of nice levels and
+   SCHED_BATCH: both types of workloads should be isolated much more
+   aggressively than under the vanilla scheduler.
+
+   ( another detail: due to nanosec accounting and timeline sorting,
+     sched_yield() support is very simple under CFS, and in fact under
+     CFS sched_yield() behaves much better than under any other
+     scheduler I have tested so far. )
+
+ - sched_rt.c implements SCHED_FIFO and SCHED_RR semantics, in a simpler
+   way than the vanilla scheduler does. It uses 100 runqueues (for all
+   100 RT priority levels, instead of 140 in the vanilla scheduler)
+   and it needs no expired array.
+
+ - reworked/sanitized SMP load-balancing: the runqueue-walking
+   assumptions are gone from the load-balancing code now, and
+   iterators of the scheduling modules are used. The balancing code got
+   quite a bit simpler as a result.
+
diff --git a/MAINTAINERS b/MAINTAINERS
index df40a4e..2c1dfb2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3049,6 +3049,16 @@
 RISCOM8 DRIVER
 S:	Orphan
 
+RTL818X WIRELESS DRIVER
+P:	Michael Wu
+M:	flamingice@sourmilk.net
+P:	Andrea Merello
+M:	andreamrl@tiscali.it
+L:	linux-wireless@vger.kernel.org
+W:	http://linuxwireless.org/
+T:	git kernel.org:/pub/scm/linux/kernel/git/mwu/mac80211-drivers.git
+S:	Maintained
+
 S3 SAVAGE FRAMEBUFFER DRIVER
 P:	Antonino Daplas
 M:	adaplas@gmail.com
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 88baed1..0b29545 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -941,17 +941,6 @@
 }
 #endif
 
-static void smp_tune_scheduling(void)
-{
-	if (cpu_khz) {
-		/* cache size in kB */
-		long cachesize = boot_cpu_data.x86_cache_size;
-
-		if (cachesize > 0)
-			max_cache_size = cachesize * 1024;
-	}
-}
-
 /*
  * Cycle through the processors sending APIC IPIs to boot each.
  */
@@ -980,7 +969,6 @@
 	x86_cpu_to_apicid[0] = boot_cpu_physical_apicid;
 
 	current_thread_info()->cpu = 0;
-	smp_tune_scheduling();
 
 	set_cpu_sibling_map(0);
 
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index f64b81f..ea63a30 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -4,6 +4,7 @@
  * See comments there for proper credits.
  */
 
+#include <linux/sched.h>
 #include <linux/clocksource.h>
 #include <linux/workqueue.h>
 #include <linux/cpufreq.h>
@@ -106,8 +107,13 @@
 
 	/*
 	 * Fall back to jiffies if there's no TSC available:
+	 * ( But note that we still use it if the TSC is marked
+	 *   unstable. We do this because unlike Time Of Day,
+	 *   the scheduler clock tolerates small errors and it's
+	 *   very important for it to be as fast as the platform
+	 *   can achieve it. )
 	 */
-	if (unlikely(!tsc_enabled))
+	if (unlikely(!tsc_enabled && !tsc_unstable))
 		/* No locking but a rare wrong value is not a big deal: */
 		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
 
@@ -277,6 +283,7 @@
 
 void mark_tsc_unstable(char *reason)
 {
+	sched_clock_unstable_event();
 	if (!tsc_unstable) {
 		tsc_unstable = 1;
 		tsc_enabled = 0;
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index eaa6a24..188fb73 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -805,7 +805,6 @@
 get_max_cacheline_size (void)
 {
 	unsigned long line_size, max = 1;
-	unsigned int cache_size = 0;
 	u64 l, levels, unique_caches;
         pal_cache_config_info_t cci;
         s64 status;
@@ -835,8 +834,6 @@
 		line_size = 1 << cci.pcci_line_size;
 		if (line_size > max)
 			max = line_size;
-		if (cache_size < cci.pcci_cache_size)
-			cache_size = cci.pcci_cache_size;
 		if (!cci.pcci_unified) {
 			status = ia64_pal_cache_config_info(l,
 						    /* cache_type (instruction)= */ 1,
@@ -853,9 +850,6 @@
 			ia64_i_cache_stride_shift = cci.pcci_stride;
 	}
   out:
-#ifdef CONFIG_SMP
-	max_cache_size = max(max_cache_size, cache_size);
-#endif
 	if (max > ia64_max_cacheline_size)
 		ia64_max_cacheline_size = max;
 }
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 67edfa7..a1b017f 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -51,16 +51,6 @@
 EXPORT_SYMBOL(phys_cpu_present_map);
 EXPORT_SYMBOL(cpu_online_map);
 
-/* This happens early in bootup, can't really do it better */
-static void smp_tune_scheduling (void)
-{
-	struct cache_desc *cd = &current_cpu_data.scache;
-	unsigned long cachesize = cd->linesz * cd->sets * cd->ways;
-
-	if (cachesize > max_cache_size)
-		max_cache_size = cachesize;
-}
-
 extern void __init calibrate_delay(void);
 extern ATTRIB_NORET void cpu_idle(void);
 
@@ -228,7 +218,6 @@
 {
 	init_new_context(current, &init_mm);
 	current_thread_info()->cpu = 0;
-	smp_tune_scheduling();
 	plat_prepare_cpus(max_cpus);
 #ifndef CONFIG_HOTPLUG_CPU
 	cpu_present_map = cpu_possible_map;
diff --git a/arch/sparc/kernel/smp.c b/arch/sparc/kernel/smp.c
index 4d9ad590..4fea3ac 100644
--- a/arch/sparc/kernel/smp.c
+++ b/arch/sparc/kernel/smp.c
@@ -68,16 +68,6 @@
 	cpu_data(id).prom_node = cpu_node;
 	cpu_data(id).mid = cpu_get_hwmid(cpu_node);
 
-	/* this is required to tune the scheduler correctly */
-	/* is it possible to have CPUs with different cache sizes? */
-	if (id == boot_cpu_id) {
-		int cache_line,cache_nlines;
-		cache_line = 0x20;
-		cache_line = prom_getintdefault(cpu_node, "ecache-line-size", cache_line);
-		cache_nlines = 0x8000;
-		cache_nlines = prom_getintdefault(cpu_node, "ecache-nlines", cache_nlines);
-		max_cache_size = cache_line * cache_nlines;
-	}
 	if (cpu_data(id).mid < 0)
 		panic("No MID found for CPU%d at node 0x%08d", id, cpu_node);
 }
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 4dcd7d0..40e40f9 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -1163,32 +1163,6 @@
 	return -EINVAL;
 }
 
-static void __init smp_tune_scheduling(void)
-{
-	unsigned int smallest = ~0U;
-	int i;
-
-	for (i = 0; i < NR_CPUS; i++) {
-		unsigned int val = cpu_data(i).ecache_size;
-
-		if (val && val < smallest)
-			smallest = val;
-	}
-
-	/* Any value less than 256K is nonsense.  */
-	if (smallest < (256U * 1024U))
-		smallest = 256 * 1024;
-
-	max_cache_size = smallest;
-
-	if (smallest < 1U * 1024U * 1024U)
-		printk(KERN_INFO "Using max_cache_size of %uKB\n",
-		       smallest / 1024U);
-	else
-		printk(KERN_INFO "Using max_cache_size of %uMB\n",
-		       smallest / 1024U / 1024U);
-}
-
 /* Constrain the number of cpus to max_cpus.  */
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
@@ -1206,7 +1180,6 @@
 	}
 
 	cpu_data(boot_cpu_id).udelay_val = loops_per_jiffy;
-	smp_tune_scheduling();
 }
 
 void __devinit smp_prepare_boot_cpu(void)
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 8fbe9fd..3b63b0b 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -1,8 +1,12 @@
 #
 # HID driver configuration
 #
-menu "HID Devices"
+menuconfig HID_SUPPORT
+	bool "HID Devices"
 	depends on INPUT
+	default y
+
+if HID_SUPPORT
 
 config HID
 	tristate "Generic HID support"
@@ -24,6 +28,7 @@
 
 config HID_DEBUG
 	bool "HID debugging support"
+	default y if !EMBEDDED
 	depends on HID
 	---help---
 	This option lets the HID layer output diagnostics about its internal
@@ -38,5 +43,4 @@
 
 source "drivers/hid/usbhid/Kconfig"
 
-endmenu
-
+endif # HID_SUPPORT
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 6ec04e7..317cf8a 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -40,6 +40,13 @@
 #define DRIVER_DESC "HID core driver"
 #define DRIVER_LICENSE "GPL"
 
+#ifdef CONFIG_HID_DEBUG
+int hid_debug = 0;
+module_param_named(debug, hid_debug, bool, 0600);
+MODULE_PARM_DESC(debug, "Turn HID debugging mode on and off");
+EXPORT_SYMBOL_GPL(hid_debug);
+#endif
+
 /*
  * Register a new report for a device.
  */
@@ -78,7 +85,7 @@
 	struct hid_field *field;
 
 	if (report->maxfield == HID_MAX_FIELDS) {
-		dbg("too many fields in report");
+		dbg_hid("too many fields in report\n");
 		return NULL;
 	}
 
@@ -106,7 +113,7 @@
 	usage = parser->local.usage[0];
 
 	if (parser->collection_stack_ptr == HID_COLLECTION_STACK_SIZE) {
-		dbg("collection stack overflow");
+		dbg_hid("collection stack overflow\n");
 		return -1;
 	}
 
@@ -114,7 +121,7 @@
 		collection = kmalloc(sizeof(struct hid_collection) *
 				parser->device->collection_size * 2, GFP_KERNEL);
 		if (collection == NULL) {
-			dbg("failed to reallocate collection array");
+			dbg_hid("failed to reallocate collection array\n");
 			return -1;
 		}
 		memcpy(collection, parser->device->collection,
@@ -150,7 +157,7 @@
 static int close_collection(struct hid_parser *parser)
 {
 	if (!parser->collection_stack_ptr) {
-		dbg("collection stack underflow");
+		dbg_hid("collection stack underflow\n");
 		return -1;
 	}
 	parser->collection_stack_ptr--;
@@ -178,7 +185,7 @@
 static int hid_add_usage(struct hid_parser *parser, unsigned usage)
 {
 	if (parser->local.usage_index >= HID_MAX_USAGES) {
-		dbg("usage index exceeded");
+		dbg_hid("usage index exceeded\n");
 		return -1;
 	}
 	parser->local.usage[parser->local.usage_index] = usage;
@@ -202,12 +209,12 @@
 	int i;
 
 	if (!(report = hid_register_report(parser->device, report_type, parser->global.report_id))) {
-		dbg("hid_register_report failed");
+		dbg_hid("hid_register_report failed\n");
 		return -1;
 	}
 
 	if (parser->global.logical_maximum < parser->global.logical_minimum) {
-		dbg("logical range invalid %d %d", parser->global.logical_minimum, parser->global.logical_maximum);
+		dbg_hid("logical range invalid %d %d\n", parser->global.logical_minimum, parser->global.logical_maximum);
 		return -1;
 	}
 
@@ -287,7 +294,7 @@
 		case HID_GLOBAL_ITEM_TAG_PUSH:
 
 			if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) {
-				dbg("global enviroment stack overflow");
+				dbg_hid("global enviroment stack overflow\n");
 				return -1;
 			}
 
@@ -298,7 +305,7 @@
 		case HID_GLOBAL_ITEM_TAG_POP:
 
 			if (!parser->global_stack_ptr) {
-				dbg("global enviroment stack underflow");
+				dbg_hid("global enviroment stack underflow\n");
 				return -1;
 			}
 
@@ -342,27 +349,27 @@
 
 		case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
 			if ((parser->global.report_size = item_udata(item)) > 32) {
-				dbg("invalid report_size %d", parser->global.report_size);
+				dbg_hid("invalid report_size %d\n", parser->global.report_size);
 				return -1;
 			}
 			return 0;
 
 		case HID_GLOBAL_ITEM_TAG_REPORT_COUNT:
 			if ((parser->global.report_count = item_udata(item)) > HID_MAX_USAGES) {
-				dbg("invalid report_count %d", parser->global.report_count);
+				dbg_hid("invalid report_count %d\n", parser->global.report_count);
 				return -1;
 			}
 			return 0;
 
 		case HID_GLOBAL_ITEM_TAG_REPORT_ID:
 			if ((parser->global.report_id = item_udata(item)) == 0) {
-				dbg("report_id 0 is invalid");
+				dbg_hid("report_id 0 is invalid\n");
 				return -1;
 			}
 			return 0;
 
 		default:
-			dbg("unknown global tag 0x%x", item->tag);
+			dbg_hid("unknown global tag 0x%x\n", item->tag);
 			return -1;
 	}
 }
@@ -377,7 +384,7 @@
 	unsigned n;
 
 	if (item->size == 0) {
-		dbg("item data expected for local item");
+		dbg_hid("item data expected for local item\n");
 		return -1;
 	}
 
@@ -395,14 +402,14 @@
 				 * items and the first delimiter set.
 				 */
 				if (parser->local.delimiter_depth != 0) {
-					dbg("nested delimiters");
+					dbg_hid("nested delimiters\n");
 					return -1;
 				}
 				parser->local.delimiter_depth++;
 				parser->local.delimiter_branch++;
 			} else {
 				if (parser->local.delimiter_depth < 1) {
-					dbg("bogus close delimiter");
+					dbg_hid("bogus close delimiter\n");
 					return -1;
 				}
 				parser->local.delimiter_depth--;
@@ -412,7 +419,7 @@
 		case HID_LOCAL_ITEM_TAG_USAGE:
 
 			if (parser->local.delimiter_branch > 1) {
-				dbg("alternative usage ignored");
+				dbg_hid("alternative usage ignored\n");
 				return 0;
 			}
 
@@ -424,7 +431,7 @@
 		case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:
 
 			if (parser->local.delimiter_branch > 1) {
-				dbg("alternative usage ignored");
+				dbg_hid("alternative usage ignored\n");
 				return 0;
 			}
 
@@ -437,7 +444,7 @@
 		case HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM:
 
 			if (parser->local.delimiter_branch > 1) {
-				dbg("alternative usage ignored");
+				dbg_hid("alternative usage ignored\n");
 				return 0;
 			}
 
@@ -446,14 +453,14 @@
 
 			for (n = parser->local.usage_minimum; n <= data; n++)
 				if (hid_add_usage(parser, n)) {
-					dbg("hid_add_usage failed\n");
+					dbg_hid("hid_add_usage failed\n");
 					return -1;
 				}
 			return 0;
 
 		default:
 
-			dbg("unknown local item tag 0x%x", item->tag);
+			dbg_hid("unknown local item tag 0x%x\n", item->tag);
 			return 0;
 	}
 	return 0;
@@ -487,7 +494,7 @@
 			ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
 			break;
 		default:
-			dbg("unknown main item tag 0x%x", item->tag);
+			dbg_hid("unknown main item tag 0x%x\n", item->tag);
 			ret = 0;
 	}
 
@@ -502,7 +509,7 @@
 
 static int hid_parser_reserved(struct hid_parser *parser, struct hid_item *item)
 {
-	dbg("reserved item type, tag 0x%x", item->tag);
+	dbg_hid("reserved item type, tag 0x%x\n", item->tag);
 	return 0;
 }
 
@@ -667,14 +674,14 @@
 	while ((start = fetch_item(start, end, &item)) != NULL) {
 
 		if (item.format != HID_ITEM_FORMAT_SHORT) {
-			dbg("unexpected long global item");
+			dbg_hid("unexpected long global item\n");
 			hid_free_device(device);
 			vfree(parser);
 			return NULL;
 		}
 
 		if (dispatch_type[item.type](parser, &item)) {
-			dbg("item %u %u %u %u parsing failed\n",
+			dbg_hid("item %u %u %u %u parsing failed\n",
 				item.format, (unsigned)item.size, (unsigned)item.type, (unsigned)item.tag);
 			hid_free_device(device);
 			vfree(parser);
@@ -683,13 +690,13 @@
 
 		if (start == end) {
 			if (parser->collection_stack_ptr) {
-				dbg("unbalanced collection at end of report description");
+				dbg_hid("unbalanced collection at end of report description\n");
 				hid_free_device(device);
 				vfree(parser);
 				return NULL;
 			}
 			if (parser->local.delimiter_depth) {
-				dbg("unbalanced delimiter at end of report description");
+				dbg_hid("unbalanced delimiter at end of report description\n");
 				hid_free_device(device);
 				vfree(parser);
 				return NULL;
@@ -699,7 +706,7 @@
 		}
 	}
 
-	dbg("item fetching failed at offset %d\n", (int)(end - start));
+	dbg_hid("item fetching failed at offset %d\n", (int)(end - start));
 	hid_free_device(device);
 	vfree(parser);
 	return NULL;
@@ -915,13 +922,13 @@
 	hid_dump_input(field->usage + offset, value);
 
 	if (offset >= field->report_count) {
-		dbg("offset (%d) exceeds report_count (%d)", offset, field->report_count);
+		dbg_hid("offset (%d) exceeds report_count (%d)\n", offset, field->report_count);
 		hid_dump_field(field, 8);
 		return -1;
 	}
 	if (field->logical_minimum < 0) {
 		if (value != snto32(s32ton(value, size), size)) {
-			dbg("value %d is out of range", value);
+			dbg_hid("value %d is out of range\n", value);
 			return -1;
 		}
 	}
@@ -934,19 +941,17 @@
 {
 	struct hid_report_enum *report_enum = hid->report_enum + type;
 	struct hid_report *report;
-	int n, rsize;
+	int n, rsize, i;
 
 	if (!hid)
 		return -ENODEV;
 
 	if (!size) {
-		dbg("empty report");
+		dbg_hid("empty report\n");
 		return -1;
 	}
 
-#ifdef CONFIG_HID_DEBUG
-	printk(KERN_DEBUG __FILE__ ": report (size %u) (%snumbered)\n", size, report_enum->numbered ? "" : "un");
-#endif
+	dbg_hid("report (size %u) (%snumbered)\n", size, report_enum->numbered ? "" : "un");
 
 	n = 0;                          /* Normally report number is 0 */
 	if (report_enum->numbered) {    /* Device uses numbered reports, data[0] is report number */
@@ -954,25 +959,21 @@
 		size--;
 	}
 
-#ifdef CONFIG_HID_DEBUG
-	{
-		int i;
-		printk(KERN_DEBUG __FILE__ ": report %d (size %u) = ", n, size);
-		for (i = 0; i < size; i++)
-			printk(" %02x", data[i]);
-		printk("\n");
-	}
-#endif
+	/* dump the report descriptor */
+	dbg_hid("report %d (size %u) = ", n, size);
+	for (i = 0; i < size; i++)
+		dbg_hid_line(" %02x", data[i]);
+	dbg_hid_line("\n");
 
 	if (!(report = report_enum->report_id_hash[n])) {
-		dbg("undefined report_id %d received", n);
+		dbg_hid("undefined report_id %d received\n", n);
 		return -1;
 	}
 
 	rsize = ((report->size - 1) >> 3) + 1;
 
 	if (size < rsize) {
-		dbg("report %d is too short, (%d < %d)", report->id, size, rsize);
+		dbg_hid("report %d is too short, (%d < %d)\n", report->id, size, rsize);
 		memset(data + size, 0, rsize - size);
 	}
 
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 83c4126..a13757b 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -347,6 +347,9 @@
 void hid_resolv_usage(unsigned usage) {
 	const struct hid_usage_entry *p;
 
+	if (!hid_debug)
+		return;
+
 	resolv_usage_page(usage >> 16);
 	printk(".");
 	for (p = hid_usage_table; p->description; p++)
@@ -369,6 +372,9 @@
 void hid_dump_field(struct hid_field *field, int n) {
 	int j;
 
+	if (!hid_debug)
+		return;
+
 	if (field->physical) {
 		tab(n);
 		printk("Physical(");
@@ -466,6 +472,9 @@
 	unsigned i,k;
 	static char *table[] = {"INPUT", "OUTPUT", "FEATURE"};
 
+	if (!hid_debug)
+		return;
+
 	for (i = 0; i < HID_REPORT_TYPES; i++) {
 		report_enum = device->report_enum + i;
 		list = report_enum->report_list.next;
@@ -489,6 +498,9 @@
 EXPORT_SYMBOL_GPL(hid_dump_device);
 
 void hid_dump_input(struct hid_usage *usage, __s32 value) {
+	if (!hid_debug)
+		return;
+
 	printk("hid-debug: input ");
 	hid_resolv_usage(usage->hid);
 	printk(" = %d\n", value);
@@ -758,6 +770,9 @@
 
 void hid_resolv_event(__u8 type, __u16 code) {
 
+	if (!hid_debug)
+		return;
+
 	printk("%s.%s", events[type] ? events[type] : "?",
 		names[type] ? (names[type][code] ? names[type][code] : "?") : "?");
 }
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 7f81789..8edbd30 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -60,6 +60,19 @@
 	150,158,159,128,136,177,178,176,142,152,173,140,unk,unk,unk,unk
 };
 
+/* extended mapping for certain Logitech hardware (Logitech cordless desktop LX500) */
+#define LOGITECH_EXPANDED_KEYMAP_SIZE 80
+static int logitech_expanded_keymap[LOGITECH_EXPANDED_KEYMAP_SIZE] = {
+	  0,216,  0,213,175,156,  0,  0,  0,  0,
+	144,  0,  0,  0,  0,  0,  0,  0,  0,212,
+	174,167,152,161,112,  0,  0,  0,154,  0,
+	  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
+	  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
+	  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
+	  0,  0,  0,  0,  0,183,184,185,186,187,
+	188,189,190,191,192,193,194,  0,  0,  0
+};
+
 static const struct {
 	__s32 x;
 	__s32 y;
@@ -308,9 +321,7 @@
 		
 		clear_bit(old_keycode, dev->keybit);
 		set_bit(usage->code, dev->keybit);
-#ifdef CONFIG_HID_DEBUG
-		printk (KERN_DEBUG "Assigned keycode %d to HID usage code %x\n", keycode, scancode);
-#endif
+		dbg_hid(KERN_DEBUG "Assigned keycode %d to HID usage code %x\n", keycode, scancode);
 		/* Set the keybit for the old keycode if the old keycode is used
 		 * by another key */
 		if (hidinput_find_key (hid, 0, old_keycode))
@@ -333,11 +344,9 @@
 
 	field->hidinput = hidinput;
 
-#ifdef CONFIG_HID_DEBUG
-	printk(KERN_DEBUG "Mapping: ");
+	dbg_hid("Mapping: ");
 	hid_resolv_usage(usage->hid);
-	printk(" ---> ");
-#endif
+	dbg_hid_line(" ---> ");
 
 	if (field->flags & HID_MAIN_ITEM_CONSTANT)
 		goto ignore;
@@ -378,6 +387,21 @@
 					}
 			}
 
+			/* Special handling for Logitech Cordless Desktop */
+			if (field->application != HID_GD_MOUSE) {
+				if (device->quirks & HID_QUIRK_LOGITECH_EXPANDED_KEYMAP) {
+					int hid = usage->hid & HID_USAGE;
+					if (hid < LOGITECH_EXPANDED_KEYMAP_SIZE && logitech_expanded_keymap[hid] != 0)
+						code = logitech_expanded_keymap[hid];
+				}
+			} else {
+				if (device->quirks & HID_QUIRK_LOGITECH_IGNORE_DOUBLED_WHEEL) {
+					int hid = usage->hid & HID_USAGE;
+					if (hid == 7 || hid == 8)
+						goto ignore;
+				}
+			}
+
 			map_key(code);
 			break;
 
@@ -566,6 +590,11 @@
 				case 0x0e5: map_key_clear(KEY_BASSBOOST);	break;
 				case 0x0e9: map_key_clear(KEY_VOLUMEUP);	break;
 				case 0x0ea: map_key_clear(KEY_VOLUMEDOWN);	break;
+
+				/* reserved in HUT 1.12. Reported on Petalynx remote */
+				case 0x0f6: map_key_clear(KEY_NEXT);		break;
+				case 0x0fa: map_key_clear(KEY_BACK);		break;
+
 				case 0x183: map_key_clear(KEY_CONFIG);		break;
 				case 0x184: map_key_clear(KEY_WORDPROCESSOR);	break;
 				case 0x185: map_key_clear(KEY_EDITOR);		break;
@@ -598,7 +627,9 @@
 				case 0x21b: map_key_clear(KEY_COPY);		break;
 				case 0x21c: map_key_clear(KEY_CUT);		break;
 				case 0x21d: map_key_clear(KEY_PASTE);		break;
-				case 0x221: map_key_clear(KEY_FIND);		break;
+				case 0x21f: map_key_clear(KEY_FIND);		break;
+				case 0x221: map_key_clear(KEY_SEARCH);		break;
+				case 0x222: map_key_clear(KEY_GOTO);		break;
 				case 0x223: map_key_clear(KEY_HOMEPAGE);	break;
 				case 0x224: map_key_clear(KEY_BACK);		break;
 				case 0x225: map_key_clear(KEY_FORWARD);		break;
@@ -688,7 +719,28 @@
 			break;
 
 		case HID_UP_MSVENDOR:
-			goto ignore;
+
+			/* special case - Chicony KU-0418 tactical pad */
+			if (device->vendor == 0x04f2 && device->product == 0x0418) {
+				set_bit(EV_REP, input->evbit);
+				switch(usage->hid & HID_USAGE) {
+					case 0xff01: map_key_clear(BTN_1);		break;
+					case 0xff02: map_key_clear(BTN_2);		break;
+					case 0xff03: map_key_clear(BTN_3);		break;
+					case 0xff04: map_key_clear(BTN_4);		break;
+					case 0xff05: map_key_clear(BTN_5);		break;
+					case 0xff06: map_key_clear(BTN_6);		break;
+					case 0xff07: map_key_clear(BTN_7);		break;
+					case 0xff08: map_key_clear(BTN_8);		break;
+					case 0xff09: map_key_clear(BTN_9);		break;
+					case 0xff0a: map_key_clear(BTN_A);		break;
+					case 0xff0b: map_key_clear(BTN_B);		break;
+					default:    goto ignore;
+				}
+			} else {
+				goto ignore;
+			}
+			break;
 
 		case HID_UP_CUSTOM: /* Reported on Logitech and Powerbook USB keyboards */
 
@@ -704,10 +756,10 @@
 			}
 			break;
 
-		case HID_UP_LOGIVENDOR: /* Reported on Logitech Ultra X Media Remote */
-
+		case HID_UP_LOGIVENDOR:
 			set_bit(EV_REP, input->evbit);
 			switch(usage->hid & HID_USAGE) {
+				/* Reported on Logitech Ultra X Media Remote */
 				case 0x004: map_key_clear(KEY_AGAIN);		break;
 				case 0x00d: map_key_clear(KEY_HOME);		break;
 				case 0x024: map_key_clear(KEY_SHUFFLE);		break;
@@ -725,6 +777,14 @@
 				case 0x04d: map_key_clear(KEY_SUBTITLE);	break;
 				case 0x051: map_key_clear(KEY_RED);		break;
 				case 0x052: map_key_clear(KEY_CLOSE);		break;
+
+				/* Reported on Petalynx Maxter remote */
+				case 0x05a: map_key_clear(KEY_TEXT);		break;
+				case 0x05b: map_key_clear(KEY_RED);		break;
+				case 0x05c: map_key_clear(KEY_GREEN);		break;
+				case 0x05d: map_key_clear(KEY_YELLOW);		break;
+				case 0x05e: map_key_clear(KEY_BLUE);		break;
+
 				default:    goto ignore;
 			}
 			break;
@@ -818,16 +878,24 @@
 			field->dpad = usage->code;
 	}
 
+	/* for those devices which produce Consumer volume usage as relative,
+	 * we emulate pressing volumeup/volumedown appropriate number of times
+	 * in hidinput_hid_event()
+	 */
+	if ((usage->type == EV_ABS) && (field->flags & HID_MAIN_ITEM_RELATIVE) &&
+			(usage->code == ABS_VOLUME)) {
+		set_bit(KEY_VOLUMEUP, input->keybit);
+		set_bit(KEY_VOLUMEDOWN, input->keybit);
+	}
+
 	hid_resolv_event(usage->type, usage->code);
-#ifdef CONFIG_HID_DEBUG
-	printk("\n");
-#endif
+
+	dbg_hid_line("\n");
+
 	return;
 
 ignore:
-#ifdef CONFIG_HID_DEBUG
-	printk("IGNORED\n");
-#endif
+	dbg_hid_line("IGNORED\n");
 	return;
 }
 
@@ -896,18 +964,33 @@
 	}
 
 	if (usage->hid == (HID_UP_PID | 0x83UL)) { /* Simultaneous Effects Max */
-		dbg("Maximum Effects - %d",value);
+		dbg_hid("Maximum Effects - %d\n",value);
 		return;
 	}
 
 	if (usage->hid == (HID_UP_PID | 0x7fUL)) {
-		dbg("PID Pool Report\n");
+		dbg_hid("PID Pool Report\n");
 		return;
 	}
 
 	if ((usage->type == EV_KEY) && (usage->code == 0)) /* Key 0 is "unassigned", not KEY_UNKNOWN */
 		return;
 
+	if ((usage->type == EV_ABS) && (field->flags & HID_MAIN_ITEM_RELATIVE) &&
+			(usage->code == ABS_VOLUME)) {
+		int count = abs(value);
+		int direction = value > 0 ? KEY_VOLUMEUP : KEY_VOLUMEDOWN;
+		int i;
+
+		for (i = 0; i < count; i++) {
+			input_event(input, EV_KEY, direction, 1);
+			input_sync(input);
+			input_event(input, EV_KEY, direction, 0);
+			input_sync(input);
+		}
+		return;
+	}
+
 	input_event(input, usage->type, usage->code, value);
 
 	if ((field->flags & HID_MAIN_ITEM_RELATIVE) && (usage->type == EV_KEY))
@@ -976,7 +1059,7 @@
 			if (IS_INPUT_APPLICATION(hid->collection[i].usage))
 				break;
 
-	if (i == hid->maxcollection)
+	if (i == hid->maxcollection && (hid->quirks & HID_QUIRK_HIDINPUT) == 0)
 		return -1;
 
 	if (hid->quirks & HID_QUIRK_SKIP_OUTPUT_REPORTS)
@@ -994,7 +1077,7 @@
 				if (!hidinput || !input_dev) {
 					kfree(hidinput);
 					input_free_device(input_dev);
-					err("Out of memory during hid input probe");
+					err_hid("Out of memory during hid input probe");
 					return -1;
 				}
 
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index d91b9da..3afa4a5 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -60,6 +60,12 @@
 		" quirks=vendorID:productID:quirks"
 		" where vendorID, productID, and quirks are all in"
 		" 0x-prefixed hex");
+static char *rdesc_quirks_param[MAX_USBHID_BOOT_QUIRKS] = { [ 0 ... (MAX_USBHID_BOOT_QUIRKS - 1) ] = NULL };
+module_param_array_named(rdesc_quirks, rdesc_quirks_param, charp, NULL, 0444);
+MODULE_PARM_DESC(rdesc_quirks, "Add/modify report descriptor quirks by specifying "
+		" rdesc_quirks=vendorID:productID:rdesc_quirks"
+		" where vendorID, productID, and rdesc_quirks are all in"
+		" 0x-prefixed hex");
 /*
  * Input submission and I/O error handler.
  */
@@ -127,7 +133,7 @@
 			hid_io_error(hid);
 		break;
 	default:
-		err("can't reset device, %s-%s/input%d, status %d",
+		err_hid("can't reset device, %s-%s/input%d, status %d",
 				hid_to_usb_dev(hid)->bus->bus_name,
 				hid_to_usb_dev(hid)->devpath,
 				usbhid->ifnum, rc);
@@ -220,7 +226,7 @@
 	if (status) {
 		clear_bit(HID_IN_RUNNING, &usbhid->iofl);
 		if (status != -EPERM) {
-			err("can't resubmit intr, %s-%s/input%d, status %d",
+			err_hid("can't resubmit intr, %s-%s/input%d, status %d",
 					hid_to_usb_dev(hid)->bus->bus_name,
 					hid_to_usb_dev(hid)->devpath,
 					usbhid->ifnum, status);
@@ -240,10 +246,10 @@
 	usbhid->urbout->transfer_buffer_length = ((report->size - 1) >> 3) + 1 + (report->id > 0);
 	usbhid->urbout->dev = hid_to_usb_dev(hid);
 
-	dbg("submitting out urb");
+	dbg_hid("submitting out urb\n");
 
 	if (usb_submit_urb(usbhid->urbout, GFP_ATOMIC)) {
-		err("usb_submit_urb(out) failed");
+		err_hid("usb_submit_urb(out) failed");
 		return -1;
 	}
 
@@ -287,12 +293,12 @@
 	usbhid->cr->wIndex = cpu_to_le16(usbhid->ifnum);
 	usbhid->cr->wLength = cpu_to_le16(len);
 
-	dbg("submitting ctrl urb: %s wValue=0x%04x wIndex=0x%04x wLength=%u",
+	dbg_hid("submitting ctrl urb: %s wValue=0x%04x wIndex=0x%04x wLength=%u\n",
 		usbhid->cr->bRequest == HID_REQ_SET_REPORT ? "Set_Report" : "Get_Report",
 		usbhid->cr->wValue, usbhid->cr->wIndex, usbhid->cr->wLength);
 
 	if (usb_submit_urb(usbhid->urbctrl, GFP_ATOMIC)) {
-		err("usb_submit_urb(ctrl) failed");
+		err_hid("usb_submit_urb(ctrl) failed");
 		return -1;
 	}
 
@@ -474,7 +480,7 @@
 	if (!wait_event_timeout(hid->wait, (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl) &&
 					!test_bit(HID_OUT_RUNNING, &usbhid->iofl)),
 					10*HZ)) {
-		dbg("timeout waiting for ctrl or out queue to clear");
+		dbg_hid("timeout waiting for ctrl or out queue to clear\n");
 		return -1;
 	}
 
@@ -633,20 +639,6 @@
 }
 
 /*
- * Cherry Cymotion keyboard have an invalid HID report descriptor,
- * that needs fixing before we can parse it.
- */
-
-static void hid_fixup_cymotion_descriptor(char *rdesc, int rsize)
-{
-	if (rsize >= 17 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
-		info("Fixing up Cherry Cymotion report descriptor");
-		rdesc[11] = rdesc[16] = 0xff;
-		rdesc[12] = rdesc[17] = 0x03;
-	}
-}
-
-/*
  * Sending HID_REQ_GET_REPORT changes the operation mode of the ps3 controller
  * to "operational".  Without this, the ps3 controller will not report any
  * events.
@@ -667,51 +659,11 @@
 				 USB_CTRL_GET_TIMEOUT);
 
 	if (result < 0)
-		err("%s failed: %d\n", __func__, result);
+		err_hid("%s failed: %d\n", __func__, result);
 
 	kfree(buf);
 }
 
-/*
- * Certain Logitech keyboards send in report #3 keys which are far
- * above the logical maximum described in descriptor. This extends
- * the original value of 0x28c of logical maximum to 0x104d
- */
-static void hid_fixup_logitech_descriptor(unsigned char *rdesc, int rsize)
-{
-	if (rsize >= 90 && rdesc[83] == 0x26
-			&& rdesc[84] == 0x8c
-			&& rdesc[85] == 0x02) {
-		info("Fixing up Logitech keyboard report descriptor");
-		rdesc[84] = rdesc[89] = 0x4d;
-		rdesc[85] = rdesc[90] = 0x10;
-	}
-}
-
-/*
- * Some USB barcode readers from cypress have usage min and usage max in
- * the wrong order
- */
-static void hid_fixup_cypress_descriptor(unsigned char *rdesc, int rsize)
-{
-	short fixed = 0;
-	int i;
-
-	for (i = 0; i < rsize - 4; i++) {
-		if (rdesc[i] == 0x29 && rdesc [i+2] == 0x19) {
-			unsigned char tmp;
-
-			rdesc[i] = 0x19; rdesc[i+2] = 0x29;
-			tmp = rdesc[i+3];
-			rdesc[i+3] = rdesc[i+1];
-			rdesc[i+1] = tmp;
-		}
-	}
-
-	if (fixed)
-		info("Fixing up Cypress report descriptor");
-}
-
 static struct hid_device *usb_hid_configure(struct usb_interface *intf)
 {
 	struct usb_host_interface *interface = intf->cur_altsetting;
@@ -746,7 +698,7 @@
 	if (usb_get_extra_descriptor(interface, HID_DT_HID, &hdesc) &&
 	    (!interface->desc.bNumEndpoints ||
 	     usb_get_extra_descriptor(&interface->endpoint[0], HID_DT_HID, &hdesc))) {
-		dbg("class descriptor not present\n");
+		dbg_hid("class descriptor not present\n");
 		return NULL;
 	}
 
@@ -755,41 +707,34 @@
 			rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength);
 
 	if (!rsize || rsize > HID_MAX_DESCRIPTOR_SIZE) {
-		dbg("weird size of report descriptor (%u)", rsize);
+		dbg_hid("weird size of report descriptor (%u)\n", rsize);
 		return NULL;
 	}
 
 	if (!(rdesc = kmalloc(rsize, GFP_KERNEL))) {
-		dbg("couldn't allocate rdesc memory");
+		dbg_hid("couldn't allocate rdesc memory\n");
 		return NULL;
 	}
 
 	hid_set_idle(dev, interface->desc.bInterfaceNumber, 0, 0);
 
 	if ((n = hid_get_class_descriptor(dev, interface->desc.bInterfaceNumber, HID_DT_REPORT, rdesc, rsize)) < 0) {
-		dbg("reading report descriptor failed");
+		dbg_hid("reading report descriptor failed\n");
 		kfree(rdesc);
 		return NULL;
 	}
 
-	if ((quirks & HID_QUIRK_CYMOTION))
-		hid_fixup_cymotion_descriptor(rdesc, rsize);
+	usbhid_fixup_report_descriptor(le16_to_cpu(dev->descriptor.idVendor),
+			le16_to_cpu(dev->descriptor.idProduct), rdesc,
+			rsize, rdesc_quirks_param);
 
-	if (quirks & HID_QUIRK_LOGITECH_DESCRIPTOR)
-		hid_fixup_logitech_descriptor(rdesc, rsize);
-
-	if (quirks & HID_QUIRK_SWAPPED_MIN_MAX)
-		hid_fixup_cypress_descriptor(rdesc, rsize);
-
-#ifdef CONFIG_HID_DEBUG
-	printk(KERN_DEBUG __FILE__ ": report descriptor (size %u, read %d) = ", rsize, n);
+	dbg_hid("report descriptor (size %u, read %d) = ", rsize, n);
 	for (n = 0; n < rsize; n++)
-		printk(" %02x", (unsigned char) rdesc[n]);
-	printk("\n");
-#endif
+		dbg_hid_line(" %02x", (unsigned char) rdesc[n]);
+	dbg_hid_line("\n");
 
 	if (!(hid = hid_parse_report(rdesc, n))) {
-		dbg("parsing report descriptor failed");
+		dbg_hid("parsing report descriptor failed\n");
 		kfree(rdesc);
 		return NULL;
 	}
@@ -861,7 +806,7 @@
 	}
 
 	if (!usbhid->urbin) {
-		err("couldn't find an input interrupt endpoint");
+		err_hid("couldn't find an input interrupt endpoint");
 		goto fail;
 	}
 
@@ -956,7 +901,7 @@
 	usb_kill_urb(usbhid->urbctrl);
 
 	del_timer_sync(&usbhid->io_retry);
-	flush_scheduled_work();
+	cancel_work_sync(&usbhid->reset_work);
 
 	if (hid->claimed & HID_CLAIMED_INPUT)
 		hidinput_disconnect(hid);
@@ -978,7 +923,7 @@
 	int i;
 	char *c;
 
-	dbg("HID probe called for ifnum %d",
+	dbg_hid("HID probe called for ifnum %d\n",
 			intf->altsetting->desc.bInterfaceNumber);
 
 	if (!(hid = usb_hid_configure(intf)))
diff --git a/drivers/hid/usbhid/hid-lgff.c b/drivers/hid/usbhid/hid-lgff.c
index c5cd410..4b7ab6a 100644
--- a/drivers/hid/usbhid/hid-lgff.c
+++ b/drivers/hid/usbhid/hid-lgff.c
@@ -78,7 +78,7 @@
 		report->field[0]->value[1] = 0x08;
 		report->field[0]->value[2] = x;
 		report->field[0]->value[3] = y;
-		dbg("(x, y)=(%04x, %04x)", x, y);
+		dbg_hid("(x, y)=(%04x, %04x)\n", x, y);
 		usbhid_submit_report(hid, report, USB_DIR_OUT);
 		break;
 
@@ -93,7 +93,7 @@
 		report->field[0]->value[1] = 0x00;
 		report->field[0]->value[2] = left;
 		report->field[0]->value[3] = right;
-		dbg("(left, right)=(%04x, %04x)", left, right);
+		dbg_hid("(left, right)=(%04x, %04x)\n", left, right);
 		usbhid_submit_report(hid, report, USB_DIR_OUT);
 		break;
 	}
@@ -113,20 +113,20 @@
 
 	/* Find the report to use */
 	if (list_empty(report_list)) {
-		err("No output report found");
+		err_hid("No output report found");
 		return -1;
 	}
 
 	/* Check that the report looks ok */
 	report = list_entry(report_list->next, struct hid_report, list);
 	if (!report) {
-		err("NULL output report");
+		err_hid("NULL output report");
 		return -1;
 	}
 
 	field = report->field[0];
 	if (!field) {
-		err("NULL field");
+		err_hid("NULL field");
 		return -1;
 	}
 
diff --git a/drivers/hid/usbhid/hid-pidff.c b/drivers/hid/usbhid/hid-pidff.c
index f5a90e9..0113261 100644
--- a/drivers/hid/usbhid/hid-pidff.c
+++ b/drivers/hid/usbhid/hid-pidff.c
@@ -738,6 +738,7 @@
 	pidff->set_effect[PID_TRIGGER_BUTTON].value[0] = 0;
 	pidff->set_effect[PID_TRIGGER_REPEAT_INT].value[0] = 0;
 	pidff_set(&pidff->set_effect[PID_GAIN], magnitude);
+	pidff->set_effect[PID_DIRECTION_ENABLE].value[0] = 1;
 	pidff->set_effect[PID_START_DELAY].value[0] = 0;
 
 	usbhid_submit_report(pidff->hid, pidff->reports[PID_SET_EFFECT],
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index f6c4145..775b9f3 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -105,6 +105,9 @@
 #define USB_VENDOR_ID_ESSENTIAL_REALITY	0x0d7f
 #define USB_DEVICE_ID_ESSENTIAL_REALITY_P5 0x0100
 
+#define USB_VENDOR_ID_GAMERON		0x0810
+#define USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR	0x0001
+
 #define USB_VENDOR_ID_GLAB		0x06c2
 #define USB_DEVICE_ID_4_PHIDGETSERVO_30	0x0038
 #define USB_DEVICE_ID_1_PHIDGETSERVO_30	0x0039
@@ -196,8 +199,10 @@
 #define USB_VENDOR_ID_LOGITECH		0x046d
 #define USB_DEVICE_ID_LOGITECH_RECEIVER	0xc101
 #define USB_DEVICE_ID_LOGITECH_WHEEL	0xc294
+#define USB_DEVICE_ID_LOGITECH_KBD	0xc311
 #define USB_DEVICE_ID_S510_RECEIVER	0xc50c
 #define USB_DEVICE_ID_S510_RECEIVER_2	0xc517
+#define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500	0xc512
 #define USB_DEVICE_ID_MX3000_RECEIVER	0xc513
 #define USB_DEVICE_ID_DINOVO_EDGE	0xc714
 
@@ -209,6 +214,13 @@
 #define USB_DEVICE_ID_MGE_UPS		0xffff
 #define USB_DEVICE_ID_MGE_UPS1		0x0001
 
+#define USB_VENDOR_ID_MICROSOFT		0x045e
+#define USB_DEVICE_ID_SIDEWINDER_GV	0x003b
+
+#define USB_VENDOR_ID_NCR		0x0404
+#define USB_DEVICE_ID_NCR_FIRST		0x0300
+#define USB_DEVICE_ID_NCR_LAST		0x03ff
+
 #define USB_VENDOR_ID_NEC		0x073e
 #define USB_DEVICE_ID_NEC_USB_GAME_PAD	0x0301
 
@@ -220,6 +232,9 @@
 #define USB_VENDOR_ID_PANTHERLORD	0x0810
 #define USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK	0x0001
 
+#define USB_VENDOR_ID_PETALYNX		0x18b1
+#define USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE	0x0037
+
 #define USB_VENDOR_ID_PLAYDOTCOM	0x0b43
 #define USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII	0x0003
 
@@ -278,6 +293,7 @@
 	{ USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR, HID_QUIRK_BADPAD },
 	{ USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD },
 	{ USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD },
+	{ USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR, HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
@@ -285,11 +301,10 @@
 	{ USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD, HID_QUIRK_BADPAD },
 	{ USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD, HID_QUIRK_BADPAD },
 	
-	{ USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION, HID_QUIRK_CYMOTION },
-
 	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE, HID_QUIRK_DUPLICATE_USAGES },
 
 	{ USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM, HID_QUIRK_HIDDEV },
+	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV, HID_QUIRK_HIDINPUT },
 
 	{ USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_01, HID_QUIRK_IGNORE },
 	{ USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_10, HID_QUIRK_IGNORE },
@@ -409,9 +424,7 @@
 	{ USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_FLAIR, HID_QUIRK_IGNORE },
 	{ USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_302, HID_QUIRK_IGNORE },
 
-	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER, HID_QUIRK_LOGITECH_DESCRIPTOR },
-	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER, HID_QUIRK_LOGITECH_DESCRIPTOR },
-	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2, HID_QUIRK_LOGITECH_DESCRIPTOR },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500, HID_QUIRK_LOGITECH_IGNORE_DOUBLED_WHEEL | HID_QUIRK_LOGITECH_EXPANDED_KEYMAP },
 
 	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE, HID_QUIRK_MIGHTYMOUSE | HID_QUIRK_INVERT_HWHEEL },
 
@@ -426,6 +439,7 @@
 	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL, HID_QUIRK_NOGET },
+	{ USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
@@ -448,9 +462,28 @@
 	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
 
 	{ USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_W7658, HID_QUIRK_RESET_LEDS },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KBD, HID_QUIRK_RESET_LEDS },
 
-	{ USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1, HID_QUIRK_SWAPPED_MIN_MAX },
-	{ USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2, HID_QUIRK_SWAPPED_MIN_MAX },
+	{ 0, 0 }
+};
+
+/* Quirks for devices which require report descriptor fixup go here */
+static const struct hid_rdesc_blacklist {
+	__u16 idVendor;
+	__u16 idProduct;
+	__u32 quirks;
+} hid_rdesc_blacklist[] = {
+
+	{ USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION, HID_QUIRK_RDESC_CYMOTION },
+
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER, HID_QUIRK_RDESC_LOGITECH },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER, HID_QUIRK_RDESC_LOGITECH },
+	{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2, HID_QUIRK_RDESC_LOGITECH },
+
+	{ USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE, HID_QUIRK_RDESC_PETALYNX },
+
+	{ USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1, HID_QUIRK_RDESC_SWAPPED_MIN_MAX },
+	{ USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2, HID_QUIRK_RDESC_SWAPPED_MIN_MAX },
 
 	{ 0, 0 }
 };
@@ -493,7 +526,7 @@
 	}
 
 	if (bl_entry != NULL)
-		dbg("Found dynamic quirk 0x%x for USB HID vendor 0x%hx prod 0x%hx\n",
+		dbg_hid("Found dynamic quirk 0x%x for USB HID vendor 0x%hx prod 0x%hx\n",
 				bl_entry->quirks, bl_entry->idVendor,
 				bl_entry->idProduct);
 
@@ -521,13 +554,13 @@
 	int list_edited = 0;
 
 	if (!idVendor) {
-		dbg("Cannot add a quirk with idVendor = 0");
+		dbg_hid("Cannot add a quirk with idVendor = 0\n");
 		return -EINVAL;
 	}
 
 	q_new = kmalloc(sizeof(struct quirks_list_struct), GFP_KERNEL);
 	if (!q_new) {
-		dbg("Could not allocate quirks_list_struct");
+		dbg_hid("Could not allocate quirks_list_struct\n");
 		return -ENOMEM;
 	}
 
@@ -559,7 +592,6 @@
 	return 0;
 }
 
-
 /**
  * usbhid_remove_all_dquirks: remove all runtime HID quirks from memory
  *
@@ -643,7 +675,7 @@
 			bl_entry = &hid_blacklist[n];
 
 	if (bl_entry != NULL)
-		dbg("Found squirk 0x%x for USB HID vendor 0x%hx prod 0x%hx\n",
+		dbg_hid("Found squirk 0x%x for USB HID vendor 0x%hx prod 0x%hx\n",
 				bl_entry->quirks, bl_entry->idVendor, 
 				bl_entry->idProduct);
 	return bl_entry;
@@ -675,6 +707,12 @@
 				idProduct <= USB_DEVICE_ID_CODEMERCS_IOW_LAST)
 			return HID_QUIRK_IGNORE;
 
+	/* NCR devices must not be queried for reports */
+	if (idVendor == USB_VENDOR_ID_NCR &&
+			idProduct >= USB_DEVICE_ID_NCR_FIRST &&
+			idProduct <= USB_DEVICE_ID_NCR_LAST)
+			return HID_QUIRK_NOGET;
+
 	down_read(&dquirks_rwsem);
 	bl_entry = usbhid_exists_dquirk(idVendor, idProduct);
 	if (!bl_entry)
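The NCR range cannot live in the static blacklist above, which matches exact product IDs, so it is special-cased before the table lookups. A stand-alone restatement of the same range test (user-space C; the helper name is ours, not a kernel API):

#include <stdio.h>

#define USB_VENDOR_ID_NCR	0x0404
#define USB_DEVICE_ID_NCR_FIRST	0x0300
#define USB_DEVICE_ID_NCR_LAST	0x03ff

/* returns nonzero when the device falls inside the quirky NCR range */
static int ncr_needs_noget(unsigned short idVendor, unsigned short idProduct)
{
	return idVendor == USB_VENDOR_ID_NCR &&
	       idProduct >= USB_DEVICE_ID_NCR_FIRST &&
	       idProduct <= USB_DEVICE_ID_NCR_LAST;
}

int main(void)
{
	printf("%d %d\n", ncr_needs_noget(0x0404, 0x0345),
			  ncr_needs_noget(0x0404, 0x0400));
	return 0;	/* prints: 1 0 */
}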
@@ -686,3 +724,126 @@
 	return quirks;
 }
 
+/*
+ * Cherry Cymotion keyboards have an invalid HID report descriptor
+ * that needs fixing before we can parse it.
+ */
+static void usbhid_fixup_cymotion_descriptor(char *rdesc, int rsize)
+{
+	if (rsize >= 18 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
+		printk(KERN_INFO "Fixing up Cherry Cymotion report descriptor\n");
+		rdesc[11] = rdesc[16] = 0xff;
+		rdesc[12] = rdesc[17] = 0x03;
+	}
+}
+
+/*
+ * Certain Logitech keyboards send, in report #3, keys which are far
+ * above the logical maximum described in the descriptor. This fixup
+ * extends the original logical maximum of 0x28c to 0x104d.
+ */
+static void usbhid_fixup_logitech_descriptor(unsigned char *rdesc, int rsize)
+{
+	if (rsize >= 91 && rdesc[83] == 0x26
+			&& rdesc[84] == 0x8c
+			&& rdesc[85] == 0x02) {
+		printk(KERN_INFO "Fixing up Logitech keyboard report descriptor\n");
+		rdesc[84] = rdesc[89] = 0x4d;
+		rdesc[85] = rdesc[90] = 0x10;
+	}
+}
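For reference, 0x26 introduces a two-byte, little-endian Logical Maximum item, so the bytes at offsets 84/85 encode 0x028c and are rewritten to encode 0x104d; the pair at 89/90 is patched the same way. A stand-alone sketch of the rewrite on a fabricated descriptor fragment:

#include <stdio.h>

int main(void)
{
	unsigned char rdesc[91] = { 0 };	/* fabricated fragment */

	rdesc[83] = 0x26;			/* Logical Maximum, 2-byte payload */
	rdesc[84] = 0x8c;			/* 0x028c, little-endian */
	rdesc[85] = 0x02;

	if (rdesc[83] == 0x26 && rdesc[84] == 0x8c && rdesc[85] == 0x02) {
		rdesc[84] = rdesc[89] = 0x4d;	/* becomes 0x104d */
		rdesc[85] = rdesc[90] = 0x10;
	}
	printf("logical maximum is now 0x%02x%02x\n", rdesc[85], rdesc[84]);
	return 0;	/* prints: logical maximum is now 0x104d */
}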
+
+/* The Petalynx Maxter Remote has the logical maximum for its consumer page set too low */
+static void usbhid_fixup_petalynx_descriptor(unsigned char *rdesc, int rsize)
+{
+	if (rsize >= 62 && rdesc[39] == 0x2a
+			&& rdesc[40] == 0xf5
+			&& rdesc[41] == 0x00
+			&& rdesc[59] == 0x26
+			&& rdesc[60] == 0xf9
+			&& rdesc[61] == 0x00) {
+		printk(KERN_INFO "Fixing up Petalynx Maxter Remote report descriptor\n");
+		rdesc[60] = 0xfa;
+		rdesc[40] = 0xfa;
+	}
+}
+
+/*
+ * Some USB barcode readers from Cypress have usage min and usage max in
+ * the wrong order
+ */
+static void usbhid_fixup_cypress_descriptor(unsigned char *rdesc, int rsize)
+{
+	short fixed = 0;
+	int i;
+
+	for (i = 0; i < rsize - 4; i++) {
+		if (rdesc[i] == 0x29 && rdesc[i+2] == 0x19) {
+			unsigned char tmp;
+
+			rdesc[i] = 0x19; rdesc[i+2] = 0x29;
+			tmp = rdesc[i+3];
+			rdesc[i+3] = rdesc[i+1];
+			rdesc[i+1] = tmp;
+			fixed = 1;
+		}
+	}
+
+	if (fixed)
+		printk(KERN_INFO "Fixing up Cypress report descriptor\n");
+}
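In HID short items, 0x19 is a one-byte Usage Minimum tag and 0x29 a one-byte Usage Maximum tag; the affected readers emit the pair in reverse order, so the loop exchanges both tags and both payloads. A stand-alone demonstration on a fabricated fragment:

#include <stdio.h>

int main(void)
{
	/* Max(5) before Min(1), plus filler bytes so the loop runs */
	unsigned char rdesc[] = { 0x29, 0x05, 0x19, 0x01, 0xc0, 0x00 };
	int i, rsize = sizeof(rdesc);

	for (i = 0; i < rsize - 4; i++)
		if (rdesc[i] == 0x29 && rdesc[i + 2] == 0x19) {
			unsigned char tmp = rdesc[i + 3];

			rdesc[i] = 0x19; rdesc[i + 2] = 0x29;
			rdesc[i + 3] = rdesc[i + 1];
			rdesc[i + 1] = tmp;
		}
	printf("%02x %02x %02x %02x\n",
	       rdesc[0], rdesc[1], rdesc[2], rdesc[3]);
	return 0;	/* prints: 19 01 29 05 */
}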
+
+static void __usbhid_fixup_report_descriptor(__u32 quirks, char *rdesc, unsigned rsize)
+{
+	if (quirks & HID_QUIRK_RDESC_CYMOTION)
+		usbhid_fixup_cymotion_descriptor(rdesc, rsize);
+
+	if (quirks & HID_QUIRK_RDESC_LOGITECH)
+		usbhid_fixup_logitech_descriptor(rdesc, rsize);
+
+	if (quirks & HID_QUIRK_RDESC_SWAPPED_MIN_MAX)
+		usbhid_fixup_cypress_descriptor(rdesc, rsize);
+
+	if (quirks & HID_QUIRK_RDESC_PETALYNX)
+		usbhid_fixup_petalynx_descriptor(rdesc, rsize);
+}
+
+/**
+ * usbhid_fixup_report_descriptor: check if report descriptor needs fixup
+ *
+ * Description:
+ *	Walks the hid_rdesc_blacklist[] array and checks whether the device
+ *	is known to have a broken report descriptor that needs to be fixed
+ *	up prior to entering the HID parser.
+ *
+ * Returns: nothing
+ */
+void usbhid_fixup_report_descriptor(const u16 idVendor, const u16 idProduct,
+				    char *rdesc, unsigned rsize, char **quirks_param)
+{
+	int n, m;
+	u16 paramVendor, paramProduct;
+	u32 quirks;
+
+	/* static rdesc quirk entries */
+	for (n = 0; hid_rdesc_blacklist[n].idVendor; n++)
+		if (hid_rdesc_blacklist[n].idVendor == idVendor &&
+				hid_rdesc_blacklist[n].idProduct == idProduct)
+			__usbhid_fixup_report_descriptor(hid_rdesc_blacklist[n].quirks,
+					rdesc, rsize);
+
+	/* runtime rdesc quirk entries handling */
+	for (n = 0; n < MAX_USBHID_BOOT_QUIRKS && quirks_param[n]; n++) {
+		m = sscanf(quirks_param[n], "0x%hx:0x%hx:0x%x",
+				&paramVendor, &paramProduct, &quirks);
+
+		if (m != 3)
+			printk(KERN_WARNING
+				"Could not parse HID quirk module param %s\n",
+				quirks_param[n]);
+		else if (paramVendor == idVendor && paramProduct == idProduct)
+			__usbhid_fixup_report_descriptor(quirks, rdesc, rsize);
+	}
+}
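The runtime branch parses the same vendor:product:quirks triples that the usbhid 'quirks=' module parameter carries. A user-space sketch of the identical sscanf() contract (the quirk value 0x0008 is illustrative only, not a particular HID_QUIRK_* constant):

#include <stdio.h>

int main(void)
{
	const char *param = "0x046d:0xc294:0x0008";	/* vendor:product:quirks */
	unsigned short idVendor, idProduct;
	unsigned int quirks;

	if (sscanf(param, "0x%hx:0x%hx:0x%x",
		   &idVendor, &idProduct, &quirks) != 3)
		fprintf(stderr, "could not parse %s\n", param);
	else
		printf("vendor 0x%04hx product 0x%04hx quirks 0x%x\n",
		       idVendor, idProduct, quirks);
	return 0;
}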
diff --git a/drivers/hid/usbhid/hid-tmff.c b/drivers/hid/usbhid/hid-tmff.c
index ab5ba6e..555bb48 100644
--- a/drivers/hid/usbhid/hid-tmff.c
+++ b/drivers/hid/usbhid/hid-tmff.c
@@ -70,7 +70,7 @@
 
 	tmff->rumble->value[0] = left;
 	tmff->rumble->value[1] = right;
-	dbg("(left,right)=(%08x, %08x)", left, right);
+	dbg_hid("(left,right)=(%08x, %08x)\n", left, right);
 	usbhid_submit_report(hid, tmff->report, USB_DIR_OUT);
 
 	return 0;
diff --git a/drivers/hid/usbhid/hid-zpff.c b/drivers/hid/usbhid/hid-zpff.c
index a7fbffc..5a68827 100644
--- a/drivers/hid/usbhid/hid-zpff.c
+++ b/drivers/hid/usbhid/hid-zpff.c
@@ -21,10 +21,6 @@
  */
 
 
-/* #define DEBUG */
-
-#define debug(format, arg...) pr_debug("hid-zpff: " format "\n" , ## arg)
-
 #include <linux/input.h>
 #include <linux/usb.h>
 #include <linux/hid.h>
@@ -49,14 +45,14 @@
 
 	left = effect->u.rumble.strong_magnitude;
 	right = effect->u.rumble.weak_magnitude;
-	debug("called with 0x%04x 0x%04x", left, right);
+	dbg_hid("called with 0x%04x 0x%04x\n", left, right);
 
 	left = left * 0x7f / 0xffff;
 	right = right * 0x7f / 0xffff;
 
 	zpff->report->field[2]->value[0] = left;
 	zpff->report->field[3]->value[0] = right;
-	debug("running with 0x%02x 0x%02x", left, right);
+	dbg_hid("running with 0x%02x 0x%02x\n", left, right);
 	usbhid_submit_report(hid, zpff->report, USB_DIR_OUT);
 
 	return 0;
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 488d61b..e793127 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -779,7 +779,7 @@
 
 	retval = usb_register_dev(usbhid->intf, &hiddev_class);
 	if (retval) {
-		err("Not able to get a minor for this device.");
+		err_hid("Not able to get a minor for this device.");
 		kfree(hiddev);
 		return -1;
 	}
diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
index 1309787..b76b02f 100644
--- a/drivers/hid/usbhid/usbkbd.c
+++ b/drivers/hid/usbhid/usbkbd.c
@@ -125,7 +125,7 @@
 resubmit:
 	i = usb_submit_urb (urb, GFP_ATOMIC);
 	if (i)
-		err ("can't resubmit intr, %s-%s/input0, status %d",
+		err_hid ("can't resubmit intr, %s-%s/input0, status %d",
 				kbd->usbdev->bus->bus_name,
 				kbd->usbdev->devpath, i);
 }
@@ -151,7 +151,7 @@
 	*(kbd->leds) = kbd->newleds;
 	kbd->led->dev = kbd->usbdev;
 	if (usb_submit_urb(kbd->led, GFP_ATOMIC))
-		err("usb_submit_urb(leds) failed");
+		err_hid("usb_submit_urb(leds) failed");
 
 	return 0;
 }
@@ -169,7 +169,7 @@
 	*(kbd->leds) = kbd->newleds;
 	kbd->led->dev = kbd->usbdev;
 	if (usb_submit_urb(kbd->led, GFP_ATOMIC))
-		err("usb_submit_urb(leds) failed");
+		err_hid("usb_submit_urb(leds) failed");
 }
 
 static int usb_kbd_open(struct input_dev *dev)
diff --git a/drivers/ide/arm/icside.c b/drivers/ide/arm/icside.c
index 66f8262..444a0b8 100644
--- a/drivers/ide/arm/icside.c
+++ b/drivers/ide/arm/icside.c
@@ -448,23 +448,21 @@
 			ICS_ARCIN_V6_INTRSTAT_1)) & 1;
 }
 
-static int icside_dma_timeout(ide_drive_t *drive)
+static void icside_dma_timeout(ide_drive_t *drive)
 {
 	printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);
 
 	if (icside_dma_test_irq(drive))
-		return 0;
+		return;
 
-	ide_dump_status(drive, "DMA timeout",
-		HWIF(drive)->INB(IDE_STATUS_REG));
+	ide_dump_status(drive, "DMA timeout", HWIF(drive)->INB(IDE_STATUS_REG));
 
-	return icside_dma_end(drive);
+	icside_dma_end(drive);
 }
 
-static int icside_dma_lostirq(ide_drive_t *drive)
+static void icside_dma_lost_irq(ide_drive_t *drive)
 {
 	printk(KERN_ERR "%s: IRQ lost\n", drive->name);
-	return 1;
 }
 
 static void icside_dma_init(ide_hwif_t *hwif)
@@ -490,8 +488,8 @@
 	hwif->dma_start		= icside_dma_start;
 	hwif->ide_dma_end	= icside_dma_end;
 	hwif->ide_dma_test_irq	= icside_dma_test_irq;
-	hwif->ide_dma_timeout	= icside_dma_timeout;
-	hwif->ide_dma_lostirq	= icside_dma_lostirq;
+	hwif->dma_timeout	= icside_dma_timeout;
+	hwif->dma_lost_irq	= icside_dma_lost_irq;
 
 	hwif->drives[0].autodma = hwif->autodma;
 	hwif->drives[1].autodma = hwif->autodma;
diff --git a/drivers/ide/cris/ide-cris.c b/drivers/ide/cris/ide-cris.c
index ca0341c..886091b 100644
--- a/drivers/ide/cris/ide-cris.c
+++ b/drivers/ide/cris/ide-cris.c
@@ -819,7 +819,7 @@
 		hwif->dma_host_off = &cris_dma_off;
 		hwif->dma_host_on = &cris_dma_on;
 		hwif->dma_off_quietly = &cris_dma_off;
-		hwif->udma_four = 0;
+		hwif->cbl = ATA_CBL_PATA40;
 		hwif->ultra_mask = cris_ultra_mask;
 		hwif->mwdma_mask = 0x07; /* Multiword DMA 0-2 */
 		hwif->autodma = 1;
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 252ab82..1486eb2 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -481,7 +481,7 @@
 		else
 			printk("  Unknown Error Type: ");
 
-		if (sense->sense_key < ARY_LEN(sense_key_texts))
+		if (sense->sense_key < ARRAY_SIZE(sense_key_texts))
 			s = sense_key_texts[sense->sense_key];
 
 		printk("%s -- (Sense key=0x%02x)\n", s, sense->sense_key);
@@ -491,7 +491,7 @@
 				 sense->ascq);
 			s = buf;
 		} else {
-			int lo = 0, mid, hi = ARY_LEN(sense_data_texts);
+			int lo = 0, mid, hi = ARRAY_SIZE(sense_data_texts);
 			unsigned long key = (sense->sense_key << 16);
 			key |= (sense->asc << 8);
 			if (!(sense->ascq >= 0x80 && sense->ascq <= 0xdd))
@@ -524,7 +524,7 @@
 
 		if (failed_command != NULL) {
 
-			int lo=0, mid, hi= ARY_LEN (packet_command_texts);
+			int lo=0, mid, hi= ARRAY_SIZE(packet_command_texts);
 			s = NULL;
 
 			while (hi > lo) {
diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h
index ad1f2ed..228b29c 100644
--- a/drivers/ide/ide-cd.h
+++ b/drivers/ide/ide-cd.h
@@ -498,8 +498,6 @@
  * Descriptions of ATAPI error codes.
  */
 
-#define ARY_LEN(a) ((sizeof(a) / sizeof(a[0])))
-
 /* This stuff should be in cdrom.h, since it is now generic... */
 
 /* ATAPI sense keys (from table 140 of ATAPI 2.6) */
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index dc2175c..b1304a7 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -1190,11 +1190,11 @@
 	return generic_ide_ioctl(drive, file, bdev, cmd, arg);
 
 read_val:
-	down(&ide_setting_sem);
+	mutex_lock(&ide_setting_mtx);
 	spin_lock_irqsave(&ide_lock, flags);
 	err = *val;
 	spin_unlock_irqrestore(&ide_lock, flags);
-	up(&ide_setting_sem);
+	mutex_unlock(&ide_setting_mtx);
 	return err >= 0 ? put_user(err, (long __user *)arg) : err;
 
 set_val:
@@ -1204,9 +1204,9 @@
 		if (!capable(CAP_SYS_ADMIN))
 			err = -EACCES;
 		else {
-			down(&ide_setting_sem);
+			mutex_lock(&ide_setting_mtx);
 			err = setfunc(drive, arg);
-			up(&ide_setting_sem);
+			mutex_unlock(&ide_setting_mtx);
 		}
 	}
 	return err;
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index ead141e..5fe1d72 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -91,45 +91,45 @@
 
 static const struct drive_list_entry drive_whitelist [] = {
 
-	{ "Micropolis 2112A"	,       "ALL"		},
-	{ "CONNER CTMA 4000"	,       "ALL"		},
-	{ "CONNER CTT8000-A"	,       "ALL"		},
-	{ "ST34342A"		,	"ALL"		},
+	{ "Micropolis 2112A"	,       NULL		},
+	{ "CONNER CTMA 4000"	,       NULL		},
+	{ "CONNER CTT8000-A"	,       NULL		},
+	{ "ST34342A"		,	NULL		},
 	{ NULL			,	NULL		}
 };
 
 static const struct drive_list_entry drive_blacklist [] = {
 
-	{ "WDC AC11000H"	,	"ALL"		},
-	{ "WDC AC22100H"	,	"ALL"		},
-	{ "WDC AC32500H"	,	"ALL"		},
-	{ "WDC AC33100H"	,	"ALL"		},
-	{ "WDC AC31600H"	,	"ALL"		},
+	{ "WDC AC11000H"	,	NULL 		},
+	{ "WDC AC22100H"	,	NULL 		},
+	{ "WDC AC32500H"	,	NULL 		},
+	{ "WDC AC33100H"	,	NULL 		},
+	{ "WDC AC31600H"	,	NULL 		},
 	{ "WDC AC32100H"	,	"24.09P07"	},
 	{ "WDC AC23200L"	,	"21.10N21"	},
-	{ "Compaq CRD-8241B"	,	"ALL"		},
-	{ "CRD-8400B"		,	"ALL"		},
-	{ "CRD-8480B",			"ALL"		},
-	{ "CRD-8482B",			"ALL"		},
- 	{ "CRD-84"		,	"ALL"		},
-	{ "SanDisk SDP3B"	,	"ALL"		},
-	{ "SanDisk SDP3B-64"	,	"ALL"		},
-	{ "SANYO CD-ROM CRD"	,	"ALL"		},
-	{ "HITACHI CDR-8"	,	"ALL"		},
-	{ "HITACHI CDR-8335"	,	"ALL"		},
-	{ "HITACHI CDR-8435"	,	"ALL"		},
-	{ "Toshiba CD-ROM XM-6202B"	,	"ALL"		},
-	{ "TOSHIBA CD-ROM XM-1702BC",	"ALL"		},
-	{ "CD-532E-A"		,	"ALL"		},
-	{ "E-IDE CD-ROM CR-840",	"ALL"		},
-	{ "CD-ROM Drive/F5A",	"ALL"		},
-	{ "WPI CDD-820",		"ALL"		},
-	{ "SAMSUNG CD-ROM SC-148C",	"ALL"		},
-	{ "SAMSUNG CD-ROM SC",	"ALL"		},
-	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",	"ALL"		},
-	{ "_NEC DV5800A",               "ALL"           },  
+	{ "Compaq CRD-8241B"	,	NULL 		},
+	{ "CRD-8400B"		,	NULL 		},
+	{ "CRD-8480B",			NULL 		},
+	{ "CRD-8482B",			NULL 		},
+	{ "CRD-84"		,	NULL 		},
+	{ "SanDisk SDP3B"	,	NULL 		},
+	{ "SanDisk SDP3B-64"	,	NULL 		},
+	{ "SANYO CD-ROM CRD"	,	NULL 		},
+	{ "HITACHI CDR-8"	,	NULL 		},
+	{ "HITACHI CDR-8335"	,	NULL 		},
+	{ "HITACHI CDR-8435"	,	NULL 		},
+	{ "Toshiba CD-ROM XM-6202B"	,	NULL 		},
+	{ "TOSHIBA CD-ROM XM-1702BC",	NULL 		},
+	{ "CD-532E-A"		,	NULL 		},
+	{ "E-IDE CD-ROM CR-840",	NULL 		},
+	{ "CD-ROM Drive/F5A",	NULL 		},
+	{ "WPI CDD-820",		NULL 		},
+	{ "SAMSUNG CD-ROM SC-148C",	NULL 		},
+	{ "SAMSUNG CD-ROM SC",	NULL 		},
+	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",	NULL 		},
+	{ "_NEC DV5800A",               NULL            },
 	{ "SAMSUNG CD-ROM SN-124",	"N001" },
-	{ "Seagate STT20000A",		"ALL" },
+	{ "Seagate STT20000A",		NULL  },
 	{ NULL			,	NULL		}
 
 };
@@ -147,8 +147,8 @@
 {
 	for ( ; drive_table->id_model ; drive_table++)
 		if ((!strcmp(drive_table->id_model, id->model)) &&
-		    ((strstr(id->fw_rev, drive_table->id_firmware)) ||
-		     (!strcmp(drive_table->id_firmware, "ALL"))))
+		    (!drive_table->id_firmware ||
+		     strstr(id->fw_rev, drive_table->id_firmware)))
 			return 1;
 	return 0;
 }
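A NULL id_firmware entry is now the wildcard that the magic "ALL" string used to be, so a firmware field can no longer match by accident through strstr(). A stand-alone sketch of the new matching rule:

#include <stdio.h>
#include <string.h>

struct drive_list_entry {
	const char *id_model;
	const char *id_firmware;
};

/* NULL firmware matches any revision; otherwise substring match */
static int in_drive_list(const char *model, const char *fw_rev,
			 const struct drive_list_entry *t)
{
	for ( ; t->id_model; t++)
		if (!strcmp(t->id_model, model) &&
		    (!t->id_firmware || strstr(fw_rev, t->id_firmware)))
			return 1;
	return 0;
}

int main(void)
{
	static const struct drive_list_entry bl[] = {
		{ "WDC AC11000H", NULL },	/* any firmware revision */
		{ "WDC AC32100H", "24.09P07" },	/* one specific revision */
		{ NULL, NULL }
	};

	printf("%d %d\n",
	       in_drive_list("WDC AC11000H", "00.00A00", bl),
	       in_drive_list("WDC AC32100H", "21.10N21", bl));
	return 0;	/* prints: 1 0 */
}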
@@ -702,8 +702,22 @@
 			mask = id->dma_mword & hwif->mwdma_mask;
 		break;
 	case XFER_SW_DMA_0:
-		if (id->field_valid & 2)
+		if (id->field_valid & 2) {
 			mask = id->dma_1word & hwif->swdma_mask;
+		} else if (id->tDMA) {
+			/*
+			 * ide_fix_driveid() doesn't convert ->tDMA to the
+			 * CPU endianness so we need to do it here
+			 */
+			u8 mode = le16_to_cpu(id->tDMA);
+
+			/*
+			 * if the mode is valid convert it to the mask
+			 * (the maximum allowed mode is XFER_SW_DMA_2)
+			 */
+			if (mode <= 2)
+				mask = ((2 << mode) - 1) & hwif->swdma_mask;
+		}
 		break;
 	default:
 		BUG();
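The conversion from a single mode number to a mode mask is the usual (2 << mode) - 1 trick: the highest supported mode and every lower one get a bit, and the interface's swdma_mask then clips the result. A quick stand-alone check:

#include <stdio.h>

int main(void)
{
	int mode;
	unsigned char swdma_mask = 0x07;	/* hwif supports SWDMA 0-2 */

	for (mode = 0; mode <= 2; mode++)
		printf("mode %d -> mask 0x%02x\n", mode,
		       ((2 << mode) - 1) & swdma_mask);
	return 0;	/* masks: 0x01, 0x03, 0x07 */
}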
@@ -847,27 +861,27 @@
 	return rc;
 }
 
-EXPORT_SYMBOL_GPL(ide_set_dma);
-
 #ifdef CONFIG_BLK_DEV_IDEDMA_PCI
-int __ide_dma_lostirq (ide_drive_t *drive)
+void ide_dma_lost_irq (ide_drive_t *drive)
 {
 	printk("%s: DMA interrupt recovery\n", drive->name);
-	return 1;
 }
 
-EXPORT_SYMBOL(__ide_dma_lostirq);
+EXPORT_SYMBOL(ide_dma_lost_irq);
 
-int __ide_dma_timeout (ide_drive_t *drive)
+void ide_dma_timeout (ide_drive_t *drive)
 {
-	printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);
-	if (HWIF(drive)->ide_dma_test_irq(drive))
-		return 0;
+	ide_hwif_t *hwif = HWIF(drive);
 
-	return HWIF(drive)->ide_dma_end(drive);
+	printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);
+
+	if (hwif->ide_dma_test_irq(drive))
+		return;
+
+	hwif->ide_dma_end(drive);
 }
 
-EXPORT_SYMBOL(__ide_dma_timeout);
+EXPORT_SYMBOL(ide_dma_timeout);
 
 /*
  * Needed for allowing full modular support of ide-driver
@@ -1018,10 +1032,10 @@
 		hwif->ide_dma_end = &__ide_dma_end;
 	if (!hwif->ide_dma_test_irq)
 		hwif->ide_dma_test_irq = &__ide_dma_test_irq;
-	if (!hwif->ide_dma_timeout)
-		hwif->ide_dma_timeout = &__ide_dma_timeout;
-	if (!hwif->ide_dma_lostirq)
-		hwif->ide_dma_lostirq = &__ide_dma_lostirq;
+	if (!hwif->dma_timeout)
+		hwif->dma_timeout = &ide_dma_timeout;
+	if (!hwif->dma_lost_irq)
+		hwif->dma_lost_irq = &ide_dma_lost_irq;
 
 	if (hwif->chipset != ide_trm290) {
 		u8 dma_stat = hwif->INB(hwif->dma_status);
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index bfe8f1b..c5b5011 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -1350,7 +1350,7 @@
 						hwif->INB(IDE_STATUS_REG));
 	} else {
 		printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
-		(void) hwif->ide_dma_timeout(drive);
+		hwif->dma_timeout(drive);
 	}
 
 	/*
@@ -1466,7 +1466,7 @@
 				startstop = handler(drive);
 			} else if (drive_is_ready(drive)) {
 				if (drive->waiting_for_dma)
-					(void) hwgroup->hwif->ide_dma_lostirq(drive);
+					hwgroup->hwif->dma_lost_irq(drive);
 				(void)ide_ack_intr(hwif);
 				printk(KERN_WARNING "%s: lost interrupt\n", drive->name);
 				startstop = handler(drive);
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index f0be5f6..92578b6 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -574,7 +574,10 @@
 	ide_hwif_t *hwif = drive->hwif;
 	struct hd_driveid *id = drive->id;
 
-	if (hwif->udma_four == 0)
+	if (hwif->cbl == ATA_CBL_PATA40_SHORT)
+		return 1;
+
+	if (hwif->cbl != ATA_CBL_PATA80)
 		goto no_80w;
 
 	/* Check for SATA but only if we are ATA5 or higher */
@@ -600,7 +603,8 @@
 
 	printk(KERN_WARNING "%s: %s side 80-wire cable detection failed, "
 			    "limiting max speed to UDMA33\n",
-			    drive->name, hwif->udma_four ? "drive" : "host");
+			    drive->name,
+			    hwif->cbl == ATA_CBL_PATA80 ? "drive" : "host");
 
 	drive->udma33_warned = 1;
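hwif->cbl replaces the old udma_four boolean with the cable types from <linux/ata.h>, which is what lets a "short cable" override bypass the host-side check entirely. A condensed user-space sketch of the decision above (enum values as in <linux/ata.h> of this kernel; may_use_udma66() is our illustration, not a kernel function):

#include <stdio.h>

enum {	/* values as in <linux/ata.h> */
	ATA_CBL_PATA40		= 1,
	ATA_CBL_PATA80		= 2,
	ATA_CBL_PATA40_SHORT	= 3,
};

static int may_use_udma66(int cbl, int drive_side_ok)
{
	if (cbl == ATA_CBL_PATA40_SHORT)	/* trusted short cable */
		return 1;
	if (cbl != ATA_CBL_PATA80)		/* plain 40-wire: never */
		return 0;
	return drive_side_ok;			/* 80-wire: drive must agree */
}

int main(void)
{
	printf("%d %d %d\n",
	       may_use_udma66(ATA_CBL_PATA40_SHORT, 0),
	       may_use_udma66(ATA_CBL_PATA40, 1),
	       may_use_udma66(ATA_CBL_PATA80, 1));
	return 0;	/* prints: 1 0 1 */
}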
 
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index f5ce22c..cc58013 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -144,7 +144,7 @@
 	local_irq_enable();
 	ide_fix_driveid(id);
 
-#if defined (CONFIG_SCSI_EATA_DMA) || defined (CONFIG_SCSI_EATA_PIO) || defined (CONFIG_SCSI_EATA)
+#if defined (CONFIG_SCSI_EATA_PIO) || defined (CONFIG_SCSI_EATA)
 	/*
 	 * EATA SCSI controllers do a hardware ATA emulation:
 	 * Ignore them if there is a driver for them available.
@@ -154,7 +154,7 @@
 		printk("%s: EATA SCSI HBA %.10s\n", drive->name, id->model);
 		goto err_misc;
 	}
-#endif /* CONFIG_SCSI_EATA_DMA || CONFIG_SCSI_EATA_PIO */
+#endif /* CONFIG_SCSI_EATA || CONFIG_SCSI_EATA_PIO */
 
 	/*
 	 *  WIN_IDENTIFY returns little-endian info,
@@ -1025,7 +1025,7 @@
 	BUG_ON(irqs_disabled());	
 	BUG_ON(hwif == NULL);
 
-	down(&ide_cfg_sem);
+	mutex_lock(&ide_cfg_mtx);
 	hwif->hwgroup = NULL;
 #if MAX_HWIFS > 1
 	/*
@@ -1154,7 +1154,7 @@
 		printk(" (%sed with %s)",
 			hwif->sharing_irq ? "shar" : "serializ", match->name);
 	printk("\n");
-	up(&ide_cfg_sem);
+	mutex_unlock(&ide_cfg_mtx);
 	return 0;
 out_unlink:
 	spin_lock_irq(&ide_lock);
@@ -1177,7 +1177,7 @@
 	}
 	spin_unlock_irq(&ide_lock);
 out_up:
-	up(&ide_cfg_sem);
+	mutex_unlock(&ide_cfg_mtx);
 	return 1;
 }
 
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index ea94c9a..fc1d8ae 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -156,7 +156,7 @@
 {
 	ide_settings_t **p = (ide_settings_t **) &drive->settings, *setting = NULL;
 
-	down(&ide_setting_sem);
+	mutex_lock(&ide_setting_mtx);
 	while ((*p) && strcmp((*p)->name, name) < 0)
 		p = &((*p)->next);
 	if ((setting = kzalloc(sizeof(*setting), GFP_KERNEL)) == NULL)
@@ -177,10 +177,10 @@
 	if (auto_remove)
 		setting->auto_remove = 1;
 	*p = setting;
-	up(&ide_setting_sem);
+	mutex_unlock(&ide_setting_mtx);
 	return 0;
 abort:
-	up(&ide_setting_sem);
+	mutex_unlock(&ide_setting_mtx);
 	kfree(setting);
 	return -1;
 }
@@ -224,7 +224,7 @@
  *
  *	Automatically remove all the driver specific settings for this
  *	drive. This function may not be called from IRQ context. The
- *	caller must hold ide_setting_sem.
+ *	caller must hold ide_setting_mtx.
  */
 
 static void auto_remove_settings (ide_drive_t *drive)
@@ -269,7 +269,7 @@
  *	@setting: drive setting
  *
  *	Read a drive setting and return the value. The caller
- *	must hold the ide_setting_sem when making this call.
+ *	must hold the ide_setting_mtx when making this call.
  *
  *	BUGS: the data return and error are the same return value
  *	so an error -EINVAL and true return of the same value cannot
@@ -306,7 +306,7 @@
  *	@val: value
  *
  *	Write a drive setting if it is possible. The caller
- *	must hold the ide_setting_sem when making this call.
+ *	must hold the ide_setting_mtx when making this call.
  *
  *	BUGS: the data return and error are the same return value
  *	so an error -EINVAL and true return of the same value cannot
@@ -367,7 +367,7 @@
  *	@drive: drive being configured
  *
  *	Add the generic parts of the system settings to the /proc files.
- *	The caller must not be holding the ide_setting_sem.
+ *	The caller must not be holding the ide_setting_mtx.
  */
 
 void ide_add_generic_settings (ide_drive_t *drive)
@@ -408,7 +408,7 @@
 
 	proc_ide_settings_warn();
 
-	down(&ide_setting_sem);
+	mutex_lock(&ide_setting_mtx);
 	out += sprintf(out, "name\t\t\tvalue\t\tmin\t\tmax\t\tmode\n");
 	out += sprintf(out, "----\t\t\t-----\t\t---\t\t---\t\t----\n");
 	while(setting) {
@@ -428,7 +428,7 @@
 		setting = setting->next;
 	}
 	len = out - page;
-	up(&ide_setting_sem);
+	mutex_unlock(&ide_setting_mtx);
 	PROC_IDE_READ_RETURN(page,start,off,count,eof,len);
 }
 
@@ -508,16 +508,16 @@
 				++p;
 			}
 
-			down(&ide_setting_sem);
+			mutex_lock(&ide_setting_mtx);
 			setting = ide_find_setting_by_name(drive, name);
 			if (!setting)
 			{
-				up(&ide_setting_sem);
+				mutex_unlock(&ide_setting_mtx);
 				goto parse_error;
 			}
 			if (for_real)
 				ide_write_setting(drive, setting, val * setting->div_factor / setting->mul_factor);
-			up(&ide_setting_sem);
+			mutex_unlock(&ide_setting_mtx);
 		}
 	} while (!for_real++);
 	free_page((unsigned long)buf);
@@ -705,7 +705,7 @@
  *	Clean up the driver specific /proc files and IDE settings
  *	for a given drive.
  *
- *	Takes ide_setting_sem and ide_lock.
+ *	Takes ide_setting_mtx and ide_lock.
  *	Caller must hold none of the locks.
  */
 
@@ -715,10 +715,10 @@
 
 	ide_remove_proc_entries(drive->proc, driver->proc);
 
-	down(&ide_setting_sem);
+	mutex_lock(&ide_setting_mtx);
 	spin_lock_irqsave(&ide_lock, flags);
 	/*
-	 * ide_setting_sem protects the settings list
+	 * ide_setting_mtx protects the settings list
 	 * ide_lock protects the use of settings
 	 *
 	 * so we need to hold both, ide_setting_mtx because we want to
@@ -726,11 +726,11 @@
 	 * a setting out that is being used.
 	 *
 	 * OTOH both ide_{read,write}_setting are only ever used under
-	 * ide_setting_sem.
+	 * ide_setting_mtx.
 	 */
 	auto_remove_settings(drive);
 	spin_unlock_irqrestore(&ide_lock, flags);
-	up(&ide_setting_sem);
+	mutex_unlock(&ide_setting_mtx);
 }
 
 EXPORT_SYMBOL(ide_proc_unregister_driver);
diff --git a/drivers/ide/ide-timing.h b/drivers/ide/ide-timing.h
index c0864b1..e6cb859 100644
--- a/drivers/ide/ide-timing.h
+++ b/drivers/ide/ide-timing.h
@@ -102,66 +102,16 @@
 #define EZ(v,unit)		((v)?ENOUGH(v,unit):0)
 
 #define XFER_MODE	0xf0
-#define XFER_UDMA_133	0x48
-#define XFER_UDMA_100	0x44
-#define XFER_UDMA_66	0x42
-#define XFER_UDMA	0x40
 #define XFER_MWDMA	0x20
-#define XFER_SWDMA	0x10
 #define XFER_EPIO	0x01
 #define XFER_PIO	0x00
 
-static short ide_find_best_mode(ide_drive_t *drive, int map)
+static short ide_find_best_pio_mode(ide_drive_t *drive)
 {
 	struct hd_driveid *id = drive->id;
 	short best = 0;
 
-	if (!id)
-		return XFER_PIO_SLOW;
-
-	if ((map & XFER_UDMA) && (id->field_valid & 4)) {	/* Want UDMA and UDMA bitmap valid */
-
-		if ((map & XFER_UDMA_133) == XFER_UDMA_133)
-			if ((best = (id->dma_ultra & 0x0040) ? XFER_UDMA_6 : 0)) return best;
-
-		if ((map & XFER_UDMA_100) == XFER_UDMA_100)
-			if ((best = (id->dma_ultra & 0x0020) ? XFER_UDMA_5 : 0)) return best;
-
-		if ((map & XFER_UDMA_66) == XFER_UDMA_66)
-			if ((best = (id->dma_ultra & 0x0010) ? XFER_UDMA_4 :
-                	    	    (id->dma_ultra & 0x0008) ? XFER_UDMA_3 : 0)) return best;
-
-                if ((best = (id->dma_ultra & 0x0004) ? XFER_UDMA_2 :
-                	    (id->dma_ultra & 0x0002) ? XFER_UDMA_1 :
-                	    (id->dma_ultra & 0x0001) ? XFER_UDMA_0 : 0)) return best;
-	}
-
-	if ((map & XFER_MWDMA) && (id->field_valid & 2)) {	/* Want MWDMA and drive has EIDE fields */
-
-		if ((best = (id->dma_mword & 0x0004) ? XFER_MW_DMA_2 :
-                	    (id->dma_mword & 0x0002) ? XFER_MW_DMA_1 :
-                	    (id->dma_mword & 0x0001) ? XFER_MW_DMA_0 : 0)) return best;
-	}
-
-	if (map & XFER_SWDMA) {					/* Want SWDMA */
-
- 		if (id->field_valid & 2) {			/* EIDE SWDMA */
-
-			if ((best = (id->dma_1word & 0x0004) ? XFER_SW_DMA_2 :
-      				    (id->dma_1word & 0x0002) ? XFER_SW_DMA_1 :
-				    (id->dma_1word & 0x0001) ? XFER_SW_DMA_0 : 0)) return best;
-		}
-
-		if (id->capability & 1) {			/* Pre-EIDE style SWDMA */
-
-			if ((best = (id->tDMA == 2) ? XFER_SW_DMA_2 :
-				    (id->tDMA == 1) ? XFER_SW_DMA_1 :
-				    (id->tDMA == 0) ? XFER_SW_DMA_0 : 0)) return best;
-		}
-	}
-
-
-	if ((map & XFER_EPIO) && (id->field_valid & 2)) {	/* EIDE PIO modes */
+	if (id->field_valid & 2) {	/* EIDE PIO modes */
 
 		if ((best = (drive->id->eide_pio_modes & 4) ? XFER_PIO_5 :
 			    (drive->id->eide_pio_modes & 2) ? XFER_PIO_4 :
@@ -262,7 +212,7 @@
  */
 
 	if ((speed & XFER_MODE) != XFER_PIO) {
-		ide_timing_compute(drive, ide_find_best_mode(drive, XFER_PIO | XFER_EPIO), &p, T, UT);
+		ide_timing_compute(drive, ide_find_best_pio_mode(drive), &p, T, UT);
 		ide_timing_merge(&p, t, t, IDE_TIMING_ALL);
 	}
 
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 0cd76bf..c948a5c 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -169,7 +169,7 @@
 static int idebus_parameter;	/* holds the "idebus=" parameter */
 static int system_bus_speed;	/* holds what we think is VESA/PCI bus speed */
 
-DECLARE_MUTEX(ide_cfg_sem);
+DEFINE_MUTEX(ide_cfg_mtx);
  __cacheline_aligned_in_smp DEFINE_SPINLOCK(ide_lock);
 
 #ifdef CONFIG_IDEPCI_PCIBUS_ORDER
@@ -460,6 +460,8 @@
 	hwif->mwdma_mask		= tmp_hwif->mwdma_mask;
 	hwif->swdma_mask		= tmp_hwif->swdma_mask;
 
+	hwif->cbl			= tmp_hwif->cbl;
+
 	hwif->chipset			= tmp_hwif->chipset;
 	hwif->hold			= tmp_hwif->hold;
 
@@ -496,8 +498,8 @@
 	hwif->ide_dma_clear_irq		= tmp_hwif->ide_dma_clear_irq;
 	hwif->dma_host_on		= tmp_hwif->dma_host_on;
 	hwif->dma_host_off		= tmp_hwif->dma_host_off;
-	hwif->ide_dma_lostirq		= tmp_hwif->ide_dma_lostirq;
-	hwif->ide_dma_timeout		= tmp_hwif->ide_dma_timeout;
+	hwif->dma_lost_irq		= tmp_hwif->dma_lost_irq;
+	hwif->dma_timeout		= tmp_hwif->dma_timeout;
 
 	hwif->OUTB			= tmp_hwif->OUTB;
 	hwif->OUTBSYNC			= tmp_hwif->OUTBSYNC;
@@ -533,7 +535,6 @@
 	hwif->extra_base		= tmp_hwif->extra_base;
 	hwif->extra_ports		= tmp_hwif->extra_ports;
 	hwif->autodma			= tmp_hwif->autodma;
-	hwif->udma_four			= tmp_hwif->udma_four;
 
 	hwif->hwif_data			= tmp_hwif->hwif_data;
 }
@@ -564,7 +565,7 @@
 {
 	ide_drive_t *drive;
 	ide_hwif_t *hwif, *g;
-	static ide_hwif_t tmp_hwif; /* protected by ide_cfg_sem */
+	static ide_hwif_t tmp_hwif; /* protected by ide_cfg_mtx */
 	ide_hwgroup_t *hwgroup;
 	int irq_count = 0, unit;
 
@@ -572,7 +573,7 @@
 
 	BUG_ON(in_interrupt());
 	BUG_ON(irqs_disabled());
-	down(&ide_cfg_sem);
+	mutex_lock(&ide_cfg_mtx);
 	spin_lock_irq(&ide_lock);
 	hwif = &ide_hwifs[index];
 	if (!hwif->present)
@@ -679,7 +680,7 @@
 
 abort:
 	spin_unlock_irq(&ide_lock);
-	up(&ide_cfg_sem);
+	mutex_unlock(&ide_cfg_mtx);
 }
 
 EXPORT_SYMBOL(ide_unregister);
@@ -817,9 +818,9 @@
  *	Locks for IDE setting functionality
  */
 
-DECLARE_MUTEX(ide_setting_sem);
+DEFINE_MUTEX(ide_setting_mtx);
 
-EXPORT_SYMBOL_GPL(ide_setting_sem);
+EXPORT_SYMBOL_GPL(ide_setting_mtx);
 
 /**
  *	ide_spin_wait_hwgroup	-	wait for group
@@ -1192,11 +1193,11 @@
 	}
 
 read_val:
-	down(&ide_setting_sem);
+	mutex_lock(&ide_setting_mtx);
 	spin_lock_irqsave(&ide_lock, flags);
 	err = *val;
 	spin_unlock_irqrestore(&ide_lock, flags);
-	up(&ide_setting_sem);
+	mutex_unlock(&ide_setting_mtx);
 	return err >= 0 ? put_user(err, (long __user *)arg) : err;
 
 set_val:
@@ -1206,9 +1207,9 @@
 		if (!capable(CAP_SYS_ADMIN))
 			err = -EACCES;
 		else {
-			down(&ide_setting_sem);
+			mutex_lock(&ide_setting_mtx);
 			err = setfunc(drive, arg);
-			up(&ide_setting_sem);
+			mutex_unlock(&ide_setting_mtx);
 		}
 	}
 	return err;
@@ -1548,7 +1549,11 @@
 				goto bad_option;
 			case -7: /* ata66 */
 #ifdef CONFIG_BLK_DEV_IDEPCI
-				hwif->udma_four = 1;
+				/*
+				 * Use ATA_CBL_PATA40_SHORT so drive side
+				 * cable detection is also overridden.
+				 */
+				hwif->cbl = ATA_CBL_PATA40_SHORT;
 				goto obsolete_option;
 #else
 				goto bad_hwif;
diff --git a/drivers/ide/legacy/hd.c b/drivers/ide/legacy/hd.c
index 45ed035..661c12f 100644
--- a/drivers/ide/legacy/hd.c
+++ b/drivers/ide/legacy/hd.c
@@ -130,7 +130,7 @@
 	
 #ifdef HD_TYPE
 static struct hd_i_struct hd_info[] = { HD_TYPE };
-static int NR_HD = ((sizeof (hd_info))/(sizeof (struct hd_i_struct)));
+static int NR_HD = ARRAY_SIZE(hd_info);
 #else
 static struct hd_i_struct hd_info[MAX_HD];
 static int NR_HD;
diff --git a/drivers/ide/legacy/macide.c b/drivers/ide/legacy/macide.c
index c211fc7..b557c45 100644
--- a/drivers/ide/legacy/macide.c
+++ b/drivers/ide/legacy/macide.c
@@ -77,15 +77,6 @@
 	return 0;
 }
 
-#ifdef CONFIG_BLK_DEV_MAC_MEDIABAY
-static void macide_mediabay_interrupt(int irq, void *dev_id)
-{
-	int state = baboon->mb_status & 0x04;
-
-	printk(KERN_INFO "macide: media bay %s detected\n", state? "removal":"insertion");
-}
-#endif
-
 /*
  * Probe for a Macintosh IDE interface
  */
@@ -128,11 +119,6 @@
 			ide_drive_t *drive = &ide_hwifs[index].drives[0];
 			drive->capacity64 = drive->cyl*drive->head*drive->sect;
 
-#ifdef CONFIG_BLK_DEV_MAC_MEDIABAY
-			request_irq(IRQ_BABOON_2, macide_mediabay_interrupt,
-					IRQ_FLG_FAST, "mediabay",
-					macide_mediabay_interrupt);
-#endif
 		}
 		break;
 
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c
index ca95e99..2e7013a 100644
--- a/drivers/ide/mips/au1xxx-ide.c
+++ b/drivers/ide/mips/au1xxx-ide.c
@@ -381,9 +381,7 @@
 
 static int auide_dma_check(ide_drive_t *drive)
 {
-	u8 speed;
-
-#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
+	u8 speed = ide_max_dma_mode(drive);
 
 	if( dbdma_init_done == 0 ){
 		auide_hwif.white_list = ide_in_drive_list(drive->id,
@@ -394,7 +392,6 @@
 		auide_ddma_init(&auide_hwif);
 		dbdma_init_done = 1;
 	}
-#endif
 
 	/* Is the drive in our DMA black list? */
 
@@ -409,8 +406,6 @@
 	else
 		drive->using_dma = 1;
 
-	speed = ide_find_best_mode(drive, XFER_PIO | XFER_MWDMA);
-	
 	if (drive->autodma && (speed & XFER_MODE) != XFER_PIO)
 		return 0;
 
@@ -456,10 +451,9 @@
 	drive->using_dma = 0;
 }
 
-static int auide_dma_lostirq(ide_drive_t *drive)
+static void auide_dma_lost_irq(ide_drive_t *drive)
 {
 	printk(KERN_ERR "%s: IRQ lost\n", drive->name);
-	return 0;
 }
 
 static void auide_ddma_tx_callback(int irq, void *param)
@@ -489,16 +483,16 @@
   
 #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
 
-static int auide_dma_timeout(ide_drive_t *drive)
+static void auide_dma_timeout(ide_drive_t *drive)
 {
-//      printk("%s\n", __FUNCTION__);
+	ide_hwif_t *hwif = HWIF(drive);
 
 	printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);
 
-	if (HWIF(drive)->ide_dma_test_irq(drive))
-		return 0;
+	if (hwif->ide_dma_test_irq(drive))
+		return;
 
-	return HWIF(drive)->ide_dma_end(drive);
+	hwif->ide_dma_end(drive);
 }
 					
 
@@ -721,7 +715,7 @@
 
 #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
 	hwif->dma_off_quietly		= &auide_dma_off_quietly;
-	hwif->ide_dma_timeout           = &auide_dma_timeout;
+	hwif->dma_timeout		= &auide_dma_timeout;
 
 	hwif->ide_dma_check             = &auide_dma_check;
 	hwif->dma_exec_cmd              = &auide_dma_exec_cmd;
@@ -731,7 +725,7 @@
 	hwif->ide_dma_test_irq          = &auide_dma_test_irq;
 	hwif->dma_host_off		= &auide_dma_host_off;
 	hwif->dma_host_on		= &auide_dma_host_on;
-	hwif->ide_dma_lostirq           = &auide_dma_lostirq;
+	hwif->dma_lost_irq		= &auide_dma_lost_irq;
 	hwif->ide_dma_on                = &auide_dma_on;
 
 	hwif->autodma                   = 1;
diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/pci/aec62xx.c
index b173bc6..e5d0936 100644
--- a/drivers/ide/pci/aec62xx.c
+++ b/drivers/ide/pci/aec62xx.c
@@ -1,5 +1,5 @@
 /*
- * linux/drivers/ide/pci/aec62xx.c		Version 0.21	Apr 21, 2007
+ * linux/drivers/ide/pci/aec62xx.c		Version 0.24	May 24, 2007
  *
  * Copyright (C) 1999-2002	Andre Hedrick <andre@linux-ide.org>
  * Copyright (C) 2007		MontaVista Software, Inc. <source@mvista.com>
@@ -140,25 +140,10 @@
 	return(ide_config_drive_speed(drive, speed));
 }
 
-static int aec62xx_tune_chipset (ide_drive_t *drive, u8 speed)
-{
-	switch (HWIF(drive)->pci_dev->device) {
-		case PCI_DEVICE_ID_ARTOP_ATP865:
-		case PCI_DEVICE_ID_ARTOP_ATP865R:
-		case PCI_DEVICE_ID_ARTOP_ATP860:
-		case PCI_DEVICE_ID_ARTOP_ATP860R:
-			return ((int) aec6260_tune_chipset(drive, speed));
-		case PCI_DEVICE_ID_ARTOP_ATP850UF:
-			return ((int) aec6210_tune_chipset(drive, speed));
-		default:
-			return -1;
-	}
-}
-
 static void aec62xx_tune_drive (ide_drive_t *drive, u8 pio)
 {
 	pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
-	(void) aec62xx_tune_chipset(drive, pio + XFER_PIO_0);
+	(void) HWIF(drive)->speedproc(drive, pio + XFER_PIO_0);
 }
 
 static int aec62xx_config_drive_xfer_rate (ide_drive_t *drive)
@@ -172,12 +157,9 @@
 	return -1;
 }
 
-static int aec62xx_irq_timeout (ide_drive_t *drive)
+static void aec62xx_dma_lost_irq (ide_drive_t *drive)
 {
-	ide_hwif_t *hwif	= HWIF(drive);
-	struct pci_dev *dev	= hwif->pci_dev;
-
-	switch(dev->device) {
+	switch (HWIF(drive)->pci_dev->device) {
 		case PCI_DEVICE_ID_ARTOP_ATP860:
 		case PCI_DEVICE_ID_ARTOP_ATP860R:
 		case PCI_DEVICE_ID_ARTOP_ATP865:
@@ -186,7 +168,6 @@
 		default:
 			break;
 	}
-	return 0;
 }
 
 static unsigned int __devinit init_chipset_aec62xx(struct pci_dev *dev, const char *name)
@@ -224,64 +205,46 @@
 
 static void __devinit init_hwif_aec62xx(ide_hwif_t *hwif)
 {
-	struct pci_dev *dev = hwif->pci_dev;
+	struct pci_dev *dev	= hwif->pci_dev;
+	u8 reg54 = 0,  mask	= hwif->channel ? 0xf0 : 0x0f;
+	unsigned long flags;
 
-	hwif->autodma = 0;
 	hwif->tuneproc = &aec62xx_tune_drive;
-	hwif->speedproc = &aec62xx_tune_chipset;
 
-	if (dev->device == PCI_DEVICE_ID_ARTOP_ATP850UF)
-		hwif->serialized = hwif->channel;
-
-	if (hwif->mate)
-		hwif->mate->serialized = hwif->serialized;
+	if (dev->device == PCI_DEVICE_ID_ARTOP_ATP850UF) {
+		if (hwif->mate)
+			hwif->mate->serialized = hwif->serialized = 1;
+		hwif->speedproc = &aec6210_tune_chipset;
+	} else
+		hwif->speedproc = &aec6260_tune_chipset;
 
 	if (!hwif->dma_base) {
-		hwif->drives[0].autotune = 1;
-		hwif->drives[1].autotune = 1;
+		hwif->drives[0].autotune = hwif->drives[1].autotune = 1;
 		return;
 	}
 
 	hwif->ultra_mask = hwif->cds->udma_mask;
-
-	/* atp865 and atp865r */
-	if (hwif->ultra_mask == 0x3f) {
-		/* check bit 0x10 of DMA status register */
-		if (inb(pci_resource_start(dev, 4) + 2) & 0x10)
- 			hwif->ultra_mask = 0x7f; /* udma0-6 */
-	}
-
 	hwif->mwdma_mask = 0x07;
 
 	hwif->ide_dma_check	= &aec62xx_config_drive_xfer_rate;
-	hwif->ide_dma_lostirq	= &aec62xx_irq_timeout;
+	hwif->dma_lost_irq	= &aec62xx_dma_lost_irq;
+
+	if (dev->device == PCI_DEVICE_ID_ARTOP_ATP850UF) {
+		spin_lock_irqsave(&ide_lock, flags);
+		pci_read_config_byte (dev, 0x54, &reg54);
+		pci_write_config_byte(dev, 0x54, (reg54 & ~mask));
+		spin_unlock_irqrestore(&ide_lock, flags);
+	} else if (hwif->cbl != ATA_CBL_PATA40_SHORT) {
+		u8 ata66 = 0, mask = hwif->channel ? 0x02 : 0x01;
+
+		pci_read_config_byte(hwif->pci_dev, 0x49, &ata66);
+
+		hwif->cbl = (ata66 & mask) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
+	}
 
 	if (!noautodma)
 		hwif->autodma = 1;
-	hwif->drives[0].autodma = hwif->autodma;
-	hwif->drives[1].autodma = hwif->autodma;
-}
-
-static void __devinit init_dma_aec62xx(ide_hwif_t *hwif, unsigned long dmabase)
-{
-	struct pci_dev *dev	= hwif->pci_dev;
-
-	if (dev->device == PCI_DEVICE_ID_ARTOP_ATP850UF) {
-		u8 reg54h = 0;
-		unsigned long flags;
-
-		spin_lock_irqsave(&ide_lock, flags);
-		pci_read_config_byte(dev, 0x54, &reg54h);
-		pci_write_config_byte(dev, 0x54, reg54h & ~(hwif->channel ? 0xF0 : 0x0F));
-		spin_unlock_irqrestore(&ide_lock, flags);
-	} else {
-		u8 ata66	= 0;
-		pci_read_config_byte(hwif->pci_dev, 0x49, &ata66);
-	        if (!(hwif->udma_four))
-			hwif->udma_four = (ata66&(hwif->channel?0x02:0x01))?0:1;
-	}
-
-	ide_setup_dma(hwif, dmabase, 8);
+	hwif->drives[0].autodma = hwif->drives[1].autodma = hwif->autodma;
 }
 
 static int __devinit init_setup_aec62xx(struct pci_dev *dev, ide_pci_device_t *d)
@@ -291,16 +254,12 @@
 
 static int __devinit init_setup_aec6x80(struct pci_dev *dev, ide_pci_device_t *d)
 {
-	unsigned long bar4reg = pci_resource_start(dev, 4);
+	unsigned long dma_base = pci_resource_start(dev, 4);
 
-	if (inb(bar4reg+2) & 0x10) {
-		strcpy(d->name, "AEC6880");
-		if (dev->device == PCI_DEVICE_ID_ARTOP_ATP865R)
-			strcpy(d->name, "AEC6880R");
-	} else {
-		strcpy(d->name, "AEC6280");
-		if (dev->device == PCI_DEVICE_ID_ARTOP_ATP865R)
-			strcpy(d->name, "AEC6280R");
+	if (inb(dma_base + 2) & 0x10) {
+		d->name = (dev->device == PCI_DEVICE_ID_ARTOP_ATP865R) ?
+			  "AEC6880R" : "AEC6880";
+		d->udma_mask = 0x7f; /* udma0-6 */
 	}
 
 	return ide_setup_pci_device(dev, d);
@@ -312,7 +271,6 @@
 		.init_setup	= init_setup_aec62xx,
 		.init_chipset	= init_chipset_aec62xx,
 		.init_hwif	= init_hwif_aec62xx,
-		.init_dma	= init_dma_aec62xx,
 		.channels	= 2,
 		.autodma	= AUTODMA,
 		.enablebits	= {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
@@ -323,7 +281,6 @@
 		.init_setup	= init_setup_aec62xx,
 		.init_chipset	= init_chipset_aec62xx,
 		.init_hwif	= init_hwif_aec62xx,
-		.init_dma	= init_dma_aec62xx,
 		.channels	= 2,
 		.autodma	= NOAUTODMA,
 		.bootable	= OFF_BOARD,
@@ -333,28 +290,25 @@
 		.init_setup	= init_setup_aec62xx,
 		.init_chipset	= init_chipset_aec62xx,
 		.init_hwif	= init_hwif_aec62xx,
-		.init_dma	= init_dma_aec62xx,
 		.channels	= 2,
 		.autodma	= AUTODMA,
 		.enablebits	= {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
 		.bootable	= NEVER_BOARD,
 		.udma_mask	= 0x1f, /* udma0-4 */
 	},{	/* 3 */
-		.name		= "AEC6X80",
+		.name		= "AEC6280",
 		.init_setup	= init_setup_aec6x80,
 		.init_chipset	= init_chipset_aec62xx,
 		.init_hwif	= init_hwif_aec62xx,
-		.init_dma	= init_dma_aec62xx,
 		.channels	= 2,
 		.autodma	= AUTODMA,
 		.bootable	= OFF_BOARD,
 		.udma_mask	= 0x3f, /* udma0-5 */
 	},{	/* 4 */
-		.name		= "AEC6X80R",
+		.name		= "AEC6280R",
 		.init_setup	= init_setup_aec6x80,
 		.init_chipset	= init_chipset_aec62xx,
 		.init_hwif	= init_hwif_aec62xx,
-		.init_dma	= init_dma_aec62xx,
 		.channels	= 2,
 		.autodma	= AUTODMA,
 		.enablebits	= {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
@@ -370,13 +324,16 @@
  *
  *	Called when the PCI registration layer (or the IDE initialization)
  *	finds a device matching our IDE device tables.
+ *
+ *	NOTE: since we're going to modify the 'name' field for AEC-6[26]80[R]
+ *	chips, pass a local copy of 'ide_pci_device_t' down the call chain.
  */
  
 static int __devinit aec62xx_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 {
-	ide_pci_device_t *d = &aec62xx_chipsets[id->driver_data];
+	ide_pci_device_t d = aec62xx_chipsets[id->driver_data];
 
-	return d->init_setup(dev, d);
+	return d.init_setup(dev, &d);
 }
 
 static struct pci_device_id aec62xx_pci_tbl[] = {
diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/pci/alim15x3.c
index 27525ec..8a6b27b 100644
--- a/drivers/ide/pci/alim15x3.c
+++ b/drivers/ide/pci/alim15x3.c
@@ -1,5 +1,5 @@
 /*
- * linux/drivers/ide/pci/alim15x3.c		Version 0.21	2007/02/03
+ * linux/drivers/ide/pci/alim15x3.c		Version 0.25	Jun 9 2007
  *
  *  Copyright (C) 1998-2000 Michel Aubry, Maintainer
  *  Copyright (C) 1998-2000 Andrzej Krzysztofowicz, Maintainer
@@ -10,6 +10,7 @@
  *  Copyright (C) 2002 Alan Cox <alan@redhat.com>
  *  ALi (now ULi M5228) support by Clear Zhang <Clear.Zhang@ali.com.tw>
  *  Copyright (C) 2007 MontaVista Software, Inc. <source@mvista.com>
+ *  Copyright (C) 2007 Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
  *
  *  (U)DMA capable version of ali 1533/1543(C), 1535(D)
  *
@@ -36,6 +37,7 @@
 #include <linux/hdreg.h>
 #include <linux/ide.h>
 #include <linux/init.h>
+#include <linux/dmi.h>
 
 #include <asm/io.h>
 
@@ -583,6 +585,35 @@
 	return 0;
 }
 
+/*
+ *	Cable special cases
+ */
+
+static struct dmi_system_id cable_dmi_table[] = {
+	{
+		.ident = "HP Pavilion N5430",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+			DMI_MATCH(DMI_BOARD_NAME, "OmniBook N32N-736"),
+		},
+	},
+	{ }
+};
+
+static int ali_cable_override(struct pci_dev *pdev)
+{
+	/* Fujitsu P2000 */
+	if (pdev->subsystem_vendor == 0x10CF &&
+	    pdev->subsystem_device == 0x10AF)
+		return 1;
+
+	/* Systems by DMI */
+	if (dmi_check_system(cable_dmi_table))
+		return 1;
+
+	return 0;
+}
+
 /**
  *	ata66_ali15x3	-	check for UDMA 66 support
  *	@hwif: IDE interface
@@ -594,37 +625,31 @@
 *	FIXME: frobs bits that are not defined on newer ALi devices
  */
 
-static unsigned int __devinit ata66_ali15x3 (ide_hwif_t *hwif)
+static u8 __devinit ata66_ali15x3(ide_hwif_t *hwif)
 {
 	struct pci_dev *dev	= hwif->pci_dev;
-	unsigned int ata66	= 0;
-	u8 cable_80_pin[2]	= { 0, 0 };
-
 	unsigned long flags;
-	u8 tmpbyte;
+	u8 cbl = ATA_CBL_PATA40, tmpbyte;
 
 	local_irq_save(flags);
 
 	if (m5229_revision >= 0xC2) {
 		/*
-		 * Ultra66 cable detection (from Host View)
-		 * m5229, 0x4a, bit0: primary, bit1: secondary 80 pin
+		 * m5229 80-pin cable detection (from Host View)
+		 *
+		 * 0x4a bit0 is 0 => primary channel has 80-pin
+		 * 0x4a bit1 is 0 => secondary channel has 80-pin
+		 *
+		 * Certain laptops use short but suitable cables
+		 * and don't implement the detect logic.
 		 */
-		pci_read_config_byte(dev, 0x4a, &tmpbyte);
-		/*
-		 * 0x4a, bit0 is 0 => primary channel
-		 * has 80-pin (from host view)
-		 */
-		if (!(tmpbyte & 0x01)) cable_80_pin[0] = 1;
-		/*
-		 * 0x4a, bit1 is 0 => secondary channel
-		 * has 80-pin (from host view)
-		 */
-		if (!(tmpbyte & 0x02)) cable_80_pin[1] = 1;
-		/*
-		 * Allow ata66 if cable of current channel has 80 pins
-		 */
-		ata66 = (hwif->channel)?cable_80_pin[1]:cable_80_pin[0];
+		if (ali_cable_override(dev))
+			cbl = ATA_CBL_PATA40_SHORT;
+		else {
+			pci_read_config_byte(dev, 0x4a, &tmpbyte);
+			if ((tmpbyte & (1 << hwif->channel)) == 0)
+				cbl = ATA_CBL_PATA80;
+		}
 	} else {
 		/*
 		 * check m1533, 0x5e, bit 1~4 == 1001 => & 00011110 = 00010010
@@ -657,7 +682,7 @@
 
 	local_irq_restore(flags);
 
-	return(ata66);
+	return cbl;
 }
 
 /**
@@ -708,8 +733,9 @@
 		hwif->dma_setup = &ali15x3_dma_setup;
 		if (!noautodma)
 			hwif->autodma = 1;
-		if (!(hwif->udma_four))
-			hwif->udma_four = ata66_ali15x3(hwif);
+
+		if (hwif->cbl != ATA_CBL_PATA40_SHORT)
+			hwif->cbl = ata66_ali15x3(hwif);
 	}
 	hwif->drives[0].autodma = hwif->autodma;
 	hwif->drives[1].autodma = hwif->autodma;
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c
index a2be65fc..84ed30c 100644
--- a/drivers/ide/pci/amd74xx.c
+++ b/drivers/ide/pci/amd74xx.c
@@ -1,10 +1,11 @@
 /*
- * Version 2.16
+ * Version 2.20
  *
  * AMD 755/756/766/8111 and nVidia nForce/2/2s/3/3s/CK804/MCP04
  * IDE driver for Linux.
  *
  * Copyright (c) 2000-2002 Vojtech Pavlik
+ * Copyright (c) 2007 Bartlomiej Zolnierkiewicz
  *
  * Based on the work of:
  *      Andre Hedrick
@@ -37,11 +38,6 @@
 #define AMD_ADDRESS_SETUP	(0x0c + amd_config->base)
 #define AMD_UDMA_TIMING		(0x10 + amd_config->base)
 
-#define AMD_UDMA		0x07
-#define AMD_UDMA_33		0x01
-#define AMD_UDMA_66		0x02
-#define AMD_UDMA_100		0x03
-#define AMD_UDMA_133		0x04
 #define AMD_CHECK_SWDMA		0x08
 #define AMD_BAD_SWDMA		0x10
 #define AMD_BAD_FIFO		0x20
@@ -53,32 +49,33 @@
 
 static struct amd_ide_chip {
 	unsigned short id;
-	unsigned long base;
-	unsigned char flags;
+	u8 base;
+	u8 udma_mask;
+	u8 flags;
 } amd_ide_chips[] = {
-	{ PCI_DEVICE_ID_AMD_COBRA_7401,		0x40, AMD_UDMA_33 | AMD_BAD_SWDMA },
-	{ PCI_DEVICE_ID_AMD_VIPER_7409,		0x40, AMD_UDMA_66 | AMD_CHECK_SWDMA },
-	{ PCI_DEVICE_ID_AMD_VIPER_7411,		0x40, AMD_UDMA_100 | AMD_BAD_FIFO },
-	{ PCI_DEVICE_ID_AMD_OPUS_7441,		0x40, AMD_UDMA_100 },
-	{ PCI_DEVICE_ID_AMD_8111_IDE,		0x40, AMD_UDMA_133 | AMD_CHECK_SERENADE },
-	{ PCI_DEVICE_ID_NVIDIA_NFORCE_IDE,	0x50, AMD_UDMA_100 },
-	{ PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE,	0x50, AMD_UDMA_133 },
-	{ PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE,	0x50, AMD_UDMA_133 },
-	{ PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA,	0x50, AMD_UDMA_133 },
-	{ PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE,	0x50, AMD_UDMA_133 },
-	{ PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE,	0x50, AMD_UDMA_133 },
-	{ PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA,	0x50, AMD_UDMA_133 },
-	{ PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2,	0x50, AMD_UDMA_133 },
-	{ PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE,	0x50, AMD_UDMA_133 },
-	{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE,	0x50, AMD_UDMA_133 },
-	{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE,	0x50, AMD_UDMA_133 },
-	{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE,	0x50, AMD_UDMA_133 },
-	{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE,	0x50, AMD_UDMA_133 },
-	{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE,	0x50, AMD_UDMA_133 },
-	{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE,	0x50, AMD_UDMA_133 },
-	{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE,	0x50, AMD_UDMA_133 },
-	{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE,	0x50, AMD_UDMA_133 },
-	{ PCI_DEVICE_ID_AMD_CS5536_IDE,			0x40, AMD_UDMA_100 },
+	{ PCI_DEVICE_ID_AMD_COBRA_7401,		 0x40, ATA_UDMA2, AMD_BAD_SWDMA },
+	{ PCI_DEVICE_ID_AMD_VIPER_7409,		 0x40, ATA_UDMA4, AMD_CHECK_SWDMA },
+	{ PCI_DEVICE_ID_AMD_VIPER_7411,		 0x40, ATA_UDMA5, AMD_BAD_FIFO },
+	{ PCI_DEVICE_ID_AMD_OPUS_7441,		 0x40, ATA_UDMA5, },
+	{ PCI_DEVICE_ID_AMD_8111_IDE,		 0x40, ATA_UDMA6, AMD_CHECK_SERENADE },
+	{ PCI_DEVICE_ID_NVIDIA_NFORCE_IDE,	 0x50, ATA_UDMA5, },
+	{ PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE,	 0x50, ATA_UDMA6, },
+	{ PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE,	 0x50, ATA_UDMA6, },
+	{ PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA,	 0x50, ATA_UDMA6, },
+	{ PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE,	 0x50, ATA_UDMA6, },
+	{ PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE,	 0x50, ATA_UDMA6, },
+	{ PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA,	 0x50, ATA_UDMA6, },
+	{ PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2,	 0x50, ATA_UDMA6, },
+	{ PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE, 0x50, ATA_UDMA6, },
+	{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE, 0x50, ATA_UDMA6, },
+	{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE, 0x50, ATA_UDMA6, },
+	{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE, 0x50, ATA_UDMA6, },
+	{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE, 0x50, ATA_UDMA6, },
+	{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE, 0x50, ATA_UDMA6, },
+	{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE, 0x50, ATA_UDMA6, },
+	{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE, 0x50, ATA_UDMA6, },
+	{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE, 0x50, ATA_UDMA6, },
+	{ PCI_DEVICE_ID_AMD_CS5536_IDE,		 0x40, ATA_UDMA5, },
 	{ 0 }
 };
 
@@ -87,7 +84,7 @@
 static unsigned int amd_80w;
 static unsigned int amd_clock;
 
-static char *amd_dma[] = { "MWDMA16", "UDMA33", "UDMA66", "UDMA100", "UDMA133" };
+static char *amd_dma[] = { "16", "25", "33", "44", "66", "100", "133" };
 static unsigned char amd_cyc2udma[] = { 6, 6, 5, 4, 0, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 7 };
 
 /*
@@ -128,7 +125,7 @@
 
 	pci_read_config_byte(dev, PCI_REVISION_ID, &t);
 	amd_print("Revision:                           IDE %#x", t);
-	amd_print("Highest DMA rate:                   %s", amd_dma[amd_config->flags & AMD_UDMA]);
+	amd_print("Highest DMA rate:                   UDMA%s", amd_dma[fls(amd_config->udma_mask) - 1]);
 
 	amd_print("BM-DMA base:                        %#lx", amd_base);
 	amd_print("PCI clock:                          %d.%dMHz", amd_clock / 1000, amd_clock / 100 % 10);
@@ -221,12 +218,12 @@
 	pci_write_config_byte(dev, AMD_DRIVE_TIMING + (3 - dn),
 		((FIT(timing->active, 1, 16) - 1) << 4) | (FIT(timing->recover, 1, 16) - 1));
 
-	switch (amd_config->flags & AMD_UDMA) {
-		case AMD_UDMA_33:  t = timing->udma ? (0xc0 | (FIT(timing->udma, 2, 5) - 2)) : 0x03; break;
-		case AMD_UDMA_66:  t = timing->udma ? (0xc0 | amd_cyc2udma[FIT(timing->udma, 2, 10)]) : 0x03; break;
-		case AMD_UDMA_100: t = timing->udma ? (0xc0 | amd_cyc2udma[FIT(timing->udma, 1, 10)]) : 0x03; break;
-		case AMD_UDMA_133: t = timing->udma ? (0xc0 | amd_cyc2udma[FIT(timing->udma, 1, 15)]) : 0x03; break;
-		default: return;
+	switch (amd_config->udma_mask) {
+	case ATA_UDMA2: t = timing->udma ? (0xc0 | (FIT(timing->udma, 2, 5) - 2)) : 0x03; break;
+	case ATA_UDMA4: t = timing->udma ? (0xc0 | amd_cyc2udma[FIT(timing->udma, 2, 10)]) : 0x03; break;
+	case ATA_UDMA5: t = timing->udma ? (0xc0 | amd_cyc2udma[FIT(timing->udma, 1, 10)]) : 0x03; break;
+	case ATA_UDMA6: t = timing->udma ? (0xc0 | amd_cyc2udma[FIT(timing->udma, 1, 15)]) : 0x03; break;
+	default: return;
 	}
 
 	pci_write_config_byte(dev, AMD_UDMA_TIMING + (3 - dn), t);
@@ -248,7 +245,7 @@
 		ide_config_drive_speed(drive, speed);
 
 	T = 1000000000 / amd_clock;
-	UT = T / min_t(int, max_t(int, amd_config->flags & AMD_UDMA, 1), 2);
+	UT = (amd_config->udma_mask == ATA_UDMA2) ? T : (T / 2);
 
 	ide_timing_compute(drive, speed, &t, T, UT);
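Since amd_clock is kept in kHz, the quotient is the PCI clock period in picoseconds, and parts faster than UDMA33 are timed on half periods. A quick stand-alone check of the arithmetic, assuming the usual 33 MHz PCI bus:

#include <stdio.h>

int main(void)
{
	int amd_clock = 33333;			/* kHz, as set up by the driver */
	int T = 1000000000 / amd_clock;		/* clock period: 30000 ps */

	printf("T  = %d ps (PIO/MWDMA reference clock)\n", T);
	printf("UT = %d ps on UDMA33-only parts\n", T);
	printf("UT = %d ps on UDMA66 and faster parts\n", T / 2);
	return 0;
}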
 
@@ -277,29 +274,19 @@
 static void amd74xx_tune_drive(ide_drive_t *drive, u8 pio)
 {
 	if (pio == 255) {
-		amd_set_drive(drive, ide_find_best_mode(drive, XFER_PIO | XFER_EPIO));
+		amd_set_drive(drive, ide_find_best_pio_mode(drive));
 		return;
 	}
 
 	amd_set_drive(drive, XFER_PIO_0 + min_t(byte, pio, 5));
 }
 
-/*
- * amd74xx_dmaproc() is a callback from upper layers that can do
- * a lot, but we use it for DMA/PIO tuning only, delegating everything
- * else to the default ide_dmaproc().
- */
-
 static int amd74xx_ide_dma_check(ide_drive_t *drive)
 {
-	int w80 = HWIF(drive)->udma_four;
+	u8 speed = ide_max_dma_mode(drive);
 
-	u8 speed = ide_find_best_mode(drive,
-		XFER_PIO | XFER_EPIO | XFER_MWDMA | XFER_UDMA |
-		((amd_config->flags & AMD_BAD_SWDMA) ? 0 : XFER_SWDMA) |
-		(w80 && (amd_config->flags & AMD_UDMA) >= AMD_UDMA_66 ? XFER_UDMA_66 : 0) |
-		(w80 && (amd_config->flags & AMD_UDMA) >= AMD_UDMA_100 ? XFER_UDMA_100 : 0) |
-		(w80 && (amd_config->flags & AMD_UDMA) >= AMD_UDMA_133 ? XFER_UDMA_133 : 0));
+	if (speed == 0)
+		speed = ide_find_best_pio_mode(drive);
 
 	amd_set_drive(drive, speed);
 
@@ -334,10 +321,10 @@
  * Check 80-wire cable presence.
  */
 
-	switch (amd_config->flags & AMD_UDMA) {
+	switch (amd_config->udma_mask) {
 
-		case AMD_UDMA_133:
-		case AMD_UDMA_100:
+		case ATA_UDMA6:
+		case ATA_UDMA5:
 			pci_read_config_byte(dev, AMD_CABLE_DETECT, &t);
 			pci_read_config_dword(dev, AMD_UDMA_TIMING, &u);
 			amd_80w = ((t & 0x3) ? 1 : 0) | ((t & 0xc) ? 2 : 0);
@@ -349,7 +336,7 @@
 				}
 			break;
 
-		case AMD_UDMA_66:
+		case ATA_UDMA4:
 			/* no host side cable detection */
 			amd_80w = 0x03;
 			break;
@@ -370,7 +357,7 @@
 	if ((amd_config->flags & AMD_CHECK_SERENADE) &&
 		dev->subsystem_vendor == PCI_VENDOR_ID_AMD &&
 		dev->subsystem_device == PCI_DEVICE_ID_AMD_SERENADE)
-			amd_config->flags = AMD_UDMA_100;
+			amd_config->udma_mask = ATA_UDMA5;
 
 /*
  * Determine the system bus clock.
@@ -395,8 +382,9 @@
  */
 
 	pci_read_config_byte(dev, PCI_REVISION_ID, &t);
-	printk(KERN_INFO "%s: %s (rev %02x) %s controller\n",
-		amd_chipset->name, pci_name(dev), t, amd_dma[amd_config->flags & AMD_UDMA]);
+	printk(KERN_INFO "%s: %s (rev %02x) UDMA%s controller\n",
+		amd_chipset->name, pci_name(dev), t,
+		amd_dma[fls(amd_config->udma_mask) - 1]);
 
 /*
  * Register /proc/ide/amd74xx entry
@@ -437,12 +425,19 @@
 		return;
 
         hwif->atapi_dma = 1;
-        hwif->ultra_mask = 0x7f;
-        hwif->mwdma_mask = 0x07;
-        hwif->swdma_mask = 0x07;
 
-	if (!hwif->udma_four)
-		hwif->udma_four = (amd_80w >> hwif->channel) & 1;
+	hwif->ultra_mask = amd_config->udma_mask;
+	hwif->mwdma_mask = 0x07;
+	if ((amd_config->flags & AMD_BAD_SWDMA) == 0)
+		hwif->swdma_mask = 0x07;
+
+	if (hwif->cbl != ATA_CBL_PATA40_SHORT) {
+		if ((amd_80w >> hwif->channel) & 1)
+			hwif->cbl = ATA_CBL_PATA80;
+		else
+			hwif->cbl = ATA_CBL_PATA40;
+	}
+
         hwif->ide_dma_check = &amd74xx_ide_dma_check;
         if (!noautodma)
                 hwif->autodma = 1;
diff --git a/drivers/ide/pci/atiixp.c b/drivers/ide/pci/atiixp.c
index 8ab33fa..2761510 100644
--- a/drivers/ide/pci/atiixp.c
+++ b/drivers/ide/pci/atiixp.c
@@ -264,10 +264,11 @@
 	hwif->swdma_mask = 0x04;
 
 	pci_read_config_byte(pdev, ATIIXP_IDE_UDMA_MODE + ch, &udma_mode);
+
 	if ((udma_mode & 0x07) >= 0x04 || (udma_mode & 0x70) >= 0x40)
-		hwif->udma_four = 1;
+		hwif->cbl = ATA_CBL_PATA80;
 	else
-		hwif->udma_four = 0;
+		hwif->cbl = ATA_CBL_PATA40;
 
 	hwif->dma_host_on = &atiixp_dma_host_on;
 	hwif->dma_host_off = &atiixp_dma_host_off;
diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c
index 7c57dc6..8631b6c 100644
--- a/drivers/ide/pci/cmd64x.c
+++ b/drivers/ide/pci/cmd64x.c
@@ -1,5 +1,5 @@
 /*
- * linux/drivers/ide/pci/cmd64x.c		Version 1.47	Mar 19, 2007
+ * linux/drivers/ide/pci/cmd64x.c		Version 1.50	May 10, 2007
  *
  * cmd64x.c: Enable interrupts at initialization time on Ultra/PCI machines.
  *           Due to massive hardware bugs, UltraDMA is only supported
@@ -52,9 +52,6 @@
 #define   ARTTIM23_DIS_RA2	0x04
 #define   ARTTIM23_DIS_RA3	0x08
 #define   ARTTIM23_INTR_CH1	0x10
-#define ARTTIM2		0x57
-#define ARTTIM3		0x57
-#define DRWTIM23	0x58
 #define DRWTIM2		0x58
 #define BRST		0x59
 #define DRWTIM3		0x5b
@@ -469,71 +466,43 @@
 
 static unsigned int __devinit init_chipset_cmd64x(struct pci_dev *dev, const char *name)
 {
-	u32 class_rev = 0;
 	u8 mrdmode = 0;
 
-	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
-	class_rev &= 0xff;
+	if (dev->device == PCI_DEVICE_ID_CMD_646) {
+		u8 rev = 0;
 
-	switch(dev->device) {
-		case PCI_DEVICE_ID_CMD_643:
+		pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
+
+		switch (rev) {
+		case 0x07:
+		case 0x05:
+			printk("%s: UltraDMA capable", name);
 			break;
-		case PCI_DEVICE_ID_CMD_646:
-			printk(KERN_INFO "%s: chipset revision 0x%02X, ", name, class_rev);
-			switch(class_rev) {
-				case 0x07:
-				case 0x05:
-					printk("UltraDMA Capable");
-					break;
-				case 0x03:
-					printk("MultiWord DMA Force Limited");
-					break;
-				case 0x01:
-				default:
-					printk("MultiWord DMA Limited, IRQ workaround enabled");
-					break;
-				}
-			printk("\n");
-                        break;
-		case PCI_DEVICE_ID_CMD_648:
-		case PCI_DEVICE_ID_CMD_649:
-			break;
+		case 0x03:
 		default:
+			printk("%s: MultiWord DMA force limited", name);
 			break;
+		case 0x01:
+			printk("%s: MultiWord DMA limited, "
+			       "IRQ workaround enabled\n", name);
+			break;
+		}
 	}
 
 	/* Set a good latency timer and cache line size value. */
 	(void) pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64);
 	/* FIXME: pci_set_master() to ensure a good latency timer value */
 
-	/* Setup interrupts. */
-	(void) pci_read_config_byte(dev, MRDMODE, &mrdmode);
-	mrdmode &= ~(0x30);
-	(void) pci_write_config_byte(dev, MRDMODE, mrdmode);
-
-	/* Use MEMORY READ LINE for reads.
-	 * NOTE: Although not mentioned in the PCI0646U specs,
-	 *       these bits are write only and won't be read
-	 *       back as set or not.  The PCI0646U2 specs clarify
-	 *       this point.
+	/*
+	 * Enable interrupts, select MEMORY READ LINE for reads.
+	 *
+	 * NOTE: although not mentioned in the PCI0646U specs,
+	 * bits 0-1 are write only and won't be read back as
+	 * set or not -- PCI0646U2 specs clarify this point.
 	 */
-	(void) pci_write_config_byte(dev, MRDMODE, mrdmode | 0x02);
-
-	/* Set reasonable active/recovery/address-setup values. */
-	(void) pci_write_config_byte(dev, ARTTIM0,  0x40);
-	(void) pci_write_config_byte(dev, DRWTIM0,  0x3f);
-	(void) pci_write_config_byte(dev, ARTTIM1,  0x40);
-	(void) pci_write_config_byte(dev, DRWTIM1,  0x3f);
-#ifdef __i386__
-	(void) pci_write_config_byte(dev, ARTTIM23, 0x1c);
-#else
-	(void) pci_write_config_byte(dev, ARTTIM23, 0x5c);
-#endif
-	(void) pci_write_config_byte(dev, DRWTIM23, 0x3f);
-	(void) pci_write_config_byte(dev, DRWTIM3,  0x3f);
-#ifdef CONFIG_PPC
-	(void) pci_write_config_byte(dev, UDIDETCR0, 0xf0);
-#endif /* CONFIG_PPC */
+	(void) pci_read_config_byte (dev, MRDMODE, &mrdmode);
+	mrdmode &= ~0x30;
+	(void) pci_write_config_byte(dev, MRDMODE, (mrdmode | 0x02));
 
 #if defined(DISPLAY_CMD64X_TIMINGS) && defined(CONFIG_IDE_PROC_FS)
 
@@ -548,29 +517,27 @@
 	return 0;
 }
 
-static unsigned int __devinit ata66_cmd64x(ide_hwif_t *hwif)
+static u8 __devinit ata66_cmd64x(ide_hwif_t *hwif)
 {
-	u8 ata66 = 0, mask = (hwif->channel) ? 0x02 : 0x01;
+	struct pci_dev  *dev	= hwif->pci_dev;
+	u8 bmidecsr = 0, mask	= hwif->channel ? 0x02 : 0x01;
 
-	switch(hwif->pci_dev->device) {
-		case PCI_DEVICE_ID_CMD_643:
-		case PCI_DEVICE_ID_CMD_646:
-			return ata66;
-		default:
-			break;
+	switch (dev->device) {
+	case PCI_DEVICE_ID_CMD_648:
+	case PCI_DEVICE_ID_CMD_649:
+		pci_read_config_byte(dev, BMIDECSR, &bmidecsr);
+		return (bmidecsr & mask) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
+	default:
+		return ATA_CBL_PATA40;
 	}
-	pci_read_config_byte(hwif->pci_dev, BMIDECSR, &ata66);
-	return (ata66 & mask) ? 1 : 0;
 }
 
 static void __devinit init_hwif_cmd64x(ide_hwif_t *hwif)
 {
 	struct pci_dev *dev	= hwif->pci_dev;
-	unsigned int class_rev;
+	u8 rev			= 0;
 
-	hwif->autodma = 0;
-	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
-	class_rev &= 0xff;
+	pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
 
 	hwif->tuneproc  = &cmd64x_tune_drive;
 	hwif->speedproc = &cmd64x_tune_chipset;
@@ -580,8 +547,8 @@
 	if (!hwif->dma_base)
 		return;
 
-	hwif->atapi_dma = 1;
-
+	hwif->atapi_dma  = 1;
+	hwif->mwdma_mask = 0x07;
 	hwif->ultra_mask = hwif->cds->udma_mask;
 
 	/*
@@ -596,16 +563,15 @@
 	 *
 	 * So we only do UltraDMA on revision 0x05 and 0x07 chipsets.
 	 */
-	if (dev->device == PCI_DEVICE_ID_CMD_646 && class_rev < 5)
+	if (dev->device == PCI_DEVICE_ID_CMD_646 && rev < 5)
 		hwif->ultra_mask = 0x00;
 
-	hwif->mwdma_mask = 0x07;
-
 	hwif->ide_dma_check = &cmd64x_config_drive_for_dma;
-	if (!(hwif->udma_four))
-		hwif->udma_four = ata66_cmd64x(hwif);
 
-	switch(dev->device) {
+	if (hwif->cbl != ATA_CBL_PATA40_SHORT)
+		hwif->cbl = ata66_cmd64x(hwif);
+
+	switch (dev->device) {
 	case PCI_DEVICE_ID_CMD_648:
 	case PCI_DEVICE_ID_CMD_649:
 	alt_irq_bits:
@@ -614,10 +580,10 @@
 		break;
 	case PCI_DEVICE_ID_CMD_646:
 		hwif->chipset = ide_cmd646;
-		if (class_rev == 0x01) {
+		if (rev == 0x01) {
 			hwif->ide_dma_end = &cmd646_1_ide_dma_end;
 			break;
-		} else if (class_rev >= 0x03)
+		} else if (rev >= 0x03)
 			goto alt_irq_bits;
 		/* fall thru */
 	default:
@@ -626,11 +592,9 @@
 		break;
 	}
 
-
 	if (!noautodma)
 		hwif->autodma = 1;
-	hwif->drives[0].autodma = hwif->autodma;
-	hwif->drives[1].autodma = hwif->autodma;
+	hwif->drives[0].autodma = hwif->drives[1].autodma = hwif->autodma;
 }
 
 static int __devinit init_setup_cmd64x(struct pci_dev *dev, ide_pci_device_t *d)
diff --git a/drivers/ide/pci/cs5535.c b/drivers/ide/pci/cs5535.c
index 41925c4..10f61f3 100644
--- a/drivers/ide/pci/cs5535.c
+++ b/drivers/ide/pci/cs5535.c
@@ -187,7 +187,8 @@
 
 	/* if a 80 wire cable was detected */
 	pci_read_config_byte(dev, CS5535_CABLE_DETECT, &bit);
-	return (bit & 1);
+
+	return (bit & 1) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
 }
 
 /****
@@ -212,8 +213,7 @@
 	hwif->ultra_mask = 0x1F;
 	hwif->mwdma_mask = 0x07;
 
-
-	hwif->udma_four = cs5535_cable_detect(hwif->pci_dev);
+	hwif->cbl = cs5535_cable_detect(hwif->pci_dev);
 
 	if (!noautodma)
 		hwif->autodma = 1;
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c
index c33d0b0..4b6bae8 100644
--- a/drivers/ide/pci/hpt366.c
+++ b/drivers/ide/pci/hpt366.c
@@ -1,5 +1,5 @@
 /*
- * linux/drivers/ide/pci/hpt366.c		Version 1.06	Jun 27, 2007
+ * linux/drivers/ide/pci/hpt366.c		Version 1.10	Jun 29, 2007
  *
  * Copyright (C) 1999-2003		Andre Hedrick <andre@linux-ide.org>
  * Portions Copyright (C) 2001	        Sun Microsystems, Inc.
@@ -77,7 +77,7 @@
  *   since they may tamper with its fields
  * - prefix the driver startup messages with the real chip name
  * - claim the extra 240 bytes of I/O space for all chips
- * - optimize the rate masking/filtering and the drive list lookup code
+ * - optimize the UltraDMA filtering and the drive list lookup code
  * - use pci_get_slot() to get to the function 1 of HPT36x/374
  * - cache offset of the channel's misc. control registers (MCRs) being used
  *   throughout the driver
@@ -99,9 +99,9 @@
  *   stop duplicating it for each channel by storing the pointer in the pci_dev
  *   structure: first, at the init_setup stage, point it to a static "template"
  *   with only the chip type and its specific base DPLL frequency, the highest
- *   supported DMA mode, and the chip settings table pointer filled, then, at
- *   the init_chipset stage, allocate per-chip instance  and fill it with the
- *   rest of the necessary information
+ *   UltraDMA mode, and the chip settings table pointer filled,  then, at the
+ *   init_chipset stage, allocate per-chip instance  and fill it with the rest
+ *   of the necessary information
  * - get rid of the constant thresholds in the HPT37x PCI clock detection code,
  *   switch  to calculating  PCI clock frequency based on the chip's base DPLL
  *   frequency
@@ -112,6 +112,7 @@
  *   also fixing the interchanged 25/40 MHz PCI clock cases for HPT36x chips;
  *   unify HPT36x/37x timing setup code and the speedproc handlers by joining
  *   the register setting lists into the table indexed by the clock selected
+ * - set the correct hwif->ultra_mask for each individual chip
  *	Sergei Shtylyov, <sshtylyov@ru.mvista.com> or <source@mvista.com>
  */
 
@@ -391,7 +392,7 @@
 
 struct hpt_info {
 	u8 chip_type;		/* Chip type */
-	u8 max_mode;		/* Speeds allowed */
+	u8 max_ultra;		/* Max. UltraDMA mode allowed */
 	u8 dpll_clk;		/* DPLL clock in MHz */
 	u8 pci_clk;		/* PCI  clock in MHz */
 	u32 **settings; 	/* Chipset settings table */
@@ -430,77 +431,77 @@
 
 static struct hpt_info hpt36x __devinitdata = {
 	.chip_type	= HPT36x,
-	.max_mode	= (HPT366_ALLOW_ATA66_4 || HPT366_ALLOW_ATA66_3) ? 2 : 1,
+	.max_ultra	= HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? 4 : 3) : 2,
 	.dpll_clk	= 0,	/* no DPLL */
 	.settings	= hpt36x_settings
 };
 
 static struct hpt_info hpt370 __devinitdata = {
 	.chip_type	= HPT370,
-	.max_mode	= HPT370_ALLOW_ATA100_5 ? 3 : 2,
+	.max_ultra	= HPT370_ALLOW_ATA100_5 ? 5 : 4,
 	.dpll_clk	= 48,
 	.settings	= hpt37x_settings
 };
 
 static struct hpt_info hpt370a __devinitdata = {
 	.chip_type	= HPT370A,
-	.max_mode	= HPT370_ALLOW_ATA100_5 ? 3 : 2,
+	.max_ultra	= HPT370_ALLOW_ATA100_5 ? 5 : 4,
 	.dpll_clk	= 48,
 	.settings	= hpt37x_settings
 };
 
 static struct hpt_info hpt374 __devinitdata = {
 	.chip_type	= HPT374,
-	.max_mode	= 3,
+	.max_ultra	= 5,
 	.dpll_clk	= 48,
 	.settings	= hpt37x_settings
 };
 
 static struct hpt_info hpt372 __devinitdata = {
 	.chip_type	= HPT372,
-	.max_mode	= HPT372_ALLOW_ATA133_6 ? 4 : 3,
+	.max_ultra	= HPT372_ALLOW_ATA133_6 ? 6 : 5,
 	.dpll_clk	= 55,
 	.settings	= hpt37x_settings
 };
 
 static struct hpt_info hpt372a __devinitdata = {
 	.chip_type	= HPT372A,
-	.max_mode	= HPT372_ALLOW_ATA133_6 ? 4 : 3,
+	.max_ultra	= HPT372_ALLOW_ATA133_6 ? 6 : 5,
 	.dpll_clk	= 66,
 	.settings	= hpt37x_settings
 };
 
 static struct hpt_info hpt302 __devinitdata = {
 	.chip_type	= HPT302,
-	.max_mode	= HPT302_ALLOW_ATA133_6 ? 4 : 3,
+	.max_ultra	= HPT302_ALLOW_ATA133_6 ? 6 : 5,
 	.dpll_clk	= 66,
 	.settings	= hpt37x_settings
 };
 
 static struct hpt_info hpt371 __devinitdata = {
 	.chip_type	= HPT371,
-	.max_mode	= HPT371_ALLOW_ATA133_6 ? 4 : 3,
+	.max_ultra	= HPT371_ALLOW_ATA133_6 ? 6 : 5,
 	.dpll_clk	= 66,
 	.settings	= hpt37x_settings
 };
 
 static struct hpt_info hpt372n __devinitdata = {
 	.chip_type	= HPT372N,
-	.max_mode	= HPT372_ALLOW_ATA133_6 ? 4 : 3,
+	.max_ultra	= HPT372_ALLOW_ATA133_6 ? 6 : 5,
 	.dpll_clk	= 77,
 	.settings	= hpt37x_settings
 };
 
 static struct hpt_info hpt302n __devinitdata = {
 	.chip_type	= HPT302N,
-	.max_mode	= HPT302_ALLOW_ATA133_6 ? 4 : 3,
+	.max_ultra	= HPT302_ALLOW_ATA133_6 ? 6 : 5,
 	.dpll_clk	= 77,
 	.settings	= hpt37x_settings
 };
 
 static struct hpt_info hpt371n __devinitdata = {
 	.chip_type	= HPT371N,
-	.max_mode	= HPT371_ALLOW_ATA133_6 ? 4 : 3,
+	.max_ultra	= HPT371_ALLOW_ATA133_6 ? 6 : 5,
 	.dpll_clk	= 77,
 	.settings	= hpt37x_settings
 };
@@ -523,53 +524,38 @@
 static u8 hpt3xx_udma_filter(ide_drive_t *drive)
 {
 	struct hpt_info *info	= pci_get_drvdata(HWIF(drive)->pci_dev);
-	u8 chip_type		= info->chip_type;
-	u8 mode			= info->max_mode;
 	u8 mask;
 
-	switch (mode) {
-		case 0x04:
-			mask = 0x7f;
-			break;
-		case 0x03:
+	switch (info->chip_type) {
+	case HPT370A:
+		if (!HPT370_ALLOW_ATA100_5 ||
+		    check_in_drive_list(drive, bad_ata100_5))
+			return 0x1f;
+		else
+			return 0x3f;
+	case HPT370:
+		if (!HPT370_ALLOW_ATA100_5 ||
+		    check_in_drive_list(drive, bad_ata100_5))
+			mask = 0x1f;
+		else
 			mask = 0x3f;
-			if (chip_type >= HPT374)
-				break;
-			if (!check_in_drive_list(drive, bad_ata100_5))
-				goto check_bad_ata33;
-			/* fall thru */
-		case 0x02:
+		break;
+	case HPT36x:
+		if (!HPT366_ALLOW_ATA66_4 ||
+		    check_in_drive_list(drive, bad_ata66_4))
+			mask = 0x0f;
+		else
 			mask = 0x1f;
 
-			/*
-			 * CHECK ME, Does this need to be changed to HPT374 ??
-			 */
-			if (chip_type >= HPT370)
-				goto check_bad_ata33;
-			if (HPT366_ALLOW_ATA66_4 &&
-			    !check_in_drive_list(drive, bad_ata66_4))
-				goto check_bad_ata33;
-
-			mask = 0x0f;
-			if (HPT366_ALLOW_ATA66_3 &&
-			    !check_in_drive_list(drive, bad_ata66_3))
-				goto check_bad_ata33;
-			/* fall thru */
-		case 0x01:
+		if (!HPT366_ALLOW_ATA66_3 ||
+		    check_in_drive_list(drive, bad_ata66_3))
 			mask = 0x07;
-
-		check_bad_ata33:
-			if (chip_type >= HPT370A)
-				break;
-			if (!check_in_drive_list(drive, bad_ata33))
-				break;
-			/* fall thru */
-		case 0x00:
-		default:
-			mask = 0x00;
-			break;
+		break;
+	default:
+		return 0x7f;
 	}
-	return mask;
+
+	return check_in_drive_list(drive, bad_ata33) ? 0x00 : mask;
 }
 
 static u32 get_speed_setting(u8 speed, struct hpt_info *info)
@@ -737,7 +723,7 @@
  * This is specific to the HPT366 UDMA chipset
  * by HighPoint|Triones Technologies, Inc.
  */
-static int hpt366_ide_dma_lostirq(ide_drive_t *drive)
+static void hpt366_dma_lost_irq(ide_drive_t *drive)
 {
 	struct pci_dev *dev = HWIF(drive)->pci_dev;
 	u8 mcr1 = 0, mcr3 = 0, scr1 = 0;
@@ -749,7 +735,7 @@
 		drive->name, __FUNCTION__, mcr1, mcr3, scr1);
 	if (scr1 & 0x10)
 		pci_write_config_byte(dev, 0x5a, scr1 & ~0x10);
-	return __ide_dma_lostirq(drive);
+	ide_dma_lost_irq(drive);
 }
 
 static void hpt370_clear_engine(ide_drive_t *drive)
@@ -799,10 +785,10 @@
 	return __ide_dma_end(drive);
 }
 
-static int hpt370_ide_dma_timeout(ide_drive_t *drive)
+static void hpt370_dma_timeout(ide_drive_t *drive)
 {
 	hpt370_irq_timeout(drive);
-	return __ide_dma_timeout(drive);
+	ide_dma_timeout(drive);
 }
 
 /* returns 1 if DMA IRQ issued, 0 otherwise */
@@ -1150,7 +1136,7 @@
 		  * Select 66 MHz DPLL clock only if UltraATA/133 mode is
 		  * supported/enabled, use 50 MHz DPLL clock otherwise...
 		  */
-		if (info->max_mode == 0x04) {
+		if (info->max_ultra == 6) {
 			dpll_clk = 66;
 			clock = ATA_CLOCK_66MHZ;
 		} else if (dpll_clk) {	/* HPT36x chips don't have DPLL */
@@ -1243,7 +1229,7 @@
 	struct pci_dev	*dev		= hwif->pci_dev;
 	struct hpt_info *info		= pci_get_drvdata(dev);
 	int serialize			= HPT_SERIALIZE_IO;
-	u8  scr1 = 0, ata66		= (hwif->channel) ? 0x01 : 0x02;
+	u8  scr1 = 0, ata66		= hwif->channel ? 0x01 : 0x02;
 	u8  chip_type			= info->chip_type;
 	u8  new_mcr, old_mcr 		= 0;
 
@@ -1256,7 +1242,9 @@
 	hwif->intrproc			= &hpt3xx_intrproc;
 	hwif->maskproc			= &hpt3xx_maskproc;
 	hwif->busproc			= &hpt3xx_busproc;
-	hwif->udma_filter		= &hpt3xx_udma_filter;
+
+	if (chip_type <= HPT370A)
+		hwif->udma_filter	= &hpt3xx_udma_filter;
 
 	/*
 	 * HPT3xxN chips have some complications:
@@ -1305,7 +1293,7 @@
 		return;
 	}
 
-	hwif->ultra_mask = 0x7f;
+	hwif->ultra_mask = hwif->cds->udma_mask;
 	hwif->mwdma_mask = 0x07;
 
 	/*
@@ -1342,8 +1330,8 @@
 	} else
 		pci_read_config_byte (dev, 0x5a, &scr1);
 
-	if (!hwif->udma_four)
-		hwif->udma_four = (scr1 & ata66) ? 0 : 1;
+	if (hwif->cbl != ATA_CBL_PATA40_SHORT)
+		hwif->cbl = (scr1 & ata66) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
 
 	hwif->ide_dma_check		= &hpt366_config_drive_xfer_rate;
 
@@ -1353,9 +1341,9 @@
 	} else if (chip_type >= HPT370) {
 		hwif->dma_start 	= &hpt370_ide_dma_start;
 		hwif->ide_dma_end	= &hpt370_ide_dma_end;
-		hwif->ide_dma_timeout	= &hpt370_ide_dma_timeout;
+		hwif->dma_timeout	= &hpt370_dma_timeout;
 	} else
-		hwif->ide_dma_lostirq	= &hpt366_ide_dma_lostirq;
+		hwif->dma_lost_irq	= &hpt366_dma_lost_irq;
 
 	if (!noautodma)
 		hwif->autodma = 1;
@@ -1503,9 +1491,35 @@
 
 	pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
 
-	if (rev > 6)
+	switch (rev) {
+	case 0:
+	case 1:
+	case 2:
+		/*
+		 * HPT36x chips have one channel per function and have
+		 * both channel enable bits located differently and visible
+		 * to both functions -- really stupid design decision... :-(
+		 * Bit 4 is for the primary channel, bit 5 for the secondary.
+		 */
+		d->channels = 1;
+		d->enablebits[0].mask = d->enablebits[0].val = 0x10;
+
+		d->udma_mask = HPT366_ALLOW_ATA66_3 ?
+			      (HPT366_ALLOW_ATA66_4 ? 0x1f : 0x0f) : 0x07;
+		break;
+	case 3:
+	case 4:
+		d->udma_mask = HPT370_ALLOW_ATA100_5 ? 0x3f : 0x1f;
+		break;
+	default:
 		rev = 6;
-		
+		/* fall thru */
+	case 5:
+	case 6:
+		d->udma_mask = HPT372_ALLOW_ATA133_6 ? 0x7f : 0x3f;
+		break;
+	}
+
 	d->name = chipset_names[rev];
 
 	pci_set_drvdata(dev, info[rev]);
@@ -1513,15 +1527,6 @@
 	if (rev > 2)
 		goto init_single;
 
-	/*
-	 * HPT36x chips have one channel per function and have
-	 * both channel enable bits located differently and visible
-	 * to both functions -- really stupid design decision... :-(
-	 * Bit 4 is for the primary channel, bit 5 for the secondary.
-	 */
-	d->channels = 1;
-	d->enablebits[0].mask = d->enablebits[0].val = 0x10;
-
 	if ((dev2 = pci_get_slot(dev->bus, dev->devfn + 1)) != NULL) {
 		u8  mcr1 = 0, pin1 = 0, pin2 = 0;
 		int ret;
@@ -1573,6 +1578,7 @@
 		.channels	= 2,
 		.autodma	= AUTODMA,
 		.enablebits	= {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
+		.udma_mask	= HPT372_ALLOW_ATA133_6 ? 0x7f : 0x3f,
 		.bootable	= OFF_BOARD,
 		.extra		= 240
 	},{	/* 2 */
@@ -1584,6 +1590,7 @@
 		.channels	= 2,
 		.autodma	= AUTODMA,
 		.enablebits	= {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
+		.udma_mask	= HPT302_ALLOW_ATA133_6 ? 0x7f : 0x3f,
 		.bootable	= OFF_BOARD,
 		.extra		= 240
 	},{	/* 3 */
@@ -1595,6 +1602,7 @@
 		.channels	= 2,
 		.autodma	= AUTODMA,
 		.enablebits	= {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
+		.udma_mask	= HPT371_ALLOW_ATA133_6 ? 0x7f : 0x3f,
 		.bootable	= OFF_BOARD,
 		.extra		= 240
 	},{	/* 4 */
@@ -1606,6 +1614,7 @@
 		.channels	= 2,	/* 4 */
 		.autodma	= AUTODMA,
 		.enablebits	= {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
+		.udma_mask	= 0x3f,
 		.bootable	= OFF_BOARD,
 		.extra		= 240
 	},{	/* 5 */
@@ -1617,6 +1626,7 @@
 		.channels	= 2,	/* 4 */
 		.autodma	= AUTODMA,
 		.enablebits	= {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
+		.udma_mask	= HPT372_ALLOW_ATA133_6 ? 0x7f : 0x3f,
 		.bootable	= OFF_BOARD,
 		.extra		= 240
 	}
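
The masks built by hpt3xx_udma_filter() and the new udma_mask fields follow the usual convention that bit n permits UDMA mode n: 0x07 caps at UDMA2 (ATA/33), 0x0f at UDMA3, 0x1f at UDMA4 (ATA/66), 0x3f at UDMA5 (ATA/100), 0x7f at UDMA6 (ATA/133). Sketched as a helper (invented for illustration):

	static inline int udma_mode_allowed(u8 mask, u8 mode)
	{
		/* mode counted from UDMA0 upwards */
		return (mask >> mode) & 1;
	}
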
diff --git a/drivers/ide/pci/it8213.c b/drivers/ide/pci/it8213.c
index c04a026..ff48c23 100644
--- a/drivers/ide/pci/it8213.c
+++ b/drivers/ide/pci/it8213.c
@@ -231,7 +231,7 @@
 
 static void __devinit init_hwif_it8213(ide_hwif_t *hwif)
 {
-	u8 reg42h = 0, ata66 = 0;
+	u8 reg42h = 0;
 
 	hwif->speedproc = &it8213_tune_chipset;
 	hwif->tuneproc	= &it8213_tuneproc;
@@ -250,11 +250,11 @@
 	hwif->swdma_mask = 0x04;
 
 	pci_read_config_byte(hwif->pci_dev, 0x42, &reg42h);
-	ata66 = (reg42h & 0x02) ? 0 : 1;
 
 	hwif->ide_dma_check = &it8213_config_drive_for_dma;
-	if (!(hwif->udma_four))
-		hwif->udma_four = ata66;
+
+	if (hwif->cbl != ATA_CBL_PATA40_SHORT)
+		hwif->cbl = (reg42h & 0x02) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
 
 	/*
 	 *	The BIOS often doesn't set up DMA on this controller
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c
index 3aeb7f1..8197b65 100644
--- a/drivers/ide/pci/it821x.c
+++ b/drivers/ide/pci/it821x.c
@@ -491,10 +491,10 @@
  *	the needed logic onboard.
  */
 
-static unsigned int __devinit ata66_it821x(ide_hwif_t *hwif)
+static u8 __devinit ata66_it821x(ide_hwif_t *hwif)
 {
 	/* The reference driver also only does disk side */
-	return 1;
+	return ATA_CBL_PATA80;
 }
 
 /**
@@ -662,8 +662,9 @@
 	hwif->mwdma_mask = 0x07;
 
 	hwif->ide_dma_check = &it821x_config_drive_for_dma;
-	if (!(hwif->udma_four))
-		hwif->udma_four = ata66_it821x(hwif);
+
+	if (hwif->cbl != ATA_CBL_PATA40_SHORT)
+		hwif->cbl = ata66_it821x(hwif);
 
 	/*
 	 *	The BIOS often doesn't set up DMA on this controller
diff --git a/drivers/ide/pci/jmicron.c b/drivers/ide/pci/jmicron.c
index 76ed251..a6008f6 100644
--- a/drivers/ide/pci/jmicron.c
+++ b/drivers/ide/pci/jmicron.c
@@ -25,10 +25,10 @@
  *	ata66_jmicron		-	Cable check
  *	@hwif: IDE port
  *
- *	Return 1 if the cable is 80pin
+ *	Returns the cable type.
  */
 
-static int __devinit ata66_jmicron(ide_hwif_t *hwif)
+static u8 __devinit ata66_jmicron(ide_hwif_t *hwif)
 {
 	struct pci_dev *pdev = hwif->pci_dev;
 
@@ -70,16 +70,17 @@
 	{
 	case PORT_PATA0:
 		if (control & (1 << 3))	/* 40/80 pin primary */
-			return 0;
-		return 1;
+			return ATA_CBL_PATA40;
+		return ATA_CBL_PATA80;
 	case PORT_PATA1:
 		if (control5 & (1 << 19))	/* 40/80 pin secondary */
-			return 0;
-		return 1;
+			return ATA_CBL_PATA40;
+		return ATA_CBL_PATA80;
 	case PORT_SATA:
 		break;
 	}
-	return 1; /* Avoid bogus "control reaches end of non-void function" */
+	/* Avoid bogus "control reaches end of non-void function" */
+	return ATA_CBL_PATA80;
 }
 
 static void jmicron_tuneproc (ide_drive_t *drive, byte mode_wanted)
@@ -159,8 +160,9 @@
 	hwif->mwdma_mask = 0x07;
 
 	hwif->ide_dma_check = &jmicron_config_drive_for_dma;
-	if (!(hwif->udma_four))
-		hwif->udma_four = ata66_jmicron(hwif);
+
+	if (hwif->cbl != ATA_CBL_PATA40_SHORT)
+		hwif->cbl = ata66_jmicron(hwif);
 
 	hwif->autodma = 1;
 	hwif->drives[0].autodma = hwif->autodma;
diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pci/pdc202xx_new.c
index 0765dce..ee5020d 100644
--- a/drivers/ide/pci/pdc202xx_new.c
+++ b/drivers/ide/pci/pdc202xx_new.c
@@ -225,7 +225,10 @@
 
 static u8 pdcnew_cable_detect(ide_hwif_t *hwif)
 {
-	return get_indexed_reg(hwif, 0x0b) & 0x04;
+	if (get_indexed_reg(hwif, 0x0b) & 0x04)
+		return ATA_CBL_PATA40;
+	else
+		return ATA_CBL_PATA80;
 }
 
 static int pdcnew_config_drive_xfer_rate(ide_drive_t *drive)
@@ -509,8 +512,8 @@
 
 	hwif->ide_dma_check = &pdcnew_config_drive_xfer_rate;
 
-	if (!hwif->udma_four)
-		hwif->udma_four = pdcnew_cable_detect(hwif) ? 0 : 1;
+	if (hwif->cbl != ATA_CBL_PATA40_SHORT)
+		hwif->cbl = pdcnew_cable_detect(hwif);
 
 	if (!noautodma)
 		hwif->autodma = 1;
diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pci/pdc202xx_old.c
index 2384468..41ac4a9 100644
--- a/drivers/ide/pci/pdc202xx_old.c
+++ b/drivers/ide/pci/pdc202xx_old.c
@@ -152,8 +152,10 @@
 static u8 pdc202xx_old_cable_detect (ide_hwif_t *hwif)
 {
 	u16 CIS = 0, mask = (hwif->channel) ? (1<<11) : (1<<10);
+
 	pci_read_config_word(hwif->pci_dev, 0x50, &CIS);
-	return (CIS & mask) ? 1 : 0;
+
+	return (CIS & mask) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
 }
 
 /*
@@ -267,18 +269,24 @@
 	return (dma_stat & 4) == 4;	/* return 1 if INTR asserted */
 }
 
-static int pdc202xx_ide_dma_lostirq(ide_drive_t *drive)
+static void pdc202xx_dma_lost_irq(ide_drive_t *drive)
 {
-	if (HWIF(drive)->resetproc != NULL)
-		HWIF(drive)->resetproc(drive);
-	return __ide_dma_lostirq(drive);
+	ide_hwif_t *hwif = HWIF(drive);
+
+	if (hwif->resetproc != NULL)
+		hwif->resetproc(drive);
+
+	ide_dma_lost_irq(drive);
 }
 
-static int pdc202xx_ide_dma_timeout(ide_drive_t *drive)
+static void pdc202xx_dma_timeout(ide_drive_t *drive)
 {
-	if (HWIF(drive)->resetproc != NULL)
-		HWIF(drive)->resetproc(drive);
-	return __ide_dma_timeout(drive);
+	ide_hwif_t *hwif = HWIF(drive);
+
+	if (hwif->resetproc != NULL)
+		hwif->resetproc(drive);
+
+	ide_dma_timeout(drive);
 }
 
 static void pdc202xx_reset_host (ide_hwif_t *hwif)
@@ -347,12 +355,13 @@
 	hwif->err_stops_fifo = 1;
 
 	hwif->ide_dma_check = &pdc202xx_config_drive_xfer_rate;
-	hwif->ide_dma_lostirq = &pdc202xx_ide_dma_lostirq;
-	hwif->ide_dma_timeout = &pdc202xx_ide_dma_timeout;
+	hwif->dma_lost_irq = &pdc202xx_dma_lost_irq;
+	hwif->dma_timeout = &pdc202xx_dma_timeout;
 
 	if (hwif->pci_dev->device != PCI_DEVICE_ID_PROMISE_20246) {
-		if (!(hwif->udma_four))
-			hwif->udma_four = (pdc202xx_old_cable_detect(hwif)) ? 0 : 1;
+		if (hwif->cbl != ATA_CBL_PATA40_SHORT)
+			hwif->cbl = pdc202xx_old_cable_detect(hwif);
+
 		hwif->dma_start = &pdc202xx_old_ide_dma_start;
 		hwif->ide_dma_end = &pdc202xx_old_ide_dma_end;
 	} 
diff --git a/drivers/ide/pci/piix.c b/drivers/ide/pci/piix.c
index 8b219dd..2e0b29e 100644
--- a/drivers/ide/pci/piix.c
+++ b/drivers/ide/pci/piix.c
@@ -1,5 +1,5 @@
 /*
- *  linux/drivers/ide/pci/piix.c	Version 0.47	February 8, 2007
+ *  linux/drivers/ide/pci/piix.c	Version 0.50	Jun 10, 2007
  *
  *  Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
  *  Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
@@ -394,14 +394,45 @@
 	hwif->OUTB(dma_stat, hwif->dma_status);
 }
 
-static int __devinit piix_cable_detect(ide_hwif_t *hwif)
+struct ich_laptop {
+	u16 device;
+	u16 subvendor;
+	u16 subdevice;
+};
+
+/*
+ *	List of laptops that use short cables rather than 80 wire
+ */
+
+static const struct ich_laptop ich_laptop[] = {
+	/* devid, subvendor, subdev */
+	{ 0x27DF, 0x0005, 0x0280 },	/* ICH7 on Acer 5602WLMi */
+	{ 0x27DF, 0x1025, 0x0110 },	/* ICH7 on Acer 3682WLMi */
+	{ 0x27DF, 0x1043, 0x1267 },	/* ICH7 on Asus W5F */
+	{ 0x24CA, 0x1025, 0x0061 },	/* ICH4 on Acer Aspire 2023WLMi */
+	/* end marker */
+	{ 0, }
+};
+
+static u8 __devinit piix_cable_detect(ide_hwif_t *hwif)
 {
-	struct pci_dev *dev = hwif->pci_dev;
+	struct pci_dev *pdev = hwif->pci_dev;
+	const struct ich_laptop *lap = &ich_laptop[0];
 	u8 reg54h = 0, mask = hwif->channel ? 0xc0 : 0x30;
 
-	pci_read_config_byte(dev, 0x54, &reg54h);
+	/* check for specials */
+	while (lap->device) {
+		if (lap->device == pdev->device &&
+		    lap->subvendor == pdev->subsystem_vendor &&
+		    lap->subdevice == pdev->subsystem_device) {
+			return ATA_CBL_PATA40_SHORT;
+		}
+		lap++;
+	}
 
-	return (reg54h & mask) ? 1 : 0;
+	pci_read_config_byte(pdev, 0x54, &reg54h);
+
+	return (reg54h & mask) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
 }
 
 /**
@@ -444,8 +475,8 @@
 	hwif->swdma_mask = 0x04;
 
 	if (hwif->ultra_mask & 0x78) {
-		if (!hwif->udma_four)
-			hwif->udma_four = piix_cable_detect(hwif);
+		if (hwif->cbl != ATA_CBL_PATA40_SHORT)
+			hwif->cbl = piix_cable_detect(hwif);
 	}
 
 	if (no_piix_dma)
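
piix_cable_detect() walks ich_laptop[] up to its { 0, } sentinel; a hit forces ATA_CBL_PATA40_SHORT, marking machines whose short internal cabling is ATA/66+-safe even though host-side detection reports only 40 wires. The same scan factored into a helper (name invented):

	static int ich_is_short_cable(struct pci_dev *pdev)
	{
		const struct ich_laptop *lap = &ich_laptop[0];

		for (; lap->device; lap++)
			if (lap->device == pdev->device &&
			    lap->subvendor == pdev->subsystem_vendor &&
			    lap->subdevice == pdev->subsystem_device)
				return 1;

		return 0;
	}
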
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c
index 55bc0a3..7b87488 100644
--- a/drivers/ide/pci/scc_pata.c
+++ b/drivers/ide/pci/scc_pata.c
@@ -716,7 +716,7 @@
 	hwif->atapi_dma = 1;
 
 	/* we support 80c cable only. */
-	hwif->udma_four = 1;
+	hwif->cbl = ATA_CBL_PATA80;
 
 	hwif->autodma = 0;
 	if (!noautodma)
diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/pci/serverworks.c
index d9c4fd1..1371b5b 100644
--- a/drivers/ide/pci/serverworks.c
+++ b/drivers/ide/pci/serverworks.c
@@ -1,5 +1,5 @@
 /*
- * linux/drivers/ide/pci/serverworks.c		Version 0.11	Jun 2 2007
+ * linux/drivers/ide/pci/serverworks.c		Version 0.20	Jun 3 2007
  *
  * Copyright (C) 1998-2000 Michel Aubry
  * Copyright (C) 1998-2000 Andrzej Krzysztofowicz
@@ -151,84 +151,11 @@
 	if(dev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4 &&
 		drive->media == ide_disk && speed >= XFER_UDMA_0)
 			BUG();
-			
-	pci_read_config_byte(dev, drive_pci[drive->dn], &pio_timing);
-	pci_read_config_byte(dev, drive_pci2[drive->dn], &dma_timing);
+
 	pci_read_config_byte(dev, (0x56|hwif->channel), &ultra_timing);
 	pci_read_config_word(dev, 0x4A, &csb5_pio);
 	pci_read_config_byte(dev, 0x54, &ultra_enable);
 
-	/* If we are in RAID mode (eg AMI MegaIDE) then we can't it
-	   turns out trust the firmware configuration */
-
-	if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
-		goto oem_setup_failed;
-
-	/* Per Specified Design by OEM, and ASIC Architect */
-	if ((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
-	    (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)) {
-		if (!drive->init_speed) {
-			u8 dma_stat = inb(hwif->dma_status);
-
-			if (((ultra_enable << (7-drive->dn) & 0x80) == 0x80) &&
-			    ((dma_stat & (1<<(5+unit))) == (1<<(5+unit)))) {
-				drive->current_speed = drive->init_speed = XFER_UDMA_0 + udma_modes[(ultra_timing >> (4*unit)) & ~(0xF0)];
-				return 0;
-			} else if ((dma_timing) &&
-				   ((dma_stat&(1<<(5+unit)))==(1<<(5+unit)))) {
-				u8 dmaspeed;
-
-				switch (dma_timing & 0x77) {
-				case 0x20:
-					dmaspeed = XFER_MW_DMA_2;
-					break;
-				case 0x21:
-					dmaspeed = XFER_MW_DMA_1;
-					break;
-				case 0x77:
-					dmaspeed = XFER_MW_DMA_0;
-					break;
-				default:
-					goto dma_pio;
-				}
-
-				drive->current_speed = drive->init_speed = dmaspeed;
-				return 0;
-			}
-dma_pio:
-			if (pio_timing) {
-				u8 piospeed;
-
-				switch (pio_timing & 0x7f) {
-				case 0x20:
-					piospeed = XFER_PIO_4;
-					break;
-				case 0x22:
-					piospeed = XFER_PIO_3;
-					break;
-				case 0x34:
-					piospeed = XFER_PIO_2;
-					break;
-				case 0x47:
-					piospeed = XFER_PIO_1;
-					break;
-				case 0x5d:
-					piospeed = XFER_PIO_0;
-					break;
-				default:
-					goto oem_setup_failed;
-				}
-
-				drive->current_speed = drive->init_speed = piospeed;
-				return 0;
-			}
-		}
-	}
-
-oem_setup_failed:
-
-	pio_timing	= 0;
-	dma_timing	= 0;
 	ultra_timing	&= ~(0x0F << (4*unit));
 	ultra_enable	&= ~(0x01 << drive->dn);
 	csb5_pio	&= ~(0x0F << (4*drive->dn));
@@ -402,9 +329,9 @@
 	return dev->irq;
 }
 
-static unsigned int __devinit ata66_svwks_svwks (ide_hwif_t *hwif)
+static u8 __devinit ata66_svwks_svwks(ide_hwif_t *hwif)
 {
-	return 1;
+	return ATA_CBL_PATA80;
 }
 
 /* On Dell PowerEdge servers with a CSB5/CSB6, the top two bits
@@ -414,7 +341,7 @@
  * Bit 14 clear = primary IDE channel does not have 80-pin cable.
  * Bit 14 set   = primary IDE channel has 80-pin cable.
  */
-static unsigned int __devinit ata66_svwks_dell (ide_hwif_t *hwif)
+static u8 __devinit ata66_svwks_dell(ide_hwif_t *hwif)
 {
 	struct pci_dev *dev = hwif->pci_dev;
 	if (dev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
@@ -422,8 +349,8 @@
 	    (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE ||
 	     dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE))
 		return ((1 << (hwif->channel + 14)) &
-			dev->subsystem_device) ? 1 : 0;
-	return 0;
+			dev->subsystem_device) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
+	return ATA_CBL_PATA40;
 }
 
 /* Sun Cobalt Alpine hardware avoids the 80-pin cable
@@ -432,18 +359,18 @@
  *
  * WARNING: this only works on Alpine hardware!
  */
-static unsigned int __devinit ata66_svwks_cobalt (ide_hwif_t *hwif)
+static u8 __devinit ata66_svwks_cobalt(ide_hwif_t *hwif)
 {
 	struct pci_dev *dev = hwif->pci_dev;
 	if (dev->subsystem_vendor == PCI_VENDOR_ID_SUN &&
 	    dev->vendor	== PCI_VENDOR_ID_SERVERWORKS &&
 	    dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE)
 		return ((1 << (hwif->channel + 14)) &
-			dev->subsystem_device) ? 1 : 0;
-	return 0;
+			dev->subsystem_device) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
+	return ATA_CBL_PATA40;
 }
 
-static unsigned int __devinit ata66_svwks (ide_hwif_t *hwif)
+static u8 __devinit ata66_svwks(ide_hwif_t *hwif)
 {
 	struct pci_dev *dev = hwif->pci_dev;
 
@@ -462,9 +389,9 @@
 	/* Per Specified Design by OEM, and ASIC Architect */
 	if ((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
 	    (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2))
-		return 1;
+		return ATA_CBL_PATA80;
 
-	return 0;
+	return ATA_CBL_PATA40;
 }
 
 static void __devinit init_hwif_svwks (ide_hwif_t *hwif)
@@ -495,8 +422,8 @@
 
 	hwif->ide_dma_check = &svwks_config_drive_xfer_rate;
 	if (hwif->pci_dev->device != PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
-		if (!hwif->udma_four)
-			hwif->udma_four = ata66_svwks(hwif);
+		if (hwif->cbl != ATA_CBL_PATA40_SHORT)
+			hwif->cbl = ata66_svwks(hwif);
 	}
 	if (!noautodma)
 		hwif->autodma = 1;
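
ata66_svwks_dell() and ata66_svwks_cobalt() above decode the same subsystem-device encoding: bit 14 reports an 80-pin cable on the primary channel, bit 15 on the secondary. The shared test, sketched as a helper (name invented):

	static u8 svwks_subsys_cable(struct pci_dev *dev, int channel)
	{
		return (dev->subsystem_device & (1 << (channel + 14))) ?
			ATA_CBL_PATA80 : ATA_CBL_PATA40;
	}
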
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c
index d3185e2..d396b29 100644
--- a/drivers/ide/pci/sgiioc4.c
+++ b/drivers/ide/pci/sgiioc4.c
@@ -316,14 +316,6 @@
 	sgiioc4_clearirq(drive);
 }
 
-static int
-sgiioc4_ide_dma_lostirq(ide_drive_t * drive)
-{
-	HWIF(drive)->resetproc(drive);
-
-	return __ide_dma_lostirq(drive);
-}
-
 static void
 sgiioc4_resetproc(ide_drive_t * drive)
 {
@@ -331,6 +323,14 @@
 	sgiioc4_clearirq(drive);
 }
 
+static void
+sgiioc4_dma_lost_irq(ide_drive_t * drive)
+{
+	sgiioc4_resetproc(drive);
+
+	ide_dma_lost_irq(drive);
+}
+
 static u8
 sgiioc4_INB(unsigned long port)
 {
@@ -607,8 +607,8 @@
 	hwif->ide_dma_test_irq = &sgiioc4_ide_dma_test_irq;
 	hwif->dma_host_on = &sgiioc4_dma_host_on;
 	hwif->dma_host_off = &sgiioc4_dma_host_off;
-	hwif->ide_dma_lostirq = &sgiioc4_ide_dma_lostirq;
-	hwif->ide_dma_timeout = &__ide_dma_timeout;
+	hwif->dma_lost_irq = &sgiioc4_dma_lost_irq;
+	hwif->dma_timeout = &ide_dma_timeout;
 
 	hwif->INB = &sgiioc4_INB;
 }
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
index 1a4444e..1c3e354 100644
--- a/drivers/ide/pci/siimage.c
+++ b/drivers/ide/pci/siimage.c
@@ -933,16 +933,17 @@
  *	interface.
  */
 
-static unsigned int __devinit ata66_siimage(ide_hwif_t *hwif)
+static u8 __devinit ata66_siimage(ide_hwif_t *hwif)
 {
 	unsigned long addr = siimage_selreg(hwif, 0);
-	if (pci_get_drvdata(hwif->pci_dev) == NULL) {
-		u8 ata66 = 0;
-		pci_read_config_byte(hwif->pci_dev, addr, &ata66);
-		return (ata66 & 0x01) ? 1 : 0;
-	}
+	u8 ata66 = 0;
 
-	return (hwif->INB(addr) & 0x01) ? 1 : 0;
+	if (pci_get_drvdata(hwif->pci_dev) == NULL)
+		pci_read_config_byte(hwif->pci_dev, addr, &ata66);
+	else
+		ata66 = hwif->INB(addr);
+
+	return (ata66 & 0x01) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
 }
 
 /**
@@ -988,8 +989,9 @@
 		hwif->atapi_dma = 1;
 
 	hwif->ide_dma_check = &siimage_config_drive_for_dma;
-	if (!(hwif->udma_four))
-		hwif->udma_four = ata66_siimage(hwif);
+
+	if (hwif->cbl != ATA_CBL_PATA40_SHORT)
+		hwif->cbl = ata66_siimage(hwif);
 
 	if (hwif->mmio) {
 		hwif->ide_dma_test_irq = &siimage_mmio_ide_dma_test_irq;
diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c
index ec0adad..f875183 100644
--- a/drivers/ide/pci/sis5513.c
+++ b/drivers/ide/pci/sis5513.c
@@ -1,5 +1,5 @@
 /*
- * linux/drivers/ide/pci/sis5513.c	Version 0.20	Mar 4, 2007
+ * linux/drivers/ide/pci/sis5513.c	Version 0.25	Jun 10, 2007
  *
  * Copyright (C) 1999-2000	Andre Hedrick <andre@linux-ide.org>
  * Copyright (C) 2002		Lionel Bouton <Lionel.Bouton@inet6.fr>, Maintainer
@@ -796,10 +796,33 @@
 	return 0;
 }
 
-static unsigned int __devinit ata66_sis5513 (ide_hwif_t *hwif)
+struct sis_laptop {
+	u16 device;
+	u16 subvendor;
+	u16 subdevice;
+};
+
+static const struct sis_laptop sis_laptop[] = {
+	/* devid, subvendor, subdev */
+	{ 0x5513, 0x1043, 0x1107 },	/* ASUS A6K */
+	/* end marker */
+	{ 0, }
+};
+
+static u8 __devinit ata66_sis5513(ide_hwif_t *hwif)
 {
+	struct pci_dev *pdev = hwif->pci_dev;
+	const struct sis_laptop *lap = &sis_laptop[0];
 	u8 ata66 = 0;
 
+	while (lap->device) {
+		if (lap->device == pdev->device &&
+		    lap->subvendor == pdev->subsystem_vendor &&
+		    lap->subdevice == pdev->subsystem_device)
+			return ATA_CBL_PATA40_SHORT;
+		lap++;
+	}
+
 	if (chipset_family >= ATA_133) {
 		u16 regw = 0;
 		u16 reg_addr = hwif->channel ? 0x52: 0x50;
@@ -811,7 +834,8 @@
 		pci_read_config_byte(hwif->pci_dev, 0x48, &reg48h);
 		ata66 = (reg48h & mask) ? 0 : 1;
 	}
-        return ata66;
+
+	return ata66 ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
 }
 
 static void __devinit init_hwif_sis5513 (ide_hwif_t *hwif)
@@ -841,8 +865,8 @@
 	if (!chipset_family)
 		return;
 
-	if (!(hwif->udma_four))
-		hwif->udma_four = ata66_sis5513(hwif);
+	if (hwif->cbl != ATA_CBL_PATA40_SHORT)
+		hwif->cbl = ata66_sis5513(hwif);
 
 	if (chipset_family > ATA_16) {
 		hwif->ide_dma_check = &sis5513_config_xfer_rate;
diff --git a/drivers/ide/pci/sl82c105.c b/drivers/ide/pci/sl82c105.c
index 7c383d9..4878798 100644
--- a/drivers/ide/pci/sl82c105.c
+++ b/drivers/ide/pci/sl82c105.c
@@ -195,7 +195,7 @@
  * This function is called when the IDE timer expires, the drive
  * indicates that it is READY, and we were waiting for DMA to complete.
  */
-static int sl82c105_ide_dma_lostirq(ide_drive_t *drive)
+static void sl82c105_dma_lost_irq(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif	= HWIF(drive);
 	struct pci_dev *dev	= hwif->pci_dev;
@@ -222,9 +222,6 @@
 	}
 
 	sl82c105_reset_host(dev);
-
-	/* __ide_dma_lostirq would return 1, so we do as well */
-	return 1;
 }
 
 /*
@@ -244,15 +241,12 @@
 	ide_dma_start(drive);
 }
 
-static int sl82c105_ide_dma_timeout(ide_drive_t *drive)
+static void sl82c105_dma_timeout(ide_drive_t *drive)
 {
-	ide_hwif_t *hwif	= HWIF(drive);
-	struct pci_dev *dev	= hwif->pci_dev;
+	DBG(("sl82c105_dma_timeout(drive:%s)\n", drive->name));
 
-	DBG(("sl82c105_ide_dma_timeout(drive:%s)\n", drive->name));
-
-	sl82c105_reset_host(dev);
-	return __ide_dma_timeout(drive);
+	sl82c105_reset_host(HWIF(drive)->pci_dev);
+	ide_dma_timeout(drive);
 }
 
 static int sl82c105_ide_dma_on(ide_drive_t *drive)
@@ -441,9 +435,9 @@
 	hwif->ide_dma_check		= &sl82c105_ide_dma_check;
 	hwif->ide_dma_on		= &sl82c105_ide_dma_on;
 	hwif->dma_off_quietly		= &sl82c105_dma_off_quietly;
-	hwif->ide_dma_lostirq		= &sl82c105_ide_dma_lostirq;
+	hwif->dma_lost_irq		= &sl82c105_dma_lost_irq;
 	hwif->dma_start			= &sl82c105_dma_start;
-	hwif->ide_dma_timeout		= &sl82c105_ide_dma_timeout;
+	hwif->dma_timeout		= &sl82c105_dma_timeout;
 
 	if (!noautodma)
 		hwif->autodma = 1;
diff --git a/drivers/ide/pci/slc90e66.c b/drivers/ide/pci/slc90e66.c
index c40f291..575dbbd 100644
--- a/drivers/ide/pci/slc90e66.c
+++ b/drivers/ide/pci/slc90e66.c
@@ -199,10 +199,9 @@
 	hwif->mwdma_mask = 0x06;
 	hwif->swdma_mask = 0x04;
 
-	if (!hwif->udma_four) {
+	if (hwif->cbl != ATA_CBL_PATA40_SHORT)
 		/* bit[0(1)]: 0:80, 1:40 */
-		hwif->udma_four = (reg47 & mask) ? 0 : 1;
-	}
+		hwif->cbl = (reg47 & mask) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
 
 	hwif->ide_dma_check = &slc90e66_config_drive_xfer_rate;
 
diff --git a/drivers/ide/pci/tc86c001.c b/drivers/ide/pci/tc86c001.c
index cee619b..8de1f8e 100644
--- a/drivers/ide/pci/tc86c001.c
+++ b/drivers/ide/pci/tc86c001.c
@@ -220,13 +220,13 @@
 	hwif->ide_dma_check	= &tc86c001_config_drive_xfer_rate;
 	hwif->dma_start 	= &tc86c001_dma_start;
 
-	if (!hwif->udma_four) {
+	if (hwif->cbl != ATA_CBL_PATA40_SHORT) {
 		/*
 		 * System Control  1 Register bit 13 (PDIAGN):
 		 * 0=80-pin cable, 1=40-pin cable
 		 */
 		scr1 = hwif->INW(sc_base + 0x00);
-		hwif->udma_four = (scr1 & 0x2000) ? 0 : 1;
+		hwif->cbl = (scr1 & 0x2000) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
 	}
 
 	if (!noautodma)
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c
index a508550..d21dd2e 100644
--- a/drivers/ide/pci/via82cxxx.c
+++ b/drivers/ide/pci/via82cxxx.c
@@ -1,6 +1,6 @@
 /*
  *
- * Version 3.38
+ * Version 3.45
  *
  * VIA IDE driver for Linux. Supported southbridges:
  *
@@ -9,6 +9,7 @@
  *   vt8235, vt8237, vt8237a
  *
  * Copyright (c) 2000-2002 Vojtech Pavlik
+ * Copyright (c) 2007 Bartlomiej Zolnierkiewicz
  *
  * Based on the work of:
  *	Michel Aubry
@@ -33,6 +34,8 @@
 #include <linux/pci.h>
 #include <linux/init.h>
 #include <linux/ide.h>
+#include <linux/dmi.h>
+
 #include <asm/io.h>
 
 #ifdef CONFIG_PPC_CHRP
@@ -41,8 +44,6 @@
 
 #include "ide-timing.h"
 
-#define DISPLAY_VIA_TIMINGS
-
 #define VIA_IDE_ENABLE		0x40
 #define VIA_IDE_CONFIG		0x41
 #define VIA_FIFO_CONFIG		0x43
@@ -54,18 +55,12 @@
 #define VIA_ADDRESS_SETUP	0x4c
 #define VIA_UDMA_TIMING		0x50
 
-#define VIA_UDMA		0x007
-#define VIA_UDMA_NONE		0x000
-#define VIA_UDMA_33		0x001
-#define VIA_UDMA_66		0x002
-#define VIA_UDMA_100		0x003
-#define VIA_UDMA_133		0x004
-#define VIA_BAD_PREQ		0x010	/* Crashes if PREQ# till DDACK# set */
-#define VIA_BAD_CLK66		0x020	/* 66 MHz clock doesn't work correctly */
-#define VIA_SET_FIFO		0x040	/* Needs to have FIFO split set */
-#define VIA_NO_UNMASK		0x080	/* Doesn't work with IRQ unmasking on */
-#define VIA_BAD_ID		0x100	/* Has wrong vendor ID (0x1107) */
-#define VIA_BAD_AST		0x200	/* Don't touch Address Setup Timing */
+#define VIA_BAD_PREQ		0x01 /* Crashes if PREQ# till DDACK# set */
+#define VIA_BAD_CLK66		0x02 /* 66 MHz clock doesn't work correctly */
+#define VIA_SET_FIFO		0x04 /* Needs to have FIFO split set */
+#define VIA_NO_UNMASK		0x08 /* Doesn't work with IRQ unmasking on */
+#define VIA_BAD_ID		0x10 /* Has wrong vendor ID (0x1107) */
+#define VIA_BAD_AST		0x20 /* Don't touch Address Setup Timing */
 
 /*
  * VIA SouthBridge chips.
@@ -76,36 +71,37 @@
 	u16 id;
 	u8 rev_min;
 	u8 rev_max;
-	u16 flags;
+	u8 udma_mask;
+	u8 flags;
 } via_isa_bridges[] = {
-	{ "cx700",	PCI_DEVICE_ID_VIA_CX700,    0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
-	{ "vt8237s",	PCI_DEVICE_ID_VIA_8237S,    0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
-	{ "vt6410",	PCI_DEVICE_ID_VIA_6410,     0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
-	{ "vt8251",	PCI_DEVICE_ID_VIA_8251,     0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
-	{ "vt8237",	PCI_DEVICE_ID_VIA_8237,     0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
-	{ "vt8237a",	PCI_DEVICE_ID_VIA_8237A,    0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
-	{ "vt8235",	PCI_DEVICE_ID_VIA_8235,     0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
-	{ "vt8233a",	PCI_DEVICE_ID_VIA_8233A,    0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
-	{ "vt8233c",	PCI_DEVICE_ID_VIA_8233C_0,  0x00, 0x2f, VIA_UDMA_100 },
-	{ "vt8233",	PCI_DEVICE_ID_VIA_8233_0,   0x00, 0x2f, VIA_UDMA_100 },
-	{ "vt8231",	PCI_DEVICE_ID_VIA_8231,     0x00, 0x2f, VIA_UDMA_100 },
-	{ "vt82c686b",	PCI_DEVICE_ID_VIA_82C686,   0x40, 0x4f, VIA_UDMA_100 },
-	{ "vt82c686a",	PCI_DEVICE_ID_VIA_82C686,   0x10, 0x2f, VIA_UDMA_66 },
-	{ "vt82c686",	PCI_DEVICE_ID_VIA_82C686,   0x00, 0x0f, VIA_UDMA_33 | VIA_BAD_CLK66 },
-	{ "vt82c596b",	PCI_DEVICE_ID_VIA_82C596,   0x10, 0x2f, VIA_UDMA_66 },
-	{ "vt82c596a",	PCI_DEVICE_ID_VIA_82C596,   0x00, 0x0f, VIA_UDMA_33 | VIA_BAD_CLK66 },
-	{ "vt82c586b",	PCI_DEVICE_ID_VIA_82C586_0, 0x47, 0x4f, VIA_UDMA_33 | VIA_SET_FIFO },
-	{ "vt82c586b",	PCI_DEVICE_ID_VIA_82C586_0, 0x40, 0x46, VIA_UDMA_33 | VIA_SET_FIFO | VIA_BAD_PREQ },
-	{ "vt82c586b",	PCI_DEVICE_ID_VIA_82C586_0, 0x30, 0x3f, VIA_UDMA_33 | VIA_SET_FIFO },
-	{ "vt82c586a",	PCI_DEVICE_ID_VIA_82C586_0, 0x20, 0x2f, VIA_UDMA_33 | VIA_SET_FIFO },
-	{ "vt82c586",	PCI_DEVICE_ID_VIA_82C586_0, 0x00, 0x0f, VIA_UDMA_NONE | VIA_SET_FIFO },
-	{ "vt82c576",	PCI_DEVICE_ID_VIA_82C576,   0x00, 0x2f, VIA_UDMA_NONE | VIA_SET_FIFO | VIA_NO_UNMASK },
-	{ "vt82c576",	PCI_DEVICE_ID_VIA_82C576,   0x00, 0x2f, VIA_UDMA_NONE | VIA_SET_FIFO | VIA_NO_UNMASK | VIA_BAD_ID },
+	{ "cx700",	PCI_DEVICE_ID_VIA_CX700,    0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
+	{ "vt8237s",	PCI_DEVICE_ID_VIA_8237S,    0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
+	{ "vt6410",	PCI_DEVICE_ID_VIA_6410,     0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
+	{ "vt8251",	PCI_DEVICE_ID_VIA_8251,     0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
+	{ "vt8237",	PCI_DEVICE_ID_VIA_8237,     0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
+	{ "vt8237a",	PCI_DEVICE_ID_VIA_8237A,    0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
+	{ "vt8235",	PCI_DEVICE_ID_VIA_8235,     0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
+	{ "vt8233a",	PCI_DEVICE_ID_VIA_8233A,    0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
+	{ "vt8233c",	PCI_DEVICE_ID_VIA_8233C_0,  0x00, 0x2f, ATA_UDMA5, },
+	{ "vt8233",	PCI_DEVICE_ID_VIA_8233_0,   0x00, 0x2f, ATA_UDMA5, },
+	{ "vt8231",	PCI_DEVICE_ID_VIA_8231,     0x00, 0x2f, ATA_UDMA5, },
+	{ "vt82c686b",	PCI_DEVICE_ID_VIA_82C686,   0x40, 0x4f, ATA_UDMA5, },
+	{ "vt82c686a",	PCI_DEVICE_ID_VIA_82C686,   0x10, 0x2f, ATA_UDMA4, },
+	{ "vt82c686",	PCI_DEVICE_ID_VIA_82C686,   0x00, 0x0f, ATA_UDMA2, VIA_BAD_CLK66 },
+	{ "vt82c596b",	PCI_DEVICE_ID_VIA_82C596,   0x10, 0x2f, ATA_UDMA4, },
+	{ "vt82c596a",	PCI_DEVICE_ID_VIA_82C596,   0x00, 0x0f, ATA_UDMA2, VIA_BAD_CLK66 },
+	{ "vt82c586b",	PCI_DEVICE_ID_VIA_82C586_0, 0x47, 0x4f, ATA_UDMA2, VIA_SET_FIFO },
+	{ "vt82c586b",	PCI_DEVICE_ID_VIA_82C586_0, 0x40, 0x46, ATA_UDMA2, VIA_SET_FIFO | VIA_BAD_PREQ },
+	{ "vt82c586b",	PCI_DEVICE_ID_VIA_82C586_0, 0x30, 0x3f, ATA_UDMA2, VIA_SET_FIFO },
+	{ "vt82c586a",	PCI_DEVICE_ID_VIA_82C586_0, 0x20, 0x2f, ATA_UDMA2, VIA_SET_FIFO },
+	{ "vt82c586",	PCI_DEVICE_ID_VIA_82C586_0, 0x00, 0x0f,      0x00, VIA_SET_FIFO },
+	{ "vt82c576",	PCI_DEVICE_ID_VIA_82C576,   0x00, 0x2f,      0x00, VIA_SET_FIFO | VIA_NO_UNMASK },
+	{ "vt82c576",	PCI_DEVICE_ID_VIA_82C576,   0x00, 0x2f,      0x00, VIA_SET_FIFO | VIA_NO_UNMASK | VIA_BAD_ID },
 	{ NULL }
 };
 
 static unsigned int via_clock;
-static char *via_dma[] = { "MWDMA16", "UDMA33", "UDMA66", "UDMA100", "UDMA133" };
+static char *via_dma[] = { "16", "25", "33", "44", "66", "100", "133" };
 
 struct via82cxxx_dev
 {
@@ -140,12 +136,12 @@
 	pci_write_config_byte(dev, VIA_DRIVE_TIMING + (3 - dn),
 		((FIT(timing->active, 1, 16) - 1) << 4) | (FIT(timing->recover, 1, 16) - 1));
 
-	switch (vdev->via_config->flags & VIA_UDMA) {
-		case VIA_UDMA_33:  t = timing->udma ? (0xe0 | (FIT(timing->udma, 2, 5) - 2)) : 0x03; break;
-		case VIA_UDMA_66:  t = timing->udma ? (0xe8 | (FIT(timing->udma, 2, 9) - 2)) : 0x0f; break;
-		case VIA_UDMA_100: t = timing->udma ? (0xe0 | (FIT(timing->udma, 2, 9) - 2)) : 0x07; break;
-		case VIA_UDMA_133: t = timing->udma ? (0xe0 | (FIT(timing->udma, 2, 9) - 2)) : 0x07; break;
-		default: return;
+	switch (vdev->via_config->udma_mask) {
+	case ATA_UDMA2: t = timing->udma ? (0xe0 | (FIT(timing->udma, 2, 5) - 2)) : 0x03; break;
+	case ATA_UDMA4: t = timing->udma ? (0xe8 | (FIT(timing->udma, 2, 9) - 2)) : 0x0f; break;
+	case ATA_UDMA5: t = timing->udma ? (0xe0 | (FIT(timing->udma, 2, 9) - 2)) : 0x07; break;
+	case ATA_UDMA6: t = timing->udma ? (0xe0 | (FIT(timing->udma, 2, 9) - 2)) : 0x07; break;
+	default: return;
 	}
 
 	pci_write_config_byte(dev, VIA_UDMA_TIMING + (3 - dn), t);
@@ -173,12 +169,12 @@
 
 	T = 1000000000 / via_clock;
 
-	switch (vdev->via_config->flags & VIA_UDMA) {
-		case VIA_UDMA_33:   UT = T;   break;
-		case VIA_UDMA_66:   UT = T/2; break;
-		case VIA_UDMA_100:  UT = T/3; break;
-		case VIA_UDMA_133:  UT = T/4; break;
-		default: UT = T;
+	switch (vdev->via_config->udma_mask) {
+	case ATA_UDMA2: UT = T;   break;
+	case ATA_UDMA4: UT = T/2; break;
+	case ATA_UDMA5: UT = T/3; break;
+	case ATA_UDMA6: UT = T/4; break;
+	default:	UT = T;
 	}
 
 	ide_timing_compute(drive, speed, &t, T, UT);
@@ -208,8 +204,7 @@
 static void via82cxxx_tune_drive(ide_drive_t *drive, u8 pio)
 {
 	if (pio == 255) {
-		via_set_drive(drive,
-			ide_find_best_mode(drive, XFER_PIO | XFER_EPIO));
+		via_set_drive(drive, ide_find_best_pio_mode(drive));
 		return;
 	}
 
@@ -226,16 +221,10 @@
  
 static int via82cxxx_ide_dma_check (ide_drive_t *drive)
 {
-	ide_hwif_t *hwif = HWIF(drive);
-	struct via82cxxx_dev *vdev = pci_get_drvdata(hwif->pci_dev);
-	u16 w80 = hwif->udma_four;
+	u8 speed = ide_max_dma_mode(drive);
 
-	u16 speed = ide_find_best_mode(drive,
-		XFER_PIO | XFER_EPIO | XFER_SWDMA | XFER_MWDMA |
-		(vdev->via_config->flags & VIA_UDMA ? XFER_UDMA : 0) |
-		(w80 && (vdev->via_config->flags & VIA_UDMA) >= VIA_UDMA_66 ? XFER_UDMA_66 : 0) |
-		(w80 && (vdev->via_config->flags & VIA_UDMA) >= VIA_UDMA_100 ? XFER_UDMA_100 : 0) |
-		(w80 && (vdev->via_config->flags & VIA_UDMA) >= VIA_UDMA_133 ? XFER_UDMA_133 : 0));
+	if (speed == 0)
+		speed = ide_find_best_pio_mode(drive);
 
 	via_set_drive(drive, speed);
 
@@ -272,8 +261,8 @@
 {
 	int i;
 
-	switch (vdev->via_config->flags & VIA_UDMA) {
-		case VIA_UDMA_66:
+	switch (vdev->via_config->udma_mask) {
+		case ATA_UDMA4:
 			for (i = 24; i >= 0; i -= 8)
 				if (((u >> (i & 16)) & 8) &&
 				    ((u >> i) & 0x20) &&
@@ -286,7 +275,7 @@
 				}
 			break;
 
-		case VIA_UDMA_100:
+		case ATA_UDMA5:
 			for (i = 24; i >= 0; i -= 8)
 				if (((u >> i) & 0x10) ||
 				    (((u >> i) & 0x20) &&
@@ -298,7 +287,7 @@
 				}
 			break;
 
-		case VIA_UDMA_133:
+		case ATA_UDMA6:
 			for (i = 24; i >= 0; i -= 8)
 				if (((u >> i) & 0x10) ||
 				    (((u >> i) & 0x20) &&
@@ -353,7 +342,7 @@
 
 	via_cable_detect(vdev, u);
 
-	if ((via_config->flags & VIA_UDMA) == VIA_UDMA_66) {
+	if (via_config->udma_mask == ATA_UDMA4) {
 		/* Enable Clk66 */
 		pci_write_config_dword(dev, VIA_UDMA_TIMING, u|0x80008);
 	} else if (via_config->flags & VIA_BAD_CLK66) {
@@ -416,16 +405,54 @@
 	 */
 
 	pci_read_config_byte(isa, PCI_REVISION_ID, &t);
-	printk(KERN_INFO "VP_IDE: VIA %s (rev %02x) IDE %s "
+	printk(KERN_INFO "VP_IDE: VIA %s (rev %02x) IDE %sDMA%s "
 		"controller on pci%s\n",
 		via_config->name, t,
-		via_dma[via_config->flags & VIA_UDMA],
+		via_config->udma_mask ? "U" : "MW",
+		via_dma[via_config->udma_mask ?
+			(fls(via_config->udma_mask) - 1) : 0],
 		pci_name(dev));
 
 	pci_dev_put(isa);
 	return 0;
 }
 
+/*
+ *	Cable special cases
+ */
+
+static struct dmi_system_id cable_dmi_table[] = {
+	{
+		.ident = "Acer Ferrari 3400",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Acer,Inc."),
+			DMI_MATCH(DMI_BOARD_NAME, "Ferrari 3400"),
+		},
+	},
+	{ }
+};
+
+static int via_cable_override(void)
+{
+	/* Systems by DMI */
+	if (dmi_check_system(cable_dmi_table))
+		return 1;
+	return 0;
+}
+
+static u8 __devinit via82cxxx_cable_detect(ide_hwif_t *hwif)
+{
+	struct via82cxxx_dev *vdev = pci_get_drvdata(hwif->pci_dev);
+
+	if (via_cable_override())
+		return ATA_CBL_PATA40_SHORT;
+
+	if ((vdev->via_80w >> hwif->channel) & 1)
+		return ATA_CBL_PATA80;
+	else
+		return ATA_CBL_PATA40;
+}
+
 static void __devinit init_hwif_via82cxxx(ide_hwif_t *hwif)
 {
 	struct via82cxxx_dev *vdev = pci_get_drvdata(hwif->pci_dev);
@@ -454,12 +481,14 @@
 		return;
 
 	hwif->atapi_dma = 1;
-	hwif->ultra_mask = 0x7f;
+
+	hwif->ultra_mask = vdev->via_config->udma_mask;
 	hwif->mwdma_mask = 0x07;
 	hwif->swdma_mask = 0x07;
 
-	if (!hwif->udma_four)
-		hwif->udma_four = (vdev->via_80w >> hwif->channel) & 1;
+	if (hwif->cbl != ATA_CBL_PATA40_SHORT)
+		hwif->cbl = via82cxxx_cable_detect(hwif);
+
 	hwif->ide_dma_check = &via82cxxx_ide_dma_check;
 	if (!noautodma)
 		hwif->autodma = 1;
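
cable_dmi_table[] grows by appending entries before the empty terminator; dmi_check_system() returns the number of matching entries, so any match makes via_cable_override() force a short-cable result. A hypothetical extra entry (vendor and board strings invented):

	{
		.ident = "Another short-cable laptop",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "ExampleVendor"),
			DMI_MATCH(DMI_BOARD_NAME, "ExampleBoard"),
		},
	},
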
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index 45fc36f..e46f4720 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -942,8 +942,8 @@
 				return 1;
 		case XFER_UDMA_4:
 		case XFER_UDMA_3:
-			if (HWIF(drive)->udma_four == 0)
-				return 1;		
+			if (drive->hwif->cbl != ATA_CBL_PATA80)
+				return 1;
 		case XFER_UDMA_2:
 		case XFER_UDMA_1:
 		case XFER_UDMA_0:
@@ -1244,7 +1244,7 @@
 	hwif->chipset = ide_pmac;
 	hwif->noprobe = !hwif->io_ports[IDE_DATA_OFFSET] || pmif->mediabay;
 	hwif->hold = pmif->mediabay;
-	hwif->udma_four = pmif->cable_80;
+	hwif->cbl = pmif->cable_80 ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
 	hwif->drives[0].unmask = 1;
 	hwif->drives[1].unmask = 1;
 	hwif->tuneproc = pmac_ide_tuneproc;
@@ -1821,28 +1821,11 @@
 		enable = 0;
 
 	if (enable) {
-		short mode;
-		
-		map = XFER_MWDMA;
-		if (pmif->kind == controller_kl_ata4
-		    || pmif->kind == controller_un_ata6
-		    || pmif->kind == controller_k2_ata6
-		    || pmif->kind == controller_sh_ata6) {
-			map |= XFER_UDMA;
-			if (pmif->cable_80) {
-				map |= XFER_UDMA_66;
-				if (pmif->kind == controller_un_ata6 ||
-				    pmif->kind == controller_k2_ata6 ||
-				    pmif->kind == controller_sh_ata6)
-					map |= XFER_UDMA_100;
-				if (pmif->kind == controller_sh_ata6)
-					map |= XFER_UDMA_133;
-			}
-		}
-		mode = ide_find_best_mode(drive, map);
-		if (mode & XFER_UDMA)
+		u8 mode = ide_max_dma_mode(drive);
+
+		if (mode >= XFER_UDMA_0)
 			drive->using_dma = pmac_ide_udma_enable(drive, mode);
-		else if (mode & XFER_MWDMA)
+		else if (mode >= XFER_MW_DMA_0)
 			drive->using_dma = pmac_ide_mdma_enable(drive, mode);
 		hwif->OUTB(0, IDE_CONTROL_REG);
 		/* Apply settings to controller */
@@ -2004,20 +1987,19 @@
 {
 }
 
-static int
-pmac_ide_dma_lostirq (ide_drive_t *drive)
+static void
+pmac_ide_dma_lost_irq (ide_drive_t *drive)
 {
 	pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
 	volatile struct dbdma_regs __iomem *dma;
 	unsigned long status;
 
 	if (pmif == NULL)
-		return 0;
+		return;
 	dma = pmif->dma_regs;
 
 	status = readl(&dma->status);
 	printk(KERN_ERR "ide-pmac lost interrupt, dma status: %lx\n", status);
-	return 0;
 }
 
 /*
@@ -2057,8 +2039,8 @@
 	hwif->ide_dma_test_irq = &pmac_ide_dma_test_irq;
 	hwif->dma_host_off = &pmac_ide_dma_host_off;
 	hwif->dma_host_on = &pmac_ide_dma_host_on;
-	hwif->ide_dma_timeout = &__ide_dma_timeout;
-	hwif->ide_dma_lostirq = &pmac_ide_dma_lostirq;
+	hwif->dma_timeout = &ide_dma_timeout;
+	hwif->dma_lost_irq = &pmac_ide_dma_lost_irq;
 
 	hwif->atapi_dma = 1;
 	switch(pmif->kind) {
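
via82cxxx and pmac now both key off ide_max_dma_mode(), which returns the best usable DMA transfer mode or 0 when DMA cannot be used; via82cxxx then falls back to ide_find_best_pio_mode(), while pmac compares the result against XFER_UDMA_0/XFER_MW_DMA_0 to pick the enable path. The fallback in isolation (wrapper name invented):

	static u8 pick_transfer_mode(ide_drive_t *drive)
	{
		u8 speed = ide_max_dma_mode(drive);	/* 0: DMA unusable */

		if (speed == 0)
			speed = ide_find_best_pio_mode(drive);

		return speed;
	}
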
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 616eee9..bd601ef 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -34,6 +34,11 @@
 	  If you choose to build module, its name will be phantom. If unsure,
 	  say N here.
 
+config EEPROM_93CX6
+	tristate "EEPROM 93CX6 support"
+	---help---
+	  This is a driver for the EEPROM chipsets 93c46 and 93c66.
+	  The driver supports both read and write commands.
 
 	  If unsure, say N.
 
@@ -187,5 +192,4 @@
 
 	  If you are not sure, say Y here.
 
-
 endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 8abbf2f..b5ce0e3 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -14,3 +14,4 @@
 obj-$(CONFIG_SGI_IOC4)		+= ioc4.o
 obj-$(CONFIG_SONY_LAPTOP)	+= sony-laptop.o
 obj-$(CONFIG_THINKPAD_ACPI)	+= thinkpad_acpi.o
+obj-$(CONFIG_EEPROM_93CX6)	+= eeprom_93cx6.o
diff --git a/drivers/misc/eeprom_93cx6.c b/drivers/misc/eeprom_93cx6.c
new file mode 100644
index 0000000..ac515b0
--- /dev/null
+++ b/drivers/misc/eeprom_93cx6.c
@@ -0,0 +1,241 @@
+/*
+	Copyright (C) 2004 - 2006 rt2x00 SourceForge Project
+	<http://rt2x00.serialmonkey.com>
+
+	This program is free software; you can redistribute it and/or modify
+	it under the terms of the GNU General Public License as published by
+	the Free Software Foundation; either version 2 of the License, or
+	(at your option) any later version.
+
+	This program is distributed in the hope that it will be useful,
+	but WITHOUT ANY WARRANTY; without even the implied warranty of
+	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+	GNU General Public License for more details.
+
+	You should have received a copy of the GNU General Public License
+	along with this program; if not, write to the
+	Free Software Foundation, Inc.,
+	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+	Module: eeprom_93cx6
+	Abstract: EEPROM reader routines for 93cx6 chipsets.
+	Supported chipsets: 93c46 & 93c66.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/delay.h>
+#include <linux/eeprom_93cx6.h>
+
+MODULE_AUTHOR("http://rt2x00.serialmonkey.com");
+MODULE_VERSION("1.0");
+MODULE_DESCRIPTION("EEPROM 93cx6 chip driver");
+MODULE_LICENSE("GPL");
+
+static inline void eeprom_93cx6_pulse_high(struct eeprom_93cx6 *eeprom)
+{
+	eeprom->reg_data_clock = 1;
+	eeprom->register_write(eeprom);
+
+	/*
+	 * Add a short delay for the pulse to work.
+	 * According to the specifications the "maximum minimum"
+	 * time should be 450ns.
+	 */
+	ndelay(450);
+}
+
+static inline void eeprom_93cx6_pulse_low(struct eeprom_93cx6 *eeprom)
+{
+	eeprom->reg_data_clock = 0;
+	eeprom->register_write(eeprom);
+
+	/*
+	 * Add a short delay for the pulse to work.
+	 * According to the specifications the minimal time
+	 * should be 450ns so a 1us delay is sufficient.
+	 */
+	udelay(1);
+}
+
+static void eeprom_93cx6_startup(struct eeprom_93cx6 *eeprom)
+{
+	/*
+	 * Clear all flags, and enable chip select.
+	 */
+	eeprom->register_read(eeprom);
+	eeprom->reg_data_in = 0;
+	eeprom->reg_data_out = 0;
+	eeprom->reg_data_clock = 0;
+	eeprom->reg_chip_select = 1;
+	eeprom->register_write(eeprom);
+
+	/*
+	 * kick a pulse.
+	 */
+	eeprom_93cx6_pulse_high(eeprom);
+	eeprom_93cx6_pulse_low(eeprom);
+}
+
+static void eeprom_93cx6_cleanup(struct eeprom_93cx6 *eeprom)
+{
+	/*
+	 * Clear chip_select and data_in flags.
+	 */
+	eeprom->register_read(eeprom);
+	eeprom->reg_data_in = 0;
+	eeprom->reg_chip_select = 0;
+	eeprom->register_write(eeprom);
+
+	/*
+	 * kick a pulse.
+	 */
+	eeprom_93cx6_pulse_high(eeprom);
+	eeprom_93cx6_pulse_low(eeprom);
+}
+
+static void eeprom_93cx6_write_bits(struct eeprom_93cx6 *eeprom,
+	const u16 data, const u16 count)
+{
+	unsigned int i;
+
+	eeprom->register_read(eeprom);
+
+	/*
+	 * Clear data flags.
+	 */
+	eeprom->reg_data_in = 0;
+	eeprom->reg_data_out = 0;
+
+	/*
+	 * Start writing all bits.
+	 */
+	for (i = count; i > 0; i--) {
+		/*
+		 * Check if this bit needs to be set.
+		 */
+		eeprom->reg_data_in = !!(data & (1 << (i - 1)));
+
+		/*
+		 * Write the bit to the eeprom register.
+		 */
+		eeprom->register_write(eeprom);
+
+		/*
+		 * Kick a pulse.
+		 */
+		eeprom_93cx6_pulse_high(eeprom);
+		eeprom_93cx6_pulse_low(eeprom);
+	}
+
+	eeprom->reg_data_in = 0;
+	eeprom->register_write(eeprom);
+}
+
+static void eeprom_93cx6_read_bits(struct eeprom_93cx6 *eeprom,
+	u16 *data, const u16 count)
+{
+	unsigned int i;
+	u16 buf = 0;
+
+	eeprom->register_read(eeprom);
+
+	/*
+	 * Clear data flags.
+	 */
+	eeprom->reg_data_in = 0;
+	eeprom->reg_data_out = 0;
+
+	/*
+	 * Start reading all bits.
+	 */
+	for (i = count; i > 0; i--) {
+		eeprom_93cx6_pulse_high(eeprom);
+
+		eeprom->register_read(eeprom);
+
+		/*
+		 * Clear data_in flag.
+		 */
+		eeprom->reg_data_in = 0;
+
+		/*
+		 * Read if the bit has been set.
+		 */
+		if (eeprom->reg_data_out)
+			buf |= (1 << (i - 1));
+
+		eeprom_93cx6_pulse_low(eeprom);
+	}
+
+	*data = buf;
+}
+
+/**
+ * eeprom_93cx6_read - Read a word from eeprom
+ * @eeprom: Pointer to eeprom structure
+ * @word: Word index to read
+ * @data: target pointer where the information will have to be stored
+ *
+ * This function will read the eeprom data as host-endian word
+ * into the given data pointer.
+ */
+void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom, const u8 word,
+	u16 *data)
+{
+	u16 command;
+
+	/*
+	 * Initialize the eeprom register
+	 */
+	eeprom_93cx6_startup(eeprom);
+
+	/*
+	 * Select the read opcode and the word to be read.
+	 */
+	command = (PCI_EEPROM_READ_OPCODE << eeprom->width) | word;
+	eeprom_93cx6_write_bits(eeprom, command,
+		PCI_EEPROM_WIDTH_OPCODE + eeprom->width);
+
+	/*
+	 * Read the requested 16 bits.
+	 */
+	eeprom_93cx6_read_bits(eeprom, data, 16);
+
+	/*
+	 * Cleanup eeprom register.
+	 */
+	eeprom_93cx6_cleanup(eeprom);
+}
+EXPORT_SYMBOL_GPL(eeprom_93cx6_read);
+
+/**
+ * eeprom_93cx6_multiread - Read multiple words from eeprom
+ * @eeprom: Pointer to eeprom structure
+ * @word: Word index from where we should start reading
+ * @data: target pointer where the information will have to be stored
+ * @words: Number of words that should be read.
+ *
+ * This function will read all requested words from the eeprom by
+ * calling eeprom_93cx6_read() multiple times. Unlike eeprom_93cx6_read(),
+ * which returns host-endian words, this function returns little-endian
+ * words.
+ */
+void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom, const u8 word,
+	__le16 *data, const u16 words)
+{
+	unsigned int i;
+	u16 tmp;
+
+	for (i = 0; i < words; i++) {
+		tmp = 0;
+		eeprom_93cx6_read(eeprom, word + i, &tmp);
+		data[i] = cpu_to_le16(tmp);
+	}
+}
+EXPORT_SYMBOL_GPL(eeprom_93cx6_multiread);
+
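The bit-banging core above leaves all register access to the caller: a
driver fills in register_read()/register_write() callbacks that mirror
its EEPROM control register into the reg_* flags, then simply calls
eeprom_93cx6_read()/eeprom_93cx6_multiread().  A minimal consumer
sketch follows; everything prefixed foo_/FOO_ is hypothetical, only the
eeprom_93cx6 fields and entry points come from the code above.

#include <linux/io.h>
#include <linux/eeprom_93cx6.h>

#define FOO_EEPROM_CSR		0x50	/* hypothetical control register */
#define FOO_EEPROM_DATA_IN	0x01	/* host -> eeprom */
#define FOO_EEPROM_DATA_OUT	0x02	/* eeprom -> host */
#define FOO_EEPROM_CLOCK	0x04
#define FOO_EEPROM_CS		0x08

struct foo_priv {
	void __iomem *base;
};

static void foo_eeprom_register_read(struct eeprom_93cx6 *eeprom)
{
	struct foo_priv *priv = eeprom->data;
	u32 reg = readl(priv->base + FOO_EEPROM_CSR);

	eeprom->reg_data_in = !!(reg & FOO_EEPROM_DATA_IN);
	eeprom->reg_data_out = !!(reg & FOO_EEPROM_DATA_OUT);
	eeprom->reg_data_clock = !!(reg & FOO_EEPROM_CLOCK);
	eeprom->reg_chip_select = !!(reg & FOO_EEPROM_CS);
}

static void foo_eeprom_register_write(struct eeprom_93cx6 *eeprom)
{
	struct foo_priv *priv = eeprom->data;
	u32 reg = 0;

	if (eeprom->reg_data_in)
		reg |= FOO_EEPROM_DATA_IN;
	if (eeprom->reg_data_clock)
		reg |= FOO_EEPROM_CLOCK;
	if (eeprom->reg_chip_select)
		reg |= FOO_EEPROM_CS;
	writel(reg, priv->base + FOO_EEPROM_CSR);
}

static void foo_read_mac(struct foo_priv *priv, __le16 *mac)
{
	struct eeprom_93cx6 eeprom = {
		.data		= priv,
		.register_read	= foo_eeprom_register_read,
		.register_write	= foo_eeprom_register_write,
		.width		= PCI_EEPROM_WIDTH_93C46,  /* 6 address bits */
	};

	/* 3 little-endian words == 6 bytes of MAC address. */
	eeprom_93cx6_multiread(&eeprom, 0x00, mac, 3);
}

For a 93C46 (width 6) read of word 5, the command word built in
eeprom_93cx6_read() is (PCI_EEPROM_READ_OPCODE << 6) | 5, and
PCI_EEPROM_WIDTH_OPCODE (3) + 6 = 9 bits of it are clocked out, MSB
first, before the 16 data bits are clocked back in.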
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index a804965..58bbc3e 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -107,11 +107,6 @@
 
 #define PFX			DRV_NAME ": "
 
-#ifndef TRUE
-#define FALSE 0
-#define TRUE (!FALSE)
-#endif
-
 #define CP_DEF_MSG_ENABLE	(NETIF_MSG_DRV		| \
 				 NETIF_MSG_PROBE 	| \
 				 NETIF_MSG_LINK)
@@ -661,7 +656,7 @@
 	if (status & (TxOK | TxErr | TxEmpty | SWInt))
 		cp_tx(cp);
 	if (status & LinkChg)
-		mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);
+		mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
 
 	spin_unlock(&cp->lock);
 
@@ -1188,7 +1183,7 @@
 		goto err_out_hw;
 
 	netif_carrier_off(dev);
-	mii_check_media(&cp->mii_if, netif_msg_link(cp), TRUE);
+	mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
 	netif_start_queue(dev);
 
 	return 0;
@@ -2050,7 +2045,7 @@
 
 	spin_lock_irqsave (&cp->lock, flags);
 
-	mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);
+	mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
 
 	spin_unlock_irqrestore (&cp->lock, flags);
 
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index b49375a..5cc3d51 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -3,10 +3,7 @@
 # Network device configuration
 #
 
-menu "Network device support"
-	depends on NET
-
-config NETDEVICES
+menuconfig NETDEVICES
 	default y if UML
 	bool "Network device support"
 	---help---
@@ -151,11 +148,9 @@
 #	Ethernet
 #
 
-menu "Ethernet (10 or 100Mbit)"
-	depends on !UML
-
-config NET_ETHERNET
+menuconfig NET_ETHERNET
 	bool "Ethernet (10 or 100Mbit)"
+	depends on !UML
 	---help---
 	  Ethernet (also called IEEE 802.3 or ISO 8802-2) is the most common
 	  type of Local Area Network (LAN) in universities and companies.
@@ -180,9 +175,10 @@
 	  kernel: saying N will just cause the configurator to skip all
 	  the questions about Ethernet network cards. If unsure, say N.
 
+if NET_ETHERNET
+
 config MII
 	tristate "Generic Media Independent Interface device support"
-	depends on NET_ETHERNET
 	help
 	  Most ethernet controllers have MII transceiver either as an external
 	  or internal device.  It is safe to say Y or M here even if your
@@ -190,7 +186,7 @@
 
 config MACB
 	tristate "Atmel MACB support"
-	depends on NET_ETHERNET && (AVR32 || ARCH_AT91SAM9260 || ARCH_AT91SAM9263)
+	depends on AVR32 || ARCH_AT91SAM9260 || ARCH_AT91SAM9263
 	select MII
 	help
 	  The Atmel MACB ethernet interface is found on many AT32 and AT91
@@ -203,7 +199,7 @@
 
 config MACE
 	tristate "MACE (Power Mac ethernet) support"
-	depends on NET_ETHERNET && PPC_PMAC && PPC32
+	depends on PPC_PMAC && PPC32
 	select CRC32
 	help
 	  Power Macintoshes and clones with Ethernet built-in on the
@@ -226,7 +222,7 @@
 
 config BMAC
 	tristate "BMAC (G3 ethernet) support"
-	depends on NET_ETHERNET && PPC_PMAC && PPC32
+	depends on PPC_PMAC && PPC32
 	select CRC32
 	help
 	  Say Y for support of BMAC Ethernet interfaces. These are used on G3
@@ -237,7 +233,7 @@
 
 config ARIADNE
 	tristate "Ariadne support"
-	depends on NET_ETHERNET && ZORRO
+	depends on ZORRO
 	help
 	  If you have a Village Tronic Ariadne Ethernet adapter, say Y.
 	  Otherwise, say N.
@@ -247,7 +243,7 @@
 
 config A2065
 	tristate "A2065 support"
-	depends on NET_ETHERNET && ZORRO
+	depends on ZORRO
 	select CRC32
 	help
 	  If you have a Commodore A2065 Ethernet adapter, say Y. Otherwise,
@@ -258,7 +254,7 @@
 
 config HYDRA
 	tristate "Hydra support"
-	depends on NET_ETHERNET && ZORRO
+	depends on ZORRO
 	select CRC32
 	help
 	  If you have a Hydra Ethernet adapter, say Y. Otherwise, say N.
@@ -268,7 +264,7 @@
 
 config ZORRO8390
 	tristate "Zorro NS8390-based Ethernet support"
-	depends on NET_ETHERNET && ZORRO
+	depends on ZORRO
 	select CRC32
 	help
 	  This driver is for Zorro Ethernet cards using an NS8390-compatible
@@ -281,7 +277,7 @@
 
 config APNE
 	tristate "PCMCIA NE2000 support"
-	depends on NET_ETHERNET && AMIGA_PCMCIA
+	depends on AMIGA_PCMCIA
 	select CRC32
 	help
 	  If you have a PCMCIA NE2000 compatible adapter, say Y.  Otherwise,
@@ -292,7 +288,7 @@
 
 config APOLLO_ELPLUS
 	tristate "Apollo 3c505 support"
-	depends on NET_ETHERNET && APOLLO
+	depends on APOLLO
 	help
 	  Say Y or M here if your Apollo has a 3Com 3c505 ISA Ethernet card.
 	  If you don't have one made for Apollos, you can use one from a PC,
@@ -301,7 +297,7 @@
 
 config MAC8390
 	bool "Macintosh NS 8390 based ethernet cards"
-	depends on NET_ETHERNET && MAC
+	depends on MAC
 	select CRC32
 	help
 	  If you want to include a driver to support Nubus or LC-PDS
@@ -311,7 +307,7 @@
 
 config MAC89x0
 	tristate "Macintosh CS89x0 based ethernet cards"
-	depends on NET_ETHERNET && MAC
+	depends on MAC
 	---help---
 	  Support for CS89x0 chipset based Ethernet cards.  If you have a
 	  Nubus or LC-PDS network (Ethernet) card of this type, say Y and
@@ -324,7 +320,7 @@
 
 config MACSONIC
 	tristate "Macintosh SONIC based ethernet (onboard, NuBus, LC, CS)"
-	depends on NET_ETHERNET && MAC
+	depends on MAC
 	---help---
 	  Support for NatSemi SONIC based Ethernet devices.  This includes
 	  the onboard Ethernet in many Quadras as well as some LC-PDS,
@@ -338,7 +334,7 @@
 
 config MACMACE
 	bool "Macintosh (AV) onboard MACE ethernet"
-	depends on NET_ETHERNET && MAC
+	depends on MAC
 	select CRC32
 	help
 	  Support for the onboard AMD 79C940 MACE Ethernet controller used in
@@ -348,7 +344,7 @@
 
 config MVME147_NET
 	tristate "MVME147 (Lance) Ethernet support"
-	depends on NET_ETHERNET && MVME147
+	depends on MVME147
 	select CRC32
 	help
 	  Support for the on-board Ethernet interface on the Motorola MVME147
@@ -358,7 +354,7 @@
 
 config MVME16x_NET
 	tristate "MVME16x Ethernet support"
-	depends on NET_ETHERNET && MVME16x
+	depends on MVME16x
 	help
 	  This is the driver for the Ethernet interface on the Motorola
 	  MVME162, 166, 167, 172 and 177 boards.  Say Y here to include the
@@ -367,7 +363,7 @@
 
 config BVME6000_NET
 	tristate "BVME6000 Ethernet support"
-	depends on NET_ETHERNET && BVME6000
+	depends on BVME6000
 	help
 	  This is the driver for the Ethernet interface on BVME4000 and
 	  BVME6000 VME boards.  Say Y here to include the driver for this chip
@@ -376,7 +372,7 @@
 
 config ATARILANCE
 	tristate "Atari Lance support"
-	depends on NET_ETHERNET && ATARI
+	depends on ATARI
 	help
 	  Say Y to include support for several Atari Ethernet adapters based
 	  on the AMD Lance chipset: RieblCard (with or without battery), or
@@ -384,7 +380,7 @@
 
 config ATARI_BIONET
 	tristate "BioNet-100 support"
-	depends on NET_ETHERNET && ATARI && ATARI_ACSI && BROKEN
+	depends on ATARI && ATARI_ACSI && BROKEN
 	help
 	  Say Y to include support for BioData's BioNet-100 Ethernet adapter
 	  for the ACSI port. The driver works (has to work...) with a polled
@@ -392,7 +388,7 @@
 
 config ATARI_PAMSNET
 	tristate "PAMsNet support"
-	depends on NET_ETHERNET && ATARI && ATARI_ACSI && BROKEN
+	depends on ATARI && ATARI_ACSI && BROKEN
 	help
 	  Say Y to include support for the PAMsNet Ethernet adapter for the
 	  ACSI port ("ACSI node"). The driver works (has to work...) with a
@@ -400,7 +396,7 @@
 
 config SUN3LANCE
 	tristate "Sun3/Sun3x on-board LANCE support"
-	depends on NET_ETHERNET && (SUN3 || SUN3X)
+	depends on SUN3 || SUN3X
 	help
 	  Most Sun3 and Sun3x motherboards (including the 3/50, 3/60 and 3/80)
 	  featured an AMD Lance 10Mbit Ethernet controller on board; say Y
@@ -413,7 +409,7 @@
 
 config SUN3_82586
 	bool "Sun3 on-board Intel 82586 support"
-	depends on NET_ETHERNET && SUN3
+	depends on SUN3
 	help
 	  This driver enables support for the on-board Intel 82586 based
 	  Ethernet adapter found on Sun 3/1xx and 3/2xx motherboards.  Note
@@ -422,7 +418,7 @@
 
 config HPLANCE
 	bool "HP on-board LANCE support"
-	depends on NET_ETHERNET && DIO
+	depends on DIO
 	select CRC32
 	help
 	  If you want to use the builtin "LANCE" Ethernet controller on an
@@ -430,21 +426,28 @@
 
 config LASI_82596
 	tristate "Lasi ethernet"
-	depends on NET_ETHERNET && GSC
+	depends on GSC
 	help
 	  Say Y here to support the builtin Intel 82596 ethernet controller
 	  found in Hewlett-Packard PA-RISC machines with 10Mbit ethernet.
 
+config SNI_82596
+	tristate "SNI RM ethernet"
+	depends on NET_ETHERNET && SNI_RM
+	help
+	  Say Y here to support the on-board Intel 82596 ethernet controller
+	  built into SNI RM machines.
+
 config MIPS_JAZZ_SONIC
 	tristate "MIPS JAZZ onboard SONIC Ethernet support"
-	depends on NET_ETHERNET && MACH_JAZZ
+	depends on MACH_JAZZ
 	help
 	  This is the driver for the onboard card of MIPS Magnum 4000,
 	  Acer PICA, Olivetti M700-10 and a few other identical OEM systems.
 
 config MIPS_AU1X00_ENET
 	bool "MIPS AU1000 Ethernet support"
-	depends on NET_ETHERNET && SOC_AU1X00
+	depends on SOC_AU1X00
 	select PHYLIB
 	select CRC32
 	help
@@ -453,11 +456,11 @@
 
 config NET_SB1250_MAC
 	tristate "SB1250 Ethernet support"
-	depends on NET_ETHERNET && SIBYTE_SB1xxx_SOC
+	depends on SIBYTE_SB1xxx_SOC
 
 config SGI_IOC3_ETH
 	bool "SGI IOC3 Ethernet"
-	depends on NET_ETHERNET && PCI && SGI_IP27
+	depends on PCI && SGI_IP27
 	select CRC32
 	select MII
 	help
@@ -487,7 +490,7 @@
 
 config MIPS_SIM_NET
 	tristate "MIPS simulator Network device"
-	depends on NET_ETHERNET && MIPS_SIM
+	depends on MIPS_SIM
 	help
 	  The MIPSNET device is a simple Ethernet network device which is
 	  emulated by the MIPS Simulator.
@@ -495,11 +498,11 @@
 
 config SGI_O2MACE_ETH
 	tristate "SGI O2 MACE Fast Ethernet support"
-	depends on NET_ETHERNET && SGI_IP32=y
+	depends on SGI_IP32=y
 
 config STNIC
 	tristate "National DP83902AV  support"
-	depends on NET_ETHERNET && SUPERH
+	depends on SUPERH
 	select CRC32
 	help
 	  Support for cards based on the National Semiconductor DP83902AV
@@ -511,7 +514,7 @@
 
 config SUNLANCE
 	tristate "Sun LANCE support"
-	depends on NET_ETHERNET && SBUS
+	depends on SBUS
 	select CRC32
 	help
 	  This driver supports the "le" interface present on all 32-bit Sparc
@@ -524,7 +527,7 @@
 
 config HAPPYMEAL
 	tristate "Sun Happy Meal 10/100baseT support"
-	depends on NET_ETHERNET && (SBUS || PCI)
+	depends on SBUS || PCI
 	select CRC32
 	help
 	  This driver supports the "hme" interface present on most Ultra
@@ -537,7 +540,7 @@
 
 config SUNBMAC
 	tristate "Sun BigMAC 10/100baseT support (EXPERIMENTAL)"
-	depends on NET_ETHERNET && SBUS && EXPERIMENTAL
+	depends on SBUS && EXPERIMENTAL
 	select CRC32
 	help
 	  This driver supports the "be" interface available as an Sbus option.
@@ -548,7 +551,7 @@
 
 config SUNQE
 	tristate "Sun QuadEthernet support"
-	depends on NET_ETHERNET && SBUS
+	depends on SBUS
 	select CRC32
 	help
 	  This driver supports the "qe" 10baseT Ethernet device, available as
@@ -560,7 +563,7 @@
 
 config SUNGEM
 	tristate "Sun GEM support"
-	depends on NET_ETHERNET && PCI
+	depends on PCI
 	select CRC32
 	help
 	  Support for the Sun GEM chip, aka Sun GigabitEthernet/P 2.0.  See also
@@ -568,7 +571,7 @@
 
 config CASSINI
 	tristate "Sun Cassini support"
-	depends on NET_ETHERNET && PCI
+	depends on PCI
 	select CRC32
 	help
 	  Support for the Sun Cassini chip, aka Sun GigaSwift Ethernet. See also
@@ -576,7 +579,7 @@
 
 config NET_VENDOR_3COM
 	bool "3COM cards"
-	depends on NET_ETHERNET && (ISA || EISA || MCA || PCI)
+	depends on ISA || EISA || MCA || PCI
 	help
 	  If you have a network (Ethernet) card belonging to this class, say Y
 	  and read the Ethernet-HOWTO, available from
@@ -736,7 +739,7 @@
 
 config LANCE
 	tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
-	depends on NET_ETHERNET && ISA && ISA_DMA_API
+	depends on ISA && ISA_DMA_API
 	help
 	  If you have a network (Ethernet) card of this type, say Y and read
 	  the Ethernet-HOWTO, available from
@@ -748,7 +751,7 @@
 
 config NET_VENDOR_SMC
 	bool "Western Digital/SMC cards"
-	depends on NET_ETHERNET && (ISA || MCA || EISA || MAC)
+	depends on ISA || MCA || EISA || MAC
 	help
 	  If you have a network (Ethernet) card belonging to this class, say Y
 	  and read the Ethernet-HOWTO, available from
@@ -818,24 +821,6 @@
 	  <file:Documentation/networking/net-modules.txt>. The module
 	  will be called smc-ultra32.
 
-config SMC91X
-	tristate "SMC 91C9x/91C1xxx support"
-	select CRC32
-	select MII
-	depends on NET_ETHERNET && (ARM || REDWOOD_5 || REDWOOD_6 || M32R || SUPERH || SOC_AU1X00 || BFIN)
-	help
-	  This is a driver for SMC's 91x series of Ethernet chipsets,
-	  including the SMC91C94 and the SMC91C111. Say Y if you want it
-	  compiled into the kernel, and read the file
-	  <file:Documentation/networking/smc9.txt>  and the Ethernet-HOWTO,
-	  available from  <http://www.linuxdoc.org/docs.html#howto>.
-
-	  This driver is also available as a module ( = code which can be
-	  inserted in and removed from the running kernel whenever you want).
-	  The module will be called smc91x.  If you want to compile it as a
-	  module, say M here and read <file:Documentation/kbuild/modules.txt>
-	  as well as <file:Documentation/networking/net-modules.txt>.
-
 config SMC9194
 	tristate "SMC 9194 support"
 	depends on NET_VENDOR_SMC && (ISA || MAC && BROKEN)
@@ -852,10 +837,28 @@
 	  <file:Documentation/networking/net-modules.txt>. The module
 	  will be called smc9194.
 
+config SMC91X
+	tristate "SMC 91C9x/91C1xxx support"
+	select CRC32
+	select MII
+	depends on ARM || REDWOOD_5 || REDWOOD_6 || M32R || SUPERH || SOC_AU1X00 || BFIN
+	help
+	  This is a driver for SMC's 91x series of Ethernet chipsets,
+	  including the SMC91C94 and the SMC91C111. Say Y if you want it
+	  compiled into the kernel, and read the file
+	  <file:Documentation/networking/smc9.txt>  and the Ethernet-HOWTO,
+	  available from  <http://www.linuxdoc.org/docs.html#howto>.
+
+	  This driver is also available as a module ( = code which can be
+	  inserted in and removed from the running kernel whenever you want).
+	  The module will be called smc91x.  If you want to compile it as a
+	  module, say M here and read <file:Documentation/kbuild/modules.txt>
+	  as well as <file:Documentation/networking/net-modules.txt>.
+
 config NET_NETX
 	tristate "NetX Ethernet support"
 	select MII
-	depends on NET_ETHERNET && ARCH_NETX
+	depends on ARCH_NETX
 	help
 	  This is support for the Hilscher netX builtin Ethernet ports
 
@@ -865,7 +868,7 @@
 
 config DM9000
 	tristate "DM9000 support"
-	depends on (ARM || MIPS) && NET_ETHERNET
+	depends on ARM || MIPS
 	select CRC32
 	select MII
 	---help---
@@ -879,7 +882,7 @@
 	tristate "SMSC LAN911[5678] support"
 	select CRC32
 	select MII
-	depends on NET_ETHERNET && ARCH_PXA
+	depends on ARCH_PXA
 	help
 	  This is a driver for SMSC's LAN911x series of Ethernet chipsets
 	  including the new LAN9115, LAN9116, LAN9117, and LAN9118.
@@ -893,7 +896,7 @@
 
 config NET_VENDOR_RACAL
 	bool "Racal-Interlan (Micom) NI cards"
-	depends on NET_ETHERNET && ISA
+	depends on ISA
 	help
 	  If you have a network (Ethernet) card belonging to this class, such
 	  as the NI5010, NI5210 or NI6210, say Y and read the Ethernet-HOWTO,
@@ -945,7 +948,7 @@
 
 config AT1700
 	tristate "AT1700/1720 support (EXPERIMENTAL)"
-	depends on NET_ETHERNET && (ISA || MCA_LEGACY) && EXPERIMENTAL
+	depends on (ISA || MCA_LEGACY) && EXPERIMENTAL
 	select CRC32
 	---help---
 	  If you have a network (Ethernet) card of this type, say Y and read
@@ -958,7 +961,7 @@
 
 config DEPCA
 	tristate "DEPCA, DE10x, DE200, DE201, DE202, DE422 support"
-	depends on NET_ETHERNET && (ISA || EISA || MCA)
+	depends on ISA || EISA || MCA
 	select CRC32
 	---help---
 	  If you have a network (Ethernet) card of this type, say Y and read
@@ -972,7 +975,7 @@
 
 config HP100
 	tristate "HP 10/100VG PCLAN (ISA, EISA, PCI) support"
-	depends on NET_ETHERNET && (ISA || EISA || PCI)
+	depends on ISA || EISA || PCI
 	help
 	  If you have a network (Ethernet) card of this type, say Y and read
 	  the Ethernet-HOWTO, available from
@@ -984,7 +987,7 @@
 
 config NET_ISA
 	bool "Other ISA cards"
-	depends on NET_ETHERNET && ISA
+	depends on ISA
 	---help---
 	  If your network (Ethernet) card hasn't been mentioned yet and its
 	  bus system (that's the way the cards talks to the other components
@@ -1147,7 +1150,7 @@
 
 config NE2_MCA
 	tristate "NE/2 (ne2000 MCA version) support"
-	depends on NET_ETHERNET && MCA_LEGACY
+	depends on MCA_LEGACY
 	select CRC32
 	help
 	  If you have a network (Ethernet) card of this type, say Y and read
@@ -1160,7 +1163,7 @@
 
 config IBMLANA
 	tristate "IBM LAN Adapter/A support"
-	depends on NET_ETHERNET && MCA && MCA_LEGACY
+	depends on MCA && MCA_LEGACY
 	---help---
 	  This is a Micro Channel Ethernet adapter.  You need to set
 	  CONFIG_MCA to use this driver.  It is both available as an in-kernel
@@ -1176,7 +1179,7 @@
 
 config IBMVETH
 	tristate "IBM LAN Virtual Ethernet support"
-	depends on NET_ETHERNET && PPC_PSERIES
+	depends on PPC_PSERIES
 	---help---
 	  This driver supports virtual ethernet adapters on newer IBM iSeries
 	  and pSeries systems.
@@ -1257,7 +1260,7 @@
 
 config NET_PCI
 	bool "EISA, VLB, PCI and on board controllers"
-	depends on NET_ETHERNET && (ISA || EISA || PCI)
+	depends on ISA || EISA || PCI
 	help
 	  This is another class of network cards which attach directly to the
 	  bus. If you have one of those, say Y and read the Ethernet-HOWTO,
@@ -1313,6 +1316,7 @@
 	  To compile this driver as a module, choose M here and read
 	  <file:Documentation/networking/net-modules.txt>. The module
 	  will be called amd8111e.
+
 config AMD8111E_NAPI
 	bool "Enable NAPI support"
 	depends on AMD8111_ETH
@@ -1778,7 +1782,7 @@
 
 config NET_POCKET
 	bool "Pocket and portable adapters"
-	depends on NET_ETHERNET && PARPORT
+	depends on PARPORT
 	---help---
 	  Cute little network (Ethernet) devices which attach to the parallel
 	  port ("pocket adapters"), commonly used with laptops. If you have
@@ -1847,14 +1851,14 @@
 
 config SGISEEQ
 	tristate "SGI Seeq ethernet controller support"
-	depends on NET_ETHERNET && SGI_IP22
+	depends on SGI_IP22
 	help
 	  Say Y here if you have a Seeq based Ethernet network card. This is
 	  used in many Silicon Graphics machines.
 
 config DECLANCE
 	tristate "DEC LANCE ethernet controller support"
-	depends on NET_ETHERNET && MACH_DECSTATION
+	depends on MACH_DECSTATION
 	select CRC32
 	help
 	  This driver is for the series of Ethernet controllers produced by
@@ -1884,7 +1888,7 @@
 
 config NE_H8300
 	tristate "NE2000 compatible support for H8/300"
-	depends on H8300 && NET_ETHERNET
+	depends on H8300
 	help
 	  Say Y here if you want to use the NE2000 compatible
 	  controller on the Renesas H8/300 processor.
@@ -1892,7 +1896,7 @@
 source "drivers/net/fec_8xx/Kconfig"
 source "drivers/net/fs_enet/Kconfig"
 
-endmenu
+endif # NET_ETHERNET
 
 #
 #	Gigabit Ethernet
@@ -2948,8 +2952,6 @@
 	If you want to log kernel messages over the network, enable this.
 	See <file:Documentation/networking/netconsole.txt> for details.
 
-endif #NETDEVICES
-
 config NETPOLL
 	def_bool NETCONSOLE
 
@@ -2961,4 +2963,4 @@
 config NET_POLL_CONTROLLER
 	def_bool NETPOLL
 
-endmenu
+endif # NETDEVICES
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index a77affa..eb62fb4 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -157,6 +157,7 @@
 obj-$(CONFIG_AC3200) += ac3200.o 8390.o
 obj-$(CONFIG_APRICOT) += 82596.o
 obj-$(CONFIG_LASI_82596) += lasi_82596.o
+obj-$(CONFIG_SNI_82596) += sni_82596.o
 obj-$(CONFIG_MVME16x_NET) += 82596.o
 obj-$(CONFIG_BVME6000_NET) += 82596.o
 obj-$(CONFIG_SC92031) += sc92031.o
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index 04382f9..b78a4e5 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -159,10 +159,6 @@
 };
 MODULE_DEVICE_TABLE(pci, acenic_pci_tbl);
 
-#ifndef SET_NETDEV_DEV
-#define SET_NETDEV_DEV(net, pdev)	do{} while(0)
-#endif
-
 #define ace_sync_irq(irq)	synchronize_irq(irq)
 
 #ifndef offset_in_page
diff --git a/drivers/net/arm/Kconfig b/drivers/net/arm/Kconfig
index 678e4f4..5bf2d33 100644
--- a/drivers/net/arm/Kconfig
+++ b/drivers/net/arm/Kconfig
@@ -4,7 +4,7 @@
 #
 config ARM_AM79C961A
 	bool "ARM EBSA110 AM79C961A support"
-	depends on NET_ETHERNET && ARM && ARCH_EBSA110
+	depends on ARM && ARCH_EBSA110
 	select CRC32
 	help
 	  If you wish to compile a kernel for the EBSA-110, then you should
@@ -12,21 +12,21 @@
 
 config ARM_ETHER1
 	tristate "Acorn Ether1 support"
-	depends on NET_ETHERNET && ARM && ARCH_ACORN
+	depends on ARM && ARCH_ACORN
 	help
 	  If you have an Acorn system with one of these (AKA25) network cards,
 	  you should say Y to this option if you wish to use it with Linux.
 
 config ARM_ETHER3
 	tristate "Acorn/ANT Ether3 support"
-	depends on NET_ETHERNET && ARM && ARCH_ACORN
+	depends on ARM && ARCH_ACORN
 	help
 	  If you have an Acorn system with one of these network cards, you
 	  should say Y to this option if you wish to use it with Linux.
 
 config ARM_ETHERH
 	tristate "I-cubed EtherH/ANT EtherM support"
-	depends on NET_ETHERNET && ARM && ARCH_ACORN
+	depends on ARM && ARCH_ACORN
 	select CRC32
 	help
 	  If you have an Acorn system with one of these network cards, you
@@ -34,7 +34,7 @@
 
 config ARM_AT91_ETHER
 	tristate "AT91RM9200 Ethernet support"
-	depends on NET_ETHERNET && ARM && ARCH_AT91RM9200
+	depends on ARM && ARCH_AT91RM9200
 	select MII
 	help
 	  If you wish to compile a kernel for the AT91RM9200 and enable
@@ -42,7 +42,7 @@
 
 config EP93XX_ETH
 	tristate "EP93xx Ethernet support"
-	depends on NET_ETHERNET && ARM && ARCH_EP93XX
+	depends on ARM && ARCH_EP93XX
 	help
 	  This is a driver for the ethernet hardware included in EP93xx CPUs.
 	  Say Y if you are building a kernel for EP93xx based devices.
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 879a2ff..96fb0ec 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -15,6 +15,7 @@
 #include <linux/ethtool.h>
 #include <linux/mii.h>
 #include <linux/if_ether.h>
+#include <linux/if_vlan.h>
 #include <linux/etherdevice.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
@@ -68,8 +69,8 @@
 	  (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
 #define NEXT_TX(N)		(((N) + 1) & (B44_TX_RING_SIZE - 1))
 
-#define RX_PKT_BUF_SZ		(1536 + bp->rx_offset + 64)
-#define TX_PKT_BUF_SZ		(B44_MAX_MTU + ETH_HLEN + 8)
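+/*
+ * With the rx_header prepended to the data by the hardware, this
+ * offset lands the ethernet header on a 2-byte boundary.
+ */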
+#define RX_PKT_OFFSET		30
+#define RX_PKT_BUF_SZ		(1536 + RX_PKT_OFFSET + 64)
 
 /* minimum number of free TX descriptors required to wake up TX process */
 #define B44_TX_WAKEUP_THRESH		(B44_TX_RING_SIZE / 4)
@@ -599,8 +600,7 @@
 
 	spin_unlock_irq(&bp->lock);
 
-	bp->timer.expires = jiffies + HZ;
-	add_timer(&bp->timer);
+	mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
 }
 
 static void b44_tx(struct b44 *bp)
@@ -653,7 +653,7 @@
 		src_map = &bp->rx_buffers[src_idx];
 	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
 	map = &bp->rx_buffers[dest_idx];
-	skb = dev_alloc_skb(RX_PKT_BUF_SZ);
+	skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
 	if (skb == NULL)
 		return -ENOMEM;
 
@@ -669,7 +669,7 @@
 		if (!dma_mapping_error(mapping))
 			pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
 		dev_kfree_skb_any(skb);
-		skb = __dev_alloc_skb(RX_PKT_BUF_SZ,GFP_DMA);
+		skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
 		if (skb == NULL)
 			return -ENOMEM;
 		mapping = pci_map_single(bp->pdev, skb->data,
@@ -684,11 +684,9 @@
 		}
 	}
 
-	skb->dev = bp->dev;
-	skb_reserve(skb, bp->rx_offset);
+	rh = (struct rx_header *) skb->data;
+	skb_reserve(skb, RX_PKT_OFFSET);
 
-	rh = (struct rx_header *)
-		(skb->data - bp->rx_offset);
 	rh->len = 0;
 	rh->flags = 0;
 
@@ -698,13 +696,13 @@
 	if (src_map != NULL)
 		src_map->skb = NULL;
 
-	ctrl  = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
+	ctrl  = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - RX_PKT_OFFSET));
 	if (dest_idx == (B44_RX_RING_SIZE - 1))
 		ctrl |= DESC_CTRL_EOT;
 
 	dp = &bp->rx_ring[dest_idx];
 	dp->ctrl = cpu_to_le32(ctrl);
-	dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);
+	dp->addr = cpu_to_le32((u32) mapping + RX_PKT_OFFSET + bp->dma_offset);
 
 	if (bp->flags & B44_FLAG_RX_RING_HACK)
 		b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
@@ -783,7 +781,7 @@
 					    PCI_DMA_FROMDEVICE);
 		rh = (struct rx_header *) skb->data;
 		len = le16_to_cpu(rh->len);
-		if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
+		if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
 		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
 		drop_it:
 			b44_recycle_rx(bp, cons, bp->rx_prod);
@@ -815,8 +813,8 @@
 			pci_unmap_single(bp->pdev, map,
 					 skb_size, PCI_DMA_FROMDEVICE);
 			/* Leave out rx_header */
-                	skb_put(skb, len+bp->rx_offset);
-            	        skb_pull(skb,bp->rx_offset);
+			skb_put(skb, len + RX_PKT_OFFSET);
+			skb_pull(skb, RX_PKT_OFFSET);
 		} else {
 			struct sk_buff *copy_skb;
 
@@ -828,7 +826,7 @@
 			skb_reserve(copy_skb, 2);
 			skb_put(copy_skb, len);
 			/* DMA sync done above, copy just the actual packet */
-			skb_copy_from_linear_data_offset(skb, bp->rx_offset,
+			skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
 							 copy_skb->data, len);
 			skb = copy_skb;
 		}
@@ -969,7 +967,6 @@
 static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct b44 *bp = netdev_priv(dev);
-	struct sk_buff *bounce_skb;
 	int rc = NETDEV_TX_OK;
 	dma_addr_t mapping;
 	u32 len, entry, ctrl;
@@ -987,12 +984,13 @@
 
 	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
 	if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
+		struct sk_buff *bounce_skb;
+
 		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
 		if (!dma_mapping_error(mapping))
 			pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
 
-		bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
-					     GFP_ATOMIC|GFP_DMA);
+		bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA);
 		if (!bounce_skb)
 			goto err_out;
 
@@ -1001,13 +999,12 @@
 		if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
 			if (!dma_mapping_error(mapping))
 				pci_unmap_single(bp->pdev, mapping,
-					 len, PCI_DMA_TODEVICE);
+						 len, PCI_DMA_TODEVICE);
 			dev_kfree_skb_any(bounce_skb);
 			goto err_out;
 		}
 
-		skb_copy_from_linear_data(skb, skb_put(bounce_skb, len),
-					  skb->len);
+		skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
 		dev_kfree_skb_any(skb);
 		skb = bounce_skb;
 	}
@@ -1396,12 +1393,12 @@
 	bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
 	if (reset_kind == B44_PARTIAL_RESET) {
 		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
-				      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
+				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
 	} else {
 		bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
 		bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
 		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
-				      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
+				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
 		bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
 
 		bw32(bp, B44_DMARX_PTR, bp->rx_pending);
@@ -2093,11 +2090,6 @@
 
 	bp->phy_addr = eeprom[90] & 0x1f;
 
-	/* With this, plus the rx_header prepended to the data by the
-	 * hardware, we'll land the ethernet header on a 2-byte boundary.
-	 */
-	bp->rx_offset = 30;
-
 	bp->imask = IMASK_DEF;
 
 	bp->core_unit = ssb_core_unit(bp);
@@ -2348,11 +2340,11 @@
 	netif_device_attach(bp->dev);
 	spin_unlock_irq(&bp->lock);
 
-	bp->timer.expires = jiffies + HZ;
-	add_timer(&bp->timer);
-
 	b44_enable_ints(bp);
 	netif_wake_queue(dev);
+
+	mod_timer(&bp->timer, jiffies + 1);
+
 	return 0;
 }
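The timer rearm above also switches from an open-coded expires +
add_timer() pair to mod_timer(), which is safe whether or not the timer
is already pending, and wraps the once-a-second statistics deadline in
round_jiffies() so periodic timers from different drivers can expire in
the same tick and leave idle CPUs asleep longer.  A minimal sketch of
the pattern, using the 2007-era timer API and hypothetical my_* names:

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list my_timer;

static void my_timer_fn(unsigned long data)
{
	/* periodic housekeeping goes here */

	/* Rearm for roughly one second from now, rounded to a whole
	 * second so the wakeup can be batched with other timers. */
	mod_timer(&my_timer, round_jiffies(jiffies + HZ));
}

static void my_start(void)
{
	setup_timer(&my_timer, my_timer_fn, 0);
	mod_timer(&my_timer, round_jiffies(jiffies + HZ));
}

Deadlines that must fire promptly should not be rounded, which is why
the resume path above rearms with a plain jiffies + 1.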
 
diff --git a/drivers/net/b44.h b/drivers/net/b44.h
index 18fc133..e537e63 100644
--- a/drivers/net/b44.h
+++ b/drivers/net/b44.h
@@ -443,8 +443,6 @@
 #define B44_FLAG_TX_RING_HACK	0x40000000
 #define B44_FLAG_WOL_ENABLE	0x80000000
 
-	u32			rx_offset;
-
 	u32			msg_enable;
 
 	struct timer_list	timer;
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 80c3d8f..ab72563 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -71,27 +71,29 @@
 	QUEUES_BOUND = (1 << 3),
 };
 
+struct fl_pg_chunk {
+	struct page *page;
+	void *va;
+	unsigned int offset;
+};
+
 struct rx_desc;
 struct rx_sw_desc;
 
-struct sge_fl_page {
-	struct skb_frag_struct frag;
-	unsigned char *va;
-};
-
-struct sge_fl {			/* SGE per free-buffer list state */
-	unsigned int buf_size;	/* size of each Rx buffer */
-	unsigned int credits;	/* # of available Rx buffers */
-	unsigned int size;	/* capacity of free list */
-	unsigned int cidx;	/* consumer index */
-	unsigned int pidx;	/* producer index */
-	unsigned int gen;	/* free list generation */
-	unsigned int cntxt_id;	/* SGE context id for the free list */
-	struct sge_fl_page page;
-	struct rx_desc *desc;	/* address of HW Rx descriptor ring */
-	struct rx_sw_desc *sdesc;	/* address of SW Rx descriptor ring */
-	dma_addr_t phys_addr;	/* physical address of HW ring start */
-	unsigned long empty;	/* # of times queue ran out of buffers */
+struct sge_fl {                     /* SGE per free-buffer list state */
+	unsigned int buf_size;      /* size of each Rx buffer */
+	unsigned int credits;       /* # of available Rx buffers */
+	unsigned int size;          /* capacity of free list */
+	unsigned int cidx;          /* consumer index */
+	unsigned int pidx;          /* producer index */
+	unsigned int gen;           /* free list generation */
+	struct fl_pg_chunk pg_chunk;/* page chunk cache */
+	unsigned int use_pages;     /* whether FL uses pages or sk_buffs */
+	struct rx_desc *desc;       /* address of HW Rx descriptor ring */
+	struct rx_sw_desc *sdesc;   /* address of SW Rx descriptor ring */
+	dma_addr_t   phys_addr;     /* physical address of HW ring start */
+	unsigned int cntxt_id;      /* SGE context id for the free list */
+	unsigned long empty;        /* # of times queue ran out of buffers */
 	unsigned long alloc_failed; /* # of times buffer allocation failed */
 };
 
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
index 8d13796..1637800 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/cxgb3/common.h
@@ -101,6 +101,7 @@
 	TCB_SIZE = 128,		/* TCB size */
 	NMTUS = 16,		/* size of MTU table */
 	NCCTRL_WIN = 32,	/* # of congestion control windows */
+	PROTO_SRAM_LINES = 128, /* size of TP sram */
 };
 
 #define MAX_RX_COALESCING_LEN 16224U
@@ -124,6 +125,30 @@
 };
 
 enum {
+	TP_VERSION_MAJOR	= 1,
+	TP_VERSION_MINOR	= 0,
+	TP_VERSION_MICRO	= 44
+};
+
+#define S_TP_VERSION_MAJOR		16
+#define M_TP_VERSION_MAJOR		0xFF
+#define V_TP_VERSION_MAJOR(x)		((x) << S_TP_VERSION_MAJOR)
+#define G_TP_VERSION_MAJOR(x)		\
+	    (((x) >> S_TP_VERSION_MAJOR) & M_TP_VERSION_MAJOR)
+
+#define S_TP_VERSION_MINOR		8
+#define M_TP_VERSION_MINOR		0xFF
+#define V_TP_VERSION_MINOR(x)		((x) << S_TP_VERSION_MINOR)
+#define G_TP_VERSION_MINOR(x)		\
+	    (((x) >> S_TP_VERSION_MINOR) & M_TP_VERSION_MINOR)
+
+#define S_TP_VERSION_MICRO		0
+#define M_TP_VERSION_MICRO		0xFF
+#define V_TP_VERSION_MICRO(x)		((x) << S_TP_VERSION_MICRO)
+#define G_TP_VERSION_MICRO(x)		\
+	    (((x) >> S_TP_VERSION_MICRO) & M_TP_VERSION_MICRO)
+
+enum {
 	SGE_QSETS = 8,		/* # of SGE Tx/Rx/RspQ sets */
 	SGE_RXQ_PER_SET = 2,	/* # of Rx queues per set */
 	SGE_TXQ_PER_SET = 3	/* # of Tx queues per set */
@@ -654,6 +679,9 @@
 int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data);
 int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data);
 int t3_seeprom_wp(struct adapter *adapter, int enable);
+int t3_check_tpsram_version(struct adapter *adapter);
+int t3_check_tpsram(struct adapter *adapter, u8 *tp_ram, unsigned int size);
+int t3_set_proto_sram(struct adapter *adap, u8 *data);
 int t3_read_flash(struct adapter *adapter, unsigned int addr,
 		  unsigned int nwords, u32 *data, int byte_oriented);
 int t3_load_fw(struct adapter *adapter, const u8 * fw_data, unsigned int size);
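The S_/M_/V_/G_ quartet above follows the usual cxgb3 register idiom:
S_ is the field's bit offset, M_ its width mask, V_ packs a value and
G_ extracts one.  As a worked example (not code from the patch), the
expected TP version 1.0.44 packs and unpacks as:

u32 vers = V_TP_VERSION_MAJOR(TP_VERSION_MAJOR) |	/*  1 << 16 */
	   V_TP_VERSION_MINOR(TP_VERSION_MINOR) |	/*  0 <<  8 */
	   V_TP_VERSION_MICRO(TP_VERSION_MICRO);	/* 44 <<  0 */
/* vers == 0x0001002c */

unsigned int major = G_TP_VERSION_MAJOR(vers);	/* (vers >> 16) & 0xff == 1  */
unsigned int minor = G_TP_VERSION_MINOR(vers);	/* (vers >>  8) & 0xff == 0  */
unsigned int micro = G_TP_VERSION_MICRO(vers);	/* (vers >>  0) & 0xff == 44 */

t3_check_tpsram_version() below compares the major and minor fields of
the word read back from the adapter against these constants.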
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index d8a1f54..15defe4 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -2088,6 +2088,42 @@
 }
 #endif
 
+#define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"
+static int update_tpsram(struct adapter *adap)
+{
+	const struct firmware *tpsram;
+	char buf[64];
+	struct device *dev = &adap->pdev->dev;
+	int ret;
+	char rev;
+
+	rev = adap->params.rev == T3_REV_B2 ? 'b' : 'a';
+
+	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
+		 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
+
+	ret = request_firmware(&tpsram, buf, dev);
+	if (ret < 0) {
+		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
+			buf);
+		return ret;
+	}
+
+	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
+	if (ret)
+		goto release_tpsram;
+
+	ret = t3_set_proto_sram(adap, tpsram->data);
+	if (ret)
+		dev_err(dev, "loading protocol SRAM failed\n");
+
+release_tpsram:
+	release_firmware(tpsram);
+
+	return ret;
+}
+
 /*
  * Periodic accumulation of MAC statistics.
  */
@@ -2437,6 +2473,13 @@
 		goto out_free_dev;
 	}
 
+	err = t3_check_tpsram_version(adapter);
+	if (err == -EINVAL)
+		err = update_tpsram(adapter);
+
+	if (err)
+		goto out_free_dev;
+
 	/*
 	 * The card is now ready to go.  If any errors occur during device
 	 * registration we do not fail the whole card but rather proceed only
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
index 020859c..aa80313 100644
--- a/drivers/net/cxgb3/regs.h
+++ b/drivers/net/cxgb3/regs.h
@@ -1160,6 +1160,8 @@
 
 #define A_TP_MOD_CHANNEL_WEIGHT 0x434
 
+#define A_TP_MOD_RATE_LIMIT 0x438
+
 #define A_TP_PIO_ADDR 0x440
 
 #define A_TP_PIO_DATA 0x444
@@ -1214,6 +1216,15 @@
 #define G_TXDROPCNTCH0RCVD(x) (((x) >> S_TXDROPCNTCH0RCVD) & \
 			       M_TXDROPCNTCH0RCVD)
 
+#define A_TP_PROXY_FLOW_CNTL 0x4b0
+
+#define A_TP_EMBED_OP_FIELD0 0x4e8
+#define A_TP_EMBED_OP_FIELD1 0x4ec
+#define A_TP_EMBED_OP_FIELD2 0x4f0
+#define A_TP_EMBED_OP_FIELD3 0x4f4
+#define A_TP_EMBED_OP_FIELD4 0x4f8
+#define A_TP_EMBED_OP_FIELD5 0x4fc
+
 #define A_ULPRX_CTL 0x500
 
 #define S_ROUND_ROBIN    4
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index a60ec4d..a2cfd68 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -46,23 +46,16 @@
 
 #define SGE_RX_SM_BUF_SIZE 1536
 
-/*
- * If USE_RX_PAGE is defined, the small freelist populated with (partial)
- * pages instead of skbs. Pages are carved up into RX_PAGE_SIZE chunks (must
- * be a multiple of the host page size).
- */
-#define USE_RX_PAGE
-#define RX_PAGE_SIZE 2048
-
-/*
- * skb freelist packets are copied into a new skb (and the freelist one is 
- * reused) if their len is <= 
- */
 #define SGE_RX_COPY_THRES  256
+#define SGE_RX_PULL_LEN    128
 
 /*
- * Minimum number of freelist entries before we start dropping TUNNEL frames.
+ * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
+ * It must be a divisor of PAGE_SIZE.  If set to 0 FL0 will use sk_buffs
+ * directly.
  */
+#define FL0_PG_CHUNK_SIZE  2048
+
 #define SGE_RX_DROP_THRES 16
 
 /*
@@ -100,12 +93,12 @@
 	struct sk_buff *skb;
 };
 
-struct rx_sw_desc {		/* SW state per Rx descriptor */
+struct rx_sw_desc {                /* SW state per Rx descriptor */
 	union {
 		struct sk_buff *skb;
-		struct sge_fl_page page;
-	} t;
-	 DECLARE_PCI_UNMAP_ADDR(dma_addr);
+		struct fl_pg_chunk pg_chunk;
+	};
+	DECLARE_PCI_UNMAP_ADDR(dma_addr);
 };
 
 struct rsp_desc {		/* response queue descriptor */
@@ -351,27 +344,26 @@
 
 		pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
 				 q->buf_size, PCI_DMA_FROMDEVICE);
-
-		if (q->buf_size != RX_PAGE_SIZE) {
-			kfree_skb(d->t.skb);
-			d->t.skb = NULL;
+		if (q->use_pages) {
+			put_page(d->pg_chunk.page);
+			d->pg_chunk.page = NULL;
 		} else {
-			if (d->t.page.frag.page)
-				put_page(d->t.page.frag.page);
-			d->t.page.frag.page = NULL;
+			kfree_skb(d->skb);
+			d->skb = NULL;
 		}
 		if (++cidx == q->size)
 			cidx = 0;
 	}
 
-	if (q->page.frag.page)
-		put_page(q->page.frag.page);
-	q->page.frag.page = NULL;
+	if (q->pg_chunk.page) {
+		__free_page(q->pg_chunk.page);
+		q->pg_chunk.page = NULL;
+	}
 }
 
 /**
  *	add_one_rx_buf - add a packet buffer to a free-buffer list
- *	@va: va of the buffer to add
+ *	@va:  buffer start VA
  *	@len: the buffer length
  *	@d: the HW Rx descriptor to write
  *	@sd: the SW Rx descriptor to write
@@ -381,7 +373,7 @@
  *	Add a buffer of the given length to the supplied HW and SW Rx
  *	descriptors.
  */
-static inline void add_one_rx_buf(unsigned char *va, unsigned int len,
+static inline void add_one_rx_buf(void *va, unsigned int len,
 				  struct rx_desc *d, struct rx_sw_desc *sd,
 				  unsigned int gen, struct pci_dev *pdev)
 {
@@ -397,6 +389,27 @@
 	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
 }
 
+static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp)
+{
+	if (!q->pg_chunk.page) {
+		q->pg_chunk.page = alloc_page(gfp);
+		if (unlikely(!q->pg_chunk.page))
+			return -ENOMEM;
+		q->pg_chunk.va = page_address(q->pg_chunk.page);
+		q->pg_chunk.offset = 0;
+	}
+	sd->pg_chunk = q->pg_chunk;
+
+	q->pg_chunk.offset += q->buf_size;
+	if (q->pg_chunk.offset == PAGE_SIZE)
+		q->pg_chunk.page = NULL;
+	else {
+		q->pg_chunk.va += q->buf_size;
+		get_page(q->pg_chunk.page);
+	}
+	return 0;
+}
+
 /**
  *	refill_fl - refill an SGE free-buffer list
  *	@adapter: the adapter
@@ -410,49 +423,29 @@
  */
 static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
 {
+	void *buf_start;
 	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
 	struct rx_desc *d = &q->desc[q->pidx];
-	struct sge_fl_page *p = &q->page;
 
 	while (n--) {
-		unsigned char *va;
-
-		if (unlikely(q->buf_size != RX_PAGE_SIZE)) {
-			struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
-
-			if (!skb) {
-				q->alloc_failed++;
+		if (q->use_pages) {
+			if (unlikely(alloc_pg_chunk(q, sd, gfp))) {
+nomem:				q->alloc_failed++;
 				break;
 			}
-			va = skb->data;
-			sd->t.skb = skb;
+			buf_start = sd->pg_chunk.va;
 		} else {
-			if (!p->frag.page) {
-				p->frag.page = alloc_pages(gfp, 0);
-				if (unlikely(!p->frag.page)) {
-					q->alloc_failed++;
-					break;
-				} else {
-					p->frag.size = RX_PAGE_SIZE;
-					p->frag.page_offset = 0;
-					p->va = page_address(p->frag.page);
-				}
-			}
+			struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
 
-			memcpy(&sd->t, p, sizeof(*p));
-			va = p->va;
+			if (!skb)
+				goto nomem;
 
-			p->frag.page_offset += RX_PAGE_SIZE;
-			BUG_ON(p->frag.page_offset > PAGE_SIZE);
-			p->va += RX_PAGE_SIZE;
-			if (p->frag.page_offset == PAGE_SIZE)
-				p->frag.page = NULL;
-			else
-				get_page(p->frag.page);
+			sd->skb = skb;
+			buf_start = skb->data;
 		}
 
-		add_one_rx_buf(va, q->buf_size, d, sd, q->gen, adap->pdev);
-
+		add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,
+			       adap->pdev);
 		d++;
 		sd++;
 		if (++q->pidx == q->size) {
@@ -487,7 +480,7 @@
 	struct rx_desc *from = &q->desc[idx];
 	struct rx_desc *to = &q->desc[q->pidx];
 
-	memcpy(&q->sdesc[q->pidx], &q->sdesc[idx], sizeof(struct rx_sw_desc));
+	q->sdesc[q->pidx] = q->sdesc[idx];
 	to->addr_lo = from->addr_lo;	/* already big endian */
 	to->addr_hi = from->addr_hi;	/* likewise */
 	wmb();
@@ -650,6 +643,132 @@
 }
 
 /**
+ *	get_packet - return the next ingress packet buffer from a free list
+ *	@adap: the adapter that received the packet
+ *	@fl: the SGE free list holding the packet
+ *	@len: the packet length including any SGE padding
+ *	@drop_thres: # of remaining buffers before we start dropping packets
+ *
+ *	Get the next packet from a free list and complete setup of the
+ *	sk_buff.  If the packet is small we make a copy and recycle the
+ *	original buffer, otherwise we use the original buffer itself.  If a
+ *	positive drop threshold is supplied packets are dropped and their
+ *	buffers recycled if (a) the number of remaining buffers is under the
+ *	threshold and the packet is too big to copy, or (b) the packet should
+ *	be copied but there is no memory for the copy.
+ */
+static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
+				  unsigned int len, unsigned int drop_thres)
+{
+	struct sk_buff *skb = NULL;
+	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+
+	prefetch(sd->skb->data);
+	fl->credits--;
+
+	if (len <= SGE_RX_COPY_THRES) {
+		skb = alloc_skb(len, GFP_ATOMIC);
+		if (likely(skb != NULL)) {
+			__skb_put(skb, len);
+			pci_dma_sync_single_for_cpu(adap->pdev,
+					    pci_unmap_addr(sd, dma_addr), len,
+					    PCI_DMA_FROMDEVICE);
+			memcpy(skb->data, sd->skb->data, len);
+			pci_dma_sync_single_for_device(adap->pdev,
+					    pci_unmap_addr(sd, dma_addr), len,
+					    PCI_DMA_FROMDEVICE);
+		} else if (!drop_thres)
+			goto use_orig_buf;
+recycle:
+		recycle_rx_buf(adap, fl, fl->cidx);
+		return skb;
+	}
+
+	if (unlikely(fl->credits < drop_thres))
+		goto recycle;
+
+use_orig_buf:
+	pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
+			 fl->buf_size, PCI_DMA_FROMDEVICE);
+	skb = sd->skb;
+	skb_put(skb, len);
+	__refill_fl(adap, fl);
+	return skb;
+}
+
+/**
+ *	get_packet_pg - return the next ingress packet buffer from a free list
+ *	@adap: the adapter that received the packet
+ *	@fl: the SGE free list holding the packet
+ *	@len: the packet length including any SGE padding
+ *	@drop_thres: # of remaining buffers before we start dropping packets
+ *
+ *	Get the next packet from a free list populated with page chunks.
+ *	If the packet is small we make a copy and recycle the original buffer,
+ *	otherwise we attach the original buffer as a page fragment to a fresh
+ *	sk_buff.  If a positive drop threshold is supplied packets are dropped
+ *	and their buffers recycled if (a) the number of remaining buffers is
+ *	under the threshold and the packet is too big to copy, or (b) there's
+ *	no system memory.
+ *
+ * 	Note: this function is similar to get_packet() but deals with Rx
+ * 	buffers that are page chunks rather than sk_buffs.
+ */
+static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
+				     unsigned int len, unsigned int drop_thres)
+{
+	struct sk_buff *skb = NULL;
+	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+
+	if (len <= SGE_RX_COPY_THRES) {
+		skb = alloc_skb(len, GFP_ATOMIC);
+		if (likely(skb != NULL)) {
+			__skb_put(skb, len);
+			pci_dma_sync_single_for_cpu(adap->pdev,
+					    pci_unmap_addr(sd, dma_addr), len,
+					    PCI_DMA_FROMDEVICE);
+			memcpy(skb->data, sd->pg_chunk.va, len);
+			pci_dma_sync_single_for_device(adap->pdev,
+					    pci_unmap_addr(sd, dma_addr), len,
+					    PCI_DMA_FROMDEVICE);
+		} else if (!drop_thres)
+			return NULL;
+recycle:
+		fl->credits--;
+		recycle_rx_buf(adap, fl, fl->cidx);
+		return skb;
+	}
+
+	if (unlikely(fl->credits <= drop_thres))
+		goto recycle;
+
+	skb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
+	if (unlikely(!skb)) {
+		if (!drop_thres)
+			return NULL;
+		goto recycle;
+	}
+
+	pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
+			 fl->buf_size, PCI_DMA_FROMDEVICE);
+	__skb_put(skb, SGE_RX_PULL_LEN);
+	memcpy(skb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
+	skb_fill_page_desc(skb, 0, sd->pg_chunk.page,
+			   sd->pg_chunk.offset + SGE_RX_PULL_LEN,
+			   len - SGE_RX_PULL_LEN);
+	skb->len = len;
+	skb->data_len = len - SGE_RX_PULL_LEN;
+	skb->truesize += skb->data_len;
+
+	fl->credits--;
+	/*
+	 * We do not refill FLs here, we let the caller do it to overlap a
+	 * prefetch.
+	 */
+	return skb;
+}
+
+/**
  *	get_imm_packet - return the next ingress packet buffer from a response
  *	@resp: the response descriptor containing the packet data
  *
@@ -1715,85 +1834,6 @@
 		netif_rx(skb);
 }
 
-#define SKB_DATA_SIZE 128
-
-static void skb_data_init(struct sk_buff *skb, struct sge_fl_page *p,
-			  unsigned int len)
-{
-	skb->len = len;
-	if (len <= SKB_DATA_SIZE) {
-		skb_copy_to_linear_data(skb, p->va, len);
-		skb->tail += len;
-		put_page(p->frag.page);
-	} else {
-		skb_copy_to_linear_data(skb, p->va, SKB_DATA_SIZE);
-		skb_shinfo(skb)->frags[0].page = p->frag.page;
-		skb_shinfo(skb)->frags[0].page_offset =
-		    p->frag.page_offset + SKB_DATA_SIZE;
-		skb_shinfo(skb)->frags[0].size = len - SKB_DATA_SIZE;
-		skb_shinfo(skb)->nr_frags = 1;
-		skb->data_len = len - SKB_DATA_SIZE;
-		skb->tail += SKB_DATA_SIZE;
-		skb->truesize += skb->data_len;
-	}
-}
-
-/**
-*      get_packet - return the next ingress packet buffer from a free list
-*      @adap: the adapter that received the packet
-*      @fl: the SGE free list holding the packet
-*      @len: the packet length including any SGE padding
-*      @drop_thres: # of remaining buffers before we start dropping packets
-*
-*      Get the next packet from a free list and complete setup of the
-*      sk_buff.  If the packet is small we make a copy and recycle the
-*      original buffer, otherwise we use the original buffer itself.  If a
-*      positive drop threshold is supplied packets are dropped and their
-*      buffers recycled if (a) the number of remaining buffers is under the
-*      threshold and the packet is too big to copy, or (b) the packet should
-*      be copied but there is no memory for the copy.
-*/
-static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
-				  unsigned int len, unsigned int drop_thres)
-{
-	struct sk_buff *skb = NULL;
-	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
-
-	prefetch(sd->t.skb->data);
-
-	if (len <= SGE_RX_COPY_THRES) {
-		skb = alloc_skb(len, GFP_ATOMIC);
-		if (likely(skb != NULL)) {
-			struct rx_desc *d = &fl->desc[fl->cidx];
-			dma_addr_t mapping =
-			    (dma_addr_t)((u64) be32_to_cpu(d->addr_hi) << 32 |
-					 be32_to_cpu(d->addr_lo));
-
-			__skb_put(skb, len);
-			pci_dma_sync_single_for_cpu(adap->pdev, mapping, len,
-						    PCI_DMA_FROMDEVICE);
-			skb_copy_from_linear_data(sd->t.skb, skb->data, len);
-			pci_dma_sync_single_for_device(adap->pdev, mapping, len,
-						       PCI_DMA_FROMDEVICE);
-		} else if (!drop_thres)
-			goto use_orig_buf;
-recycle:
-		recycle_rx_buf(adap, fl, fl->cidx);
-		return skb;
-	}
-
-	if (unlikely(fl->credits < drop_thres))
-		goto recycle;
-
-use_orig_buf:
-	pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
-			 fl->buf_size, PCI_DMA_FROMDEVICE);
-	skb = sd->t.skb;
-	skb_put(skb, len);
-	__refill_fl(adap, fl);
-	return skb;
-}
-
 /**
  *	handle_rsp_cntrl_info - handles control information in a response
  *	@qs: the queue set corresponding to the response
@@ -1935,7 +1975,7 @@
 		} else if (flags & F_RSPD_IMM_DATA_VALID) {
 			skb = get_imm_packet(r);
 			if (unlikely(!skb)) {
-			      no_mem:
+no_mem:
 				q->next_holdoff = NOMEM_INTR_DELAY;
 				q->nomem++;
 				/* consume one credit since we tried */
@@ -1945,53 +1985,29 @@
 			q->imm_data++;
 			ethpad = 0;
 		} else if ((len = ntohl(r->len_cq)) != 0) {
-			struct sge_fl *fl =
-			    (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
+			struct sge_fl *fl;
 
-			if (fl->buf_size == RX_PAGE_SIZE) {
-				struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
-				struct sge_fl_page *p = &sd->t.page;
+			fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
+			if (fl->use_pages) {
+				void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
 
-				prefetch(p->va);
-				prefetch(p->va + L1_CACHE_BYTES);
-
+				prefetch(addr);
+#if L1_CACHE_BYTES < 128
+				prefetch(addr + L1_CACHE_BYTES);
+#endif
 				__refill_fl(adap, fl);
 
-				pci_unmap_single(adap->pdev,
-						 pci_unmap_addr(sd, dma_addr),
-						 fl->buf_size,
-						 PCI_DMA_FROMDEVICE);
-
-				if (eth) {
-					if (unlikely(fl->credits <
-						     SGE_RX_DROP_THRES))
-						goto eth_recycle;
-
-					skb = alloc_skb(SKB_DATA_SIZE,
-							GFP_ATOMIC);
-					if (unlikely(!skb)) {
-eth_recycle:
-						q->rx_drops++;
-						recycle_rx_buf(adap, fl,
-							       fl->cidx);
-						goto eth_done;
-					}
-				} else {
-					skb = alloc_skb(SKB_DATA_SIZE,
-							GFP_ATOMIC);
-					if (unlikely(!skb))
-						goto no_mem;
-				}
-
-				skb_data_init(skb, p, G_RSPD_LEN(len));
-eth_done:
-				fl->credits--;
-				q->eth_pkts++;
-			} else {
-				fl->credits--;
+				skb = get_packet_pg(adap, fl, G_RSPD_LEN(len),
+						 eth ? SGE_RX_DROP_THRES : 0);
+			} else
 				skb = get_packet(adap, fl, G_RSPD_LEN(len),
 						 eth ? SGE_RX_DROP_THRES : 0);
-			}
+			if (unlikely(!skb)) {
+				if (!eth)
+					goto no_mem;
+				q->rx_drops++;
+			} else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
+				__skb_pull(skb, 2);
 
 			if (++fl->cidx == fl->size)
 				fl->cidx = 0;
@@ -2016,20 +2032,15 @@
 			q->credits = 0;
 		}
 
-		if (skb) {
-			/* Preserve the RSS info in csum & priority */
-			skb->csum = rss_hi;
-			skb->priority = rss_lo;
-
+		if (likely(skb != NULL)) {
 			if (eth)
 				rx_eth(adap, q, skb, ethpad);
 			else {
-				if (unlikely(r->rss_hdr.opcode ==
-					     CPL_TRACE_PKT))
-					__skb_pull(skb, ethpad);
-
-				ngathered = rx_offload(&adap->tdev, q,
-						       skb, offload_skbs,
+				/* Preserve the RSS info in csum & priority */
+				skb->csum = rss_hi;
+				skb->priority = rss_lo;
+				ngathered = rx_offload(&adap->tdev, q, skb,
+						       offload_skbs,
 						       ngathered);
 			}
 		}
@@ -2635,25 +2646,15 @@
 	q->txq[TXQ_ETH].stop_thres = nports *
 	    flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
 
-	if (!is_offload(adapter)) {
-#ifdef USE_RX_PAGE
-		q->fl[0].buf_size = RX_PAGE_SIZE;
+#if FL0_PG_CHUNK_SIZE > 0
+	q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
 #else
-		q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + 2 +
-		    sizeof(struct cpl_rx_pkt);
+	q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
 #endif
-		q->fl[1].buf_size = MAX_FRAME_SIZE + 2 +
-		    sizeof(struct cpl_rx_pkt);
-	} else {
-#ifdef USE_RX_PAGE
-		q->fl[0].buf_size = RX_PAGE_SIZE;
-#else
-		q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE +
-		    sizeof(struct cpl_rx_data);
-#endif
-		q->fl[1].buf_size = (16 * 1024) -
-		    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-	}
+	q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
+	q->fl[1].buf_size = is_offload(adapter) ?
+		(16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
+		MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
 
 	spin_lock(&adapter->sge.reg_lock);
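The page-chunk scheme in alloc_pg_chunk() above tracks the carved-up
page purely through its refcount: the reference returned by
alloc_page() travels with the first chunk handed out, and get_page()
adds one reference for the allocator's cached pg_chunk whenever a
further chunk remains.  A trace with the defaults used here (assuming
PAGE_SIZE == 4096 and FL0_PG_CHUNK_SIZE == 2048):

/*
 * alloc_page()           refcount 1, q->pg_chunk = {page, va, offset 0}
 * 1st alloc_pg_chunk()   chunk at offset 0 handed to sd; one chunk
 *                        still fits, so get_page() -> refcount 2
 * 2nd alloc_pg_chunk()   chunk at offset 2048 handed to sd; offset
 *                        reaches PAGE_SIZE, q->pg_chunk.page = NULL
 *
 * Each consumed chunk later drops one reference via put_page() (see
 * free_rx_bufs()), and the page is freed when the last one goes.
 */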
 
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index fb485d0..dd3149d 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -847,6 +847,64 @@
 	return 0;
 }
 
+/**
+ *	t3_check_tpsram_version - read the tp sram version
+ *	@adapter: the adapter
+ *
+ *	Reads the protocol SRAM version currently loaded in the adapter
+ *	and returns 0 if it matches the version the driver expects,
+ *	-EINVAL otherwise.
+ */
+int t3_check_tpsram_version(struct adapter *adapter)
+{
+	int ret;
+	u32 vers;
+	unsigned int major, minor;
+
+	/* Get version loaded in SRAM */
+	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
+	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
+			      1, 1, 5, 1);
+	if (ret)
+		return ret;
+
+	vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
+
+	major = G_TP_VERSION_MAJOR(vers);
+	minor = G_TP_VERSION_MINOR(vers);
+
+	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
+		return 0;
+
+	return -EINVAL;
+}
+
+/**
+ *	t3_check_tpsram - check the integrity of a protocol SRAM image
+ *	@adapter: the adapter
+ *	@tp_sram: the protocol SRAM image to check
+ *	@size: image size
+ *
+ *	Verifies the checksum of the supplied protocol SRAM image.
+ *	Returns 0 if the image is valid, a negative error otherwise.
+ */
+int t3_check_tpsram(struct adapter *adapter, u8 *tp_sram, unsigned int size)
+{
+	u32 csum;
+	unsigned int i;
+	const u32 *p = (const u32 *)tp_sram;
+
+	/* Verify checksum */
+	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
+		csum += ntohl(p[i]);
+	if (csum != 0xffffffff) {
+		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
+		       csum);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 enum fw_version_type {
 	FW_VERSION_N3,
 	FW_VERSION_T3
@@ -921,7 +979,7 @@
 /*
  *	t3_load_fw - download firmware
  *	@adapter: the adapter
- *	@fw_data: the firrware image to write
+ *	@fw_data: the firmware image to write
  *	@size: image size
  *
  *	Write the supplied firmware image to the card's serial flash.
@@ -2362,7 +2420,7 @@
 		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
 	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
 		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
-		     V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
+		     V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
 	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
 		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
 		     V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
@@ -2371,16 +2429,18 @@
 			 F_IPV6ENABLE | F_NICMODE);
 	t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
 	t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
-	t3_set_reg_field(adap, A_TP_PARA_REG6,
-			 adap->params.rev > 0 ? F_ENABLEESND : F_T3A_ENABLEESND,
-			 0);
+	t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
+			 adap->params.rev > 0 ? F_ENABLEESND :
+			 F_T3A_ENABLEESND);
 
 	t3_set_reg_field(adap, A_TP_PC_CONFIG,
-			 F_ENABLEEPCMDAFULL | F_ENABLEOCSPIFULL,
-			 F_TXDEFERENABLE | F_HEARBEATDACK | F_TXCONGESTIONMODE |
-			 F_RXCONGESTIONMODE);
+			 F_ENABLEEPCMDAFULL,
+			 F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
+			 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
 	t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, 0);
-
+	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
+	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
+
 	if (adap->params.rev > 0) {
 		tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
 		t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
@@ -2390,9 +2450,10 @@
 	} else
 		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
 
-	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0x12121212);
-	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0x12121212);
-	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0x1212);
+	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
+	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
+	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
+	t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
 }
 
 /* Desired TP timer resolution in usec */
@@ -2468,6 +2529,7 @@
 		val |= F_RXCOALESCEENABLE;
 		if (psh)
 			val |= F_RXCOALESCEPSHEN;
+		size = min(MAX_RX_COALESCING_LEN, size);
 		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
 			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
 	}
@@ -2496,11 +2558,11 @@
 	 * it can accommodate max size TCP/IP headers when SACK and timestamps
 	 * are enabled and still have at least 8 bytes of payload.
 	 */
-	mtus[0] = 88;
-	mtus[1] = 256;
-	mtus[2] = 512;
-	mtus[3] = 576;
-	mtus[4] = 808;
+	mtus[0] = 88;
+	mtus[1] = 88;
+	mtus[2] = 256;
+	mtus[3] = 512;
+	mtus[4] = 576;
 	mtus[5] = 1024;
 	mtus[6] = 1280;
 	mtus[7] = 1492;
@@ -2682,6 +2744,34 @@
 	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
 }
 
+/**
+ *	t3_set_proto_sram - set the contents of the protocol sram
+ *	@adapter: the adapter
+ *	@data: the protocol image
+ *
+ *	Write the contents of the protocol SRAM.
+ */
+int t3_set_proto_sram(struct adapter *adap, u8 *data)
+{
+	int i;
+	u32 *buf = (u32 *)data;
+
+	for (i = 0; i < PROTO_SRAM_LINES; i++) {
+		t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, cpu_to_be32(*buf++));
+		t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, cpu_to_be32(*buf++));
+		t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, cpu_to_be32(*buf++));
+		t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, cpu_to_be32(*buf++));
+		t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, cpu_to_be32(*buf++));
+
+		t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
+		if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
+			return -EIO;
+	}
+	t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
+
+	return 0;
+}
+
 void t3_config_trace_filter(struct adapter *adapter,
 			    const struct trace_params *tp, int filter_index,
 			    int invert, int enable)
@@ -2802,7 +2892,7 @@
 		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
 		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
 			     F_PORT0ACTIVE | F_ENFORCEPKT);
-		t3_write_reg(adap, A_PM1_TX_CFG, 0xc000c000);
+		t3_write_reg(adap, A_PM1_TX_CFG, 0xffffffff);
 	} else {
 		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
 		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
@@ -3097,7 +3187,7 @@
 	else
 		t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN);
 
-	t3_write_reg(adapter, A_PM1_RX_CFG, 0xf000f000);
+	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
 	init_hw_for_avail_ports(adapter, adapter->params.nports);
 	t3_sge_init(adapter, &adapter->params.sge);
 
diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h
index b112317..8eddd23 100644
--- a/drivers/net/cxgb3/version.h
+++ b/drivers/net/cxgb3/version.h
@@ -39,6 +39,6 @@
 
 /* Firmware version */
 #define FW_VERSION_MAJOR 4
-#define FW_VERSION_MINOR 0
+#define FW_VERSION_MINOR 1
 #define FW_VERSION_MICRO 0
 #endif				/* __CHELSIO_VERSION_H */
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index c0f81b5..abaf3ac 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -39,7 +39,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME	"ehea"
-#define DRV_VERSION	"EHEA_0064"
+#define DRV_VERSION	"EHEA_0065"
 
 #define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
 	| NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
@@ -136,10 +136,10 @@
 	(0xffffffffffffffffULL >> ((64 - (mask)) & 0xffff))
 
 #define EHEA_BMASK_SET(mask, value) \
-        ((EHEA_BMASK_MASK(mask) & ((u64)(value))) << EHEA_BMASK_SHIFTPOS(mask))
+	((EHEA_BMASK_MASK(mask) & ((u64)(value))) << EHEA_BMASK_SHIFTPOS(mask))
 
 #define EHEA_BMASK_GET(mask, value) \
-        (EHEA_BMASK_MASK(mask) & (((u64)(value)) >> EHEA_BMASK_SHIFTPOS(mask)))
+	(EHEA_BMASK_MASK(mask) & (((u64)(value)) >> EHEA_BMASK_SHIFTPOS(mask)))
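
The set/get pair above packs a value into, and pulls it back out of, a bit
field described by a single mask word. A round-trip sketch, assuming the
companion definition in ehea.h that encodes a field's position and width
as EHEA_BMASK(pos, length) = ((pos) << 16) + (length); DEMO_FIELD is a
made-up field:

	/* Sketch only: a 16-bit field starting at bit 8. */
	#define DEMO_FIELD	EHEA_BMASK(8, 16)

	static void ehea_bmask_demo(void)
	{
		u64 reg = 0;

		reg |= EHEA_BMASK_SET(DEMO_FIELD, 0xbeefULL);		/* pack */
		WARN_ON(EHEA_BMASK_GET(DEMO_FIELD, reg) != 0xbeefULL);	/* unpack */
	}
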
 
 /*
  * Generic ehea page
@@ -190,7 +190,7 @@
  * Queue attributes passed to ehea_create_qp()
  */
 struct ehea_qp_init_attr {
-        /* input parameter */
+	/* input parameter */
 	u32 qp_token;           /* queue token */
 	u8 low_lat_rq1;
 	u8 signalingtype;       /* cqe generation flag */
@@ -212,7 +212,7 @@
 	u64 recv_cq_handle;
 	u64 aff_eq_handle;
 
-        /* output parameter */
+	/* output parameter */
 	u32 qp_nr;
 	u16 act_nr_send_wqes;
 	u16 act_nr_rwqes_rq1;
@@ -279,12 +279,12 @@
  * Completion Queue attributes
  */
 struct ehea_cq_attr {
-        /* input parameter */
+	/* input parameter */
 	u32 max_nr_of_cqes;
 	u32 cq_token;
 	u64 eq_handle;
 
-        /* output parameter */
+	/* output parameter */
 	u32 act_nr_of_cqes;
 	u32 nr_pages;
 };
diff --git a/drivers/net/ehea/ehea_hw.h b/drivers/net/ehea/ehea_hw.h
index 1246757..1af7ca4 100644
--- a/drivers/net/ehea/ehea_hw.h
+++ b/drivers/net/ehea/ehea_hw.h
@@ -211,34 +211,34 @@
 }
 
 #define epa_store_eq(epa, offset, value)\
-        epa_store(epa, EQTEMM_OFFSET(offset), value)
+	epa_store(epa, EQTEMM_OFFSET(offset), value)
 #define epa_load_eq(epa, offset)\
-        epa_load(epa, EQTEMM_OFFSET(offset))
+	epa_load(epa, EQTEMM_OFFSET(offset))
 
 #define epa_store_cq(epa, offset, value)\
-        epa_store(epa, CQTEMM_OFFSET(offset), value)
+	epa_store(epa, CQTEMM_OFFSET(offset), value)
 #define epa_load_cq(epa, offset)\
-        epa_load(epa, CQTEMM_OFFSET(offset))
+	epa_load(epa, CQTEMM_OFFSET(offset))
 
 #define epa_store_qp(epa, offset, value)\
-        epa_store(epa, QPTEMM_OFFSET(offset), value)
+	epa_store(epa, QPTEMM_OFFSET(offset), value)
 #define epa_load_qp(epa, offset)\
-        epa_load(epa, QPTEMM_OFFSET(offset))
+	epa_load(epa, QPTEMM_OFFSET(offset))
 
 #define epa_store_qped(epa, offset, value)\
-        epa_store(epa, QPEDMM_OFFSET(offset), value)
+	epa_store(epa, QPEDMM_OFFSET(offset), value)
 #define epa_load_qped(epa, offset)\
-        epa_load(epa, QPEDMM_OFFSET(offset))
+	epa_load(epa, QPEDMM_OFFSET(offset))
 
 #define epa_store_mrmw(epa, offset, value)\
-        epa_store(epa, MRMWMM_OFFSET(offset), value)
+	epa_store(epa, MRMWMM_OFFSET(offset), value)
 #define epa_load_mrmw(epa, offset)\
-        epa_load(epa, MRMWMM_OFFSET(offset))
+	epa_load(epa, MRMWMM_OFFSET(offset))
 
 #define epa_store_base(epa, offset, value)\
-        epa_store(epa, HCAGR_OFFSET(offset), value)
+	epa_store(epa, HCAGR_OFFSET(offset), value)
 #define epa_load_base(epa, offset)\
-        epa_load(epa, HCAGR_OFFSET(offset))
+	epa_load(epa, HCAGR_OFFSET(offset))
 
 static inline void ehea_update_sqa(struct ehea_qp *qp, u16 nr_wqes)
 {
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 9e13433..bdb5241 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -81,7 +81,7 @@
 static int port_name_cnt = 0;
 
 static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev,
-                                        const struct of_device_id *id);
+					const struct of_device_id *id);
 
 static int __devexit ehea_remove(struct ibmebus_dev *dev);
 
@@ -236,7 +236,7 @@
 
 		rwqe = ehea_get_next_rwqe(qp, rq_nr);
 		rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
-		            | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
+			    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
 		rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
 		rwqe->sg_list[0].vaddr = (u64)skb->data;
 		rwqe->sg_list[0].len = packet_size;
@@ -427,7 +427,7 @@
 						break;
 				}
 				skb_copy_to_linear_data(skb, ((char*)cqe) + 64,
-					       cqe->num_bytes_transfered - 4);
+						 cqe->num_bytes_transfered - 4);
 				ehea_fill_skb(port->netdev, skb, cqe);
 			} else if (rq == 2) {  /* RQ2 */
 				skb = get_skb_by_index(skb_arr_rq2,
@@ -618,7 +618,7 @@
 
 	for (i = 0; i < EHEA_MAX_PORTS; i++)
 		if (adapter->port[i])
-	                if (adapter->port[i]->logical_port_id == logical_port)
+			if (adapter->port[i]->logical_port_id == logical_port)
 				return adapter->port[i];
 	return NULL;
 }
@@ -1695,6 +1695,7 @@
 {
 	if (skb->protocol == htons(ETH_P_IP)) {
 		const struct iphdr *iph = ip_hdr(skb);
+
 		/* IPv4 */
 		swqe->tx_control |= EHEA_SWQE_CRC
 				 | EHEA_SWQE_IP_CHECKSUM
@@ -1705,13 +1706,12 @@
 		write_ip_start_end(swqe, skb);
 
 		if (iph->protocol == IPPROTO_UDP) {
-			if ((iph->frag_off & IP_MF) ||
-			    (iph->frag_off & IP_OFFSET))
+			if ((iph->frag_off & IP_MF)
+			    || (iph->frag_off & IP_OFFSET))
 				/* IP fragment, so don't change cs */
 				swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
 			else
 				write_udp_offset_end(swqe, skb);
-
 		} else if (iph->protocol == IPPROTO_TCP) {
 			write_tcp_offset_end(swqe, skb);
 		}
@@ -1739,6 +1739,7 @@
 
 	if (skb->protocol == htons(ETH_P_IP)) {
 		const struct iphdr *iph = ip_hdr(skb);
+
 		/* IPv4 */
 		write_ip_start_end(swqe, skb);
 
@@ -1751,8 +1752,8 @@
 			write_tcp_offset_end(swqe, skb);
 
 		} else if (iph->protocol == IPPROTO_UDP) {
-			if ((iph->frag_off & IP_MF) ||
-			    (iph->frag_off & IP_OFFSET))
+			if ((iph->frag_off & IP_MF)
+			    || (iph->frag_off & IP_OFFSET))
 				/* IP fragment, so don't change cs */
 				swqe->tx_control |= EHEA_SWQE_CRC
 						 | EHEA_SWQE_IMM_DATA_PRESENT;
@@ -2407,7 +2408,7 @@
 }
 
 static int ehea_driver_sysfs_add(struct device *dev,
-                                 struct device_driver *driver)
+				 struct device_driver *driver)
 {
 	int ret;
 
@@ -2424,7 +2425,7 @@
 }
 
 static void ehea_driver_sysfs_remove(struct device *dev,
-                                     struct device_driver *driver)
+				     struct device_driver *driver)
 {
 	struct device_driver *drv = driver;
 
@@ -2453,7 +2454,7 @@
 	}
 
 	ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
-        if (ret) {
+	if (ret) {
 		ehea_error("failed to register attributes, ret=%d", ret);
 		goto out_unreg_of_dev;
 	}
@@ -2601,6 +2602,7 @@
 {
 	struct device_node *lhea_dn;
 	struct device_node *eth_dn = NULL;
+
 	const u32 *dn_log_port_id;
 	int i = 0;
 
@@ -2608,7 +2610,7 @@
 	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
 
 		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
-						    NULL);
+						 NULL);
 		if (!dn_log_port_id) {
 			ehea_error("bad device node: eth_dn name=%s",
 				   eth_dn->full_name);
@@ -2648,7 +2650,7 @@
 	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
 
 		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
-						    NULL);
+						 NULL);
 		if (dn_log_port_id)
 			if (*dn_log_port_id == logical_port_id)
 				return eth_dn;
@@ -2789,7 +2791,7 @@
 	adapter->ebus_dev = dev;
 
 	adapter_handle = of_get_property(dev->ofdev.node, "ibm,hea-handle",
-					    NULL);
+					 NULL);
 	if (adapter_handle)
 		adapter->handle = *adapter_handle;
 
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index f24a886..29eaa46 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -211,7 +211,7 @@
 	u64 hret;
 	u64 adapter_handle = cq->adapter->handle;
 
-        /* deregister all previous registered pages */
+	/* deregister all previous registered pages */
 	hret = ehea_h_free_resource(adapter_handle, cq->fw_handle, force);
 	if (hret != H_SUCCESS)
 		return hret;
@@ -362,7 +362,7 @@
 	if (hret != H_SUCCESS) {
 		ehea_error("destroy EQ failed");
 		return -EIO;
-        }
+	}
 
 	return 0;
 }
@@ -507,44 +507,44 @@
 
 u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
 {
-        u64 hret;
-        struct ehea_qp_init_attr *qp_attr = &qp->init_attr;
+	u64 hret;
+	struct ehea_qp_init_attr *qp_attr = &qp->init_attr;
 
 
-        ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle);
-        hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle, force);
-        if (hret != H_SUCCESS)
-                return hret;
+	ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle);
+	hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle, force);
+	if (hret != H_SUCCESS)
+		return hret;
 
-        hw_queue_dtor(&qp->hw_squeue);
-        hw_queue_dtor(&qp->hw_rqueue1);
+	hw_queue_dtor(&qp->hw_squeue);
+	hw_queue_dtor(&qp->hw_rqueue1);
 
-        if (qp_attr->rq_count > 1)
-                hw_queue_dtor(&qp->hw_rqueue2);
-        if (qp_attr->rq_count > 2)
-                hw_queue_dtor(&qp->hw_rqueue3);
-        kfree(qp);
+	if (qp_attr->rq_count > 1)
+		hw_queue_dtor(&qp->hw_rqueue2);
+	if (qp_attr->rq_count > 2)
+		hw_queue_dtor(&qp->hw_rqueue3);
+	kfree(qp);
 
-        return hret;
+	return hret;
 }
 
 int ehea_destroy_qp(struct ehea_qp *qp)
 {
-        u64 hret;
-        if (!qp)
-                return 0;
+	u64 hret;
+	if (!qp)
+		return 0;
 
-        if ((hret = ehea_destroy_qp_res(qp, NORMAL_FREE)) == H_R_STATE) {
-                ehea_error_data(qp->adapter, qp->fw_handle);
-                hret = ehea_destroy_qp_res(qp, FORCE_FREE);
-        }
+	if ((hret = ehea_destroy_qp_res(qp, NORMAL_FREE)) == H_R_STATE) {
+		ehea_error_data(qp->adapter, qp->fw_handle);
+		hret = ehea_destroy_qp_res(qp, FORCE_FREE);
+	}
 
-        if (hret != H_SUCCESS) {
-                ehea_error("destroy QP failed");
-                return -EIO;
-        }
+	if (hret != H_SUCCESS) {
+		ehea_error("destroy QP failed");
+		return -EIO;
+	}
 
-        return 0;
+	return 0;
 }
 
 int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
diff --git a/drivers/net/fec_8xx/Kconfig b/drivers/net/fec_8xx/Kconfig
index a84c232..afb34de 100644
--- a/drivers/net/fec_8xx/Kconfig
+++ b/drivers/net/fec_8xx/Kconfig
@@ -1,6 +1,6 @@
 config FEC_8XX
 	tristate "Motorola 8xx FEC driver"
-	depends on NET_ETHERNET && 8xx
+	depends on 8XX
 	select MII
 
 config FEC_8XX_GENERIC_PHY
diff --git a/drivers/net/fs_enet/Kconfig b/drivers/net/fs_enet/Kconfig
index 6aaee67..e27ee21 100644
--- a/drivers/net/fs_enet/Kconfig
+++ b/drivers/net/fs_enet/Kconfig
@@ -1,6 +1,6 @@
 config FS_ENET
        tristate "Freescale Ethernet Driver"
-       depends on NET_ETHERNET && (CPM1 || CPM2)
+       depends on CPM1 || CPM2
        select MII
 
 config FS_ENET_HAS_SCC
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 1b854bf..d7a1a58 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -130,6 +130,9 @@
 static void free_skb_resources(struct gfar_private *priv);
 static void gfar_set_multi(struct net_device *dev);
 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
+static void gfar_configure_serdes(struct net_device *dev);
+extern int gfar_local_mdio_write(struct gfar_mii *regs, int mii_id,
+				 int regnum, u16 value);
+extern int gfar_local_mdio_read(struct gfar_mii *regs, int mii_id, int regnum);
 #ifdef CONFIG_GFAR_NAPI
 static int gfar_poll(struct net_device *dev, int *budget);
 #endif
@@ -451,6 +454,9 @@
 
 	phydev = phy_connect(dev, phy_id, &adjust_link, 0, interface);
 
+	if (interface == PHY_INTERFACE_MODE_SGMII)
+		gfar_configure_serdes(dev);
+
 	if (IS_ERR(phydev)) {
 		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
 		return PTR_ERR(phydev);
@@ -465,6 +471,27 @@
 	return 0;
 }
 
+static void gfar_configure_serdes(struct net_device *dev)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_mii __iomem *regs =
+			(void __iomem *)&priv->regs->gfar_mii_regs;
+
+	/* Initialise the TBI interface to talk to the SERDES (Lynx PHY) */
+
+	/* Single clk mode, MII mode off (for SERDES communication) */
+	gfar_local_mdio_write(regs, TBIPA_VALUE, MII_TBICON, TBICON_CLK_SELECT);
+
+	/* Supported pause and full-duplex, no half-duplex */
+	gfar_local_mdio_write(regs, TBIPA_VALUE, MII_ADVERTISE,
+			ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
+			ADVERTISE_1000XPSE_ASYM);
+
+	/* ANEG enable, restart ANEG, full duplex mode, speed[1] set */
+	gfar_local_mdio_write(regs, TBIPA_VALUE, MII_BMCR, BMCR_ANENABLE |
+			BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
+}
+
 static void init_registers(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 39e9e32..d8e779c 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -136,6 +136,12 @@
 #define MIIMCFG_RESET           0x80000000
 #define MIIMIND_BUSY            0x00000001
 
+/* TBI register addresses */
+#define MII_TBICON		0x11
+
+/* TBICON register bit fields */
+#define TBICON_CLK_SELECT	0x0020
+
 /* MAC register bits */
 #define MACCFG1_SOFT_RESET	0x80000000
 #define MACCFG1_RESET_RX_MC	0x00080000
diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c
index bcc6b82..5dd34a1 100644
--- a/drivers/net/gianfar_mii.c
+++ b/drivers/net/gianfar_mii.c
@@ -43,13 +43,18 @@
 #include "gianfar.h"
 #include "gianfar_mii.h"
 
-/* Write value to the PHY at mii_id at register regnum,
- * on the bus, waiting until the write is done before returning.
- * All PHY configuration is done through the TSEC1 MIIM regs */
-int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
+/*
+ * Write value to the PHY at mii_id at register regnum, on the bus attached
+ * to the local interface, which may be different from the generic mdio bus
+ * (which is tied to a single interface).  Waits until the write is done
+ * before returning.  This is helpful in programming interfaces like the
+ * TBI, which controls on-chip interfaces such as the SERDES and is always
+ * tied to the local mdio pins; these may not be the same as the system
+ * mdio bus used to control the external PHYs.
+ */
+int gfar_local_mdio_write(struct gfar_mii *regs, int mii_id,
+			  int regnum, u16 value)
 {
-	struct gfar_mii __iomem *regs = (void __iomem *)bus->priv;
-
 	/* Set the PHY address and the register address we want to write */
 	gfar_write(&regs->miimadd, (mii_id << 8) | regnum);
 
@@ -63,12 +68,19 @@
 	return 0;
 }
 
-/* Read the bus for PHY at addr mii_id, register regnum, and
- * return the value.  Clears miimcom first.  All PHY
- * configuration has to be done through the TSEC1 MIIM regs */
-int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+/*
+ * Read the bus for PHY at addr mii_id, register regnum, and return the
+ * value.  Clears miimcom first.  All PHY operation is done on the bus
+ * attached to the local interface, which may be different from the
+ * generic mdio bus.  This is helpful in programming interfaces like the
+ * TBI which, in turn, controls interfaces like the on-chip SERDES and is
+ * always tied to the local mdio pins; these may not be the same as the
+ * system mdio bus used to control the external PHYs.
+ */
+int gfar_local_mdio_read(struct gfar_mii *regs, int mii_id, int regnum)
 {
-	struct gfar_mii __iomem *regs = (void __iomem *)bus->priv;
 	u16 value;
 
 	/* Set the PHY address and the register address we want to read */
@@ -88,6 +100,27 @@
 	return value;
 }
 
+/* Write value to the PHY at mii_id at register regnum,
+ * on the bus, waiting until the write is done before returning.
+ * All PHY configuration is done through the TSEC1 MIIM regs */
+int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
+{
+	struct gfar_mii __iomem *regs = (void __iomem *)bus->priv;
+
+	/* Write to the local MII regs */
+	return gfar_local_mdio_write(regs, mii_id, regnum, value);
+}
+
+/* Read the bus for PHY at addr mii_id, register regnum, and
+ * return the value.  Clears miimcom first.  All PHY
+ * configuration has to be done through the TSEC1 MIIM regs */
+int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+	struct gfar_mii __iomem *regs = (void __iomem *)bus->priv;
+
+	/* Read the local MII regs */
+	return gfar_local_mdio_read(regs, mii_id, regnum);
+}
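
These generic wrappers are what get hung off the MDIO bus structure; the
probe code in this file wires them up essentially like this (a simplified
sketch following this kernel's struct mii_bus -- error handling and the
bus irq map are omitted, and it uses the gfar_mdio_reset helper defined
below):

	/* Sketch only: plug the wrappers above into an mii_bus. */
	static int demo_register_mdio_bus(struct gfar_mii __iomem *regs,
					  struct device *dev)
	{
		struct mii_bus *bus = kzalloc(sizeof(*bus), GFP_KERNEL);

		if (!bus)
			return -ENOMEM;

		bus->name = "Gianfar MII Bus";
		bus->read = gfar_mdio_read;
		bus->write = gfar_mdio_write;
		bus->reset = gfar_mdio_reset;
		bus->priv = (void __force *)regs; /* cast back out in the wrappers */
		bus->dev = dev;

		return mdiobus_register(bus);
	}
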
 
 /* Reset the MIIM registers, and wait for the bus to free */
 int gfar_mdio_reset(struct mii_bus *bus)
diff --git a/drivers/net/lasi_82596.c b/drivers/net/lasi_82596.c
index 741780e..efbae4b 100644
--- a/drivers/net/lasi_82596.c
+++ b/drivers/net/lasi_82596.c
@@ -86,93 +86,36 @@
 #include <linux/dma-mapping.h>
 
 #include <asm/io.h>
-#include <asm/pgtable.h>
 #include <asm/irq.h>
 #include <asm/pdc.h>
-#include <asm/cache.h>
 #include <asm/parisc-device.h>
 
 #define LASI_82596_DRIVER_VERSION "LASI 82596 driver - Revision: 1.30"
 
-/* DEBUG flags
- */
-
-#define DEB_INIT	0x0001
-#define DEB_PROBE	0x0002
-#define DEB_SERIOUS	0x0004
-#define DEB_ERRORS	0x0008
-#define DEB_MULTI	0x0010
-#define DEB_TDR		0x0020
-#define DEB_OPEN	0x0040
-#define DEB_RESET	0x0080
-#define DEB_ADDCMD	0x0100
-#define DEB_STATUS	0x0200
-#define DEB_STARTTX	0x0400
-#define DEB_RXADDR	0x0800
-#define DEB_TXADDR	0x1000
-#define DEB_RXFRAME	0x2000
-#define DEB_INTS	0x4000
-#define DEB_STRUCT	0x8000
-#define DEB_ANY		0xffff
-
-
-#define DEB(x,y)	if (i596_debug & (x)) { y; }
-
-
-#define  CHECK_WBACK(priv, addr,len) \
-	do { dma_cache_sync((priv)->dev, (void *)addr, len, DMA_TO_DEVICE); } while (0)
-
-#define  CHECK_INV(priv, addr,len) \
-	do { dma_cache_sync((priv)->dev, (void *)addr, len, DMA_FROM_DEVICE); } while(0)
-
-#define  CHECK_WBACK_INV(priv, addr,len) \
-	do { dma_cache_sync((priv)->dev, (void *)addr, len, DMA_BIDIRECTIONAL); } while (0)
-
-
 #define PA_I82596_RESET		0	/* Offsets relative to LASI-LAN-Addr.*/
 #define PA_CPU_PORT_L_ACCESS	4
 #define PA_CHANNEL_ATTENTION	8
 
+#define OPT_SWAP_PORT	0x0001	/* Need to wordswp on the MPU port */
 
-/*
- * Define various macros for Channel Attention, word swapping etc., dependent
- * on architecture.  MVME and BVME are 680x0 based, otherwise it is Intel.
- */
+#define DMA_ALLOC                        dma_alloc_noncoherent
+#define DMA_FREE                         dma_free_noncoherent
+#define DMA_WBACK(ndev, addr, len) \
+	do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_TO_DEVICE); } while (0)
 
-#ifdef __BIG_ENDIAN
-#define WSWAPrfd(x)  (((u32)(x)<<16) | ((((u32)(x)))>>16))
-#define WSWAPrbd(x)  (((u32)(x)<<16) | ((((u32)(x)))>>16))
-#define WSWAPiscp(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
-#define WSWAPscb(x)  (((u32)(x)<<16) | ((((u32)(x)))>>16))
-#define WSWAPcmd(x)  (((u32)(x)<<16) | ((((u32)(x)))>>16))
-#define WSWAPtbd(x)  (((u32)(x)<<16) | ((((u32)(x)))>>16))
-#define WSWAPchar(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
-#define ISCP_BUSY	0x00010000
-#define MACH_IS_APRICOT	0
-#else
-#define WSWAPrfd(x)     ((struct i596_rfd *)(x))
-#define WSWAPrbd(x)     ((struct i596_rbd *)(x))
-#define WSWAPiscp(x)    ((struct i596_iscp *)(x))
-#define WSWAPscb(x)     ((struct i596_scb *)(x))
-#define WSWAPcmd(x)     ((struct i596_cmd *)(x))
-#define WSWAPtbd(x)     ((struct i596_tbd *)(x))
-#define WSWAPchar(x)    ((char *)(x))
-#define ISCP_BUSY	0x0001
-#define MACH_IS_APRICOT	1
-#endif
+#define DMA_INV(ndev, addr, len) \
+	do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_FROM_DEVICE); } while (0)
 
-/*
- * The MPU_PORT command allows direct access to the 82596. With PORT access
- * the following commands are available (p5-18). The 32-bit port command
- * must be word-swapped with the most significant word written first.
- * This only applies to VME boards.
- */
-#define PORT_RESET		0x00	/* reset 82596 */
-#define PORT_SELFTEST		0x01	/* selftest */
-#define PORT_ALTSCP		0x02	/* alternate SCB address */
-#define PORT_ALTDUMP		0x03	/* Alternate DUMP address */
+#define DMA_WBACK_INV(ndev, addr, len) \
+	do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_BIDIRECTIONAL); } while (0)
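
These three macros map directly onto the descriptor life cycle when the
DMA memory is noncoherent: write back before the chip reads, invalidate
before the CPU reads, and do both for memory that moves in each direction.
A minimal sketch, with a made-up descriptor rather than a lib82596
structure:

	/* Sketch only: typical use of the sync macros above. */
	struct demo_desc {
		u16 cmd;	/* written by the CPU  */
		u16 status;	/* written by the chip */
	};

	static int demo_issue(struct net_device *ndev, struct demo_desc *desc)
	{
		desc->cmd = 1;				/* CPU fills it in */
		DMA_WBACK(ndev, desc, sizeof(*desc));	/* flush before the chip reads */

		/* ... kick the chip, wait for its interrupt ... */

		DMA_INV(ndev, desc, sizeof(*desc));	/* drop stale cachelines */
		return desc->status;			/* then read the answer */
	}
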
 
-static int i596_debug = (DEB_SERIOUS|DEB_PROBE);
+#define SYSBUS      0x0000006c
+
+/* big endian CPU, 82596 "big" endian mode */
+#define SWAP32(x)   (((u32)(x) << 16) | ((u32)(x) >> 16))
+#define SWAP16(x)   (x)
+
+#include "lib82596.c"
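
Including the shared core as a .c file lets each bus front end pin down
the byte-order and cache-maintenance policy with plain macros before the
common code is compiled. A hypothetical front end for a coherent
little-endian platform would only swap the definitions, e.g.:

	/* Hypothetical front end: coherent bus, no swapping or flushing. */
	#define DMA_ALLOC			dma_alloc_coherent
	#define DMA_FREE			dma_free_coherent
	#define DMA_WBACK(ndev, addr, len)	do { } while (0)
	#define DMA_INV(ndev, addr, len)	do { } while (0)
	#define DMA_WBACK_INV(ndev, addr, len)	do { } while (0)

	#define SWAP32(x)	(x)
	#define SWAP16(x)	(x)

	#include "lib82596.c"
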
 
 MODULE_AUTHOR("Richard Hirst");
 MODULE_DESCRIPTION("i82596 driver");
@@ -180,255 +123,15 @@
 module_param(i596_debug, int, 0);
 MODULE_PARM_DESC(i596_debug, "lasi_82596 debug mask");
 
-/* Copy frames shorter than rx_copybreak, otherwise pass on up in
- * a full sized sk_buff.  Value of 100 stolen from tulip.c (!alpha).
- */
-static int rx_copybreak = 100;
-
-#define MAX_DRIVERS	4	/* max count of drivers */
-
-#define PKT_BUF_SZ	1536
-#define MAX_MC_CNT	64
-
-#define I596_NULL ((u32)0xffffffff)
-
-#define CMD_EOL		0x8000	/* The last command of the list, stop. */
-#define CMD_SUSP	0x4000	/* Suspend after doing cmd. */
-#define CMD_INTR	0x2000	/* Interrupt after doing cmd. */
-
-#define CMD_FLEX	0x0008	/* Enable flexible memory model */
-
-enum commands {
-	CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
-	CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
-};
-
-#define STAT_C		0x8000	/* Set to 0 after execution */
-#define STAT_B		0x4000	/* Command being executed */
-#define STAT_OK		0x2000	/* Command executed ok */
-#define STAT_A		0x1000	/* Command aborted */
-
-#define	 CUC_START	0x0100
-#define	 CUC_RESUME	0x0200
-#define	 CUC_SUSPEND    0x0300
-#define	 CUC_ABORT	0x0400
-#define	 RX_START	0x0010
-#define	 RX_RESUME	0x0020
-#define	 RX_SUSPEND	0x0030
-#define	 RX_ABORT	0x0040
-
-#define TX_TIMEOUT	5
-
-#define OPT_SWAP_PORT	0x0001	/* Need to wordswp on the MPU port */
-
-
-struct i596_reg {
-	unsigned short porthi;
-	unsigned short portlo;
-	u32            ca;
-};
-
-#define EOF		0x8000
-#define SIZE_MASK	0x3fff
-
-struct i596_tbd {
-	unsigned short size;
-	unsigned short pad;
-	dma_addr_t     next;
-	dma_addr_t     data;
-	u32 cache_pad[5];		/* Total 32 bytes... */
-};
-
-/* The command structure has two 'next' pointers; v_next is the address of
- * the next command as seen by the CPU, b_next is the address of the next
- * command as seen by the 82596.  The b_next pointer, as used by the 82596
- * always references the status field of the next command, rather than the
- * v_next field, because the 82596 is unaware of v_next.  It may seem more
- * logical to put v_next at the end of the structure, but we cannot do that
- * because the 82596 expects other fields to be there, depending on command
- * type.
- */
-
-struct i596_cmd {
-	struct i596_cmd *v_next;	/* Address from CPUs viewpoint */
-	unsigned short status;
-	unsigned short command;
-	dma_addr_t     b_next;	/* Address from i596 viewpoint */
-};
-
-struct tx_cmd {
-	struct i596_cmd cmd;
-	dma_addr_t     tbd;
-	unsigned short size;
-	unsigned short pad;
-	struct sk_buff *skb;		/* So we can free it after tx */
-	dma_addr_t dma_addr;
-#ifdef __LP64__
-	u32 cache_pad[6];		/* Total 64 bytes... */
-#else
-	u32 cache_pad[1];		/* Total 32 bytes... */
-#endif
-};
-
-struct tdr_cmd {
-	struct i596_cmd cmd;
-	unsigned short status;
-	unsigned short pad;
-};
-
-struct mc_cmd {
-	struct i596_cmd cmd;
-	short mc_cnt;
-	char mc_addrs[MAX_MC_CNT*6];
-};
-
-struct sa_cmd {
-	struct i596_cmd cmd;
-	char eth_addr[8];
-};
-
-struct cf_cmd {
-	struct i596_cmd cmd;
-	char i596_config[16];
-};
-
-struct i596_rfd {
-	unsigned short stat;
-	unsigned short cmd;
-	dma_addr_t     b_next;	/* Address from i596 viewpoint */
-	dma_addr_t     rbd;
-	unsigned short count;
-	unsigned short size;
-	struct i596_rfd *v_next;	/* Address from CPUs viewpoint */
-	struct i596_rfd *v_prev;
-#ifndef __LP64__
-	u32 cache_pad[2];		/* Total 32 bytes... */
-#endif
-};
-
-struct i596_rbd {
-    /* hardware data */
-    unsigned short count;
-    unsigned short zero1;
-    dma_addr_t     b_next;
-    dma_addr_t     b_data;		/* Address from i596 viewpoint */
-    unsigned short size;
-    unsigned short zero2;
-    /* driver data */
-    struct sk_buff *skb;
-    struct i596_rbd *v_next;
-    dma_addr_t     b_addr;		/* This rbd addr from i596 view */
-    unsigned char *v_data;		/* Address from CPUs viewpoint */
-					/* Total 32 bytes... */
-#ifdef __LP64__
-    u32 cache_pad[4];
-#endif
-};
-
-/* These values as chosen so struct i596_private fits in one page... */
-
-#define TX_RING_SIZE 32
-#define RX_RING_SIZE 16
-
-struct i596_scb {
-	unsigned short status;
-	unsigned short command;
-	dma_addr_t    cmd;
-	dma_addr_t    rfd;
-	u32           crc_err;
-	u32           align_err;
-	u32           resource_err;
-	u32           over_err;
-	u32           rcvdt_err;
-	u32           short_err;
-	unsigned short t_on;
-	unsigned short t_off;
-};
-
-struct i596_iscp {
-	u32           stat;
-	dma_addr_t    scb;
-};
-
-struct i596_scp {
-	u32           sysbus;
-	u32            pad;
-	dma_addr_t    iscp;
-};
-
-struct i596_private {
-	volatile struct i596_scp scp		__attribute__((aligned(32)));
-	volatile struct i596_iscp iscp		__attribute__((aligned(32)));
-	volatile struct i596_scb scb		__attribute__((aligned(32)));
-	struct sa_cmd sa_cmd			__attribute__((aligned(32)));
-	struct cf_cmd cf_cmd			__attribute__((aligned(32)));
-	struct tdr_cmd tdr_cmd			__attribute__((aligned(32)));
-	struct mc_cmd mc_cmd			__attribute__((aligned(32)));
-	struct i596_rfd rfds[RX_RING_SIZE]	__attribute__((aligned(32)));
-	struct i596_rbd rbds[RX_RING_SIZE]	__attribute__((aligned(32)));
-	struct tx_cmd tx_cmds[TX_RING_SIZE]	__attribute__((aligned(32)));
-	struct i596_tbd tbds[TX_RING_SIZE]	__attribute__((aligned(32)));
-	u32    stat;
-	int last_restart;
-	struct i596_rfd *rfd_head;
-	struct i596_rbd *rbd_head;
-	struct i596_cmd *cmd_tail;
-	struct i596_cmd *cmd_head;
-	int cmd_backlog;
-	u32    last_cmd;
-	struct net_device_stats stats;
-	int next_tx_cmd;
-	int options;
-	spinlock_t lock;
-	dma_addr_t dma_addr;
-	struct device *dev;
-};
-
-static const char init_setup[] =
-{
-	0x8E,			/* length, prefetch on */
-	0xC8,			/* fifo to 8, monitor off */
-	0x80,			/* don't save bad frames */
-	0x2E,			/* No source address insertion, 8 byte preamble */
-	0x00,			/* priority and backoff defaults */
-	0x60,			/* interframe spacing */
-	0x00,			/* slot time LSB */
-	0xf2,			/* slot time and retries */
-	0x00,			/* promiscuous mode */
-	0x00,			/* collision detect */
-	0x40,			/* minimum frame length */
-	0xff,
-	0x00,
-	0x7f /*  *multi IA */ };
-
-static int i596_open(struct net_device *dev);
-static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
-static irqreturn_t i596_interrupt(int irq, void *dev_id);
-static int i596_close(struct net_device *dev);
-static struct net_device_stats *i596_get_stats(struct net_device *dev);
-static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
-static void i596_tx_timeout (struct net_device *dev);
-static void print_eth(unsigned char *buf, char *str);
-static void set_multicast_list(struct net_device *dev);
-
-static int rx_ring_size = RX_RING_SIZE;
-static int ticks_limit = 100;
-static int max_cmd_backlog = TX_RING_SIZE-1;
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void i596_poll_controller(struct net_device *dev);
-#endif
-
-
-static inline void CA(struct net_device *dev)
+static inline void ca(struct net_device *dev)
 {
 	gsc_writel(0, dev->base_addr + PA_CHANNEL_ATTENTION);
 }
 
 
-static inline void MPU_PORT(struct net_device *dev, int c, dma_addr_t x)
+static void mpu_port(struct net_device *dev, int c, dma_addr_t x)
 {
-	struct i596_private *lp = dev->priv;
+	struct i596_private *lp = netdev_priv(dev);
 
 	u32 v = (u32) (c) | (u32) (x);
 	u16 a, b;
@@ -446,1078 +149,15 @@
 	gsc_writel(b, dev->base_addr + PA_CPU_PORT_L_ACCESS);
 }
 
-
-static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
-{
-	CHECK_INV(lp, &(lp->iscp), sizeof(struct i596_iscp));
-	while (--delcnt && lp->iscp.stat) {
-		udelay(10);
-		CHECK_INV(lp, &(lp->iscp), sizeof(struct i596_iscp));
-	}
-	if (!delcnt) {
-		printk("%s: %s, iscp.stat %04x, didn't clear\n",
-		     dev->name, str, lp->iscp.stat);
-		return -1;
-	}
-	else
-		return 0;
-}
-
-
-static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
-{
-	CHECK_INV(lp, &(lp->scb), sizeof(struct i596_scb));
-	while (--delcnt && lp->scb.command) {
-		udelay(10);
-		CHECK_INV(lp, &(lp->scb), sizeof(struct i596_scb));
-	}
-	if (!delcnt) {
-		printk("%s: %s, status %4.4x, cmd %4.4x.\n",
-		     dev->name, str, lp->scb.status, lp->scb.command);
-		return -1;
-	}
-	else
-		return 0;
-}
-
-
-static void i596_display_data(struct net_device *dev)
-{
-	struct i596_private *lp = dev->priv;
-	struct i596_cmd *cmd;
-	struct i596_rfd *rfd;
-	struct i596_rbd *rbd;
-
-	printk("lp and scp at %p, .sysbus = %08x, .iscp = %08x\n",
-	       &lp->scp, lp->scp.sysbus, lp->scp.iscp);
-	printk("iscp at %p, iscp.stat = %08x, .scb = %08x\n",
-	       &lp->iscp, lp->iscp.stat, lp->iscp.scb);
-	printk("scb at %p, scb.status = %04x, .command = %04x,"
-		" .cmd = %08x, .rfd = %08x\n",
-	       &lp->scb, lp->scb.status, lp->scb.command,
-		lp->scb.cmd, lp->scb.rfd);
-	printk("   errors: crc %x, align %x, resource %x,"
-               " over %x, rcvdt %x, short %x\n",
-		lp->scb.crc_err, lp->scb.align_err, lp->scb.resource_err,
-		lp->scb.over_err, lp->scb.rcvdt_err, lp->scb.short_err);
-	cmd = lp->cmd_head;
-	while (cmd != NULL) {
-		printk("cmd at %p, .status = %04x, .command = %04x, .b_next = %08x\n",
-		  cmd, cmd->status, cmd->command, cmd->b_next);
-		cmd = cmd->v_next;
-	}
-	rfd = lp->rfd_head;
-	printk("rfd_head = %p\n", rfd);
-	do {
-		printk("   %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
-                        " count %04x\n",
-			rfd, rfd->stat, rfd->cmd, rfd->b_next, rfd->rbd,
-			rfd->count);
-		rfd = rfd->v_next;
-	} while (rfd != lp->rfd_head);
-	rbd = lp->rbd_head;
-	printk("rbd_head = %p\n", rbd);
-	do {
-		printk("   %p .count %04x, b_next %08x, b_data %08x, size %04x\n",
-			rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size);
-		rbd = rbd->v_next;
-	} while (rbd != lp->rbd_head);
-	CHECK_INV(lp, lp, sizeof(struct i596_private));
-}
-
-
-#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
-static void i596_error(int irq, void *dev_id)
-{
-	struct net_device *dev = dev_id;
-	volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
-
-	pcc2[0x28] = 1;
-	pcc2[0x2b] = 0x1d;
-	printk("%s: Error interrupt\n", dev->name);
-	i596_display_data(dev);
-}
-#endif
-
-#define virt_to_dma(lp,v) ((lp)->dma_addr + (dma_addr_t)((unsigned long)(v)-(unsigned long)(lp)))
-
-static inline void init_rx_bufs(struct net_device *dev)
-{
-	struct i596_private *lp = dev->priv;
-	int i;
-	struct i596_rfd *rfd;
-	struct i596_rbd *rbd;
-
-	/* First build the Receive Buffer Descriptor List */
-
-	for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
-		dma_addr_t dma_addr;
-		struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ + 4);
-
-		if (skb == NULL)
-			panic("%s: alloc_skb() failed", __FILE__);
-		skb_reserve(skb, 2);
-		dma_addr = dma_map_single(lp->dev, skb->data,PKT_BUF_SZ,
-					  DMA_FROM_DEVICE);
-		skb->dev = dev;
-		rbd->v_next = rbd+1;
-		rbd->b_next = WSWAPrbd(virt_to_dma(lp,rbd+1));
-		rbd->b_addr = WSWAPrbd(virt_to_dma(lp,rbd));
-		rbd->skb = skb;
-		rbd->v_data = skb->data;
-		rbd->b_data = WSWAPchar(dma_addr);
-		rbd->size = PKT_BUF_SZ;
-	}
-	lp->rbd_head = lp->rbds;
-	rbd = lp->rbds + rx_ring_size - 1;
-	rbd->v_next = lp->rbds;
-	rbd->b_next = WSWAPrbd(virt_to_dma(lp,lp->rbds));
-
-	/* Now build the Receive Frame Descriptor List */
-
-	for (i = 0, rfd = lp->rfds; i < rx_ring_size; i++, rfd++) {
-		rfd->rbd = I596_NULL;
-		rfd->v_next = rfd+1;
-		rfd->v_prev = rfd-1;
-		rfd->b_next = WSWAPrfd(virt_to_dma(lp,rfd+1));
-		rfd->cmd = CMD_FLEX;
-	}
-	lp->rfd_head = lp->rfds;
-	lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
-	rfd = lp->rfds;
-	rfd->rbd = WSWAPrbd(virt_to_dma(lp,lp->rbd_head));
-	rfd->v_prev = lp->rfds + rx_ring_size - 1;
-	rfd = lp->rfds + rx_ring_size - 1;
-	rfd->v_next = lp->rfds;
-	rfd->b_next = WSWAPrfd(virt_to_dma(lp,lp->rfds));
-	rfd->cmd = CMD_EOL|CMD_FLEX;
-
-	CHECK_WBACK_INV(lp, lp, sizeof(struct i596_private));
-}
-
-static inline void remove_rx_bufs(struct net_device *dev)
-{
-	struct i596_private *lp = dev->priv;
-	struct i596_rbd *rbd;
-	int i;
-
-	for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
-		if (rbd->skb == NULL)
-			break;
-		dma_unmap_single(lp->dev,
-				 (dma_addr_t)WSWAPchar(rbd->b_data),
-				 PKT_BUF_SZ, DMA_FROM_DEVICE);
-		dev_kfree_skb(rbd->skb);
-	}
-}
-
-
-static void rebuild_rx_bufs(struct net_device *dev)
-{
-	struct i596_private *lp = dev->priv;
-	int i;
-
-	/* Ensure rx frame/buffer descriptors are tidy */
-
-	for (i = 0; i < rx_ring_size; i++) {
-		lp->rfds[i].rbd = I596_NULL;
-		lp->rfds[i].cmd = CMD_FLEX;
-	}
-	lp->rfds[rx_ring_size-1].cmd = CMD_EOL|CMD_FLEX;
-	lp->rfd_head = lp->rfds;
-	lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
-	lp->rbd_head = lp->rbds;
-	lp->rfds[0].rbd = WSWAPrbd(virt_to_dma(lp,lp->rbds));
-
-	CHECK_WBACK_INV(lp, lp, sizeof(struct i596_private));
-}
-
-
-static int init_i596_mem(struct net_device *dev)
-{
-	struct i596_private *lp = dev->priv;
-	unsigned long flags;
-
-	disable_irq(dev->irq);	/* disable IRQs from LAN */
-	DEB(DEB_INIT,
-		printk("RESET 82596 port: %lx (with IRQ %d disabled)\n",
-		       (dev->base_addr + PA_I82596_RESET),
-		       dev->irq));
-
-	gsc_writel(0, (dev->base_addr + PA_I82596_RESET)); /* Hard Reset */
-	udelay(100);			/* Wait 100us - seems to help */
-
-	/* change the scp address */
-
-	lp->last_cmd = jiffies;
-
-
-	lp->scp.sysbus = 0x0000006c;
-	lp->scp.iscp = WSWAPiscp(virt_to_dma(lp,&(lp->iscp)));
-	lp->iscp.scb = WSWAPscb(virt_to_dma(lp,&(lp->scb)));
-	lp->iscp.stat = ISCP_BUSY;
-	lp->cmd_backlog = 0;
-
-	lp->cmd_head = NULL;
-        lp->scb.cmd = I596_NULL;
-
-	DEB(DEB_INIT, printk("%s: starting i82596.\n", dev->name));
-
-	CHECK_WBACK(lp, &(lp->scp), sizeof(struct i596_scp));
-	CHECK_WBACK(lp, &(lp->iscp), sizeof(struct i596_iscp));
-
-	MPU_PORT(dev, PORT_ALTSCP, virt_to_dma(lp,&lp->scp));
-
-	CA(dev);
-
-	if (wait_istat(dev, lp, 1000, "initialization timed out"))
-		goto failed;
-	DEB(DEB_INIT, printk("%s: i82596 initialization successful\n", dev->name));
-
-	/* Ensure rx frame/buffer descriptors are tidy */
-	rebuild_rx_bufs(dev);
-
-	lp->scb.command = 0;
-	CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
-
-	enable_irq(dev->irq);	/* enable IRQs from LAN */
-
-	DEB(DEB_INIT, printk("%s: queuing CmdConfigure\n", dev->name));
-	memcpy(lp->cf_cmd.i596_config, init_setup, sizeof(init_setup));
-	lp->cf_cmd.cmd.command = CmdConfigure;
-	CHECK_WBACK(lp, &(lp->cf_cmd), sizeof(struct cf_cmd));
-	i596_add_cmd(dev, &lp->cf_cmd.cmd);
-
-	DEB(DEB_INIT, printk("%s: queuing CmdSASetup\n", dev->name));
-	memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
-	lp->sa_cmd.cmd.command = CmdSASetup;
-	CHECK_WBACK(lp, &(lp->sa_cmd), sizeof(struct sa_cmd));
-	i596_add_cmd(dev, &lp->sa_cmd.cmd);
-
-	DEB(DEB_INIT, printk("%s: queuing CmdTDR\n", dev->name));
-	lp->tdr_cmd.cmd.command = CmdTDR;
-	CHECK_WBACK(lp, &(lp->tdr_cmd), sizeof(struct tdr_cmd));
-	i596_add_cmd(dev, &lp->tdr_cmd.cmd);
-
-	spin_lock_irqsave (&lp->lock, flags);
-
-	if (wait_cmd(dev, lp, 1000, "timed out waiting to issue RX_START")) {
-		spin_unlock_irqrestore (&lp->lock, flags);
-		goto failed;
-	}
-	DEB(DEB_INIT, printk("%s: Issuing RX_START\n", dev->name));
-	lp->scb.command = RX_START;
-	lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
-	CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
-
-	CA(dev);
-
-	spin_unlock_irqrestore (&lp->lock, flags);
-
-	if (wait_cmd(dev, lp, 1000, "RX_START not processed"))
-		goto failed;
-	DEB(DEB_INIT, printk("%s: Receive unit started OK\n", dev->name));
-
-	return 0;
-
-failed:
-	printk("%s: Failed to initialise 82596\n", dev->name);
-	MPU_PORT(dev, PORT_RESET, 0);
-	return -1;
-}
-
-
-static inline int i596_rx(struct net_device *dev)
-{
-	struct i596_private *lp = dev->priv;
-	struct i596_rfd *rfd;
-	struct i596_rbd *rbd;
-	int frames = 0;
-
-	DEB(DEB_RXFRAME, printk("i596_rx(), rfd_head %p, rbd_head %p\n",
-			lp->rfd_head, lp->rbd_head));
-
-
-	rfd = lp->rfd_head;		/* Ref next frame to check */
-
-	CHECK_INV(lp, rfd, sizeof(struct i596_rfd));
-	while ((rfd->stat) & STAT_C) {	/* Loop while complete frames */
-		if (rfd->rbd == I596_NULL)
-			rbd = NULL;
-		else if (rfd->rbd == lp->rbd_head->b_addr) {
-			rbd = lp->rbd_head;
-			CHECK_INV(lp, rbd, sizeof(struct i596_rbd));
-		}
-		else {
-			printk("%s: rbd chain broken!\n", dev->name);
-			/* XXX Now what? */
-			rbd = NULL;
-		}
-		DEB(DEB_RXFRAME, printk("  rfd %p, rfd.rbd %08x, rfd.stat %04x\n",
-			rfd, rfd->rbd, rfd->stat));
-
-		if (rbd != NULL && ((rfd->stat) & STAT_OK)) {
-			/* a good frame */
-			int pkt_len = rbd->count & 0x3fff;
-			struct sk_buff *skb = rbd->skb;
-			int rx_in_place = 0;
-
-			DEB(DEB_RXADDR,print_eth(rbd->v_data, "received"));
-			frames++;
-
-			/* Check if the packet is long enough to just accept
-			 * without copying to a properly sized skbuff.
-			 */
-
-			if (pkt_len > rx_copybreak) {
-				struct sk_buff *newskb;
-				dma_addr_t dma_addr;
-
-				dma_unmap_single(lp->dev,(dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, DMA_FROM_DEVICE);
-				/* Get fresh skbuff to replace filled one. */
-				newskb = dev_alloc_skb(PKT_BUF_SZ + 4);
-				if (newskb == NULL) {
-					skb = NULL;	/* drop pkt */
-					goto memory_squeeze;
-				}
-				skb_reserve(newskb, 2);
-
-				/* Pass up the skb already on the Rx ring. */
-				skb_put(skb, pkt_len);
-				rx_in_place = 1;
-				rbd->skb = newskb;
-				newskb->dev = dev;
-				dma_addr = dma_map_single(lp->dev, newskb->data, PKT_BUF_SZ, DMA_FROM_DEVICE);
-				rbd->v_data = newskb->data;
-				rbd->b_data = WSWAPchar(dma_addr);
-				CHECK_WBACK_INV(lp, rbd, sizeof(struct i596_rbd));
-			}
-			else
-				skb = dev_alloc_skb(pkt_len + 2);
-memory_squeeze:
-			if (skb == NULL) {
-				/* XXX tulip.c can defer packets here!! */
-				printk("%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
-				lp->stats.rx_dropped++;
-			}
-			else {
-				if (!rx_in_place) {
-					/* 16 byte align the data fields */
-					dma_sync_single_for_cpu(lp->dev, (dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, DMA_FROM_DEVICE);
-					skb_reserve(skb, 2);
-					memcpy(skb_put(skb,pkt_len), rbd->v_data, pkt_len);
-					dma_sync_single_for_device(lp->dev, (dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, DMA_FROM_DEVICE);
-				}
-				skb->len = pkt_len;
-				skb->protocol=eth_type_trans(skb,dev);
-				netif_rx(skb);
-				dev->last_rx = jiffies;
-				lp->stats.rx_packets++;
-				lp->stats.rx_bytes+=pkt_len;
-			}
-		}
-		else {
-			DEB(DEB_ERRORS, printk("%s: Error, rfd.stat = 0x%04x\n",
-					dev->name, rfd->stat));
-			lp->stats.rx_errors++;
-			if ((rfd->stat) & 0x0001)
-				lp->stats.collisions++;
-			if ((rfd->stat) & 0x0080)
-				lp->stats.rx_length_errors++;
-			if ((rfd->stat) & 0x0100)
-				lp->stats.rx_over_errors++;
-			if ((rfd->stat) & 0x0200)
-				lp->stats.rx_fifo_errors++;
-			if ((rfd->stat) & 0x0400)
-				lp->stats.rx_frame_errors++;
-			if ((rfd->stat) & 0x0800)
-				lp->stats.rx_crc_errors++;
-			if ((rfd->stat) & 0x1000)
-				lp->stats.rx_length_errors++;
-		}
-
-		/* Clear the buffer descriptor count and EOF + F flags */
-
-		if (rbd != NULL && (rbd->count & 0x4000)) {
-			rbd->count = 0;
-			lp->rbd_head = rbd->v_next;
-			CHECK_WBACK_INV(lp, rbd, sizeof(struct i596_rbd));
-		}
-
-		/* Tidy the frame descriptor, marking it as end of list */
-
-		rfd->rbd = I596_NULL;
-		rfd->stat = 0;
-		rfd->cmd = CMD_EOL|CMD_FLEX;
-		rfd->count = 0;
-
-		/* Remove end-of-list from old end descriptor */
-
-		rfd->v_prev->cmd = CMD_FLEX;
-
-		/* Update record of next frame descriptor to process */
-
-		lp->scb.rfd = rfd->b_next;
-		lp->rfd_head = rfd->v_next;
-		CHECK_WBACK_INV(lp, rfd->v_prev, sizeof(struct i596_rfd));
-		CHECK_WBACK_INV(lp, rfd, sizeof(struct i596_rfd));
-		rfd = lp->rfd_head;
-		CHECK_INV(lp, rfd, sizeof(struct i596_rfd));
-	}
-
-	DEB(DEB_RXFRAME, printk("frames %d\n", frames));
-
-	return 0;
-}
-
-
-static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
-{
-	struct i596_cmd *ptr;
-
-	while (lp->cmd_head != NULL) {
-		ptr = lp->cmd_head;
-		lp->cmd_head = ptr->v_next;
-		lp->cmd_backlog--;
-
-		switch ((ptr->command) & 0x7) {
-		case CmdTx:
-			{
-				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
-				struct sk_buff *skb = tx_cmd->skb;
-				dma_unmap_single(lp->dev, tx_cmd->dma_addr, skb->len, DMA_TO_DEVICE);
-
-				dev_kfree_skb(skb);
-
-				lp->stats.tx_errors++;
-				lp->stats.tx_aborted_errors++;
-
-				ptr->v_next = NULL;
-				ptr->b_next = I596_NULL;
-				tx_cmd->cmd.command = 0;  /* Mark as free */
-				break;
-			}
-		default:
-			ptr->v_next = NULL;
-			ptr->b_next = I596_NULL;
-		}
-		CHECK_WBACK_INV(lp, ptr, sizeof(struct i596_cmd));
-	}
-
-	wait_cmd(dev, lp, 100, "i596_cleanup_cmd timed out");
-	lp->scb.cmd = I596_NULL;
-	CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
-}
-
-
-static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
-{
-	unsigned long flags;
-
-	DEB(DEB_RESET, printk("i596_reset\n"));
-
-	spin_lock_irqsave (&lp->lock, flags);
-
-	wait_cmd(dev, lp, 100, "i596_reset timed out");
-
-	netif_stop_queue(dev);
-
-	/* FIXME: this command might cause an lpmc */
-	lp->scb.command = CUC_ABORT | RX_ABORT;
-	CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
-	CA(dev);
-
-	/* wait for shutdown */
-	wait_cmd(dev, lp, 1000, "i596_reset 2 timed out");
-	spin_unlock_irqrestore (&lp->lock, flags);
-
-	i596_cleanup_cmd(dev,lp);
-	i596_rx(dev);
-
-	netif_start_queue(dev);
-	init_i596_mem(dev);
-}
-
-
-static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
-{
-	struct i596_private *lp = dev->priv;
-	unsigned long flags;
-
-	DEB(DEB_ADDCMD, printk("i596_add_cmd cmd_head %p\n", lp->cmd_head));
-
-	cmd->status = 0;
-	cmd->command |= (CMD_EOL | CMD_INTR);
-	cmd->v_next = NULL;
-	cmd->b_next = I596_NULL;
-	CHECK_WBACK(lp, cmd, sizeof(struct i596_cmd));
-
-	spin_lock_irqsave (&lp->lock, flags);
-
-	if (lp->cmd_head != NULL) {
-		lp->cmd_tail->v_next = cmd;
-		lp->cmd_tail->b_next = WSWAPcmd(virt_to_dma(lp,&cmd->status));
-		CHECK_WBACK(lp, lp->cmd_tail, sizeof(struct i596_cmd));
-	} else {
-		lp->cmd_head = cmd;
-		wait_cmd(dev, lp, 100, "i596_add_cmd timed out");
-		lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&cmd->status));
-		lp->scb.command = CUC_START;
-		CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
-		CA(dev);
-	}
-	lp->cmd_tail = cmd;
-	lp->cmd_backlog++;
-
-	spin_unlock_irqrestore (&lp->lock, flags);
-
-	if (lp->cmd_backlog > max_cmd_backlog) {
-		unsigned long tickssofar = jiffies - lp->last_cmd;
-
-		if (tickssofar < ticks_limit)
-			return;
-
-		printk("%s: command unit timed out, status resetting.\n", dev->name);
-#if 1
-		i596_reset(dev, lp);
-#endif
-	}
-}
-
-#if 0
-/* this function makes a perfectly adequate probe...  but we have a
-   device list */
-static int i596_test(struct net_device *dev)
-{
-	struct i596_private *lp = dev->priv;
-	volatile int *tint;
-	u32 data;
-
-	tint = (volatile int *)(&(lp->scp));
-	data = virt_to_dma(lp,tint);
-
-	tint[1] = -1;
-	CHECK_WBACK(lp, tint, PAGE_SIZE);
-
-	MPU_PORT(dev, 1, data);
-
-	for(data = 1000000; data; data--) {
-		CHECK_INV(lp, tint, PAGE_SIZE);
-		if(tint[1] != -1)
-			break;
-
-	}
-
-	printk("i596_test result %d\n", tint[1]);
-
-}
-#endif
-
-
-static int i596_open(struct net_device *dev)
-{
-	DEB(DEB_OPEN, printk("%s: i596_open() irq %d.\n", dev->name, dev->irq));
-
-	if (request_irq(dev->irq, &i596_interrupt, 0, "i82596", dev)) {
-		printk("%s: IRQ %d not free\n", dev->name, dev->irq);
-		goto out;
-	}
-
-	init_rx_bufs(dev);
-
-	if (init_i596_mem(dev)) {
-		printk("%s: Failed to init memory\n", dev->name);
-		goto out_remove_rx_bufs;
-	}
-
-	netif_start_queue(dev);
-
-	return 0;
-
-out_remove_rx_bufs:
-	remove_rx_bufs(dev);
-	free_irq(dev->irq, dev);
-out:
-	return -EAGAIN;
-}
-
-static void i596_tx_timeout (struct net_device *dev)
-{
-	struct i596_private *lp = dev->priv;
-
-	/* Transmitter timeout, serious problems. */
-	DEB(DEB_ERRORS, printk("%s: transmit timed out, status resetting.\n",
-			dev->name));
-
-	lp->stats.tx_errors++;
-
-	/* Try to restart the adaptor */
-	if (lp->last_restart == lp->stats.tx_packets) {
-		DEB(DEB_ERRORS, printk("Resetting board.\n"));
-		/* Shutdown and restart */
-		i596_reset (dev, lp);
-	} else {
-		/* Issue a channel attention signal */
-		DEB(DEB_ERRORS, printk("Kicking board.\n"));
-		lp->scb.command = CUC_START | RX_START;
-		CHECK_WBACK_INV(lp, &(lp->scb), sizeof(struct i596_scb));
-		CA (dev);
-		lp->last_restart = lp->stats.tx_packets;
-	}
-
-	dev->trans_start = jiffies;
-	netif_wake_queue (dev);
-}
-
-
-static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-	struct i596_private *lp = dev->priv;
-	struct tx_cmd *tx_cmd;
-	struct i596_tbd *tbd;
-	short length = skb->len;
-	dev->trans_start = jiffies;
-
-	DEB(DEB_STARTTX, printk("%s: i596_start_xmit(%x,%p) called\n", dev->name,
-				skb->len, skb->data));
-
-	if (length < ETH_ZLEN) {
-		if (skb_padto(skb, ETH_ZLEN))
-			return 0;
-		length = ETH_ZLEN;
-	}
-
-	netif_stop_queue(dev);
-
-	tx_cmd = lp->tx_cmds + lp->next_tx_cmd;
-	tbd = lp->tbds + lp->next_tx_cmd;
-
-	if (tx_cmd->cmd.command) {
-		DEB(DEB_ERRORS, printk("%s: xmit ring full, dropping packet.\n",
-				dev->name));
-		lp->stats.tx_dropped++;
-
-		dev_kfree_skb(skb);
-	} else {
-		if (++lp->next_tx_cmd == TX_RING_SIZE)
-			lp->next_tx_cmd = 0;
-		tx_cmd->tbd = WSWAPtbd(virt_to_dma(lp,tbd));
-		tbd->next = I596_NULL;
-
-		tx_cmd->cmd.command = CMD_FLEX | CmdTx;
-		tx_cmd->skb = skb;
-
-		tx_cmd->pad = 0;
-		tx_cmd->size = 0;
-		tbd->pad = 0;
-		tbd->size = EOF | length;
-
-		tx_cmd->dma_addr = dma_map_single(lp->dev, skb->data, skb->len,
-				DMA_TO_DEVICE);
-		tbd->data = WSWAPchar(tx_cmd->dma_addr);
-
-		DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued"));
-		CHECK_WBACK_INV(lp, tx_cmd, sizeof(struct tx_cmd));
-		CHECK_WBACK_INV(lp, tbd, sizeof(struct i596_tbd));
-		i596_add_cmd(dev, &tx_cmd->cmd);
-
-		lp->stats.tx_packets++;
-		lp->stats.tx_bytes += length;
-	}
-
-	netif_start_queue(dev);
-
-	return 0;
-}
-
-static void print_eth(unsigned char *add, char *str)
-{
-	int i;
-
-	printk("i596 0x%p, ", add);
-	for (i = 0; i < 6; i++)
-		printk(" %02X", add[i + 6]);
-	printk(" -->");
-	for (i = 0; i < 6; i++)
-		printk(" %02X", add[i]);
-	printk(" %02X%02X, %s\n", add[12], add[13], str);
-}
-
-
 #define LAN_PROM_ADDR	0xF0810000
 
-static int __devinit i82596_probe(struct net_device *dev,
-				  struct device *gen_dev)
-{
-	int i;
-	struct i596_private *lp;
-	char eth_addr[6];
-	dma_addr_t dma_addr;
-
-	/* This lot is ensure things have been cache line aligned. */
-	BUILD_BUG_ON(sizeof(struct i596_rfd) != 32);
-	BUILD_BUG_ON(sizeof(struct i596_rbd) &  31);
-	BUILD_BUG_ON(sizeof(struct tx_cmd)   &  31);
-	BUILD_BUG_ON(sizeof(struct i596_tbd) != 32);
-#ifndef __LP64__
-	BUILD_BUG_ON(sizeof(struct i596_private) > 4096);
-#endif
-
-	if (!dev->base_addr || !dev->irq)
-		return -ENODEV;
-
-	if (pdc_lan_station_id(eth_addr, dev->base_addr)) {
-		for (i=0; i < 6; i++) {
-			eth_addr[i] = gsc_readb(LAN_PROM_ADDR + i);
-		}
-		printk(KERN_INFO "%s: MAC of HP700 LAN read from EEPROM\n", __FILE__);
-	}
-
-	dev->mem_start = (unsigned long) dma_alloc_noncoherent(gen_dev,
-		sizeof(struct i596_private), &dma_addr, GFP_KERNEL);
-	if (!dev->mem_start) {
-		printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
-		return -ENOMEM;
-	}
-
-	for (i = 0; i < 6; i++)
-		dev->dev_addr[i] = eth_addr[i];
-
-	/* The 82596-specific entries in the device structure. */
-	dev->open = i596_open;
-	dev->stop = i596_close;
-	dev->hard_start_xmit = i596_start_xmit;
-	dev->get_stats = i596_get_stats;
-	dev->set_multicast_list = set_multicast_list;
-	dev->tx_timeout = i596_tx_timeout;
-	dev->watchdog_timeo = TX_TIMEOUT;
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	dev->poll_controller = i596_poll_controller;
-#endif
-
-	dev->priv = (void *)(dev->mem_start);
-
-	lp = dev->priv;
-	memset(lp, 0, sizeof(struct i596_private));
-
-	lp->scb.command = 0;
-	lp->scb.cmd = I596_NULL;
-	lp->scb.rfd = I596_NULL;
-	spin_lock_init(&lp->lock);
-	lp->dma_addr = dma_addr;
-	lp->dev = gen_dev;
-
-	CHECK_WBACK_INV(lp, dev->mem_start, sizeof(struct i596_private));
-
-	i = register_netdev(dev);
-	if (i) {
-		lp = dev->priv;
-		dma_free_noncoherent(lp->dev, sizeof(struct i596_private),
-				    (void *)dev->mem_start, lp->dma_addr);
-		return i;
-	};
-
-	DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx,", dev->name, dev->base_addr));
-	for (i = 0; i < 6; i++)
-		DEB(DEB_PROBE, printk(" %2.2X", dev->dev_addr[i]));
-	DEB(DEB_PROBE, printk(" IRQ %d.\n", dev->irq));
-	DEB(DEB_INIT, printk(KERN_INFO "%s: lp at 0x%p (%d bytes), lp->scb at 0x%p\n",
-		dev->name, lp, (int)sizeof(struct i596_private), &lp->scb));
-
-	return 0;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void i596_poll_controller(struct net_device *dev)
-{
-	disable_irq(dev->irq);
-	i596_interrupt(dev->irq, dev);
-	enable_irq(dev->irq);
-}
-#endif
-
-static irqreturn_t i596_interrupt(int irq, void *dev_id)
-{
-	struct net_device *dev = dev_id;
-	struct i596_private *lp;
-	unsigned short status, ack_cmd = 0;
-
-	if (dev == NULL) {
-		printk("%s: irq %d for unknown device.\n", __FUNCTION__, irq);
-		return IRQ_NONE;
-	}
-
-	lp = dev->priv;
-
-	spin_lock (&lp->lock);
-
-	wait_cmd(dev, lp, 100, "i596 interrupt, timeout");
-	status = lp->scb.status;
-
-	DEB(DEB_INTS, printk("%s: i596 interrupt, IRQ %d, status %4.4x.\n",
-			dev->name, irq, status));
-
-	ack_cmd = status & 0xf000;
-
-	if (!ack_cmd) {
-		DEB(DEB_ERRORS, printk("%s: interrupt with no events\n", dev->name));
-		spin_unlock (&lp->lock);
-		return IRQ_NONE;
-	}
-
-	if ((status & 0x8000) || (status & 0x2000)) {
-		struct i596_cmd *ptr;
-
-		if ((status & 0x8000))
-			DEB(DEB_INTS, printk("%s: i596 interrupt completed command.\n", dev->name));
-		if ((status & 0x2000))
-			DEB(DEB_INTS, printk("%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));
-
-		while (lp->cmd_head != NULL) {
-			CHECK_INV(lp, lp->cmd_head, sizeof(struct i596_cmd));
-			if (!(lp->cmd_head->status & STAT_C))
-				break;
-
-			ptr = lp->cmd_head;
-
-			DEB(DEB_STATUS, printk("cmd_head->status = %04x, ->command = %04x\n",
-				       lp->cmd_head->status, lp->cmd_head->command));
-			lp->cmd_head = ptr->v_next;
-			lp->cmd_backlog--;
-
-			switch ((ptr->command) & 0x7) {
-			case CmdTx:
-			    {
-				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
-				struct sk_buff *skb = tx_cmd->skb;
-
-				if ((ptr->status) & STAT_OK) {
-					DEB(DEB_TXADDR, print_eth(skb->data, "tx-done"));
-				} else {
-					lp->stats.tx_errors++;
-					if ((ptr->status) & 0x0020)
-						lp->stats.collisions++;
-					if (!((ptr->status) & 0x0040))
-						lp->stats.tx_heartbeat_errors++;
-					if ((ptr->status) & 0x0400)
-						lp->stats.tx_carrier_errors++;
-					if ((ptr->status) & 0x0800)
-						lp->stats.collisions++;
-					if ((ptr->status) & 0x1000)
-						lp->stats.tx_aborted_errors++;
-				}
-				dma_unmap_single(lp->dev, tx_cmd->dma_addr, skb->len, DMA_TO_DEVICE);
-				dev_kfree_skb_irq(skb);
-
-				tx_cmd->cmd.command = 0; /* Mark free */
-				break;
-			    }
-			case CmdTDR:
-			    {
-				unsigned short status = ((struct tdr_cmd *)ptr)->status;
-
-				if (status & 0x8000) {
-					DEB(DEB_ANY, printk("%s: link ok.\n", dev->name));
-				} else {
-					if (status & 0x4000)
-						printk("%s: Transceiver problem.\n", dev->name);
-					if (status & 0x2000)
-						printk("%s: Termination problem.\n", dev->name);
-					if (status & 0x1000)
-						printk("%s: Short circuit.\n", dev->name);
-
-					DEB(DEB_TDR, printk("%s: Time %d.\n", dev->name, status & 0x07ff));
-				}
-				break;
-			    }
-			case CmdConfigure:
-				/* Zap command so set_multicast_list() knows it is free */
-				ptr->command = 0;
-				break;
-			}
-			ptr->v_next = NULL;
-		        ptr->b_next = I596_NULL;
-			CHECK_WBACK(lp, ptr, sizeof(struct i596_cmd));
-			lp->last_cmd = jiffies;
-		}
-
-		/* This mess is arranging that only the last of any outstanding
-		 * commands has the interrupt bit set.  Should probably really
-		 * only add to the cmd queue when the CU is stopped.
-		 */
-		ptr = lp->cmd_head;
-		while ((ptr != NULL) && (ptr != lp->cmd_tail)) {
-			struct i596_cmd *prev = ptr;
-
-			ptr->command &= 0x1fff;
-			ptr = ptr->v_next;
-			CHECK_WBACK_INV(lp, prev, sizeof(struct i596_cmd));
-		}
-
-		if ((lp->cmd_head != NULL))
-			ack_cmd |= CUC_START;
-		lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&lp->cmd_head->status));
-		CHECK_WBACK_INV(lp, &lp->scb, sizeof(struct i596_scb));
-	}
-	if ((status & 0x1000) || (status & 0x4000)) {
-		if ((status & 0x4000))
-			DEB(DEB_INTS, printk("%s: i596 interrupt received a frame.\n", dev->name));
-		i596_rx(dev);
-		/* Only RX_START if stopped - RGH 07-07-96 */
-		if (status & 0x1000) {
-			if (netif_running(dev)) {
-				DEB(DEB_ERRORS, printk("%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status));
-				ack_cmd |= RX_START;
-				lp->stats.rx_errors++;
-				lp->stats.rx_fifo_errors++;
-				rebuild_rx_bufs(dev);
-			}
-		}
-	}
-	wait_cmd(dev, lp, 100, "i596 interrupt, timeout");
-	lp->scb.command = ack_cmd;
-	CHECK_WBACK(lp, &lp->scb, sizeof(struct i596_scb));
-
-	/* DANGER: I suspect that some kind of interrupt
-	 acknowledgement aside from acking the 82596 might be needed
-	 here...  but it's running acceptably without */
-
-	CA(dev);
-
-	wait_cmd(dev, lp, 100, "i596 interrupt, exit timeout");
-	DEB(DEB_INTS, printk("%s: exiting interrupt.\n", dev->name));
-
-	spin_unlock (&lp->lock);
-	return IRQ_HANDLED;
-}
-
-static int i596_close(struct net_device *dev)
-{
-	struct i596_private *lp = dev->priv;
-	unsigned long flags;
-
-	netif_stop_queue(dev);
-
-	DEB(DEB_INIT, printk("%s: Shutting down ethercard, status was %4.4x.\n",
-		       dev->name, lp->scb.status));
-
-	spin_lock_irqsave(&lp->lock, flags);
-
-	wait_cmd(dev, lp, 100, "close1 timed out");
-	lp->scb.command = CUC_ABORT | RX_ABORT;
-	CHECK_WBACK(lp, &lp->scb, sizeof(struct i596_scb));
-
-	CA(dev);
-
-	wait_cmd(dev, lp, 100, "close2 timed out");
-	spin_unlock_irqrestore(&lp->lock, flags);
-	DEB(DEB_STRUCT,i596_display_data(dev));
-	i596_cleanup_cmd(dev,lp);
-
-	disable_irq(dev->irq);
-
-	free_irq(dev->irq, dev);
-	remove_rx_bufs(dev);
-
-	return 0;
-}
-
-static struct net_device_stats *
- i596_get_stats(struct net_device *dev)
-{
-	struct i596_private *lp = dev->priv;
-
-	return &lp->stats;
-}
-
-/*
- *    Set or clear the multicast filter for this adaptor.
- */
-
-static void set_multicast_list(struct net_device *dev)
-{
-	struct i596_private *lp = dev->priv;
-	int config = 0, cnt;
-
-	DEB(DEB_MULTI, printk("%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
-		dev->name, dev->mc_count, dev->flags & IFF_PROMISC ? "ON" : "OFF",
-		dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
-
-	if ((dev->flags & IFF_PROMISC) && !(lp->cf_cmd.i596_config[8] & 0x01)) {
-		lp->cf_cmd.i596_config[8] |= 0x01;
-		config = 1;
-	}
-	if (!(dev->flags & IFF_PROMISC) && (lp->cf_cmd.i596_config[8] & 0x01)) {
-		lp->cf_cmd.i596_config[8] &= ~0x01;
-		config = 1;
-	}
-	if ((dev->flags & IFF_ALLMULTI) && (lp->cf_cmd.i596_config[11] & 0x20)) {
-		lp->cf_cmd.i596_config[11] &= ~0x20;
-		config = 1;
-	}
-	if (!(dev->flags & IFF_ALLMULTI) && !(lp->cf_cmd.i596_config[11] & 0x20)) {
-		lp->cf_cmd.i596_config[11] |= 0x20;
-		config = 1;
-	}
-	if (config) {
-		if (lp->cf_cmd.cmd.command)
-			printk("%s: config change request already queued\n",
-			       dev->name);
-		else {
-			lp->cf_cmd.cmd.command = CmdConfigure;
-			CHECK_WBACK_INV(lp, &lp->cf_cmd, sizeof(struct cf_cmd));
-			i596_add_cmd(dev, &lp->cf_cmd.cmd);
-		}
-	}
-
-	cnt = dev->mc_count;
-	if (cnt > MAX_MC_CNT)
-	{
-		cnt = MAX_MC_CNT;
-		printk("%s: Only %d multicast addresses supported",
-			dev->name, cnt);
-	}
-
-	if (dev->mc_count > 0) {
-		struct dev_mc_list *dmi;
-		unsigned char *cp;
-		struct mc_cmd *cmd;
-
-		cmd = &lp->mc_cmd;
-		cmd->cmd.command = CmdMulticastList;
-		cmd->mc_cnt = dev->mc_count * 6;
-		cp = cmd->mc_addrs;
-		for (dmi = dev->mc_list; cnt && dmi != NULL; dmi = dmi->next, cnt--, cp += 6) {
-			memcpy(cp, dmi->dmi_addr, 6);
-			if (i596_debug > 1)
-				DEB(DEB_MULTI, printk("%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n",
-						dev->name, cp[0],cp[1],cp[2],cp[3],cp[4],cp[5]));
-		}
-		CHECK_WBACK_INV(lp, &lp->mc_cmd, sizeof(struct mc_cmd));
-		i596_add_cmd(dev, &cmd->cmd);
-	}
-}
-
-static int debug = -1;
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "lasi_82596 debug mask");
-
-static int num_drivers;
-static struct net_device *netdevs[MAX_DRIVERS];
-
 static int __devinit
 lan_init_chip(struct parisc_device *dev)
 {
 	struct	net_device *netdevice;
+	struct i596_private *lp;
 	int	retval;
-
-	if (num_drivers >= MAX_DRIVERS) {
-		/* max count of possible i82596 drivers reached */
-		return -ENOMEM;
-	}
-
-	if (num_drivers == 0)
-		printk(KERN_INFO LASI_82596_DRIVER_VERSION "\n");
+	int i;
 
 	if (!dev->irq) {
 		printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n",
@@ -1528,28 +168,45 @@
 	printk(KERN_INFO "Found i82596 at 0x%lx, IRQ %d\n", dev->hpa.start,
 			dev->irq);
 
-	netdevice = alloc_etherdev(0);
+	netdevice = alloc_etherdev(sizeof(struct i596_private));
 	if (!netdevice)
 		return -ENOMEM;
+	SET_NETDEV_DEV(netdevice, &dev->dev);
+	parisc_set_drvdata (dev, netdevice);
 
 	netdevice->base_addr = dev->hpa.start;
 	netdevice->irq = dev->irq;
 
-	retval = i82596_probe(netdevice, &dev->dev);
+	if (pdc_lan_station_id(netdevice->dev_addr, netdevice->base_addr)) {
+		for (i = 0; i < 6; i++) {
+			netdevice->dev_addr[i] = gsc_readb(LAN_PROM_ADDR + i);
+		}
+		printk(KERN_INFO
+		       "%s: MAC of HP700 LAN read from EEPROM\n", __FILE__);
+	}
+
+	lp = netdev_priv(netdevice);
+	lp->options = dev->id.sversion == 0x72 ? OPT_SWAP_PORT : 0;
+
+	retval = i82596_probe(netdevice);
 	if (retval) {
 		free_netdev(netdevice);
 		return -ENODEV;
 	}
-
-	if (dev->id.sversion == 0x72) {
-		((struct i596_private *)netdevice->priv)->options = OPT_SWAP_PORT;
-	}
-
-	netdevs[num_drivers++] = netdevice;
-
 	return retval;
 }
 
+static int __devexit lan_remove_chip (struct parisc_device *pdev)
+{
+	struct net_device *dev = parisc_get_drvdata(pdev);
+	struct i596_private *lp = netdev_priv(dev);
+
+	unregister_netdev (dev);
+	DMA_FREE(&pdev->dev, sizeof(struct i596_private),
+		 (void *)lp->dma, lp->dma_addr);
+	free_netdev (dev);
+	return 0;
+}
 
 static struct parisc_device_id lan_tbl[] = {
 	{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008a },
@@ -1563,12 +220,12 @@
 	.name		= "lasi_82596",
 	.id_table	= lan_tbl,
 	.probe		= lan_init_chip,
+	.remove         = __devexit_p(lan_remove_chip),
 };
 
 static int __devinit lasi_82596_init(void)
 {
-	if (debug >= 0)
-		i596_debug = debug;
+	printk(KERN_INFO LASI_82596_DRIVER_VERSION "\n");
 	return register_parisc_driver(&lan_driver);
 }
 
@@ -1576,25 +233,6 @@
 
 static void __exit lasi_82596_exit(void)
 {
-	int i;
-
-	for (i=0; i<MAX_DRIVERS; i++) {
-		struct i596_private *lp;
-		struct net_device *netdevice;
-
-		netdevice = netdevs[i];
-		if (!netdevice)
-			continue;
-
-		unregister_netdev(netdevice);
-
-		lp = netdevice->priv;
-		dma_free_noncoherent(lp->dev, sizeof(struct i596_private),
-				       (void *)netdevice->mem_start, lp->dma_addr);
-		free_netdev(netdevice);
-	}
-	num_drivers = 0;
-
 	unregister_parisc_driver(&lan_driver);
 }
 
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
new file mode 100644
index 0000000..5884f5b
--- /dev/null
+++ b/drivers/net/lib82596.c
@@ -0,0 +1,1434 @@
+/* lib82596.c -- driver for the Intel 82596 Ethernet controller, as
+   munged into HPPA boxen.
+
+   This driver is based upon 82596.c, original credits are below...
+   but there were too many hoops which HP wants jumped through to
+   keep this code in there in a sane manner.
+
+   3 primary sources of the mess --
+   1) hppa needs *lots* of cacheline flushing to keep this kind of
+   MMIO running.
+
+   2) The 82596 needs to see all of its pointers as their physical
+   address.  Thus virt_to_bus/bus_to_virt are *everywhere*.
+
+   3) The implementation HP is using seems to be significantly pickier
+   about when and how the command and RX units are started.  Some
+   command ordering was changed.
+
+   Examination of the mach driver leads one to believe that there
+   might be a saner way to pull this off...  anyone who feels like a
+   full rewrite can be my guest.
+
+   Split 02/13/2000 Sam Creasey (sammy@oh.verio.com)
+
+   02/01/2000  Initial modifications for parisc by Helge Deller (deller@gmx.de)
+   03/02/2000  changes for better/correct(?) cache-flushing (deller)
+*/
+
+/* 82596.c: A generic 82596 ethernet driver for linux. */
+/*
+   Based on Apricot.c
+   Written 1994 by Mark Evans.
+   This driver is for the Apricot 82596 bus-master interface
+
+   Modularised 12/94 Mark Evans
+
+
+   Modified to support the 82596 ethernet chips on 680x0 VME boards.
+   by Richard Hirst <richard@sleepie.demon.co.uk>
+   Renamed to be 82596.c
+
+   980825:  Changed to receive directly in to sk_buffs which are
+   allocated at open() time.  Eliminates copy on incoming frames
+   (small ones are still copied).  Shared data now held in a
+   non-cached page, so we can run on 68060 in copyback mode.
+
+   TBD:
+   * look at deferring rx frames rather than discarding (as per tulip)
+   * handle tx ring full as per tulip
+   * performance test to tune rx_copybreak
+
+   Most of my modifications relate to the braindead big-endian
+   implementation by Intel.  When the i596 is operating in
+   'big-endian' mode, it thinks a 32 bit value of 0x12345678
+   should be stored as 0x56781234.  This is a real pain, when
+   you have linked lists which are shared by the 680x0 and the
+   i596.
+
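+   The SWAP16()/SWAP32() macros used below are supplied by the including
+   driver and must match how it runs the chip.  As a sketch, a big-endian
+   host driving the chip in big-endian mode would need a halfword swap:
+
+	#define SWAP32(x) ((((u32)(x)) << 16) | (((u32)(x)) >> 16))
+	#define SWAP16(x) (x)
+
+   so that SWAP32(0x12345678) yields the 0x56781234 layout described
+   above.
+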
+   Driver skeleton
+   Written 1993 by Donald Becker.
+   Copyright 1993 United States Government as represented by the Director,
+   National Security Agency. This software may only be used and distributed
+   according to the terms of the GNU General Public License as modified by SRC,
+   incorporated herein by reference.
+
+   The author may be reached as becker@scyld.com, or C/O
+   Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
+
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/irq.h>
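+
+/* Note: this file is not built on its own; a platform driver (e.g.
+ * lasi_82596.c) is expected to #include it after defining the glue it
+ * uses: SYSBUS, SWAP16()/SWAP32(), DMA_ALLOC()/DMA_FREE() and the
+ * DMA_WBACK()/DMA_INV()/DMA_WBACK_INV() cache helpers, plus the ca()
+ * and mpu_port() accessors declared further down.
+ */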
+
+/* DEBUG flags
+ */
+
+#define DEB_INIT	0x0001
+#define DEB_PROBE	0x0002
+#define DEB_SERIOUS	0x0004
+#define DEB_ERRORS	0x0008
+#define DEB_MULTI	0x0010
+#define DEB_TDR		0x0020
+#define DEB_OPEN	0x0040
+#define DEB_RESET	0x0080
+#define DEB_ADDCMD	0x0100
+#define DEB_STATUS	0x0200
+#define DEB_STARTTX	0x0400
+#define DEB_RXADDR	0x0800
+#define DEB_TXADDR	0x1000
+#define DEB_RXFRAME	0x2000
+#define DEB_INTS	0x4000
+#define DEB_STRUCT	0x8000
+#define DEB_ANY		0xffff
+
+
+#define DEB(x, y)	if (i596_debug & (x)) { y; }
+
+
+/*
+ * The MPU_PORT command allows direct access to the 82596. With PORT access
+ * the following commands are available (p5-18). The 32-bit port command
+ * must be word-swapped with the most significant word written first.
+ * This only applies to VME boards.
+ */
+#define PORT_RESET		0x00	/* reset 82596 */
+#define PORT_SELFTEST		0x01	/* selftest */
+#define PORT_ALTSCP		0x02	/* alternate SCB address */
+#define PORT_ALTDUMP		0x03	/* Alternate DUMP address */
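+
+/* A sketch of the word-swapped PORT write described above, with the
+ * register accessor assumed to come from the including driver (the
+ * names below are illustrative, not defined in this file):
+ *
+ *	u32 v = (u32)cmd | (u32)addr;
+ *	gsc_writel(v >> 16, lp->mpu_port);	(MSW first)
+ *	wmb();
+ *	gsc_writel(v & 0xffff, lp->mpu_port);
+ */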
+
+static int i596_debug = (DEB_SERIOUS|DEB_PROBE);
+
+/* Copy frames shorter than rx_copybreak, otherwise pass on up in
+ * a full sized sk_buff.  Value of 100 stolen from tulip.c (!alpha).
+ */
+static int rx_copybreak = 100;
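+/* E.g. with the default of 100, a 60-byte frame is copied into a fresh
+ * skb, while a 1500-byte frame has its ring buffer handed up and
+ * replaced (see the pkt_len > rx_copybreak test in i596_rx()).
+ */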
+
+#define PKT_BUF_SZ	1536
+#define MAX_MC_CNT	64
+
+#define ISCP_BUSY	0x0001
+
+#define I596_NULL ((u32)0xffffffff)
+
+#define CMD_EOL		0x8000	/* The last command of the list, stop. */
+#define CMD_SUSP	0x4000	/* Suspend after doing cmd. */
+#define CMD_INTR	0x2000	/* Interrupt after doing cmd. */
+
+#define CMD_FLEX	0x0008	/* Enable flexible memory model */
+
+enum commands {
+	CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
+	CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
+};
+
+#define STAT_C		0x8000	/* Set to 0 after execution */
+#define STAT_B		0x4000	/* Command being executed */
+#define STAT_OK		0x2000	/* Command executed ok */
+#define STAT_A		0x1000	/* Command aborted */
+
+#define	 CUC_START	0x0100
+#define	 CUC_RESUME	0x0200
+#define	 CUC_SUSPEND    0x0300
+#define	 CUC_ABORT	0x0400
+#define	 RX_START	0x0010
+#define	 RX_RESUME	0x0020
+#define	 RX_SUSPEND	0x0030
+#define	 RX_ABORT	0x0040
+
+#define TX_TIMEOUT	5
+
+
+struct i596_reg {
+	unsigned short porthi;
+	unsigned short portlo;
+	u32            ca;
+};
+
+#define EOF		0x8000
+#define SIZE_MASK	0x3fff
+
+struct i596_tbd {
+	unsigned short size;
+	unsigned short pad;
+	dma_addr_t     next;
+	dma_addr_t     data;
+	u32 cache_pad[5];		/* Total 32 bytes... */
+};
+
+/* The command structure has two 'next' pointers; v_next is the address of
+ * the next command as seen by the CPU, b_next is the address of the next
+ * command as seen by the 82596.  The b_next pointer, as used by the 82596,
+ * always references the status field of the next command, rather than the
+ * v_next field, because the 82596 is unaware of v_next.  It may seem more
+ * logical to put v_next at the end of the structure, but we cannot do that
+ * because the 82596 expects other fields to be there, depending on command
+ * type.
+ */
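+/* For example, appending command b after command a (cf. i596_add_cmd()):
+ *
+ *	a->v_next = b;
+ *	a->b_next = SWAP32(virt_to_dma(lp, &b->status));
+ */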
+
+struct i596_cmd {
+	struct i596_cmd *v_next;	/* Address from CPU's viewpoint */
+	unsigned short status;
+	unsigned short command;
+	dma_addr_t     b_next;	/* Address from i596 viewpoint */
+};
+
+struct tx_cmd {
+	struct i596_cmd cmd;
+	dma_addr_t     tbd;
+	unsigned short size;
+	unsigned short pad;
+	struct sk_buff *skb;		/* So we can free it after tx */
+	dma_addr_t dma_addr;
+#ifdef __LP64__
+	u32 cache_pad[6];		/* Total 64 bytes... */
+#else
+	u32 cache_pad[1];		/* Total 32 bytes... */
+#endif
+};
+
+struct tdr_cmd {
+	struct i596_cmd cmd;
+	unsigned short status;
+	unsigned short pad;
+};
+
+struct mc_cmd {
+	struct i596_cmd cmd;
+	short mc_cnt;
+	char mc_addrs[MAX_MC_CNT*6];
+};
+
+struct sa_cmd {
+	struct i596_cmd cmd;
+	char eth_addr[8];
+};
+
+struct cf_cmd {
+	struct i596_cmd cmd;
+	char i596_config[16];
+};
+
+struct i596_rfd {
+	unsigned short stat;
+	unsigned short cmd;
+	dma_addr_t     b_next;	/* Address from i596 viewpoint */
+	dma_addr_t     rbd;
+	unsigned short count;
+	unsigned short size;
+	struct i596_rfd *v_next;	/* Address from CPU's viewpoint */
+	struct i596_rfd *v_prev;
+#ifndef __LP64__
+	u32 cache_pad[2];		/* Total 32 bytes... */
+#endif
+};
+
+struct i596_rbd {
+    /* hardware data */
+    unsigned short count;
+    unsigned short zero1;
+    dma_addr_t     b_next;
+    dma_addr_t     b_data;		/* Address from i596 viewpoint */
+    unsigned short size;
+    unsigned short zero2;
+    /* driver data */
+    struct sk_buff *skb;
+    struct i596_rbd *v_next;
+    dma_addr_t     b_addr;		/* This rbd addr from i596 view */
+    unsigned char *v_data;		/* Address from CPU's viewpoint */
+					/* Total 32 bytes... */
+#ifdef __LP64__
+    u32 cache_pad[4];
+#endif
+};
+
+/* These values are chosen so struct i596_dma fits in one page... */
+
+#define TX_RING_SIZE 32
+#define RX_RING_SIZE 16
+
+struct i596_scb {
+	unsigned short status;
+	unsigned short command;
+	dma_addr_t    cmd;
+	dma_addr_t    rfd;
+	u32           crc_err;
+	u32           align_err;
+	u32           resource_err;
+	u32           over_err;
+	u32           rcvdt_err;
+	u32           short_err;
+	unsigned short t_on;
+	unsigned short t_off;
+};
+
+struct i596_iscp {
+	u32           stat;
+	dma_addr_t    scb;
+};
+
+struct i596_scp {
+	u32           sysbus;
+	u32           pad;
+	dma_addr_t    iscp;
+};
+
+struct i596_dma {
+	struct i596_scp scp		        __attribute__((aligned(32)));
+	volatile struct i596_iscp iscp		__attribute__((aligned(32)));
+	volatile struct i596_scb scb		__attribute__((aligned(32)));
+	struct sa_cmd sa_cmd			__attribute__((aligned(32)));
+	struct cf_cmd cf_cmd			__attribute__((aligned(32)));
+	struct tdr_cmd tdr_cmd			__attribute__((aligned(32)));
+	struct mc_cmd mc_cmd			__attribute__((aligned(32)));
+	struct i596_rfd rfds[RX_RING_SIZE]	__attribute__((aligned(32)));
+	struct i596_rbd rbds[RX_RING_SIZE]	__attribute__((aligned(32)));
+	struct tx_cmd tx_cmds[TX_RING_SIZE]	__attribute__((aligned(32)));
+	struct i596_tbd tbds[TX_RING_SIZE]	__attribute__((aligned(32)));
+};
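+
+/* The aligned(32) annotations assume a 32-byte cache-line granularity:
+ * a DMA_WBACK()/DMA_INV() on one descriptor must not touch a line shared
+ * with its neighbour.  The BUILD_BUG_ON checks in i82596_probe() enforce
+ * the matching structure sizes.
+ */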
+
+struct i596_private {
+	struct i596_dma *dma;
+	u32    stat;
+	int last_restart;
+	struct i596_rfd *rfd_head;
+	struct i596_rbd *rbd_head;
+	struct i596_cmd *cmd_tail;
+	struct i596_cmd *cmd_head;
+	int cmd_backlog;
+	u32    last_cmd;
+	struct net_device_stats stats;
+	int next_tx_cmd;
+	int options;
+	spinlock_t lock;       /* serialize access to chip */
+	dma_addr_t dma_addr;
+	void __iomem *mpu_port;
+	void __iomem *ca;
+};
+
+static const char init_setup[] =
+{
+	0x8E,		/* length, prefetch on */
+	0xC8,		/* fifo to 8, monitor off */
+	0x80,		/* don't save bad frames */
+	0x2E,		/* No source address insertion, 8 byte preamble */
+	0x00,		/* priority and backoff defaults */
+	0x60,		/* interframe spacing */
+	0x00,		/* slot time LSB */
+	0xf2,		/* slot time and retries */
+	0x00,		/* promiscuous mode */
+	0x00,		/* collision detect */
+	0x40,		/* minimum frame length */
+	0xff,
+	0x00,
+	0x7f /*  *multi IA */ };
+
+static int i596_open(struct net_device *dev);
+static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t i596_interrupt(int irq, void *dev_id);
+static int i596_close(struct net_device *dev);
+static struct net_device_stats *i596_get_stats(struct net_device *dev);
+static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
+static void i596_tx_timeout (struct net_device *dev);
+static void print_eth(unsigned char *buf, char *str);
+static void set_multicast_list(struct net_device *dev);
+static inline void ca(struct net_device *dev);
+static void mpu_port(struct net_device *dev, int c, dma_addr_t x);
+
+static int rx_ring_size = RX_RING_SIZE;
+static int ticks_limit = 100;
+static int max_cmd_backlog = TX_RING_SIZE-1;
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void i596_poll_controller(struct net_device *dev);
+#endif
+
+
+static inline int wait_istat(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
+{
+	DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp));
+	while (--delcnt && dma->iscp.stat) {
+		udelay(10);
+		DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp));
+	}
+	if (!delcnt) {
+		printk(KERN_ERR "%s: %s, iscp.stat %04x, didn't clear\n",
+		     dev->name, str, SWAP16(dma->iscp.stat));
+		return -1;
+	} else
+		return 0;
+}
+
+
+static inline int wait_cmd(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
+{
+	DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb));
+	while (--delcnt && dma->scb.command) {
+		udelay(10);
+		DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb));
+	}
+	if (!delcnt) {
+		printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
+		       dev->name, str,
+		       SWAP16(dma->scb.status),
+		       SWAP16(dma->scb.command));
+		return -1;
+	} else
+		return 0;
+}
+
+
+static void i596_display_data(struct net_device *dev)
+{
+	struct i596_private *lp = netdev_priv(dev);
+	struct i596_dma *dma = lp->dma;
+	struct i596_cmd *cmd;
+	struct i596_rfd *rfd;
+	struct i596_rbd *rbd;
+
+	printk(KERN_DEBUG "lp and scp at %p, .sysbus = %08x, .iscp = %08x\n",
+	       &dma->scp, dma->scp.sysbus, SWAP32(dma->scp.iscp));
+	printk(KERN_DEBUG "iscp at %p, iscp.stat = %08x, .scb = %08x\n",
+	       &dma->iscp, SWAP32(dma->iscp.stat), SWAP32(dma->iscp.scb));
+	printk(KERN_DEBUG "scb at %p, scb.status = %04x, .command = %04x,"
+		" .cmd = %08x, .rfd = %08x\n",
+	       &dma->scb, SWAP16(dma->scb.status), SWAP16(dma->scb.command),
+		SWAP16(dma->scb.cmd), SWAP32(dma->scb.rfd));
+	printk(KERN_DEBUG "   errors: crc %x, align %x, resource %x,"
+	       " over %x, rcvdt %x, short %x\n",
+	       SWAP32(dma->scb.crc_err), SWAP32(dma->scb.align_err),
+	       SWAP32(dma->scb.resource_err), SWAP32(dma->scb.over_err),
+	       SWAP32(dma->scb.rcvdt_err), SWAP32(dma->scb.short_err));
+	cmd = lp->cmd_head;
+	while (cmd != NULL) {
+		printk(KERN_DEBUG
+		       "cmd at %p, .status = %04x, .command = %04x,"
+		       " .b_next = %08x\n",
+		       cmd, SWAP16(cmd->status), SWAP16(cmd->command),
+		       SWAP32(cmd->b_next));
+		cmd = cmd->v_next;
+	}
+	rfd = lp->rfd_head;
+	printk(KERN_DEBUG "rfd_head = %p\n", rfd);
+	do {
+		printk(KERN_DEBUG
+		       "   %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
+		       " count %04x\n",
+		       rfd, SWAP16(rfd->stat), SWAP16(rfd->cmd),
+		       SWAP32(rfd->b_next), SWAP32(rfd->rbd),
+		       SWAP16(rfd->count));
+		rfd = rfd->v_next;
+	} while (rfd != lp->rfd_head);
+	rbd = lp->rbd_head;
+	printk(KERN_DEBUG "rbd_head = %p\n", rbd);
+	do {
+		printk(KERN_DEBUG
+		       "   %p .count %04x, b_next %08x, b_data %08x,"
+		       " size %04x\n",
+			rbd, SWAP16(rbd->count), SWAP32(rbd->b_next),
+		       SWAP32(rbd->b_data), SWAP16(rbd->size));
+		rbd = rbd->v_next;
+	} while (rbd != lp->rbd_head);
+	DMA_INV(dev, dma, sizeof(struct i596_dma));
+}
+
+
+#define virt_to_dma(lp, v) ((lp)->dma_addr + (dma_addr_t)((unsigned long)(v)-(unsigned long)((lp)->dma)))
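+
+/* Example: the bus address of the SCB within the shared block is the
+ * same offset applied to the mapping's DMA base:
+ *
+ *	dma_addr_t scb_bus = virt_to_dma(lp, &lp->dma->scb);
+ */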
+
+static inline int init_rx_bufs(struct net_device *dev)
+{
+	struct i596_private *lp = netdev_priv(dev);
+	struct i596_dma *dma = lp->dma;
+	int i;
+	struct i596_rfd *rfd;
+	struct i596_rbd *rbd;
+
+	/* First build the Receive Buffer Descriptor List */
+
+	for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) {
+		dma_addr_t dma_addr;
+		struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ + 4);
+
+		if (skb == NULL)
+			return -1;
+		skb_reserve(skb, 2);
+		dma_addr = dma_map_single(dev->dev.parent, skb->data,
+					  PKT_BUF_SZ, DMA_FROM_DEVICE);
+		rbd->v_next = rbd+1;
+		rbd->b_next = SWAP32(virt_to_dma(lp, rbd+1));
+		rbd->b_addr = SWAP32(virt_to_dma(lp, rbd));
+		rbd->skb = skb;
+		rbd->v_data = skb->data;
+		rbd->b_data = SWAP32(dma_addr);
+		rbd->size = SWAP16(PKT_BUF_SZ);
+	}
+	lp->rbd_head = dma->rbds;
+	rbd = dma->rbds + rx_ring_size - 1;
+	rbd->v_next = dma->rbds;
+	rbd->b_next = SWAP32(virt_to_dma(lp, dma->rbds));
+
+	/* Now build the Receive Frame Descriptor List */
+
+	for (i = 0, rfd = dma->rfds; i < rx_ring_size; i++, rfd++) {
+		rfd->rbd = I596_NULL;
+		rfd->v_next = rfd+1;
+		rfd->v_prev = rfd-1;
+		rfd->b_next = SWAP32(virt_to_dma(lp, rfd+1));
+		rfd->cmd = SWAP16(CMD_FLEX);
+	}
+	lp->rfd_head = dma->rfds;
+	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
+	rfd = dma->rfds;
+	rfd->rbd = SWAP32(virt_to_dma(lp, lp->rbd_head));
+	rfd->v_prev = dma->rfds + rx_ring_size - 1;
+	rfd = dma->rfds + rx_ring_size - 1;
+	rfd->v_next = dma->rfds;
+	rfd->b_next = SWAP32(virt_to_dma(lp, dma->rfds));
+	rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
+
+	DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
+	return 0;
+}
+
+static inline void remove_rx_bufs(struct net_device *dev)
+{
+	struct i596_private *lp = netdev_priv(dev);
+	struct i596_rbd *rbd;
+	int i;
+
+	for (i = 0, rbd = lp->dma->rbds; i < rx_ring_size; i++, rbd++) {
+		if (rbd->skb == NULL)
+			break;
+		dma_unmap_single(dev->dev.parent,
+				 (dma_addr_t)SWAP32(rbd->b_data),
+				 PKT_BUF_SZ, DMA_FROM_DEVICE);
+		dev_kfree_skb(rbd->skb);
+	}
+}
+
+
+static void rebuild_rx_bufs(struct net_device *dev)
+{
+	struct i596_private *lp = netdev_priv(dev);
+	struct i596_dma *dma = lp->dma;
+	int i;
+
+	/* Ensure rx frame/buffer descriptors are tidy */
+
+	for (i = 0; i < rx_ring_size; i++) {
+		dma->rfds[i].rbd = I596_NULL;
+		dma->rfds[i].cmd = SWAP16(CMD_FLEX);
+	}
+	dma->rfds[rx_ring_size-1].cmd = SWAP16(CMD_EOL|CMD_FLEX);
+	lp->rfd_head = dma->rfds;
+	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
+	lp->rbd_head = dma->rbds;
+	dma->rfds[0].rbd = SWAP32(virt_to_dma(lp, dma->rbds));
+
+	DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
+}
+
+
+static int init_i596_mem(struct net_device *dev)
+{
+	struct i596_private *lp = netdev_priv(dev);
+	struct i596_dma *dma = lp->dma;
+	unsigned long flags;
+
+	mpu_port(dev, PORT_RESET, 0);
+	udelay(100);			/* Wait 100us - seems to help */
+
+	/* change the scp address */
+
+	lp->last_cmd = jiffies;
+
+	dma->scp.sysbus = SYSBUS;
+	dma->scp.iscp = SWAP32(virt_to_dma(lp, &(dma->iscp)));
+	dma->iscp.scb = SWAP32(virt_to_dma(lp, &(dma->scb)));
+	dma->iscp.stat = SWAP32(ISCP_BUSY);
+	lp->cmd_backlog = 0;
+
+	lp->cmd_head = NULL;
+	dma->scb.cmd = I596_NULL;
+
+	DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));
+
+	DMA_WBACK(dev, &(dma->scp), sizeof(struct i596_scp));
+	DMA_WBACK(dev, &(dma->iscp), sizeof(struct i596_iscp));
+	DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
+
+	mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp));
+	ca(dev);
+	if (wait_istat(dev, dma, 1000, "initialization timed out"))
+		goto failed;
+	DEB(DEB_INIT, printk(KERN_DEBUG
+			     "%s: i82596 initialization successful\n",
+			     dev->name));
+
+	if (request_irq(dev->irq, &i596_interrupt, 0, "i82596", dev)) {
+		printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
+		goto failed;
+	}
+
+	/* Ensure rx frame/buffer descriptors are tidy */
+	rebuild_rx_bufs(dev);
+
+	dma->scb.command = 0;
+	DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
+
+	DEB(DEB_INIT, printk(KERN_DEBUG
+			     "%s: queuing CmdConfigure\n", dev->name));
+	memcpy(dma->cf_cmd.i596_config, init_setup, 14);
+	dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
+	DMA_WBACK(dev, &(dma->cf_cmd), sizeof(struct cf_cmd));
+	i596_add_cmd(dev, &dma->cf_cmd.cmd);
+
+	DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
+	memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, 6);
+	dma->sa_cmd.cmd.command = SWAP16(CmdSASetup);
+	DMA_WBACK(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
+	i596_add_cmd(dev, &dma->sa_cmd.cmd);
+
+	DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
+	dma->tdr_cmd.cmd.command = SWAP16(CmdTDR);
+	DMA_WBACK(dev, &(dma->tdr_cmd), sizeof(struct tdr_cmd));
+	i596_add_cmd(dev, &dma->tdr_cmd.cmd);
+
+	spin_lock_irqsave (&lp->lock, flags);
+
+	if (wait_cmd(dev, dma, 1000, "timed out waiting to issue RX_START")) {
+		spin_unlock_irqrestore (&lp->lock, flags);
+		goto failed_free_irq;
+	}
+	DEB(DEB_INIT, printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
+	dma->scb.command = SWAP16(RX_START);
+	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
+	DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
+
+	ca(dev);
+
+	spin_unlock_irqrestore (&lp->lock, flags);
+	if (wait_cmd(dev, dma, 1000, "RX_START not processed"))
+		goto failed_free_irq;
+	DEB(DEB_INIT, printk(KERN_DEBUG
+			     "%s: Receive unit started OK\n", dev->name));
+	return 0;
+
+failed_free_irq:
+	free_irq(dev->irq, dev);
+failed:
+	printk(KERN_ERR "%s: Failed to initialise 82596\n", dev->name);
+	mpu_port(dev, PORT_RESET, 0);
+	return -1;
+}
+
+
+static inline int i596_rx(struct net_device *dev)
+{
+	struct i596_private *lp = netdev_priv(dev);
+	struct i596_rfd *rfd;
+	struct i596_rbd *rbd;
+	int frames = 0;
+
+	DEB(DEB_RXFRAME, printk(KERN_DEBUG
+				"i596_rx(), rfd_head %p, rbd_head %p\n",
+				lp->rfd_head, lp->rbd_head));
+
+
+	rfd = lp->rfd_head;		/* Ref next frame to check */
+
+	DMA_INV(dev, rfd, sizeof(struct i596_rfd));
+	while (rfd->stat & SWAP16(STAT_C)) {	/* Loop while complete frames */
+		if (rfd->rbd == I596_NULL)
+			rbd = NULL;
+		else if (rfd->rbd == lp->rbd_head->b_addr) {
+			rbd = lp->rbd_head;
+			DMA_INV(dev, rbd, sizeof(struct i596_rbd));
+		} else {
+			printk(KERN_ERR "%s: rbd chain broken!\n", dev->name);
+			/* XXX Now what? */
+			rbd = NULL;
+		}
+		DEB(DEB_RXFRAME, printk(KERN_DEBUG
+				      "  rfd %p, rfd.rbd %08x, rfd.stat %04x\n",
+				      rfd, rfd->rbd, rfd->stat));
+
+		if (rbd != NULL && (rfd->stat & SWAP16(STAT_OK))) {
+			/* a good frame */
+			int pkt_len = SWAP16(rbd->count) & 0x3fff;
+			struct sk_buff *skb = rbd->skb;
+			int rx_in_place = 0;
+
+			DEB(DEB_RXADDR, print_eth(rbd->v_data, "received"));
+			frames++;
+
+			/* Check if the packet is long enough to just accept
+			 * without copying to a properly sized skbuff.
+			 */
+
+			if (pkt_len > rx_copybreak) {
+				struct sk_buff *newskb;
+				dma_addr_t dma_addr;
+
+				dma_unmap_single(dev->dev.parent,
+						 (dma_addr_t)SWAP32(rbd->b_data),
+						 PKT_BUF_SZ, DMA_FROM_DEVICE);
+				/* Get fresh skbuff to replace filled one. */
+				newskb = netdev_alloc_skb(dev, PKT_BUF_SZ + 4);
+				if (newskb == NULL) {
+					skb = NULL;	/* drop pkt */
+					goto memory_squeeze;
+				}
+				skb_reserve(newskb, 2);
+
+				/* Pass up the skb already on the Rx ring. */
+				skb_put(skb, pkt_len);
+				rx_in_place = 1;
+				rbd->skb = newskb;
+				dma_addr = dma_map_single(dev->dev.parent,
+							  newskb->data,
+							  PKT_BUF_SZ,
+							  DMA_FROM_DEVICE);
+				rbd->v_data = newskb->data;
+				rbd->b_data = SWAP32(dma_addr);
+				DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
+			} else
+				skb = netdev_alloc_skb(dev, pkt_len + 2);
+memory_squeeze:
+			if (skb == NULL) {
+				/* XXX tulip.c can defer packets here!! */
+				printk(KERN_ERR
+				       "%s: i596_rx Memory squeeze, dropping packet.\n",
+				       dev->name);
+				lp->stats.rx_dropped++;
+			} else {
+				if (!rx_in_place) {
+					/* 16 byte align the data fields */
+					dma_sync_single_for_cpu(dev->dev.parent,
+								(dma_addr_t)SWAP32(rbd->b_data),
+								PKT_BUF_SZ, DMA_FROM_DEVICE);
+					skb_reserve(skb, 2);
+					memcpy(skb_put(skb, pkt_len), rbd->v_data, pkt_len);
+					dma_sync_single_for_device(dev->dev.parent,
+								   (dma_addr_t)SWAP32(rbd->b_data),
+								   PKT_BUF_SZ, DMA_FROM_DEVICE);
+				}
+				skb->len = pkt_len;
+				skb->protocol = eth_type_trans(skb, dev);
+				netif_rx(skb);
+				dev->last_rx = jiffies;
+				lp->stats.rx_packets++;
+				lp->stats.rx_bytes += pkt_len;
+			}
+		} else {
+			DEB(DEB_ERRORS, printk(KERN_DEBUG
+					       "%s: Error, rfd.stat = 0x%04x\n",
+					       dev->name, rfd->stat));
+			lp->stats.rx_errors++;
+			if (rfd->stat & SWAP16(0x0100))
+				lp->stats.collisions++;
+			if (rfd->stat & SWAP16(0x8000))
+				lp->stats.rx_length_errors++;
+			if (rfd->stat & SWAP16(0x0001))
+				lp->stats.rx_over_errors++;
+			if (rfd->stat & SWAP16(0x0002))
+				lp->stats.rx_fifo_errors++;
+			if (rfd->stat & SWAP16(0x0004))
+				lp->stats.rx_frame_errors++;
+			if (rfd->stat & SWAP16(0x0008))
+				lp->stats.rx_crc_errors++;
+			if (rfd->stat & SWAP16(0x0010))
+				lp->stats.rx_length_errors++;
+		}
+
+		/* Clear the buffer descriptor count and EOF + F flags */
+
+		if (rbd != NULL && (rbd->count & SWAP16(0x4000))) {
+			rbd->count = 0;
+			lp->rbd_head = rbd->v_next;
+			DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
+		}
+
+		/* Tidy the frame descriptor, marking it as end of list */
+
+		rfd->rbd = I596_NULL;
+		rfd->stat = 0;
+		rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
+		rfd->count = 0;
+
+		/* Update record of next frame descriptor to process */
+
+		lp->dma->scb.rfd = rfd->b_next;
+		lp->rfd_head = rfd->v_next;
+		DMA_WBACK_INV(dev, rfd, sizeof(struct i596_rfd));
+
+		/* Remove end-of-list from old end descriptor */
+
+		rfd->v_prev->cmd = SWAP16(CMD_FLEX);
+		DMA_WBACK_INV(dev, rfd->v_prev, sizeof(struct i596_rfd));
+		rfd = lp->rfd_head;
+		DMA_INV(dev, rfd, sizeof(struct i596_rfd));
+	}
+
+	DEB(DEB_RXFRAME, printk(KERN_DEBUG "frames %d\n", frames));
+
+	return 0;
+}
+
+
+static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
+{
+	struct i596_cmd *ptr;
+
+	while (lp->cmd_head != NULL) {
+		ptr = lp->cmd_head;
+		lp->cmd_head = ptr->v_next;
+		lp->cmd_backlog--;
+
+		switch (SWAP16(ptr->command) & 0x7) {
+		case CmdTx:
+			{
+				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
+				struct sk_buff *skb = tx_cmd->skb;
+				dma_unmap_single(dev->dev.parent,
+						 tx_cmd->dma_addr,
+						 skb->len, DMA_TO_DEVICE);
+
+				dev_kfree_skb(skb);
+
+				lp->stats.tx_errors++;
+				lp->stats.tx_aborted_errors++;
+
+				ptr->v_next = NULL;
+				ptr->b_next = I596_NULL;
+				tx_cmd->cmd.command = 0;  /* Mark as free */
+				break;
+			}
+		default:
+			ptr->v_next = NULL;
+			ptr->b_next = I596_NULL;
+		}
+		DMA_WBACK_INV(dev, ptr, sizeof(struct i596_cmd));
+	}
+
+	wait_cmd(dev, lp->dma, 100, "i596_cleanup_cmd timed out");
+	lp->dma->scb.cmd = I596_NULL;
+	DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb));
+}
+
+
+static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
+{
+	unsigned long flags;
+
+	DEB(DEB_RESET, printk(KERN_DEBUG "i596_reset\n"));
+
+	spin_lock_irqsave (&lp->lock, flags);
+
+	wait_cmd(dev, lp->dma, 100, "i596_reset timed out");
+
+	netif_stop_queue(dev);
+
+	/* FIXME: this command might cause an lpmc */
+	lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
+	DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb));
+	ca(dev);
+
+	/* wait for shutdown */
+	wait_cmd(dev, lp->dma, 1000, "i596_reset 2 timed out");
+	spin_unlock_irqrestore (&lp->lock, flags);
+
+	i596_cleanup_cmd(dev, lp);
+	i596_rx(dev);
+
+	netif_start_queue(dev);
+	init_i596_mem(dev);
+}
+
+
+static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
+{
+	struct i596_private *lp = netdev_priv(dev);
+	struct i596_dma *dma = lp->dma;
+	unsigned long flags;
+
+	DEB(DEB_ADDCMD, printk(KERN_DEBUG "i596_add_cmd cmd_head %p\n",
+			       lp->cmd_head));
+
+	cmd->status = 0;
+	cmd->command |= SWAP16(CMD_EOL | CMD_INTR);
+	cmd->v_next = NULL;
+	cmd->b_next = I596_NULL;
+	DMA_WBACK(dev, cmd, sizeof(struct i596_cmd));
+
+	spin_lock_irqsave (&lp->lock, flags);
+
+	if (lp->cmd_head != NULL) {
+		lp->cmd_tail->v_next = cmd;
+		lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status));
+		DMA_WBACK(dev, lp->cmd_tail, sizeof(struct i596_cmd));
+	} else {
+		lp->cmd_head = cmd;
+		wait_cmd(dev, dma, 100, "i596_add_cmd timed out");
+		dma->scb.cmd = SWAP32(virt_to_dma(lp, &cmd->status));
+		dma->scb.command = SWAP16(CUC_START);
+		DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
+		ca(dev);
+	}
+	lp->cmd_tail = cmd;
+	lp->cmd_backlog++;
+
+	spin_unlock_irqrestore (&lp->lock, flags);
+
+	if (lp->cmd_backlog > max_cmd_backlog) {
+		unsigned long tickssofar = jiffies - lp->last_cmd;
+
+		if (tickssofar < ticks_limit)
+			return;
+
+		printk(KERN_ERR
+		       "%s: command unit timed out, status resetting.\n",
+		       dev->name);
+#if 1
+		i596_reset(dev, lp);
+#endif
+	}
+}
+
+static int i596_open(struct net_device *dev)
+{
+	DEB(DEB_OPEN, printk(KERN_DEBUG
+			     "%s: i596_open() irq %d.\n", dev->name, dev->irq));
+
+	if (init_rx_bufs(dev)) {
+		printk(KERN_ERR "%s: Failed to init rx bufs\n", dev->name);
+		return -EAGAIN;
+	}
+	if (init_i596_mem(dev)) {
+		printk(KERN_ERR "%s: Failed to init memory\n", dev->name);
+		goto out_remove_rx_bufs;
+	}
+	netif_start_queue(dev);
+
+	return 0;
+
+out_remove_rx_bufs:
+	remove_rx_bufs(dev);
+	return -EAGAIN;
+}
+
+static void i596_tx_timeout (struct net_device *dev)
+{
+	struct i596_private *lp = netdev_priv(dev);
+
+	/* Transmitter timeout, serious problems. */
+	DEB(DEB_ERRORS, printk(KERN_DEBUG
+			       "%s: transmit timed out, status resetting.\n",
+			       dev->name));
+
+	lp->stats.tx_errors++;
+
+	/* Try to restart the adaptor */
+	if (lp->last_restart == lp->stats.tx_packets) {
+		DEB(DEB_ERRORS, printk(KERN_DEBUG "Resetting board.\n"));
+		/* Shutdown and restart */
+		i596_reset (dev, lp);
+	} else {
+		/* Issue a channel attention signal */
+		DEB(DEB_ERRORS, printk(KERN_DEBUG "Kicking board.\n"));
+		lp->dma->scb.command = SWAP16(CUC_START | RX_START);
+		DMA_WBACK_INV(dev, &(lp->dma->scb), sizeof(struct i596_scb));
+		ca (dev);
+		lp->last_restart = lp->stats.tx_packets;
+	}
+
+	dev->trans_start = jiffies;
+	netif_wake_queue (dev);
+}
+
+
+static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct i596_private *lp = netdev_priv(dev);
+	struct tx_cmd *tx_cmd;
+	struct i596_tbd *tbd;
+	short length = skb->len;
+	dev->trans_start = jiffies;
+
+	DEB(DEB_STARTTX, printk(KERN_DEBUG
+				"%s: i596_start_xmit(%x,%p) called\n",
+				dev->name, skb->len, skb->data));
+
+	if (length < ETH_ZLEN) {
+		if (skb_padto(skb, ETH_ZLEN))
+			return 0;
+		length = ETH_ZLEN;
+	}
+
+	netif_stop_queue(dev);
+
+	tx_cmd = lp->dma->tx_cmds + lp->next_tx_cmd;
+	tbd = lp->dma->tbds + lp->next_tx_cmd;
+
+	if (tx_cmd->cmd.command) {
+		DEB(DEB_ERRORS, printk(KERN_DEBUG
+				       "%s: xmit ring full, dropping packet.\n",
+				       dev->name));
+		lp->stats.tx_dropped++;
+
+		dev_kfree_skb(skb);
+	} else {
+		if (++lp->next_tx_cmd == TX_RING_SIZE)
+			lp->next_tx_cmd = 0;
+		tx_cmd->tbd = SWAP32(virt_to_dma(lp, tbd));
+		tbd->next = I596_NULL;
+
+		tx_cmd->cmd.command = SWAP16(CMD_FLEX | CmdTx);
+		tx_cmd->skb = skb;
+
+		tx_cmd->pad = 0;
+		tx_cmd->size = 0;
+		tbd->pad = 0;
+		tbd->size = SWAP16(EOF | length);
+
+		tx_cmd->dma_addr = dma_map_single(dev->dev.parent, skb->data,
+						  skb->len, DMA_TO_DEVICE);
+		tbd->data = SWAP32(tx_cmd->dma_addr);
+
+		DEB(DEB_TXADDR, print_eth(skb->data, "tx-queued"));
+		DMA_WBACK_INV(dev, tx_cmd, sizeof(struct tx_cmd));
+		DMA_WBACK_INV(dev, tbd, sizeof(struct i596_tbd));
+		i596_add_cmd(dev, &tx_cmd->cmd);
+
+		lp->stats.tx_packets++;
+		lp->stats.tx_bytes += length;
+	}
+
+	netif_start_queue(dev);
+
+	return 0;
+}
+
+static void print_eth(unsigned char *add, char *str)
+{
+	int i;
+
+	printk(KERN_DEBUG "i596 0x%p, ", add);
+	for (i = 0; i < 6; i++)
+		printk(" %02X", add[i + 6]);
+	printk(" -->");
+	for (i = 0; i < 6; i++)
+		printk(" %02X", add[i]);
+	printk(" %02X%02X, %s\n", add[12], add[13], str);
+}
+
+static int __devinit i82596_probe(struct net_device *dev)
+{
+	int i;
+	struct i596_private *lp = netdev_priv(dev);
+	struct i596_dma *dma;
+
+	/* This lot ensures things have been cache-line aligned. */
+	BUILD_BUG_ON(sizeof(struct i596_rfd) != 32);
+	BUILD_BUG_ON(sizeof(struct i596_rbd) &  31);
+	BUILD_BUG_ON(sizeof(struct tx_cmd)   &  31);
+	BUILD_BUG_ON(sizeof(struct i596_tbd) != 32);
+#ifndef __LP64__
+	BUILD_BUG_ON(sizeof(struct i596_dma) > 4096);
+#endif
+
+	if (!dev->base_addr || !dev->irq)
+		return -ENODEV;
+
+	dma = (struct i596_dma *) DMA_ALLOC(dev->dev.parent,
+		sizeof(struct i596_dma), &lp->dma_addr, GFP_KERNEL);
+	if (!dma) {
+		printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
+		return -ENOMEM;
+	}
+
+	/* The 82596-specific entries in the device structure. */
+	dev->open = i596_open;
+	dev->stop = i596_close;
+	dev->hard_start_xmit = i596_start_xmit;
+	dev->get_stats = i596_get_stats;
+	dev->set_multicast_list = set_multicast_list;
+	dev->tx_timeout = i596_tx_timeout;
+	dev->watchdog_timeo = TX_TIMEOUT;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	dev->poll_controller = i596_poll_controller;
+#endif
+
+	memset(dma, 0, sizeof(struct i596_dma));
+	lp->dma = dma;
+
+	dma->scb.command = 0;
+	dma->scb.cmd = I596_NULL;
+	dma->scb.rfd = I596_NULL;
+	spin_lock_init(&lp->lock);
+
+	DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
+
+	i = register_netdev(dev);
+	if (i) {
+		DMA_FREE(dev->dev.parent, sizeof(struct i596_dma),
+				    (void *)dma, lp->dma_addr);
+		return i;
+	}
+
+	DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx,",
+			      dev->name, dev->base_addr));
+	for (i = 0; i < 6; i++)
+		DEB(DEB_PROBE, printk(" %2.2X", dev->dev_addr[i]));
+	DEB(DEB_PROBE, printk(" IRQ %d.\n", dev->irq));
+	DEB(DEB_INIT, printk(KERN_INFO
+			     "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n",
+			     dev->name, dma, (int)sizeof(struct i596_dma),
+			     &dma->scb));
+
+	return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void i596_poll_controller(struct net_device *dev)
+{
+	disable_irq(dev->irq);
+	i596_interrupt(dev->irq, dev);
+	enable_irq(dev->irq);
+}
+#endif
+
+static irqreturn_t i596_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev = dev_id;
+	struct i596_private *lp;
+	struct i596_dma *dma;
+	unsigned short status, ack_cmd = 0;
+
+	if (dev == NULL) {
+		printk(KERN_WARNING "%s: irq %d for unknown device.\n",
+		       __FUNCTION__, irq);
+		return IRQ_NONE;
+	}
+
+	lp = netdev_priv(dev);
+	dma = lp->dma;
+
+	spin_lock (&lp->lock);
+
+	wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
+	status = SWAP16(dma->scb.status);
+
+	DEB(DEB_INTS, printk(KERN_DEBUG
+			     "%s: i596 interrupt, IRQ %d, status %4.4x.\n",
+			dev->name, irq, status));
+
+	ack_cmd = status & 0xf000;
+
+	if (!ack_cmd) {
+		DEB(DEB_ERRORS, printk(KERN_DEBUG
+				       "%s: interrupt with no events\n",
+				       dev->name));
+		spin_unlock (&lp->lock);
+		return IRQ_NONE;
+	}
+
+	if ((status & 0x8000) || (status & 0x2000)) {
+		struct i596_cmd *ptr;
+
+		if ((status & 0x8000))
+			DEB(DEB_INTS,
+			    printk(KERN_DEBUG
+				   "%s: i596 interrupt completed command.\n",
+				   dev->name));
+		if ((status & 0x2000))
+			DEB(DEB_INTS,
+			    printk(KERN_DEBUG
+				   "%s: i596 interrupt command unit inactive %x.\n",
+				   dev->name, status & 0x0700));
+
+		while (lp->cmd_head != NULL) {
+			DMA_INV(dev, lp->cmd_head, sizeof(struct i596_cmd));
+			if (!(lp->cmd_head->status & SWAP16(STAT_C)))
+				break;
+
+			ptr = lp->cmd_head;
+
+			DEB(DEB_STATUS,
+			    printk(KERN_DEBUG
+				   "cmd_head->status = %04x, ->command = %04x\n",
+				   SWAP16(lp->cmd_head->status),
+				   SWAP16(lp->cmd_head->command)));
+			lp->cmd_head = ptr->v_next;
+			lp->cmd_backlog--;
+
+			switch (SWAP16(ptr->command) & 0x7) {
+			case CmdTx:
+			    {
+				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
+				struct sk_buff *skb = tx_cmd->skb;
+
+				if (ptr->status & SWAP16(STAT_OK)) {
+					DEB(DEB_TXADDR,
+					    print_eth(skb->data, "tx-done"));
+				} else {
+					lp->stats.tx_errors++;
+					if (ptr->status & SWAP16(0x0020))
+						lp->stats.collisions++;
+					if (!(ptr->status & SWAP16(0x0040)))
+						lp->stats.tx_heartbeat_errors++;
+					if (ptr->status & SWAP16(0x0400))
+						lp->stats.tx_carrier_errors++;
+					if (ptr->status & SWAP16(0x0800))
+						lp->stats.collisions++;
+					if (ptr->status & SWAP16(0x1000))
+						lp->stats.tx_aborted_errors++;
+				}
+				dma_unmap_single(dev->dev.parent,
+						 tx_cmd->dma_addr,
+						 skb->len, DMA_TO_DEVICE);
+				dev_kfree_skb_irq(skb);
+
+				tx_cmd->cmd.command = 0; /* Mark free */
+				break;
+			    }
+			case CmdTDR:
+			    {
+				unsigned short status = SWAP16(((struct tdr_cmd *)ptr)->status);
+
+				if (status & 0x8000) {
+					DEB(DEB_ANY,
+					    printk(KERN_DEBUG "%s: link ok.\n",
+						   dev->name));
+				} else {
+					if (status & 0x4000)
+						printk(KERN_ERR
+						       "%s: Transceiver problem.\n",
+						       dev->name);
+					if (status & 0x2000)
+						printk(KERN_ERR
+						       "%s: Termination problem.\n",
+						       dev->name);
+					if (status & 0x1000)
+						printk(KERN_ERR
+						       "%s: Short circuit.\n",
+						       dev->name);
+
+					DEB(DEB_TDR,
+					    printk(KERN_DEBUG "%s: Time %d.\n",
+						   dev->name, status & 0x07ff));
+				}
+				break;
+			    }
+			case CmdConfigure:
+				/*
+				 * Zap command so set_multicast_list() know
+				 * it is free
+				 */
+				ptr->command = 0;
+				break;
+			}
+			ptr->v_next = NULL;
+			ptr->b_next = I596_NULL;
+			DMA_WBACK(dev, ptr, sizeof(struct i596_cmd));
+			lp->last_cmd = jiffies;
+		}
+
+		/* This mess is arranging that only the last of any outstanding
+		 * commands has the interrupt bit set.  Should probably really
+		 * only add to the cmd queue when the CU is stopped.
+		 */
+		ptr = lp->cmd_head;
+		while ((ptr != NULL) && (ptr != lp->cmd_tail)) {
+			struct i596_cmd *prev = ptr;
+
+			ptr->command &= SWAP16(0x1fff);
+			ptr = ptr->v_next;
+			DMA_WBACK_INV(dev, prev, sizeof(struct i596_cmd));
+		}
+
+		if (lp->cmd_head != NULL)
+			ack_cmd |= CUC_START;
+		dma->scb.cmd = SWAP32(virt_to_dma(lp, &lp->cmd_head->status));
+		DMA_WBACK_INV(dev, &dma->scb, sizeof(struct i596_scb));
+	}
+	if ((status & 0x1000) || (status & 0x4000)) {
+		if ((status & 0x4000))
+			DEB(DEB_INTS,
+			    printk(KERN_DEBUG
+				   "%s: i596 interrupt received a frame.\n",
+				   dev->name));
+		i596_rx(dev);
+		/* Only RX_START if stopped - RGH 07-07-96 */
+		if (status & 0x1000) {
+			if (netif_running(dev)) {
+				DEB(DEB_ERRORS,
+				    printk(KERN_DEBUG
+					   "%s: i596 interrupt receive unit inactive, status 0x%x\n",
+					   dev->name, status));
+				ack_cmd |= RX_START;
+				lp->stats.rx_errors++;
+				lp->stats.rx_fifo_errors++;
+				rebuild_rx_bufs(dev);
+			}
+		}
+	}
+	wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
+	dma->scb.command = SWAP16(ack_cmd);
+	DMA_WBACK(dev, &dma->scb, sizeof(struct i596_scb));
+
+	/* DANGER: I suspect that some kind of interrupt
+	 acknowledgement aside from acking the 82596 might be needed
+	 here...  but it's running acceptably without */
+
+	ca(dev);
+
+	wait_cmd(dev, dma, 100, "i596 interrupt, exit timeout");
+	DEB(DEB_INTS, printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));
+
+	spin_unlock (&lp->lock);
+	return IRQ_HANDLED;
+}
+
+static int i596_close(struct net_device *dev)
+{
+	struct i596_private *lp = netdev_priv(dev);
+	unsigned long flags;
+
+	netif_stop_queue(dev);
+
+	DEB(DEB_INIT,
+	    printk(KERN_DEBUG
+		   "%s: Shutting down ethercard, status was %4.4x.\n",
+		   dev->name, SWAP16(lp->dma->scb.status)));
+
+	spin_lock_irqsave(&lp->lock, flags);
+
+	wait_cmd(dev, lp->dma, 100, "close1 timed out");
+	lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
+	DMA_WBACK(dev, &lp->dma->scb, sizeof(struct i596_scb));
+
+	ca(dev);
+
+	wait_cmd(dev, lp->dma, 100, "close2 timed out");
+	spin_unlock_irqrestore(&lp->lock, flags);
+	DEB(DEB_STRUCT, i596_display_data(dev));
+	i596_cleanup_cmd(dev, lp);
+
+	free_irq(dev->irq, dev);
+	remove_rx_bufs(dev);
+
+	return 0;
+}
+
+static struct net_device_stats *i596_get_stats(struct net_device *dev)
+{
+	struct i596_private *lp = netdev_priv(dev);
+
+	return &lp->stats;
+}
+
+/*
+ *    Set or clear the multicast filter for this adaptor.
+ */
+
+static void set_multicast_list(struct net_device *dev)
+{
+	struct i596_private *lp = netdev_priv(dev);
+	struct i596_dma *dma = lp->dma;
+	int config = 0, cnt;
+
+	DEB(DEB_MULTI,
+	    printk(KERN_DEBUG
+		   "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
+		   dev->name, dev->mc_count,
+		   dev->flags & IFF_PROMISC ? "ON" : "OFF",
+		   dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
+
+	if ((dev->flags & IFF_PROMISC) &&
+	    !(dma->cf_cmd.i596_config[8] & 0x01)) {
+		dma->cf_cmd.i596_config[8] |= 0x01;
+		config = 1;
+	}
+	if (!(dev->flags & IFF_PROMISC) &&
+	    (dma->cf_cmd.i596_config[8] & 0x01)) {
+		dma->cf_cmd.i596_config[8] &= ~0x01;
+		config = 1;
+	}
+	if ((dev->flags & IFF_ALLMULTI) &&
+	    (dma->cf_cmd.i596_config[11] & 0x20)) {
+		dma->cf_cmd.i596_config[11] &= ~0x20;
+		config = 1;
+	}
+	if (!(dev->flags & IFF_ALLMULTI) &&
+	    !(dma->cf_cmd.i596_config[11] & 0x20)) {
+		dma->cf_cmd.i596_config[11] |= 0x20;
+		config = 1;
+	}
+	if (config) {
+		if (dma->cf_cmd.cmd.command)
+			printk(KERN_INFO
+			       "%s: config change request already queued\n",
+			       dev->name);
+		else {
+			dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
+			DMA_WBACK_INV(dev, &dma->cf_cmd, sizeof(struct cf_cmd));
+			i596_add_cmd(dev, &dma->cf_cmd.cmd);
+		}
+	}
+
+	cnt = dev->mc_count;
+	if (cnt > MAX_MC_CNT) {
+		cnt = MAX_MC_CNT;
+		printk(KERN_NOTICE "%s: Only %d multicast addresses supported\n",
+			dev->name, cnt);
+	}
+
+	if (dev->mc_count > 0) {
+		struct dev_mc_list *dmi;
+		unsigned char *cp;
+		struct mc_cmd *cmd;
+
+		cmd = &dma->mc_cmd;
+		cmd->cmd.command = SWAP16(CmdMulticastList);
+		cmd->mc_cnt = SWAP16(cnt * 6);
+		cp = cmd->mc_addrs;
+		for (dmi = dev->mc_list;
+		     cnt && dmi != NULL;
+		     dmi = dmi->next, cnt--, cp += 6) {
+			memcpy(cp, dmi->dmi_addr, 6);
+			if (i596_debug > 1)
+				DEB(DEB_MULTI,
+				    printk(KERN_DEBUG
+					   "%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n",
+					   dev->name, cp[0], cp[1], cp[2], cp[3], cp[4], cp[5]));
+		}
+		DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
+		i596_add_cmd(dev, &cmd->cmd);
+	}
+}
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c
index 7f8b7d5..492cfaa 100644
--- a/drivers/net/mlx4/qp.c
+++ b/drivers/net/mlx4/qp.c
@@ -113,8 +113,7 @@
 	struct mlx4_cmd_mailbox *mailbox;
 	int ret = 0;
 
-	if (cur_state < 0 || cur_state >= MLX4_QP_NUM_STATE ||
-	    new_state < 0 || cur_state >= MLX4_QP_NUM_STATE ||
+	if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
 	    !op[cur_state][new_state])
 		return -EINVAL;
 
diff --git a/drivers/net/netxen/netxen_nic_niu.c b/drivers/net/netxen/netxen_nic_niu.c
index 75102d3..05e0577 100644
--- a/drivers/net/netxen/netxen_nic_niu.c
+++ b/drivers/net/netxen/netxen_nic_niu.c
@@ -724,7 +724,7 @@
 	__u32 mac_cfg0;
 	u32 port = physical_port[adapter->portnum];
 
-	if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS))
+	if (port > NETXEN_NIU_MAX_GBE_PORTS)
 		return -EINVAL;
 	mac_cfg0 = 0;
 	netxen_gb_soft_reset(mac_cfg0);
@@ -757,7 +757,7 @@
 	__u32 reg;
 	u32 port = physical_port[adapter->portnum];
 
-	if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS))
+	if (port > NETXEN_NIU_MAX_GBE_PORTS)
 		return -EINVAL;
 
 	/* save previous contents */
@@ -894,7 +894,7 @@
 	__u32 reg;
 	u32 port = physical_port[adapter->portnum];
 
-	if ((port < 0) || (port > NETXEN_NIU_MAX_XG_PORTS))
+	if (port > NETXEN_NIU_MAX_XG_PORTS)
 		return -EINVAL;
 
 	if (netxen_nic_hw_read_wx(adapter,
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 8d38425..0b3066a 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -755,7 +755,7 @@
 	flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G;
 
 	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_RXCH_CFG(mac->dma_rxch),
-			       PAS_IOB_DMA_RXCH_CFG_CNTTH(1));
+			       PAS_IOB_DMA_RXCH_CFG_CNTTH(0));
 
 	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_TXCH_CFG(mac->dma_txch),
 			       PAS_IOB_DMA_TXCH_CFG_CNTTH(32));
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 808fae1..50dff1b 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -521,6 +521,7 @@
 
 static int axnet_open(struct net_device *dev)
 {
+    int ret;
     axnet_dev_t *info = PRIV(dev);
     struct pcmcia_device *link = info->p_dev;
     
@@ -529,9 +530,11 @@
     if (!pcmcia_dev_present(link))
 	return -ENODEV;
 
-    link->open++;
+    ret = request_irq(dev->irq, ei_irq_wrapper, IRQF_SHARED, "axnet_cs", dev);
+    if (ret)
+	    return ret;
 
-    request_irq(dev->irq, ei_irq_wrapper, IRQF_SHARED, "axnet_cs", dev);
+    link->open++;
 
     info->link_status = 0x00;
     init_timer(&info->watchdog);
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 3f93d49..85d5f2c 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -109,7 +109,7 @@
     card type
  */
 typedef enum { MBH10302, MBH10304, TDK, CONTEC, LA501, UNGERMANN, 
-	       XXX10304
+	       XXX10304, NEC, KME
 } cardtype_t;
 
 /*
@@ -374,6 +374,18 @@
 		link->io.NumPorts2 = 8;
 	    }
 	    break;
+	case MANFID_NEC:
+	    cardtype = NEC; /* MultiFunction Card */
+	    link->conf.ConfigBase = 0x800;
+	    link->conf.ConfigIndex = 0x47;
+	    link->io.NumPorts2 = 8;
+	    break;
+	case MANFID_KME:
+	    cardtype = KME; /* MultiFunction Card */
+	    link->conf.ConfigBase = 0x800;
+	    link->conf.ConfigIndex = 0x47;
+	    link->io.NumPorts2 = 8;
+	    break;
 	case MANFID_CONTEC:
 	    cardtype = CONTEC;
 	    break;
@@ -450,6 +462,8 @@
     case TDK:
     case LA501:
     case CONTEC:
+    case NEC:
+    case KME:
 	tuple.DesiredTuple = CISTPL_FUNCE;
 	tuple.TupleOffset = 0;
 	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
@@ -469,6 +483,10 @@
 		card_name = "TDK LAK-CD021";
 	    } else if( cardtype == LA501 ) {
 		card_name = "LA501";
+	    } else if( cardtype == NEC ) {
+		card_name = "PK-UG-J001";
+	    } else if( cardtype == KME ) {
+		card_name = "Panasonic";
 	    } else {
 		card_name = "C-NET(PC)C";
 	    }
@@ -678,8 +696,11 @@
 	PCMCIA_DEVICE_PROD_ID1("PCMCIA MBH10302", 0x8f4005da),
 	PCMCIA_DEVICE_PROD_ID1("UBKK,V2.0", 0x90888080),
 	PCMCIA_PFC_DEVICE_PROD_ID12(0, "TDK", "GlobalNetworker 3410/3412", 0x1eae9475, 0xd9a93bed),
+	PCMCIA_PFC_DEVICE_PROD_ID12(0, "NEC", "PK-UG-J001" ,0x18df0ba0 ,0x831b1064),
 	PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0d0a),
 	PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a),
+	PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05),
+	PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101),
 	PCMCIA_DEVICE_NULL,
 };
 MODULE_DEVICE_TABLE(pcmcia, fmvj18x_ids);
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index d88e9b2..f2613c2 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -960,6 +960,7 @@
 
 static int pcnet_open(struct net_device *dev)
 {
+    int ret;
     pcnet_dev_t *info = PRIV(dev);
     struct pcmcia_device *link = info->p_dev;
 
@@ -968,10 +969,12 @@
     if (!pcmcia_dev_present(link))
 	return -ENODEV;
 
-    link->open++;
-
     set_misc_reg(dev);
-    request_irq(dev->irq, ei_irq_wrapper, IRQF_SHARED, dev_info, dev);
+    ret = request_irq(dev->irq, ei_irq_wrapper, IRQF_SHARED, dev_info, dev);
+    if (ret)
+	    return ret;
+
+    link->open++;
 
     info->phy_id = info->eth_phy;
     info->link_status = 0x00;
@@ -1552,6 +1555,7 @@
 	PCMCIA_PFC_DEVICE_PROD_ID12(0, "Grey Cell", "GCS3000", 0x2a151fac, 0x48b932ae),
 	PCMCIA_PFC_DEVICE_PROD_ID12(0, "Linksys", "EtherFast 10&100 + 56K PC Card (PCMLM56)", 0x0733cc81, 0xb3765033),
 	PCMCIA_PFC_DEVICE_PROD_ID12(0, "LINKSYS", "PCMLM336", 0xf7cb0b07, 0x7a821b58),
+	PCMCIA_PFC_DEVICE_PROD_ID12(0, "MICRO RESEARCH", "COMBO-L/M-336", 0xb2ced065, 0x3ced0555),
 	PCMCIA_PFC_DEVICE_PROD_ID12(0, "PCMCIAs", "ComboCard", 0xdcfe12d3, 0xcd8906cc),
 	PCMCIA_PFC_DEVICE_PROD_ID12(0, "PCMCIAs", "LanModem", 0xdcfe12d3, 0xc67c648f),
 	PCMCIA_MFC_DEVICE_PROD_ID12(0, "IBM", "Home and Away 28.8 PC Card       ", 0xb569a6e5, 0x5bd4ff2c),
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 09b6f25..dd09011 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -55,6 +55,11 @@
 	---help---
 	  Currently supports the BCM5411, BCM5421 and BCM5461 PHYs.
 
+config ICPLUS_PHY
+	tristate "Drivers for ICPlus PHYs"
+	---help---
+	  Currently supports the IP175C PHY.
+
 config FIXED_PHY
 	tristate "Drivers for PHY emulation on fixed speed/link"
 	---help---
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index bcd1efb..8885650 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -11,4 +11,5 @@
 obj-$(CONFIG_SMSC_PHY)		+= smsc.o
 obj-$(CONFIG_VITESSE_PHY)	+= vitesse.o
 obj-$(CONFIG_BROADCOM_PHY)	+= broadcom.o
+obj-$(CONFIG_ICPLUS_PHY)	+= icplus.o
 obj-$(CONFIG_FIXED_PHY)		+= fixed.o
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
new file mode 100644
index 0000000..af3f1f2
--- /dev/null
+++ b/drivers/net/phy/icplus.c
@@ -0,0 +1,134 @@
+/*
+ * Driver for ICPlus PHYs
+ *
+ * Copyright (c) 2007 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+MODULE_DESCRIPTION("ICPlus IP175C PHY driver");
+MODULE_AUTHOR("Michael Barkowski");
+MODULE_LICENSE("GPL");
+
+static int ip175c_config_init(struct phy_device *phydev)
+{
+	int err, i;
+	static int full_reset_performed;
+
+	if (full_reset_performed == 0) {
+
+		/* master reset */
+		err = phydev->bus->write(phydev->bus, 30, 0, 0x175c);
+		if (err < 0)
+			return err;
+
+		/* ensure no bus delays overlap reset period */
+		err = phydev->bus->read(phydev->bus, 30, 0);
+
+		/* data sheet specifies reset period is 2 msec */
+		mdelay(2);
+
+		/* enable IP175C mode */
+		err = phydev->bus->write(phydev->bus, 29, 31, 0x175c);
+		if (err < 0)
+			return err;
+
+		/* Set MII0 speed and duplex (in PHY mode) */
+		err = phydev->bus->write(phydev->bus, 29, 22, 0x420);
+		if (err < 0)
+			return err;
+
+		/* reset switch ports */
+		for (i = 0; i < 5; i++) {
+			err = phydev->bus->write(phydev->bus, i,
+						 MII_BMCR, BMCR_RESET);
+			if (err < 0)
+				return err;
+		}
+
+		for (i = 0; i < 5; i++)
+			err = phydev->bus->read(phydev->bus, i, MII_BMCR);
+
+		mdelay(2);
+
+		full_reset_performed = 1;
+	}
+
+	if (phydev->addr != 4) {
+		phydev->state = PHY_RUNNING;
+		phydev->speed = SPEED_100;
+		phydev->duplex = DUPLEX_FULL;
+		phydev->link = 1;
+		netif_carrier_on(phydev->attached_dev);
+	}
+
+	return 0;
+}
+
+static int ip175c_read_status(struct phy_device *phydev)
+{
+	if (phydev->addr == 4) /* WAN port */
+		genphy_read_status(phydev);
+	else
+		/* Don't need to read status for switch ports */
+		phydev->irq = PHY_IGNORE_INTERRUPT;
+
+	return 0;
+}
+
+static int ip175c_config_aneg(struct phy_device *phydev)
+{
+	if (phydev->addr == 4) /* WAN port */
+		genphy_config_aneg(phydev);
+
+	return 0;
+}
+
+static struct phy_driver ip175c_driver = {
+	.phy_id		= 0x02430d80,
+	.name		= "ICPlus IP175C",
+	.phy_id_mask	= 0x0ffffff0,
+	.features	= PHY_BASIC_FEATURES,
+	.config_init	= &ip175c_config_init,
+	.config_aneg	= &ip175c_config_aneg,
+	.read_status	= &ip175c_read_status,
+	.driver		= { .owner = THIS_MODULE,},
+};
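+
+/* phylib binds a PHY to this driver when its MII ID matches .phy_id
+ * under .phy_id_mask, i.e. the IP175C revision nibble is ignored.
+ */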
+
+static int __init ip175c_init(void)
+{
+	return phy_driver_register(&ip175c_driver);
+}
+
+static void __exit ip175c_exit(void)
+{
+	phy_driver_unregister(&ip175c_driver);
+}
+
+module_init(ip175c_init);
+module_exit(ip175c_exit);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index b87f8d2..fbe1104 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -60,6 +60,7 @@
 #define MII_M1111_PHY_EXT_SR		0x1b
 #define MII_M1111_HWCFG_MODE_MASK	0xf
 #define MII_M1111_HWCFG_MODE_RGMII	0xb
+#define MII_M1111_HWCFG_MODE_SGMII_NO_CLK	0x4
 
 MODULE_DESCRIPTION("Marvell PHY driver");
 MODULE_AUTHOR("Andy Fleming");
@@ -169,6 +170,21 @@
 			return err;
 	}
 
+	if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
+		int temp;
+
+		temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
+		if (temp < 0)
+			return temp;
+
+		temp &= ~(MII_M1111_HWCFG_MODE_MASK);
+		temp |= MII_M1111_HWCFG_MODE_SGMII_NO_CLK;
+
+		err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
+		if (err < 0)
+			return err;
+	}
+
 	err = phy_write(phydev, MII_BMCR, BMCR_RESET);
 	if (err < 0)
 		return err;
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 585be04..8be8be4 100755
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -2433,37 +2433,22 @@
 	return -1;
 }
 
-static void ql_hw_csum_setup(struct sk_buff *skb,
+static void ql_hw_csum_setup(const struct sk_buff *skb,
 			     struct ob_mac_iocb_req *mac_iocb_ptr)
 {
-	struct ethhdr *eth;
-	struct iphdr *ip = NULL;
-	u8 offset = ETH_HLEN;
+	const struct iphdr *ip = ip_hdr(skb);
 
-	eth = (struct ethhdr *)(skb->data);
+	mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb);
+	mac_iocb_ptr->ip_hdr_len = ip->ihl;
 
-	if (eth->h_proto == __constant_htons(ETH_P_IP)) {
-		ip = (struct iphdr *)&skb->data[ETH_HLEN];
-	} else if (eth->h_proto == htons(ETH_P_8021Q) &&
-		   ((struct vlan_ethhdr *)skb->data)->
-		   h_vlan_encapsulated_proto == __constant_htons(ETH_P_IP)) {
-		ip = (struct iphdr *)&skb->data[VLAN_ETH_HLEN];
-		offset = VLAN_ETH_HLEN;
+	if (ip->protocol == IPPROTO_TCP) {
+		mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
+			OB_3032MAC_IOCB_REQ_IC;
+	} else {
+		mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
+			OB_3032MAC_IOCB_REQ_IC;
 	}
 
-	if (ip) {
-		if (ip->protocol == IPPROTO_TCP) {
-			mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC | 
-			OB_3032MAC_IOCB_REQ_IC;
-			mac_iocb_ptr->ip_hdr_off = offset;
-			mac_iocb_ptr->ip_hdr_len = ip->ihl;
-		} else if (ip->protocol == IPPROTO_UDP) {
-			mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC | 
-			OB_3032MAC_IOCB_REQ_IC;
-			mac_iocb_ptr->ip_hdr_off = offset;
-			mac_iocb_ptr->ip_hdr_len = ip->ihl;
-		}
-	}
 }
 
 /*
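
The rewritten ql_hw_csum_setup() leans on the skb header accessors: by
transmit time the stack has already recorded where the network header starts,
so ip_hdr() and skb_network_offset() replace the old hand parsing of plain
and VLAN-tagged Ethernet headers. A small sketch of what the helpers provide,
assuming a hypothetical wrapper function:

/* Sketch: ip_hdr(skb) points at skb->data + skb_network_offset(skb),
 * so the VLAN-vs-plain distinction handled by hand in the old code is
 * already folded into the recorded offset (ETH_HLEN or VLAN_ETH_HLEN). */
static unsigned int sketch_transport_offset(const struct sk_buff *skb)
{
	const struct iphdr *ip = ip_hdr(skb);

	/* ip->ihl is the IP header length in 32-bit words */
	return skb_network_offset(skb) + ip->ihl * 4;
}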
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 5ec7752c..982a901 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1,53 +1,11 @@
 /*
-=========================================================================
- r8169.c: A RealTek RTL-8169 Gigabit Ethernet driver for Linux kernel 2.4.x.
- --------------------------------------------------------------------
-
- History:
- Feb  4 2002	- created initially by ShuChen <shuchen@realtek.com.tw>.
- May 20 2002	- Add link status force-mode and TBI mode support.
-	2004	- Massive updates. See kernel SCM system for details.
-=========================================================================
-  1. [DEPRECATED: use ethtool instead] The media can be forced in 5 modes.
-	 Command: 'insmod r8169 media = SET_MEDIA'
-	 Ex:	  'insmod r8169 media = 0x04' will force PHY to operate in 100Mpbs Half-duplex.
-
-	 SET_MEDIA can be:
- 		_10_Half	= 0x01
- 		_10_Full	= 0x02
- 		_100_Half	= 0x04
- 		_100_Full	= 0x08
- 		_1000_Full	= 0x10
-
-  2. Support TBI mode.
-=========================================================================
-VERSION 1.1	<2002/10/4>
-
-	The bit4:0 of MII register 4 is called "selector field", and have to be
-	00001b to indicate support of IEEE std 802.3 during NWay process of
-	exchanging Link Code Word (FLP).
-
-VERSION 1.2	<2002/11/30>
-
-	- Large style cleanup
-	- Use ether_crc in stock kernel (linux/crc32.h)
-	- Copy mc_filter setup code from 8139cp
-	  (includes an optimization, and avoids set_bit use)
-
-VERSION 1.6LK	<2004/04/14>
-
-	- Merge of Realtek's version 1.6
-	- Conversion to DMA API
-	- Suspend/resume
-	- Endianness
-	- Misc Rx/Tx bugs
-
-VERSION 2.2LK	<2005/01/25>
-
-	- RX csum, TX csum/SG, TSO
-	- VLAN
-	- baby (< 7200) Jumbo frames support
-	- Merge of Realtek's version 2.2 (new phy)
+ * r8169.c: RealTek 8169/8168/8101 ethernet driver.
+ *
+ * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
+ * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
+ * Copyright (c) a lot of people too. Please respect their work.
+ *
+ * See MAINTAINERS file for support contact information.
  */
 
 #include <linux/module.h>
@@ -108,11 +66,6 @@
 #define rtl8169_rx_quota(count, quota)	count
 #endif
 
-/* media options */
-#define MAX_UNITS 8
-static int media[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
-static int num_media = 0;
-
 /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
 static const int max_interrupt_work = 20;
 
@@ -126,7 +79,7 @@
 #define RX_FIFO_THRESH	7	/* 7 means NO threshold, Rx buffer level before first PCI xfer. */
 #define RX_DMA_BURST	6	/* Maximum PCI burst, '6' is 1024 */
 #define TX_DMA_BURST	6	/* Maximum PCI burst, '6' is 1024 */
-#define EarlyTxThld 	0x3F	/* 0x3F means NO early transmit */
+#define EarlyTxThld	0x3F	/* 0x3F means NO early transmit */
 #define RxPacketMaxSize	0x3FE8	/* 16K - 1 - ETH_HLEN - VLAN - CRC... */
 #define SafeMtu		0x1c20	/* ... actually life sucks beyond ~7k */
 #define InterFrameGap	0x03	/* 3 means InterFrameGap = the shortest one */
@@ -151,16 +104,17 @@
 #define RTL_R32(reg)		((unsigned long) readl (ioaddr + (reg)))
 
 enum mac_version {
-	RTL_GIGA_MAC_VER_01 = 0x00,
-	RTL_GIGA_MAC_VER_02 = 0x01,
-	RTL_GIGA_MAC_VER_03 = 0x02,
-	RTL_GIGA_MAC_VER_04 = 0x03,
-	RTL_GIGA_MAC_VER_05 = 0x04,
-	RTL_GIGA_MAC_VER_11 = 0x0b,
-	RTL_GIGA_MAC_VER_12 = 0x0c,
-	RTL_GIGA_MAC_VER_13 = 0x0d,
-	RTL_GIGA_MAC_VER_14 = 0x0e,
-	RTL_GIGA_MAC_VER_15 = 0x0f
+	RTL_GIGA_MAC_VER_01 = 0x01, // 8169
+	RTL_GIGA_MAC_VER_02 = 0x02, // 8169S
+	RTL_GIGA_MAC_VER_03 = 0x03, // 8110S
+	RTL_GIGA_MAC_VER_04 = 0x04, // 8169SB
+	RTL_GIGA_MAC_VER_05 = 0x05, // 8110SCd
+	RTL_GIGA_MAC_VER_06 = 0x06, // 8110SCe
+	RTL_GIGA_MAC_VER_11 = 0x0b, // 8168Bb
+	RTL_GIGA_MAC_VER_12 = 0x0c, // 8168Be 8168Bf
+	RTL_GIGA_MAC_VER_13 = 0x0d, // 8101Eb 8101Ec
+	RTL_GIGA_MAC_VER_14 = 0x0e, // 8101
+	RTL_GIGA_MAC_VER_15 = 0x0f  // 8101
 };
 
 enum phy_version {
@@ -180,11 +134,12 @@
 	u8 mac_version;
 	u32 RxConfigMask;	/* Clears the bits supported by this chip */
 } rtl_chip_info[] = {
-	_R("RTL8169",		RTL_GIGA_MAC_VER_01, 0xff7e1880),
-	_R("RTL8169s/8110s",	RTL_GIGA_MAC_VER_02, 0xff7e1880),
-	_R("RTL8169s/8110s",	RTL_GIGA_MAC_VER_03, 0xff7e1880),
-	_R("RTL8169sb/8110sb",	RTL_GIGA_MAC_VER_04, 0xff7e1880),
-	_R("RTL8169sc/8110sc",	RTL_GIGA_MAC_VER_05, 0xff7e1880),
+	_R("RTL8169",		RTL_GIGA_MAC_VER_01, 0xff7e1880), // 8169
+	_R("RTL8169s",		RTL_GIGA_MAC_VER_02, 0xff7e1880), // 8169S
+	_R("RTL8110s",		RTL_GIGA_MAC_VER_03, 0xff7e1880), // 8110S
+	_R("RTL8169sb/8110sb",	RTL_GIGA_MAC_VER_04, 0xff7e1880), // 8169SB
+	_R("RTL8169sc/8110sc",	RTL_GIGA_MAC_VER_05, 0xff7e1880), // 8110SCd
+	_R("RTL8169sc/8110sc",	RTL_GIGA_MAC_VER_06, 0xff7e1880), // 8110SCe
 	_R("RTL8168b/8111b",	RTL_GIGA_MAC_VER_11, 0xff7e1880), // PCI-E
 	_R("RTL8168b/8111b",	RTL_GIGA_MAC_VER_12, 0xff7e1880), // PCI-E
 	_R("RTL8101e",		RTL_GIGA_MAC_VER_13, 0xff7e1880), // PCI-E 8139
@@ -199,20 +154,15 @@
 	RTL_CFG_2
 };
 
-static const struct {
-	unsigned int region;
-	unsigned int align;
-} rtl_cfg_info[] = {
-	[RTL_CFG_0] = { 1, NET_IP_ALIGN },
-	[RTL_CFG_1] = { 2, NET_IP_ALIGN },
-	[RTL_CFG_2] = { 2, 8 }
-};
+static void rtl_hw_start_8169(struct net_device *);
+static void rtl_hw_start_8168(struct net_device *);
+static void rtl_hw_start_8101(struct net_device *);
 
 static struct pci_device_id rtl8169_pci_tbl[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8129), 0, 0, RTL_CFG_0 },
 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8136), 0, 0, RTL_CFG_2 },
 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8167), 0, 0, RTL_CFG_0 },
-	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8168), 0, 0, RTL_CFG_2 },
+	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8168), 0, 0, RTL_CFG_1 },
 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8169), 0, 0, RTL_CFG_0 },
 	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK,	0x4300), 0, 0, RTL_CFG_0 },
 	{ PCI_DEVICE(0x1259,			0xc107), 0, 0, RTL_CFG_0 },
@@ -230,62 +180,63 @@
 	u32 msg_enable;
 } debug = { -1 };
 
-enum RTL8169_registers {
-	MAC0 = 0,		/* Ethernet hardware address. */
-	MAR0 = 8,		/* Multicast filter. */
-	CounterAddrLow = 0x10,
-	CounterAddrHigh = 0x14,
-	TxDescStartAddrLow = 0x20,
-	TxDescStartAddrHigh = 0x24,
-	TxHDescStartAddrLow = 0x28,
-	TxHDescStartAddrHigh = 0x2c,
-	FLASH = 0x30,
-	ERSR = 0x36,
-	ChipCmd = 0x37,
-	TxPoll = 0x38,
-	IntrMask = 0x3C,
-	IntrStatus = 0x3E,
-	TxConfig = 0x40,
-	RxConfig = 0x44,
-	RxMissed = 0x4C,
-	Cfg9346 = 0x50,
-	Config0 = 0x51,
-	Config1 = 0x52,
-	Config2 = 0x53,
-	Config3 = 0x54,
-	Config4 = 0x55,
-	Config5 = 0x56,
-	MultiIntr = 0x5C,
-	PHYAR = 0x60,
-	TBICSR = 0x64,
-	TBI_ANAR = 0x68,
-	TBI_LPAR = 0x6A,
-	PHYstatus = 0x6C,
-	RxMaxSize = 0xDA,
-	CPlusCmd = 0xE0,
-	IntrMitigate = 0xE2,
-	RxDescAddrLow = 0xE4,
-	RxDescAddrHigh = 0xE8,
-	EarlyTxThres = 0xEC,
-	FuncEvent = 0xF0,
-	FuncEventMask = 0xF4,
-	FuncPresetState = 0xF8,
-	FuncForceEvent = 0xFC,
+enum rtl_registers {
+	MAC0		= 0,	/* Ethernet hardware address. */
+	MAC4		= 4,
+	MAR0		= 8,	/* Multicast filter. */
+	CounterAddrLow		= 0x10,
+	CounterAddrHigh		= 0x14,
+	TxDescStartAddrLow	= 0x20,
+	TxDescStartAddrHigh	= 0x24,
+	TxHDescStartAddrLow	= 0x28,
+	TxHDescStartAddrHigh	= 0x2c,
+	FLASH		= 0x30,
+	ERSR		= 0x36,
+	ChipCmd		= 0x37,
+	TxPoll		= 0x38,
+	IntrMask	= 0x3c,
+	IntrStatus	= 0x3e,
+	TxConfig	= 0x40,
+	RxConfig	= 0x44,
+	RxMissed	= 0x4c,
+	Cfg9346		= 0x50,
+	Config0		= 0x51,
+	Config1		= 0x52,
+	Config2		= 0x53,
+	Config3		= 0x54,
+	Config4		= 0x55,
+	Config5		= 0x56,
+	MultiIntr	= 0x5c,
+	PHYAR		= 0x60,
+	TBICSR		= 0x64,
+	TBI_ANAR	= 0x68,
+	TBI_LPAR	= 0x6a,
+	PHYstatus	= 0x6c,
+	RxMaxSize	= 0xda,
+	CPlusCmd	= 0xe0,
+	IntrMitigate	= 0xe2,
+	RxDescAddrLow	= 0xe4,
+	RxDescAddrHigh	= 0xe8,
+	EarlyTxThres	= 0xec,
+	FuncEvent	= 0xf0,
+	FuncEventMask	= 0xf4,
+	FuncPresetState	= 0xf8,
+	FuncForceEvent	= 0xfc,
 };
 
-enum RTL8169_register_content {
+enum rtl_register_content {
 	/* InterruptStatusBits */
-	SYSErr = 0x8000,
-	PCSTimeout = 0x4000,
-	SWInt = 0x0100,
-	TxDescUnavail = 0x80,
-	RxFIFOOver = 0x40,
-	LinkChg = 0x20,
-	RxOverflow = 0x10,
-	TxErr = 0x08,
-	TxOK = 0x04,
-	RxErr = 0x02,
-	RxOK = 0x01,
+	SYSErr		= 0x8000,
+	PCSTimeout	= 0x4000,
+	SWInt		= 0x0100,
+	TxDescUnavail	= 0x0080,
+	RxFIFOOver	= 0x0040,
+	LinkChg		= 0x0020,
+	RxOverflow	= 0x0010,
+	TxErr		= 0x0008,
+	TxOK		= 0x0004,
+	RxErr		= 0x0002,
+	RxOK		= 0x0001,
 
 	/* RxStatusDesc */
 	RxFOVF	= (1 << 23),
@@ -295,26 +246,31 @@
 	RxCRC	= (1 << 19),
 
 	/* ChipCmdBits */
-	CmdReset = 0x10,
-	CmdRxEnb = 0x08,
-	CmdTxEnb = 0x04,
-	RxBufEmpty = 0x01,
+	CmdReset	= 0x10,
+	CmdRxEnb	= 0x08,
+	CmdTxEnb	= 0x04,
+	RxBufEmpty	= 0x01,
+
+	/* TXPoll register p.5 */
+	HPQ		= 0x80,		/* Poll cmd on the high prio queue */
+	NPQ		= 0x40,		/* Poll cmd on the low prio queue */
+	FSWInt		= 0x01,		/* Forced software interrupt */
 
 	/* Cfg9346Bits */
-	Cfg9346_Lock = 0x00,
-	Cfg9346_Unlock = 0xC0,
+	Cfg9346_Lock	= 0x00,
+	Cfg9346_Unlock	= 0xc0,
 
 	/* rx_mode_bits */
-	AcceptErr = 0x20,
-	AcceptRunt = 0x10,
-	AcceptBroadcast = 0x08,
-	AcceptMulticast = 0x04,
-	AcceptMyPhys = 0x02,
-	AcceptAllPhys = 0x01,
+	AcceptErr	= 0x20,
+	AcceptRunt	= 0x10,
+	AcceptBroadcast	= 0x08,
+	AcceptMulticast	= 0x04,
+	AcceptMyPhys	= 0x02,
+	AcceptAllPhys	= 0x01,
 
 	/* RxConfigBits */
-	RxCfgFIFOShift = 13,
-	RxCfgDMAShift = 8,
+	RxCfgFIFOShift	= 13,
+	RxCfgDMAShift	=  8,
 
 	/* TxConfigBits */
 	TxInterFrameGapShift = 24,
@@ -323,6 +279,10 @@
 	/* Config1 register p.24 */
 	PMEnable	= (1 << 0),	/* Power Management Enable */
 
+	/* Config2 register p. 25 */
+	PCI_Clock_66MHz = 0x01,
+	PCI_Clock_33MHz = 0x00,
+
 	/* Config3 register p.25 */
 	MagicPacket	= (1 << 5),	/* Wake up when receives a Magic Packet */
 	LinkUp		= (1 << 4),	/* Wake up when the cable connection is re-established */
@@ -343,36 +303,34 @@
 	TBINwComplete	= 0x01000000,
 
 	/* CPlusCmd p.31 */
+	PktCntrDisable	= (1 << 7),	// 8168
 	RxVlan		= (1 << 6),
 	RxChkSum	= (1 << 5),
 	PCIDAC		= (1 << 4),
 	PCIMulRW	= (1 << 3),
+	INTT_0		= 0x0000,	// 8168
+	INTT_1		= 0x0001,	// 8168
+	INTT_2		= 0x0002,	// 8168
+	INTT_3		= 0x0003,	// 8168
 
 	/* rtl8169_PHYstatus */
-	TBI_Enable = 0x80,
-	TxFlowCtrl = 0x40,
-	RxFlowCtrl = 0x20,
-	_1000bpsF = 0x10,
-	_100bps = 0x08,
-	_10bps = 0x04,
-	LinkStatus = 0x02,
-	FullDup = 0x01,
-
-	/* _MediaType */
-	_10_Half = 0x01,
-	_10_Full = 0x02,
-	_100_Half = 0x04,
-	_100_Full = 0x08,
-	_1000_Full = 0x10,
+	TBI_Enable	= 0x80,
+	TxFlowCtrl	= 0x40,
+	RxFlowCtrl	= 0x20,
+	_1000bpsF	= 0x10,
+	_100bps		= 0x08,
+	_10bps		= 0x04,
+	LinkStatus	= 0x02,
+	FullDup		= 0x01,
 
 	/* _TBICSRBit */
-	TBILinkOK = 0x02000000,
+	TBILinkOK	= 0x02000000,
 
 	/* DumpCounterCommand */
-	CounterDump = 0x8,
+	CounterDump	= 0x8,
 };
 
-enum _DescStatusBit {
+enum desc_status_bit {
 	DescOwn		= (1 << 31), /* Descriptor is owned by NIC */
 	RingEnd		= (1 << 30), /* End of descriptor ring */
 	FirstFrag	= (1 << 29), /* First segment of a packet */
@@ -405,15 +363,15 @@
 #define RsvdMask	0x3fffc000
 
 struct TxDesc {
-	u32 opts1;
-	u32 opts2;
-	u64 addr;
+	__le32 opts1;
+	__le32 opts2;
+	__le64 addr;
 };
 
 struct RxDesc {
-	u32 opts1;
-	u32 opts2;
-	u64 addr;
+	__le32 opts1;
+	__le32 opts2;
+	__le64 addr;
 };
 
 struct ring_info {
@@ -446,6 +404,8 @@
 	unsigned rx_buf_sz;
 	struct timer_list timer;
 	u16 cp_cmd;
+	u16 intr_event;
+	u16 napi_event;
 	u16 intr_mask;
 	int phy_auto_nego_reg;
 	int phy_1000_ctrl_reg;
@@ -455,6 +415,7 @@
 	int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex);
 	void (*get_settings)(struct net_device *, struct ethtool_cmd *);
 	void (*phy_reset_enable)(void __iomem *);
+	void (*hw_start)(struct net_device *);
 	unsigned int (*phy_reset_pending)(void __iomem *);
 	unsigned int (*link_ok)(void __iomem *);
 	struct delayed_work task;
@@ -463,8 +424,6 @@
 
 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
 MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
-module_param_array(media, int, &num_media, 0);
-MODULE_PARM_DESC(media, "force phy operation. Deprecated by ethtool (8).");
 module_param(rx_copybreak, int, 0);
 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
 module_param(use_dac, int, 0);
@@ -478,9 +437,9 @@
 static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance);
 static int rtl8169_init_ring(struct net_device *dev);
-static void rtl8169_hw_start(struct net_device *dev);
+static void rtl_hw_start(struct net_device *dev);
 static int rtl8169_close(struct net_device *dev);
-static void rtl8169_set_rx_mode(struct net_device *dev);
+static void rtl_set_rx_mode(struct net_device *dev);
 static void rtl8169_tx_timeout(struct net_device *dev);
 static struct net_device_stats *rtl8169_get_stats(struct net_device *dev);
 static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *,
@@ -493,35 +452,37 @@
 static int rtl8169_poll(struct net_device *dev, int *budget);
 #endif
 
-static const u16 rtl8169_intr_mask =
-	SYSErr | LinkChg | RxOverflow | RxFIFOOver | TxErr | TxOK | RxErr | RxOK;
-static const u16 rtl8169_napi_event =
-	RxOK | RxOverflow | RxFIFOOver | TxOK | TxErr;
 static const unsigned int rtl8169_rx_config =
 	(RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift);
 
-static void mdio_write(void __iomem *ioaddr, int RegAddr, int value)
+static void mdio_write(void __iomem *ioaddr, int reg_addr, int value)
 {
 	int i;
 
-	RTL_W32(PHYAR, 0x80000000 | (RegAddr & 0xFF) << 16 | value);
+	RTL_W32(PHYAR, 0x80000000 | (reg_addr & 0xFF) << 16 | value);
 
 	for (i = 20; i > 0; i--) {
-		/* Check if the RTL8169 has completed writing to the specified MII register */
+		/*
+		 * Check if the RTL8169 has completed writing to the specified
+		 * MII register.
+		 */
 		if (!(RTL_R32(PHYAR) & 0x80000000))
 			break;
 		udelay(25);
 	}
 }
 
-static int mdio_read(void __iomem *ioaddr, int RegAddr)
+static int mdio_read(void __iomem *ioaddr, int reg_addr)
 {
 	int i, value = -1;
 
-	RTL_W32(PHYAR, 0x0 | (RegAddr & 0xFF) << 16);
+	RTL_W32(PHYAR, 0x0 | (reg_addr & 0xFF) << 16);
 
 	for (i = 20; i > 0; i--) {
-		/* Check if the RTL8169 has completed retrieving data from the specified MII register */
+		/*
+		 * Check if the RTL8169 has completed retrieving data from
+		 * the specified MII register.
+		 */
 		if (RTL_R32(PHYAR) & 0x80000000) {
 			value = (int) (RTL_R32(PHYAR) & 0xFFFF);
 			break;
@@ -579,7 +540,8 @@
 }
 
 static void rtl8169_check_link_status(struct net_device *dev,
-				      struct rtl8169_private *tp, void __iomem *ioaddr)
+				      struct rtl8169_private *tp,
+				      void __iomem *ioaddr)
 {
 	unsigned long flags;
 
@@ -596,38 +558,6 @@
 	spin_unlock_irqrestore(&tp->lock, flags);
 }
 
-static void rtl8169_link_option(int idx, u8 *autoneg, u16 *speed, u8 *duplex)
-{
-	struct {
-		u16 speed;
-		u8 duplex;
-		u8 autoneg;
-		u8 media;
-	} link_settings[] = {
-		{ SPEED_10,	DUPLEX_HALF, AUTONEG_DISABLE,	_10_Half },
-		{ SPEED_10,	DUPLEX_FULL, AUTONEG_DISABLE,	_10_Full },
-		{ SPEED_100,	DUPLEX_HALF, AUTONEG_DISABLE,	_100_Half },
-		{ SPEED_100,	DUPLEX_FULL, AUTONEG_DISABLE,	_100_Full },
-		{ SPEED_1000,	DUPLEX_FULL, AUTONEG_DISABLE,	_1000_Full },
-		/* Make TBI happy */
-		{ SPEED_1000,	DUPLEX_FULL, AUTONEG_ENABLE,	0xff }
-	}, *p;
-	unsigned char option;
-
-	option = ((idx < MAX_UNITS) && (idx >= 0)) ? media[idx] : 0xff;
-
-	if ((option != 0xff) && !idx && netif_msg_drv(&debug))
-		printk(KERN_WARNING PFX "media option is deprecated.\n");
-
-	for (p = link_settings; p->media != 0xff; p++) {
-		if (p->media == option)
-			break;
-	}
-	*autoneg = p->autoneg;
-	*speed = p->speed;
-	*duplex = p->duplex;
-}
-
 static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
@@ -667,7 +597,7 @@
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
 	void __iomem *ioaddr = tp->mmio_addr;
-	int i;
+	unsigned int i;
 	static struct {
 		u32 opt;
 		u16 reg;
@@ -893,8 +823,7 @@
 	int ret;
 
 	if (tp->vlgrp && (opts2 & RxVlanTag)) {
-		rtl8169_rx_hwaccel_skb(skb, tp->vlgrp,
-				       swab16(opts2 & 0xffff));
+		rtl8169_rx_hwaccel_skb(skb, tp->vlgrp, swab16(opts2 & 0xffff));
 		ret = 0;
 	} else
 		ret = -1;
@@ -1115,7 +1044,6 @@
 	}
 }
 
-
 static const struct ethtool_ops rtl8169_ethtool_ops = {
 	.get_drvinfo		= rtl8169_get_drvinfo,
 	.get_regs_len		= rtl8169_get_regs_len,
@@ -1141,8 +1069,8 @@
 	.get_perm_addr		= ethtool_op_get_perm_addr,
 };
 
-static void rtl8169_write_gmii_reg_bit(void __iomem *ioaddr, int reg, int bitnum,
-				       int bitval)
+static void rtl8169_write_gmii_reg_bit(void __iomem *ioaddr, int reg,
+				       int bitnum, int bitval)
 {
 	int val;
 
@@ -1152,8 +1080,20 @@
 	mdio_write(ioaddr, reg, val & 0xffff);
 }
 
-static void rtl8169_get_mac_version(struct rtl8169_private *tp, void __iomem *ioaddr)
+static void rtl8169_get_mac_version(struct rtl8169_private *tp,
+				    void __iomem *ioaddr)
 {
+	/*
+	 * The driver currently handles the 8168Bf and the 8168Be identically
+	 * but they can be identified more specifically through the test below
+	 * if needed:
+	 *
+	 * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
+	 *
+	 * Same thing for the 8101Eb and the 8101Ec:
+	 *
+	 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
+	 */
 	const struct {
 		u32 mask;
 		int mac_version;
@@ -1163,6 +1103,7 @@
 		{ 0x34000000,	RTL_GIGA_MAC_VER_13 },
 		{ 0x30800000,	RTL_GIGA_MAC_VER_14 },
 		{ 0x30000000,	RTL_GIGA_MAC_VER_11 },
+		{ 0x98000000,	RTL_GIGA_MAC_VER_06 },
 		{ 0x18000000,	RTL_GIGA_MAC_VER_05 },
 		{ 0x10000000,	RTL_GIGA_MAC_VER_04 },
 		{ 0x04000000,	RTL_GIGA_MAC_VER_03 },
@@ -1171,7 +1112,7 @@
 	}, *p = mac_info;
 	u32 reg;
 
-	reg = RTL_R32(TxConfig) & 0x7c800000;
+	reg = RTL_R32(TxConfig) & 0xfc800000;
 	while ((reg & p->mask) != p->mask)
 		p++;
 	tp->mac_version = p->mac_version;
@@ -1182,7 +1123,8 @@
 	dprintk("mac_version = 0x%02x\n", tp->mac_version);
 }
 
-static void rtl8169_get_phy_version(struct rtl8169_private *tp, void __iomem *ioaddr)
+static void rtl8169_get_phy_version(struct rtl8169_private *tp,
+				    void __iomem *ioaddr)
 {
 	const struct {
 		u16 mask;
@@ -1259,7 +1201,7 @@
 		  0xbf00 }	//w 0 15 0 bf00
 		}
 	}, *p = phy_magic;
-	int i;
+	unsigned int i;
 
 	rtl8169_print_mac_version(tp);
 	rtl8169_print_phy_version(tp);
@@ -1393,7 +1335,7 @@
 			      struct rtl8169_private *tp)
 {
 	void __iomem *ioaddr = tp->mmio_addr;
-	int i;
+	unsigned int i;
 
 	tp->phy_reset_enable(ioaddr);
 	for (i = 0; i < 100; i++) {
@@ -1408,21 +1350,16 @@
 static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
 {
 	void __iomem *ioaddr = tp->mmio_addr;
-	static int board_idx = -1;
-	u8 autoneg, duplex;
-	u16 speed;
-
-	board_idx++;
 
 	rtl8169_hw_phy_config(dev);
 
 	dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
 	RTL_W8(0x82, 0x01);
 
-	if (tp->mac_version < RTL_GIGA_MAC_VER_03) {
-		dprintk("Set PCI Latency=0x40\n");
-		pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
-	}
+	pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
+
+	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
+		pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
 
 	if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
 		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
@@ -1431,16 +1368,52 @@
 		mdio_write(ioaddr, 0x0b, 0x0000); //w 0x0b 15 0 0
 	}
 
-	rtl8169_link_option(board_idx, &autoneg, &speed, &duplex);
-
 	rtl8169_phy_reset(dev, tp);
 
-	rtl8169_set_speed(dev, autoneg, speed, duplex);
+	/*
+	 * rtl8169_set_speed_xmii takes good care of the Fast Ethernet-only
+	 * 8101. Don't panic.
+	 */
+	rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL);
 
 	if ((RTL_R8(PHYstatus) & TBI_Enable) && netif_msg_link(tp))
 		printk(KERN_INFO PFX "%s: TBI auto-negotiating\n", dev->name);
 }
 
+static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
+{
+	void __iomem *ioaddr = tp->mmio_addr;
+	u32 high;
+	u32 low;
+
+	low  = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24);
+	high = addr[4] | (addr[5] << 8);
+
+	spin_lock_irq(&tp->lock);
+
+	RTL_W8(Cfg9346, Cfg9346_Unlock);
+	RTL_W32(MAC0, low);
+	RTL_W32(MAC4, high);
+	RTL_W8(Cfg9346, Cfg9346_Lock);
+
+	spin_unlock_irq(&tp->lock);
+}
+
+static int rtl_set_mac_address(struct net_device *dev, void *p)
+{
+	struct rtl8169_private *tp = netdev_priv(dev);
+	struct sockaddr *addr = p;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+	rtl_rar_set(tp, dev->dev_addr);
+
+	return 0;
+}
+
 static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
@@ -1467,15 +1440,49 @@
 	return -EOPNOTSUPP;
 }
 
+static const struct rtl_cfg_info {
+	void (*hw_start)(struct net_device *);
+	unsigned int region;
+	unsigned int align;
+	u16 intr_event;
+	u16 napi_event;
+} rtl_cfg_infos[] = {
+	[RTL_CFG_0] = {
+		.hw_start	= rtl_hw_start_8169,
+		.region		= 1,
+		.align		= 0,
+		.intr_event	= SYSErr | LinkChg | RxOverflow |
+				  RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
+		.napi_event	= RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow
+	},
+	[RTL_CFG_1] = {
+		.hw_start	= rtl_hw_start_8168,
+		.region		= 2,
+		.align		= 8,
+		.intr_event	= SYSErr | LinkChg | RxOverflow |
+				  TxErr | TxOK | RxOK | RxErr,
+		.napi_event	= TxErr | TxOK | RxOK | RxOverflow
+	},
+	[RTL_CFG_2] = {
+		.hw_start	= rtl_hw_start_8101,
+		.region		= 2,
+		.align		= 8,
+		.intr_event	= SYSErr | LinkChg | RxOverflow | PCSTimeout |
+				  RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
+		.napi_event	= RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow
+	}
+};
+
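rtl_cfg_infos replaces scattered per-chip conditionals with one table indexed
by the driver_data field of the PCI ID entries; probe copies the chosen
hw_start pointer and interrupt masks into the private struct. A standalone
sketch of the idiom, with hypothetical names throughout:

#include <stdio.h>

/* Sketch of the driver_data-indexed dispatch idiom; all names here
 * are hypothetical stand-ins for rtl_cfg_infos and friends. */
struct chip_cfg {
	void (*hw_start)(void);
	unsigned int align;
};

static void start_giga(void) { puts("gigabit init"); }
static void start_fast(void) { puts("fast ethernet init"); }

static const struct chip_cfg cfgs[] = {
	{ start_giga, 0 },	/* CFG_0 */
	{ start_fast, 8 },	/* CFG_1 */
};

int main(void)
{
	unsigned long driver_data = 1;	/* would come from the PCI ID table */

	cfgs[driver_data].hw_start();	/* one indirect call, no chip ifs */
	return 0;
}
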
 static int __devinit
 rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
-	const unsigned int region = rtl_cfg_info[ent->driver_data].region;
+	const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
+	const unsigned int region = cfg->region;
 	struct rtl8169_private *tp;
 	struct net_device *dev;
 	void __iomem *ioaddr;
-	unsigned int pm_cap;
-	int i, rc;
+	unsigned int i;
+	int rc;
 
 	if (netif_msg_drv(&debug)) {
 		printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
@@ -1508,20 +1515,6 @@
 	if (rc < 0)
 		goto err_out_disable_2;
 
-	/* save power state before pci_enable_device overwrites it */
-	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
-	if (pm_cap) {
-		u16 pwr_command, acpi_idle_state;
-
-		pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pwr_command);
-		acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
-	} else {
-		if (netif_msg_probe(tp)) {
-			dev_err(&pdev->dev,
-				"PowerManagement capability not found.\n");
-		}
-	}
-
 	/* make sure PCI base addr 1 is MMIO */
 	if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
 		if (netif_msg_probe(tp)) {
@@ -1585,7 +1578,7 @@
 	RTL_W8(ChipCmd, CmdReset);
 
 	/* Check that the chip has finished the reset. */
-	for (i = 100; i > 0; i--) {
+	for (i = 0; i < 100; i++) {
 		if ((RTL_R8(ChipCmd) & CmdReset) == 0)
 			break;
 		msleep_interruptible(1);
@@ -1647,11 +1640,12 @@
 	SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
 	dev->stop = rtl8169_close;
 	dev->tx_timeout = rtl8169_tx_timeout;
-	dev->set_multicast_list = rtl8169_set_rx_mode;
+	dev->set_multicast_list = rtl_set_rx_mode;
 	dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
 	dev->irq = pdev->irq;
 	dev->base_addr = (unsigned long) ioaddr;
 	dev->change_mtu = rtl8169_change_mtu;
+	dev->set_mac_address = rtl_set_mac_address;
 
 #ifdef CONFIG_R8169_NAPI
 	dev->poll = rtl8169_poll;
@@ -1670,7 +1664,10 @@
 	tp->intr_mask = 0xffff;
 	tp->pci_dev = pdev;
 	tp->mmio_addr = ioaddr;
-	tp->align = rtl_cfg_info[ent->driver_data].align;
+	tp->align = cfg->align;
+	tp->hw_start = cfg->hw_start;
+	tp->intr_event = cfg->intr_event;
+	tp->napi_event = cfg->napi_event;
 
 	init_timer(&tp->timer);
 	tp->timer.data = (unsigned long) dev;
@@ -1685,15 +1682,17 @@
 	pci_set_drvdata(pdev, dev);
 
 	if (netif_msg_probe(tp)) {
+		u32 xid = RTL_R32(TxConfig) & 0x7cf0f8ff;
+
 		printk(KERN_INFO "%s: %s at 0x%lx, "
 		       "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, "
-		       "IRQ %d\n",
+		       "XID %08x IRQ %d\n",
 		       dev->name,
 		       rtl_chip_info[tp->chipset].name,
 		       dev->base_addr,
 		       dev->dev_addr[0], dev->dev_addr[1],
 		       dev->dev_addr[2], dev->dev_addr[3],
-		       dev->dev_addr[4], dev->dev_addr[5], dev->irq);
+		       dev->dev_addr[4], dev->dev_addr[5], xid, dev->irq);
 	}
 
 	rtl8169_init_phy(dev, tp);
@@ -1714,15 +1713,11 @@
 	goto out;
 }
 
-static void __devexit
-rtl8169_remove_one(struct pci_dev *pdev)
+static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct rtl8169_private *tp = netdev_priv(dev);
 
-	assert(dev != NULL);
-	assert(tp != NULL);
-
 	flush_scheduled_work();
 
 	unregister_netdev(dev);
@@ -1774,7 +1769,7 @@
 	if (retval < 0)
 		goto err_release_ring_2;
 
-	rtl8169_hw_start(dev);
+	rtl_hw_start(dev);
 
 	rtl8169_request_timer(dev);
 
@@ -1805,7 +1800,7 @@
 	RTL_R8(ChipCmd);
 }
 
-static void rtl8169_set_rx_tx_config_registers(struct rtl8169_private *tp)
+static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
 {
 	void __iomem *ioaddr = tp->mmio_addr;
 	u32 cfg = rtl8169_rx_config;
@@ -1818,47 +1813,92 @@
 		(InterFrameGap << TxInterFrameGapShift));
 }
 
-static void rtl8169_hw_start(struct net_device *dev)
+static void rtl_hw_start(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
 	void __iomem *ioaddr = tp->mmio_addr;
-	struct pci_dev *pdev = tp->pci_dev;
-	u16 cmd;
-	u32 i;
+	unsigned int i;
 
 	/* Soft reset the chip. */
 	RTL_W8(ChipCmd, CmdReset);
 
 	/* Check that the chip has finished the reset. */
-	for (i = 100; i > 0; i--) {
+	for (i = 0; i < 100; i++) {
 		if ((RTL_R8(ChipCmd) & CmdReset) == 0)
 			break;
 		msleep_interruptible(1);
 	}
 
+	tp->hw_start(dev);
+
+	netif_start_queue(dev);
+}
+
+static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
+					 void __iomem *ioaddr)
+{
+	/*
+	 * Magic spell: some iop3xx ARM boards need the TxDescAddrHigh
+	 * register to be written before TxDescAddrLow to work.
+	 * Switching from MMIO to I/O access fixes the issue as well.
+	 */
+	RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
+	RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_32BIT_MASK);
+	RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
+	RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_32BIT_MASK);
+}
+
+static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
+{
+	u16 cmd;
+
+	cmd = RTL_R16(CPlusCmd);
+	RTL_W16(CPlusCmd, cmd);
+	return cmd;
+}
+
+static void rtl_set_rx_max_size(void __iomem *ioaddr)
+{
+	/* A low value hurts. Let's disable the size filtering. */
+	RTL_W16(RxMaxSize, 16383);
+}
+
+static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
+{
+	struct {
+		u32 mac_version;
+		u32 clk;
+		u32 val;
+	} cfg2_info[] = {
+		{ RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
+		{ RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
+		{ RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
+		{ RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
+	}, *p = cfg2_info;
+	unsigned int i;
+	u32 clk;
+
+	clk = RTL_R8(Config2) & PCI_Clock_66MHz;
+	for (i = 0; i < ARRAY_SIZE(cfg2_info); i++) {
+		if ((p->mac_version == mac_version) && (p->clk == clk)) {
+			RTL_W32(0x7c, p->val);
+			break;
+		}
+	}
+}
+
+static void rtl_hw_start_8169(struct net_device *dev)
+{
+	struct rtl8169_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->mmio_addr;
+	struct pci_dev *pdev = tp->pci_dev;
+
 	if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
 		RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
 		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
 	}
 
-	if (tp->mac_version == RTL_GIGA_MAC_VER_13) {
-		pci_write_config_word(pdev, 0x68, 0x00);
-		pci_write_config_word(pdev, 0x69, 0x08);
-	}
-
-	/* Undocumented stuff. */
-	if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
-		/* Realtek's r1000_n.c driver uses '&& 0x01' here. Well... */
-		if ((RTL_R8(Config2) & 0x07) & 0x01)
-			RTL_W32(0x7c, 0x0007ffff);
-
-		RTL_W32(0x7c, 0x0007ff00);
-
-		pci_read_config_word(pdev, PCI_COMMAND, &cmd);
-		cmd = cmd & 0xef;
-		pci_write_config_word(pdev, PCI_COMMAND, cmd);
-	}
-
 	RTL_W8(Cfg9346, Cfg9346_Unlock);
 	if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
 	    (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
@@ -1868,19 +1908,11 @@
 
 	RTL_W8(EarlyTxThres, EarlyTxThld);
 
-	/* Low hurts. Let's disable the filtering. */
-	RTL_W16(RxMaxSize, 16383);
+	rtl_set_rx_max_size(ioaddr);
 
-	if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
-	    (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
-	    (tp->mac_version == RTL_GIGA_MAC_VER_03) ||
-	    (tp->mac_version == RTL_GIGA_MAC_VER_04))
-		rtl8169_set_rx_tx_config_registers(tp);
+	rtl_set_rx_tx_config_registers(tp);
 
-	cmd = RTL_R16(CPlusCmd);
-	RTL_W16(CPlusCmd, cmd);
-
-	tp->cp_cmd |= cmd | PCIMulRW;
+	tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
 
 	if ((tp->mac_version == RTL_GIGA_MAC_VER_02) ||
 	    (tp->mac_version == RTL_GIGA_MAC_VER_03)) {
@@ -1891,29 +1923,15 @@
 
 	RTL_W16(CPlusCmd, tp->cp_cmd);
 
+	rtl8169_set_magic_reg(ioaddr, tp->mac_version);
+
 	/*
 	 * Undocumented corner. Supposedly:
 	 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
 	 */
 	RTL_W16(IntrMitigate, 0x0000);
 
-	/*
-	 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
-	 * register to be written before TxDescAddrLow to work.
-	 * Switching from MMIO to I/O access fixes the issue as well.
-	 */
-	RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr >> 32));
-	RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr & DMA_32BIT_MASK));
-	RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr >> 32));
-	RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr & DMA_32BIT_MASK));
-
-	if ((tp->mac_version != RTL_GIGA_MAC_VER_01) &&
-	    (tp->mac_version != RTL_GIGA_MAC_VER_02) &&
-	    (tp->mac_version != RTL_GIGA_MAC_VER_03) &&
-	    (tp->mac_version != RTL_GIGA_MAC_VER_04)) {
-		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
-		rtl8169_set_rx_tx_config_registers(tp);
-	}
+	rtl_set_rx_tx_desc_registers(tp, ioaddr);
 
 	RTL_W8(Cfg9346, Cfg9346_Lock);
 
@@ -1922,15 +1940,107 @@
 
 	RTL_W32(RxMissed, 0);
 
-	rtl8169_set_rx_mode(dev);
+	rtl_set_rx_mode(dev);
 
 	/* no early-rx interrupts */
 	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
 
 	/* Enable all known interrupts by setting the interrupt mask. */
-	RTL_W16(IntrMask, rtl8169_intr_mask);
+	RTL_W16(IntrMask, tp->intr_event);
 
-	netif_start_queue(dev);
+	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+}
+
+static void rtl_hw_start_8168(struct net_device *dev)
+{
+	struct rtl8169_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->mmio_addr;
+	struct pci_dev *pdev = tp->pci_dev;
+	u8 ctl;
+
+	RTL_W8(Cfg9346, Cfg9346_Unlock);
+
+	RTL_W8(EarlyTxThres, EarlyTxThld);
+
+	rtl_set_rx_max_size(ioaddr);
+
+	rtl_set_rx_tx_config_registers(tp);
+
+	tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;
+
+	RTL_W16(CPlusCmd, tp->cp_cmd);
+
+	/* Tx performance tweak. */
+	pci_read_config_byte(pdev, 0x69, &ctl);
+	ctl = (ctl & ~0x70) | 0x50;
+	pci_write_config_byte(pdev, 0x69, ctl);
+
+	RTL_W16(IntrMitigate, 0x5151);
+
+	/* Workaround for Rx FIFO overflow. */
+	if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
+		tp->intr_event |= RxFIFOOver | PCSTimeout;
+		tp->intr_event &= ~RxOverflow;
+	}
+
+	rtl_set_rx_tx_desc_registers(tp, ioaddr);
+
+	RTL_W8(Cfg9346, Cfg9346_Lock);
+
+	RTL_R8(IntrMask);
+
+	RTL_W32(RxMissed, 0);
+
+	rtl_set_rx_mode(dev);
+
+	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+
+	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
+
+	RTL_W16(IntrMask, tp->intr_event);
+}
+
+static void rtl_hw_start_8101(struct net_device *dev)
+{
+	struct rtl8169_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->mmio_addr;
+	struct pci_dev *pdev = tp->pci_dev;
+
+	if (tp->mac_version == RTL_GIGA_MAC_VER_13) {
+		pci_write_config_word(pdev, 0x68, 0x00);
+		pci_write_config_word(pdev, 0x69, 0x08);
+	}
+
+	RTL_W8(Cfg9346, Cfg9346_Unlock);
+
+	RTL_W8(EarlyTxThres, EarlyTxThld);
+
+	rtl_set_rx_max_size(ioaddr);
+
+	tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
+
+	RTL_W16(CPlusCmd, tp->cp_cmd);
+
+	RTL_W16(IntrMitigate, 0x0000);
+
+	rtl_set_rx_tx_desc_registers(tp, ioaddr);
+
+	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+	rtl_set_rx_tx_config_registers(tp);
+
+	RTL_W8(Cfg9346, Cfg9346_Lock);
+
+	RTL_R8(IntrMask);
+
+	RTL_W32(RxMissed, 0);
+
+	rtl_set_rx_mode(dev);
+
+	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+
+	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
+
+	RTL_W16(IntrMask, tp->intr_event);
 }
 
 static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
@@ -1956,7 +2066,7 @@
 
 	netif_poll_enable(dev);
 
-	rtl8169_hw_start(dev);
+	rtl_hw_start(dev);
 
 	rtl8169_request_timer(dev);
 
@@ -1997,38 +2107,38 @@
 	rtl8169_mark_to_asic(desc, rx_buf_sz);
 }
 
-static int rtl8169_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
-				struct RxDesc *desc, int rx_buf_sz,
-				unsigned int align)
+static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev,
+					    struct net_device *dev,
+					    struct RxDesc *desc, int rx_buf_sz,
+					    unsigned int align)
 {
 	struct sk_buff *skb;
 	dma_addr_t mapping;
-	int ret = 0;
+	unsigned int pad;
 
-	skb = dev_alloc_skb(rx_buf_sz + align);
+	pad = align ? align : NET_IP_ALIGN;
+
+	skb = netdev_alloc_skb(dev, rx_buf_sz + pad);
 	if (!skb)
 		goto err_out;
 
-	skb_reserve(skb, (align - 1) & (unsigned long)skb->data);
-	*sk_buff = skb;
+	skb_reserve(skb, align ? ((pad - 1) & (unsigned long)skb->data) : pad);
 
 	mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
 				 PCI_DMA_FROMDEVICE);
 
 	rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
-
 out:
-	return ret;
+	return skb;
 
 err_out:
-	ret = -ENOMEM;
 	rtl8169_make_unusable_by_asic(desc);
 	goto out;
 }
 
 static void rtl8169_rx_clear(struct rtl8169_private *tp)
 {
-	int i;
+	unsigned int i;
 
 	for (i = 0; i < NUM_RX_DESC; i++) {
 		if (tp->Rx_skbuff[i]) {
@@ -2043,16 +2153,22 @@
 {
 	u32 cur;
 
-	for (cur = start; end - cur > 0; cur++) {
-		int ret, i = cur % NUM_RX_DESC;
+	for (cur = start; end - cur != 0; cur++) {
+		struct sk_buff *skb;
+		unsigned int i = cur % NUM_RX_DESC;
+
+		WARN_ON((s32)(end - cur) < 0);
 
 		if (tp->Rx_skbuff[i])
 			continue;
 
-		ret = rtl8169_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
-			tp->RxDescArray + i, tp->rx_buf_sz, tp->align);
-		if (ret < 0)
+		skb = rtl8169_alloc_rx_skb(tp->pci_dev, dev,
+					   tp->RxDescArray + i,
+					   tp->rx_buf_sz, tp->align);
+		if (!skb)
 			break;
+
+		tp->Rx_skbuff[i] = skb;
 	}
 	return cur - start;
 }
@@ -2164,14 +2280,9 @@
 
 	ret = rtl8169_open(dev);
 	if (unlikely(ret < 0)) {
-		if (net_ratelimit()) {
-			struct rtl8169_private *tp = netdev_priv(dev);
-
-			if (netif_msg_drv(tp)) {
-				printk(PFX KERN_ERR
-				       "%s: reinit failure (status = %d)."
-				       " Rescheduling.\n", dev->name, ret);
-			}
+		if (net_ratelimit() && netif_msg_drv(tp)) {
+			printk(KERN_ERR PFX "%s: reinit failure (status = %d)."
+			       " Rescheduling.\n", dev->name, ret);
 		}
 		rtl8169_schedule_work(dev, rtl8169_reinit_task);
 	}
@@ -2198,16 +2309,12 @@
 
 	if (tp->dirty_rx == tp->cur_rx) {
 		rtl8169_init_ring_indexes(tp);
-		rtl8169_hw_start(dev);
+		rtl_hw_start(dev);
 		netif_wake_queue(dev);
 	} else {
-		if (net_ratelimit()) {
-			struct rtl8169_private *tp = netdev_priv(dev);
-
-			if (netif_msg_intr(tp)) {
-				printk(PFX KERN_EMERG
-				       "%s: Rx buffers shortage\n", dev->name);
-			}
+		if (net_ratelimit() && netif_msg_intr(tp)) {
+			printk(KERN_EMERG PFX "%s: Rx buffer shortage\n",
+			       dev->name);
 		}
 		rtl8169_schedule_work(dev, rtl8169_reset_task);
 	}
@@ -2344,7 +2451,7 @@
 
 	smp_wmb();
 
-	RTL_W8(TxPoll, 0x40);	/* set polling bit */
+	RTL_W8(TxPoll, NPQ);	/* set polling bit */
 
 	if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
 		netif_stop_queue(dev);
@@ -2414,16 +2521,12 @@
 	rtl8169_schedule_work(dev, rtl8169_reinit_task);
 }
 
-static void
-rtl8169_tx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
-		     void __iomem *ioaddr)
+static void rtl8169_tx_interrupt(struct net_device *dev,
+				 struct rtl8169_private *tp,
+				 void __iomem *ioaddr)
 {
 	unsigned int dirty_tx, tx_left;
 
-	assert(dev != NULL);
-	assert(tp != NULL);
-	assert(ioaddr != NULL);
-
 	dirty_tx = tp->dirty_tx;
 	smp_rmb();
 	tx_left = tp->cur_tx - dirty_tx;
@@ -2480,38 +2583,37 @@
 		skb->ip_summed = CHECKSUM_NONE;
 }
 
-static inline int rtl8169_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
-				      struct RxDesc *desc, int rx_buf_sz,
-				      unsigned int align)
+static inline bool rtl8169_try_rx_copy(struct sk_buff **sk_buff,
+				       struct rtl8169_private *tp, int pkt_size,
+				       dma_addr_t addr)
 {
-	int ret = -1;
+	struct sk_buff *skb;
+	bool done = false;
 
-	if (pkt_size < rx_copybreak) {
-		struct sk_buff *skb;
+	if (pkt_size >= rx_copybreak)
+		goto out;
 
-		skb = dev_alloc_skb(pkt_size + align);
-		if (skb) {
-			skb_reserve(skb, (align - 1) & (unsigned long)skb->data);
-			eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
-			*sk_buff = skb;
-			rtl8169_mark_to_asic(desc, rx_buf_sz);
-			ret = 0;
-		}
-	}
-	return ret;
+	skb = netdev_alloc_skb(tp->dev, pkt_size + NET_IP_ALIGN);
+	if (!skb)
+		goto out;
+
+	pci_dma_sync_single_for_cpu(tp->pci_dev, addr, pkt_size,
+				    PCI_DMA_FROMDEVICE);
+	skb_reserve(skb, NET_IP_ALIGN);
+	skb_copy_from_linear_data(*sk_buff, skb->data, pkt_size);
+	*sk_buff = skb;
+	done = true;
+out:
+	return done;
 }
 
-static int
-rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
-		     void __iomem *ioaddr)
+static int rtl8169_rx_interrupt(struct net_device *dev,
+				struct rtl8169_private *tp,
+				void __iomem *ioaddr)
 {
 	unsigned int cur_rx, rx_left;
 	unsigned int delta, count;
 
-	assert(dev != NULL);
-	assert(tp != NULL);
-	assert(ioaddr != NULL);
-
 	cur_rx = tp->cur_rx;
 	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
 	rx_left = rtl8169_rx_quota(rx_left, (u32) dev->quota);
@@ -2544,9 +2646,9 @@
 			rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
 		} else {
 			struct sk_buff *skb = tp->Rx_skbuff[entry];
+			dma_addr_t addr = le64_to_cpu(desc->addr);
 			int pkt_size = (status & 0x00001FFF) - 4;
-			void (*pci_action)(struct pci_dev *, dma_addr_t,
-				size_t, int) = pci_dma_sync_single_for_device;
+			struct pci_dev *pdev = tp->pci_dev;
 
 			/*
 			 * The driver does not support incoming fragmented
@@ -2562,19 +2664,16 @@
 
 			rtl8169_rx_csum(skb, desc);
 
-			pci_dma_sync_single_for_cpu(tp->pci_dev,
-				le64_to_cpu(desc->addr), tp->rx_buf_sz,
-				PCI_DMA_FROMDEVICE);
-
-			if (rtl8169_try_rx_copy(&skb, pkt_size, desc,
-						tp->rx_buf_sz, tp->align)) {
-				pci_action = pci_unmap_single;
+			if (rtl8169_try_rx_copy(&skb, tp, pkt_size, addr)) {
+				pci_dma_sync_single_for_device(pdev, addr,
+					pkt_size, PCI_DMA_FROMDEVICE);
+				rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
+			} else {
+				pci_unmap_single(pdev, addr, pkt_size,
+						 PCI_DMA_FROMDEVICE);
 				tp->Rx_skbuff[entry] = NULL;
 			}
 
-			pci_action(tp->pci_dev, le64_to_cpu(desc->addr),
-				   tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
-
 			skb_put(skb, pkt_size);
 			skb->protocol = eth_type_trans(skb, dev);
 
@@ -2585,6 +2684,13 @@
 			tp->stats.rx_bytes += pkt_size;
 			tp->stats.rx_packets++;
 		}
+
+		/* Workaround for AMD platforms. */
+		if ((desc->opts2 & 0xfffe000) &&
+		    (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
+			desc->opts2 = 0;
+			cur_rx++;
+		}
 	}
 
 	count = cur_rx - tp->cur_rx;
@@ -2608,11 +2714,9 @@
 	return count;
 }
 
-/* The interrupt handler does all of the Rx thread work and cleans up after the Tx thread. */
-static irqreturn_t
-rtl8169_interrupt(int irq, void *dev_instance)
+static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
 {
-	struct net_device *dev = (struct net_device *) dev_instance;
+	struct net_device *dev = dev_instance;
 	struct rtl8169_private *tp = netdev_priv(dev);
 	int boguscnt = max_interrupt_work;
 	void __iomem *ioaddr = tp->mmio_addr;
@@ -2637,9 +2741,17 @@
 		RTL_W16(IntrStatus,
 			(status & RxFIFOOver) ? (status | RxOverflow) : status);
 
-		if (!(status & rtl8169_intr_mask))
+		if (!(status & tp->intr_event))
 			break;
 
+		/* Workaround for Rx FIFO overflow */
+		if (unlikely(status & RxFIFOOver) &&
+		    (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
+			netif_stop_queue(dev);
+			rtl8169_tx_timeout(dev);
+			break;
+		}
+
 		if (unlikely(status & SYSErr)) {
 			rtl8169_pcierr_interrupt(dev);
 			break;
@@ -2649,8 +2761,8 @@
 			rtl8169_check_link_status(dev, tp, ioaddr);
 
 #ifdef CONFIG_R8169_NAPI
-		RTL_W16(IntrMask, rtl8169_intr_mask & ~rtl8169_napi_event);
-		tp->intr_mask = ~rtl8169_napi_event;
+		RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
+		tp->intr_mask = ~tp->napi_event;
 
 		if (likely(netif_rx_schedule_prep(dev)))
 			__netif_rx_schedule(dev);
@@ -2661,9 +2773,9 @@
 		break;
 #else
 		/* Rx interrupt */
-		if (status & (RxOK | RxOverflow | RxFIFOOver)) {
+		if (status & (RxOK | RxOverflow | RxFIFOOver))
 			rtl8169_rx_interrupt(dev, tp, ioaddr);
-		}
+
 		/* Tx interrupt */
 		if (status & (TxOK | TxErr))
 			rtl8169_tx_interrupt(dev, tp, ioaddr);
@@ -2707,7 +2819,7 @@
 		 * write is safe - FR
 		 */
 		smp_wmb();
-		RTL_W16(IntrMask, rtl8169_intr_mask);
+		RTL_W16(IntrMask, tp->intr_event);
 	}
 
 	return (work_done >= work_to_do);
@@ -2789,14 +2901,13 @@
 	return 0;
 }
 
-static void
-rtl8169_set_rx_mode(struct net_device *dev)
+static void rtl_set_rx_mode(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
 	void __iomem *ioaddr = tp->mmio_addr;
 	unsigned long flags;
 	u32 mc_filter[2];	/* Multicast hash filter */
-	int i, rx_mode;
+	int rx_mode;
 	u32 tmp = 0;
 
 	if (dev->flags & IFF_PROMISC) {
@@ -2816,6 +2927,8 @@
 		mc_filter[1] = mc_filter[0] = 0xffffffff;
 	} else {
 		struct dev_mc_list *mclist;
+		unsigned int i;
+
 		rx_mode = AcceptBroadcast | AcceptMyPhys;
 		mc_filter[1] = mc_filter[0] = 0;
 		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
@@ -2840,10 +2953,11 @@
 		mc_filter[1] = 0xffffffff;
 	}
 
-	RTL_W32(RxConfig, tmp);
 	RTL_W32(MAR0 + 0, mc_filter[0]);
 	RTL_W32(MAR0 + 4, mc_filter[1]);
 
+	RTL_W32(RxConfig, tmp);
+
 	spin_unlock_irqrestore(&tp->lock, flags);
 }
 
@@ -2931,14 +3045,12 @@
 #endif
 };
 
-static int __init
-rtl8169_init_module(void)
+static int __init rtl8169_init_module(void)
 {
 	return pci_register_driver(&rtl8169_pci_driver);
 }
 
-static void __exit
-rtl8169_cleanup_module(void)
+static void __exit rtl8169_cleanup_module(void)
 {
 	pci_unregister_driver(&rtl8169_pci_driver);
 }
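
One of the quieter fixes above turns the descriptor fields into
__le32/__le64: the chip always consumes descriptors in little-endian, so the
CPU side must convert explicitly, as the Rx path now does with
le64_to_cpu(desc->addr). A sketch of the Tx side under the same assumption
(fill_desc() is hypothetical):

/* Sketch: fixed-endian descriptor setup. On little-endian hosts the
 * conversions are no-ops; on big-endian hosts they byte-swap. The real
 * driver also issues a write barrier before handing over DescOwn. */
static void fill_desc(struct TxDesc *desc, dma_addr_t mapping,
		      u32 opts1, u32 opts2)
{
	desc->addr  = cpu_to_le64(mapping);	/* DMA address, device order */
	desc->opts2 = cpu_to_le32(opts2);
	desc->opts1 = cpu_to_le32(opts1);	/* DescOwn goes in last */
}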
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 09078ff..2d826ff 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -469,11 +469,18 @@
 
 MODULE_DEVICE_TABLE(pci, s2io_tbl);
 
+static struct pci_error_handlers s2io_err_handler = {
+	.error_detected = s2io_io_error_detected,
+	.slot_reset = s2io_io_slot_reset,
+	.resume = s2io_io_resume,
+};
+
 static struct pci_driver s2io_driver = {
       .name = "S2IO",
       .id_table = s2io_tbl,
       .probe = s2io_init_nic,
       .remove = __devexit_p(s2io_rem_nic),
+      .err_handler = &s2io_err_handler,
 };
 
 /* A simplifier macro used both by init and free shared_mem Fns(). */
@@ -2689,6 +2696,9 @@
 	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
 	int i;
 
+	if (pci_channel_offline(nic->pdev))
+		return;
+
 	disable_irq(dev->irq);
 
 	atomic_inc(&nic->isr_cnt);
@@ -3215,6 +3225,8 @@
 	int i;
 	if (atomic_read(&nic->card_state) == CARD_DOWN)
 		return;
+	if (pci_channel_offline(nic->pdev))
+		return;
 	nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
 	/* Handling the XPAK counters update */
 	if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
@@ -3958,7 +3970,6 @@
 	/* Reset card, kill tasklet and free Tx and Rx buffers. */
 	s2io_card_down(sp);
 
-	sp->device_close_flag = TRUE;	/* Device is shut down. */
 	return 0;
 }
 
@@ -4314,6 +4325,10 @@
 	struct mac_info *mac_control;
 	struct config_param *config;
 
+	/* Pretend we handled any IRQs from a disconnected card */
+	if (pci_channel_offline(sp->pdev))
+		return IRQ_NONE;
+
 	atomic_inc(&sp->isr_cnt);
 	mac_control = &sp->mac_control;
 	config = &sp->config;
@@ -6569,7 +6584,7 @@
 	} while(cnt < 5);
 }
 
-static void s2io_card_down(struct s2io_nic * sp)
+static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
 {
 	int cnt = 0;
 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
@@ -6584,7 +6599,8 @@
 	atomic_set(&sp->card_state, CARD_DOWN);
 
 	/* disable Tx and Rx traffic on the NIC */
-	stop_nic(sp);
+	if (do_io)
+		stop_nic(sp);
 
 	s2io_rem_isr(sp);
 
@@ -6592,7 +6608,7 @@
 	tasklet_kill(&sp->task);
 
 	/* Check if the device is Quiescent and then Reset the NIC */
-	do {
+	while (do_io) {
 		/* As per the HW requirement we need to replenish the
 		 * receive buffer to avoid the ring bump. Since there is
 		 * no intention of processing the Rx frame at this point, we are
@@ -6617,8 +6633,9 @@
 				  (unsigned long long) val64);
 			break;
 		}
-	} while (1);
-	s2io_reset(sp);
+	}
+	if (do_io)
+		s2io_reset(sp);
 
 	spin_lock_irqsave(&sp->tx_lock, flags);
 	/* Free all Tx buffers */
@@ -6633,6 +6650,11 @@
 	clear_bit(0, &(sp->link_state));
 }
 
+static void s2io_card_down(struct s2io_nic * sp)
+{
+	do_s2io_card_down(sp, 1);
+}
+
 static int s2io_card_up(struct s2io_nic * sp)
 {
 	int i, ret = 0;
@@ -8010,3 +8032,85 @@
 	sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
 	return;
 }
+
+/**
+ * s2io_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci conneection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
+                                               pci_channel_state_t state)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct s2io_nic *sp = netdev->priv;
+
+	netif_device_detach(netdev);
+
+	if (netif_running(netdev)) {
+		/* Bring down the card, while avoiding PCI I/O */
+		do_s2io_card_down(sp, 0);
+	}
+	pci_disable_device(pdev);
+
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * s2io_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot.
+ * At this point, the card has experienced a hard reset,
+ * followed by fixups by BIOS, and has its config space
+ * set up identically to what it was at cold boot.
+ */
+static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct s2io_nic *sp = netdev->priv;
+
+	if (pci_enable_device(pdev)) {
+		printk(KERN_ERR "s2io: "
+		       "Cannot re-enable PCI device after reset.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	pci_set_master(pdev);
+	s2io_reset(sp);
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * s2io_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells
+ * us that it's OK to resume normal operation.
+ */
+static void s2io_io_resume(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct s2io_nic *sp = netdev->priv;
+
+	if (netif_running(netdev)) {
+		if (s2io_card_up(sp)) {
+			printk(KERN_ERR "s2io: "
+			       "Can't bring device back up after reset.\n");
+			return;
+		}
+
+		if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
+			s2io_card_down(sp);
+			printk(KERN_ERR "s2io: "
+			       "Can't resetore mac addr after reset.\n");
+			return;
+		}
+	}
+
+	netif_device_attach(netdev);
+	netif_wake_queue(netdev);
+}
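
The three s2io handlers follow the fixed PCI error-recovery sequence:
error_detected() quiesces the device while all I/O to it is unsafe,
slot_reset() re-enables and reinitializes it after the bus reset, and
resume() restarts traffic. A minimal commented skeleton of that contract,
with demo_* as placeholder names:

/* Sketch of the PCI error-recovery callback order; bodies are
 * placeholders, only the return-value contract matters. */
static pci_ers_result_t demo_error_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	/* 1. Stop using the device; no MMIO or config access is safe. */
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t demo_slot_reset(struct pci_dev *pdev)
{
	/* 2. The bus was reset; re-enable the device, redo chip init. */
	return pci_enable_device(pdev) ? PCI_ERS_RESULT_DISCONNECT
				       : PCI_ERS_RESULT_RECOVERED;
}

static void demo_resume(struct pci_dev *pdev)
{
	/* 3. Recovery is complete; reattach and restart the queues. */
}

static struct pci_error_handlers demo_err_handler = {
	.error_detected	= demo_error_detected,
	.slot_reset	= demo_slot_reset,
	.resume		= demo_resume,
};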
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 54baa0b..5859278 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -794,7 +794,6 @@
 
 	struct net_device_stats stats;
 	int high_dma_flag;
-	int device_close_flag;
 	int device_enabled_once;
 
 	char name[60];
@@ -1052,6 +1051,11 @@
 			   struct sk_buff *skb, u32 tcp_len);
 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring);
 
+static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
+					       pci_channel_state_t state);
+static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev);
+static void s2io_io_resume(struct pci_dev *pdev);
+
 #define s2io_tcp_mss(skb) skb_shinfo(skb)->gso_size
 #define s2io_udp_mss(skb) skb_shinfo(skb)->gso_size
 #define s2io_offload_type(skb) skb_shinfo(skb)->gso_type
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index fe01b96..b51d73c 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -50,7 +50,7 @@
 #include "sky2.h"
 
 #define DRV_NAME		"sky2"
-#define DRV_VERSION		"1.14"
+#define DRV_VERSION		"1.15"
 #define PFX			DRV_NAME " "
 
 /*
@@ -130,7 +130,7 @@
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4369) }, /* 88EC042 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436A) }, /* 88E8058 */
-//	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436B) }, /* 88E8071 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436B) }, /* 88E8071 */
 	{ 0 }
 };
 
@@ -217,13 +217,24 @@
 		sky2_write8(hw, B2_Y2_CLK_GATE, 0);
 
 	if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) {
-		u32 reg1;
+		u32 reg;
 
-		sky2_pci_write32(hw, PCI_DEV_REG3, 0);
-		reg1 = sky2_pci_read32(hw, PCI_DEV_REG4);
-		reg1 &= P_ASPM_CONTROL_MSK;
-		sky2_pci_write32(hw, PCI_DEV_REG4, reg1);
-		sky2_pci_write32(hw, PCI_DEV_REG5, 0);
+		reg = sky2_pci_read32(hw, PCI_DEV_REG4);
+		/* set all bits to 0 except bits 15..12 and 8 */
+		reg &= P_ASPM_CONTROL_MSK;
+		sky2_pci_write32(hw, PCI_DEV_REG4, reg);
+
+		reg = sky2_pci_read32(hw, PCI_DEV_REG5);
+		/* set all bits to 0 except bits 28 & 27 */
+		reg &= P_CTL_TIM_VMAIN_AV_MSK;
+		sky2_pci_write32(hw, PCI_DEV_REG5, reg);
+
+		sky2_pci_write32(hw, PCI_CFG_REG_1, 0);
+
+		/* Enable workaround for dev 4.107 on Yukon-Ultra & Extreme */
+		reg = sky2_read32(hw, B2_GP_IO);
+		reg |= GLB_GPIO_STAT_RACE_DIS;
+		sky2_write32(hw, B2_GP_IO, reg);
 	}
 }
 
@@ -650,6 +661,30 @@
 
 }
 
+static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port)
+{
+	if (hw->chip_id == CHIP_ID_YUKON_EX && hw->chip_rev != CHIP_REV_YU_EX_A0) {
+		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
+			     TX_STFW_ENA |
+			     ((hw->dev[port]->mtu > ETH_DATA_LEN) ?
+			      TX_JUMBO_ENA : TX_JUMBO_DIS));
+	} else {
+		if (hw->dev[port]->mtu > ETH_DATA_LEN) {
+			/* set Tx GMAC FIFO Almost Empty Threshold */
+			sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR),
+				     (ECU_JUMBO_WM << 16) | ECU_AE_THR);
+
+			sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
+				     TX_JUMBO_ENA | TX_STFW_DIS);
+
+			/* Can't do offload because of lack of store/forward */
+			hw->dev[port]->features &= ~(NETIF_F_TSO | NETIF_F_SG
+						     | NETIF_F_ALL_CSUM);
+		} else
+			sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
+				     TX_JUMBO_DIS | TX_STFW_ENA);
+	}
+}
+
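sky2_set_tx_stfwd() centralizes a hardware constraint: when the Tx FIFO
cannot store-and-forward a whole jumbo frame, the MAC cannot patch a checksum
into a frame that is already leaving, so the offload feature flags have to go
whenever jumbo is enabled on those parts. A condensed sketch of the policy,
where fifo_write() and stfwd_ok are hypothetical stand-ins for the SK_REG
writes and the chip-revision test:

/* Condensed policy of sky2_set_tx_stfwd(); fifo_write() and stfwd_ok
 * are hypothetical. Jumbo without store/forward forces offloads off. */
static void tx_stfwd_policy(struct net_device *dev, bool stfwd_ok, bool jumbo)
{
	if (stfwd_ok)
		fifo_write(TX_STFW_ENA | (jumbo ? TX_JUMBO_ENA : TX_JUMBO_DIS));
	else if (jumbo) {
		fifo_write(TX_JUMBO_ENA | TX_STFW_DIS);
		dev->features &= ~(NETIF_F_TSO | NETIF_F_SG | NETIF_F_ALL_CSUM);
	} else {
		fifo_write(TX_JUMBO_DIS | TX_STFW_ENA);
	}
}
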
 static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
 {
 	struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
@@ -730,8 +765,11 @@
 
 	/* Configure Rx MAC FIFO */
 	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
-	sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
-		     GMF_OPER_ON | GMF_RX_F_FL_ON);
+	reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
+	if (hw->chip_id == CHIP_ID_YUKON_EX)
+		reg |= GMF_RX_OVER_ON;
+
+	sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
 
 	/* Flush Rx MAC FIFO on any flow control or error */
 	sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);
@@ -747,16 +785,7 @@
 		sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
 		sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
 
-		/* set Tx GMAC FIFO Almost Empty Threshold */
-		sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR),
-			     (ECU_JUMBO_WM << 16) | ECU_AE_THR);
-
-		if (hw->dev[port]->mtu > ETH_DATA_LEN)
-			sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
-				     TX_JUMBO_ENA | TX_STFW_DIS);
-		else
-			sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
-				     TX_JUMBO_DIS | TX_STFW_ENA);
+		sky2_set_tx_stfwd(hw, port);
 	}
 
 }
@@ -939,14 +968,16 @@
 {
 	struct sky2_rx_le *le;
 
-	le = sky2_next_rx(sky2);
-	le->addr = cpu_to_le32((ETH_HLEN << 16) | ETH_HLEN);
-	le->ctrl = 0;
-	le->opcode = OP_TCPSTART | HW_OWNER;
+	if (sky2->hw->chip_id != CHIP_ID_YUKON_EX) {
+		le = sky2_next_rx(sky2);
+		le->addr = cpu_to_le32((ETH_HLEN << 16) | ETH_HLEN);
+		le->ctrl = 0;
+		le->opcode = OP_TCPSTART | HW_OWNER;
 
-	sky2_write32(sky2->hw,
-		     Q_ADDR(rxqaddr[sky2->port], Q_CSR),
-		     sky2->rx_csum ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
+		sky2_write32(sky2->hw,
+			     Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+			     sky2->rx_csum ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
+	}
 
 }
 
@@ -1134,7 +1165,7 @@
 	if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
 	    (hw->chip_rev == CHIP_REV_YU_EC_U_A1
 	     || hw->chip_rev == CHIP_REV_YU_EC_U_B0))
-		sky2_write32(hw, Q_ADDR(rxq, Q_F), F_M_RX_RAM_DIS);
+		sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS);
 
 	sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
 
@@ -1285,6 +1316,10 @@
 
 	sky2_qset(hw, txqaddr[port]);
 
+	/* This is copied from sk98lin 10.0.5.3; no one tells me about errata */
+	if (hw->chip_id == CHIP_ID_YUKON_EX && hw->chip_rev == CHIP_REV_YU_EX_B0)
+		sky2_write32(hw, Q_ADDR(txqaddr[port], Q_TEST), F_TX_CHK_AUTO_OFF);
+
 	/* Set almost empty threshold */
 	if (hw->chip_id == CHIP_ID_YUKON_EC_U
 	    && hw->chip_rev == CHIP_REV_YU_EC_U_A0)
@@ -1393,14 +1428,16 @@
 	/* Check for TCP Segmentation Offload */
 	mss = skb_shinfo(skb)->gso_size;
 	if (mss != 0) {
-		mss += tcp_optlen(skb); /* TCP options */
-		mss += ip_hdrlen(skb) + sizeof(struct tcphdr);
-		mss += ETH_HLEN;
+		if (hw->chip_id != CHIP_ID_YUKON_EX)
+			mss += ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
 
-		if (mss != sky2->tx_last_mss) {
-			le = get_tx_le(sky2);
-			le->addr = cpu_to_le32(mss);
-			le->opcode = OP_LRGLEN | HW_OWNER;
+		if (mss != sky2->tx_last_mss) {
+			le = get_tx_le(sky2);
+			le->addr = cpu_to_le32(mss);
+			if (hw->chip_id == CHIP_ID_YUKON_EX)
+				le->opcode = OP_MSS | HW_OWNER;
+			else
+				le->opcode = OP_LRGLEN | HW_OWNER;
 			sky2->tx_last_mss = mss;
 		}
 	}
@@ -1422,24 +1459,30 @@
 
 	/* Handle TCP checksum offload */
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		const unsigned offset = skb_transport_offset(skb);
-		u32 tcpsum;
+		/* On some Yukon EX revisions the checksum encoding changed. */
+		if (hw->chip_id == CHIP_ID_YUKON_EX
+		    && hw->chip_rev != CHIP_REV_YU_EX_B0)
+			ctrl |= CALSUM;	/* auto checksum */
+		else {
+			const unsigned offset = skb_transport_offset(skb);
+			u32 tcpsum;
 
-		tcpsum = offset << 16;		/* sum start */
-		tcpsum |= offset + skb->csum_offset;	/* sum write */
+			tcpsum = offset << 16;			/* sum start */
+			tcpsum |= offset + skb->csum_offset;	/* sum write */
 
-		ctrl |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
-		if (ip_hdr(skb)->protocol == IPPROTO_UDP)
-			ctrl |= UDPTCP;
+			ctrl |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
+			if (ip_hdr(skb)->protocol == IPPROTO_UDP)
+				ctrl |= UDPTCP;
 
-		if (tcpsum != sky2->tx_tcpsum) {
-			sky2->tx_tcpsum = tcpsum;
+			if (tcpsum != sky2->tx_tcpsum) {
+				sky2->tx_tcpsum = tcpsum;
 
-			le = get_tx_le(sky2);
-			le->addr = cpu_to_le32(tcpsum);
-			le->length = 0;	/* initial checksum value */
-			le->ctrl = 1;	/* one packet */
-			le->opcode = OP_TCPLISW | HW_OWNER;
+				le = get_tx_le(sky2);
+				le->addr = cpu_to_le32(tcpsum);
+				le->length = 0;	/* initial checksum value */
+				le->ctrl = 1;	/* one packet */
+				le->opcode = OP_TCPLISW | HW_OWNER;
+			}
 		}
 	}
 
@@ -1913,15 +1956,8 @@
 
 	synchronize_irq(hw->pdev->irq);
 
-	if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) {
-		if (new_mtu > ETH_DATA_LEN) {
-			sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
-				     TX_JUMBO_ENA | TX_STFW_DIS);
-			dev->features &= NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM;
-		} else
-			sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
-				     TX_JUMBO_DIS | TX_STFW_ENA);
-	}
+	if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX)
+		sky2_set_tx_stfwd(hw, port);
 
 	ctl = gma_read16(hw, port, GM_GP_CTRL);
 	gma_write16(hw, port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA);
@@ -2118,6 +2154,7 @@
 
 	while (hw->st_idx != hwidx) {
 		struct sky2_status_le *le  = hw->st_le + hw->st_idx;
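+		/* the old link field is now css; its low bit still
+		 * carries the port number (CSS_LINK_BIT) */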
+		unsigned port = le->css & CSS_LINK_BIT;
 		struct net_device *dev;
 		struct sk_buff *skb;
 		u32 status;
@@ -2125,9 +2162,7 @@
 
 		hw->st_idx = RING_NEXT(hw->st_idx, STATUS_RING_SIZE);
 
-		BUG_ON(le->link >= 2);
-		dev = hw->dev[le->link];
-
+		dev = hw->dev[port];
 		sky2 = netdev_priv(dev);
 		length = le16_to_cpu(le->length);
 		status = le32_to_cpu(le->status);
@@ -2140,6 +2175,16 @@
 				goto force_update;
 			}
 
+			/* This chip reports checksum status differently */
+			if (hw->chip_id == CHIP_ID_YUKON_EX) {
+				if (sky2->rx_csum &&
+				    (le->css & (CSS_ISIPV4 | CSS_ISIPV6)) &&
+				    (le->css & CSS_TCPUDPCSOK))
+					skb->ip_summed = CHECKSUM_UNNECESSARY;
+				else
+					skb->ip_summed = CHECKSUM_NONE;
+			}
+
 			skb->protocol = eth_type_trans(skb, dev);
 			sky2->net_stats.rx_packets++;
 			sky2->net_stats.rx_bytes += skb->len;
@@ -2155,10 +2200,10 @@
 				netif_receive_skb(skb);
 
 			/* Update receiver after 16 frames */
-			if (++buf_write[le->link] == RX_BUF_WRITE) {
+			if (++buf_write[port] == RX_BUF_WRITE) {
 force_update:
-				sky2_put_idx(hw, rxqaddr[le->link], sky2->rx_put);
-				buf_write[le->link] = 0;
+				sky2_put_idx(hw, rxqaddr[port], sky2->rx_put);
+				buf_write[port] = 0;
 			}
 
 			/* Stop after net poll weight */
@@ -2179,6 +2224,9 @@
 			if (!sky2->rx_csum)
 				break;
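+			/* Yukon EX reports rx checksum status in the css
+			 * bits handled in the receive path above, so the
+			 * legacy checksum status check does not apply */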
 
+			if (hw->chip_id == CHIP_ID_YUKON_EX)
+				break;
+
 			/* Both checksum counters are programmed to start at
 			 * the same offset, so unless there is a problem they
 			 * should match. This failure is an early indication that
@@ -2194,7 +2242,7 @@
 				       dev->name, status);
 				sky2->rx_csum = 0;
 				sky2_write32(sky2->hw,
-					     Q_ADDR(rxqaddr[le->link], Q_CSR),
+					     Q_ADDR(rxqaddr[port], Q_CSR),
 					     BMU_DIS_RX_CHKSUM);
 			}
 			break;
@@ -2513,6 +2561,9 @@
 {
 	u8 t8;
 
+	/* Enable all clocks before touching any other registers */
+	sky2_pci_write32(hw, PCI_DEV_REG3, 0);
+
 	sky2_write8(hw, B0_CTST, CS_RST_CLR);
 
 	hw->chip_id = sky2_read8(hw, B2_CHIP_ID);
@@ -2522,14 +2573,6 @@
 		return -EOPNOTSUPP;
 	}
 
-	if (hw->chip_id == CHIP_ID_YUKON_EX)
-		dev_warn(&hw->pdev->dev, "this driver not yet tested on this chip type\n"
-			 "Please report success or failure to <netdev@vger.kernel.org>\n");
-
-	/* Make sure and enable all clocks */
-	if (hw->chip_id == CHIP_ID_YUKON_EX || hw->chip_id == CHIP_ID_YUKON_EC_U)
-		sky2_pci_write32(hw, PCI_DEV_REG3, 0);
-
 	hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
 
 	/* This rev is really old, and requires untested workarounds */
@@ -2589,6 +2632,11 @@
 	for (i = 0; i < hw->ports; i++) {
 		sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
 		sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
+
+		if (hw->chip_id == CHIP_ID_YUKON_EX)
+			sky2_write16(hw, SK_REG(i, GMAC_CTRL),
+				     GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON
+				     | GMC_BYP_RETR_ON);
 	}
 
 	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
@@ -2735,7 +2783,7 @@
 
 	sky2->wol = wol->wolopts;
 
-	if (hw->chip_id == CHIP_ID_YUKON_EC_U)
+	if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX)
 		sky2_write32(hw, B0_CTST, sky2->wol
 			     ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF);
 
@@ -3330,7 +3378,7 @@
 
 /*
  * Returns copy of control register region
- * Note: access to the RAM address register set will cause timeouts.
+ * Note: ethtool_get_regs always provides a full-size (16k) buffer
  */
 static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 			  void *p)
@@ -3338,15 +3386,19 @@
 	const struct sky2_port *sky2 = netdev_priv(dev);
 	const void __iomem *io = sky2->hw->regs;
 
-	BUG_ON(regs->len < B3_RI_WTO_R1);
 	regs->version = 1;
 	memset(p, 0, regs->len);
 
 	memcpy_fromio(p, io, B3_RAM_ADDR);
 
-	memcpy_fromio(p + B3_RI_WTO_R1,
-		      io + B3_RI_WTO_R1,
-		      regs->len - B3_RI_WTO_R1);
+	/* skip the diagnostic RAM region; reading it can cause timeouts */
+	memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1, 0x2000 - B3_RI_WTO_R1);
+
+	/* copy GMAC registers */
+	memcpy_fromio(p + BASE_GMAC_1, io + BASE_GMAC_1, 0x1000);
+	if (sky2->hw->ports > 1)
+		memcpy_fromio(p + BASE_GMAC_2, io + BASE_GMAC_2, 0x1000);
 }
 
 /* In order to do Jumbo packets on these chips, need to turn off the
@@ -3357,9 +3409,7 @@
 	const struct sky2_port *sky2 = netdev_priv(dev);
 	const struct sky2_hw *hw = sky2->hw;
 
-	return dev->mtu > ETH_DATA_LEN &&
-		(hw->chip_id == CHIP_ID_YUKON_EX
-		 || hw->chip_id == CHIP_ID_YUKON_EC_U);
+	return dev->mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_EC_U;
 }
 
 static int sky2_set_tx_csum(struct net_device *dev, u32 data)
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index b8c4a3b..8df4643 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -14,6 +14,8 @@
 	PCI_DEV_REG3	= 0x80,
 	PCI_DEV_REG4	= 0x84,
 	PCI_DEV_REG5    = 0x88,
+	PCI_CFG_REG_0	= 0x90,
+	PCI_CFG_REG_1	= 0x94,
 };
 
 enum {
@@ -28,6 +30,7 @@
 enum pci_dev_reg_1 {
 	PCI_Y2_PIG_ENA	 = 1<<31, /* Enable Plug-in-Go (YUKON-2) */
 	PCI_Y2_DLL_DIS	 = 1<<30, /* Disable PCI DLL (YUKON-2) */
+	PCI_SW_PWR_ON_RST= 1<<30, /* SW Power on Reset (Yukon-EX) */
 	PCI_Y2_PHY2_COMA = 1<<29, /* Set PHY 2 to Coma Mode (YUKON-2) */
 	PCI_Y2_PHY1_COMA = 1<<28, /* Set PHY 1 to Coma Mode (YUKON-2) */
 	PCI_Y2_PHY2_POWD = 1<<27, /* Set PHY 2 to Power Down (YUKON-2) */
@@ -67,6 +70,80 @@
 				  | P_ASPM_CLKRUN_REQUEST | P_ASPM_INT_FIFO_EMPTY,
 };
 
+/*	PCI_OUR_REG_5		32 bit	Our Register 5 (Yukon-ECU only) */
+enum pci_dev_reg_5 {
+					/* Bit 31..27:	for A3 & later */
+	P_CTL_DIV_CORE_CLK_ENA	= 1<<31, /* Divide Core Clock Enable */
+	P_CTL_SRESET_VMAIN_AV	= 1<<30, /* Soft Reset for Vmain_av De-Glitch */
+	P_CTL_BYPASS_VMAIN_AV	= 1<<29, /* Bypass En. for Vmain_av De-Glitch */
+	P_CTL_TIM_VMAIN_AV_MSK	= 3<<27, /* Bit 28..27: Timer Vmain_av Mask */
+					 /* Bit 26..16: Release Clock on Event */
+	P_REL_PCIE_RST_DE_ASS	= 1<<26, /* PCIe Reset De-Asserted */
+	P_REL_GPHY_REC_PACKET	= 1<<25, /* GPHY Received Packet */
+	P_REL_INT_FIFO_N_EMPTY	= 1<<24, /* Internal FIFO Not Empty */
+	P_REL_MAIN_PWR_AVAIL	= 1<<23, /* Main Power Available */
+	P_REL_CLKRUN_REQ_REL	= 1<<22, /* CLKRUN Request Release */
+	P_REL_PCIE_RESET_ASS	= 1<<21, /* PCIe Reset Asserted */
+	P_REL_PME_ASSERTED	= 1<<20, /* PME Asserted */
+	P_REL_PCIE_EXIT_L1_ST	= 1<<19, /* PCIe Exit L1 State */
+	P_REL_LOADER_NOT_FIN	= 1<<18, /* EPROM Loader Not Finished */
+	P_REL_PCIE_RX_EX_IDLE	= 1<<17, /* PCIe Rx Exit Electrical Idle State */
+	P_REL_GPHY_LINK_UP	= 1<<16, /* GPHY Link Up */
+
+					/* Bit 10.. 0: Mask for Gate Clock */
+	P_GAT_PCIE_RST_ASSERTED	= 1<<10,/* PCIe Reset Asserted */
+	P_GAT_GPHY_N_REC_PACKET	= 1<<9, /* GPHY Not Received Packet */
+	P_GAT_INT_FIFO_EMPTY	= 1<<8, /* Internal FIFO Empty */
+	P_GAT_MAIN_PWR_N_AVAIL	= 1<<7, /* Main Power Not Available */
+	P_GAT_CLKRUN_REQ_REL	= 1<<6, /* CLKRUN Not Requested */
+	P_GAT_PCIE_RESET_ASS	= 1<<5, /* PCIe Reset Asserted */
+	P_GAT_PME_DE_ASSERTED	= 1<<4, /* PME De-Asserted */
+	P_GAT_PCIE_ENTER_L1_ST	= 1<<3, /* PCIe Enter L1 State */
+	P_GAT_LOADER_FINISHED	= 1<<2, /* EPROM Loader Finished */
+	P_GAT_PCIE_RX_EL_IDLE	= 1<<1, /* PCIe Rx Electrical Idle State */
+	P_GAT_GPHY_LINK_DOWN	= 1<<0,	/* GPHY Link Down */
+
+	PCIE_OUR5_EVENT_CLK_D3_SET = P_REL_GPHY_REC_PACKET |
+				     P_REL_INT_FIFO_N_EMPTY |
+				     P_REL_PCIE_EXIT_L1_ST |
+				     P_REL_PCIE_RX_EX_IDLE |
+				     P_GAT_GPHY_N_REC_PACKET |
+				     P_GAT_INT_FIFO_EMPTY |
+				     P_GAT_PCIE_ENTER_L1_ST |
+				     P_GAT_PCIE_RX_EL_IDLE,
+};
+
+/*	PCI_CFG_REG_1			32 bit	Config Register 1 (Yukon-Ext only) */
+enum pci_cfg_reg1 {
+	P_CF1_DIS_REL_EVT_RST	= 1<<24, /* Dis. Rel. Event during PCIE reset */
+					 /* Bit 23..21: Release Clock on Event */
+	P_CF1_REL_LDR_NOT_FIN	= 1<<23, /* EEPROM Loader Not Finished */
+	P_CF1_REL_VMAIN_AVLBL	= 1<<22, /* Vmain available */
+	P_CF1_REL_PCIE_RESET	= 1<<21, /* PCI-E reset */
+					 /* Bit 20..18: Gate Clock on Event */
+	P_CF1_GAT_LDR_NOT_FIN	= 1<<20, /* EEPROM Loader Finished */
+	P_CF1_GAT_PCIE_RX_IDLE	= 1<<19, /* PCI-E Rx Electrical idle */
+	P_CF1_GAT_PCIE_RESET	= 1<<18, /* PCI-E Reset */
+	P_CF1_PRST_PHY_CLKREQ	= 1<<17, /* Enable PCI-E rst & PM2PHY gen. CLKREQ */
+	P_CF1_PCIE_RST_CLKREQ	= 1<<16, /* Enable PCI-E rst generate CLKREQ */
+
+	P_CF1_ENA_CFG_LDR_DONE	= 1<<8, /* Enable core level Config loader done */
+
+	P_CF1_ENA_TXBMU_RD_IDLE	= 1<<1, /* Enable TX BMU Read  IDLE for ASPM */
+	P_CF1_ENA_TXBMU_WR_IDLE	= 1<<0, /* Enable TX BMU Write IDLE for ASPM */
+
+	PCIE_CFG1_EVENT_CLK_D3_SET = P_CF1_DIS_REL_EVT_RST |
+					P_CF1_REL_LDR_NOT_FIN |
+					P_CF1_REL_VMAIN_AVLBL |
+					P_CF1_REL_PCIE_RESET |
+					P_CF1_GAT_LDR_NOT_FIN |
+					P_CF1_GAT_PCIE_RESET |
+					P_CF1_PRST_PHY_CLKREQ |
+					P_CF1_ENA_CFG_LDR_DONE |
+					P_CF1_ENA_TXBMU_RD_IDLE |
+					P_CF1_ENA_TXBMU_WR_IDLE,
+};
+
 
 #define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
 			       PCI_STATUS_SIG_SYSTEM_ERROR | \
@@ -364,6 +441,20 @@
 	TST_CFG_WRITE_OFF= 1<<0, /* Disable Config Reg WR */
 };
 
+/* 	B2_GPIO */
+enum {
+	GLB_GPIO_CLK_DEB_ENA = 1<<31,	/* Clock Debug Enable */
+	GLB_GPIO_CLK_DBG_MSK = 0xf<<26, /* Clock Debug */
+
+	GLB_GPIO_INT_RST_D3_DIS = 1<<15, /* Disable Internal Reset After D3 to D0 */
+	GLB_GPIO_LED_PAD_SPEED_UP = 1<<14, /* LED PAD Speed Up */
+	GLB_GPIO_STAT_RACE_DIS	= 1<<13, /* Status Race Disable */
+	GLB_GPIO_TEST_SEL_MSK	= 3<<11, /* Testmode Select */
+	GLB_GPIO_TEST_SEL_BASE	= 1<<11,
+	GLB_GPIO_RAND_ENA	= 1<<10, /* Random Enable */
+	GLB_GPIO_RAND_BIT_1	= 1<<9,  /* Random Bit 1 */
+};
+
 /*	B2_MAC_CFG		 8 bit	MAC Configuration / Chip Revision */
 enum {
 	CFG_CHIP_R_MSK	  = 0xf<<4,	/* Bit 7.. 4: Chip Revision */
@@ -392,6 +483,11 @@
 	CHIP_REV_YU_FE_A2    = 2,
 
 };
+enum yukon_ex_rev {
+	CHIP_REV_YU_EX_A0    = 1,
+	CHIP_REV_YU_EX_B0    = 2,
+};
+
 
 /*	B2_Y2_CLK_GATE	 8 bit	Clock Gating (Yukon-2 only) */
 enum {
@@ -515,23 +611,15 @@
 enum {
 	B8_Q_REGS = 0x0400, /* base of Queue registers */
 	Q_D	= 0x00,	/* 8*32	bit	Current Descriptor */
-	Q_DA_L	= 0x20,	/* 32 bit	Current Descriptor Address Low dWord */
-	Q_DA_H	= 0x24,	/* 32 bit	Current Descriptor Address High dWord */
+	Q_VLAN  = 0x20, /* 16 bit	Current VLAN Tag */
+	Q_DONE	= 0x24,	/* 16 bit	Done Index */
 	Q_AC_L	= 0x28,	/* 32 bit	Current Address Counter Low dWord */
 	Q_AC_H	= 0x2c,	/* 32 bit	Current Address Counter High dWord */
 	Q_BC	= 0x30,	/* 32 bit	Current Byte Counter */
 	Q_CSR	= 0x34,	/* 32 bit	BMU Control/Status Register */
-	Q_F	= 0x38,	/* 32 bit	Flag Register */
-	Q_T1	= 0x3c,	/* 32 bit	Test Register 1 */
-	Q_T1_TR	= 0x3c,	/*  8 bit	Test Register 1 Transfer SM */
-	Q_T1_WR	= 0x3d,	/*  8 bit	Test Register 1 Write Descriptor SM */
-	Q_T1_RD	= 0x3e,	/*  8 bit	Test Register 1 Read Descriptor SM */
-	Q_T1_SV	= 0x3f,	/*  8 bit	Test Register 1 Supervisor SM */
-	Q_T2	= 0x40,	/* 32 bit	Test Register 2	*/
-	Q_T3	= 0x44,	/* 32 bit	Test Register 3	*/
+	Q_TEST	= 0x38,	/* 32 bit	Test/Control Register */
 
 /* Yukon-2 */
-	Q_DONE	= 0x24,	/* 16 bit	Done Index 		(Yukon-2 only) */
 	Q_WM	= 0x40,	/* 16 bit	FIFO Watermark */
 	Q_AL	= 0x42,	/*  8 bit	FIFO Alignment */
 	Q_RSP	= 0x44,	/* 16 bit	FIFO Read Shadow Pointer */
@@ -545,15 +633,16 @@
 };
 #define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs))
 
-/*	Q_F				32 bit	Flag Register */
+/*	Q_TEST				32 bit	Test Register */
 enum {
-	F_ALM_FULL	= 1<<27, /* Rx FIFO: almost full */
-	F_EMPTY		= 1<<27, /* Tx FIFO: empty flag */
-	F_FIFO_EOF	= 1<<26, /* Tag (EOF Flag) bit in FIFO */
-	F_WM_REACHED	= 1<<25, /* Watermark reached */
+	/* Transmit */
+	F_TX_CHK_AUTO_OFF = 1<<31, /* Tx checksum auto calc off (Yukon EX) */
+	F_TX_CHK_AUTO_ON  = 1<<30, /* Tx checksum auto calc on (Yukon EX) */
+
+	/* Receive */
 	F_M_RX_RAM_DIS	= 1<<24, /* MAC Rx RAM Read Port disable */
-	F_FIFO_LEVEL	= 0x1fL<<16, /* Bit 23..16:	# of Qwords in FIFO */
-	F_WATER_MARK	= 0x0007ffL, /* Bit 10.. 0:	Watermark */
+
+	/* remaining hardware test bits are not used */
 };
 
 /* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/
@@ -1608,6 +1697,16 @@
 	RX_VLAN_STRIP_ON = 1<<25,	/* enable  VLAN stripping */
 	RX_VLAN_STRIP_OFF = 1<<24,	/* disable VLAN stripping */
 
+	RX_MACSEC_FLUSH_ON  = 1<<23,
+	RX_MACSEC_FLUSH_OFF = 1<<22,
+	RX_MACSEC_ASF_FLUSH_ON = 1<<21,
+	RX_MACSEC_ASF_FLUSH_OFF = 1<<20,
+
+	GMF_RX_OVER_ON      = 1<<19,	/* enable flushing on receive overrun */
+	GMF_RX_OVER_OFF     = 1<<18,	/* disable flushing on receive overrun */
+	GMF_ASF_RX_OVER_ON  = 1<<17,	/* enable flushing of ASF when overrun */
+	GMF_ASF_RX_OVER_OFF = 1<<16,	/* disable flushing of ASF when overrun */
+
 	GMF_WP_TST_ON	= 1<<14,	/* Write Pointer Test On */
 	GMF_WP_TST_OFF	= 1<<13,	/* Write Pointer Test Off */
 	GMF_WP_STEP	= 1<<12,	/* Write Pointer Step/Increment */
@@ -1720,6 +1819,15 @@
 
 /*	GMAC_CTRL		32 bit	GMAC Control Reg (YUKON only) */
 enum {
+	GMC_SET_RST	    = 1<<15,/* MAC SEC RST */
+	GMC_SEC_RST_OFF     = 1<<14,/* MAC SEC RST off */
+	GMC_BYP_MACSECRX_ON = 1<<13,/* Bypass MACsec RX on */
+	GMC_BYP_MACSECRX_OFF= 1<<12,/* Bypass MACsec RX off */
+	GMC_BYP_MACSECTX_ON = 1<<11,/* Bypass MACsec TX on */
+	GMC_BYP_MACSECTX_OFF= 1<<10,/* Bypass MACsec TX off */
+	GMC_BYP_RETR_ON	= 1<<9, /* Bypass retransmit FIFO On */
+	GMC_BYP_RETR_OFF= 1<<8, /* Bypass retransmit FIFO Off */
+
 	GMC_H_BURST_ON	= 1<<7,	/* Half Duplex Burst Mode On */
 	GMC_H_BURST_OFF	= 1<<6,	/* Half Duplex Burst Mode Off */
 	GMC_F_LOOPB_ON	= 1<<5,	/* FIFO Loopback On */
@@ -1805,9 +1913,13 @@
 	OP_ADDR64VLAN	= OP_ADDR64 | OP_VLAN,
 	OP_LRGLEN	= 0x24,
 	OP_LRGLENVLAN	= OP_LRGLEN | OP_VLAN,
+	OP_MSS		= 0x28,
+	OP_MSSVLAN	= OP_MSS | OP_VLAN,
+
 	OP_BUFFER	= 0x40,
 	OP_PACKET	= 0x41,
 	OP_LARGESEND	= 0x43,
+	OP_LSOV2	= 0x45,
 
 /* YUKON-2 STATUS opcodes defines */
 	OP_RXSTAT	= 0x60,
@@ -1818,6 +1930,19 @@
 	OP_RXTIMEVLAN	= OP_RXTIMESTAMP | OP_RXVLAN,
 	OP_RSS_HASH	= 0x65,
 	OP_TXINDEXLE	= 0x68,
+	OP_MACSEC	= 0x6c,
+	OP_PUTIDX	= 0x70,
+};
+
+enum status_css {
+	CSS_TCPUDPCSOK	= 1<<7,	/* TCP / UDP checksum is ok */
+	CSS_ISUDP	= 1<<6, /* packet is a UDP packet */
+	CSS_ISTCP	= 1<<5, /* packet is a TCP packet */
+	CSS_ISIPFRAG	= 1<<4, /* packet is a TCP/UDP frag, CS calc not done */
+	CSS_ISIPV6	= 1<<3, /* packet is an IPv6 packet */
+	CSS_IPV4CSUMOK	= 1<<2, /* IPv4 header checksum is ok */
+	CSS_ISIPV4	= 1<<1, /* packet is an IPv4 packet */
+	CSS_LINK_BIT	= 1<<0, /* port number (legacy) */
 };
 
 /* Yukon 2 hardware interface */
@@ -1838,7 +1963,7 @@
 struct sky2_status_le {
 	__le32	status;	/* also checksum */
 	__le16	length;	/* also vlan tag */
-	u8	link;
+	u8	css;
 	u8	opcode;
 } __attribute((packed));
 
diff --git a/drivers/net/sni_82596.c b/drivers/net/sni_82596.c
new file mode 100644
index 0000000..2cf6794
--- /dev/null
+++ b/drivers/net/sni_82596.c
@@ -0,0 +1,185 @@
+/*
+ * sni_82596.c -- driver for intel 82596 ethernet controller, as
+ *  		  used in older SNI RM machines
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+
+#define SNI_82596_DRIVER_VERSION "SNI RM 82596 driver - Revision: 0.01"
+
+static const char sni_82596_string[] = "snirm_82596";
+
+#define DMA_ALLOC                      dma_alloc_coherent
+#define DMA_FREE                       dma_free_coherent
+#define DMA_WBACK(priv, addr, len)     do { } while (0)
+#define DMA_INV(priv, addr, len)       do { } while (0)
+#define DMA_WBACK_INV(priv, addr, len) do { } while (0)
+
+#define SYSBUS      0x00004400
+
+/* big endian CPU, 82596 little endian */
+#define SWAP32(x)   cpu_to_le32((u32)(x))
+#define SWAP16(x)   cpu_to_le16((u16)(x))
+
+#define OPT_MPU_16BIT    0x01
+
+#include "lib82596.c"
+
+MODULE_AUTHOR("Thomas Bogendoerfer");
+MODULE_DESCRIPTION("i82596 driver");
+MODULE_LICENSE("GPL");
+module_param(i596_debug, int, 0);
+MODULE_PARM_DESC(i596_debug, "82596 debug mask");
+
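+/* assert Channel Attention: a write to the CA register makes the
+ * 82596 re-examine the SCB (standard i82596 handshake) */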
+static inline void ca(struct net_device *dev)
+{
+	struct i596_private *lp = netdev_priv(dev);
+
+	writel(0, lp->ca);
+}
+
+
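+/* Issue a command/address pair to the 82596 port register.  16-bit
+ * buses take the 32-bit value as two halfword writes; 32-bit buses
+ * write the same word twice (behaviour kept from other lib82596
+ * users; assumption, not verified against SNI documentation). */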
+static void mpu_port(struct net_device *dev, int c, dma_addr_t x)
+{
+	struct i596_private *lp = netdev_priv(dev);
+
+	u32 v = (u32) (c) | (u32) (x);
+
+	if (lp->options & OPT_MPU_16BIT) {
+		writew(v & 0xffff, lp->mpu_port);
+		wmb();  /* order writes to MPU port */
+		udelay(1);
+		writew(v >> 16, lp->mpu_port);
+	} else {
+		writel(v, lp->mpu_port);
+		wmb();  /* order writes to MPU port */
+		udelay(1);
+		writel(v, lp->mpu_port);
+	}
+}
+
+
+static int __devinit sni_82596_probe(struct platform_device *dev)
+{
+	struct	net_device *netdevice;
+	struct i596_private *lp;
+	struct  resource *res, *ca, *idprom, *options;
+	int	retval = -ENOMEM;
+	void __iomem *mpu_addr;
+	void __iomem *ca_addr;
+	u8 __iomem *eth_addr;
+
+	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+	ca = platform_get_resource(dev, IORESOURCE_MEM, 1);
+	options = platform_get_resource(dev, 0, 0);
+	idprom = platform_get_resource(dev, IORESOURCE_MEM, 2);
+	if (!res || !ca || !options || !idprom)
+		return -ENODEV;
+	mpu_addr = ioremap_nocache(res->start, 4);
+	if (!mpu_addr)
+		return -ENOMEM;
+	ca_addr = ioremap_nocache(ca->start, 4);
+	if (!ca_addr)
+		goto probe_failed_free_mpu;
+
+	printk(KERN_INFO "Found i82596 at 0x%x\n", res->start);
+
+	netdevice = alloc_etherdev(sizeof(struct i596_private));
+	if (!netdevice)
+		goto probe_failed_free_ca;
+
+	SET_NETDEV_DEV(netdevice, &dev->dev);
+	platform_set_drvdata(dev, netdevice);
+
+	netdevice->base_addr = res->start;
+	netdevice->irq = platform_get_irq(dev, 0);
+
+	eth_addr = ioremap_nocache(idprom->start, 0x10);
+	if (!eth_addr)
+		goto probe_failed;
+
+	/* someone seems to like messed up stuff: the MAC address is stored in reverse byte order */
+	netdevice->dev_addr[0] = readb(eth_addr + 0x0b);
+	netdevice->dev_addr[1] = readb(eth_addr + 0x0a);
+	netdevice->dev_addr[2] = readb(eth_addr + 0x09);
+	netdevice->dev_addr[3] = readb(eth_addr + 0x08);
+	netdevice->dev_addr[4] = readb(eth_addr + 0x07);
+	netdevice->dev_addr[5] = readb(eth_addr + 0x06);
+	iounmap(eth_addr);
+
+	if (!netdevice->irq) {
+		printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n",
+			__FILE__, netdevice->base_addr);
+		goto probe_failed;
+	}
+
+	lp = netdev_priv(netdevice);
+	lp->options = options->flags & IORESOURCE_BITS;
+	lp->ca = ca_addr;
+	lp->mpu_port = mpu_addr;
+
+	retval = i82596_probe(netdevice);
+	if (retval == 0)
+		return 0;
+
+probe_failed:
+	free_netdev(netdevice);
+probe_failed_free_ca:
+	iounmap(ca_addr);
+probe_failed_free_mpu:
+	iounmap(mpu_addr);
+	return retval;
+}
+
+static int __devexit sni_82596_driver_remove(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct i596_private *lp = netdev_priv(dev);
+
+	unregister_netdev(dev);
+	DMA_FREE(dev->dev.parent, sizeof(struct i596_private),
+		 lp->dma, lp->dma_addr);
+	iounmap(lp->ca);
+	iounmap(lp->mpu_port);
+	free_netdev(dev);
+	return 0;
+}
+
+static struct platform_driver sni_82596_driver = {
+	.probe	= sni_82596_probe,
+	.remove	= __devexit_p(sni_82596_driver_remove),
+	.driver	= {
+		.name	= sni_82596_string,
+	},
+};
+
+static int __init sni_82596_init(void)
+{
+	printk(KERN_INFO SNI_82596_DRIVER_VERSION "\n");
+	return platform_driver_register(&sni_82596_driver);
+}
+
+
+static void __exit sni_82596_exit(void)
+{
+	platform_driver_unregister(&sni_82596_driver);
+}
+
+module_init(sni_82596_init);
+module_exit(sni_82596_exit);
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 7a4aa6a..f5abb52 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -434,7 +434,8 @@
 				      bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
 	if (!descr->skb) {
 		if (netif_msg_rx_err(card) && net_ratelimit())
-			pr_err("Not enough memory to allocate rx buffer\n");
+			dev_err(&card->netdev->dev,
+			        "Not enough memory to allocate rx buffer\n");
 		card->spider_stats.alloc_rx_skb_error++;
 		return -ENOMEM;
 	}
@@ -455,7 +456,7 @@
 		dev_kfree_skb_any(descr->skb);
 		descr->skb = NULL;
 		if (netif_msg_rx_err(card) && net_ratelimit())
-			pr_err("Could not iommu-map rx buffer\n");
+			dev_err(&card->netdev->dev, "Could not iommu-map rx buffer\n");
 		card->spider_stats.rx_iommu_map_error++;
 		hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
 	} else {
@@ -500,6 +501,20 @@
 }
 
 /**
+ * spider_net_disable_rxdmac - disables the receive DMA controller
+ * @card: card structure
+ *
+ * spider_net_disable_rxdmac terminates processing on the DMA controller
+ * by turning it off, with the force-end flag set.
+ */
+static inline void
+spider_net_disable_rxdmac(struct spider_net_card *card)
+{
+	spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
+			     SPIDER_NET_DMA_RX_FEND_VALUE);
+}
+
+/**
  * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains
  * @card: card structure
  *
@@ -655,20 +670,6 @@
 }
 
 /**
- * spider_net_disable_rxdmac - disables the receive DMA controller
- * @card: card structure
- *
- * spider_net_disable_rxdmac terminates processing on the DMA controller by
- * turing off DMA and issueing a force end
- */
-static void
-spider_net_disable_rxdmac(struct spider_net_card *card)
-{
-	spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
-			     SPIDER_NET_DMA_RX_FEND_VALUE);
-}
-
-/**
  * spider_net_prepare_tx_descr - fill tx descriptor with skb data
  * @card: card structure
  * @descr: descriptor structure to fill out
@@ -692,7 +693,7 @@
 	buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
 	if (pci_dma_mapping_error(buf)) {
 		if (netif_msg_tx_err(card) && net_ratelimit())
-			pr_err("could not iommu-map packet (%p, %i). "
+			dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). "
 				  "Dropping packet\n", skb->data, skb->len);
 		card->spider_stats.tx_iommu_map_error++;
 		return -ENOMEM;
@@ -715,7 +716,7 @@
 	hwdescr->data_status = 0;
 
 	hwdescr->dmac_cmd_status =
-			SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS;
+			SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_TXFRMTL;
 	spin_unlock_irqrestore(&chain->lock, flags);
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL)
@@ -832,9 +833,8 @@
 		case SPIDER_NET_DESCR_PROTECTION_ERROR:
 		case SPIDER_NET_DESCR_FORCE_END:
 			if (netif_msg_tx_err(card))
-				pr_err("%s: forcing end of tx descriptor "
-				       "with status x%02x\n",
-				       card->netdev->name, status);
+				dev_err(&card->netdev->dev, "forcing end of tx descriptor "
+				       "with status x%02x\n", status);
 			card->netdev_stats.tx_errors++;
 			break;
 
@@ -1022,34 +1022,94 @@
 	netif_receive_skb(skb);
 }
 
-#ifdef DEBUG
 static void show_rx_chain(struct spider_net_card *card)
 {
 	struct spider_net_descr_chain *chain = &card->rx_chain;
 	struct spider_net_descr *start= chain->tail;
 	struct spider_net_descr *descr= start;
+	struct spider_net_hw_descr *hwd = start->hwdescr;
+	struct device *dev = &card->netdev->dev;
+	u32 curr_desc, next_desc;
 	int status;
 
+	int tot = 0;
 	int cnt = 0;
-	int cstat = spider_net_get_descr_status(descr);
-	printk(KERN_INFO "RX chain tail at descr=%ld\n",
-	     (start - card->descr) - card->tx_chain.num_desc);
+	int off = start - chain->ring;
+	int cstat = hwd->dmac_cmd_status;
+
+	dev_info(dev, "Total number of descrs=%d\n",
+		chain->num_desc);
+	dev_info(dev, "Chain tail located at descr=%d, status=0x%x\n",
+		off, cstat);
+
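+	/* GDACTDPA holds the descriptor the DMA engine is currently
+	 * working on, GDACNEXTDA the one it will fetch next
+	 * (assumption based on how they are reported below) */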
+	curr_desc = spider_net_read_reg(card, SPIDER_NET_GDACTDPA);
+	next_desc = spider_net_read_reg(card, SPIDER_NET_GDACNEXTDA);
+
 	status = cstat;
 	do
 	{
-		status = spider_net_get_descr_status(descr);
+		hwd = descr->hwdescr;
+		off = descr - chain->ring;
+		status = hwd->dmac_cmd_status;
+
+		if (descr == chain->head)
+			dev_info(dev, "Chain head is at %d, head status=0x%x\n",
+			         off, status);
+
+		if (curr_desc == descr->bus_addr)
+			dev_info(dev, "HW curr desc (GDACTDPA) is at %d, status=0x%x\n",
+			         off, status);
+
+		if (next_desc == descr->bus_addr)
+			dev_info(dev, "HW next desc (GDACNEXTDA) is at %d, status=0x%x\n",
+			         off, status);
+
+		if (hwd->next_descr_addr == 0)
+			dev_info(dev, "chain is cut at %d\n", off);
+
 		if (cstat != status) {
-			printk(KERN_INFO "Have %d descrs with stat=x%08x\n", cnt, cstat);
+			int from = (chain->num_desc + off - cnt) % chain->num_desc;
+			int to = (chain->num_desc + off - 1) % chain->num_desc;
+			dev_info(dev, "Have %d (from %d to %d) descrs "
+			         "with stat=0x%08x\n", cnt, from, to, cstat);
 			cstat = status;
 			cnt = 0;
 		}
+
 		cnt ++;
+		tot ++;
 		descr = descr->next;
 	} while (descr != start);
-	printk(KERN_INFO "Last %d descrs with stat=x%08x\n", cnt, cstat);
-}
+
+	dev_info(dev, "Last %d descrs with stat=0x%08x "
+	         "for a total of %d descrs\n", cnt, cstat, tot);
+
+#ifdef DEBUG
+	/* Now dump the whole ring */
+	descr = start;
+	do
+	{
+		struct spider_net_hw_descr *hwd = descr->hwdescr;
+		status = spider_net_get_descr_status(hwd);
+		cnt = descr - chain->ring;
+		dev_info(dev, "Descr %d stat=0x%08x skb=%p\n",
+		         cnt, status, descr->skb);
+		dev_info(dev, "bus addr=%08x buf addr=%08x sz=%d\n",
+		         descr->bus_addr, hwd->buf_addr, hwd->buf_size);
+		dev_info(dev, "next=%08x result sz=%d valid sz=%d\n",
+		         hwd->next_descr_addr, hwd->result_size,
+		         hwd->valid_size);
+		dev_info(dev, "dmac=%08x data stat=%08x data err=%08x\n",
+		         hwd->dmac_cmd_status, hwd->data_status,
+		         hwd->data_error);
+		dev_info(dev, "\n");
+
+		descr = descr->next;
+	} while (descr != start);
 #endif
 
+}
+
 /**
  * spider_net_resync_head_ptr - Advance head ptr past empty descrs
  *
@@ -1127,6 +1187,7 @@
 	struct spider_net_descr_chain *chain = &card->rx_chain;
 	struct spider_net_descr *descr = chain->tail;
 	struct spider_net_hw_descr *hwdescr = descr->hwdescr;
+	u32 hw_buf_addr;
 	int status;
 
 	status = spider_net_get_descr_status(hwdescr);
@@ -1140,15 +1201,17 @@
 	chain->tail = descr->next;
 
 	/* unmap descriptor */
-	pci_unmap_single(card->pdev, hwdescr->buf_addr,
+	hw_buf_addr = hwdescr->buf_addr;
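+	/* poison the stored address so any stale hardware reference
+	 * stands out (assumed intent of the 0xffffffff) */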
+	hwdescr->buf_addr = 0xffffffff;
+	pci_unmap_single(card->pdev, hw_buf_addr,
 			SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
 
 	if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) ||
 	     (status == SPIDER_NET_DESCR_PROTECTION_ERROR) ||
 	     (status == SPIDER_NET_DESCR_FORCE_END) ) {
 		if (netif_msg_rx_err(card))
-			pr_err("%s: dropping RX descriptor with state %d\n",
-			       card->netdev->name, status);
+			dev_err(&card->netdev->dev,
+			       "dropping RX descriptor with state %d\n", status);
 		card->netdev_stats.rx_dropped++;
 		goto bad_desc;
 	}
@@ -1156,8 +1219,8 @@
 	if ( (status != SPIDER_NET_DESCR_COMPLETE) &&
 	     (status != SPIDER_NET_DESCR_FRAME_END) ) {
 		if (netif_msg_rx_err(card))
-			pr_err("%s: RX descriptor with unknown state %d\n",
-			       card->netdev->name, status);
+			dev_err(&card->netdev->dev,
+			       "RX descriptor with unknown state %d\n", status);
 		card->spider_stats.rx_desc_unk_state++;
 		goto bad_desc;
 	}
@@ -1165,18 +1228,17 @@
 	/* The cases we'll throw away the packet immediately */
 	if (hwdescr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
 		if (netif_msg_rx_err(card))
-			pr_err("%s: error in received descriptor found, "
+			dev_err(&card->netdev->dev,
+			       "error in received descriptor found, "
 			       "data_status=x%08x, data_error=x%08x\n",
-			       card->netdev->name,
 			       hwdescr->data_status, hwdescr->data_error);
 		goto bad_desc;
 	}
 
-	if (hwdescr->dmac_cmd_status & 0xfcf4) {
-		pr_err("%s: bad status, cmd_status=x%08x\n",
-			       card->netdev->name,
+	if (hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_BAD_STATUS) {
+		dev_err(&card->netdev->dev, "bad status, cmd_status=x%08x\n",
 			       hwdescr->dmac_cmd_status);
-		pr_err("buf_addr=x%08x\n", hwdescr->buf_addr);
+		pr_err("buf_addr=x%08x\n", hw_buf_addr);
 		pr_err("buf_size=x%08x\n", hwdescr->buf_size);
 		pr_err("next_descr_addr=x%08x\n", hwdescr->next_descr_addr);
 		pr_err("result_size=x%08x\n", hwdescr->result_size);
@@ -1196,6 +1258,8 @@
 	return 1;
 
 bad_desc:
+	if (netif_msg_rx_err(card))
+		show_rx_chain(card);
 	dev_kfree_skb_irq(descr->skb);
 	descr->skb = NULL;
 	hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
@@ -1221,7 +1285,6 @@
 	int packets_to_do, packets_done = 0;
 	int no_more_packets = 0;
 
-	spider_net_cleanup_tx_ring(card);
 	packets_to_do = min(*budget, netdev->quota);
 
 	while (packets_to_do) {
@@ -1246,6 +1309,8 @@
 	spider_net_refill_rx_chain(card);
 	spider_net_enable_rxdmac(card);
 
+	spider_net_cleanup_tx_ring(card);
+
 	/* if all packets are in the stack, enable interrupts and return 0 */
 	/* if not, return 1 */
 	if (no_more_packets) {
@@ -1415,7 +1480,7 @@
 	case SPIDER_NET_GPWFFINT:
 		/* PHY command queue full */
 		if (netif_msg_intr(card))
-			pr_err("PHY write queue full\n");
+			dev_err(&card->netdev->dev, "PHY write queue full\n");
 		show_error = 0;
 		break;
 
@@ -1582,9 +1647,8 @@
 	}
 
 	if ((show_error) && (netif_msg_intr(card)) && net_ratelimit())
-		pr_err("Got error interrupt on %s, GHIINT0STS = 0x%08x, "
+		dev_err(&card->netdev->dev, "Error interrupt, GHIINT0STS = 0x%08x, "
 		       "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n",
-		       card->netdev->name,
 		       status_reg, error_reg1, error_reg2);
 
 	/* clear interrupt sources */
@@ -1849,7 +1913,8 @@
 			     SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) == 0) {
 		if ( (firmware->size != SPIDER_NET_FIRMWARE_LEN) &&
 		     netif_msg_probe(card) ) {
-			pr_err("Incorrect size of spidernet firmware in " \
+			dev_err(&card->netdev->dev,
+			       "Incorrect size of spidernet firmware in " \
 			       "filesystem. Looking in host firmware...\n");
 			goto try_host_fw;
 		}
@@ -1873,8 +1938,8 @@
 
 	if ( (fw_size != SPIDER_NET_FIRMWARE_LEN) &&
 	     netif_msg_probe(card) ) {
-		pr_err("Incorrect size of spidernet firmware in " \
-		       "host firmware\n");
+		dev_err(&card->netdev->dev,
+		       "Incorrect size of spidernet firmware in host firmware\n");
 		goto done;
 	}
 
@@ -1884,7 +1949,8 @@
 	return err;
 out_err:
 	if (netif_msg_probe(card))
-		pr_err("Couldn't find spidernet firmware in filesystem " \
+		dev_err(&card->netdev->dev,
+		       "Couldn't find spidernet firmware in filesystem " \
 		       "or host firmware\n");
 	return err;
 }
@@ -2279,13 +2345,14 @@
 
 	result = spider_net_set_mac(netdev, &addr);
 	if ((result) && (netif_msg_probe(card)))
-		pr_err("Failed to set MAC address: %i\n", result);
+		dev_err(&card->netdev->dev,
+		        "Failed to set MAC address: %i\n", result);
 
 	result = register_netdev(netdev);
 	if (result) {
 		if (netif_msg_probe(card))
-			pr_err("Couldn't register net_device: %i\n",
-				  result);
+			dev_err(&card->netdev->dev,
+			        "Couldn't register net_device: %i\n", result);
 		return result;
 	}
 
@@ -2363,17 +2430,19 @@
 	unsigned long mmio_start, mmio_len;
 
 	if (pci_enable_device(pdev)) {
-		pr_err("Couldn't enable PCI device\n");
+		dev_err(&pdev->dev, "Couldn't enable PCI device\n");
 		return NULL;
 	}
 
 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
-		pr_err("Couldn't find proper PCI device base address.\n");
+		dev_err(&pdev->dev,
+		        "Couldn't find proper PCI device base address.\n");
 		goto out_disable_dev;
 	}
 
 	if (pci_request_regions(pdev, spider_net_driver_name)) {
-		pr_err("Couldn't obtain PCI resources, aborting.\n");
+		dev_err(&pdev->dev,
+		        "Couldn't obtain PCI resources, aborting.\n");
 		goto out_disable_dev;
 	}
 
@@ -2381,8 +2450,8 @@
 
 	card = spider_net_alloc_card();
 	if (!card) {
-		pr_err("Couldn't allocate net_device structure, "
-			  "aborting.\n");
+		dev_err(&pdev->dev,
+		        "Couldn't allocate net_device structure, aborting.\n");
 		goto out_release_regions;
 	}
 	card->pdev = pdev;
@@ -2396,7 +2465,8 @@
 	card->regs = ioremap(mmio_start, mmio_len);
 
 	if (!card->regs) {
-		pr_err("Couldn't obtain PCI resources, aborting.\n");
+		dev_err(&pdev->dev,
+		        "Couldn't obtain PCI resources, aborting.\n");
 		goto out_release_regions;
 	}
 
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
index 1d054aa..dbbdb8c 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/spider_net.h
@@ -349,11 +349,23 @@
 #define SPIDER_NET_GPRDAT_MASK			0x0000ffff
 
 #define SPIDER_NET_DMAC_NOINTR_COMPLETE		0x00800000
-#define SPIDER_NET_DMAC_NOCS			0x00040000
+#define SPIDER_NET_DMAC_TXFRMTL		0x00040000
 #define SPIDER_NET_DMAC_TCP			0x00020000
 #define SPIDER_NET_DMAC_UDP			0x00030000
 #define SPIDER_NET_TXDCEST			0x08000000
 
+#define SPIDER_NET_DESCR_RXFDIS        0x00000001
+#define SPIDER_NET_DESCR_RXDCEIS       0x00000002
+#define SPIDER_NET_DESCR_RXDEN0IS      0x00000004
+#define SPIDER_NET_DESCR_RXINVDIS      0x00000008
+#define SPIDER_NET_DESCR_RXRERRIS      0x00000010
+#define SPIDER_NET_DESCR_RXFDCIMS      0x00000100
+#define SPIDER_NET_DESCR_RXDCEIMS      0x00000200
+#define SPIDER_NET_DESCR_RXDEN0IMS     0x00000400
+#define SPIDER_NET_DESCR_RXINVDIMS     0x00000800
+#define SPIDER_NET_DESCR_RXRERRMIS     0x00001000
+#define SPIDER_NET_DESCR_UNUSED        0x077fe0e0
+
 #define SPIDER_NET_DESCR_IND_PROC_MASK		0xF0000000
 #define SPIDER_NET_DESCR_COMPLETE		0x00000000 /* used in rx and tx */
 #define SPIDER_NET_DESCR_RESPONSE_ERROR		0x10000000 /* used in rx and tx */
@@ -364,6 +376,13 @@
 #define SPIDER_NET_DESCR_NOT_IN_USE		0xF0000000
 #define SPIDER_NET_DESCR_TXDESFLG		0x00800000
 
+#define SPIDER_NET_DESCR_BAD_STATUS   (SPIDER_NET_DESCR_RXDEN0IS | \
+                                       SPIDER_NET_DESCR_RXRERRIS | \
+                                       SPIDER_NET_DESCR_RXDEN0IMS | \
+                                       SPIDER_NET_DESCR_RXINVDIMS | \
+                                       SPIDER_NET_DESCR_RXRERRMIS | \
+                                       SPIDER_NET_DESCR_UNUSED)
+
 /* Descriptor, as defined by the hardware */
 struct spider_net_hw_descr {
 	u32 buf_addr;
diff --git a/drivers/net/tulip/Kconfig b/drivers/net/tulip/Kconfig
index 8c9634a..1c537d5 100644
--- a/drivers/net/tulip/Kconfig
+++ b/drivers/net/tulip/Kconfig
@@ -2,17 +2,17 @@
 # Tulip family network device configuration
 #
 
-menu "Tulip family network device support"
-	depends on NET_ETHERNET && (PCI || EISA || CARDBUS)
-
-config NET_TULIP
+menuconfig NET_TULIP
 	bool "\"Tulip\" family network device support"
+	depends on PCI || EISA || CARDBUS
 	help
 	  This selects the "Tulip" family of EISA/PCI network cards.
 
+if NET_TULIP
+
 config DE2104X
 	tristate "Early DECchip Tulip (dc2104x) PCI support (EXPERIMENTAL)"
-	depends on NET_TULIP && PCI && EXPERIMENTAL
+	depends on PCI && EXPERIMENTAL
 	select CRC32
 	---help---
 	  This driver is developed for the SMC EtherPower series Ethernet
@@ -30,7 +30,7 @@
 
 config TULIP
 	tristate "DECchip Tulip (dc2114x) PCI support"
-	depends on NET_TULIP && PCI
+	depends on PCI
 	select CRC32
 	---help---
 	  This driver is developed for the SMC EtherPower series Ethernet
@@ -95,7 +95,7 @@
 
 config DE4X5
 	tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA"
-	depends on NET_TULIP && (PCI || EISA)
+	depends on PCI || EISA
 	select CRC32
 	---help---
 	  This is support for the DIGITAL series of PCI/EISA Ethernet cards.
@@ -112,7 +112,7 @@
 
 config WINBOND_840
 	tristate "Winbond W89c840 Ethernet support"
-	depends on NET_TULIP && PCI
+	depends on PCI
 	select CRC32
 	select MII
 	help
@@ -123,7 +123,7 @@
 
 config DM9102
 	tristate "Davicom DM910x/DM980x support"
-	depends on NET_TULIP && PCI
+	depends on PCI
 	select CRC32
 	---help---
 	  This driver is for DM9102(A)/DM9132/DM9801 compatible PCI cards from
@@ -137,7 +137,7 @@
 
 config ULI526X
 	tristate "ULi M526x controller support"
-	depends on NET_TULIP && PCI
+	depends on PCI
 	select CRC32
 	---help---
 	  This driver is for ULi M5261/M5263 10/100M Ethernet Controller
@@ -149,7 +149,7 @@
 	  
 config PCMCIA_XIRCOM
 	tristate "Xircom CardBus support (new driver)"
-	depends on NET_TULIP && CARDBUS
+	depends on CARDBUS
 	---help---
 	  This driver is for the Digital "Tulip" Ethernet CardBus adapters.
 	  It should work with most DEC 21*4*-based chips/ethercards, as well
@@ -162,7 +162,7 @@
 
 config PCMCIA_XIRTULIP
 	tristate "Xircom Tulip-like CardBus support (old driver)"
-	depends on NET_TULIP && CARDBUS && BROKEN_ON_SMP
+	depends on CARDBUS && BROKEN_ON_SMP
 	select CRC32
 	---help---
 	  This driver is for the Digital "Tulip" Ethernet CardBus adapters.
@@ -174,5 +174,4 @@
 	  <file:Documentation/networking/net-modules.txt>.  The module will
 	  be called xircom_tulip_cb.  If unsure, say N.
 
-endmenu
-
+endif # NET_TULIP
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 8617298..d380e0b 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -785,7 +785,6 @@
 
 	de->tx_head = NEXT_TX(entry);
 
-	BUG_ON(TX_BUFFS_AVAIL(de) < 0);
 	if (TX_BUFFS_AVAIL(de) == 0)
 		netif_stop_queue(dev);
 
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 62143f9..42fca26 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -597,7 +597,7 @@
 #endif
 
 struct parameters {
-    int fdx;
+    bool fdx;
     int autosense;
 };
 
@@ -809,10 +809,10 @@
     s32  irq_en;                            /* Summary interrupt bits       */
     int  media;                             /* Media (eg TP), mode (eg 100B)*/
     int  c_media;                           /* Remember the last media conn */
-    int  fdx;                               /* media full duplex flag       */
+    bool fdx;                               /* media full duplex flag       */
     int  linkOK;                            /* Link is OK                   */
     int  autosense;                         /* Allow/disallow autosensing   */
-    int  tx_enable;                         /* Enable descriptor polling    */
+    bool tx_enable;                         /* Enable descriptor polling    */
     int  setup_f;                           /* Setup frame filtering type   */
     int  local_state;                       /* State within a 'media' state */
     struct mii_phy phy[DE4X5_MAX_PHY];      /* List of attached PHY devices */
@@ -838,8 +838,8 @@
     struct de4x5_srom srom;                 /* A copy of the SROM           */
     int cfrv;				    /* Card CFRV copy */
     int rx_ovf;                             /* Check for 'RX overflow' tag  */
-    int useSROM;                            /* For non-DEC card use SROM    */
-    int useMII;                             /* Infoblock using the MII      */
+    bool useSROM;                           /* For non-DEC card use SROM    */
+    bool useMII;                            /* Infoblock using the MII      */
     int asBitValid;                         /* Autosense bits in GEP?       */
     int asPolarity;                         /* 0 => asserted high           */
     int asBit;                              /* Autosense bit number in GEP  */
@@ -928,7 +928,7 @@
 static int     test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec);
 static int     test_for_100Mb(struct net_device *dev, int msec);
 static int     wait_for_link(struct net_device *dev);
-static int     test_mii_reg(struct net_device *dev, int reg, int mask, int pol, long msec);
+static int     test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec);
 static int     is_spd_100(struct net_device *dev);
 static int     is_100_up(struct net_device *dev);
 static int     is_10_up(struct net_device *dev);
@@ -1109,7 +1109,7 @@
     /*
     ** Now find out what kind of DC21040/DC21041/DC21140 board we have.
     */
-    lp->useSROM = FALSE;
+    lp->useSROM = false;
     if (lp->bus == PCI) {
 	PCI_signature(name, lp);
     } else {
@@ -1137,7 +1137,7 @@
 	lp->cache.gepc = GEP_INIT;
 	lp->asBit = GEP_SLNK;
 	lp->asPolarity = GEP_SLNK;
-	lp->asBitValid = TRUE;
+	lp->asBitValid = ~0;
 	lp->timeout = -1;
 	lp->gendev = gendev;
 	spin_lock_init(&lp->lock);
@@ -1463,7 +1463,7 @@
     u_long flags = 0;
 
     netif_stop_queue(dev);
-    if (lp->tx_enable == NO) {                   /* Cannot send for now */
+    if (!lp->tx_enable) {                   /* Cannot send for now */
 	return -1;
     }
 
@@ -2424,7 +2424,7 @@
     switch (lp->media) {
     case INIT:
 	DISABLE_IRQs;
-	lp->tx_enable = NO;
+	lp->tx_enable = false;
 	lp->timeout = -1;
 	de4x5_save_skbs(dev);
 	if ((lp->autosense == AUTO) || (lp->autosense == TP)) {
@@ -2477,7 +2477,7 @@
 	    lp->c_media = lp->media;
 	}
 	lp->media = INIT;
-	lp->tx_enable = NO;
+	lp->tx_enable = false;
 	break;
     }
 
@@ -2578,7 +2578,7 @@
     switch (lp->media) {
     case INIT:
 	DISABLE_IRQs;
-	lp->tx_enable = NO;
+	lp->tx_enable = false;
 	lp->timeout = -1;
 	de4x5_save_skbs(dev);          /* Save non transmitted skb's */
 	if ((lp->autosense == AUTO) || (lp->autosense == TP_NW)) {
@@ -2757,7 +2757,7 @@
 	    lp->c_media = lp->media;
 	}
 	lp->media = INIT;
-	lp->tx_enable = NO;
+	lp->tx_enable = false;
 	break;
     }
 
@@ -2781,7 +2781,7 @@
     case INIT:
         if (lp->timeout < 0) {
 	    DISABLE_IRQs;
-	    lp->tx_enable = FALSE;
+	    lp->tx_enable = false;
 	    lp->linkOK = 0;
 	    de4x5_save_skbs(dev);          /* Save non transmitted skb's */
 	}
@@ -2830,7 +2830,7 @@
 	    if (lp->timeout < 0) {
 		mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
 	    }
-	    cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, FALSE, 500);
+	    cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500);
 	    if (cr < 0) {
 		next_tick = cr & ~TIMER_CB;
 	    } else {
@@ -2845,7 +2845,7 @@
 	    break;
 
 	case 1:
-	    if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, TRUE, 2000)) < 0) {
+	    if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000)) < 0) {
 		next_tick = sr & ~TIMER_CB;
 	    } else {
 		lp->media = SPD_DET;
@@ -2857,10 +2857,10 @@
 		    if (!(anlpa & MII_ANLPA_RF) &&
 			 (cap = anlpa & MII_ANLPA_TAF & ana)) {
 			if (cap & MII_ANA_100M) {
-			    lp->fdx = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) ? TRUE : FALSE);
+			    lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0;
 			    lp->media = _100Mb;
 			} else if (cap & MII_ANA_10M) {
-			    lp->fdx = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) ? TRUE : FALSE);
+			    lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0;
 
 			    lp->media = _10Mb;
 			}
@@ -2932,7 +2932,7 @@
 	    lp->c_media = lp->media;
 	}
 	lp->media = INIT;
-	lp->tx_enable = FALSE;
+	lp->tx_enable = false;
 	break;
     }
 
@@ -2965,7 +2965,7 @@
     case INIT:
         if (lp->timeout < 0) {
 	    DISABLE_IRQs;
-	    lp->tx_enable = FALSE;
+	    lp->tx_enable = false;
 	    lp->linkOK = 0;
             lp->timeout = -1;
 	    de4x5_save_skbs(dev);            /* Save non transmitted skb's */
@@ -3013,7 +3013,7 @@
 	    if (lp->timeout < 0) {
 		mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
 	    }
-	    cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, FALSE, 500);
+	    cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500);
 	    if (cr < 0) {
 		next_tick = cr & ~TIMER_CB;
 	    } else {
@@ -3028,7 +3028,8 @@
 	    break;
 
 	case 1:
-	    if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, TRUE, 2000)) < 0) {
+	    sr = test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000);
+	    if (sr < 0) {
 		next_tick = sr & ~TIMER_CB;
 	    } else {
 		lp->media = SPD_DET;
@@ -3040,10 +3041,10 @@
 		    if (!(anlpa & MII_ANLPA_RF) &&
 			 (cap = anlpa & MII_ANLPA_TAF & ana)) {
 			if (cap & MII_ANA_100M) {
-			    lp->fdx = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) ? TRUE : FALSE);
+			    lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0;
 			    lp->media = _100Mb;
 			} else if (cap & MII_ANA_10M) {
-			    lp->fdx = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) ? TRUE : FALSE);
+			    lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0;
 			    lp->media = _10Mb;
 			}
 		    }
@@ -3222,14 +3223,14 @@
 {
     struct de4x5_private *lp = netdev_priv(dev);
 
-    lp->fdx = 0;
+    lp->fdx = false;
     if (lp->infoblock_media == lp->media)
       return 0;
 
     switch(lp->infoblock_media) {
       case SROM_10BASETF:
 	if (!lp->params.fdx) return -1;
-	lp->fdx = TRUE;
+	lp->fdx = true;
       case SROM_10BASET:
 	if (lp->params.fdx && !lp->fdx) return -1;
 	if ((lp->chipset == DC21140) || ((lp->chipset & ~0x00ff) == DC2114x)) {
@@ -3249,7 +3250,7 @@
 
       case SROM_100BASETF:
         if (!lp->params.fdx) return -1;
-	lp->fdx = TRUE;
+	lp->fdx = true;
       case SROM_100BASET:
 	if (lp->params.fdx && !lp->fdx) return -1;
 	lp->media = _100Mb;
@@ -3261,7 +3262,7 @@
 
       case SROM_100BASEFF:
 	if (!lp->params.fdx) return -1;
-	lp->fdx = TRUE;
+	lp->fdx = true;
       case SROM_100BASEF:
 	if (lp->params.fdx && !lp->fdx) return -1;
 	lp->media = _100Mb;
@@ -3297,7 +3298,7 @@
     spin_lock_irqsave(&lp->lock, flags);
     de4x5_rst_desc_ring(dev);
     de4x5_setup_intr(dev);
-    lp->tx_enable = YES;
+    lp->tx_enable = true;
     spin_unlock_irqrestore(&lp->lock, flags);
     outl(POLL_DEMAND, DE4X5_TPD);
 
@@ -3336,7 +3337,7 @@
             }
         }
 	if (lp->useMII) {
-	    next_tick = test_mii_reg(dev, MII_CR, MII_CR_RST, FALSE, 500);
+	    next_tick = test_mii_reg(dev, MII_CR, MII_CR_RST, false, 500);
 	}
     } else if (lp->chipset == DC21140) {
 	PHY_HARD_RESET;
@@ -3466,7 +3467,7 @@
 **
 */
 static int
-test_mii_reg(struct net_device *dev, int reg, int mask, int pol, long msec)
+test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec)
 {
     struct de4x5_private *lp = netdev_priv(dev);
     int test;
@@ -3476,9 +3477,8 @@
 	lp->timeout = msec/100;
     }
 
-    if (pol) pol = ~0;
     reg = mii_rd((u_char)reg, lp->phy[lp->active].addr, DE4X5_MII) & mask;
-    test = (reg ^ pol) & mask;
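+    /* zero once the masked bits are set (pol) or cleared (!pol) */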
+    test = (reg ^ (pol ? ~0 : 0)) & mask;
 
     if (test && --lp->timeout) {
 	reg = 100 | TIMER_CB;
@@ -3992,10 +3992,10 @@
 			     )))))));
 	}
 	if (lp->chipset != DC21041) {
-	    lp->useSROM = TRUE;             /* card is not recognisably DEC */
+	    lp->useSROM = true;             /* card is not recognisably DEC */
 	}
     } else if ((lp->chipset & ~0x00ff) == DC2114x) {
-	lp->useSROM = TRUE;
+	lp->useSROM = true;
     }
 
     return status;
@@ -4216,7 +4216,7 @@
 	memset((char *)&lp->srom, 0, sizeof(struct de4x5_srom));
 	memcpy(lp->srom.ieee_addr, (char *)dev->dev_addr, ETH_ALEN);
 	memcpy(lp->srom.info, (char *)&srom_repair_info[SMC-1], 100);
-	lp->useSROM = TRUE;
+	lp->useSROM = true;
 	break;
     }
 
@@ -4392,7 +4392,7 @@
 	if (lp->chipset == infoleaf_array[i].chipset) break;
     }
     if (i == INFOLEAF_SIZE) {
-	lp->useSROM = FALSE;
+	lp->useSROM = false;
 	printk("%s: Cannot find correct chipset for SROM decoding!\n",
 	                                                          dev->name);
 	return -ENXIO;
@@ -4409,7 +4409,7 @@
 	    if (lp->device == *p) break;
 	}
 	if (i == 0) {
-	    lp->useSROM = FALSE;
+	    lp->useSROM = false;
 	    printk("%s: Cannot find correct PCI device [%d] for SROM decoding!\n",
 	                                               dev->name, lp->device);
 	    return -ENXIO;
@@ -4542,7 +4542,7 @@
 	}
 	lp->media = INIT;
 	lp->tcount = 0;
-	lp->tx_enable = FALSE;
+	lp->tx_enable = false;
     }
 
     return next_tick & ~TIMER_CB;
@@ -4577,7 +4577,7 @@
 	}
 	lp->media = INIT;
 	lp->tcount = 0;
-	lp->tx_enable = FALSE;
+	lp->tx_enable = false;
     }
 
     return next_tick & ~TIMER_CB;
@@ -4611,7 +4611,7 @@
 	}
 	lp->media = INIT;
 	lp->tcount = 0;
-	lp->tx_enable = FALSE;
+	lp->tx_enable = false;
     }
 
     return next_tick & ~TIMER_CB;
@@ -4650,7 +4650,7 @@
 	lp->asBit = 1 << ((csr6 >> 1) & 0x07);
 	lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
 	lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
-	lp->useMII = FALSE;
+	lp->useMII = false;
 
 	de4x5_switch_mac_port(dev);
     }
@@ -4691,7 +4691,7 @@
 	lp->asBit = 1 << ((csr6 >> 1) & 0x07);
 	lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
 	lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
-	lp->useMII = FALSE;
+	lp->useMII = false;
 
 	de4x5_switch_mac_port(dev);
     }
@@ -4731,7 +4731,7 @@
         lp->ibn = 1;
         lp->active = *p;
 	lp->infoblock_csr6 = OMR_MII_100;
-	lp->useMII = TRUE;
+	lp->useMII = true;
 	lp->infoblock_media = ANS;
 
 	de4x5_switch_mac_port(dev);
@@ -4773,7 +4773,7 @@
         lp->cache.gepc = ((s32)(TWIDDLE(p)) << 16); p += 2;
         lp->cache.gep  = ((s32)(TWIDDLE(p)) << 16);
 	lp->infoblock_csr6 = OMR_SIA;
-	lp->useMII = FALSE;
+	lp->useMII = false;
 
 	de4x5_switch_mac_port(dev);
     }
@@ -4814,7 +4814,7 @@
 	lp->active = *p;
 	if (MOTO_SROM_BUG) lp->active = 0;
 	lp->infoblock_csr6 = OMR_MII_100;
-	lp->useMII = TRUE;
+	lp->useMII = true;
 	lp->infoblock_media = ANS;
 
 	de4x5_switch_mac_port(dev);
@@ -4856,7 +4856,7 @@
 	lp->asBit = 1 << ((csr6 >> 1) & 0x07);
 	lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
 	lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
-	lp->useMII = FALSE;
+	lp->useMII = false;
 
 	de4x5_switch_mac_port(dev);
     }
@@ -5077,7 +5077,7 @@
     int id;
 
     lp->active = 0;
-    lp->useMII = TRUE;
+    lp->useMII = true;
 
     /* Search the MII address space for possible PHY devices */
     for (n=0, lp->mii_cnt=0, i=1; !((i==1) && (n==1)); i=(i+1)%DE4X5_MAX_MII) {
@@ -5127,7 +5127,7 @@
 	    de4x5_dbg_mii(dev, k);
 	}
     }
-    if (!lp->mii_cnt) lp->useMII = FALSE;
+    if (!lp->mii_cnt) lp->useMII = false;
 
     return lp->mii_cnt;
 }
diff --git a/drivers/net/tulip/de4x5.h b/drivers/net/tulip/de4x5.h
index 57226e5..12af0cc 100644
--- a/drivers/net/tulip/de4x5.h
+++ b/drivers/net/tulip/de4x5.h
@@ -893,15 +893,6 @@
 #define PHYS_ADDR_ONLY       1     /* Update the physical address only */
 
 /*
-** Booleans
-*/
-#define NO                   0
-#define FALSE                0
-
-#define YES                  ~0
-#define TRUE                 ~0
-
-/*
 ** Adapter state
 */
 #define INITIALISED          0     /* After h/w initialised and mem alloc'd */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index a12f576..86b6908 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -192,7 +192,7 @@
 				usb_pipeendpoint(pipe), maxp, period);
 		}
 	}
-	return  0;
+	return 0;
 }
 
 /* Passes this packet up the stack, updating its accounting.
@@ -326,7 +326,7 @@
 	if (netif_running (dev->net)
 			&& netif_device_present (dev->net)
 			&& !test_bit (EVENT_RX_HALT, &dev->flags)) {
-		switch (retval = usb_submit_urb (urb, GFP_ATOMIC)){
+		switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
 		case -EPIPE:
 			usbnet_defer_kevent (dev, EVENT_RX_HALT);
 			break;
@@ -393,8 +393,8 @@
 	entry->urb = NULL;
 
 	switch (urb_status) {
-	    // success
-	    case 0:
+	/* success */
+	case 0:
 		if (skb->len < dev->net->hard_header_len) {
 			entry->state = rx_cleanup;
 			dev->stats.rx_errors++;
@@ -404,28 +404,30 @@
 		}
 		break;
 
-	    // stalls need manual reset. this is rare ... except that
-	    // when going through USB 2.0 TTs, unplug appears this way.
-	    // we avoid the highspeed version of the ETIMEOUT/EILSEQ
-	    // storm, recovering as needed.
-	    case -EPIPE:
+	/* stalls need manual reset. this is rare ... except that
+	 * when going through USB 2.0 TTs, unplug appears this way.
+	 * we avoid the highspeed version of the ETIMEOUT/EILSEQ
+	 * storm, recovering as needed.
+	 */
+	case -EPIPE:
 		dev->stats.rx_errors++;
 		usbnet_defer_kevent (dev, EVENT_RX_HALT);
 		// FALLTHROUGH
 
-	    // software-driven interface shutdown
-	    case -ECONNRESET:		// async unlink
-	    case -ESHUTDOWN:		// hardware gone
+	/* software-driven interface shutdown */
+	case -ECONNRESET:		/* async unlink */
+	case -ESHUTDOWN:		/* hardware gone */
 		if (netif_msg_ifdown (dev))
 			devdbg (dev, "rx shutdown, code %d", urb_status);
 		goto block;
 
-	    // we get controller i/o faults during khubd disconnect() delays.
-	    // throttle down resubmits, to avoid log floods; just temporarily,
-	    // so we still recover when the fault isn't a khubd delay.
-	    case -EPROTO:
-	    case -ETIME:
-	    case -EILSEQ:
+	/* we get controller i/o faults during khubd disconnect() delays.
+	 * throttle down resubmits, to avoid log floods; just temporarily,
+	 * so we still recover when the fault isn't a khubd delay.
+	 */
+	case -EPROTO:
+	case -ETIME:
+	case -EILSEQ:
 		dev->stats.rx_errors++;
 		if (!timer_pending (&dev->delay)) {
 			mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
@@ -438,12 +440,12 @@
 		urb = NULL;
 		break;
 
-	    // data overrun ... flush fifo?
-	    case -EOVERFLOW:
+	/* data overrun ... flush fifo? */
+	case -EOVERFLOW:
 		dev->stats.rx_over_errors++;
 		// FALLTHROUGH
 
-	    default:
+	default:
 		entry->state = rx_cleanup;
 		dev->stats.rx_errors++;
 		if (netif_msg_rx_err (dev))
@@ -471,22 +473,22 @@
 	int		status = urb->status;
 
 	switch (status) {
-	    /* success */
-	    case 0:
+	/* success */
+	case 0:
 		dev->driver_info->status(dev, urb);
 		break;
 
-	    /* software-driven interface shutdown */
-	    case -ENOENT:		// urb killed
-	    case -ESHUTDOWN:		// hardware gone
+	/* software-driven interface shutdown */
+	case -ENOENT:		/* urb killed */
+	case -ESHUTDOWN:	/* hardware gone */
 		if (netif_msg_ifdown (dev))
 			devdbg (dev, "intr shutdown, code %d", status);
 		return;
 
-	    /* NOTE:  not throttling like RX/TX, since this endpoint
-	     * already polls infrequently
-	     */
-	    default:
+	/* NOTE:  not throttling like RX/TX, since this endpoint
+	 * already polls infrequently
+	 */
+	default:
 		devdbg (dev, "intr status %d", status);
 		break;
 	}
@@ -569,9 +571,9 @@
 	temp = unlink_urbs (dev, &dev->txq) + unlink_urbs (dev, &dev->rxq);
 
 	// maybe wait for deletions to finish.
-	while (!skb_queue_empty(&dev->rxq) &&
-	       !skb_queue_empty(&dev->txq) &&
-	       !skb_queue_empty(&dev->done)) {
+	while (!skb_queue_empty(&dev->rxq)
+			&& !skb_queue_empty(&dev->txq)
+			&& !skb_queue_empty(&dev->done)) {
 		msleep(UNLINK_TIMEOUT_MS);
 		if (netif_msg_ifdown (dev))
 			devdbg (dev, "waited for %d urb completions", temp);
@@ -1011,16 +1013,16 @@
 	while ((skb = skb_dequeue (&dev->done))) {
 		entry = (struct skb_data *) skb->cb;
 		switch (entry->state) {
-		    case rx_done:
+		case rx_done:
 			entry->state = rx_cleanup;
 			rx_process (dev, skb);
 			continue;
-		    case tx_done:
-		    case rx_cleanup:
+		case tx_done:
+		case rx_cleanup:
 			usb_free_urb (entry->urb);
 			dev_kfree_skb (skb);
 			continue;
-		    default:
+		default:
 			devdbg (dev, "bogus skb state %d", entry->state);
 		}
 	}
diff --git a/drivers/net/usb/usbnet.h b/drivers/net/usb/usbnet.h
index a3f8b9e..a6c5820 100644
--- a/drivers/net/usb/usbnet.h
+++ b/drivers/net/usb/usbnet.h
@@ -47,7 +47,7 @@
 	unsigned long		data [5];
 	u32			xid;
 	u32			hard_mtu;	/* count any extra framing */
-	size_t		        rx_urb_size;    /* size for rx urbs  */
+	size_t			rx_urb_size;	/* size for rx urbs */
 	struct mii_if_info	mii;
 
 	/* various kinds of pending driver work */
@@ -85,7 +85,7 @@
 #define FLAG_NO_SETINT	0x0010		/* device can't set_interface() */
 #define FLAG_ETHER	0x0020		/* maybe use "eth%d" names */
 
-#define FLAG_FRAMING_AX 0x0040          /* AX88772/178 packets */
+#define FLAG_FRAMING_AX 0x0040		/* AX88772/178 packets */
 
 	/* init device ... can sleep, or cause probe() failure */
 	int	(*bind)(struct usbnet *, struct usb_interface *);
@@ -146,9 +146,9 @@
 
 /* CDC and RNDIS support the same host-chosen packet filters for IN transfers */
 #define	DEFAULT_FILTER	(USB_CDC_PACKET_TYPE_BROADCAST \
- 			|USB_CDC_PACKET_TYPE_ALL_MULTICAST \
- 			|USB_CDC_PACKET_TYPE_PROMISCUOUS \
- 			|USB_CDC_PACKET_TYPE_DIRECTED)
+			|USB_CDC_PACKET_TYPE_ALL_MULTICAST \
+			|USB_CDC_PACKET_TYPE_PROMISCUOUS \
+			|USB_CDC_PACKET_TYPE_DIRECTED)
 
 
 /* we record the state for each of our queued skbs */
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index fa2399c..ae27af0 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -546,6 +546,18 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called zd1201.
 
+config RTL8187
+	tristate "Realtek 8187 USB support"
+	depends on MAC80211 && USB && WLAN_80211 && EXPERIMENTAL
+	select EEPROM_93CX6
+	---help---
+	  This is a driver for RTL8187-based cards.
+	  These are USB-based chips found in cards such as:
+
+	  Netgear WG111v2
+
+	  Thanks to Realtek for their support!
+
 source "drivers/net/wireless/hostap/Kconfig"
 source "drivers/net/wireless/bcm43xx/Kconfig"
 source "drivers/net/wireless/zd1211rw/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index d212460..ef35bc6 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -44,3 +44,6 @@
 
 obj-$(CONFIG_USB_ZD1201)	+= zd1201.o
 obj-$(CONFIG_LIBERTAS_USB)     += libertas/
+
+rtl8187-objs		:= rtl8187_dev.o rtl8187_rtl8225.o
+obj-$(CONFIG_RTL8187)	+= rtl8187.o
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
index b37f1e3..d779199 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
@@ -1638,7 +1638,7 @@
 		return;
 	}
 
-	if (phy->analog > 1) {
+	if (phy->analog == 1) {
 		value = bcm43xx_phy_read(bcm, 0x0060) & ~0x003C;
 		value |= (baseband_attenuation << 2) & 0x003C;
 	} else {
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index 5b3abd5..9090052 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -326,7 +326,6 @@
 	char *p = page;
 	struct ap_data *ap = (struct ap_data *) data;
 	char *policy_txt;
-	struct list_head *ptr;
 	struct mac_entry *entry;
 
 	if (off != 0) {
@@ -352,14 +351,12 @@
 	p += sprintf(p, "MAC entries: %u\n", ap->mac_restrictions.entries);
 	p += sprintf(p, "MAC list:\n");
 	spin_lock_bh(&ap->mac_restrictions.lock);
-	for (ptr = ap->mac_restrictions.mac_list.next;
-	     ptr != &ap->mac_restrictions.mac_list; ptr = ptr->next) {
+	list_for_each_entry(entry, &ap->mac_restrictions.mac_list, list) {
 		if (p - page > PAGE_SIZE - 80) {
 			p += sprintf(p, "All entries did not fit one page.\n");
 			break;
 		}
 
-		entry = list_entry(ptr, struct mac_entry, list);
 		p += sprintf(p, MACSTR "\n", MAC2STR(entry->addr));
 	}
 	spin_unlock_bh(&ap->mac_restrictions.lock);
@@ -413,7 +410,6 @@
 static int ap_control_mac_deny(struct mac_restrictions *mac_restrictions,
 			       u8 *mac)
 {
-	struct list_head *ptr;
 	struct mac_entry *entry;
 	int found = 0;
 
@@ -421,10 +417,7 @@
 		return 0;
 
 	spin_lock_bh(&mac_restrictions->lock);
-	for (ptr = mac_restrictions->mac_list.next;
-	     ptr != &mac_restrictions->mac_list; ptr = ptr->next) {
-		entry = list_entry(ptr, struct mac_entry, list);
-
+	list_for_each_entry(entry, &mac_restrictions->mac_list, list) {
 		if (memcmp(entry->addr, mac, ETH_ALEN) == 0) {
 			found = 1;
 			break;
@@ -519,7 +512,7 @@
 {
 	char *p = page;
 	struct ap_data *ap = (struct ap_data *) data;
-	struct list_head *ptr;
+	struct sta_info *sta;
 	int i;
 
 	if (off > PROC_LIMIT) {
@@ -529,9 +522,7 @@
 
 	p += sprintf(p, "# BSSID CHAN SIGNAL NOISE RATE SSID FLAGS\n");
 	spin_lock_bh(&ap->sta_table_lock);
-	for (ptr = ap->sta_list.next; ptr != &ap->sta_list; ptr = ptr->next) {
-		struct sta_info *sta = (struct sta_info *) ptr;
-
+	list_for_each_entry(sta, &ap->sta_list, list) {
 		if (!sta->ap)
 			continue;
 
@@ -861,7 +852,7 @@
 
 void hostap_free_data(struct ap_data *ap)
 {
-	struct list_head *n, *ptr;
+	struct sta_info *n, *sta;
 
 	if (ap == NULL || !ap->initialized) {
 		printk(KERN_DEBUG "hostap_free_data: ap has not yet been "
@@ -875,8 +866,7 @@
 	ap->crypt = ap->crypt_priv = NULL;
 #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
 
-	list_for_each_safe(ptr, n, &ap->sta_list) {
-		struct sta_info *sta = list_entry(ptr, struct sta_info, list);
+	list_for_each_entry_safe(sta, n, &ap->sta_list, list) {
 		ap_sta_hash_del(ap, sta);
 		list_del(&sta->list);
 		if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap && sta->local)
@@ -2704,6 +2694,8 @@
 
 	if (hdr->addr1[0] & 0x01) {
 		/* broadcast/multicast frame - no AP related processing */
+		if (local->ap->num_sta <= 0)
+			ret = AP_TX_DROP;
 		goto out;
 	}
 
@@ -3198,15 +3190,14 @@
 
 void hostap_update_rates(local_info_t *local)
 {
-	struct list_head *ptr;
+	struct sta_info *sta;
 	struct ap_data *ap = local->ap;
 
 	if (!ap)
 		return;
 
 	spin_lock_bh(&ap->sta_table_lock);
-	for (ptr = ap->sta_list.next; ptr != &ap->sta_list; ptr = ptr->next) {
-		struct sta_info *sta = (struct sta_info *) ptr;
+	list_for_each_entry(sta, &ap->sta_list, list) {
 		prism2_check_tx_rates(sta);
 	}
 	spin_unlock_bh(&ap->sta_table_lock);
@@ -3242,11 +3233,10 @@
 void hostap_add_wds_links(local_info_t *local)
 {
 	struct ap_data *ap = local->ap;
-	struct list_head *ptr;
+	struct sta_info *sta;
 
 	spin_lock_bh(&ap->sta_table_lock);
-	list_for_each(ptr, &ap->sta_list) {
-		struct sta_info *sta = list_entry(ptr, struct sta_info, list);
+	list_for_each_entry(sta, &ap->sta_list, list) {
 		if (sta->ap)
 			hostap_wds_link_oper(local, sta->addr, WDS_ADD);
 	}
diff --git a/drivers/net/wireless/hostap/hostap_config.h b/drivers/net/wireless/hostap/hostap_config.h
index c090a5a..30acd39 100644
--- a/drivers/net/wireless/hostap/hostap_config.h
+++ b/drivers/net/wireless/hostap/hostap_config.h
@@ -1,8 +1,6 @@
 #ifndef HOSTAP_CONFIG_H
 #define HOSTAP_CONFIG_H
 
-#define PRISM2_VERSION "0.4.4-kernel"
-
 /* In the previous versions of Host AP driver, support for user space version
  * of IEEE 802.11 management (hostapd) used to be disabled in the default
  * configuration. From now on, support for hostapd is always included and it is
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index ee1532b..30e723f 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -22,7 +22,6 @@
 #include "hostap_wlan.h"
 
 
-static char *version = PRISM2_VERSION " (Jouni Malinen <j@w1.fi>)";
 static dev_info_t dev_info = "hostap_cs";
 
 MODULE_AUTHOR("Jouni Malinen");
@@ -30,7 +29,6 @@
 		   "cards (PC Card).");
 MODULE_SUPPORTED_DEVICE("Intersil Prism2-based WLAN cards (PC Card)");
 MODULE_LICENSE("GPL");
-MODULE_VERSION(PRISM2_VERSION);
 
 
 static int ignore_cis_vcc;
@@ -910,14 +908,12 @@
 
 static int __init init_prism2_pccard(void)
 {
-	printk(KERN_INFO "%s: %s\n", dev_info, version);
 	return pcmcia_register_driver(&hostap_driver);
 }
 
 static void __exit exit_prism2_pccard(void)
 {
 	pcmcia_unregister_driver(&hostap_driver);
-	printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
 }
 
 
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index cdea7f7..8c71077 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -3893,8 +3893,6 @@
 	local = iface->local;
 
 	strncpy(info->driver, "hostap", sizeof(info->driver) - 1);
-	strncpy(info->version, PRISM2_VERSION,
-		sizeof(info->version) - 1);
 	snprintf(info->fw_version, sizeof(info->fw_version) - 1,
 		 "%d.%d.%d", (local->sta_fw_ver >> 16) & 0xff,
 		 (local->sta_fw_ver >> 8) & 0xff,
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 4743426..446de51 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -37,7 +37,6 @@
 MODULE_AUTHOR("Jouni Malinen");
 MODULE_DESCRIPTION("Host AP common routines");
 MODULE_LICENSE("GPL");
-MODULE_VERSION(PRISM2_VERSION);
 
 #define TX_TIMEOUT (2 * HZ)
 
diff --git a/drivers/net/wireless/hostap/hostap_pci.c b/drivers/net/wireless/hostap/hostap_pci.c
index db4899e..0cd48d1 100644
--- a/drivers/net/wireless/hostap/hostap_pci.c
+++ b/drivers/net/wireless/hostap/hostap_pci.c
@@ -20,7 +20,6 @@
 #include "hostap_wlan.h"
 
 
-static char *version = PRISM2_VERSION " (Jouni Malinen <j@w1.fi>)";
 static char *dev_info = "hostap_pci";
 
 
@@ -29,7 +28,6 @@
 		   "PCI cards.");
 MODULE_SUPPORTED_DEVICE("Intersil Prism2.5-based WLAN PCI cards");
 MODULE_LICENSE("GPL");
-MODULE_VERSION(PRISM2_VERSION);
 
 
 /* struct local_info::hw_priv */
@@ -462,8 +460,6 @@
 
 static int __init init_prism2_pci(void)
 {
-	printk(KERN_INFO "%s: %s\n", dev_info, version);
-
 	return pci_register_driver(&prism2_pci_drv_id);
 }
 
@@ -471,7 +467,6 @@
 static void __exit exit_prism2_pci(void)
 {
 	pci_unregister_driver(&prism2_pci_drv_id);
-	printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
 }
 
 
diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c
index f0fd5ec..0183df7 100644
--- a/drivers/net/wireless/hostap/hostap_plx.c
+++ b/drivers/net/wireless/hostap/hostap_plx.c
@@ -23,7 +23,6 @@
 #include "hostap_wlan.h"
 
 
-static char *version = PRISM2_VERSION " (Jouni Malinen <j@w1.fi>)";
 static char *dev_info = "hostap_plx";
 
 
@@ -32,7 +31,6 @@
 		   "cards (PLX).");
 MODULE_SUPPORTED_DEVICE("Intersil Prism2-based WLAN cards (PLX)");
 MODULE_LICENSE("GPL");
-MODULE_VERSION(PRISM2_VERSION);
 
 
 static int ignore_cis;
@@ -623,8 +621,6 @@
 
 static int __init init_prism2_plx(void)
 {
-	printk(KERN_INFO "%s: %s\n", dev_info, version);
-
 	return pci_register_driver(&prism2_plx_drv_id);
 }
 
@@ -632,7 +628,6 @@
 static void __exit exit_prism2_plx(void)
 {
 	pci_unregister_driver(&prism2_plx_drv_id);
-	printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
 }
 
 
diff --git a/drivers/net/wireless/rtl8187.h b/drivers/net/wireless/rtl8187.h
new file mode 100644
index 0000000..6124e46
--- /dev/null
+++ b/drivers/net/wireless/rtl8187.h
@@ -0,0 +1,145 @@
+/*
+ * Definitions for RTL8187 hardware
+ *
+ * Copyright 2007 Michael Wu <flamingice@sourmilk.net>
+ * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
+ *
+ * Based on the r8187 driver, which is:
+ * Copyright 2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef RTL8187_H
+#define RTL8187_H
+
+#include "rtl818x.h"
+
+#define RTL8187_EEPROM_TXPWR_BASE	0x05
+#define RTL8187_EEPROM_MAC_ADDR		0x07
+#define RTL8187_EEPROM_TXPWR_CHAN_1	0x16	/* 3 channels */
+#define RTL8187_EEPROM_TXPWR_CHAN_6	0x1B	/* 2 channels */
+#define RTL8187_EEPROM_TXPWR_CHAN_4	0x3D	/* 2 channels */
+
+#define RTL8187_REQT_READ	0xC0
+#define RTL8187_REQT_WRITE	0x40
+#define RTL8187_REQ_GET_REG	0x05
+#define RTL8187_REQ_SET_REG	0x05
+
+#define RTL8187_MAX_RX		0x9C4
+
+struct rtl8187_rx_info {
+	struct urb *urb;
+	struct ieee80211_hw *dev;
+};
+
+struct rtl8187_rx_hdr {
+	__le16 len;
+	__le16 rate;
+	u8 noise;
+	u8 signal;
+	u8 agc;
+	u8 reserved;
+	__le64 mac_time;
+} __attribute__((packed));
+
+struct rtl8187_tx_info {
+	struct ieee80211_tx_control *control;
+	struct urb *urb;
+	struct ieee80211_hw *dev;
+};
+
+struct rtl8187_tx_hdr {
+	__le32 flags;
+#define RTL8187_TX_FLAG_NO_ENCRYPT	(1 << 15)
+#define RTL8187_TX_FLAG_MORE_FRAG	(1 << 17)
+#define RTL8187_TX_FLAG_CTS		(1 << 18)
+#define RTL8187_TX_FLAG_RTS		(1 << 23)
+	__le16 rts_duration;
+	__le16 len;
+	__le32 retry;
+} __attribute__((packed));
+
+struct rtl8187_priv {
+	/* common between rtl818x drivers */
+	struct rtl818x_csr *map;
+	void (*rf_init)(struct ieee80211_hw *);
+	int mode;
+
+	/* rtl8187 specific */
+	struct ieee80211_channel channels[14];
+	struct ieee80211_rate rates[12];
+	struct ieee80211_hw_mode modes[2];
+	struct usb_device *udev;
+	u8 *hwaddr;
+	u16 txpwr_base;
+	u8 asic_rev;
+	struct sk_buff_head rx_queue;
+};
+
+void rtl8187_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data);
+
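+/*
+ * These register accessors tunnel what would otherwise be MMIO
+ * accesses over USB control transfers; the register address is
+ * carried in the wValue field of the RTL8187_REQ_GET_REG /
+ * RTL8187_REQ_SET_REG request.
+ */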
+static inline u8 rtl818x_ioread8(struct rtl8187_priv *priv, u8 *addr)
+{
+	u8 val;
+
+	usb_control_msg(priv->udev, usb_rcvctrlpipe(priv->udev, 0),
+			RTL8187_REQ_GET_REG, RTL8187_REQT_READ,
+			(unsigned long)addr, 0, &val, sizeof(val), HZ / 2);
+
+	return val;
+}
+
+static inline u16 rtl818x_ioread16(struct rtl8187_priv *priv, __le16 *addr)
+{
+	__le16 val;
+
+	usb_control_msg(priv->udev, usb_rcvctrlpipe(priv->udev, 0),
+			RTL8187_REQ_GET_REG, RTL8187_REQT_READ,
+			(unsigned long)addr, 0, &val, sizeof(val), HZ / 2);
+
+	return le16_to_cpu(val);
+}
+
+static inline u32 rtl818x_ioread32(struct rtl8187_priv *priv, __le32 *addr)
+{
+	__le32 val;
+
+	usb_control_msg(priv->udev, usb_rcvctrlpipe(priv->udev, 0),
+			RTL8187_REQ_GET_REG, RTL8187_REQT_READ,
+			(unsigned long)addr, 0, &val, sizeof(val), HZ / 2);
+
+	return le32_to_cpu(val);
+}
+
+static inline void rtl818x_iowrite8(struct rtl8187_priv *priv,
+				    u8 *addr, u8 val)
+{
+	usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0),
+			RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE,
+			(unsigned long)addr, 0, &val, sizeof(val), HZ / 2);
+}
+
+static inline void rtl818x_iowrite16(struct rtl8187_priv *priv,
+				     __le16 *addr, u16 val)
+{
+	__le16 buf = cpu_to_le16(val);
+
+	usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0),
+			RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE,
+			(unsigned long)addr, 0, &buf, sizeof(buf), HZ / 2);
+}
+
+static inline void rtl818x_iowrite32(struct rtl8187_priv *priv,
+				     __le32 *addr, u32 val)
+{
+	__le32 buf = cpu_to_le32(val);
+
+	usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0),
+			RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE,
+			(unsigned long)addr, 0, &buf, sizeof(buf), HZ / 2);
+}
+
+#endif /* RTL8187_H */
diff --git a/drivers/net/wireless/rtl8187_dev.c b/drivers/net/wireless/rtl8187_dev.c
new file mode 100644
index 0000000..cea8589
--- /dev/null
+++ b/drivers/net/wireless/rtl8187_dev.c
@@ -0,0 +1,731 @@
+/*
+ * Linux device driver for RTL8187
+ *
+ * Copyright 2007 Michael Wu <flamingice@sourmilk.net>
+ * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
+ *
+ * Based on the r8187 driver, which is:
+ * Copyright 2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ *
+ * Magic delays and register offsets below are taken from the original
+ * r8187 driver sources.  Thanks to Realtek for their support!
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/usb.h>
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/eeprom_93cx6.h>
+#include <net/mac80211.h>
+
+#include "rtl8187.h"
+#include "rtl8187_rtl8225.h"
+
+MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
+MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>");
+MODULE_DESCRIPTION("RTL8187 USB wireless driver");
+MODULE_LICENSE("GPL");
+
+static struct usb_device_id rtl8187_table[] __devinitdata = {
+	/* Realtek */
+	{USB_DEVICE(0x0bda, 0x8187)},
+	/* Netgear */
+	{USB_DEVICE(0x0846, 0x6100)},
+	{USB_DEVICE(0x0846, 0x6a00)},
+	{}
+};
+
+MODULE_DEVICE_TABLE(usb, rtl8187_table);
+
+void rtl8187_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data)
+{
+	struct rtl8187_priv *priv = dev->priv;
+
+	data <<= 8;
+	data |= addr | 0x80;
+
+	rtl818x_iowrite8(priv, &priv->map->PHY[3], (data >> 24) & 0xFF);
+	rtl818x_iowrite8(priv, &priv->map->PHY[2], (data >> 16) & 0xFF);
+	rtl818x_iowrite8(priv, &priv->map->PHY[1], (data >> 8) & 0xFF);
+	rtl818x_iowrite8(priv, &priv->map->PHY[0], data & 0xFF);
+
+	msleep(1);
+}
+
+static void rtl8187_tx_cb(struct urb *urb)
+{
+	struct ieee80211_tx_status status = { {0} };
+	struct sk_buff *skb = (struct sk_buff *)urb->context;
+	struct rtl8187_tx_info *info = (struct rtl8187_tx_info *)skb->cb;
+
+	usb_free_urb(info->urb);
+	if (info->control)
+		memcpy(&status.control, info->control, sizeof(status.control));
+	kfree(info->control);
+	skb_pull(skb, sizeof(struct rtl8187_tx_hdr));
+	status.flags |= IEEE80211_TX_STATUS_ACK;
+	ieee80211_tx_status_irqsafe(info->dev, skb, &status);
+}
+
+static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb,
+		      struct ieee80211_tx_control *control)
+{
+	struct rtl8187_priv *priv = dev->priv;
+	struct rtl8187_tx_hdr *hdr;
+	struct rtl8187_tx_info *info;
+	struct urb *urb;
+	u32 tmp;
+
+	urb = usb_alloc_urb(0, GFP_ATOMIC);
+	if (!urb) {
+		kfree_skb(skb);
+		return 0;
+	}
+
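+	/* prepend the device TX descriptor and assemble its flags word */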
+	hdr = (struct rtl8187_tx_hdr *)skb_push(skb, sizeof(*hdr));
+	tmp = skb->len - sizeof(*hdr);
+	tmp |= RTL8187_TX_FLAG_NO_ENCRYPT;
+	tmp |= control->rts_cts_rate << 19;
+	tmp |= control->tx_rate << 24;
+	if (ieee80211_get_morefrag((struct ieee80211_hdr *)skb))
+		tmp |= RTL8187_TX_FLAG_MORE_FRAG;
+	if (control->flags & IEEE80211_TXCTL_USE_RTS_CTS) {
+		tmp |= RTL8187_TX_FLAG_RTS;
+		hdr->rts_duration =
+			ieee80211_rts_duration(dev, skb->len, control);
+	}
+	if (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT)
+		tmp |= RTL8187_TX_FLAG_CTS;
+	hdr->flags = cpu_to_le32(tmp);
+	hdr->len = 0;
+	tmp = control->retry_limit << 8;
+	hdr->retry = cpu_to_le32(tmp);
+
+	info = (struct rtl8187_tx_info *)skb->cb;
+	info->control = kmemdup(control, sizeof(*control), GFP_ATOMIC);
+	info->urb = urb;
+	info->dev = dev;
+	usb_fill_bulk_urb(urb, priv->udev, usb_sndbulkpipe(priv->udev, 2),
+			  hdr, skb->len, rtl8187_tx_cb, skb);
+	usb_submit_urb(urb, GFP_ATOMIC);
+
+	return 0;
+}
+
+static void rtl8187_rx_cb(struct urb *urb)
+{
+	struct sk_buff *skb = (struct sk_buff *)urb->context;
+	struct rtl8187_rx_info *info = (struct rtl8187_rx_info *)skb->cb;
+	struct ieee80211_hw *dev = info->dev;
+	struct rtl8187_priv *priv = dev->priv;
+	struct rtl8187_rx_hdr *hdr;
+	struct ieee80211_rx_status rx_status = { 0 };
+	int rate, signal;
+
+	spin_lock(&priv->rx_queue.lock);
+	if (skb->next)
+		__skb_unlink(skb, &priv->rx_queue);
+	else {
+		spin_unlock(&priv->rx_queue.lock);
+		return;
+	}
+	spin_unlock(&priv->rx_queue.lock);
+
+	if (unlikely(urb->status)) {
+		usb_free_urb(urb);
+		dev_kfree_skb_irq(skb);
+		return;
+	}
+
+	skb_put(skb, urb->actual_length);
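+	/* the hardware appends its RX descriptor after the frame data,
+	 * so fetch it from the tail before trimming to the frame length */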
+	hdr = (struct rtl8187_rx_hdr *)(skb_tail_pointer(skb) - sizeof(*hdr));
+	skb_trim(skb, le16_to_cpu(hdr->len) & 0x0FFF);
+
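+	/* map the AGC reading to a signal strength estimate; OFDM and
+	 * CCK rates are clamped to different ranges */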
+	signal = hdr->agc >> 1;
+	rate = (le16_to_cpu(hdr->rate) >> 4) & 0xF;
+	if (rate > 3) {	/* OFDM rate */
+		if (signal > 90)
+			signal = 90;
+		else if (signal < 25)
+			signal = 25;
+		signal = 90 - signal;
+	} else {	/* CCK rate */
+		if (signal > 95)
+			signal = 95;
+		else if (signal < 30)
+			signal = 30;
+		signal = 95 - signal;
+	}
+
+	rx_status.antenna = (hdr->signal >> 7) & 1;
+	rx_status.signal = 64 - min(hdr->noise, (u8)64);
+	rx_status.ssi = signal;
+	rx_status.rate = rate;
+	rx_status.freq = dev->conf.freq;
+	rx_status.channel = dev->conf.channel;
+	rx_status.phymode = dev->conf.phymode;
+	rx_status.mactime = le64_to_cpu(hdr->mac_time);
+	ieee80211_rx_irqsafe(dev, skb, &rx_status);
+
+	skb = dev_alloc_skb(RTL8187_MAX_RX);
+	if (unlikely(!skb)) {
+		usb_free_urb(urb);
+		/* TODO check rx queue length and refill *somewhere* */
+		return;
+	}
+
+	info = (struct rtl8187_rx_info *)skb->cb;
+	info->urb = urb;
+	info->dev = dev;
+	urb->transfer_buffer = skb_tail_pointer(skb);
+	urb->context = skb;
+	skb_queue_tail(&priv->rx_queue, skb);
+
+	usb_submit_urb(urb, GFP_ATOMIC);
+}
+
+static int rtl8187_init_urbs(struct ieee80211_hw *dev)
+{
+	struct rtl8187_priv *priv = dev->priv;
+	struct urb *entry;
+	struct sk_buff *skb;
+	struct rtl8187_rx_info *info;
+
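+	/* keep up to eight receive URBs in flight, each backed by an
+	 * RTL8187_MAX_RX-sized skb queued on rx_queue */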
+	while (skb_queue_len(&priv->rx_queue) < 8) {
+		skb = __dev_alloc_skb(RTL8187_MAX_RX, GFP_KERNEL);
+		if (!skb)
+			break;
+		entry = usb_alloc_urb(0, GFP_KERNEL);
+		if (!entry) {
+			kfree_skb(skb);
+			break;
+		}
+		usb_fill_bulk_urb(entry, priv->udev,
+				  usb_rcvbulkpipe(priv->udev, 1),
+				  skb_tail_pointer(skb),
+				  RTL8187_MAX_RX, rtl8187_rx_cb, skb);
+		info = (struct rtl8187_rx_info *)skb->cb;
+		info->urb = entry;
+		info->dev = dev;
+		skb_queue_tail(&priv->rx_queue, skb);
+		usb_submit_urb(entry, GFP_KERNEL);
+	}
+
+	return 0;
+}
+
+static int rtl8187_init_hw(struct ieee80211_hw *dev)
+{
+	struct rtl8187_priv *priv = dev->priv;
+	u8 reg;
+	int i;
+
+	/* reset */
+	rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
+	reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
+	rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg | RTL818X_CONFIG3_ANAPARAM_WRITE);
+	rtl818x_iowrite32(priv, &priv->map->ANAPARAM, RTL8225_ANAPARAM_ON);
+	rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, RTL8225_ANAPARAM2_ON);
+	rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE);
+	rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
+
+	rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
+
+	msleep(200);
+	rtl818x_iowrite8(priv, (u8 *)0xFE18, 0x10);
+	rtl818x_iowrite8(priv, (u8 *)0xFE18, 0x11);
+	rtl818x_iowrite8(priv, (u8 *)0xFE18, 0x00);
+	msleep(200);
+
+	reg = rtl818x_ioread8(priv, &priv->map->CMD);
+	reg &= (1 << 1);
+	reg |= RTL818X_CMD_RESET;
+	rtl818x_iowrite8(priv, &priv->map->CMD, reg);
+
+	i = 10;
+	do {
+		msleep(2);
+		if (!(rtl818x_ioread8(priv, &priv->map->CMD) &
+		      RTL818X_CMD_RESET))
+			break;
+	} while (--i);
+
+	if (!i) {
+		printk(KERN_ERR "%s: Reset timeout!\n", wiphy_name(dev->wiphy));
+		return -ETIMEDOUT;
+	}
+
+	/* reload registers from eeprom */
+	rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_LOAD);
+
+	i = 10;
+	do {
+		msleep(4);
+		if (!(rtl818x_ioread8(priv, &priv->map->EEPROM_CMD) &
+		      RTL818X_EEPROM_CMD_CONFIG))
+			break;
+	} while (--i);
+
+	if (!i) {
+		printk(KERN_ERR "%s: eeprom reset timeout!\n",
+		       wiphy_name(dev->wiphy));
+		return -ETIMEDOUT;
+	}
+
+	rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
+	reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
+	rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg | RTL818X_CONFIG3_ANAPARAM_WRITE);
+	rtl818x_iowrite32(priv, &priv->map->ANAPARAM, RTL8225_ANAPARAM_ON);
+	rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, RTL8225_ANAPARAM2_ON);
+	rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE);
+	rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
+
+	/* setup card */
+	rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0);
+	rtl818x_iowrite8(priv, &priv->map->GPIO, 0);
+
+	rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, (4 << 8));
+	rtl818x_iowrite8(priv, &priv->map->GPIO, 1);
+	rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0);
+
+	rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
+	for (i = 0; i < ETH_ALEN; i++)
+		rtl818x_iowrite8(priv, &priv->map->MAC[i], priv->hwaddr[i]);
+
+	rtl818x_iowrite16(priv, (__le16 *)0xFFF4, 0xFFFF);
+	reg = rtl818x_ioread8(priv, &priv->map->CONFIG1);
+	reg &= 0x3F;
+	reg |= 0x80;
+	rtl818x_iowrite8(priv, &priv->map->CONFIG1, reg);
+
+	rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
+
+	rtl818x_iowrite32(priv, &priv->map->INT_TIMEOUT, 0);
+	rtl818x_iowrite8(priv, &priv->map->WPA_CONF, 0);
+	rtl818x_iowrite8(priv, &priv->map->RATE_FALLBACK, 0x81);
+
+	// TODO: set RESP_RATE and BRSR properly
+	rtl818x_iowrite8(priv, &priv->map->RESP_RATE, (8 << 4) | 0);
+	rtl818x_iowrite16(priv, &priv->map->BRSR, 0x01F3);
+
+	/* host_usb_init */
+	rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0);
+	rtl818x_iowrite8(priv, &priv->map->GPIO, 0);
+	reg = rtl818x_ioread8(priv, (u8 *)0xFE53);
+	rtl818x_iowrite8(priv, (u8 *)0xFE53, reg | (1 << 7));
+	rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, (4 << 8));
+	rtl818x_iowrite8(priv, &priv->map->GPIO, 0x20);
+	rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0);
+	rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x80);
+	rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0x80);
+	rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x80);
+	msleep(100);
+
+	rtl818x_iowrite32(priv, &priv->map->RF_TIMING, 0x000a8008);
+	rtl818x_iowrite16(priv, &priv->map->BRSR, 0xFFFF);
+	rtl818x_iowrite32(priv, &priv->map->RF_PARA, 0x00100044);
+	rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
+	rtl818x_iowrite8(priv, &priv->map->CONFIG3, 0x44);
+	rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
+	rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FF7);
+	msleep(100);
+
+	priv->rf_init(dev);
+
+	rtl818x_iowrite16(priv, &priv->map->BRSR, 0x01F3);
+	reg = rtl818x_ioread16(priv, &priv->map->PGSELECT) & 0xfffe;
+	rtl818x_iowrite16(priv, &priv->map->PGSELECT, reg | 0x1);
+	rtl818x_iowrite16(priv, (__le16 *)0xFFFE, 0x10);
+	rtl818x_iowrite8(priv, &priv->map->TALLY_SEL, 0x80);
+	rtl818x_iowrite8(priv, (u8 *)0xFFFF, 0x60);
+	rtl818x_iowrite16(priv, &priv->map->PGSELECT, reg);
+
+	return 0;
+}
+
+static void rtl8187_set_channel(struct ieee80211_hw *dev, int channel)
+{
+	u32 reg;
+	struct rtl8187_priv *priv = dev->priv;
+
+	reg = rtl818x_ioread32(priv, &priv->map->TX_CONF);
+	/* Enable TX loopback on MAC level to avoid TX during channel
+	 * changes, as this has been seen to cause problems and the
+	 * card will stop working until the next reset
+	 */
+	rtl818x_iowrite32(priv, &priv->map->TX_CONF,
+			  reg | RTL818X_TX_CONF_LOOPBACK_MAC);
+	msleep(10);
+	rtl8225_rf_set_channel(dev, channel);
+	msleep(10);
+	rtl818x_iowrite32(priv, &priv->map->TX_CONF, reg);
+}
+
+static int rtl8187_open(struct ieee80211_hw *dev)
+{
+	struct rtl8187_priv *priv = dev->priv;
+	u32 reg;
+	int ret;
+
+	ret = rtl8187_init_hw(dev);
+	if (ret)
+		return ret;
+
+	rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0xFFFF);
+
+	rtl8187_init_urbs(dev);
+
+	reg = RTL818X_RX_CONF_ONLYERLPKT |
+	      RTL818X_RX_CONF_RX_AUTORESETPHY |
+	      RTL818X_RX_CONF_BSSID |
+	      RTL818X_RX_CONF_MGMT |
+	      RTL818X_RX_CONF_CTRL |
+	      RTL818X_RX_CONF_DATA |
+	      (7 << 13 /* RX FIFO threshold NONE */) |
+	      (7 << 10 /* MAX RX DMA */) |
+	      RTL818X_RX_CONF_BROADCAST |
+	      RTL818X_RX_CONF_MULTICAST |
+	      RTL818X_RX_CONF_NICMAC;
+	if (priv->mode == IEEE80211_IF_TYPE_MNTR)
+		reg |= RTL818X_RX_CONF_MONITOR;
+
+	rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg);
+
+	reg = rtl818x_ioread8(priv, &priv->map->CW_CONF);
+	reg &= ~RTL818X_CW_CONF_PERPACKET_CW_SHIFT;
+	reg |= RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT;
+	rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg);
+
+	reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL);
+	reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT;
+	reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT;
+	reg &= ~RTL818X_TX_AGC_CTL_FEEDBACK_ANT;
+	rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg);
+
+	reg  = RTL818X_TX_CONF_CW_MIN |
+	       (7 << 21 /* MAX TX DMA */) |
+	       RTL818X_TX_CONF_NO_ICV;
+	rtl818x_iowrite32(priv, &priv->map->TX_CONF, reg);
+
+	reg = rtl818x_ioread8(priv, &priv->map->CMD);
+	reg |= RTL818X_CMD_TX_ENABLE;
+	reg |= RTL818X_CMD_RX_ENABLE;
+	rtl818x_iowrite8(priv, &priv->map->CMD, reg);
+
+	return 0;
+}
+
+static int rtl8187_stop(struct ieee80211_hw *dev)
+{
+	struct rtl8187_priv *priv = dev->priv;
+	struct rtl8187_rx_info *info;
+	struct sk_buff *skb;
+	u32 reg;
+
+	rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
+
+	reg = rtl818x_ioread8(priv, &priv->map->CMD);
+	reg &= ~RTL818X_CMD_TX_ENABLE;
+	reg &= ~RTL818X_CMD_RX_ENABLE;
+	rtl818x_iowrite8(priv, &priv->map->CMD, reg);
+
+	rtl8225_rf_stop(dev);
+
+	rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
+	reg = rtl818x_ioread8(priv, &priv->map->CONFIG4);
+	rtl818x_iowrite8(priv, &priv->map->CONFIG4, reg | RTL818X_CONFIG4_VCOOFF);
+	rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
+
+	while ((skb = skb_dequeue(&priv->rx_queue))) {
+		info = (struct rtl8187_rx_info *)skb->cb;
+		usb_kill_urb(info->urb);
+		kfree_skb(skb);
+	}
+	return 0;
+}
+
+static int rtl8187_add_interface(struct ieee80211_hw *dev,
+				 struct ieee80211_if_init_conf *conf)
+{
+	struct rtl8187_priv *priv = dev->priv;
+
+	/* NOTE: using IEEE80211_IF_TYPE_MGMT to indicate no mode selected */
+	if (priv->mode != IEEE80211_IF_TYPE_MGMT)
+		return -1;
+
+	switch (conf->type) {
+	case IEEE80211_IF_TYPE_STA:
+	case IEEE80211_IF_TYPE_MNTR:
+		priv->mode = conf->type;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	priv->hwaddr = conf->mac_addr;
+
+	return 0;
+}
+
+static void rtl8187_remove_interface(struct ieee80211_hw *dev,
+				     struct ieee80211_if_init_conf *conf)
+{
+	struct rtl8187_priv *priv = dev->priv;
+	priv->mode = IEEE80211_IF_TYPE_MGMT;
+}
+
+static int rtl8187_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf)
+{
+	struct rtl8187_priv *priv = dev->priv;
+	rtl8187_set_channel(dev, conf->channel);
+
+	rtl818x_iowrite8(priv, &priv->map->SIFS, 0x22);
+
+	if (conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME) {
+		rtl818x_iowrite8(priv, &priv->map->SLOT, 0x9);
+		rtl818x_iowrite8(priv, &priv->map->DIFS, 0x14);
+		rtl818x_iowrite8(priv, &priv->map->EIFS, 91 - 0x14);
+		rtl818x_iowrite8(priv, &priv->map->CW_VAL, 0x73);
+	} else {
+		rtl818x_iowrite8(priv, &priv->map->SLOT, 0x14);
+		rtl818x_iowrite8(priv, &priv->map->DIFS, 0x24);
+		rtl818x_iowrite8(priv, &priv->map->EIFS, 91 - 0x24);
+		rtl818x_iowrite8(priv, &priv->map->CW_VAL, 0xa5);
+	}
+
+	rtl818x_iowrite16(priv, &priv->map->ATIM_WND, 2);
+	rtl818x_iowrite16(priv, &priv->map->ATIMTR_INTERVAL, 100);
+	rtl818x_iowrite16(priv, &priv->map->BEACON_INTERVAL, 100);
+	rtl818x_iowrite16(priv, &priv->map->BEACON_INTERVAL_TIME, 100);
+	return 0;
+}
+
+static int rtl8187_config_interface(struct ieee80211_hw *dev, int if_id,
+				    struct ieee80211_if_conf *conf)
+{
+	struct rtl8187_priv *priv = dev->priv;
+	int i;
+
+	for (i = 0; i < ETH_ALEN; i++)
+		rtl818x_iowrite8(priv, &priv->map->BSSID[i], conf->bssid[i]);
+
+	if (is_valid_ether_addr(conf->bssid))
+		rtl818x_iowrite8(priv, &priv->map->MSR, RTL818X_MSR_INFRA);
+	else
+		rtl818x_iowrite8(priv, &priv->map->MSR, RTL818X_MSR_NO_LINK);
+
+	return 0;
+}
+
+static const struct ieee80211_ops rtl8187_ops = {
+	.tx			= rtl8187_tx,
+	.open			= rtl8187_open,
+	.stop			= rtl8187_stop,
+	.add_interface		= rtl8187_add_interface,
+	.remove_interface	= rtl8187_remove_interface,
+	.config			= rtl8187_config,
+	.config_interface	= rtl8187_config_interface,
+};
+
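+/*
+ * The generic eeprom_93cx6 code drives the serial EEPROM lines
+ * (data in/out, clock, chip select) by mirroring them into the
+ * EEPROM_CMD register through the two callbacks below.
+ */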
+static void rtl8187_eeprom_register_read(struct eeprom_93cx6 *eeprom)
+{
+	struct ieee80211_hw *dev = eeprom->data;
+	struct rtl8187_priv *priv = dev->priv;
+	u8 reg = rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
+
+	eeprom->reg_data_in = reg & RTL818X_EEPROM_CMD_WRITE;
+	eeprom->reg_data_out = reg & RTL818X_EEPROM_CMD_READ;
+	eeprom->reg_data_clock = reg & RTL818X_EEPROM_CMD_CK;
+	eeprom->reg_chip_select = reg & RTL818X_EEPROM_CMD_CS;
+}
+
+static void rtl8187_eeprom_register_write(struct eeprom_93cx6 *eeprom)
+{
+	struct ieee80211_hw *dev = eeprom->data;
+	struct rtl8187_priv *priv = dev->priv;
+	u8 reg = RTL818X_EEPROM_CMD_PROGRAM;
+
+	if (eeprom->reg_data_in)
+		reg |= RTL818X_EEPROM_CMD_WRITE;
+	if (eeprom->reg_data_out)
+		reg |= RTL818X_EEPROM_CMD_READ;
+	if (eeprom->reg_data_clock)
+		reg |= RTL818X_EEPROM_CMD_CK;
+	if (eeprom->reg_chip_select)
+		reg |= RTL818X_EEPROM_CMD_CS;
+
+	rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, reg);
+	udelay(10);
+}
+
+static int __devinit rtl8187_probe(struct usb_interface *intf,
+				   const struct usb_device_id *id)
+{
+	struct usb_device *udev = interface_to_usbdev(intf);
+	struct ieee80211_hw *dev;
+	struct rtl8187_priv *priv;
+	struct eeprom_93cx6 eeprom;
+	struct ieee80211_channel *channel;
+	u16 txpwr, reg;
+	int err, i;
+
+	dev = ieee80211_alloc_hw(sizeof(*priv), &rtl8187_ops);
+	if (!dev) {
+		printk(KERN_ERR "rtl8187: ieee80211 alloc failed\n");
+		return -ENOMEM;
+	}
+
+	priv = dev->priv;
+
+	SET_IEEE80211_DEV(dev, &intf->dev);
+	usb_set_intfdata(intf, dev);
+	priv->udev = udev;
+
+	usb_get_dev(udev);
+
+	skb_queue_head_init(&priv->rx_queue);
+	memcpy(priv->channels, rtl818x_channels, sizeof(rtl818x_channels));
+	memcpy(priv->rates, rtl818x_rates, sizeof(rtl818x_rates));
+	priv->map = (struct rtl818x_csr *)0xFF00;
+	priv->modes[0].mode = MODE_IEEE80211G;
+	priv->modes[0].num_rates = ARRAY_SIZE(rtl818x_rates);
+	priv->modes[0].rates = priv->rates;
+	priv->modes[0].num_channels = ARRAY_SIZE(rtl818x_channels);
+	priv->modes[0].channels = priv->channels;
+	priv->modes[1].mode = MODE_IEEE80211B;
+	priv->modes[1].num_rates = 4;
+	priv->modes[1].rates = priv->rates;
+	priv->modes[1].num_channels = ARRAY_SIZE(rtl818x_channels);
+	priv->modes[1].channels = priv->channels;
+	priv->mode = IEEE80211_IF_TYPE_MGMT;
+	dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
+		     IEEE80211_HW_RX_INCLUDES_FCS |
+		     IEEE80211_HW_WEP_INCLUDE_IV |
+		     IEEE80211_HW_DATA_NULLFUNC_ACK;
+	dev->extra_tx_headroom = sizeof(struct rtl8187_tx_hdr);
+	dev->queues = 1;
+	dev->max_rssi = 65;
+	dev->max_signal = 64;
+
+	for (i = 0; i < 2; i++)
+		if ((err = ieee80211_register_hwmode(dev, &priv->modes[i])))
+			goto err_free_dev;
+
+	eeprom.data = dev;
+	eeprom.register_read = rtl8187_eeprom_register_read;
+	eeprom.register_write = rtl8187_eeprom_register_write;
+	if (rtl818x_ioread32(priv, &priv->map->RX_CONF) & (1 << 6))
+		eeprom.width = PCI_EEPROM_WIDTH_93C66;
+	else
+		eeprom.width = PCI_EEPROM_WIDTH_93C46;
+
+	rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
+	udelay(10);
+
+	eeprom_93cx6_multiread(&eeprom, RTL8187_EEPROM_MAC_ADDR,
+			       (__le16 __force *)dev->wiphy->perm_addr, 3);
+	if (!is_valid_ether_addr(dev->wiphy->perm_addr)) {
+		printk(KERN_WARNING "rtl8187: Invalid hwaddr! Using randomly "
+		       "generated MAC address\n");
+		random_ether_addr(dev->wiphy->perm_addr);
+	}
+
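+	/* each EEPROM TX power word covers two consecutive channels:
+	 * low byte for the first, high byte for the second */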
+	channel = priv->channels;
+	for (i = 0; i < 3; i++) {
+		eeprom_93cx6_read(&eeprom, RTL8187_EEPROM_TXPWR_CHAN_1 + i,
+				  &txpwr);
+		(*channel++).val = txpwr & 0xFF;
+		(*channel++).val = txpwr >> 8;
+	}
+	for (i = 0; i < 2; i++) {
+		eeprom_93cx6_read(&eeprom, RTL8187_EEPROM_TXPWR_CHAN_4 + i,
+				  &txpwr);
+		(*channel++).val = txpwr & 0xFF;
+		(*channel++).val = txpwr >> 8;
+	}
+	for (i = 0; i < 2; i++) {
+		eeprom_93cx6_read(&eeprom, RTL8187_EEPROM_TXPWR_CHAN_6 + i,
+				  &txpwr);
+		(*channel++).val = txpwr & 0xFF;
+		(*channel++).val = txpwr >> 8;
+	}
+
+	eeprom_93cx6_read(&eeprom, RTL8187_EEPROM_TXPWR_BASE,
+			  &priv->txpwr_base);
+
+	reg = rtl818x_ioread16(priv, &priv->map->PGSELECT) & ~1;
+	rtl818x_iowrite16(priv, &priv->map->PGSELECT, reg | 1);
+	/* 0 means ASIC B-cut; we should use SW three-wire
+	 * bit-by-bit banging for the radio. 1 means we can use
+	 * a USB-specific request to write radio registers */
+	priv->asic_rev = rtl818x_ioread8(priv, (u8 *)0xFFFE) & 0x3;
+	rtl818x_iowrite16(priv, &priv->map->PGSELECT, reg);
+	rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
+
+	rtl8225_write(dev, 0, 0x1B7);
+
+	if (rtl8225_read(dev, 8) != 0x588 || rtl8225_read(dev, 9) != 0x700)
+		priv->rf_init = rtl8225_rf_init;
+	else
+		priv->rf_init = rtl8225z2_rf_init;
+
+	rtl8225_write(dev, 0, 0x0B7);
+
+	err = ieee80211_register_hw(dev);
+	if (err) {
+		printk(KERN_ERR "rtl8187: Cannot register device\n");
+		goto err_free_dev;
+	}
+
+	printk(KERN_INFO "%s: hwaddr " MAC_FMT ", rtl8187 V%d + %s\n",
+	       wiphy_name(dev->wiphy), MAC_ARG(dev->wiphy->perm_addr),
+	       priv->asic_rev, priv->rf_init == rtl8225_rf_init ?
+	       "rtl8225" : "rtl8225z2");
+
+	return 0;
+
+ err_free_dev:
+	ieee80211_free_hw(dev);
+	usb_set_intfdata(intf, NULL);
+	usb_put_dev(udev);
+	return err;
+}
+
+static void __devexit rtl8187_disconnect(struct usb_interface *intf)
+{
+	struct ieee80211_hw *dev = usb_get_intfdata(intf);
+	struct rtl8187_priv *priv;
+
+	if (!dev)
+		return;
+
+	ieee80211_unregister_hw(dev);
+
+	priv = dev->priv;
+	usb_put_dev(interface_to_usbdev(intf));
+	ieee80211_free_hw(dev);
+}
+
+static struct usb_driver rtl8187_driver = {
+	.name		= KBUILD_MODNAME,
+	.id_table	= rtl8187_table,
+	.probe		= rtl8187_probe,
+	.disconnect	= rtl8187_disconnect,
+};
+
+static int __init rtl8187_init(void)
+{
+	return usb_register(&rtl8187_driver);
+}
+
+static void __exit rtl8187_exit(void)
+{
+	usb_deregister(&rtl8187_driver);
+}
+
+module_init(rtl8187_init);
+module_exit(rtl8187_exit);
diff --git a/drivers/net/wireless/rtl8187_rtl8225.c b/drivers/net/wireless/rtl8187_rtl8225.c
new file mode 100644
index 0000000..e25a09f
--- /dev/null
+++ b/drivers/net/wireless/rtl8187_rtl8225.c
@@ -0,0 +1,745 @@
+/*
+ * Radio tuning for RTL8225 on RTL8187
+ *
+ * Copyright 2007 Michael Wu <flamingice@sourmilk.net>
+ * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
+ *
+ * Based on the r8187 driver, which is:
+ * Copyright 2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ *
+ * Magic delays, register offsets, and phy value tables below are
+ * taken from the original r8187 driver sources.  Thanks to Realtek
+ * for their support!
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/usb.h>
+#include <net/mac80211.h>
+
+#include "rtl8187.h"
+#include "rtl8187_rtl8225.h"
+
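+/*
+ * On B-cut ASICs the RF registers are written by bit-banging a
+ * three-wire serial bus through the RFPins* registers; later
+ * revisions can issue a USB control request instead, apparently
+ * handled by the device's 8051 core (see rtl8225_write_8051()).
+ */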
+static void rtl8225_write_bitbang(struct ieee80211_hw *dev, u8 addr, u16 data)
+{
+	struct rtl8187_priv *priv = dev->priv;
+	u16 reg80, reg84, reg82;
+	u32 bangdata;
+	int i;
+
+	bangdata = (data << 4) | (addr & 0xf);
+
+	reg80 = rtl818x_ioread16(priv, &priv->map->RFPinsOutput) & 0xfff3;
+	reg82 = rtl818x_ioread16(priv, &priv->map->RFPinsEnable);
+
+	rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, reg82 | 0x7);
+
+	reg84 = rtl818x_ioread16(priv, &priv->map->RFPinsSelect);
+	rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84 | 0x7);
+	udelay(10);
+
+	rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2));
+	udelay(2);
+	rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80);
+	udelay(10);
+
+	for (i = 15; i >= 0; i--) {
+		u16 reg = reg80 | (bangdata & (1 << i)) >> i;
+
+		if (i & 1)
+			rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg);
+
+		rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg | (1 << 1));
+		rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg | (1 << 1));
+
+		if (!(i & 1))
+			rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg);
+	}
+
+	rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2));
+	udelay(10);
+
+	rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2));
+	rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84);
+	msleep(2);
+}
+
+static void rtl8225_write_8051(struct ieee80211_hw *dev, u8 addr, u16 data)
+{
+	struct rtl8187_priv *priv = dev->priv;
+	u16 reg80, reg82, reg84;
+
+	reg80 = rtl818x_ioread16(priv, &priv->map->RFPinsOutput);
+	reg82 = rtl818x_ioread16(priv, &priv->map->RFPinsEnable);
+	reg84 = rtl818x_ioread16(priv, &priv->map->RFPinsSelect);
+
+	reg80 &= ~(0x3 << 2);
+	reg84 &= ~0xF;
+
+	rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, reg82 | 0x0007);
+	rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84 | 0x0007);
+	udelay(10);
+
+	rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2));
+	udelay(2);
+
+	rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80);
+	udelay(10);
+
+	usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0),
+			RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE,
+			addr, 0x8225, &data, sizeof(data), HZ / 2);
+
+	rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2));
+	udelay(10);
+
+	rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2));
+	rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84);
+	msleep(2);
+}
+
+void rtl8225_write(struct ieee80211_hw *dev, u8 addr, u16 data)
+{
+	struct rtl8187_priv *priv = dev->priv;
+
+	if (priv->asic_rev)
+		rtl8225_write_8051(dev, addr, data);
+	else
+		rtl8225_write_bitbang(dev, addr, data);
+}
+
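+/* read a 12-bit RF register value by clocking out the address and
+ * sampling RFPinsInput bit by bit */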
+u16 rtl8225_read(struct ieee80211_hw *dev, u8 addr)
+{
+	struct rtl8187_priv *priv = dev->priv;
+	u16 reg80, reg82, reg84, out;
+	int i;
+
+	reg80 = rtl818x_ioread16(priv, &priv->map->RFPinsOutput);
+	reg82 = rtl818x_ioread16(priv, &priv->map->RFPinsEnable);
+	reg84 = rtl818x_ioread16(priv, &priv->map->RFPinsSelect);
+
+	reg80 &= ~0xF;
+
+	rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, reg82 | 0x000F);
+	rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84 | 0x000F);
+
+	rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2));
+	udelay(4);
+	rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80);
+	udelay(5);
+
+	for (i = 4; i >= 0; i--) {
+		u16 reg = reg80 | ((addr >> i) & 1);
+
+		if (!(i & 1)) {
+			rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg);
+			udelay(1);
+		}
+
+		rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
+				  reg | (1 << 1));
+		udelay(2);
+		rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
+				  reg | (1 << 1));
+		udelay(2);
+
+		if (i & 1) {
+			rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg);
+			udelay(1);
+		}
+	}
+
+	rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
+			  reg80 | (1 << 3) | (1 << 1));
+	udelay(2);
+	rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
+			  reg80 | (1 << 3));
+	udelay(2);
+	rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
+			  reg80 | (1 << 3));
+	udelay(2);
+
+	out = 0;
+	for (i = 11; i >= 0; i--) {
+		rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
+				  reg80 | (1 << 3));
+		udelay(1);
+		rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
+				  reg80 | (1 << 3) | (1 << 1));
+		udelay(2);
+		rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
+				  reg80 | (1 << 3) | (1 << 1));
+		udelay(2);
+		rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
+				  reg80 | (1 << 3) | (1 << 1));
+		udelay(2);
+
+		if (rtl818x_ioread16(priv, &priv->map->RFPinsInput) & (1 << 1))
+			out |= 1 << i;
+
+		rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
+				  reg80 | (1 << 3));
+		udelay(2);
+	}
+
+	rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
+			  reg80 | (1 << 3) | (1 << 2));
+	udelay(2);
+
+	rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, reg82);
+	rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84);
+	rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x03A0);
+
+	return out;
+}
+
+static const u16 rtl8225bcd_rxgain[] = {
+	0x0400, 0x0401, 0x0402, 0x0403, 0x0404, 0x0405, 0x0408, 0x0409,
+	0x040a, 0x040b, 0x0502, 0x0503, 0x0504, 0x0505, 0x0540, 0x0541,
+	0x0542, 0x0543, 0x0544, 0x0545, 0x0580, 0x0581, 0x0582, 0x0583,
+	0x0584, 0x0585, 0x0588, 0x0589, 0x058a, 0x058b, 0x0643, 0x0644,
+	0x0645, 0x0680, 0x0681, 0x0682, 0x0683, 0x0684, 0x0685, 0x0688,
+	0x0689, 0x068a, 0x068b, 0x068c, 0x0742, 0x0743, 0x0744, 0x0745,
+	0x0780, 0x0781, 0x0782, 0x0783, 0x0784, 0x0785, 0x0788, 0x0789,
+	0x078a, 0x078b, 0x078c, 0x078d, 0x0790, 0x0791, 0x0792, 0x0793,
+	0x0794, 0x0795, 0x0798, 0x0799, 0x079a, 0x079b, 0x079c, 0x079d,
+	0x07a0, 0x07a1, 0x07a2, 0x07a3, 0x07a4, 0x07a5, 0x07a8, 0x07a9,
+	0x07aa, 0x07ab, 0x07ac, 0x07ad, 0x07b0, 0x07b1, 0x07b2, 0x07b3,
+	0x07b4, 0x07b5, 0x07b8, 0x07b9, 0x07ba, 0x07bb, 0x07bb
+};
+
+static const u8 rtl8225_agc[] = {
+	0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e,
+	0x9d, 0x9c, 0x9b, 0x9a, 0x99, 0x98, 0x97, 0x96,
+	0x95, 0x94, 0x93, 0x92, 0x91, 0x90, 0x8f, 0x8e,
+	0x8d, 0x8c, 0x8b, 0x8a, 0x89, 0x88, 0x87, 0x86,
+	0x85, 0x84, 0x83, 0x82, 0x81, 0x80, 0x3f, 0x3e,
+	0x3d, 0x3c, 0x3b, 0x3a, 0x39, 0x38, 0x37, 0x36,
+	0x35, 0x34, 0x33, 0x32, 0x31, 0x30, 0x2f, 0x2e,
+	0x2d, 0x2c, 0x2b, 0x2a, 0x29, 0x28, 0x27, 0x26,
+	0x25, 0x24, 0x23, 0x22, 0x21, 0x20, 0x1f, 0x1e,
+	0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18, 0x17, 0x16,
+	0x15, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x0e,
+	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08, 0x07, 0x06,
+	0x05, 0x04, 0x03, 0x02, 0x01, 0x01, 0x01, 0x01,
+	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01
+};
+
+static const u8 rtl8225_gain[] = {
+	0x23, 0x88, 0x7c, 0xa5,	/* -82dBm */
+	0x23, 0x88, 0x7c, 0xb5,	/* -82dBm */
+	0x23, 0x88, 0x7c, 0xc5,	/* -82dBm */
+	0x33, 0x80, 0x79, 0xc5,	/* -78dBm */
+	0x43, 0x78, 0x76, 0xc5,	/* -74dBm */
+	0x53, 0x60, 0x73, 0xc5,	/* -70dBm */
+	0x63, 0x58, 0x70, 0xc5,	/* -66dBm */
+};
+
+static const u8 rtl8225_threshold[] = {
+	0x8d, 0x8d, 0x8d, 0x8d, 0x9d, 0xad, 0xbd
+};
+
+static const u8 rtl8225_tx_gain_cck_ofdm[] = {
+	0x02, 0x06, 0x0e, 0x1e, 0x3e, 0x7e
+};
+
+static const u8 rtl8225_tx_power_cck[] = {
+	0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02,
+	0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02,
+	0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02,
+	0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02,
+	0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03,
+	0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03
+};
+
+static const u8 rtl8225_tx_power_cck_ch14[] = {
+	0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00,
+	0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00,
+	0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00,
+	0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00,
+	0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00,
+	0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00
+};
+
+static const u8 rtl8225_tx_power_ofdm[] = {
+	0x80, 0x90, 0xa2, 0xb5, 0xcb, 0xe4
+};
+
+static const u32 rtl8225_chan[] = {
+	0x085c, 0x08dc, 0x095c, 0x09dc, 0x0a5c, 0x0adc, 0x0b5c,
+	0x0bdc, 0x0c5c, 0x0cdc, 0x0d5c, 0x0ddc, 0x0e5c, 0x0f72
+};
+
+static void rtl8225_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
+{
+	struct rtl8187_priv *priv = dev->priv;
+	u8 cck_power, ofdm_power;
+	const u8 *tmp;
+	u32 reg;
+	int i;
+
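+	/* the per-channel EEPROM value packs the CCK power index in the
+	 * low nibble and the OFDM power index in the high nibble */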
+	cck_power = priv->channels[channel - 1].val & 0xF;
+	ofdm_power = priv->channels[channel - 1].val >> 4;
+
+	cck_power = min(cck_power, (u8)11);
+	ofdm_power = min(ofdm_power, (u8)35);
+
+	rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK,
+			 rtl8225_tx_gain_cck_ofdm[cck_power / 6] >> 1);
+
+	if (channel == 14)
+		tmp = &rtl8225_tx_power_cck_ch14[(cck_power % 6) * 8];
+	else
+		tmp = &rtl8225_tx_power_cck[(cck_power % 6) * 8];
+
+	for (i = 0; i < 8; i++)
+		rtl8225_write_phy_cck(dev, 0x44 + i, *tmp++);
+
+	msleep(1); // FIXME: optional?
+
+	/* anaparam2 on */
+	rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
+	reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
+	rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg | RTL818X_CONFIG3_ANAPARAM_WRITE);
+	rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, RTL8225_ANAPARAM2_ON);
+	rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE);
+	rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
+
+	rtl8225_write_phy_ofdm(dev, 2, 0x42);
+	rtl8225_write_phy_ofdm(dev, 6, 0x00);
+	rtl8225_write_phy_ofdm(dev, 8, 0x00);
+
+	rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM,
+			 rtl8225_tx_gain_cck_ofdm[ofdm_power / 6] >> 1);
+
+	tmp = &rtl8225_tx_power_ofdm[ofdm_power % 6];
+
+	rtl8225_write_phy_ofdm(dev, 5, *tmp);
+	rtl8225_write_phy_ofdm(dev, 7, *tmp);
+
+	msleep(1);
+}
+
+void rtl8225_rf_init(struct ieee80211_hw *dev)
+{
+	struct rtl8187_priv *priv = dev->priv;
+	int i;
+
+	rtl8225_write(dev, 0x0, 0x067); msleep(1);
+	rtl8225_write(dev, 0x1, 0xFE0); msleep(1);
+	rtl8225_write(dev, 0x2, 0x44D); msleep(1);
+	rtl8225_write(dev, 0x3, 0x441); msleep(1);
+	rtl8225_write(dev, 0x4, 0x486); msleep(1);
+	rtl8225_write(dev, 0x5, 0xBC0); msleep(1);
+	rtl8225_write(dev, 0x6, 0xAE6); msleep(1);
+	rtl8225_write(dev, 0x7, 0x82A); msleep(1);
+	rtl8225_write(dev, 0x8, 0x01F); msleep(1);
+	rtl8225_write(dev, 0x9, 0x334); msleep(1);
+	rtl8225_write(dev, 0xA, 0xFD4); msleep(1);
+	rtl8225_write(dev, 0xB, 0x391); msleep(1);
+	rtl8225_write(dev, 0xC, 0x050); msleep(1);
+	rtl8225_write(dev, 0xD, 0x6DB); msleep(1);
+	rtl8225_write(dev, 0xE, 0x029); msleep(1);
+	rtl8225_write(dev, 0xF, 0x914); msleep(100);
+
+	rtl8225_write(dev, 0x2, 0xC4D); msleep(200);
+	rtl8225_write(dev, 0x2, 0x44D); msleep(200);
+
+	if (!(rtl8225_read(dev, 6) & (1 << 7))) {
+		rtl8225_write(dev, 0x02, 0x0c4d);
+		msleep(200);
+		rtl8225_write(dev, 0x02, 0x044d);
+		msleep(100);
+		if (!(rtl8225_read(dev, 6) & (1 << 7)))
+			printk(KERN_WARNING "%s: RF Calibration Failed! %x\n",
+			       wiphy_name(dev->wiphy), rtl8225_read(dev, 6));
+	}
+
+	rtl8225_write(dev, 0x0, 0x127);
+
+	for (i = 0; i < ARRAY_SIZE(rtl8225bcd_rxgain); i++) {
+		rtl8225_write(dev, 0x1, i + 1);
+		rtl8225_write(dev, 0x2, rtl8225bcd_rxgain[i]);
+	}
+
+	rtl8225_write(dev, 0x0, 0x027);
+	rtl8225_write(dev, 0x0, 0x22F);
+
+	for (i = 0; i < ARRAY_SIZE(rtl8225_agc); i++) {
+		rtl8225_write_phy_ofdm(dev, 0xB, rtl8225_agc[i]);
+		msleep(1);
+		rtl8225_write_phy_ofdm(dev, 0xA, 0x80 + i);
+		msleep(1);
+	}
+
+	msleep(1);
+
+	rtl8225_write_phy_ofdm(dev, 0x00, 0x01); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x01, 0x02); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x02, 0x42); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x03, 0x00); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x04, 0x00); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x05, 0x00); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x06, 0x40); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x07, 0x00); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x08, 0x40); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x09, 0xfe); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x0a, 0x09); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x0b, 0x80); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x0c, 0x01); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x0e, 0xd3); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x0f, 0x38); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x10, 0x84); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x11, 0x06); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x12, 0x20); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x13, 0x20); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x14, 0x00); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x15, 0x40); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x16, 0x00); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x17, 0x40); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x18, 0xef); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x19, 0x19); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x1a, 0x20); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x1b, 0x76); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x1c, 0x04); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x1e, 0x95); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x1f, 0x75); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x20, 0x1f); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x21, 0x27); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x22, 0x16); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x24, 0x46); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x25, 0x20); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x26, 0x90); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x27, 0x88); msleep(1);
+
+	rtl8225_write_phy_ofdm(dev, 0x0d, rtl8225_gain[2 * 4]);
+	rtl8225_write_phy_ofdm(dev, 0x1b, rtl8225_gain[2 * 4 + 2]);
+	rtl8225_write_phy_ofdm(dev, 0x1d, rtl8225_gain[2 * 4 + 3]);
+	rtl8225_write_phy_ofdm(dev, 0x23, rtl8225_gain[2 * 4 + 1]);
+
+	rtl8225_write_phy_cck(dev, 0x00, 0x98); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x03, 0x20); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x04, 0x7e); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x05, 0x12); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x06, 0xfc); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x07, 0x78); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x08, 0x2e); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x10, 0x9b); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x11, 0x88); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x12, 0x47); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x13, 0xd0);
+	rtl8225_write_phy_cck(dev, 0x19, 0x00);
+	rtl8225_write_phy_cck(dev, 0x1a, 0xa0);
+	rtl8225_write_phy_cck(dev, 0x1b, 0x08);
+	rtl8225_write_phy_cck(dev, 0x40, 0x86);
+	rtl8225_write_phy_cck(dev, 0x41, 0x8d); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x42, 0x15); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x43, 0x18); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x44, 0x1f); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x45, 0x1e); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x46, 0x1a); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x47, 0x15); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x48, 0x10); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x49, 0x0a); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x4a, 0x05); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x4b, 0x02); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x4c, 0x05); msleep(1);
+
+	rtl818x_iowrite8(priv, &priv->map->TESTR, 0x0D);
+
+	rtl8225_rf_set_tx_power(dev, 1);
+
+	/* RX antenna default to A */
+	rtl8225_write_phy_cck(dev, 0x10, 0x9b); msleep(1);	/* B: 0xDB */
+	rtl8225_write_phy_ofdm(dev, 0x26, 0x90); msleep(1);	/* B: 0x10 */
+
+	rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03);	/* B: 0x00 */
+	msleep(1);
+	rtl818x_iowrite32(priv, (__le32 *)0xFF94, 0x3dc00002);
+
+	/* set sensitivity */
+	rtl8225_write(dev, 0x0c, 0x50);
+	rtl8225_write_phy_ofdm(dev, 0x0d, rtl8225_gain[2 * 4]);
+	rtl8225_write_phy_ofdm(dev, 0x1b, rtl8225_gain[2 * 4 + 2]);
+	rtl8225_write_phy_ofdm(dev, 0x1d, rtl8225_gain[2 * 4 + 3]);
+	rtl8225_write_phy_ofdm(dev, 0x23, rtl8225_gain[2 * 4 + 1]);
+	rtl8225_write_phy_cck(dev, 0x41, rtl8225_threshold[2]);
+}
+
+static const u8 rtl8225z2_tx_power_cck_ch14[] = {
+	0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00
+};
+
+static const u8 rtl8225z2_tx_power_cck[] = {
+	0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04
+};
+
+static const u8 rtl8225z2_tx_power_ofdm[] = {
+	0x42, 0x00, 0x40, 0x00, 0x40
+};
+
+static const u8 rtl8225z2_tx_gain_cck_ofdm[] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+	0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b,
+	0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11,
+	0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d,
+	0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23
+};
+
+static void rtl8225z2_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
+{
+	struct rtl8187_priv *priv = dev->priv;
+	u8 cck_power, ofdm_power;
+	const u8 *tmp;
+	u32 reg;
+	int i;
+
+	cck_power = priv->channels[channel - 1].val & 0xF;
+	ofdm_power = priv->channels[channel - 1].val >> 4;
+
+	cck_power = min(cck_power, (u8)15);
+	cck_power += priv->txpwr_base & 0xF;
+	cck_power = min(cck_power, (u8)35);
+
+	ofdm_power = min(ofdm_power, (u8)15);
+	ofdm_power += priv->txpwr_base >> 4;
+	ofdm_power = min(ofdm_power, (u8)35);
+
+	if (channel == 14)
+		tmp = rtl8225z2_tx_power_cck_ch14;
+	else
+		tmp = rtl8225z2_tx_power_cck;
+
+	for (i = 0; i < 8; i++)
+		rtl8225_write_phy_cck(dev, 0x44 + i, *tmp++);
+
+	rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK,
+			 rtl8225z2_tx_gain_cck_ofdm[cck_power]);
+	msleep(1);
+
+	/* anaparam2 on */
+	rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
+	reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
+	rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg | RTL818X_CONFIG3_ANAPARAM_WRITE);
+	rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, RTL8225_ANAPARAM2_ON);
+	rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE);
+	rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
+
+	rtl8225_write_phy_ofdm(dev, 2, 0x42);
+	rtl8225_write_phy_ofdm(dev, 5, 0x00);
+	rtl8225_write_phy_ofdm(dev, 6, 0x40);
+	rtl8225_write_phy_ofdm(dev, 7, 0x00);
+	rtl8225_write_phy_ofdm(dev, 8, 0x40);
+
+	rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM,
+			 rtl8225z2_tx_gain_cck_ofdm[ofdm_power]);
+	msleep(1);
+}
+
+static const u16 rtl8225z2_rxgain[] = {
+	0x0400, 0x0401, 0x0402, 0x0403, 0x0404, 0x0405, 0x0408, 0x0409,
+	0x040a, 0x040b, 0x0502, 0x0503, 0x0504, 0x0505, 0x0540, 0x0541,
+	0x0542, 0x0543, 0x0544, 0x0545, 0x0580, 0x0581, 0x0582, 0x0583,
+	0x0584, 0x0585, 0x0588, 0x0589, 0x058a, 0x058b, 0x0643, 0x0644,
+	0x0645, 0x0680, 0x0681, 0x0682, 0x0683, 0x0684, 0x0685, 0x0688,
+	0x0689, 0x068a, 0x068b, 0x068c, 0x0742, 0x0743, 0x0744, 0x0745,
+	0x0780, 0x0781, 0x0782, 0x0783, 0x0784, 0x0785, 0x0788, 0x0789,
+	0x078a, 0x078b, 0x078c, 0x078d, 0x0790, 0x0791, 0x0792, 0x0793,
+	0x0794, 0x0795, 0x0798, 0x0799, 0x079a, 0x079b, 0x079c, 0x079d,
+	0x07a0, 0x07a1, 0x07a2, 0x07a3, 0x07a4, 0x07a5, 0x07a8, 0x07a9,
+	0x03aa, 0x03ab, 0x03ac, 0x03ad, 0x03b0, 0x03b1, 0x03b2, 0x03b3,
+	0x03b4, 0x03b5, 0x03b8, 0x03b9, 0x03ba, 0x03bb, 0x03bb
+};
+
+static const u8 rtl8225z2_gain_bg[] = {
+	0x23, 0x15, 0xa5, /* -82-1dBm */
+	0x23, 0x15, 0xb5, /* -82-2dBm */
+	0x23, 0x15, 0xc5, /* -82-3dBm */
+	0x33, 0x15, 0xc5, /* -78dBm */
+	0x43, 0x15, 0xc5, /* -74dBm */
+	0x53, 0x15, 0xc5, /* -70dBm */
+	0x63, 0x15, 0xc5  /* -66dBm */
+};
+
+void rtl8225z2_rf_init(struct ieee80211_hw *dev)
+{
+	struct rtl8187_priv *priv = dev->priv;
+	int i;
+
+	rtl8225_write(dev, 0x0, 0x2BF); msleep(1);
+	rtl8225_write(dev, 0x1, 0xEE0); msleep(1);
+	rtl8225_write(dev, 0x2, 0x44D); msleep(1);
+	rtl8225_write(dev, 0x3, 0x441); msleep(1);
+	rtl8225_write(dev, 0x4, 0x8C3); msleep(1);
+	rtl8225_write(dev, 0x5, 0xC72); msleep(1);
+	rtl8225_write(dev, 0x6, 0x0E6); msleep(1);
+	rtl8225_write(dev, 0x7, 0x82A); msleep(1);
+	rtl8225_write(dev, 0x8, 0x03F); msleep(1);
+	rtl8225_write(dev, 0x9, 0x335); msleep(1);
+	rtl8225_write(dev, 0xa, 0x9D4); msleep(1);
+	rtl8225_write(dev, 0xb, 0x7BB); msleep(1);
+	rtl8225_write(dev, 0xc, 0x850); msleep(1);
+	rtl8225_write(dev, 0xd, 0xCDF); msleep(1);
+	rtl8225_write(dev, 0xe, 0x02B); msleep(1);
+	rtl8225_write(dev, 0xf, 0x114); msleep(100);
+
+	rtl8225_write(dev, 0x0, 0x1B7);
+
+	for (i = 0; i < ARRAY_SIZE(rtl8225z2_rxgain); i++) {
+		rtl8225_write(dev, 0x1, i + 1);
+		rtl8225_write(dev, 0x2, rtl8225z2_rxgain[i]);
+	}
+
+	rtl8225_write(dev, 0x3, 0x080);
+	rtl8225_write(dev, 0x5, 0x004);
+	rtl8225_write(dev, 0x0, 0x0B7);
+	rtl8225_write(dev, 0x2, 0xc4D);
+
+	msleep(200);
+	rtl8225_write(dev, 0x2, 0x44D);
+	msleep(100);
+
+	if (!(rtl8225_read(dev, 6) & (1 << 7))) {
+		rtl8225_write(dev, 0x02, 0x0C4D);
+		msleep(200);
+		rtl8225_write(dev, 0x02, 0x044D);
+		msleep(100);
+		if (!(rtl8225_read(dev, 6) & (1 << 7)))
+			printk(KERN_WARNING "%s: RF Calibration Failed! %x\n",
+			       wiphy_name(dev->wiphy), rtl8225_read(dev, 6));
+	}
+
+	msleep(200);
+
+	rtl8225_write(dev, 0x0, 0x2BF);
+
+	for (i = 0; i < ARRAY_SIZE(rtl8225_agc); i++) {
+		rtl8225_write_phy_ofdm(dev, 0xB, rtl8225_agc[i]);
+		msleep(1);
+		rtl8225_write_phy_ofdm(dev, 0xA, 0x80 + i);
+		msleep(1);
+	}
+
+	msleep(1);
+
+	rtl8225_write_phy_ofdm(dev, 0x00, 0x01); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x01, 0x02); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x02, 0x42); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x03, 0x00); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x04, 0x00); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x05, 0x00); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x06, 0x40); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x07, 0x00); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x08, 0x40); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x09, 0xfe); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x0a, 0x08); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x0b, 0x80); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x0c, 0x01); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x0d, 0x43);
+	rtl8225_write_phy_ofdm(dev, 0x0e, 0xd3); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x0f, 0x38); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x10, 0x84); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x11, 0x07); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x12, 0x20); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x13, 0x20); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x14, 0x00); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x15, 0x40); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x16, 0x00); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x17, 0x40); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x18, 0xef); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x19, 0x19); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x1a, 0x20); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x1b, 0x15); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x1c, 0x04); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x1d, 0xc5); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x1e, 0x95); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x1f, 0x75); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x20, 0x1f); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x21, 0x17); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x22, 0x16); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x23, 0x80); msleep(1); /* FIXME: not needed? */
+	rtl8225_write_phy_ofdm(dev, 0x24, 0x46); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x25, 0x00); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x26, 0x90); msleep(1);
+	rtl8225_write_phy_ofdm(dev, 0x27, 0x88); msleep(1);
+
+	rtl8225_write_phy_ofdm(dev, 0x0b, rtl8225z2_gain_bg[4 * 3]);
+	rtl8225_write_phy_ofdm(dev, 0x1b, rtl8225z2_gain_bg[4 * 3 + 1]);
+	rtl8225_write_phy_ofdm(dev, 0x1d, rtl8225z2_gain_bg[4 * 3 + 2]);
+	rtl8225_write_phy_ofdm(dev, 0x21, 0x37);
+
+	rtl8225_write_phy_cck(dev, 0x00, 0x98); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x03, 0x20); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x04, 0x7e); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x05, 0x12); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x06, 0xfc); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x07, 0x78); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x08, 0x2e); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x10, 0x9b); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x11, 0x88); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x12, 0x47); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x13, 0xd0);
+	rtl8225_write_phy_cck(dev, 0x19, 0x00);
+	rtl8225_write_phy_cck(dev, 0x1a, 0xa0);
+	rtl8225_write_phy_cck(dev, 0x1b, 0x08);
+	rtl8225_write_phy_cck(dev, 0x40, 0x86);
+	rtl8225_write_phy_cck(dev, 0x41, 0x8d); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x42, 0x15); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x43, 0x18); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x44, 0x36); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x45, 0x35); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x46, 0x2e); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x47, 0x25); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x48, 0x1c); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x49, 0x12); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x4a, 0x09); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x4b, 0x04); msleep(1);
+	rtl8225_write_phy_cck(dev, 0x4c, 0x05); msleep(1);
+
+	rtl818x_iowrite8(priv, (u8 *)0xFF5B, 0x0D); msleep(1);
+
+	rtl8225z2_rf_set_tx_power(dev, 1);
+
+	/* RX antenna defaults to A */
+	rtl8225_write_phy_cck(dev, 0x10, 0x9b); msleep(1);	/* B: 0xDB */
+	rtl8225_write_phy_ofdm(dev, 0x26, 0x90); msleep(1);	/* B: 0x10 */
+
+	rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03);	/* B: 0x00 */
+	msleep(1);
+	rtl818x_iowrite32(priv, (__le32 *)0xFF94, 0x3dc00002);
+}
+
+void rtl8225_rf_stop(struct ieee80211_hw *dev)
+{
+	u8 reg;
+	struct rtl8187_priv *priv = dev->priv;
+
+	rtl8225_write(dev, 0x4, 0x1f); msleep(1);
+
+	rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
+	reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
+	rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg | RTL818X_CONFIG3_ANAPARAM_WRITE);
+	rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, RTL8225_ANAPARAM2_OFF);
+	rtl818x_iowrite32(priv, &priv->map->ANAPARAM, RTL8225_ANAPARAM_OFF);
+	rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE);
+	rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
+}
+
+void rtl8225_rf_set_channel(struct ieee80211_hw *dev, int channel)
+{
+	struct rtl8187_priv *priv = dev->priv;
+
+	if (priv->rf_init == rtl8225_rf_init)
+		rtl8225_rf_set_tx_power(dev, channel);
+	else
+		rtl8225z2_rf_set_tx_power(dev, channel);
+
+	rtl8225_write(dev, 0x7, rtl8225_chan[channel - 1]);
+	msleep(10);
+}
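
Note: the TX power path above packs a CCK gain index in the low nibble and
an OFDM gain index in the high nibble of each per-channel byte. A minimal
standalone sketch of the unpack-and-clamp step, where 'chan_val' and
'txpwr_base' stand in for priv->channels[channel - 1].val and
priv->txpwr_base (the helper name is hypothetical; the driver inlines these
steps in rtl8225z2_rf_set_tx_power()):

	/* Clamp a 4-bit power index, add the EEPROM base nibble, and clamp
	 * again so the result stays a valid index (0..35) into the
	 * rtl8225z2_tx_gain_cck_ofdm[] table. */
	static u8 rtl8225z2_clamp_power(u8 nibble, u8 base_nibble)
	{
		u8 pwr = min(nibble, (u8)15);

		pwr += base_nibble;
		return min(pwr, (u8)35);
	}

	u8 cck_power  = rtl8225z2_clamp_power(chan_val & 0xF, txpwr_base & 0xF);
	u8 ofdm_power = rtl8225z2_clamp_power(chan_val >> 4, txpwr_base >> 4);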
diff --git a/drivers/net/wireless/rtl8187_rtl8225.h b/drivers/net/wireless/rtl8187_rtl8225.h
new file mode 100644
index 0000000..798ba4a
--- /dev/null
+++ b/drivers/net/wireless/rtl8187_rtl8225.h
@@ -0,0 +1,44 @@
+/*
+ * Radio tuning definitions for RTL8225 on RTL8187
+ *
+ * Copyright 2007 Michael Wu <flamingice@sourmilk.net>
+ * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
+ *
+ * Based on the r8187 driver, which is:
+ * Copyright 2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef RTL8187_RTL8225_H
+#define RTL8187_RTL8225_H
+
+#define RTL8225_ANAPARAM_ON	0xa0000a59
+#define RTL8225_ANAPARAM2_ON	0x860c7312
+#define RTL8225_ANAPARAM_OFF	0xa00beb59
+#define RTL8225_ANAPARAM2_OFF	0x840dec11
+
+void rtl8225_write(struct ieee80211_hw *, u8 addr, u16 data);
+u16  rtl8225_read(struct ieee80211_hw *, u8 addr);
+
+void rtl8225_rf_init(struct ieee80211_hw *);
+void rtl8225z2_rf_init(struct ieee80211_hw *);
+void rtl8225_rf_stop(struct ieee80211_hw *);
+void rtl8225_rf_set_channel(struct ieee80211_hw *, int);
+
+
+static inline void rtl8225_write_phy_ofdm(struct ieee80211_hw *dev,
+					  u8 addr, u32 data)
+{
+	rtl8187_write_phy(dev, addr, data);
+}
+
+static inline void rtl8225_write_phy_cck(struct ieee80211_hw *dev,
+					 u8 addr, u32 data)
+{
+	rtl8187_write_phy(dev, addr, data | 0x10000);
+}
+
+#endif /* RTL8187_RTL8225_H */
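
Note: both wrappers above funnel into rtl8187_write_phy(); the CCK variant
ORs in bit 16, which appears to act as a CCK-page select. Concretely:

	rtl8225_write_phy_ofdm(dev, 0x02, 0x42); /* rtl8187_write_phy(dev, 0x02, 0x42) */
	rtl8225_write_phy_cck(dev, 0x04, 0x7e);  /* rtl8187_write_phy(dev, 0x04, 0x1007e) */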
diff --git a/drivers/net/wireless/rtl818x.h b/drivers/net/wireless/rtl818x.h
new file mode 100644
index 0000000..283de30
--- /dev/null
+++ b/drivers/net/wireless/rtl818x.h
@@ -0,0 +1,226 @@
+/*
+ * Definitions for RTL818x hardware
+ *
+ * Copyright 2007 Michael Wu <flamingice@sourmilk.net>
+ * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
+ *
+ * Based on the r8187 driver, which is:
+ * Copyright 2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef RTL818X_H
+#define RTL818X_H
+
+struct rtl818x_csr {
+	u8	MAC[6];
+	u8	reserved_0[2];
+	__le32	MAR[2];
+	u8	RX_FIFO_COUNT;
+	u8	reserved_1;
+	u8	TX_FIFO_COUNT;
+	u8	BQREQ;
+	u8	reserved_2[4];
+	__le32	TSFT[2];
+	__le32	TLPDA;
+	__le32	TNPDA;
+	__le32	THPDA;
+	__le16	BRSR;
+	u8	BSSID[6];
+	u8	RESP_RATE;
+	u8	EIFS;
+	u8	reserved_3[1];
+	u8	CMD;
+#define RTL818X_CMD_TX_ENABLE		(1 << 2)
+#define RTL818X_CMD_RX_ENABLE		(1 << 3)
+#define RTL818X_CMD_RESET		(1 << 4)
+	u8	reserved_4[4];
+	__le16	INT_MASK;
+	__le16	INT_STATUS;
+#define RTL818X_INT_RX_OK		(1 <<  0)
+#define RTL818X_INT_RX_ERR		(1 <<  1)
+#define RTL818X_INT_TXL_OK		(1 <<  2)
+#define RTL818X_INT_TXL_ERR		(1 <<  3)
+#define RTL818X_INT_RX_DU		(1 <<  4)
+#define RTL818X_INT_RX_FO		(1 <<  5)
+#define RTL818X_INT_TXN_OK		(1 <<  6)
+#define RTL818X_INT_TXN_ERR		(1 <<  7)
+#define RTL818X_INT_TXH_OK		(1 <<  8)
+#define RTL818X_INT_TXH_ERR		(1 <<  9)
+#define RTL818X_INT_TXB_OK		(1 << 10)
+#define RTL818X_INT_TXB_ERR		(1 << 11)
+#define RTL818X_INT_ATIM		(1 << 12)
+#define RTL818X_INT_BEACON		(1 << 13)
+#define RTL818X_INT_TIME_OUT		(1 << 14)
+#define RTL818X_INT_TX_FO		(1 << 15)
+	__le32	TX_CONF;
+#define RTL818X_TX_CONF_LOOPBACK_MAC	(1 << 17)
+#define RTL818X_TX_CONF_NO_ICV		(1 << 19)
+#define RTL818X_TX_CONF_DISCW		(1 << 20)
+#define RTL818X_TX_CONF_R8180_ABCD	(2 << 25)
+#define RTL818X_TX_CONF_R8180_F		(3 << 25)
+#define RTL818X_TX_CONF_R8185_ABC	(4 << 25)
+#define RTL818X_TX_CONF_R8185_D		(5 << 25)
+#define RTL818X_TX_CONF_HWVER_MASK	(7 << 25)
+#define RTL818X_TX_CONF_CW_MIN		(1 << 31)
+	__le32	RX_CONF;
+#define RTL818X_RX_CONF_MONITOR		(1 <<  0)
+#define RTL818X_RX_CONF_NICMAC		(1 <<  1)
+#define RTL818X_RX_CONF_MULTICAST	(1 <<  2)
+#define RTL818X_RX_CONF_BROADCAST	(1 <<  3)
+#define RTL818X_RX_CONF_DATA		(1 << 18)
+#define RTL818X_RX_CONF_CTRL		(1 << 19)
+#define RTL818X_RX_CONF_MGMT		(1 << 20)
+#define RTL818X_RX_CONF_BSSID		(1 << 23)
+#define RTL818X_RX_CONF_RX_AUTORESETPHY	(1 << 28)
+#define RTL818X_RX_CONF_ONLYERLPKT	(1 << 31)
+	__le32	INT_TIMEOUT;
+	__le32	TBDA;
+	u8	EEPROM_CMD;
+#define RTL818X_EEPROM_CMD_READ		(1 << 0)
+#define RTL818X_EEPROM_CMD_WRITE	(1 << 1)
+#define RTL818X_EEPROM_CMD_CK		(1 << 2)
+#define RTL818X_EEPROM_CMD_CS		(1 << 3)
+#define RTL818X_EEPROM_CMD_NORMAL	(0 << 6)
+#define RTL818X_EEPROM_CMD_LOAD		(1 << 6)
+#define RTL818X_EEPROM_CMD_PROGRAM	(2 << 6)
+#define RTL818X_EEPROM_CMD_CONFIG	(3 << 6)
+	u8	CONFIG0;
+	u8	CONFIG1;
+	u8	CONFIG2;
+	__le32	ANAPARAM;
+	u8	MSR;
+#define RTL818X_MSR_NO_LINK		(0 << 2)
+#define RTL818X_MSR_ADHOC		(1 << 2)
+#define RTL818X_MSR_INFRA		(2 << 2)
+	u8	CONFIG3;
+#define RTL818X_CONFIG3_ANAPARAM_WRITE	(1 << 6)
+	u8	CONFIG4;
+#define RTL818X_CONFIG4_POWEROFF	(1 << 6)
+#define RTL818X_CONFIG4_VCOOFF		(1 << 7)
+	u8	TESTR;
+	u8	reserved_9[2];
+	__le16	PGSELECT;
+	__le32	ANAPARAM2;
+	u8	reserved_10[12];
+	__le16	BEACON_INTERVAL;
+	__le16	ATIM_WND;
+	__le16	BEACON_INTERVAL_TIME;
+	__le16	ATIMTR_INTERVAL;
+	u8	reserved_11[4];
+	u8	PHY[4];
+	__le16	RFPinsOutput;
+	__le16	RFPinsEnable;
+	__le16	RFPinsSelect;
+	__le16	RFPinsInput;
+	__le32	RF_PARA;
+	__le32	RF_TIMING;
+	u8	GP_ENABLE;
+	u8	GPIO;
+	u8	reserved_12[10];
+	u8	TX_AGC_CTL;
+#define RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT		(1 << 0)
+#define RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT	(1 << 1)
+#define RTL818X_TX_AGC_CTL_FEEDBACK_ANT			(1 << 2)
+	u8	TX_GAIN_CCK;
+	u8	TX_GAIN_OFDM;
+	u8	TX_ANTENNA;
+	u8	reserved_13[16];
+	u8	WPA_CONF;
+	u8	reserved_14[3];
+	u8	SIFS;
+	u8	DIFS;
+	u8	SLOT;
+	u8	reserved_15[5];
+	u8	CW_CONF;
+#define RTL818X_CW_CONF_PERPACKET_CW_SHIFT	(1 << 0)
+#define RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT	(1 << 1)
+	u8	CW_VAL;
+	u8	RATE_FALLBACK;
+	u8	reserved_16[25];
+	u8	CONFIG5;
+	u8	TX_DMA_POLLING;
+	u8	reserved_17[2];
+	__le16	CWR;
+	u8	RETRY_CTR;
+	u8	reserved_18[5];
+	__le32	RDSAR;
+	u8	reserved_19[18];
+	u16	TALLY_CNT;
+	u8	TALLY_SEL;
+} __attribute__((packed));
+
+static const struct ieee80211_rate rtl818x_rates[] = {
+	{ .rate = 10,
+	  .val = 0,
+	  .flags = IEEE80211_RATE_CCK },
+	{ .rate = 20,
+	  .val = 1,
+	  .flags = IEEE80211_RATE_CCK },
+	{ .rate = 55,
+	  .val = 2,
+	  .flags = IEEE80211_RATE_CCK },
+	{ .rate = 110,
+	  .val = 3,
+	  .flags = IEEE80211_RATE_CCK },
+	{ .rate = 60,
+	  .val = 4,
+	  .flags = IEEE80211_RATE_OFDM },
+	{ .rate = 90,
+	  .val = 5,
+	  .flags = IEEE80211_RATE_OFDM },
+	{ .rate = 120,
+	  .val = 6,
+	  .flags = IEEE80211_RATE_OFDM },
+	{ .rate = 180,
+	  .val = 7,
+	  .flags = IEEE80211_RATE_OFDM },
+	{ .rate = 240,
+	  .val = 8,
+	  .flags = IEEE80211_RATE_OFDM },
+	{ .rate = 360,
+	  .val = 9,
+	  .flags = IEEE80211_RATE_OFDM },
+	{ .rate = 480,
+	  .val = 10,
+	  .flags = IEEE80211_RATE_OFDM },
+	{ .rate = 540,
+	  .val = 11,
+	  .flags = IEEE80211_RATE_OFDM },
+};
+
+static const struct ieee80211_channel rtl818x_channels[] = {
+	{ .chan = 1,
+	  .freq = 2412},
+	{ .chan = 2,
+	  .freq = 2417},
+	{ .chan = 3,
+	  .freq = 2422},
+	{ .chan = 4,
+	  .freq = 2427},
+	{ .chan = 5,
+	  .freq = 2432},
+	{ .chan = 6,
+	  .freq = 2437},
+	{ .chan = 7,
+	  .freq = 2442},
+	{ .chan = 8,
+	  .freq = 2447},
+	{ .chan = 9,
+	  .freq = 2452},
+	{ .chan = 10,
+	  .freq = 2457},
+	{ .chan = 11,
+	  .freq = 2462},
+	{ .chan = 12,
+	  .freq = 2467},
+	{ .chan = 13,
+	  .freq = 2472},
+	{ .chan = 14,
+	  .freq = 2484}
+};
+
+#endif /* RTL818X_H */
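
Note: the .rate fields above use mac80211's unit convention of the time,
100 kbit/s, so the two tables span the full 802.11b/g rate and channel set.
For example:

	rtl818x_rates[2].rate  == 55;  /* 5.5 Mbit/s, CCK  */
	rtl818x_rates[11].rate == 540; /* 54 Mbit/s, OFDM  */
	rtl818x_channels[13].freq == 2484; /* channel 14, 802.11b, Japan only */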
diff --git a/drivers/net/wireless/zd1211rw/Makefile b/drivers/net/wireless/zd1211rw/Makefile
index 6603ad5..4d50590 100644
--- a/drivers/net/wireless/zd1211rw/Makefile
+++ b/drivers/net/wireless/zd1211rw/Makefile
@@ -3,7 +3,7 @@
 zd1211rw-objs := zd_chip.o zd_ieee80211.o \
 		zd_mac.o zd_netdev.o \
 		zd_rf_al2230.o zd_rf_rf2959.o \
-		zd_rf_al7230b.o \
+		zd_rf_al7230b.o zd_rf_uw2453.o \
 		zd_rf.o zd_usb.o zd_util.o
 
 ifeq ($(CONFIG_ZD1211RW_DEBUG),y)
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 95b4a2a..5b624bf 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -1253,6 +1253,9 @@
 {
 	int r;
 
+	if (!zd_rf_should_update_pwr_int(&chip->rf))
+		return 0;
+
 	r = update_pwr_int(chip, channel);
 	if (r)
 		return r;
@@ -1283,7 +1286,7 @@
 	int r;
 	u32 value;
 
-	if (!chip->patch_cck_gain)
+	if (!chip->patch_cck_gain || !zd_rf_should_patch_cck_gain(&chip->rf))
 		return 0;
 
 	ZD_ASSERT(mutex_is_locked(&chip->mutex));
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index ce0a5f6..79d0288 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -608,6 +608,9 @@
 #define CR_ZD1211B_TXOP			CTL_REG(0x0b20)
 #define CR_ZD1211B_RETRY_MAX		CTL_REG(0x0b28)
 
+/* Used to detect PLL lock */
+#define UW2453_INTR_REG			((zd_addr_t)0x85c1)
+
 #define CWIN_SIZE			0x007f043f
 
 
diff --git a/drivers/net/wireless/zd1211rw/zd_rf.c b/drivers/net/wireless/zd1211rw/zd_rf.c
index 549c23b..7407409 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf.c
@@ -52,34 +52,38 @@
 void zd_rf_init(struct zd_rf *rf)
 {
 	memset(rf, 0, sizeof(*rf));
+
+	/* default to updating the channel integration values, as almost all
+	 * RFs want this */
+	rf->update_channel_int = 1;
 }
 
 void zd_rf_clear(struct zd_rf *rf)
 {
+	if (rf->clear)
+		rf->clear(rf);
 	ZD_MEMCLEAR(rf, sizeof(*rf));
 }
 
 int zd_rf_init_hw(struct zd_rf *rf, u8 type)
 {
-	int r, t;
+	int r = 0;
+	int t;
 	struct zd_chip *chip = zd_rf_to_chip(rf);
 
 	ZD_ASSERT(mutex_is_locked(&chip->mutex));
 	switch (type) {
 	case RF2959_RF:
 		r = zd_rf_init_rf2959(rf);
-		if (r)
-			return r;
 		break;
 	case AL2230_RF:
 		r = zd_rf_init_al2230(rf);
-		if (r)
-			return r;
 		break;
 	case AL7230B_RF:
 		r = zd_rf_init_al7230b(rf);
-		if (r)
-			return r;
+		break;
+	case UW2453_RF:
+		r = zd_rf_init_uw2453(rf);
 		break;
 	default:
 		dev_err(zd_chip_dev(chip),
@@ -88,6 +92,9 @@
 		return -ENODEV;
 	}
 
+	if (r)
+		return r;
+
 	rf->type = type;
 
 	r = zd_chip_lock_phy_regs(chip);
diff --git a/drivers/net/wireless/zd1211rw/zd_rf.h b/drivers/net/wireless/zd1211rw/zd_rf.h
index aa9cc10..c6dfd82 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf.h
+++ b/drivers/net/wireless/zd1211rw/zd_rf.h
@@ -48,12 +48,26 @@
 
 	u8 channel;
 
+	/* whether channel integration and calibration should be updated;
+	 * defaults to 1 (yes) */
+	u8 update_channel_int:1;
+
+	/* whether CR47 should be patched from the EEPROM, if the appropriate
+	 * flag is set in the POD. The vendor driver suggests that this should
+	 * be done for all RFs, but a bug in their code prevents their
+	 * HW_OverWritePhyRegFromE2P() routine from ever taking effect. */
+	u8 patch_cck_gain:1;
+
+	/* private RF driver data */
+	void *priv;
+
 	/* RF-specific functions */
 	int (*init_hw)(struct zd_rf *rf);
 	int (*set_channel)(struct zd_rf *rf, u8 channel);
 	int (*switch_radio_on)(struct zd_rf *rf);
 	int (*switch_radio_off)(struct zd_rf *rf);
 	int (*patch_6m_band_edge)(struct zd_rf *rf, u8 channel);
+	void (*clear)(struct zd_rf *rf);
 };
 
 const char *zd_rf_name(u8 type);
@@ -71,10 +85,21 @@
 int zd_rf_patch_6m_band_edge(struct zd_rf *rf, u8 channel);
 int zd_rf_generic_patch_6m(struct zd_rf *rf, u8 channel);
 
+static inline int zd_rf_should_update_pwr_int(struct zd_rf *rf)
+{
+	return rf->update_channel_int;
+}
+
+static inline int zd_rf_should_patch_cck_gain(struct zd_rf *rf)
+{
+	return rf->patch_cck_gain;
+}
+
 /* Functions for individual RF chips */
 
 int zd_rf_init_rf2959(struct zd_rf *rf);
 int zd_rf_init_al2230(struct zd_rf *rf);
 int zd_rf_init_al7230b(struct zd_rf *rf);
+int zd_rf_init_uw2453(struct zd_rf *rf);
 
 #endif /* _ZD_RF_H */
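
Note: to make the new hooks concrete, this is roughly how an RF backend is
expected to fill in a struct zd_rf. This is only a sketch; the 'example_*'
names are hypothetical, and the real users are zd_rf_init_al2230() and
zd_rf_init_uw2453() below:

	int zd_rf_init_example(struct zd_rf *rf)
	{
		rf->init_hw = example_init_hw;
		rf->set_channel = example_set_channel;
		rf->switch_radio_on = example_switch_radio_on;
		rf->switch_radio_off = example_switch_radio_off;
		rf->patch_cck_gain = 1;     /* do patch CR47 from the EEPROM */
		rf->update_channel_int = 0; /* chip does its own TX power integration */
		rf->clear = example_clear;  /* frees rf->priv on teardown */
		return 0;
	}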
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_al2230.c b/drivers/net/wireless/zd1211rw/zd_rf_al2230.c
index 511392a..e7a4ecf 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf_al2230.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf_al2230.c
@@ -432,5 +432,6 @@
 		rf->switch_radio_on = zd1211_al2230_switch_radio_on;
 	}
 	rf->patch_6m_band_edge = zd_rf_generic_patch_6m;
+	rf->patch_cck_gain = 1;
 	return 0;
 }
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c b/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c
index 5e5e9dd..f4e8b6a 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c
@@ -483,6 +483,7 @@
 		rf->switch_radio_on = zd1211_al7230b_switch_radio_on;
 		rf->set_channel = zd1211_al7230b_set_channel;
 		rf->patch_6m_band_edge = zd_rf_generic_patch_6m;
+		rf->patch_cck_gain = 1;
 	}
 
 	rf->switch_radio_off = al7230b_switch_radio_off;
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c b/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c
new file mode 100644
index 0000000..414e40d
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c
@@ -0,0 +1,535 @@
+/* zd_rf_uw2453.c: Functions for the UW2453 RF controller
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+
+#include "zd_rf.h"
+#include "zd_usb.h"
+#include "zd_chip.h"
+
+/* This RF programming code is based upon the code found in v2.16.0.0 of the
+ * ZyDAS vendor driver. Unlike other RFs, Ubec publishes full technical specs
+ * for this RF on their website, so we're able to understand more than
+ * usual about what is going on. Thumbs up to Ubec for doing that. */
+
+/* The 3-wire serial interface provides access to 8 write-only registers.
+ * The data format is a 4 bit register address followed by a 20 bit value. */
+#define UW2453_REGWRITE(reg, val) ((((reg) & 0xf) << 20) | ((val) & 0xfffff))
+
+/* For channel tuning, we have to configure registers 1 (synthesizer), 2 (synth
+ * fractional divide ratio) and 3 (VCO config).
+ *
+ * We configure the RF to produce an interrupt when the PLL is locked onto
+ * the configured frequency. During initialization, we run through a variety
+ * of different VCO configurations on channel 1 until we detect a PLL lock.
+ * When this happens, we remember which VCO configuration produced the lock
+ * and use it later. Actually, we use the configuration *after* the one that
+ * produced the lock, which seems odd, but it works.
+ *
+ * If we do not see a PLL lock on any standard VCO config, we fall back on an
+ * autocal configuration, which has a fixed (as opposed to per-channel) VCO
+ * config and different synth values from the standard set (divide ratio
+ * is still shared with the standard set). */
+
+/* The per-channel synth values for all standard VCO configurations. These get
+ * written to register 1. */
+static const u8 uw2453_std_synth[] = {
+	RF_CHANNEL( 1) = 0x47,
+	RF_CHANNEL( 2) = 0x47,
+	RF_CHANNEL( 3) = 0x67,
+	RF_CHANNEL( 4) = 0x67,
+	RF_CHANNEL( 5) = 0x67,
+	RF_CHANNEL( 6) = 0x67,
+	RF_CHANNEL( 7) = 0x57,
+	RF_CHANNEL( 8) = 0x57,
+	RF_CHANNEL( 9) = 0x57,
+	RF_CHANNEL(10) = 0x57,
+	RF_CHANNEL(11) = 0x77,
+	RF_CHANNEL(12) = 0x77,
+	RF_CHANNEL(13) = 0x77,
+	RF_CHANNEL(14) = 0x4f,
+};
+
+/* This table stores the synthesizer fractional divide ratio for *all* VCO
+ * configurations (both standard and autocal). These get written to register 2.
+ */
+static const u16 uw2453_synth_divide[] = {
+	RF_CHANNEL( 1) = 0x999,
+	RF_CHANNEL( 2) = 0x99b,
+	RF_CHANNEL( 3) = 0x998,
+	RF_CHANNEL( 4) = 0x99a,
+	RF_CHANNEL( 5) = 0x999,
+	RF_CHANNEL( 6) = 0x99b,
+	RF_CHANNEL( 7) = 0x998,
+	RF_CHANNEL( 8) = 0x99a,
+	RF_CHANNEL( 9) = 0x999,
+	RF_CHANNEL(10) = 0x99b,
+	RF_CHANNEL(11) = 0x998,
+	RF_CHANNEL(12) = 0x99a,
+	RF_CHANNEL(13) = 0x999,
+	RF_CHANNEL(14) = 0xccc,
+};
+
+/* Here is the data for all the standard VCO configurations. We shrink our
+ * table a little by observing that both channels in a consecutive pair share
+ * the same value. We also observe that the high 4 bits ([0:3] in the specs)
+ * are all 'Reserved' and are always set to 0x4 - we chop them off in the data
+ * below. */
+#define CHAN_TO_PAIRIDX(a) (((a) - 1) / 2)
+#define RF_CHANPAIR(a, b) [CHAN_TO_PAIRIDX(a)]
+static const u16 uw2453_std_vco_cfg[][7] = {
+	{ /* table 1 */
+		RF_CHANPAIR( 1,  2) = 0x664d,
+		RF_CHANPAIR( 3,  4) = 0x604d,
+		RF_CHANPAIR( 5,  6) = 0x6675,
+		RF_CHANPAIR( 7,  8) = 0x6475,
+		RF_CHANPAIR( 9, 10) = 0x6655,
+		RF_CHANPAIR(11, 12) = 0x6455,
+		RF_CHANPAIR(13, 14) = 0x6665,
+	},
+	{ /* table 2 */
+		RF_CHANPAIR( 1,  2) = 0x666d,
+		RF_CHANPAIR( 3,  4) = 0x606d,
+		RF_CHANPAIR( 5,  6) = 0x664d,
+		RF_CHANPAIR( 7,  8) = 0x644d,
+		RF_CHANPAIR( 9, 10) = 0x6675,
+		RF_CHANPAIR(11, 12) = 0x6475,
+		RF_CHANPAIR(13, 14) = 0x6655,
+	},
+	{ /* table 3 */
+		RF_CHANPAIR( 1,  2) = 0x665d,
+		RF_CHANPAIR( 3,  4) = 0x605d,
+		RF_CHANPAIR( 5,  6) = 0x666d,
+		RF_CHANPAIR( 7,  8) = 0x646d,
+		RF_CHANPAIR( 9, 10) = 0x664d,
+		RF_CHANPAIR(11, 12) = 0x644d,
+		RF_CHANPAIR(13, 14) = 0x6675,
+	},
+	{ /* table 4 */
+		RF_CHANPAIR( 1,  2) = 0x667d,
+		RF_CHANPAIR( 3,  4) = 0x607d,
+		RF_CHANPAIR( 5,  6) = 0x665d,
+		RF_CHANPAIR( 7,  8) = 0x645d,
+		RF_CHANPAIR( 9, 10) = 0x666d,
+		RF_CHANPAIR(11, 12) = 0x646d,
+		RF_CHANPAIR(13, 14) = 0x664d,
+	},
+	{ /* table 5 */
+		RF_CHANPAIR( 1,  2) = 0x6643,
+		RF_CHANPAIR( 3,  4) = 0x6043,
+		RF_CHANPAIR( 5,  6) = 0x667d,
+		RF_CHANPAIR( 7,  8) = 0x647d,
+		RF_CHANPAIR( 9, 10) = 0x665d,
+		RF_CHANPAIR(11, 12) = 0x645d,
+		RF_CHANPAIR(13, 14) = 0x666d,
+	},
+	{ /* table 6 */
+		RF_CHANPAIR( 1,  2) = 0x6663,
+		RF_CHANPAIR( 3,  4) = 0x6063,
+		RF_CHANPAIR( 5,  6) = 0x6643,
+		RF_CHANPAIR( 7,  8) = 0x6443,
+		RF_CHANPAIR( 9, 10) = 0x667d,
+		RF_CHANPAIR(11, 12) = 0x647d,
+		RF_CHANPAIR(13, 14) = 0x665d,
+	},
+	{ /* table 7 */
+		RF_CHANPAIR( 1,  2) = 0x6653,
+		RF_CHANPAIR( 3,  4) = 0x6053,
+		RF_CHANPAIR( 5,  6) = 0x6663,
+		RF_CHANPAIR( 7,  8) = 0x6463,
+		RF_CHANPAIR( 9, 10) = 0x6643,
+		RF_CHANPAIR(11, 12) = 0x6443,
+		RF_CHANPAIR(13, 14) = 0x667d,
+	},
+	{ /* table 8 */
+		RF_CHANPAIR( 1,  2) = 0x6673,
+		RF_CHANPAIR( 3,  4) = 0x6073,
+		RF_CHANPAIR( 5,  6) = 0x6653,
+		RF_CHANPAIR( 7,  8) = 0x6453,
+		RF_CHANPAIR( 9, 10) = 0x6663,
+		RF_CHANPAIR(11, 12) = 0x6463,
+		RF_CHANPAIR(13, 14) = 0x6643,
+	},
+	{ /* table 9 */
+		RF_CHANPAIR( 1,  2) = 0x664b,
+		RF_CHANPAIR( 3,  4) = 0x604b,
+		RF_CHANPAIR( 5,  6) = 0x6673,
+		RF_CHANPAIR( 7,  8) = 0x6473,
+		RF_CHANPAIR( 9, 10) = 0x6653,
+		RF_CHANPAIR(11, 12) = 0x6453,
+		RF_CHANPAIR(13, 14) = 0x6663,
+	},
+	{ /* table 10 */
+		RF_CHANPAIR( 1,  2) = 0x666b,
+		RF_CHANPAIR( 3,  4) = 0x606b,
+		RF_CHANPAIR( 5,  6) = 0x664b,
+		RF_CHANPAIR( 7,  8) = 0x644b,
+		RF_CHANPAIR( 9, 10) = 0x6673,
+		RF_CHANPAIR(11, 12) = 0x6473,
+		RF_CHANPAIR(13, 14) = 0x6653,
+	},
+	{ /* table 11 */
+		RF_CHANPAIR( 1,  2) = 0x665b,
+		RF_CHANPAIR( 3,  4) = 0x605b,
+		RF_CHANPAIR( 5,  6) = 0x666b,
+		RF_CHANPAIR( 7,  8) = 0x646b,
+		RF_CHANPAIR( 9, 10) = 0x664b,
+		RF_CHANPAIR(11, 12) = 0x644b,
+		RF_CHANPAIR(13, 14) = 0x6673,
+	},
+
+};
+
+/* The per-channel synth values for autocal. These get written to register 1. */
+static const u16 uw2453_autocal_synth[] = {
+	RF_CHANNEL( 1) = 0x6847,
+	RF_CHANNEL( 2) = 0x6847,
+	RF_CHANNEL( 3) = 0x6867,
+	RF_CHANNEL( 4) = 0x6867,
+	RF_CHANNEL( 5) = 0x6867,
+	RF_CHANNEL( 6) = 0x6867,
+	RF_CHANNEL( 7) = 0x6857,
+	RF_CHANNEL( 8) = 0x6857,
+	RF_CHANNEL( 9) = 0x6857,
+	RF_CHANNEL(10) = 0x6857,
+	RF_CHANNEL(11) = 0x6877,
+	RF_CHANNEL(12) = 0x6877,
+	RF_CHANNEL(13) = 0x6877,
+	RF_CHANNEL(14) = 0x684f,
+};
+
+/* The VCO configuration for autocal (all channels) */
+static const u16 UW2453_AUTOCAL_VCO_CFG = 0x6662;
+
+/* TX gain settings. The array index corresponds to the TX power integration
+ * values found in the EEPROM. The values get written to register 7. */
+static const u32 uw2453_txgain[] = {
+	[0x00] = 0x0e313,
+	[0x01] = 0x0fb13,
+	[0x02] = 0x0e093,
+	[0x03] = 0x0f893,
+	[0x04] = 0x0ea93,
+	[0x05] = 0x1f093,
+	[0x06] = 0x1f493,
+	[0x07] = 0x1f693,
+	[0x08] = 0x1f393,
+	[0x09] = 0x1f35b,
+	[0x0a] = 0x1e6db,
+	[0x0b] = 0x1ff3f,
+	[0x0c] = 0x1ffff,
+	[0x0d] = 0x361d7,
+	[0x0e] = 0x37fbf,
+	[0x0f] = 0x3ff8b,
+	[0x10] = 0x3ff33,
+	[0x11] = 0x3fb3f,
+	[0x12] = 0x3ffff,
+};
+
+/* RF-specific structure */
+struct uw2453_priv {
+	/* index into synth/VCO config tables where PLL lock was found
+	 * -1 means autocal */
+	int config;
+};
+
+#define UW2453_PRIV(rf) ((struct uw2453_priv *) (rf)->priv)
+
+static int uw2453_synth_set_channel(struct zd_chip *chip, int channel,
+	bool autocal)
+{
+	int r;
+	int idx = channel - 1;
+	u32 val;
+
+	if (autocal)
+		val = UW2453_REGWRITE(1, uw2453_autocal_synth[idx]);
+	else
+		val = UW2453_REGWRITE(1, uw2453_std_synth[idx]);
+
+	r = zd_rfwrite_locked(chip, val, RF_RV_BITS);
+	if (r)
+		return r;
+
+	return zd_rfwrite_locked(chip,
+		UW2453_REGWRITE(2, uw2453_synth_divide[idx]), RF_RV_BITS);
+}
+
+static int uw2453_write_vco_cfg(struct zd_chip *chip, u16 value)
+{
+	/* vendor driver always sets these upper bits even though the specs say
+	 * they are reserved */
+	u32 val = 0x40000 | value;
+	return zd_rfwrite_locked(chip, UW2453_REGWRITE(3, val), RF_RV_BITS);
+}
+
+static int uw2453_init_mode(struct zd_chip *chip)
+{
+	static const u32 rv[] = {
+		UW2453_REGWRITE(0, 0x25f98), /* enter IDLE mode */
+		UW2453_REGWRITE(0, 0x25f9a), /* enter CAL_VCO mode */
+		UW2453_REGWRITE(0, 0x25f94), /* enter RX/TX mode */
+		UW2453_REGWRITE(0, 0x27fd4), /* power down RSSI circuit */
+	};
+
+	return zd_rfwritev_locked(chip, rv, ARRAY_SIZE(rv), RF_RV_BITS);
+}
+
+static int uw2453_set_tx_gain_level(struct zd_chip *chip, int channel)
+{
+	u8 int_value = chip->pwr_int_values[channel - 1];
+
+	if (int_value >= ARRAY_SIZE(uw2453_txgain)) {
+		dev_dbg_f(zd_chip_dev(chip), "can't configure TX gain for "
+			  "int value %x on channel %d\n", int_value, channel);
+		return 0;
+	}
+
+	return zd_rfwrite_locked(chip,
+		UW2453_REGWRITE(7, uw2453_txgain[int_value]), RF_RV_BITS);
+}
+
+static int uw2453_init_hw(struct zd_rf *rf)
+{
+	int i, r;
+	int found_config = -1;
+	u16 intr_status;
+	struct zd_chip *chip = zd_rf_to_chip(rf);
+
+	static const struct zd_ioreq16 ioreqs[] = {
+		{ CR10,  0x89 }, { CR15,  0x20 },
+		{ CR17,  0x28 }, /* 6112 no change */
+		{ CR23,  0x38 }, { CR24,  0x20 }, { CR26,  0x93 },
+		{ CR27,  0x15 }, { CR28,  0x3e }, { CR29,  0x00 },
+		{ CR33,  0x28 }, { CR34,  0x30 },
+		{ CR35,  0x43 }, /* 6112 3e->43 */
+		{ CR41,  0x24 }, { CR44,  0x32 },
+		{ CR46,  0x92 }, /* 6112 96->92 */
+		{ CR47,  0x1e },
+		{ CR48,  0x04 }, /* 5602 Roger */
+		{ CR49,  0xfa }, { CR79,  0x58 }, { CR80,  0x30 },
+		{ CR81,  0x30 }, { CR87,  0x0a }, { CR89,  0x04 },
+		{ CR91,  0x00 }, { CR92,  0x0a }, { CR98,  0x8d },
+		{ CR99,  0x28 }, { CR100, 0x02 },
+		{ CR101, 0x09 }, /* 6112 13->1f 6220 1f->13 6407 13->9 */
+		{ CR102, 0x27 },
+		{ CR106, 0x1c }, /* 5d07 5112 1f->1c 6220 1c->1f 6221 1f->1c */
+		{ CR107, 0x1c }, /* 6220 1c->1a 5221 1a->1c */
+		{ CR109, 0x13 },
+		{ CR110, 0x1f }, /* 6112 13->1f 6221 1f->13 6407 13->0x09 */
+		{ CR111, 0x13 }, { CR112, 0x1f }, { CR113, 0x27 },
+		{ CR114, 0x23 }, /* 6221 27->23 */
+		{ CR115, 0x24 }, /* 6112 24->1c 6220 1c->24 */
+		{ CR116, 0x24 }, /* 6220 1c->24 */
+		{ CR117, 0xfa }, /* 6112 fa->f8 6220 f8->f4 6220 f4->fa */
+		{ CR118, 0xf0 }, /* 5d07 6112 f0->f2 6220 f2->f0 */
+		{ CR119, 0x1a }, /* 6112 1a->10 6220 10->14 6220 14->1a */
+		{ CR120, 0x4f },
+		{ CR121, 0x1f }, /* 6220 4f->1f */
+		{ CR122, 0xf0 }, { CR123, 0x57 }, { CR125, 0xad },
+		{ CR126, 0x6c }, { CR127, 0x03 },
+		{ CR128, 0x14 }, /* 6302 12->11 */
+		{ CR129, 0x12 }, /* 6301 10->0f */
+		{ CR130, 0x10 }, { CR137, 0x50 }, { CR138, 0xa8 },
+		{ CR144, 0xac }, { CR146, 0x20 }, { CR252, 0xff },
+		{ CR253, 0xff },
+	};
+
+	static const u32 rv[] = {
+		UW2453_REGWRITE(4, 0x2b),    /* configure receiver gain */
+		UW2453_REGWRITE(5, 0x19e4f), /* configure transmitter gain */
+		UW2453_REGWRITE(6, 0xf81ad), /* enable RX/TX filter tuning */
+		UW2453_REGWRITE(7, 0x3fffe), /* disable TX gain in test mode */
+
+		/* enter CAL_FIL mode, TX gain set by registers, RX gain set by pins,
+		 * RSSI circuit powered down, reduced RSSI range */
+		UW2453_REGWRITE(0, 0x25f9c), /* 5d01 cal_fil */
+
+		/* synthesizer configuration for channel 1 */
+		UW2453_REGWRITE(1, 0x47),
+		UW2453_REGWRITE(2, 0x999),
+
+		/* disable manual VCO band selection */
+		UW2453_REGWRITE(3, 0x7602),
+
+		/* enable manual VCO band selection, configure current level */
+		UW2453_REGWRITE(3, 0x46063),
+	};
+
+	r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
+	if (r)
+		return r;
+
+	r = zd_rfwritev_locked(chip, rv, ARRAY_SIZE(rv), RF_RV_BITS);
+	if (r)
+		return r;
+
+	r = uw2453_init_mode(chip);
+	if (r)
+		return r;
+
+	/* Try all standard VCO configuration settings on channel 1; stop one
+	 * table short so that 'found_config + 1' below is always in range */
+	for (i = 0; i < ARRAY_SIZE(uw2453_std_vco_cfg) - 1; i++) {
+		/* Configure synthesizer for channel 1 */
+		r = uw2453_synth_set_channel(chip, 1, false);
+		if (r)
+			return r;
+
+		/* Write VCO config */
+		r = uw2453_write_vco_cfg(chip, uw2453_std_vco_cfg[i][0]);
+		if (r)
+			return r;
+
+		/* ack interrupt event */
+		r = zd_iowrite16_locked(chip, 0x0f, UW2453_INTR_REG);
+		if (r)
+			return r;
+
+		/* check interrupt status */
+		r = zd_ioread16_locked(chip, &intr_status, UW2453_INTR_REG);
+		if (r)
+			return r;
+
+		if (!(intr_status & 0xf)) {
+			dev_dbg_f(zd_chip_dev(chip),
+				"PLL locked on configuration %d\n", i);
+			found_config = i;
+			break;
+		}
+	}
+
+	if (found_config == -1) {
+		/* autocal */
+		dev_dbg_f(zd_chip_dev(chip),
+			"PLL did not lock, using autocal\n");
+
+		r = uw2453_synth_set_channel(chip, 1, true);
+		if (r)
+			return r;
+
+		r = uw2453_write_vco_cfg(chip, UW2453_AUTOCAL_VCO_CFG);
+		if (r)
+			return r;
+	}
+
+	/* To match the vendor driver behaviour, we use the configuration after
+	 * the one that produced a lock; -1 (autocal) is kept as-is. */
+	UW2453_PRIV(rf)->config = found_config == -1 ? -1 : found_config + 1;
+
+	return zd_iowrite16_locked(chip, 0x06, CR203);
+}
+
+static int uw2453_set_channel(struct zd_rf *rf, u8 channel)
+{
+	int r;
+	u16 vco_cfg;
+	int config = UW2453_PRIV(rf)->config;
+	bool autocal = (config == -1);
+	struct zd_chip *chip = zd_rf_to_chip(rf);
+
+	static const struct zd_ioreq16 ioreqs[] = {
+		{ CR80,  0x30 }, { CR81,  0x30 }, { CR79,  0x58 },
+		{ CR12,  0xf0 }, { CR77,  0x1b }, { CR78,  0x58 },
+	};
+
+	r = uw2453_synth_set_channel(chip, channel, autocal);
+	if (r)
+		return r;
+
+	if (autocal)
+		vco_cfg = UW2453_AUTOCAL_VCO_CFG;
+	else
+		vco_cfg = uw2453_std_vco_cfg[config][CHAN_TO_PAIRIDX(channel)];
+
+	r = uw2453_write_vco_cfg(chip, vco_cfg);
+	if (r)
+		return r;
+
+	r = uw2453_init_mode(chip);
+	if (r)
+		return r;
+
+	r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
+	if (r)
+		return r;
+
+	r = uw2453_set_tx_gain_level(chip, channel);
+	if (r)
+		return r;
+
+	return zd_iowrite16_locked(chip, 0x06, CR203);
+}
+
+static int uw2453_switch_radio_on(struct zd_rf *rf)
+{
+	int r;
+	struct zd_chip *chip = zd_rf_to_chip(rf);
+	struct zd_ioreq16 ioreqs[] = {
+		{ CR11,  0x00 }, { CR251, 0x3f },
+	};
+
+	/* enter RXTX mode */
+	r = zd_rfwrite_locked(chip, UW2453_REGWRITE(0, 0x25f94), RF_RV_BITS);
+	if (r)
+		return r;
+
+	if (chip->is_zd1211b)
+		ioreqs[1].value = 0x7f;
+
+	return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
+}
+
+static int uw2453_switch_radio_off(struct zd_rf *rf)
+{
+	int r;
+	struct zd_chip *chip = zd_rf_to_chip(rf);
+	static const struct zd_ioreq16 ioreqs[] = {
+		{ CR11,  0x04 }, { CR251, 0x2f },
+	};
+
+	/* enter IDLE mode */
+	/* FIXME: shouldn't we go to SLEEP? sent email to zydas */
+	r = zd_rfwrite_locked(chip, UW2453_REGWRITE(0, 0x25f90), RF_RV_BITS);
+	if (r)
+		return r;
+
+	return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
+}
+
+static void uw2453_clear(struct zd_rf *rf)
+{
+	kfree(rf->priv);
+}
+
+int zd_rf_init_uw2453(struct zd_rf *rf)
+{
+	rf->init_hw = uw2453_init_hw;
+	rf->set_channel = uw2453_set_channel;
+	rf->switch_radio_on = uw2453_switch_radio_on;
+	rf->switch_radio_off = uw2453_switch_radio_off;
+	rf->patch_6m_band_edge = zd_rf_generic_patch_6m;
+	rf->clear = uw2453_clear;
+	/* we have our own TX integration code */
+	rf->update_channel_int = 0;
+
+	rf->priv = kmalloc(sizeof(struct uw2453_priv), GFP_KERNEL);
+	if (rf->priv == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+
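Note: a worked example of the encoding helpers in this file, derived directly
from the macros above:

	u32 word = UW2453_REGWRITE(2, 0x999); /* (2 << 20) | 0x999 == 0x200999 */
	int pair = CHAN_TO_PAIRIDX(11);       /* (11 - 1) / 2 == 5; channel 12
					       * yields the same pair index */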
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 8459549..740a219 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -54,6 +54,7 @@
 	{ USB_DEVICE(0x0586, 0x3401), .driver_info = DEVICE_ZD1211 },
 	{ USB_DEVICE(0x14ea, 0xab13), .driver_info = DEVICE_ZD1211 },
 	{ USB_DEVICE(0x13b1, 0x001e), .driver_info = DEVICE_ZD1211 },
+	{ USB_DEVICE(0x0586, 0x3407), .driver_info = DEVICE_ZD1211 },
 	/* ZD1211B */
 	{ USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B },
 	{ USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B },
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index 6b76bab..a0ea435 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -842,12 +842,16 @@
 	PCMCIA_PFC_DEVICE_PROD_ID12(1, "Linksys", "EtherFast 10&100 + 56K PC Card (PCMLM56)", 0x0733cc81, 0xb3765033),
 	PCMCIA_PFC_DEVICE_PROD_ID12(1, "LINKSYS", "PCMLM336", 0xf7cb0b07, 0x7a821b58),
 	PCMCIA_PFC_DEVICE_PROD_ID12(1, "MEGAHERTZ", "XJEM1144/CCEM1144", 0xf510db04, 0x52d21e1e),
+	PCMCIA_PFC_DEVICE_PROD_ID12(1, "MICRO RESEARCH", "COMBO-L/M-336", 0xb2ced065, 0x3ced0555),
+	PCMCIA_PFC_DEVICE_PROD_ID12(1, "NEC", "PK-UG-J001" ,0x18df0ba0 ,0x831b1064),
 	PCMCIA_PFC_DEVICE_PROD_ID12(1, "Ositech", "Trumpcard:Jack of Diamonds Modem+Ethernet", 0xc2f80cd, 0x656947b9),
 	PCMCIA_PFC_DEVICE_PROD_ID12(1, "Ositech", "Trumpcard:Jack of Hearts Modem+Ethernet", 0xc2f80cd, 0xdc9ba5ed),
 	PCMCIA_PFC_DEVICE_PROD_ID12(1, "PCMCIAs", "ComboCard", 0xdcfe12d3, 0xcd8906cc),
 	PCMCIA_PFC_DEVICE_PROD_ID12(1, "PCMCIAs", "LanModem", 0xdcfe12d3, 0xc67c648f),
 	PCMCIA_PFC_DEVICE_PROD_ID12(1, "TDK", "GlobalNetworker 3410/3412", 0x1eae9475, 0xd9a93bed),
 	PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf),
+	PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05),
+	PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101),
 	PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070),
 	PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0101, 0x0562),
 	PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0104, 0x0070),
diff --git a/fs/jfs/endian24.h b/fs/jfs/endian24.h
index 79494c4..fa92f7f 100644
--- a/fs/jfs/endian24.h
+++ b/fs/jfs/endian24.h
@@ -29,7 +29,7 @@
 	__u32 __x = (x); \
 	((__u32)( \
 		((__x & (__u32)0x000000ffUL) << 16) | \
-		 (__x & (__u32)0x0000ff00UL)        | \
+		 (__x & (__u32)0x0000ff00UL)	    | \
 		((__x & (__u32)0x00ff0000UL) >> 16) )); \
 })
 
diff --git a/fs/jfs/jfs_debug.c b/fs/jfs/jfs_debug.c
index 9c5d596..887f575 100644
--- a/fs/jfs/jfs_debug.c
+++ b/fs/jfs/jfs_debug.c
@@ -26,34 +26,6 @@
 #include "jfs_filsys.h"
 #include "jfs_debug.h"
 
-#ifdef CONFIG_JFS_DEBUG
-void dump_mem(char *label, void *data, int length)
-{
-	int i, j;
-	int *intptr = data;
-	char *charptr = data;
-	char buf[10], line[80];
-
-	printk("%s: dump of %d bytes of data at 0x%p\n\n", label, length,
-	       data);
-	for (i = 0; i < length; i += 16) {
-		line[0] = 0;
-		for (j = 0; (j < 4) && (i + j * 4 < length); j++) {
-			sprintf(buf, " %08x", intptr[i / 4 + j]);
-			strcat(line, buf);
-		}
-		buf[0] = ' ';
-		buf[2] = 0;
-		for (j = 0; (j < 16) && (i + j < length); j++) {
-			buf[1] =
-			    isprint(charptr[i + j]) ? charptr[i + j] : '.';
-			strcat(line, buf);
-		}
-		printk("%s\n", line);
-	}
-}
-#endif
-
 #ifdef PROC_FS_JFS /* see jfs_debug.h */
 
 static struct proc_dir_entry *base;
diff --git a/fs/jfs/jfs_debug.h b/fs/jfs/jfs_debug.h
index 7378798..044c1e6 100644
--- a/fs/jfs/jfs_debug.h
+++ b/fs/jfs/jfs_debug.h
@@ -62,7 +62,6 @@
 
 extern int jfsloglevel;
 
-extern void dump_mem(char *label, void *data, int length);
 extern int jfs_txanchor_read(char *, char **, off_t, int, int *, void *);
 
 /* information message: e.g., configuration, major event */
@@ -94,7 +93,6 @@
  *	---------
  */
 #else				/* CONFIG_JFS_DEBUG */
-#define dump_mem(label,data,length) do {} while (0)
 #define ASSERT(p) do {} while (0)
 #define jfs_info(fmt, arg...) do {} while (0)
 #define jfs_debug(fmt, arg...) do {} while (0)
diff --git a/fs/jfs/jfs_dinode.h b/fs/jfs/jfs_dinode.h
index 40b2011..c387540 100644
--- a/fs/jfs/jfs_dinode.h
+++ b/fs/jfs/jfs_dinode.h
@@ -19,23 +19,23 @@
 #define _H_JFS_DINODE
 
 /*
- *      jfs_dinode.h: on-disk inode manager
+ *	jfs_dinode.h: on-disk inode manager
  */
 
-#define INODESLOTSIZE           128
-#define L2INODESLOTSIZE         7
-#define log2INODESIZE           9	/* log2(bytes per dinode) */
+#define INODESLOTSIZE		128
+#define L2INODESLOTSIZE		7
+#define log2INODESIZE		9	/* log2(bytes per dinode) */
 
 
 /*
- *      on-disk inode : 512 bytes
+ *	on-disk inode : 512 bytes
  *
  * note: align 64-bit fields on 8-byte boundary.
  */
 struct dinode {
 	/*
-	 *      I. base area (128 bytes)
-	 *      ------------------------
+	 *	I. base area (128 bytes)
+	 *	------------------------
 	 *
 	 * define generic/POSIX attributes
 	 */
@@ -70,16 +70,16 @@
 	__le32 di_acltype;	/* 4: Type of ACL */
 
 	/*
-	 *      Extension Areas.
+	 *	Extension Areas.
 	 *
-	 *      Historically, the inode was partitioned into 4 128-byte areas,
-	 *      the last 3 being defined as unions which could have multiple
-	 *      uses.  The first 96 bytes had been completely unused until
-	 *      an index table was added to the directory.  It is now more
-	 *      useful to describe the last 3/4 of the inode as a single
-	 *      union.  We would probably be better off redesigning the
-	 *      entire structure from scratch, but we don't want to break
-	 *      commonality with OS/2's JFS at this time.
+	 *	Historically, the inode was partitioned into 4 128-byte areas,
+	 *	the last 3 being defined as unions which could have multiple
+	 *	uses.  The first 96 bytes had been completely unused until
+	 *	an index table was added to the directory.  It is now more
+	 *	useful to describe the last 3/4 of the inode as a single
+	 *	union.  We would probably be better off redesigning the
+	 *	entire structure from scratch, but we don't want to break
+	 *	commonality with OS/2's JFS at this time.
 	 */
 	union {
 		struct {
@@ -95,7 +95,7 @@
 		} _dir;					/* (384) */
 #define di_dirtable	u._dir._table
 #define di_dtroot	u._dir._dtroot
-#define di_parent       di_dtroot.header.idotdot
+#define di_parent	di_dtroot.header.idotdot
 #define di_DASD		di_dtroot.header.DASD
 
 		struct {
@@ -127,14 +127,14 @@
 #define di_inlinedata	u._file._u2._special._u
 #define di_rdev		u._file._u2._special._u._rdev
 #define di_fastsymlink	u._file._u2._special._u._fastsymlink
-#define di_inlineea     u._file._u2._special._inlineea
+#define di_inlineea	u._file._u2._special._inlineea
 	} u;
 };
 
 /* extended mode bits (on-disk inode di_mode) */
-#define IFJOURNAL       0x00010000	/* journalled file */
-#define ISPARSE         0x00020000	/* sparse file enabled */
-#define INLINEEA        0x00040000	/* inline EA area free */
+#define IFJOURNAL	0x00010000	/* journalled file */
+#define ISPARSE		0x00020000	/* sparse file enabled */
+#define INLINEEA	0x00040000	/* inline EA area free */
 #define ISWAPFILE	0x00800000	/* file open for pager swap space */
 
 /* more extended mode bits: attributes for OS/2 */
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index f3b1ebb..e198506 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -154,12 +154,12 @@
  *		the in-core descriptor is initialized from disk.
  *
  * PARAMETERS:
- *      ipbmap	-  pointer to in-core inode for the block map.
+ *	ipbmap	- pointer to in-core inode for the block map.
  *
  * RETURN VALUES:
- *      0	- success
- *      -ENOMEM	- insufficient memory
- *      -EIO	- i/o error
+ *	0	- success
+ *	-ENOMEM	- insufficient memory
+ *	-EIO	- i/o error
  */
 int dbMount(struct inode *ipbmap)
 {
@@ -232,11 +232,11 @@
  *		the memory for this descriptor is freed.
  *
  * PARAMETERS:
- *      ipbmap	-  pointer to in-core inode for the block map.
+ *	ipbmap	- pointer to in-core inode for the block map.
  *
  * RETURN VALUES:
- *      0	- success
- *      -EIO	- i/o error
+ *	0	- success
+ *	-EIO	- i/o error
  */
 int dbUnmount(struct inode *ipbmap, int mounterror)
 {
@@ -320,13 +320,13 @@
  *		at a time.
  *
  * PARAMETERS:
- *      ip	-  pointer to in-core inode;
- *      blkno	-  starting block number to be freed.
- *      nblocks	-  number of blocks to be freed.
+ *	ip	- pointer to in-core inode;
+ *	blkno	- starting block number to be freed.
+ *	nblocks	- number of blocks to be freed.
  *
  * RETURN VALUES:
- *      0	- success
- *      -EIO	- i/o error
+ *	0	- success
+ *	-EIO	- i/o error
  */
 int dbFree(struct inode *ip, s64 blkno, s64 nblocks)
 {
@@ -395,23 +395,23 @@
 /*
  * NAME:	dbUpdatePMap()
  *
- * FUNCTION:    update the allocation state (free or allocate) of the
+ * FUNCTION:	update the allocation state (free or allocate) of the
  *		specified block range in the persistent block allocation map.
  *
  *		the blocks will be updated in the persistent map one
  *		dmap at a time.
  *
  * PARAMETERS:
- *      ipbmap	-  pointer to in-core inode for the block map.
- *      free	-  'true' if block range is to be freed from the persistent
- *		   map; 'false' if it is to   be allocated.
- *      blkno	-  starting block number of the range.
- *      nblocks	-  number of contiguous blocks in the range.
- *      tblk	-  transaction block;
+ *	ipbmap	- pointer to in-core inode for the block map.
+ *	free	- 'true' if block range is to be freed from the persistent
+ *		  map; 'false' if it is to be allocated.
+ *	blkno	- starting block number of the range.
+ *	nblocks	- number of contiguous blocks in the range.
+ *	tblk	- transaction block;
  *
  * RETURN VALUES:
- *      0	- success
- *      -EIO	- i/o error
+ *	0	- success
+ *	-EIO	- i/o error
  */
 int
 dbUpdatePMap(struct inode *ipbmap,
@@ -573,7 +573,7 @@
 /*
  * NAME:	dbNextAG()
  *
- * FUNCTION:    find the preferred allocation group for new allocations.
+ * FUNCTION:	find the preferred allocation group for new allocations.
  *
  *		Within the allocation groups, we maintain a preferred
  *		allocation group which consists of a group with at least
@@ -589,10 +589,10 @@
  *		empty ags around for large allocations.
  *
  * PARAMETERS:
- *      ipbmap	-  pointer to in-core inode for the block map.
+ *	ipbmap	- pointer to in-core inode for the block map.
  *
  * RETURN VALUES:
- *      the preferred allocation group number.
+ *	the preferred allocation group number.
  */
 int dbNextAG(struct inode *ipbmap)
 {
@@ -656,7 +656,7 @@
 /*
  * NAME:	dbAlloc()
  *
- * FUNCTION:    attempt to allocate a specified number of contiguous free
+ * FUNCTION:	attempt to allocate a specified number of contiguous free
  *		blocks from the working allocation block map.
  *
  *		the block allocation policy uses hints and a multi-step
@@ -680,16 +680,16 @@
  *		size or requests that specify no hint value.
  *
  * PARAMETERS:
- *      ip	-  pointer to in-core inode;
- *      hint	- allocation hint.
- *      nblocks	- number of contiguous blocks in the range.
- *      results	- on successful return, set to the starting block number
+ *	ip	- pointer to in-core inode;
+ *	hint	- allocation hint.
+ *	nblocks	- number of contiguous blocks in the range.
+ *	results	- on successful return, set to the starting block number
  *		  of the newly allocated contiguous range.
  *
  * RETURN VALUES:
- *      0	- success
- *      -ENOSPC	- insufficient disk resources
- *      -EIO	- i/o error
+ *	0	- success
+ *	-ENOSPC	- insufficient disk resources
+ *	-EIO	- i/o error
  */
 int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
 {
@@ -706,12 +706,6 @@
 	/* assert that nblocks is valid */
 	assert(nblocks > 0);
 
-#ifdef _STILL_TO_PORT
-	/* DASD limit check                                     F226941 */
-	if (OVER_LIMIT(ip, nblocks))
-		return -ENOSPC;
-#endif				/* _STILL_TO_PORT */
-
 	/* get the log2 number of blocks to be allocated.
 	 * if the number of blocks is not a log2 multiple,
 	 * it will be rounded up to the next log2 multiple.
@@ -720,7 +714,6 @@
 
 	bmp = JFS_SBI(ip->i_sb)->bmap;
 
-//retry:        /* serialize w.r.t.extendfs() */
 	mapSize = bmp->db_mapsize;
 
 	/* the hint should be within the map */
@@ -879,17 +872,17 @@
 /*
  * NAME:	dbAllocExact()
  *
- * FUNCTION:    try to allocate the requested extent;
+ * FUNCTION:	try to allocate the requested extent;
  *
  * PARAMETERS:
- *      ip	- pointer to in-core inode;
- *      blkno	- extent address;
- *      nblocks	- extent length;
+ *	ip	- pointer to in-core inode;
+ *	blkno	- extent address;
+ *	nblocks	- extent length;
  *
  * RETURN VALUES:
- *      0	- success
- *      -ENOSPC	- insufficient disk resources
- *      -EIO	- i/o error
+ *	0	- success
+ *	-ENOSPC	- insufficient disk resources
+ *	-EIO	- i/o error
  */
 int dbAllocExact(struct inode *ip, s64 blkno, int nblocks)
 {
@@ -946,7 +939,7 @@
 /*
  * NAME:	dbReAlloc()
  *
- * FUNCTION:    attempt to extend a current allocation by a specified
+ * FUNCTION:	attempt to extend a current allocation by a specified
  *		number of blocks.
  *
  *		this routine attempts to satisfy the allocation request
@@ -959,21 +952,21 @@
  *		number of blocks required.
  *
  * PARAMETERS:
- *      ip	    -  pointer to in-core inode requiring allocation.
- *      blkno	    -  starting block of the current allocation.
- *      nblocks	    -  number of contiguous blocks within the current
+ *	ip	    -  pointer to in-core inode requiring allocation.
+ *	blkno	    -  starting block of the current allocation.
+ *	nblocks	    -  number of contiguous blocks within the current
  *		       allocation.
- *      addnblocks  -  number of blocks to add to the allocation.
- *      results	-      on successful return, set to the starting block number
+ *	addnblocks  -  number of blocks to add to the allocation.
+ *	results	-      on successful return, set to the starting block number
  *		       of the existing allocation if the existing allocation
  *		       was extended in place or to a newly allocated contiguous
  *		       range if the existing allocation could not be extended
  *		       in place.
  *
  * RETURN VALUES:
- *      0	- success
- *      -ENOSPC	- insufficient disk resources
- *      -EIO	- i/o error
+ *	0	- success
+ *	-ENOSPC	- insufficient disk resources
+ *	-EIO	- i/o error
  */
 int
 dbReAlloc(struct inode *ip,
@@ -1004,7 +997,7 @@
 /*
  * NAME:	dbExtend()
  *
- * FUNCTION:    attempt to extend a current allocation by a specified
+ * FUNCTION:	attempt to extend a current allocation by a specified
  *		number of blocks.
  *
  *		this routine attempts to satisfy the allocation request
@@ -1013,16 +1006,16 @@
  *		immediately following the current allocation.
  *
  * PARAMETERS:
- *      ip	    -  pointer to in-core inode requiring allocation.
- *      blkno	    -  starting block of the current allocation.
- *      nblocks	    -  number of contiguous blocks within the current
+ *	ip	    -  pointer to in-core inode requiring allocation.
+ *	blkno	    -  starting block of the current allocation.
+ *	nblocks	    -  number of contiguous blocks within the current
  *		       allocation.
- *      addnblocks  -  number of blocks to add to the allocation.
+ *	addnblocks  -  number of blocks to add to the allocation.
  *
  * RETURN VALUES:
- *      0	- success
- *      -ENOSPC	- insufficient disk resources
- *      -EIO	- i/o error
+ *	0	- success
+ *	-ENOSPC	- insufficient disk resources
+ *	-EIO	- i/o error
  */
 static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks)
 {
@@ -1109,19 +1102,19 @@
 /*
  * NAME:	dbAllocNext()
  *
- * FUNCTION:    attempt to allocate the blocks of the specified block
+ * FUNCTION:	attempt to allocate the blocks of the specified block
  *		range within a dmap.
  *
  * PARAMETERS:
- *      bmp	-  pointer to bmap descriptor
- *      dp	-  pointer to dmap.
- *      blkno	-  starting block number of the range.
- *      nblocks	-  number of contiguous free blocks of the range.
+ *	bmp	-  pointer to bmap descriptor
+ *	dp	-  pointer to dmap.
+ *	blkno	-  starting block number of the range.
+ *	nblocks	-  number of contiguous free blocks of the range.
  *
  * RETURN VALUES:
- *      0	- success
- *      -ENOSPC	- insufficient disk resources
- *      -EIO	- i/o error
+ *	0	- success
+ *	-ENOSPC	- insufficient disk resources
+ *	-EIO	- i/o error
  *
  * serialization: IREAD_LOCK(ipbmap) held on entry/exit;
  */
@@ -1233,7 +1226,7 @@
 /*
  * NAME:	dbAllocNear()
  *
- * FUNCTION:    attempt to allocate a number of contiguous free blocks near
+ * FUNCTION:	attempt to allocate a number of contiguous free blocks near
  *		a specified block (hint) within a dmap.
  *
  *		starting with the dmap leaf that covers the hint, we'll
@@ -1242,18 +1235,18 @@
  *		the desired free space.
  *
  * PARAMETERS:
- *      bmp	-  pointer to bmap descriptor
- *      dp	-  pointer to dmap.
- *      blkno	-  block number to allocate near.
- *      nblocks	-  actual number of contiguous free blocks desired.
- *      l2nb	-  log2 number of contiguous free blocks desired.
- *      results	-  on successful return, set to the starting block number
+ *	bmp	-  pointer to bmap descriptor
+ *	dp	-  pointer to dmap.
+ *	blkno	-  block number to allocate near.
+ *	nblocks	-  actual number of contiguous free blocks desired.
+ *	l2nb	-  log2 number of contiguous free blocks desired.
+ *	results	-  on successful return, set to the starting block number
  *		   of the newly allocated range.
  *
  * RETURN VALUES:
- *      0	- success
- *      -ENOSPC	- insufficient disk resources
- *      -EIO	- i/o error
+ *	0	- success
+ *	-ENOSPC	- insufficient disk resources
+ *	-EIO	- i/o error
  *
  * serialization: IREAD_LOCK(ipbmap) held on entry/exit;
  */
@@ -1316,7 +1309,7 @@
 /*
  * NAME:	dbAllocAG()
  *
- * FUNCTION:    attempt to allocate the specified number of contiguous
+ * FUNCTION:	attempt to allocate the specified number of contiguous
  *		free blocks within the specified allocation group.
  *
  *		unless the allocation group size is equal to the number
@@ -1353,17 +1346,17 @@
  *		the allocation group.
  *
  * PARAMETERS:
- *      bmp	-  pointer to bmap descriptor
+ *	bmp	-  pointer to bmap descriptor
  *	agno	- allocation group number.
- *      nblocks	-  actual number of contiguous free blocks desired.
- *      l2nb	-  log2 number of contiguous free blocks desired.
- *      results	-  on successful return, set to the starting block number
+ *	nblocks	-  actual number of contiguous free blocks desired.
+ *	l2nb	-  log2 number of contiguous free blocks desired.
+ *	results	-  on successful return, set to the starting block number
  *		   of the newly allocated range.
  *
  * RETURN VALUES:
- *      0	- success
- *      -ENOSPC	- insufficient disk resources
- *      -EIO	- i/o error
+ *	0	- success
+ *	-ENOSPC	- insufficient disk resources
+ *	-EIO	- i/o error
  *
  * note: IWRITE_LOCK(ipmap) held on entry/exit;
  */
@@ -1546,7 +1539,7 @@
 /*
  * NAME:	dbAllocAny()
  *
- * FUNCTION:    attempt to allocate the specified number of contiguous
+ * FUNCTION:	attempt to allocate the specified number of contiguous
  *		free blocks anywhere in the file system.
  *
  *		dbAllocAny() attempts to find the sufficient free space by
@@ -1556,16 +1549,16 @@
  *		desired free space is allocated.
  *
  * PARAMETERS:
- *      bmp	-  pointer to bmap descriptor
- *      nblocks	 -  actual number of contiguous free blocks desired.
- *      l2nb	 -  log2 number of contiguous free blocks desired.
- *      results	-  on successful return, set to the starting block number
+ *	bmp	-  pointer to bmap descriptor
+ *	nblocks	 -  actual number of contiguous free blocks desired.
+ *	l2nb	 -  log2 number of contiguous free blocks desired.
+ *	results	-  on successful return, set to the starting block number
  *		   of the newly allocated range.
  *
  * RETURN VALUES:
- *      0	- success
- *      -ENOSPC	- insufficient disk resources
- *      -EIO	- i/o error
+ *	0	- success
+ *	-ENOSPC	- insufficient disk resources
+ *	-EIO	- i/o error
  *
  * serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
  */
@@ -1598,9 +1591,9 @@
 /*
  * NAME:	dbFindCtl()
  *
- * FUNCTION:    starting at a specified dmap control page level and block
+ * FUNCTION:	starting at a specified dmap control page level and block
  *		number, search down the dmap control levels for a range of
- *	        contiguous free blocks large enough to satisfy an allocation
+ *		contiguous free blocks large enough to satisfy an allocation
  *		request for the specified number of free blocks.
  *
  *		if sufficient contiguous free blocks are found, this routine
@@ -1609,17 +1602,17 @@
  *		is sufficient in size.
  *
  * PARAMETERS:
- *      bmp	-  pointer to bmap descriptor
- *      level	-  starting dmap control page level.
- *      l2nb	-  log2 number of contiguous free blocks desired.
- *      *blkno	-  on entry, starting block number for conducting the search.
+ *	bmp	-  pointer to bmap descriptor
+ *	level	-  starting dmap control page level.
+ *	l2nb	-  log2 number of contiguous free blocks desired.
+ *	*blkno	-  on entry, starting block number for conducting the search.
  *		   on successful return, the first block within a dmap page
  *		   that contains or starts a range of contiguous free blocks.
  *
  * RETURN VALUES:
- *      0	- success
- *      -ENOSPC	- insufficient disk resources
- *      -EIO	- i/o error
+ *	0	- success
+ *	-ENOSPC	- insufficient disk resources
+ *	-EIO	- i/o error
  *
  * serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
  */
@@ -1699,7 +1692,7 @@
 /*
  * NAME:	dbAllocCtl()
  *
- * FUNCTION:    attempt to allocate a specified number of contiguous
+ * FUNCTION:	attempt to allocate a specified number of contiguous
  *		blocks starting within a specific dmap.
  *
  *		this routine is called by higher level routines that search
@@ -1726,18 +1719,18 @@
  *		first dmap (i.e. blkno).
  *
  * PARAMETERS:
- *      bmp	-  pointer to bmap descriptor
- *      nblocks	 -  actual number of contiguous free blocks to allocate.
- *      l2nb	 -  log2 number of contiguous free blocks to allocate.
- *      blkno	 -  starting block number of the dmap to start the allocation
+ *	bmp	-  pointer to bmap descriptor
+ *	nblocks	 -  actual number of contiguous free blocks to allocate.
+ *	l2nb	 -  log2 number of contiguous free blocks to allocate.
+ *	blkno	 -  starting block number of the dmap to start the allocation
  *		    from.
- *      results	-  on successful return, set to the starting block number
+ *	results	-  on successful return, set to the starting block number
  *		   of the newly allocated range.
  *
  * RETURN VALUES:
- *      0	- success
- *      -ENOSPC	- insufficient disk resources
- *      -EIO	- i/o error
+ *	0	- success
+ *	-ENOSPC	- insufficient disk resources
+ *	-EIO	- i/o error
  *
  * serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
  */
@@ -1870,7 +1863,7 @@
 /*
  * NAME:	dbAllocDmapLev()
  *
- * FUNCTION:    attempt to allocate a specified number of contiguous blocks
+ * FUNCTION:	attempt to allocate a specified number of contiguous blocks
  *		from a specified dmap.
  *
  *		this routine checks if the contiguous blocks are available.
@@ -1878,17 +1871,17 @@
  *		returned.
  *
  * PARAMETERS:
- *      mp	-  pointer to bmap descriptor
- *      dp	-  pointer to dmap to attempt to allocate blocks from.
- *      l2nb	-  log2 number of contiguous block desired.
- *      nblocks	-  actual number of contiguous block desired.
- *      results	-  on successful return, set to the starting block number
+ *	bmp	-  pointer to bmap descriptor
+ *	dp	-  pointer to dmap to attempt to allocate blocks from.
+ *	l2nb	-  log2 number of contiguous blocks desired.
+ *	nblocks	-  actual number of contiguous blocks desired.
+ *	results	-  on successful return, set to the starting block number
  *		   of the newly allocated range.
  *
  * RETURN VALUES:
- *      0	- success
- *      -ENOSPC	- insufficient disk resources
- *      -EIO	- i/o error
+ *	0	- success
+ *	-ENOSPC	- insufficient disk resources
+ *	-EIO	- i/o error
  *
  * serialization: IREAD_LOCK(ipbmap), e.g., from dbAlloc(), or
  *	IWRITE_LOCK(ipbmap), e.g., dbAllocCtl(), held on entry/exit;
@@ -1933,7 +1926,7 @@
 /*
  * NAME:	dbAllocDmap()
  *
- * FUNCTION:    adjust the disk allocation map to reflect the allocation
+ * FUNCTION:	adjust the disk allocation map to reflect the allocation
  *		of a specified block range within a dmap.
  *
  *		this routine allocates the specified blocks from the dmap
@@ -1946,14 +1939,14 @@
  *		covers this dmap.
  *
  * PARAMETERS:
- *      bmp	-  pointer to bmap descriptor
- *      dp	-  pointer to dmap to allocate the block range from.
- *      blkno	-  starting block number of the block to be allocated.
- *      nblocks	-  number of blocks to be allocated.
+ *	bmp	-  pointer to bmap descriptor
+ *	dp	-  pointer to dmap to allocate the block range from.
+ *	blkno	-  starting block number of the block to be allocated.
+ *	nblocks	-  number of blocks to be allocated.
  *
  * RETURN VALUES:
- *      0	- success
- *      -EIO	- i/o error
+ *	0	- success
+ *	-EIO	- i/o error
  *
  * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
  */
@@ -1989,7 +1982,7 @@
 /*
  * NAME:	dbFreeDmap()
  *
- * FUNCTION:    adjust the disk allocation map to reflect the allocation
+ * FUNCTION:	adjust the disk allocation map to reflect the freeing
  *		of a specified block range within a dmap.
  *
  *		this routine frees the specified blocks from the dmap through
@@ -1997,18 +1990,18 @@
  *		causes the maximum string of free blocks within the dmap to
  *		change (i.e. the value of the root of the dmap's dmtree), this
  *		routine will cause this change to be reflected up through the
- *	        appropriate levels of the dmap control pages by a call to
+ *		appropriate levels of the dmap control pages by a call to
  *		dbAdjCtl() for the L0 dmap control page that covers this dmap.
  *
  * PARAMETERS:
- *      bmp	-  pointer to bmap descriptor
- *      dp	-  pointer to dmap to free the block range from.
- *      blkno	-  starting block number of the block to be freed.
- *      nblocks	-  number of blocks to be freed.
+ *	bmp	-  pointer to bmap descriptor
+ *	dp	-  pointer to dmap to free the block range from.
+ *	blkno	-  starting block number of the block to be freed.
+ *	nblocks	-  number of blocks to be freed.
  *
  * RETURN VALUES:
- *      0	- success
- *      -EIO	- i/o error
+ *	0	- success
+ *	-EIO	- i/o error
  *
  * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
  */
@@ -2055,7 +2048,7 @@
 /*
  * NAME:	dbAllocBits()
  *
- * FUNCTION:    allocate a specified block range from a dmap.
+ * FUNCTION:	allocate a specified block range from a dmap.
  *
  *		this routine updates the dmap to reflect the working
  *		state allocation of the specified block range. it directly
@@ -2065,10 +2058,10 @@
  *		dmap's dmtree, as a whole, to reflect the allocated range.
  *
  * PARAMETERS:
- *      bmp	-  pointer to bmap descriptor
- *      dp	-  pointer to dmap to allocate bits from.
- *      blkno	-  starting block number of the bits to be allocated.
- *      nblocks	-  number of bits to be allocated.
+ *	bmp	-  pointer to bmap descriptor
+ *	dp	-  pointer to dmap to allocate bits from.
+ *	blkno	-  starting block number of the bits to be allocated.
+ *	nblocks	-  number of bits to be allocated.
  *
  * RETURN VALUES: none
  *
@@ -2149,7 +2142,7 @@
 			 * the allocated words.
 			 */
 			for (; nwords > 0; nwords -= nw) {
-			        if (leaf[word] < BUDMIN) {
+				if (leaf[word] < BUDMIN) {
 					jfs_error(bmp->db_ipbmap->i_sb,
 						  "dbAllocBits: leaf page "
 						  "corrupt");
@@ -2202,7 +2195,7 @@
 /*
  * NAME:	dbFreeBits()
  *
- * FUNCTION:    free a specified block range from a dmap.
+ * FUNCTION:	free a specified block range from a dmap.
  *
  *		this routine updates the dmap to reflect the working
  *		state allocation of the specified block range. it directly
@@ -2212,10 +2205,10 @@
  *		dmtree, as a whole, to reflect the deallocated range.
  *
  * PARAMETERS:
- *      bmp	-  pointer to bmap descriptor
- *      dp	-  pointer to dmap to free bits from.
- *      blkno	-  starting block number of the bits to be freed.
- *      nblocks	-  number of bits to be freed.
+ *	bmp	-  pointer to bmap descriptor
+ *	dp	-  pointer to dmap to free bits from.
+ *	blkno	-  starting block number of the bits to be freed.
+ *	nblocks	-  number of bits to be freed.
  *
  * RETURN VALUES: 0 for success
  *
@@ -2388,19 +2381,19 @@
  *		the new root value and the next dmap control page level to
  *		be adjusted.
  * PARAMETERS:
- *      bmp	-  pointer to bmap descriptor
- *      blkno	-  the first block of a block range within a dmap.  it is
+ *	bmp	-  pointer to bmap descriptor
+ *	blkno	-  the first block of a block range within a dmap.  it is
  *		   the allocation or deallocation of this block range that
  *		   requires the dmap control page to be adjusted.
- *      newval	-  the new value of the lower level dmap or dmap control
+ *	newval	-  the new value of the lower level dmap or dmap control
  *		   page root.
- *      alloc	-  'true' if adjustment is due to an allocation.
- *      level	-  current level of dmap control page (i.e. L0, L1, L2) to
+ *	alloc	-  'true' if adjustment is due to an allocation.
+ *	level	-  current level of dmap control page (i.e. L0, L1, L2) to
  *		   be adjusted.
  *
  * RETURN VALUES:
- *      0	- success
- *      -EIO	- i/o error
+ *	0	- success
+ *	-EIO	- i/o error
  *
  * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
  */
@@ -2544,16 +2537,16 @@
 /*
  * NAME:	dbSplit()
  *
- * FUNCTION:    update the leaf of a dmtree with a new value, splitting
+ * FUNCTION:	update the leaf of a dmtree with a new value, splitting
  *		the leaf from the binary buddy system of the dmtree's
  *		leaves, as required.
  *
  * PARAMETERS:
- *      tp	- pointer to the tree containing the leaf.
- *      leafno	- the number of the leaf to be updated.
- *      splitsz	- the size the binary buddy system starting at the leaf
+ *	tp	- pointer to the tree containing the leaf.
+ *	leafno	- the number of the leaf to be updated.
+ *	splitsz	- the size the binary buddy system starting at the leaf
  *		  must be split to, specified as the log2 number of blocks.
- *      newval	- the new value for the leaf.
+ *	newval	- the new value for the leaf.
  *
  * RETURN VALUES: none
  *
@@ -2600,7 +2593,7 @@
 /*
  * NAME:	dbBackSplit()
  *
- * FUNCTION:    back split the binary buddy system of dmtree leaves
+ * FUNCTION:	back split the binary buddy system of dmtree leaves
  *		that hold a specified leaf until the specified leaf
  *		starts its own binary buddy system.
  *
@@ -2617,8 +2610,8 @@
  *		in which a previous join operation must be backed out.
  *
  * PARAMETERS:
- *      tp	- pointer to the tree containing the leaf.
- *      leafno	- the number of the leaf to be updated.
+ *	tp	- pointer to the tree containing the leaf.
+ *	leafno	- the number of the leaf to be updated.
  *
  * RETURN VALUES: none
  *
@@ -2692,14 +2685,14 @@
 /*
  * NAME:	dbJoin()
  *
- * FUNCTION:    update the leaf of a dmtree with a new value, joining
+ * FUNCTION:	update the leaf of a dmtree with a new value, joining
  *		the leaf with other leaves of the dmtree into a multi-leaf
  *		binary buddy system, as required.
  *
  * PARAMETERS:
- *      tp	- pointer to the tree containing the leaf.
- *      leafno	- the number of the leaf to be updated.
- *      newval	- the new value for the leaf.
+ *	tp	- pointer to the tree containing the leaf.
+ *	leafno	- the number of the leaf to be updated.
+ *	newval	- the new value for the leaf.
  *
  * RETURN VALUES: none
  */
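The join rule reads most easily as code. A minimal sketch, assuming a
flat array of leaf values (each the log2 size of a free string), a budmin
of zero, and a hypothetical NOJOIN marker for a leaf absorbed by its
buddy; this is an illustration, not the kernel implementation:

	#define NOJOIN	(-1)	/* leaf absorbed into a larger buddy */

	static void join_buddies_sketch(signed char *leaf, int l2nleafs,
					int leafno, int newval)
	{
		while (newval < l2nleafs) {
			int budsz = 1 << newval;	/* buddy system size, in leaves */
			int buddy = leafno ^ budsz;	/* unique buddy of that system */

			if (leaf[buddy] != newval)	/* buddy not equally free */
				break;

			if (buddy < leafno) {		/* lower-numbered leaf survives */
				leaf[leafno] = NOJOIN;
				leafno = buddy;
			} else {
				leaf[buddy] = NOJOIN;
			}
			newval++;			/* joined string doubles */
		}
		leaf[leafno] = newval;
	}

dbJoin() itself finishes by propagating the surviving leaf's new value up
the tree via dbAdjTree().
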
@@ -2785,15 +2778,15 @@
 /*
  * NAME:	dbAdjTree()
  *
- * FUNCTION:    update a leaf of a dmtree with a new value, adjusting
+ * FUNCTION:	update a leaf of a dmtree with a new value, adjusting
  *		the dmtree, as required, to reflect the new leaf value.
  *		the combination of any buddies must already be done before
  *		this is called.
  *
  * PARAMETERS:
- *      tp	- pointer to the tree to be adjusted.
- *      leafno	- the number of the leaf to be updated.
- *      newval	- the new value for the leaf.
+ *	tp	- pointer to the tree to be adjusted.
+ *	leafno	- the number of the leaf to be updated.
+ *	newval	- the new value for the leaf.
  *
  * RETURN VALUES: none
  */
@@ -2852,7 +2845,7 @@
 /*
  * NAME:	dbFindLeaf()
  *
- * FUNCTION:    search a dmtree_t for sufficient free blocks, returning
+ * FUNCTION:	search a dmtree_t for sufficient free blocks, returning
  *		the index of a leaf describing the free blocks if
  *		sufficient free blocks are found.
  *
@@ -2861,15 +2854,15 @@
  *		free space.
  *
  * PARAMETERS:
- *      tp	- pointer to the tree to be searched.
- *      l2nb	- log2 number of free blocks to search for.
+ *	tp	- pointer to the tree to be searched.
+ *	l2nb	- log2 number of free blocks to search for.
  *	leafidx	- return pointer to be set to the index of the leaf
  *		  describing at least l2nb free blocks if sufficient
  *		  free blocks are found.
  *
  * RETURN VALUES:
- *      0	- success
- *      -ENOSPC	- insufficient free blocks.
+ *	0	- success
+ *	-ENOSPC	- insufficient free blocks.
  */
 static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx)
 {
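The descent itself is short. A hedged sketch over a simplified binary
max-tree packed into an array (the real dmtree fans out four children per
node); every internal node holds the max of its children, so any node
valued >= l2nb is guaranteed to lead to a satisfying leaf:

	#include <errno.h>	/* for -ENOSPC in a standalone build */

	static int find_leaf_sketch(const signed char *tree, int height,
				    int l2nb, int *leafidx)
	{
		int ti = 0;	/* root of the array-packed tree */
		int level;

		if (tree[ti] < l2nb)	/* nothing big enough anywhere */
			return -ENOSPC;

		for (level = 0; level < height; level++) {
			int left = 2 * ti + 1;	/* children at 2i+1, 2i+2 */

			ti = (tree[left] >= l2nb) ? left : left + 1;
		}
		*leafidx = ti - ((1 << height) - 1);	/* zero-origin leaf */
		return 0;
	}
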
@@ -2916,18 +2909,18 @@
 /*
  * NAME:	dbFindBits()
  *
- * FUNCTION:    find a specified number of binary buddy free bits within a
+ * FUNCTION:	find a specified number of binary buddy free bits within a
  *		dmap bitmap word value.
  *
  *		this routine searches the bitmap value for (1 << l2nb) free
  *		bits at (1 << l2nb) alignments within the value.
  *
  * PARAMETERS:
- *      word	-  dmap bitmap word value.
- *      l2nb	-  number of free bits specified as a log2 number.
+ *	word	-  dmap bitmap word value.
+ *	l2nb	-  number of free bits specified as a log2 number.
  *
  * RETURN VALUES:
- *      starting bit number of free bits.
+ *	starting bit number of free bits.
  */
 static int dbFindBits(u32 word, int l2nb)
 {
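A hedged standalone version of that aligned search (bits numbered
LSB-first here for brevity, whereas the dmap numbers them from the most
significant end; a zero bit is taken to mean free):

	/* Find (1 << l2nb) free bits at a (1 << l2nb)-aligned offset in a
	 * 32-bit word; returns the starting bit, or -1 if no such field.
	 */
	static int find_aligned_free_bits(u32 word, int l2nb)
	{
		int nb = 1 << l2nb;	/* field width, a power of two */
		u32 mask = (nb == 32) ? ~0u : ((1u << nb) - 1);
		int bit;

		for (bit = 0; bit < 32; bit += nb)	/* aligned offsets only */
			if (((word >> bit) & mask) == 0)
				return bit;
		return -1;
	}
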
@@ -2963,14 +2956,14 @@
 /*
  * NAME:	dbMaxBud(u8 *cp)
  *
- * FUNCTION:    determine the largest binary buddy string of free
+ * FUNCTION:	determine the largest binary buddy string of free
  *		bits within 32-bits of the map.
  *
  * PARAMETERS:
- *      cp	-  pointer to the 32-bit value.
+ *	cp	-  pointer to the 32-bit value.
  *
  * RETURN VALUES:
- *      largest binary buddy of free bits within a dmap word.
+ *	largest binary buddy of free bits within a dmap word.
  */
 static int dbMaxBud(u8 * cp)
 {
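Building on the aligned-search sketch above, the largest-buddy question
reduces to trying field sizes from largest to smallest (again a sketch;
the kernel version short-circuits on all-free words and half-words and
then uses a lookup table on the four bytes of the word):

	/* Largest binary-buddy string of free bits in a word, as the log2
	 * of its size; -1 if the word has no aligned free field at all.
	 */
	static int max_buddy_sketch(u32 word)
	{
		int l2nb;

		for (l2nb = 5; l2nb >= 0; l2nb--)	/* 32, 16, ..., 1 bits */
			if (find_aligned_free_bits(word, l2nb) >= 0)
				return l2nb;
		return -1;
	}
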
@@ -3000,14 +2993,14 @@
 /*
  * NAME:	cnttz(uint word)
  *
- * FUNCTION:    determine the number of trailing zeros within a 32-bit
+ * FUNCTION:	determine the number of trailing zeros within a 32-bit
  *		value.
  *
  * PARAMETERS:
- *      value	-  32-bit value to be examined.
+ *	value	-  32-bit value to be examined.
  *
  * RETURN VALUES:
- *      count of trailing zeros
+ *	count of trailing zeros
  */
 static int cnttz(u32 word)
 {
@@ -3025,14 +3018,14 @@
 /*
  * NAME:	cntlz(u32 value)
  *
- * FUNCTION:    determine the number of leading zeros within a 32-bit
+ * FUNCTION:	determine the number of leading zeros within a 32-bit
  *		value.
  *
  * PARAMETERS:
- *      value	-  32-bit value to be examined.
+ *	value	-  32-bit value to be examined.
  *
  * RETURN VALUES:
- *      count of leading zeros
+ *	count of leading zeros
  */
 static int cntlz(u32 value)
 {
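Both counts have obvious portable fallbacks; hedged sketches follow
(kernel code today would reach for __ffs()/fls() or compiler builtins
rather than open-coding the loops):

	static int cnttz_sketch(u32 word)	/* trailing zeros; 32 if word == 0 */
	{
		int n;

		for (n = 0; n < 32; n++, word >>= 1)
			if (word & 1)
				break;
		return n;
	}

	static int cntlz_sketch(u32 value)	/* leading zeros; 32 if value == 0 */
	{
		int n;

		for (n = 0; n < 32; n++, value <<= 1)
			if (value & 0x80000000u)
				break;
		return n;
	}
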
@@ -3050,14 +3043,14 @@
  * NAME:	blkstol2(s64 nb)
  *
  * FUNCTION:	convert a block count to its log2 value. if the block
- *	        count is not a l2 multiple, it is rounded up to the next
+ *		count is not a l2 multiple, it is rounded up to the next
  *		larger l2 multiple.
  *
  * PARAMETERS:
- *      nb	-  number of blocks
+ *	nb	-  number of blocks
  *
  * RETURN VALUES:
- *      log2 number of blocks
+ *	log2 number of blocks
  */
 static int blkstol2(s64 nb)
 {
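Equivalently, in a hedged standalone form (s64 as in <linux/types.h>; nb
assumed positive):

	/* log2 of nb, rounded up when nb is not a power of two:
	 * blkstol2_sketch(1) == 0, (8) == 3, (9) == 4.
	 */
	static int blkstol2_sketch(s64 nb)
	{
		s64 m = 1;
		int l2nb = 0;

		while (m < nb) {	/* first power of two >= nb */
			m <<= 1;
			l2nb++;
		}
		return l2nb;
	}
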
@@ -3099,13 +3092,13 @@
  *		at a time.
  *
  * PARAMETERS:
- *      ip	-  pointer to in-core inode;
- *      blkno	-  starting block number to be freed.
- *      nblocks	-  number of blocks to be freed.
+ *	ip	-  pointer to in-core inode;
+ *	blkno	-  starting block number to be freed.
+ *	nblocks	-  number of blocks to be freed.
  *
  * RETURN VALUES:
- *      0	- success
- *      -EIO	- i/o error
+ *	0	- success
+ *	-EIO	- i/o error
  */
 int dbAllocBottomUp(struct inode *ip, s64 blkno, s64 nblocks)
 {
@@ -3278,10 +3271,10 @@
  * L2
  *  |
  *   L1---------------------------------L1
- *    |                                  |
- *     L0---------L0---------L0           L0---------L0---------L0
- *      |          |          |            |          |          |
- *       d0,...,dn  d0,...,dn  d0,...,dn    d0,...,dn  d0,...,dn  d0,.,dm;
+ *    |					 |
+ *     L0---------L0---------L0		  L0---------L0---------L0
+ *      |	   |	      |		   |	      |		 |
+ *	 d0,...,dn  d0,...,dn  d0,...,dn    d0,...,dn  d0,...,dn  d0,.,dm;
  * L2L1L0d0,...,dnL0d0,...,dnL0d0,...,dnL1L0d0,...,dnL0d0,...,dnL0d0,..dm
  *
  * <---old---><----------------------------extend----------------------->
@@ -3307,7 +3300,7 @@
 		 (long long) blkno, (long long) nblocks, (long long) newsize);
 
 	/*
-	 *      initialize bmap control page.
+	 *	initialize bmap control page.
 	 *
 	 * all the data in bmap control page should exclude
 	 * the mkfs hidden dmap page.
@@ -3330,7 +3323,7 @@
 	bmp->db_numag += ((u32) newsize % (u32) bmp->db_agsize) ? 1 : 0;
 
 	/*
-	 *      reconfigure db_agfree[]
+	 *	reconfigure db_agfree[]
 	 * from old AG configuration to new AG configuration;
 	 *
 	 * coalesce contiguous k (newAGSize/oldAGSize) AGs;
@@ -3362,7 +3355,7 @@
 	bmp->db_maxag = bmp->db_maxag / k;
 
 	/*
-	 *      extend bmap
+	 *	extend bmap
 	 *
 	 * update bit maps and corresponding level control pages;
 	 * global control page db_nfree, db_agfree[agno], db_maxfreebud;
@@ -3410,7 +3403,7 @@
 			/* compute start L0 */
 			j = 0;
 			l1leaf = l1dcp->stree + CTLLEAFIND;
-			p += nbperpage;	/* 1st L0 of L1.k  */
+			p += nbperpage;	/* 1st L0 of L1.k */
 		}
 
 		/*
@@ -3548,7 +3541,7 @@
 	return -EIO;
 
 	/*
-	 *      finalize bmap control page
+	 *	finalize bmap control page
 	 */
 finalize:
 
@@ -3567,7 +3560,7 @@
 	int i, n;
 
 	/*
-	 *      finalize bmap control page
+	 *	finalize bmap control page
 	 */
 //finalize:
 	/*
@@ -3953,8 +3946,8 @@
  * convert number of map pages to the zero origin top dmapctl level
  */
 #define BMAPPGTOLEV(npages)	\
-	(((npages) <= 3 + MAXL0PAGES) ? 0 \
-       : ((npages) <= 2 + MAXL1PAGES) ? 1 : 2)
+	(((npages) <= 3 + MAXL0PAGES) ? 0 : \
+	 ((npages) <= 2 + MAXL1PAGES) ? 1 : 2)
 
 s64 dbMapFileSizeToMapSize(struct inode * ipbmap)
 {
@@ -3981,8 +3974,8 @@
 		factor =
 		    (i == 2) ? MAXL1PAGES : ((i == 1) ? MAXL0PAGES : 1);
 		complete = (u32) npages / factor;
-		ndmaps += complete * ((i == 2) ? LPERCTL * LPERCTL
-				      : ((i == 1) ? LPERCTL : 1));
+		ndmaps += complete * ((i == 2) ? LPERCTL * LPERCTL :
+				      ((i == 1) ? LPERCTL : 1));
 
 		/* pages in last/incomplete child */
 		npages = (u32) npages % factor;
diff --git a/fs/jfs/jfs_dmap.h b/fs/jfs/jfs_dmap.h
index 45ea454..11e6d47 100644
--- a/fs/jfs/jfs_dmap.h
+++ b/fs/jfs/jfs_dmap.h
@@ -83,7 +83,7 @@
  *	- 1 is added to account for the control page of the map.
  */
 #define BLKTODMAP(b,s)    \
-        ((((b) >> 13) + ((b) >> 23) + ((b) >> 33) + 3 + 1) << (s))
+	((((b) >> 13) + ((b) >> 23) + ((b) >> 33) + 3 + 1) << (s))
 
 /*
  * convert disk block number to the logical block number of the LEVEL 0
@@ -98,7 +98,7 @@
  *	- 1 is added to account for the control page of the map.
  */
 #define BLKTOL0(b,s)      \
-        (((((b) >> 23) << 10) + ((b) >> 23) + ((b) >> 33) + 2 + 1) << (s))
+	(((((b) >> 23) << 10) + ((b) >> 23) + ((b) >> 33) + 2 + 1) << (s))
 
 /*
  * convert disk block number to the logical block number of the LEVEL 1
@@ -120,7 +120,7 @@
  * at the specified level which describes the disk block.
  */
 #define BLKTOCTL(b,s,l)   \
-        (((l) == 2) ? 1 : ((l) == 1) ? BLKTOL1((b),(s)) : BLKTOL0((b),(s)))
+	(((l) == 2) ? 1 : ((l) == 1) ? BLKTOL1((b),(s)) : BLKTOL0((b),(s)))
 
 /*
  * convert aggregate map size to the zero origin dmapctl level of the
@@ -145,27 +145,27 @@
  * dmaptree must be consistent with dmapctl.
  */
 struct dmaptree {
-	__le32 nleafs;		/* 4: number of tree leafs      */
-	__le32 l2nleafs;	/* 4: l2 number of tree leafs   */
-	__le32 leafidx;		/* 4: index of first tree leaf  */
-	__le32 height;		/* 4: height of the tree        */
+	__le32 nleafs;		/* 4: number of tree leafs	*/
+	__le32 l2nleafs;	/* 4: l2 number of tree leafs	*/
+	__le32 leafidx;		/* 4: index of first tree leaf	*/
+	__le32 height;		/* 4: height of the tree	*/
 	s8 budmin;		/* 1: min l2 tree leaf value to combine */
-	s8 stree[TREESIZE];	/* TREESIZE: tree               */
-	u8 pad[2];		/* 2: pad to word boundary      */
-};				/* - 360 -                      */
+	s8 stree[TREESIZE];	/* TREESIZE: tree		*/
+	u8 pad[2];		/* 2: pad to word boundary	*/
+};				/* - 360 -			*/
 
 /*
  *	dmap page per 8K blocks bitmap
  */
 struct dmap {
-	__le32 nblocks;		/* 4: num blks covered by this dmap     */
-	__le32 nfree;		/* 4: num of free blks in this dmap     */
-	__le64 start;		/* 8: starting blkno for this dmap      */
-	struct dmaptree tree;	/* 360: dmap tree                       */
-	u8 pad[1672];		/* 1672: pad to 2048 bytes              */
-	__le32 wmap[LPERDMAP];	/* 1024: bits of the working map        */
-	__le32 pmap[LPERDMAP];	/* 1024: bits of the persistent map     */
-};				/* - 4096 -                             */
+	__le32 nblocks;		/* 4: num blks covered by this dmap	*/
+	__le32 nfree;		/* 4: num of free blks in this dmap	*/
+	__le64 start;		/* 8: starting blkno for this dmap	*/
+	struct dmaptree tree;	/* 360: dmap tree			*/
+	u8 pad[1672];		/* 1672: pad to 2048 bytes		*/
+	__le32 wmap[LPERDMAP];	/* 1024: bits of the working map	*/
+	__le32 pmap[LPERDMAP];	/* 1024: bits of the persistent map	*/
+};				/* - 4096 -				*/
 
 /*
  *	disk map control page per level.
@@ -173,14 +173,14 @@
  * dmapctl must be consistent with dmaptree.
  */
 struct dmapctl {
-	__le32 nleafs;		/* 4: number of tree leafs      */
-	__le32 l2nleafs;	/* 4: l2 number of tree leafs   */
-	__le32 leafidx;		/* 4: index of the first tree leaf      */
-	__le32 height;		/* 4: height of tree            */
-	s8 budmin;		/* 1: minimum l2 tree leaf value        */
-	s8 stree[CTLTREESIZE];	/* CTLTREESIZE: dmapctl tree    */
-	u8 pad[2714];		/* 2714: pad to 4096            */
-};				/* - 4096 -                     */
+	__le32 nleafs;		/* 4: number of tree leafs	*/
+	__le32 l2nleafs;	/* 4: l2 number of tree leafs	*/
+	__le32 leafidx;		/* 4: index of the first tree leaf	*/
+	__le32 height;		/* 4: height of tree		*/
+	s8 budmin;		/* 1: minimum l2 tree leaf value	*/
+	s8 stree[CTLTREESIZE];	/* CTLTREESIZE: dmapctl tree	*/
+	u8 pad[2714];		/* 2714: pad to 4096		*/
+};				/* - 4096 -			*/
 
 /*
  *	common definition for dmaptree within dmap and dmapctl
@@ -202,41 +202,41 @@
  *	on-disk aggregate disk allocation map descriptor.
  */
 struct dbmap_disk {
-	__le64 dn_mapsize;	/* 8: number of blocks in aggregate     */
-	__le64 dn_nfree;	/* 8: num free blks in aggregate map    */
-	__le32 dn_l2nbperpage;	/* 4: number of blks per page           */
-	__le32 dn_numag;	/* 4: total number of ags               */
-	__le32 dn_maxlevel;	/* 4: number of active ags              */
-	__le32 dn_maxag;	/* 4: max active alloc group number     */
-	__le32 dn_agpref;	/* 4: preferred alloc group (hint)      */
-	__le32 dn_aglevel;	/* 4: dmapctl level holding the AG      */
-	__le32 dn_agheigth;	/* 4: height in dmapctl of the AG       */
-	__le32 dn_agwidth;	/* 4: width in dmapctl of the AG        */
-	__le32 dn_agstart;	/* 4: start tree index at AG height     */
-	__le32 dn_agl2size;	/* 4: l2 num of blks per alloc group    */
-	__le64 dn_agfree[MAXAG];/* 8*MAXAG: per AG free count           */
-	__le64 dn_agsize;	/* 8: num of blks per alloc group       */
-	s8 dn_maxfreebud;	/* 1: max free buddy system             */
-	u8 pad[3007];		/* 3007: pad to 4096                    */
-};				/* - 4096 -                             */
+	__le64 dn_mapsize;	/* 8: number of blocks in aggregate	*/
+	__le64 dn_nfree;	/* 8: num free blks in aggregate map	*/
+	__le32 dn_l2nbperpage;	/* 4: number of blks per page		*/
+	__le32 dn_numag;	/* 4: total number of ags		*/
+	__le32 dn_maxlevel;	/* 4: number of active ags		*/
+	__le32 dn_maxag;	/* 4: max active alloc group number	*/
+	__le32 dn_agpref;	/* 4: preferred alloc group (hint)	*/
+	__le32 dn_aglevel;	/* 4: dmapctl level holding the AG	*/
+	__le32 dn_agheigth;	/* 4: height in dmapctl of the AG	*/
+	__le32 dn_agwidth;	/* 4: width in dmapctl of the AG	*/
+	__le32 dn_agstart;	/* 4: start tree index at AG height	*/
+	__le32 dn_agl2size;	/* 4: l2 num of blks per alloc group	*/
+	__le64 dn_agfree[MAXAG];/* 8*MAXAG: per AG free count		*/
+	__le64 dn_agsize;	/* 8: num of blks per alloc group	*/
+	s8 dn_maxfreebud;	/* 1: max free buddy system		*/
+	u8 pad[3007];		/* 3007: pad to 4096			*/
+};				/* - 4096 -				*/
 
 struct dbmap {
-	s64 dn_mapsize;		/* number of blocks in aggregate     */
-	s64 dn_nfree;		/* num free blks in aggregate map    */
-	int dn_l2nbperpage;	/* number of blks per page           */
-	int dn_numag;		/* total number of ags               */
-	int dn_maxlevel;	/* number of active ags              */
-	int dn_maxag;		/* max active alloc group number     */
-	int dn_agpref;		/* preferred alloc group (hint)      */
-	int dn_aglevel;		/* dmapctl level holding the AG      */
-	int dn_agheigth;	/* height in dmapctl of the AG       */
-	int dn_agwidth;		/* width in dmapctl of the AG        */
-	int dn_agstart;		/* start tree index at AG height     */
-	int dn_agl2size;	/* l2 num of blks per alloc group    */
-	s64 dn_agfree[MAXAG];	/* per AG free count           */
-	s64 dn_agsize;		/* num of blks per alloc group       */
-	signed char dn_maxfreebud;	/* max free buddy system             */
-};				/* - 4096 -                             */
+	s64 dn_mapsize;		/* number of blocks in aggregate	*/
+	s64 dn_nfree;		/* num free blks in aggregate map	*/
+	int dn_l2nbperpage;	/* number of blks per page		*/
+	int dn_numag;		/* total number of ags			*/
+	int dn_maxlevel;	/* number of active ags			*/
+	int dn_maxag;		/* max active alloc group number	*/
+	int dn_agpref;		/* preferred alloc group (hint)		*/
+	int dn_aglevel;		/* dmapctl level holding the AG		*/
+	int dn_agheigth;	/* height in dmapctl of the AG		*/
+	int dn_agwidth;		/* width in dmapctl of the AG		*/
+	int dn_agstart;		/* start tree index at AG height	*/
+	int dn_agl2size;	/* l2 num of blks per alloc group	*/
+	s64 dn_agfree[MAXAG];	/* per AG free count			*/
+	s64 dn_agsize;		/* num of blks per alloc group		*/
+	signed char dn_maxfreebud;	/* max free buddy system	*/
+};				/* - 4096 -				*/
 /*
  *	in-memory aggregate disk allocation map descriptor.
  */
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
index 6d62f32..c14ba3c 100644
--- a/fs/jfs/jfs_dtree.c
+++ b/fs/jfs/jfs_dtree.c
@@ -315,8 +315,8 @@
 	lv = &llck->lv[llck->index];
 
 	/*
-	 *      Linelock slot size is twice the size of directory table
-	 *      slot size.  512 entries per page.
+	 *	Linelock slot size is twice the size of directory table
+	 *	slot size.  512 entries per page.
 	 */
 	lv->offset = ((index - 2) & 511) >> 1;
 	lv->length = 1;
@@ -615,7 +615,7 @@
 	btstack->nsplit = 1;
 
 	/*
-	 *      search down tree from root:
+	 *	search down tree from root:
 	 *
 	 * between two consecutive entries of <Ki, Pi> and <Kj, Pj> of
 	 * internal page, child page Pi contains entry with k, Ki <= K < Kj.
@@ -659,7 +659,7 @@
 			}
 			if (cmp == 0) {
 				/*
-				 *      search hit
+				 *	search hit
 				 */
 				/* search hit - leaf page:
 				 * return the entry found
@@ -723,7 +723,7 @@
 		}
 
 		/*
-		 *      search miss
+		 *	search miss
 		 *
 		 * base is the smallest index with key (Kj) greater than
 		 * search key (K) and may be zero or (maxindex + 1) index.
@@ -834,7 +834,7 @@
 	struct lv *lv;
 
 	/*
-	 *      retrieve search result
+	 *	retrieve search result
 	 *
 	 * dtSearch() returns (leaf page pinned, index at which to insert).
 	 * n.b. dtSearch() may return index of (maxindex + 1) of
@@ -843,7 +843,7 @@
 	DT_GETSEARCH(ip, btstack->top, bn, mp, p, index);
 
 	/*
-	 *      insert entry for new key
+	 *	insert entry for new key
 	 */
 	if (DO_INDEX(ip)) {
 		if (JFS_IP(ip)->next_index == DIREND) {
@@ -860,9 +860,9 @@
 	data.leaf.ino = *fsn;
 
 	/*
-	 *      leaf page does not have enough room for new entry:
+	 *	leaf page does not have enough room for new entry:
 	 *
-	 *      extend/split the leaf page;
+	 *	extend/split the leaf page;
 	 *
 	 * dtSplitUp() will insert the entry and unpin the leaf page.
 	 */
@@ -877,9 +877,9 @@
 	}
 
 	/*
-	 *      leaf page does have enough room for new entry:
+	 *	leaf page does have enough room for new entry:
 	 *
-	 *      insert the new data entry into the leaf page;
+	 *	insert the new data entry into the leaf page;
 	 */
 	BT_MARK_DIRTY(mp, ip);
 	/*
@@ -967,13 +967,13 @@
 	}
 
 	/*
-	 *      split leaf page
+	 *	split leaf page
 	 *
 	 * The split routines insert the new entry, and
 	 * acquire txLock as appropriate.
 	 */
 	/*
-	 *      split root leaf page:
+	 *	split root leaf page:
 	 */
 	if (sp->header.flag & BT_ROOT) {
 		/*
@@ -1012,7 +1012,7 @@
 	}
 
 	/*
-	 *      extend first leaf page
+	 *	extend first leaf page
 	 *
 	 * extend the 1st extent if less than buffer page size
 	 * (dtExtendPage() returns leaf page unpinned)
@@ -1068,7 +1068,7 @@
 	}
 
 	/*
-	 *      split leaf page <sp> into <sp> and a new right page <rp>.
+	 *	split leaf page <sp> into <sp> and a new right page <rp>.
 	 *
 	 * return <rp> pinned and its extent descriptor <rpxd>
 	 */
@@ -1433,7 +1433,7 @@
 	rp->header.freecnt = rp->header.maxslot - fsi;
 
 	/*
-	 *      sequential append at tail: append without split
+	 *	sequential append at tail: append without split
 	 *
 	 * If splitting the last page on a level because of appending
 	 * an entry to it (skip is maxentry), it's likely that the access is
@@ -1467,7 +1467,7 @@
 	}
 
 	/*
-	 *      non-sequential insert (at possibly middle page)
+	 *	non-sequential insert (at possibly middle page)
 	 */
 
 	/*
@@ -1508,7 +1508,7 @@
 	left = 0;
 
 	/*
-	 *      compute fill factor for split pages
+	 *	compute fill factor for split pages
 	 *
 	 * <nxt> traces the next entry to move to rp
 	 * <off> traces the next entry to stay in sp
@@ -1551,7 +1551,7 @@
 	/* <nxt> points to the 1st entry to move */
 
 	/*
-	 *      move entries to right page
+	 *	move entries to right page
 	 *
 	 * dtMoveEntry() initializes rp and reserves entry for insertion
 	 *
@@ -1677,7 +1677,7 @@
 		return (rc);
 
 	/*
-	 *      extend the extent
+	 *	extend the extent
 	 */
 	pxdlist = split->pxdlist;
 	pxd = &pxdlist->pxd[pxdlist->npxd];
@@ -1722,7 +1722,7 @@
 	}
 
 	/*
-	 *      extend the page
+	 *	extend the page
 	 */
 	sp->header.self = *pxd;
 
@@ -1739,9 +1739,6 @@
 	/* update buffer extent descriptor of extended page */
 	xlen = lengthPXD(pxd);
 	xsize = xlen << JFS_SBI(sb)->l2bsize;
-#ifdef _STILL_TO_PORT
-	bmSetXD(smp, xaddr, xsize);
-#endif				/*  _STILL_TO_PORT */
 
 	/*
 	 * copy old stbl to new stbl at start of extended area
@@ -1836,7 +1833,7 @@
 	}
 
 	/*
-	 *      update parent entry on the parent/root page
+	 *	update parent entry on the parent/root page
 	 */
 	/*
 	 * acquire a transaction lock on the parent/root page
@@ -1904,7 +1901,7 @@
 	sp = &JFS_IP(ip)->i_dtroot;
 
 	/*
-	 *      allocate/initialize a single (right) child page
+	 *	allocate/initialize a single (right) child page
 	 *
 	 * N.B. at first split, a one (or two) block extent to fit the new entry
 	 * is allocated; at subsequent split, a full page is allocated;
@@ -1943,7 +1940,7 @@
 	rp->header.prev = 0;
 
 	/*
-	 *      move in-line root page into new right page extent
+	 *	move in-line root page into new right page extent
 	 */
 	/* linelock header + copied entries + new stbl (1st slot) in new page */
 	ASSERT(dtlck->index == 0);
@@ -2016,7 +2013,7 @@
 	dtInsertEntry(rp, split->index, split->key, split->data, &dtlck);
 
 	/*
-	 *      reset parent/root page
+	 *	reset parent/root page
 	 *
 	 * set the 1st entry offset to 0, which forces the left-most key
 	 * at any level of the tree to be less than any search key.
@@ -2102,7 +2099,7 @@
 	dtpage_t *np;
 
 	/*
-	 *      search for the entry to delete:
+	 *	search for the entry to delete:
 	 *
 	 * dtSearch() returns (leaf page pinned, index at which to delete).
 	 */
@@ -2253,7 +2250,7 @@
 	int i;
 
 	/*
-	 *      keep the root leaf page which has become empty
+	 *	keep the root leaf page which has become empty
 	 */
 	if (BT_IS_ROOT(fmp)) {
 		/*
@@ -2269,7 +2266,7 @@
 	}
 
 	/*
-	 *      free the non-root leaf page
+	 *	free the non-root leaf page
 	 */
 	/*
 	 * acquire a transaction lock on the page
@@ -2299,7 +2296,7 @@
 	discard_metapage(fmp);
 
 	/*
-	 *      propagate page deletion up the directory tree
+	 *	propagate page deletion up the directory tree
 	 *
 	 * If the delete from the parent page makes it empty,
 	 * continue all the way up the tree.
@@ -2440,10 +2437,10 @@
 
 #ifdef _NOTYET
 /*
- * NAME:        dtRelocate()
+ * NAME:	dtRelocate()
  *
- * FUNCTION:    relocate dtpage (internal or leaf) of directory;
- *              This function is mainly used by defragfs utility.
+ * FUNCTION:	relocate dtpage (internal or leaf) of directory;
+ *		This function is mainly used by defragfs utility.
  */
 int dtRelocate(tid_t tid, struct inode *ip, s64 lmxaddr, pxd_t * opxd,
 	       s64 nxaddr)
@@ -2471,8 +2468,8 @@
 		   xlen);
 
 	/*
-	 *      1. get the internal parent dtpage covering
-	 *      router entry for the tartget page to be relocated;
+	 *	1. get the internal parent dtpage covering
+	 *	router entry for the target page to be relocated;
 	 */
 	rc = dtSearchNode(ip, lmxaddr, opxd, &btstack);
 	if (rc)
@@ -2483,7 +2480,7 @@
 	jfs_info("dtRelocate: parent router entry validated.");
 
 	/*
-	 *      2. relocate the target dtpage
+	 *	2. relocate the target dtpage
 	 */
 	/* read in the target page from src extent */
 	DT_GETPAGE(ip, oxaddr, mp, PSIZE, p, rc);
@@ -2581,9 +2578,7 @@
 
 	/* update the buffer extent descriptor of the dtpage */
 	xsize = xlen << JFS_SBI(ip->i_sb)->l2bsize;
-#ifdef _STILL_TO_PORT
-	bmSetXD(mp, nxaddr, xsize);
-#endif /* _STILL_TO_PORT */
+
 	/* unpin the relocated page */
 	DT_PUTPAGE(mp);
 	jfs_info("dtRelocate: target dtpage relocated.");
@@ -2594,7 +2589,7 @@
 	 */
 
 	/*
-	 *      3. acquire maplock for the source extent to be freed;
+	 *	3. acquire maplock for the source extent to be freed;
 	 */
 	/* for dtpage relocation, write a LOG_NOREDOPAGE record
 	 * for the source dtpage (logredo() will init NoRedoPage
@@ -2609,7 +2604,7 @@
 	pxdlock->index = 1;
 
 	/*
-	 *      4. update the parent router entry for relocation;
+	 *	4. update the parent router entry for relocation;
 	 *
 	 * acquire tlck for the parent entry covering the target dtpage;
 	 * write LOG_REDOPAGE to apply after image only;
@@ -2637,7 +2632,7 @@
  * NAME:	dtSearchNode()
  *
  * FUNCTION:	Search for a dtpage containing a specified address
- *              This function is mainly used by defragfs utility.
+ *		This function is mainly used by defragfs utility.
  *
  * NOTE:	Search result on stack, the found page is pinned at exit.
  *		The result page must be an internal dtpage.
@@ -2660,7 +2655,7 @@
 	BT_CLR(btstack);	/* reset stack */
 
 	/*
-	 *      descend tree to the level with specified leftmost page
+	 *	descend tree to the level with specified leftmost page
 	 *
 	 *  by convention, root bn = 0.
 	 */
@@ -2699,7 +2694,7 @@
 	}
 
 	/*
-	 *      search each page at the current levevl
+	 *	search each page at the current level
 	 */
       loop:
 	stbl = DT_GETSTBL(p);
@@ -3044,9 +3039,9 @@
 	if (DO_INDEX(ip)) {
 		/*
 		 * persistent index is stored in directory entries.
-		 * Special cases:        0 = .
-		 *                       1 = ..
-		 *                      -1 = End of directory
+		 * Special cases:	 0 = .
+		 *			 1 = ..
+		 *			-1 = End of directory
 		 */
 		do_index = 1;
 
@@ -3128,10 +3123,10 @@
 		/*
 		 * Legacy filesystem - OS/2 & Linux JFS < 0.3.6
 		 *
-		 * pn = index = 0:      First entry "."
-		 * pn = 0; index = 1:   Second entry ".."
-		 * pn > 0:              Real entries, pn=1 -> leftmost page
-		 * pn = index = -1:     No more entries
+		 * pn = index = 0:	First entry "."
+		 * pn = 0; index = 1:	Second entry ".."
+		 * pn > 0:		Real entries, pn=1 -> leftmost page
+		 * pn = index = -1:	No more entries
 		 */
 		dtpos = filp->f_pos;
 		if (dtpos == 0) {
@@ -3351,7 +3346,7 @@
 	BT_CLR(btstack);	/* reset stack */
 
 	/*
-	 *      descend leftmost path of the tree
+	 *	descend leftmost path of the tree
 	 *
 	 * by convention, root bn = 0.
 	 */
@@ -4531,7 +4526,7 @@
 	struct ldtentry *entry;
 
 	/*
-	 *      search for the entry to modify:
+	 *	search for the entry to modify:
 	 *
 	 * dtSearch() returns (leaf page pinned, index at which to modify).
 	 */
diff --git a/fs/jfs/jfs_dtree.h b/fs/jfs/jfs_dtree.h
index af8513f..8561c6e 100644
--- a/fs/jfs/jfs_dtree.h
+++ b/fs/jfs/jfs_dtree.h
@@ -35,7 +35,7 @@
 
 
 /*
- *      entry segment/slot
+ *	entry segment/slot
  *
  * an entry consists of type dependent head/only segment/slot and
  * additional segments/slots linked via next field;
diff --git a/fs/jfs/jfs_extent.c b/fs/jfs/jfs_extent.c
index a35bdca..7ae1e32 100644
--- a/fs/jfs/jfs_extent.c
+++ b/fs/jfs/jfs_extent.c
@@ -34,8 +34,8 @@
 #endif
 static s64 extRoundDown(s64 nb);
 
-#define DPD(a)          (printk("(a): %d\n",(a)))
-#define DPC(a)          (printk("(a): %c\n",(a)))
+#define DPD(a)		(printk("(a): %d\n",(a)))
+#define DPC(a)		(printk("(a): %c\n",(a)))
 #define DPL1(a)					\
 {						\
 	if ((a) >> 32)				\
@@ -51,19 +51,19 @@
 		printk("(a): %x\n",(a) << 32);	\
 }
 
-#define DPD1(a)         (printk("(a): %d  ",(a)))
-#define DPX(a)          (printk("(a): %08x\n",(a)))
-#define DPX1(a)         (printk("(a): %08x  ",(a)))
-#define DPS(a)          (printk("%s\n",(a)))
-#define DPE(a)          (printk("\nENTERING: %s\n",(a)))
-#define DPE1(a)          (printk("\nENTERING: %s",(a)))
-#define DPS1(a)         (printk("  %s  ",(a)))
+#define DPD1(a)		(printk("(a): %d  ",(a)))
+#define DPX(a)		(printk("(a): %08x\n",(a)))
+#define DPX1(a)		(printk("(a): %08x  ",(a)))
+#define DPS(a)		(printk("%s\n",(a)))
+#define DPE(a)		(printk("\nENTERING: %s\n",(a)))
+#define DPE1(a)		(printk("\nENTERING: %s",(a)))
+#define DPS1(a)		(printk("  %s  ",(a)))
 
 
 /*
  * NAME:	extAlloc()
  *
- * FUNCTION:    allocate an extent for a specified page range within a
+ * FUNCTION:	allocate an extent for a specified page range within a
  *		file.
  *
  * PARAMETERS:
@@ -78,9 +78,9 @@
  *		  should be marked as allocated but not recorded.
  *
  * RETURN VALUES:
- *      0       - success
- *      -EIO	- i/o error.
- *      -ENOSPC	- insufficient disk resources.
+ *	0	- success
+ *	-EIO	- i/o error.
+ *	-ENOSPC	- insufficient disk resources.
  */
 int
 extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr)
@@ -192,9 +192,9 @@
 
 #ifdef _NOTYET
 /*
- * NAME:        extRealloc()
+ * NAME:	extRealloc()
  *
- * FUNCTION:    extend the allocation of a file extent containing a
+ * FUNCTION:	extend the allocation of a file extent containing a
  *		partially backed last page.
  *
  * PARAMETERS:
@@ -207,9 +207,9 @@
  *		  should be marked as allocated but not recorded.
  *
  * RETURN VALUES:
- *      0       - success
- *      -EIO	- i/o error.
- *      -ENOSPC	- insufficient disk resources.
+ *	0	- success
+ *	-EIO	- i/o error.
+ *	-ENOSPC	- insufficient disk resources.
  */
 int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr)
 {
@@ -345,9 +345,9 @@
 
 
 /*
- * NAME:        extHint()
+ * NAME:	extHint()
  *
- * FUNCTION:    produce an extent allocation hint for a file offset.
+ * FUNCTION:	produce an extent allocation hint for a file offset.
  *
  * PARAMETERS:
  *	ip	- the inode of the file.
@@ -356,8 +356,8 @@
  *		  the hint.
  *
  * RETURN VALUES:
- *      0       - success
- *      -EIO	- i/o error.
+ *	0	- success
+ *	-EIO	- i/o error.
  */
 int extHint(struct inode *ip, s64 offset, xad_t * xp)
 {
@@ -387,7 +387,7 @@
 	lxdl.nlxd = 1;
 	lxdl.lxd = &lxd;
 	LXDoffset(&lxd, prev)
-	    LXDlength(&lxd, nbperpage);
+	LXDlength(&lxd, nbperpage);
 
 	xadl.maxnxad = 1;
 	xadl.nxad = 0;
@@ -397,11 +397,11 @@
 	if ((rc = xtLookupList(ip, &lxdl, &xadl, 0)))
 		return (rc);
 
-	/* check if not extent exists for the previous page.
+	/* check if no extent exists for the previous page.
 	 * this is possible for sparse files.
 	 */
 	if (xadl.nxad == 0) {
-//              assert(ISSPARSE(ip));
+//		assert(ISSPARSE(ip));
 		return (0);
 	}
 
@@ -410,28 +410,28 @@
 	 */
 	xp->flag &= XAD_NOTRECORDED;
 
-        if(xadl.nxad != 1 || lengthXAD(xp) != nbperpage) {
+	if (xadl.nxad != 1 || lengthXAD(xp) != nbperpage) {
 		jfs_error(ip->i_sb, "extHint: corrupt xtree");
 		return -EIO;
-        }
+	}
 
 	return (0);
 }
 
 
 /*
- * NAME:        extRecord()
+ * NAME:	extRecord()
  *
- * FUNCTION:    change a page with a file from not recorded to recorded.
+ * FUNCTION:	change a page within a file from not recorded to recorded.
  *
  * PARAMETERS:
  *	ip	- inode of the file.
  *	cp	- cbuf of the file page.
  *
  * RETURN VALUES:
- *      0       - success
- *      -EIO	- i/o error.
- *      -ENOSPC	- insufficient disk resources.
+ *	0	- success
+ *	-EIO	- i/o error.
+ *	-ENOSPC	- insufficient disk resources.
  */
 int extRecord(struct inode *ip, xad_t * xp)
 {
@@ -451,9 +451,9 @@
 
 #ifdef _NOTYET
 /*
- * NAME:        extFill()
+ * NAME:	extFill()
  *
- * FUNCTION:    allocate disk space for a file page that represents
+ * FUNCTION:	allocate disk space for a file page that represents
  *		a file hole.
  *
  * PARAMETERS:
@@ -461,16 +461,16 @@
  *	cp	- cbuf of the file page represent the hole.
  *
  * RETURN VALUES:
- *      0       - success
- *      -EIO	- i/o error.
- *      -ENOSPC	- insufficient disk resources.
+ *	0	- success
+ *	-EIO	- i/o error.
+ *	-ENOSPC	- insufficient disk resources.
  */
 int extFill(struct inode *ip, xad_t * xp)
 {
 	int rc, nbperpage = JFS_SBI(ip->i_sb)->nbperpage;
 	s64 blkno = offsetXAD(xp) >> ip->i_blkbits;
 
-//      assert(ISSPARSE(ip));
+//	assert(ISSPARSE(ip));
 
 	/* initialize the extent allocation hint */
 	XADaddress(xp, 0);
@@ -489,7 +489,7 @@
 /*
  * NAME:	extBalloc()
  *
- * FUNCTION:    allocate disk blocks to form an extent.
+ * FUNCTION:	allocate disk blocks to form an extent.
  *
  *		initially, we will try to allocate disk blocks for the
  *		requested size (nblocks).  if this fails (nblocks
@@ -513,9 +513,9 @@
  *		   allocated block range.
  *
  * RETURN VALUES:
- *      0       - success
- *      -EIO	- i/o error.
- *      -ENOSPC	- insufficient disk resources.
+ *	0	- success
+ *	-EIO	- i/o error.
+ *	-ENOSPC	- insufficient disk resources.
  */
 static int
 extBalloc(struct inode *ip, s64 hint, s64 * nblocks, s64 * blkno)
@@ -580,7 +580,7 @@
 /*
  * NAME:	extBrealloc()
  *
- * FUNCTION:    attempt to extend an extent's allocation.
+ * FUNCTION:	attempt to extend an extent's allocation.
  *
  *		Initially, we will try to extend the extent's allocation
  *		in place.  If this fails, we'll try to move the extent
@@ -597,8 +597,8 @@
  *
  * PARAMETERS:
  *	ip	 - the inode of the file.
- *	blkno    - starting block number of the extents current allocation.
- *	nblks    - number of blocks within the extents current allocation.
+ *	blkno	 -  starting block number of the extent's current allocation.
+ *	nblks	 -  number of blocks within the extent's current allocation.
  *	newnblks - pointer to a s64 value.  on entry, this value is the
  *		   new desired extent size (number of blocks).  on
  *		   successful exit, this value is set to the extent's actual
@@ -606,9 +606,9 @@
  *	newblkno - the starting block number of the extent's new allocation.
  *
  * RETURN VALUES:
- *      0       - success
- *      -EIO	- i/o error.
- *      -ENOSPC	- insufficient disk resources.
+ *	0	- success
+ *	-EIO	- i/o error.
+ *	-ENOSPC	- insufficient disk resources.
  */
 static int
 extBrealloc(struct inode *ip,
@@ -634,16 +634,16 @@
 
 
 /*
- * NAME:        extRoundDown()
+ * NAME:	extRoundDown()
  *
- * FUNCTION:    round down a specified number of blocks to the next
+ * FUNCTION:	round down a specified number of blocks to the next
  *		smallest power of 2 number.
  *
  * PARAMETERS:
  *	nb	- the number of blocks to round down.
  *
  * RETURN VALUES:
- *      next smallest power of 2 number.
+ *	next smallest power of 2 number.
  */
 static s64 extRoundDown(s64 nb)
 {
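A hedged sketch of the same rounding (the kernel version scans for the
highest set bit instead of looping; nb assumed positive):

	/* Largest power of two <= nb: round_down_pow2_sketch(9) == 8,
	 * (8) == 8, (1) == 1.
	 */
	static s64 round_down_pow2_sketch(s64 nb)
	{
		s64 m = 1;

		while ((m << 1) <= nb)
			m <<= 1;
		return m;
	}
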
diff --git a/fs/jfs/jfs_filsys.h b/fs/jfs/jfs_filsys.h
index 38f70ac..b3f5463 100644
--- a/fs/jfs/jfs_filsys.h
+++ b/fs/jfs/jfs_filsys.h
@@ -34,9 +34,9 @@
 #define JFS_UNICODE	0x00000001	/* unicode name */
 
 /* mount time flags for error handling */
-#define JFS_ERR_REMOUNT_RO 0x00000002   /* remount read-only */
-#define JFS_ERR_CONTINUE   0x00000004   /* continue */
-#define JFS_ERR_PANIC      0x00000008   /* panic */
+#define JFS_ERR_REMOUNT_RO 0x00000002	/* remount read-only */
+#define JFS_ERR_CONTINUE   0x00000004	/* continue */
+#define JFS_ERR_PANIC      0x00000008	/* panic */
 
 /* Quota support */
 #define	JFS_USRQUOTA	0x00000010
@@ -83,7 +83,6 @@
 /*	case-insensitive name/directory support */
 
 #define JFS_AIX		0x80000000	/* AIX support */
-/*	POSIX name/directory  support - Never implemented*/
 
 /*
  *	buffer cache configuration
@@ -113,10 +112,10 @@
 #define IDATASIZE	256	/* inode inline data size */
 #define	IXATTRSIZE	128	/* inode inline extended attribute size */
 
-#define XTPAGE_SIZE     4096
-#define log2_PAGESIZE     12
+#define XTPAGE_SIZE	4096
+#define log2_PAGESIZE	12
 
-#define IAG_SIZE        4096
+#define IAG_SIZE	4096
 #define IAG_EXTENT_SIZE 4096
 #define	INOSPERIAG	4096	/* number of disk inodes per iag */
 #define	L2INOSPERIAG	12	/* l2 number of disk inodes per iag */
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index c653022..3870ba8 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -93,21 +93,21 @@
 static void copy_to_dinode(struct dinode *, struct inode *);
 
 /*
- * NAME:        diMount()
+ * NAME:	diMount()
  *
- * FUNCTION:    initialize the incore inode map control structures for
+ * FUNCTION:	initialize the incore inode map control structures for
  *		a fileset or aggregate at init time.
  *
- *              the inode map's control structure (dinomap) is
- *              brought in from disk and placed in virtual memory.
+ *		the inode map's control structure (dinomap) is
+ *		brought in from disk and placed in virtual memory.
  *
  * PARAMETERS:
- *      ipimap  - pointer to inode map inode for the aggregate or fileset.
+ *	ipimap	- pointer to inode map inode for the aggregate or fileset.
  *
  * RETURN VALUES:
- *      0       - success
- *      -ENOMEM  - insufficient free virtual memory.
- *      -EIO	- i/o error.
+ *	0	- success
+ *	-ENOMEM	- insufficient free virtual memory.
+ *	-EIO	- i/o error.
  */
 int diMount(struct inode *ipimap)
 {
@@ -180,18 +180,18 @@
 
 
 /*
- * NAME:        diUnmount()
+ * NAME:	diUnmount()
  *
- * FUNCTION:    write to disk the incore inode map control structures for
+ * FUNCTION:	write to disk the incore inode map control structures for
  *		a fileset or aggregate at unmount time.
  *
  * PARAMETERS:
- *      ipimap  - pointer to inode map inode for the aggregate or fileset.
+ *	ipimap	- pointer to inode map inode for the aggregate or fileset.
  *
  * RETURN VALUES:
- *      0       - success
- *      -ENOMEM  - insufficient free virtual memory.
- *      -EIO	- i/o error.
+ *	0	- success
+ *	-ENOMEM	- insufficient free virtual memory.
+ *	-EIO	- i/o error.
  */
 int diUnmount(struct inode *ipimap, int mounterror)
 {
@@ -274,9 +274,9 @@
 
 
 /*
- * NAME:        diRead()
+ * NAME:	diRead()
  *
- * FUNCTION:    initialize an incore inode from disk.
+ * FUNCTION:	initialize an incore inode from disk.
  *
  *		on entry, the specified incore inode should itself
  *		specify the disk inode number corresponding to the
@@ -285,7 +285,7 @@
  *		this routine handles incore inode initialization for
  *		both "special" and "regular" inodes.  special inodes
  *		are those required early in the mount process and
- *	        require special handling since much of the file system
+ *		require special handling since much of the file system
  *		is not yet initialized.  these "special" inodes are
  *		identified by a NULL inode map inode pointer and are
  *		actually initialized by a call to diReadSpecial().
@@ -298,12 +298,12 @@
  *		incore inode.
  *
  * PARAMETERS:
- *      ip  -  pointer to incore inode to be initialized from disk.
+ *	ip	-  pointer to incore inode to be initialized from disk.
  *
  * RETURN VALUES:
- *      0       - success
- *      -EIO	- i/o error.
- *      -ENOMEM	- insufficient memory
+ *	0	- success
+ *	-EIO	- i/o error.
+ *	-ENOMEM	- insufficient memory
  *
  */
 int diRead(struct inode *ip)
@@ -410,26 +410,26 @@
 
 
 /*
- * NAME:        diReadSpecial()
+ * NAME:	diReadSpecial()
  *
- * FUNCTION:    initialize a 'special' inode from disk.
+ * FUNCTION:	initialize a 'special' inode from disk.
  *
  *		this routine handles aggregate level inodes.  The
  *		inode cache cannot differentiate between the
  *		aggregate inodes and the filesystem inodes, so we
  *		handle these here.  We don't actually use the aggregate
- *	        inode map, since these inodes are at a fixed location
+ *		inode map, since these inodes are at a fixed location
  *		and in some cases the aggregate inode map isn't initialized
  *		yet.
  *
  * PARAMETERS:
- *      sb - filesystem superblock
+ *	sb - filesystem superblock
  *	inum - aggregate inode number
  *	secondary - 1 if secondary aggregate inode table
  *
  * RETURN VALUES:
- *      new inode	- success
- *      NULL		- i/o error.
+ *	new inode	- success
+ *	NULL		- i/o error.
  */
 struct inode *diReadSpecial(struct super_block *sb, ino_t inum, int secondary)
 {
@@ -502,12 +502,12 @@
 }
 
 /*
- * NAME:        diWriteSpecial()
+ * NAME:	diWriteSpecial()
  *
- * FUNCTION:    Write the special inode to disk
+ * FUNCTION:	Write the special inode to disk
  *
  * PARAMETERS:
- *      ip - special inode
+ *	ip - special inode
  *	secondary - 1 if secondary aggregate inode table
  *
  * RETURN VALUES: none
@@ -554,9 +554,9 @@
 }
 
 /*
- * NAME:        diFreeSpecial()
+ * NAME:	diFreeSpecial()
  *
- * FUNCTION:    Free allocated space for special inode
+ * FUNCTION:	Free allocated space for special inode
  */
 void diFreeSpecial(struct inode *ip)
 {
@@ -572,9 +572,9 @@
 
 
 /*
- * NAME:        diWrite()
+ * NAME:	diWrite()
  *
- * FUNCTION:    write the on-disk inode portion of the in-memory inode
+ * FUNCTION:	write the on-disk inode portion of the in-memory inode
  *		to its corresponding on-disk inode.
  *
  *		on entry, the specified incore inode should itself
@@ -589,11 +589,11 @@
  *
  * PARAMETERS:
  *	tid -  transaction id
- *      ip  -  pointer to incore inode to be written to the inode extent.
+ *	ip  -  pointer to incore inode to be written to the inode extent.
  *
  * RETURN VALUES:
- *      0       - success
- *      -EIO	- i/o error.
+ *	0	- success
+ *	-EIO	- i/o error.
  */
 int diWrite(tid_t tid, struct inode *ip)
 {
@@ -730,7 +730,7 @@
 	ilinelock = (struct linelock *) & tlck->lock;
 
 	/*
-	 *      regular file: 16 byte (XAD slot) granularity
+	 *	regular file: 16 byte (XAD slot) granularity
 	 */
 	if (type & tlckXTREE) {
 		xtpage_t *p, *xp;
@@ -755,7 +755,7 @@
 				xad->flag &= ~(XAD_NEW | XAD_EXTENDED);
 	}
 	/*
-	 *      directory: 32 byte (directory entry slot) granularity
+	 *	directory: 32 byte (directory entry slot) granularity
 	 */
 	else if (type & tlckDTREE) {
 		dtpage_t *p, *xp;
@@ -800,9 +800,8 @@
 	}
 
 	/*
-	 *      lock/copy inode base: 128 byte slot granularity
+	 *	lock/copy inode base: 128 byte slot granularity
 	 */
-// baseDinode:
 	lv = & dilinelock->lv[dilinelock->index];
 	lv->offset = dioffset >> L2INODESLOTSIZE;
 	copy_to_dinode(dp, ip);
@@ -813,17 +812,6 @@
 		lv->length = 1;
 	dilinelock->index++;
 
-#ifdef _JFS_FASTDASD
-	/*
-	 * We aren't logging changes to the DASD used in directory inodes,
-	 * but we need to write them to disk.  If we don't unmount cleanly,
-	 * mount will recalculate the DASD used.
-	 */
-	if (S_ISDIR(ip->i_mode)
-	    && (ip->i_ipmnt->i_mntflag & JFS_DASD_ENABLED))
-		memcpy(&dp->di_DASD, &ip->i_DASD, sizeof(struct dasd));
-#endif				/*  _JFS_FASTDASD */
-
 	/* release the buffer holding the updated on-disk inode.
 	 * the buffer will be later written by commit processing.
 	 */
@@ -834,9 +822,9 @@
 
 
 /*
- * NAME:        diFree(ip)
+ * NAME:	diFree(ip)
  *
- * FUNCTION:    free a specified inode from the inode working map
+ * FUNCTION:	free a specified inode from the inode working map
  *		for a fileset or aggregate.
  *
  *		if the inode to be freed represents the first (only)
@@ -865,11 +853,11 @@
  *		any updates and are held until all updates are complete.
  *
  * PARAMETERS:
- *      ip	- inode to be freed.
+ *	ip	- inode to be freed.
  *
  * RETURN VALUES:
- *      0       - success
- *      -EIO	- i/o error.
+ *	0	- success
+ *	-EIO	- i/o error.
  */
 int diFree(struct inode *ip)
 {
@@ -902,7 +890,8 @@
 	 * the map.
 	 */
 	if (iagno >= imap->im_nextiag) {
-		dump_mem("imap", imap, 32);
+		print_hex_dump(KERN_ERR, "imap: ", DUMP_PREFIX_ADDRESS, 16, 4,
+			       imap, 32, 0);
 		jfs_error(ip->i_sb,
 			  "diFree: inum = %d, iagno = %d, nextiag = %d",
 			  (uint) inum, iagno, imap->im_nextiag);
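For reference, the replacement is the generic helper from lib/hexdump.c;
the call above dumps the first 32 bytes of the inomap at KERN_ERR level,
in 16-byte rows of 4-byte groups, with no ASCII column:

	void print_hex_dump(const char *level, const char *prefix_str,
			    int prefix_type, int rowsize, int groupsize,
			    const void *buf, size_t len, bool ascii);
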
@@ -964,8 +953,8 @@
 		return -EIO;
 	}
 	/*
-	 *      inode extent still has some inodes or below low water mark:
-	 *      keep the inode extent;
+	 *	inode extent still has some inodes or below low water mark:
+	 *	keep the inode extent;
 	 */
 	if (bitmap ||
 	    imap->im_agctl[agno].numfree < 96 ||
@@ -1047,12 +1036,12 @@
 
 
 	/*
-	 *      inode extent has become free and above low water mark:
-	 *      free the inode extent;
+	 *	inode extent has become free and above low water mark:
+	 *	free the inode extent;
 	 */
 
 	/*
-	 *      prepare to update iag list(s) (careful update step 1)
+	 *	prepare to update iag list(s) (careful update step 1)
 	 */
 	amp = bmp = cmp = dmp = NULL;
 	fwd = back = -1;
@@ -1152,7 +1141,7 @@
 	invalidate_pxd_metapages(ip, freepxd);
 
 	/*
-	 *      update iag list(s) (careful update step 2)
+	 *	update iag list(s) (careful update step 2)
 	 */
 	/* add the iag to the ag extent free list if this is the
 	 * first free extent for the iag.
@@ -1338,20 +1327,20 @@
 
 
 /*
- * NAME:        diAlloc(pip,dir,ip)
+ * NAME:	diAlloc(pip,dir,ip)
  *
- * FUNCTION:    allocate a disk inode from the inode working map
+ * FUNCTION:	allocate a disk inode from the inode working map
  *		for a fileset or aggregate.
  *
  * PARAMETERS:
- *      pip	- pointer to incore inode for the parent inode.
- *      dir	- 'true' if the new disk inode is for a directory.
- *      ip	- pointer to a new inode
+ *	pip	- pointer to incore inode for the parent inode.
+ *	dir	- 'true' if the new disk inode is for a directory.
+ *	ip	- pointer to a new inode
  *
  * RETURN VALUES:
- *      0       - success.
- *      -ENOSPC	- insufficient disk resources.
- *      -EIO	- i/o error.
+ *	0	- success.
+ *	-ENOSPC	- insufficient disk resources.
+ *	-EIO	- i/o error.
  */
 int diAlloc(struct inode *pip, bool dir, struct inode *ip)
 {
@@ -1433,7 +1422,7 @@
 	addext = (imap->im_agctl[agno].numfree < 32 && iagp->nfreeexts);
 
 	/*
-	 *      try to allocate from the IAG
+	 *	try to allocate from the IAG
 	 */
 	/* check if the inode may be allocated from the iag
 	 * (i.e. the inode has free inodes or new extent can be added).
@@ -1633,9 +1622,9 @@
 
 
 /*
- * NAME:        diAllocAG(imap,agno,dir,ip)
+ * NAME:	diAllocAG(imap,agno,dir,ip)
  *
- * FUNCTION:    allocate a disk inode from the allocation group.
+ * FUNCTION:	allocate a disk inode from the allocation group.
  *
  *		this routine first determines if a new extent of free
  *		inodes should be added for the allocation group, with
@@ -1649,17 +1638,17 @@
  * PRE CONDITION: Already have the AG lock for this AG.
  *
  * PARAMETERS:
- *      imap	- pointer to inode map control structure.
- *      agno	- allocation group to allocate from.
- *      dir	- 'true' if the new disk inode is for a directory.
- *      ip	- pointer to the new inode to be filled in on successful return
+ *	imap	- pointer to inode map control structure.
+ *	agno	- allocation group to allocate from.
+ *	dir	- 'true' if the new disk inode is for a directory.
+ *	ip	- pointer to the new inode to be filled in on successful return
  *		  with the disk inode number allocated, its extent address
  *		  and the start of the ag.
  *
  * RETURN VALUES:
- *      0       - success.
- *      -ENOSPC	- insufficient disk resources.
- *      -EIO	- i/o error.
+ *	0	- success.
+ *	-ENOSPC	- insufficient disk resources.
+ *	-EIO	- i/o error.
  */
 static int
 diAllocAG(struct inomap * imap, int agno, bool dir, struct inode *ip)
@@ -1709,9 +1698,9 @@
 
 
 /*
- * NAME:        diAllocAny(imap,agno,dir,iap)
+ * NAME:	diAllocAny(imap,agno,dir,iap)
  *
- * FUNCTION:    allocate a disk inode from any other allocation group.
+ * FUNCTION:	allocate a disk inode from any other allocation group.
  *
  *		this routine is called when an allocation attempt within
  *		the primary allocation group has failed. if attempts to
  *		allocate from the other allocation groups fail as well,
  *		the allocation fails for the specified primary group.
  *		specified primary group.
  *
  * PARAMETERS:
- *      imap	- pointer to inode map control structure.
- *      agno	- primary allocation group (to avoid).
- *      dir	- 'true' if the new disk inode is for a directory.
- *      ip	- pointer to a new inode to be filled in on successful return
+ *	imap	- pointer to inode map control structure.
+ *	agno	- primary allocation group (to avoid).
+ *	dir	- 'true' if the new disk inode is for a directory.
+ *	ip	- pointer to a new inode to be filled in on successful return
  *		  with the disk inode number allocated, its extent address
  *		  and the start of the ag.
  *
  * RETURN VALUES:
- *      0       - success.
- *      -ENOSPC	- insufficient disk resources.
- *      -EIO	- i/o error.
+ *	0	- success.
+ *	-ENOSPC	- insufficient disk resources.
+ *	-EIO	- i/o error.
  */
 static int
 diAllocAny(struct inomap * imap, int agno, bool dir, struct inode *ip)
@@ -1772,9 +1761,9 @@
 
 
 /*
- * NAME:        diAllocIno(imap,agno,ip)
+ * NAME:	diAllocIno(imap,agno,ip)
  *
- * FUNCTION:    allocate a disk inode from the allocation group's free
+ * FUNCTION:	allocate a disk inode from the allocation group's free
  *		inode list, returning an error if this free list is
  *		empty (i.e. no iags on the list).
  *
@@ -1785,16 +1774,16 @@
  * PRE CONDITION: Already have AG lock for this AG.
  *
  * PARAMETERS:
- *      imap	- pointer to inode map control structure.
- *      agno	- allocation group.
- *      ip	- pointer to new inode to be filled in on successful return
+ *	imap	- pointer to inode map control structure.
+ *	agno	- allocation group.
+ *	ip	- pointer to new inode to be filled in on successful return
  *		  with the disk inode number allocated, its extent address
  *		  and the start of the ag.
  *
  * RETURN VALUES:
- *      0       - success.
- *      -ENOSPC	- insufficient disk resources.
- *      -EIO	- i/o error.
+ *	0	- success.
+ *	-ENOSPC	- insufficient disk resources.
+ *	-EIO	- i/o error.
  */
 static int diAllocIno(struct inomap * imap, int agno, struct inode *ip)
 {
@@ -1890,7 +1879,7 @@
 
 
 /*
- * NAME:        diAllocExt(imap,agno,ip)
+ * NAME:	diAllocExt(imap,agno,ip)
  *
  * FUNCTION:	add a new extent of free inodes to an iag, allocating
  *		an inode from this extent to satisfy the current allocation
@@ -1910,16 +1899,16 @@
  *		for the purpose of satisfying this request.
  *
  * PARAMETERS:
- *      imap	- pointer to inode map control structure.
- *      agno	- allocation group number.
- *      ip	- pointer to new inode to be filled in on successful return
+ *	imap	- pointer to inode map control structure.
+ *	agno	- allocation group number.
+ *	ip	- pointer to new inode to be filled in on successful return
  *		  with the disk inode number allocated, its extent address
  *		  and the start of the ag.
  *
  * RETURN VALUES:
- *      0       - success.
- *      -ENOSPC	- insufficient disk resources.
- *      -EIO	- i/o error.
+ *	0	- success.
+ *	-ENOSPC	- insufficient disk resources.
+ *	-EIO	- i/o error.
  */
 static int diAllocExt(struct inomap * imap, int agno, struct inode *ip)
 {
@@ -2010,7 +1999,7 @@
 
 
 /*
- * NAME:        diAllocBit(imap,iagp,ino)
+ * NAME:	diAllocBit(imap,iagp,ino)
  *
  * FUNCTION:	allocate a backed inode from an iag.
  *
@@ -2030,14 +2019,14 @@
  *	this AG.  Must have read lock on imap inode.
  *
  * PARAMETERS:
- *      imap	- pointer to inode map control structure.
- *      iagp	- pointer to iag.
- *      ino	- inode number to be allocated within the iag.
+ *	imap	- pointer to inode map control structure.
+ *	iagp	- pointer to iag.
+ *	ino	- inode number to be allocated within the iag.
  *
  * RETURN VALUES:
- *      0       - success.
- *      -ENOSPC	- insufficient disk resources.
- *      -EIO	- i/o error.
+ *	0	- success.
+ *	-ENOSPC	- insufficient disk resources.
+ *	-EIO	- i/o error.
  */
 static int diAllocBit(struct inomap * imap, struct iag * iagp, int ino)
 {
@@ -2144,11 +2133,11 @@
 
 
 /*
- * NAME:        diNewExt(imap,iagp,extno)
+ * NAME:	diNewExt(imap,iagp,extno)
  *
- * FUNCTION:    initialize a new extent of inodes for an iag, allocating
- *	        the first inode of the extent for use for the current
- *	        allocation request.
+ * FUNCTION:	initialize a new extent of inodes for an iag, allocating
+ *		the first inode of the extent for use for the current
+ *		allocation request.
  *
  *		disk resources are allocated for the new extent of inodes
  *		and the inodes themselves are initialized to reflect their
@@ -2177,14 +2166,14 @@
  *	this AG.  Must have read lock on imap inode.
  *
  * PARAMETERS:
- *      imap	- pointer to inode map control structure.
- *      iagp	- pointer to iag.
- *      extno	- extent number.
+ *	imap	- pointer to inode map control structure.
+ *	iagp	- pointer to iag.
+ *	extno	- extent number.
  *
  * RETURN VALUES:
- *      0       - success.
- *      -ENOSPC	- insufficient disk resources.
- *      -EIO	- i/o error.
+ *	0	- success.
+ *	-ENOSPC	- insufficient disk resources.
+ *	-EIO	- i/o error.
  */
 static int diNewExt(struct inomap * imap, struct iag * iagp, int extno)
 {
@@ -2430,7 +2419,7 @@
 
 
 /*
- * NAME:        diNewIAG(imap,iagnop,agno)
+ * NAME:	diNewIAG(imap,iagnop,agno)
  *
  * FUNCTION:	allocate a new iag for an allocation group.
  *
@@ -2443,16 +2432,16 @@
  *		and returned to satisfy the request.
  *
  * PARAMETERS:
- *      imap	- pointer to inode map control structure.
- *      iagnop	- pointer to an iag number set with the number of the
+ *	imap	- pointer to inode map control structure.
+ *	iagnop	- pointer to an iag number set with the number of the
  *		  newly allocated iag upon successful return.
- *      agno	- allocation group number.
+ *	agno	- allocation group number.
  *	bpp	- Buffer pointer to be filled in with new IAG's buffer
  *
  * RETURN VALUES:
- *      0       - success.
- *      -ENOSPC	- insufficient disk resources.
- *      -EIO	- i/o error.
+ *	0	- success.
+ *	-ENOSPC	- insufficient disk resources.
+ *	-EIO	- i/o error.
  *
  * serialization:
  *	AG lock held on entry/exit;
@@ -2461,7 +2450,7 @@
  *
  * note: new iag transaction:
  * . synchronously write iag;
- * . write log of xtree and inode  of imap;
+ * . write log of xtree and inode of imap;
  * . commit;
  * . synchronous write of xtree (right to left, bottom to top);
  * . at start of logredo(): init in-memory imap with one additional iag page;
@@ -2481,9 +2470,6 @@
 	s64 xaddr = 0;
 	s64 blkno;
 	tid_t tid;
-#ifdef _STILL_TO_PORT
-	xad_t xad;
-#endif				/*  _STILL_TO_PORT */
 	struct inode *iplist[1];
 
 	/* pick up pointers to the inode map and mount inodes */
@@ -2674,15 +2660,15 @@
 }
 
 /*
- * NAME:        diIAGRead()
+ * NAME:	diIAGRead()
  *
- * FUNCTION:    get the buffer for the specified iag within a fileset
+ * FUNCTION:	get the buffer for the specified iag within a fileset
  *		or aggregate inode map.
  *
  * PARAMETERS:
- *      imap	- pointer to inode map control structure.
- *      iagno	- iag number.
- *      bpp	- point to buffer pointer to be filled in on successful
+ *	imap	- pointer to inode map control structure.
+ *	iagno	- iag number.
+ *	bpp	- pointer to buffer pointer to be filled in on successful
  *		  exit.
  *
  * SERIALIZATION:
@@ -2691,8 +2677,8 @@
  *	 the read lock is unnecessary.)
  *
  * RETURN VALUES:
- *      0       - success.
- *      -EIO	- i/o error.
+ *	0	- success.
+ *	-EIO	- i/o error.
  */
 static int diIAGRead(struct inomap * imap, int iagno, struct metapage ** mpp)
 {
@@ -2712,17 +2698,17 @@
 }
 
 /*
- * NAME:        diFindFree()
+ * NAME:	diFindFree()
  *
- * FUNCTION:    find the first free bit in a word starting at
+ * FUNCTION:	find the first free bit in a word starting at
  *		the specified bit position.
  *
  * PARAMETERS:
- *      word	- word to be examined.
- *      start	- starting bit position.
+ *	word	- word to be examined.
+ *	start	- starting bit position.
  *
  * RETURN VALUES:
- *      bit position of first free bit in the word or 32 if
+ *	bit position of first free bit in the word or 32 if
  *	no free bits were found.
  */
 static int diFindFree(u32 word, int start)
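The contract documented above is compact enough to restate as a standalone helper. A hypothetical re-implementation, not the kernel's own code: bit 0 is taken to be the high-order bit, matching the HIGHORDER convention in jfs_types.h, and start is assumed to be in [0, 31].

	static int find_free_bit(u32 word, int start)
	{
		int bitno;
		u32 mask = 0x80000000u >> start;	/* HIGHORDER shifted to start */

		for (bitno = start; bitno < 32; bitno++, mask >>= 1)
			if (!(word & mask))
				return bitno;	/* first 0 (free) bit */
		return 32;	/* no free bits from start onward */
	}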
@@ -2897,7 +2883,7 @@
 		   atomic_read(&imap->im_numfree));
 
 	/*
-	 *      reconstruct imap
+	 *	reconstruct imap
 	 *
 	 * coalesce contiguous k (newAGSize/oldAGSize) AGs;
 	 * i.e., (AGi, ..., AGj) where i = k*n and j = k*(n+1) - 1 to AGn;
@@ -2913,7 +2899,7 @@
 	}
 
 	/*
-	 *      process each iag page of the map.
+	 *	process each iag page of the map.
 	 *
 	 * rebuild AG Free Inode List, AG Free Inode Extent List;
 	 */
@@ -2932,7 +2918,7 @@
 
 		/* leave free iag in the free iag list */
 		if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG)) {
-		        release_metapage(bp);
+			release_metapage(bp);
 			continue;
 		}
 
@@ -3063,13 +3049,13 @@
 }
 
 /*
- * NAME:        copy_from_dinode()
+ * NAME:	copy_from_dinode()
  *
- * FUNCTION:    Copies inode info from disk inode to in-memory inode
+ * FUNCTION:	Copies inode info from disk inode to in-memory inode
  *
  * RETURN VALUES:
- *      0       - success
- *      -ENOMEM	- insufficient memory
+ *	0	- success
+ *	-ENOMEM	- insufficient memory
  */
 static int copy_from_dinode(struct dinode * dip, struct inode *ip)
 {
@@ -3151,9 +3137,9 @@
 }
 
 /*
- * NAME:        copy_to_dinode()
+ * NAME:	copy_to_dinode()
  *
- * FUNCTION:    Copies inode info from in-memory inode to disk inode
+ * FUNCTION:	Copies inode info from in-memory inode to disk inode
  */
 static void copy_to_dinode(struct dinode * dip, struct inode *ip)
 {
diff --git a/fs/jfs/jfs_imap.h b/fs/jfs/jfs_imap.h
index 4f9c346..610a0e9 100644
--- a/fs/jfs/jfs_imap.h
+++ b/fs/jfs/jfs_imap.h
@@ -24,17 +24,17 @@
  *	jfs_imap.h: disk inode manager
  */
 
-#define	EXTSPERIAG	128	/* number of disk inode extent per iag  */
-#define IMAPBLKNO	0	/* lblkno of dinomap within inode map   */
-#define SMAPSZ		4	/* number of words per summary map      */
+#define	EXTSPERIAG	128	/* number of disk inode extent per iag	*/
+#define IMAPBLKNO	0	/* lblkno of dinomap within inode map	*/
+#define SMAPSZ		4	/* number of words per summary map	*/
 #define	EXTSPERSUM	32	/* number of extents per summary map entry */
 #define	L2EXTSPERSUM	5	/* l2 number of extents per summary map */
 #define	PGSPERIEXT	4	/* number of 4K pages per dinode extent */
-#define	MAXIAGS		((1<<20)-1)	/* maximum number of iags       */
-#define	MAXAG		128	/* maximum number of allocation groups  */
+#define	MAXIAGS		((1<<20)-1)	/* maximum number of iags	*/
+#define	MAXAG		128	/* maximum number of allocation groups	*/
 
-#define AMAPSIZE      512	/* bytes in the IAG allocation maps */
-#define SMAPSIZE      16	/* bytes in the IAG summary maps */
+#define AMAPSIZE	512	/* bytes in the IAG allocation maps */
+#define SMAPSIZE	16	/* bytes in the IAG summary maps */
 
 /* convert inode number to iag number */
 #define	INOTOIAG(ino)	((ino) >> L2INOSPERIAG)
@@ -60,31 +60,31 @@
  *	inode allocation group page (per 4096 inodes of an AG)
  */
 struct iag {
-	__le64 agstart;		/* 8: starting block of ag              */
-	__le32 iagnum;		/* 4: inode allocation group number     */
-	__le32 inofreefwd;	/* 4: ag inode free list forward        */
-	__le32 inofreeback;	/* 4: ag inode free list back           */
-	__le32 extfreefwd;	/* 4: ag inode extent free list forward */
-	__le32 extfreeback;	/* 4: ag inode extent free list back    */
-	__le32 iagfree;		/* 4: iag free list                     */
+	__le64 agstart;		/* 8: starting block of ag		*/
+	__le32 iagnum;		/* 4: inode allocation group number	*/
+	__le32 inofreefwd;	/* 4: ag inode free list forward	*/
+	__le32 inofreeback;	/* 4: ag inode free list back		*/
+	__le32 extfreefwd;	/* 4: ag inode extent free list forward	*/
+	__le32 extfreeback;	/* 4: ag inode extent free list back	*/
+	__le32 iagfree;		/* 4: iag free list			*/
 
 	/* summary map: 1 bit per inode extent */
 	__le32 inosmap[SMAPSZ];	/* 16: sum map of mapwords w/ free inodes;
-				 *      note: this indicates free and backed
-				 *      inodes, if the extent is not backed the
-				 *      value will be 1.  if the extent is
-				 *      backed but all inodes are being used the
-				 *      value will be 1.  if the extent is
-				 *      backed but at least one of the inodes is
-				 *      free the value will be 0.
+				 *	note: this indicates free and backed
+				 *	inodes, if the extent is not backed the
+				 *	value will be 1.  if the extent is
+				 *	backed but all inodes are being used the
+				 *	value will be 1.  if the extent is
+				 *	backed but at least one of the inodes is
+				 *	free the value will be 0.
 				 */
 	__le32 extsmap[SMAPSZ];	/* 16: sum map of mapwords w/ free extents */
-	__le32 nfreeinos;		/* 4: number of free inodes             */
-	__le32 nfreeexts;		/* 4: number of free extents            */
+	__le32 nfreeinos;	/* 4: number of free inodes		*/
+	__le32 nfreeexts;	/* 4: number of free extents		*/
 	/* (72) */
 	u8 pad[1976];		/* 1976: pad to 2048 bytes */
 	/* allocation bit map: 1 bit per inode (0 - free, 1 - allocated) */
-	__le32 wmap[EXTSPERIAG];	/* 512: working allocation map  */
+	__le32 wmap[EXTSPERIAG];	/* 512: working allocation map */
 	__le32 pmap[EXTSPERIAG];	/* 512: persistent allocation map */
 	pxd_t inoext[EXTSPERIAG];	/* 1024: inode extent addresses */
 };				/* (4096) */
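The summary/working map pair described above supports a two-level search: inosmap narrows the search to an extent that still holds a free backed inode, and wmap pinpoints the inode within it. An illustrative walk, not kernel code; find_free_bit() is the diFindFree()-style helper sketched earlier, and 32 inodes per extent (INOSPEREXT in the JFS headers) is assumed.

	static int iag_find_free_ino(const struct iag *iagp)
	{
		int sword, ext, ino;

		for (sword = 0; sword < SMAPSZ; sword++) {
			u32 smap = le32_to_cpu(iagp->inosmap[sword]);

			/* a 0 bit marks an extent holding >= 1 free inode */
			ext = find_free_bit(smap, 0);
			if (ext == 32)
				continue;
			ext += sword << L2EXTSPERSUM;

			/* a 0 bit in wmap marks a free inode in that extent */
			ino = find_free_bit(le32_to_cpu(iagp->wmap[ext]), 0);
			return ext * 32 + ino;
		}
		return -1;	/* no free backed inode in this iag */
	}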
@@ -93,44 +93,44 @@
  *	per AG control information (in inode map control page)
  */
 struct iagctl_disk {
-	__le32 inofree;		/* 4: free inode list anchor            */
-	__le32 extfree;		/* 4: free extent list anchor           */
-	__le32 numinos;		/* 4: number of backed inodes           */
-	__le32 numfree;		/* 4: number of free inodes             */
+	__le32 inofree;		/* 4: free inode list anchor		*/
+	__le32 extfree;		/* 4: free extent list anchor		*/
+	__le32 numinos;		/* 4: number of backed inodes		*/
+	__le32 numfree;		/* 4: number of free inodes		*/
 };				/* (16) */
 
 struct iagctl {
-	int inofree;		/* free inode list anchor            */
-	int extfree;		/* free extent list anchor           */
-	int numinos;		/* number of backed inodes           */
-	int numfree;		/* number of free inodes             */
+	int inofree;		/* free inode list anchor		*/
+	int extfree;		/* free extent list anchor		*/
+	int numinos;		/* number of backed inodes		*/
+	int numfree;		/* number of free inodes		*/
 };
 
 /*
  *	per fileset/aggregate inode map control page
  */
 struct dinomap_disk {
-	__le32 in_freeiag;	/* 4: free iag list anchor     */
-	__le32 in_nextiag;	/* 4: next free iag number     */
-	__le32 in_numinos;	/* 4: num of backed inodes */
+	__le32 in_freeiag;	/* 4: free iag list anchor	*/
+	__le32 in_nextiag;	/* 4: next free iag number	*/
+	__le32 in_numinos;	/* 4: num of backed inodes	*/
 	__le32 in_numfree;	/* 4: num of free backed inodes */
 	__le32 in_nbperiext;	/* 4: num of blocks per inode extent */
-	__le32 in_l2nbperiext;	/* 4: l2 of in_nbperiext */
-	__le32 in_diskblock;	/* 4: for standalone test driver  */
-	__le32 in_maxag;	/* 4: for standalone test driver  */
-	u8 pad[2016];		/* 2016: pad to 2048 */
+	__le32 in_l2nbperiext;	/* 4: l2 of in_nbperiext	*/
+	__le32 in_diskblock;	/* 4: for standalone test driver */
+	__le32 in_maxag;	/* 4: for standalone test driver */
+	u8 pad[2016];		/* 2016: pad to 2048		*/
 	struct iagctl_disk in_agctl[MAXAG]; /* 2048: AG control information */
 };				/* (4096) */
 
 struct dinomap {
-	int in_freeiag;		/* free iag list anchor     */
-	int in_nextiag;		/* next free iag number     */
-	int in_numinos;		/* num of backed inodes */
-	int in_numfree;		/* num of free backed inodes */
+	int in_freeiag;		/* free iag list anchor		*/
+	int in_nextiag;		/* next free iag number		*/
+	int in_numinos;		/* num of backed inodes		*/
+	int in_numfree;		/* num of free backed inodes	*/
 	int in_nbperiext;	/* num of blocks per inode extent */
-	int in_l2nbperiext;	/* l2 of in_nbperiext */
-	int in_diskblock;	/* for standalone test driver  */
-	int in_maxag;		/* for standalone test driver  */
+	int in_l2nbperiext;	/* l2 of in_nbperiext		*/
+	int in_diskblock;	/* for standalone test driver	*/
+	int in_maxag;		/* for standalone test driver	*/
 	struct iagctl in_agctl[MAXAG];	/* AG control information */
 };
 
@@ -139,9 +139,9 @@
  */
 struct inomap {
 	struct dinomap im_imap;		/* 4096: inode allocation control */
-	struct inode *im_ipimap;	/* 4: ptr to inode for imap   */
-	struct mutex im_freelock;	/* 4: iag free list lock      */
-	struct mutex im_aglock[MAXAG];	/* 512: per AG locks          */
+	struct inode *im_ipimap;	/* 4: ptr to inode for imap	*/
+	struct mutex im_freelock;	/* 4: iag free list lock	*/
+	struct mutex im_aglock[MAXAG];	/* 512: per AG locks		*/
 	u32 *im_DBGdimap;
 	atomic_t im_numinos;	/* num of backed inodes */
 	atomic_t im_numfree;	/* num of free backed inodes */
diff --git a/fs/jfs/jfs_incore.h b/fs/jfs/jfs_incore.h
index 8f453ef..cb8f309 100644
--- a/fs/jfs/jfs_incore.h
+++ b/fs/jfs/jfs_incore.h
@@ -40,7 +40,7 @@
 	uint	mode2;		/* jfs-specific mode		*/
 	uint	saved_uid;	/* saved for uid mount option */
 	uint	saved_gid;	/* saved for gid mount option */
-	pxd_t   ixpxd;		/* inode extent descriptor	*/
+	pxd_t	ixpxd;		/* inode extent descriptor	*/
 	dxd_t	acl;		/* dxd describing acl	*/
 	dxd_t	ea;		/* dxd describing ea	*/
 	time_t	otime;		/* time created	*/
@@ -190,7 +190,7 @@
 	uint		gengen;		/* inode generation generator*/
 	uint		inostamp;	/* shows inode belongs to fileset*/
 
-        /* Formerly in ipbmap */
+	/* Formerly in ipbmap */
 	struct bmap	*bmap;		/* incore bmap descriptor	*/
 	struct nls_table *nls_tab;	/* current codepage		*/
 	struct inode *direct_inode;	/* metadata inode */
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 44a2f33..de3e4a5 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -244,7 +244,7 @@
 		goto writeRecord;
 
 	/*
-	 *      initialize/update page/transaction recovery lsn
+	 *	initialize/update page/transaction recovery lsn
 	 */
 	lsn = log->lsn;
 
@@ -263,7 +263,7 @@
 	}
 
 	/*
-	 *      initialize/update lsn of tblock of the page
+	 *	initialize/update lsn of tblock of the page
 	 *
 	 * transaction inherits oldest lsn of pages associated
 	 * with allocation/deallocation of resources (their
@@ -307,7 +307,7 @@
 	LOGSYNC_UNLOCK(log, flags);
 
 	/*
-	 *      write the log record
+	 *	write the log record
 	 */
       writeRecord:
 	lsn = lmWriteRecord(log, tblk, lrd, tlck);
@@ -372,7 +372,7 @@
 		goto moveLrd;
 
 	/*
-	 *      move log record data
+	 *	move log record data
 	 */
 	/* retrieve source meta-data page to log */
 	if (tlck->flag & tlckPAGELOCK) {
@@ -465,7 +465,7 @@
 	}
 
 	/*
-	 *      move log record descriptor
+	 *	move log record descriptor
 	 */
       moveLrd:
 	lrd->length = cpu_to_le16(len);
@@ -574,7 +574,7 @@
 	LOGGC_LOCK(log);
 
 	/*
-	 *      write or queue the full page at the tail of write queue
+	 *	write or queue the full page at the tail of write queue
 	 */
 	/* get the tail tblk on commit queue */
 	if (list_empty(&log->cqueue))
@@ -625,7 +625,7 @@
 	LOGGC_UNLOCK(log);
 
 	/*
-	 *      allocate/initialize next page
+	 *	allocate/initialize next page
 	 */
 	/* if log wraps, the first data page of log is 2
 	 * (0 never used, 1 is superblock).
@@ -953,7 +953,7 @@
 		}
 
 	/*
-	 *      forward syncpt
+	 *	forward syncpt
 	 */
 	/* if last sync is same as last syncpt,
 	 * invoke sync point forward processing to update sync.
@@ -989,7 +989,7 @@
 		lsn = log->lsn;
 
 	/*
-	 *      setup next syncpt trigger (SWAG)
+	 *	setup next syncpt trigger (SWAG)
 	 */
 	logsize = log->logsize;
 
@@ -1000,11 +1000,11 @@
 	if (more < 2 * LOGPSIZE) {
 		jfs_warn("\n ... Log Wrap ... Log Wrap ... Log Wrap ...\n");
 		/*
-		 *      log wrapping
+		 *	log wrapping
 		 *
 		 * option 1 - panic ? No.!
 		 * option 2 - shutdown file systems
-		 *            associated with log ?
+		 *	      associated with log ?
 		 * option 3 - extend log ?
 		 */
 		/*
@@ -1062,7 +1062,7 @@
 /*
  * NAME:	lmLogOpen()
  *
- * FUNCTION:    open the log on first open;
+ * FUNCTION:	open the log on first open;
  *	insert filesystem in the active list of the log.
  *
  * PARAMETER:	ipmnt	- file system mount inode
@@ -1113,7 +1113,7 @@
 	init_waitqueue_head(&log->syncwait);
 
 	/*
-	 *      external log as separate logical volume
+	 *	external log as separate logical volume
 	 *
 	 * file systems to log may have n-to-1 relationship;
 	 */
@@ -1155,7 +1155,7 @@
 	return 0;
 
 	/*
-	 *      unwind on error
+	 *	unwind on error
 	 */
       shutdown:		/* unwind lbmLogInit() */
 	list_del(&log->journal_list);
@@ -1427,7 +1427,7 @@
 	return 0;
 
 	/*
-	 *      unwind on error
+	 *	unwind on error
 	 */
       errout30:		/* release log page */
 	log->wqueue = NULL;
@@ -1480,7 +1480,7 @@
 
 	if (test_bit(log_INLINELOG, &log->flag)) {
 		/*
-		 *      in-line log in host file system
+		 *	in-line log in host file system
 		 */
 		rc = lmLogShutdown(log);
 		kfree(log);
@@ -1504,7 +1504,7 @@
 		goto out;
 
 	/*
-	 *      external log as separate logical volume
+	 *	external log as separate logical volume
 	 */
 	list_del(&log->journal_list);
 	bdev = log->bdev;
@@ -1622,20 +1622,26 @@
 	if (!list_empty(&log->synclist)) {
 		struct logsyncblk *lp;
 
+		printk(KERN_ERR "jfs_flush_journal: synclist not empty\n");
 		list_for_each_entry(lp, &log->synclist, synclist) {
 			if (lp->xflag & COMMIT_PAGE) {
 				struct metapage *mp = (struct metapage *)lp;
-				dump_mem("orphan metapage", lp,
-					 sizeof(struct metapage));
-				dump_mem("page", mp->page, sizeof(struct page));
-			}
-			else
-				dump_mem("orphan tblock", lp,
-					 sizeof(struct tblock));
+				print_hex_dump(KERN_ERR, "metapage: ",
+					       DUMP_PREFIX_ADDRESS, 16, 4,
+					       mp, sizeof(struct metapage), 0);
+				print_hex_dump(KERN_ERR, "page: ",
+					       DUMP_PREFIX_ADDRESS, 16,
+					       sizeof(long), mp->page,
+					       sizeof(struct page), 0);
+			} else
+				print_hex_dump(KERN_ERR, "tblock:",
+					       DUMP_PREFIX_ADDRESS, 16, 4,
+					       lp, sizeof(struct tblock), 0);
 		}
 	}
+#else
+	WARN_ON(!list_empty(&log->synclist));
 #endif
-	//assert(list_empty(&log->synclist));
 	clear_bit(log_FLUSH, &log->flag);
 }
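This hunk, like the matching ones in jfs_metapage.c and jfs_txnmgr.c further down, replaces the JFS-private dump_mem() helper with the kernel's generic print_hex_dump(). A minimal sketch of the call shape, wrapped in a hypothetical helper with struct tblock as the payload:

	/* dump an object as hex: 16 bytes per line in 4-byte groups,
	 * each line prefixed with its kernel virtual address; the
	 * trailing 0 suppresses the ASCII column */
	static void dump_tblock(struct tblock *tblk)
	{
		print_hex_dump(KERN_ERR, "tblock: ", DUMP_PREFIX_ADDRESS,
			       16, 4, tblk, sizeof(*tblk), 0);
	}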
 
@@ -1723,7 +1729,7 @@
  *
 * PARAMETER:	log	- pointer to log's inode.
  *		fsdev	- kdev_t of filesystem.
- *		serial  - pointer to returned log serial number
+ *		serial	- pointer to returned log serial number
  *		activate - insert/remove device from active list.
  *
  * RETURN:	0	- success
@@ -1963,7 +1969,7 @@
  * FUNCTION:	add a log buffer to the log redrive list
  *
  * PARAMETER:
- *     bp	- log buffer
+ *	bp	- log buffer
  *
  * NOTES:
  *	Takes log_redrive_lock.
@@ -2054,7 +2060,7 @@
 	bp->l_flag = flag;
 
 	/*
-	 *      insert bp at tail of write queue associated with log
+	 *	insert bp at tail of write queue associated with log
 	 *
 	 * (request is either for bp already/currently at head of queue
 	 * or new bp to be inserted at tail)
@@ -2117,7 +2123,7 @@
 	    log->base + (bp->l_pn << (L2LOGPSIZE - log->l2bsize));
 
 	/*
-	 *      initiate pageout of the page
+	 *	initiate pageout of the page
 	 */
 	lbmStartIO(bp);
 }
@@ -2128,7 +2134,7 @@
  *
  * FUNCTION:	Interface to DD strategy routine
  *
- * RETURN:      none
+ * RETURN:	none
  *
  * serialization: LCACHE_LOCK() is NOT held during log i/o;
  */
@@ -2222,7 +2228,7 @@
 	bio_put(bio);
 
 	/*
-	 *      pagein completion
+	 *	pagein completion
 	 */
 	if (bp->l_flag & lbmREAD) {
 		bp->l_flag &= ~lbmREAD;
@@ -2236,7 +2242,7 @@
 	}
 
 	/*
-	 *      pageout completion
+	 *	pageout completion
 	 *
 	 * the bp at the head of write queue has completed pageout.
 	 *
@@ -2302,7 +2308,7 @@
 	}
 
 	/*
-	 *      synchronous pageout:
+	 *	synchronous pageout:
 	 *
 	 * buffer has not necessarily been removed from write queue
 	 * (e.g., synchronous write of partial-page with COMMIT):
@@ -2316,7 +2322,7 @@
 	}
 
 	/*
-	 *      Group Commit pageout:
+	 *	Group Commit pageout:
 	 */
 	else if (bp->l_flag & lbmGC) {
 		LCACHE_UNLOCK(flags);
@@ -2324,7 +2330,7 @@
 	}
 
 	/*
-	 *      asynchronous pageout:
+	 *	asynchronous pageout:
 	 *
 	 * buffer must have been removed from write queue:
 	 * insert buffer at head of freelist where it can be recycled
@@ -2375,7 +2381,7 @@
  * FUNCTION:	format file system log
  *
  * PARAMETERS:
- *      log	- volume log
+ *	log	- volume log
  *	logAddress - start address of log space in FS block
  *	logSize	- length of log space in FS block;
  *
@@ -2407,16 +2413,16 @@
 	npages = logSize >> sbi->l2nbperpage;
 
 	/*
-	 *      log space:
+	 *	log space:
 	 *
 	 * page 0 - reserved;
 	 * page 1 - log superblock;
 	 * page 2 - log data page: A SYNC log record is written
-	 *          into this page at logform time;
+	 *	    into this page at logform time;
 	 * pages 3-N - log data page: set to empty log data pages;
 	 */
 	/*
-	 *      init log superblock: log page 1
+	 *	init log superblock: log page 1
 	 */
 	logsuper = (struct logsuper *) bp->l_ldata;
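The log-space layout described above maps page numbers to offsets directly. A simplified sketch in bytes; the driver itself works in file-system blocks, as in the lbmStartIO hunk earlier, and L2LOGPSIZE is the log page shift from jfs_logmgr.h.

	static inline s64 log_page_byte_off(s64 log_base_bytes, int n)
	{
		/* page 0 reserved, page 1 log superblock, data from page 2 */
		return log_base_bytes + ((s64) n << L2LOGPSIZE);
	}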
 
@@ -2436,7 +2442,7 @@
 		goto exit;
 
 	/*
-	 *      init pages 2 to npages-1 as log data pages:
+	 *	init pages 2 to npages-1 as log data pages:
 	 *
 	 * log page sequence number (lpsn) initialization:
 	 *
@@ -2479,7 +2485,7 @@
 		goto exit;
 
 	/*
-	 *      initialize succeeding log pages: lpsn = 0, 1, ..., (N-2)
+	 *	initialize succeeding log pages: lpsn = 0, 1, ..., (N-2)
 	 */
 	for (lspn = 0; lspn < npages - 3; lspn++) {
 		lp->h.page = lp->t.page = cpu_to_le32(lspn);
@@ -2495,7 +2501,7 @@
 	rc = 0;
 exit:
 	/*
-	 *      finalize log
+	 *	finalize log
 	 */
 	/* release the buffer */
 	lbmFree(bp);
diff --git a/fs/jfs/jfs_logmgr.h b/fs/jfs/jfs_logmgr.h
index a53fb17..1f85ef0 100644
--- a/fs/jfs/jfs_logmgr.h
+++ b/fs/jfs/jfs_logmgr.h
@@ -144,7 +144,7 @@
  *
  * (this comment should be rewritten !)
  * jfs uses only "after" log records (only a single writer is allowed
- * in a  page, pages are written to temporary paging space if
+ * in a page, pages are written to temporary paging space
  * if they must be written to disk before commit, and i/o is
  * scheduled for modified pages to their home location after
  * the log records containing the after values and the commit
@@ -153,7 +153,7 @@
  *
  * a log record consists of a data area of variable length followed by
  * a descriptor of fixed size LOGRDSIZE bytes.
- * the  data area is rounded up to an integral number of 4-bytes and
+ * the data area is rounded up to an integral number of 4-bytes and
  * must be no longer than LOGPSIZE.
  * the descriptor is of size of multiple of 4-bytes and aligned on a
  * 4-byte boundary.
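A record's on-log footprint follows from that rounding rule. A minimal sketch, assuming LOGRDSIZE (the fixed descriptor size) from this header:

	static inline int lrd_record_bytes(int datalen)
	{
		/* data area rounded up to a 4-byte multiple, plus the
		 * fixed-size descriptor that follows it */
		return ((datalen + 3) & ~3) + LOGRDSIZE;
	}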
@@ -215,13 +215,13 @@
 	union {
 
 		/*
-		 *      COMMIT: commit
+		 *	COMMIT: commit
 		 *
 		 * transaction commit: no type-dependent information;
 		 */
 
 		/*
-		 *      REDOPAGE: after-image
+		 *	REDOPAGE: after-image
 		 *
 		 * apply after-image;
 		 *
@@ -236,7 +236,7 @@
 		} redopage;	/* (20) */
 
 		/*
-		 *      NOREDOPAGE: the page is freed
+		 *	NOREDOPAGE: the page is freed
 		 *
 		 * do not apply after-image records which precede this record
 		 * in the log with the same page block number to this page.
@@ -252,7 +252,7 @@
 		} noredopage;	/* (20) */
 
 		/*
-		 *      UPDATEMAP: update block allocation map
+		 *	UPDATEMAP: update block allocation map
 		 *
 		 * either in-line PXD,
 		 * or     out-of-line  XADLIST;
@@ -268,7 +268,7 @@
 		} updatemap;	/* (20) */
 
 		/*
-		 *      NOREDOINOEXT: the inode extent is freed
+		 *	NOREDOINOEXT: the inode extent is freed
 		 *
 		 * do not apply after-image records which precede this
  *		 record in the log with any of the 4 page block
@@ -286,7 +286,7 @@
 		} noredoinoext;	/* (20) */
 
 		/*
-		 *      SYNCPT: log sync point
+		 *	SYNCPT: log sync point
 		 *
  * replay log up to syncpt address specified;
 		 */
@@ -295,13 +295,13 @@
 		} syncpt;
 
 		/*
-		 *      MOUNT: file system mount
+		 *	MOUNT: file system mount
 		 *
 		 * file system mount: no type-dependent information;
 		 */
 
 		/*
-		 *      ? FREEXTENT: free specified extent(s)
+		 *	? FREEXTENT: free specified extent(s)
 		 *
 		 * free specified extent(s) from block allocation map
 		 * N.B.: nextents should be length of data/sizeof(xad_t)
@@ -314,7 +314,7 @@
 		} freextent;
 
 		/*
-		 *      ? NOREDOFILE: this file is freed
+		 *	? NOREDOFILE: this file is freed
 		 *
 		 * do not apply records which precede this record in the log
 		 * with the same inode number.
@@ -330,7 +330,7 @@
 		} noredofile;
 
 		/*
-		 *      ? NEWPAGE:
+		 *	? NEWPAGE:
 		 *
 		 * metadata type dependent
 		 */
@@ -342,7 +342,7 @@
 		} newpage;
 
 		/*
-		 *      ? DUMMY: filler
+		 *	? DUMMY: filler
 		 *
 		 * no type-dependent information
 		 */
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 43d4f69..77c7f11 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -472,7 +472,8 @@
 	printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
 	goto skip;
 dump_bio:
-	dump_mem("bio", bio, sizeof(*bio));
+	print_hex_dump(KERN_ERR, "JFS: dump of bio: ", DUMP_PREFIX_ADDRESS, 16,
+		       4, bio, sizeof(*bio), 0);
 skip:
 	bio_put(bio);
 	unlock_page(page);
diff --git a/fs/jfs/jfs_mount.c b/fs/jfs/jfs_mount.c
index 4dd4798..644429a 100644
--- a/fs/jfs/jfs_mount.c
+++ b/fs/jfs/jfs_mount.c
@@ -80,7 +80,7 @@
  */
 int jfs_mount(struct super_block *sb)
 {
-	int rc = 0;		/* Return code          */
+	int rc = 0;		/* Return code */
 	struct jfs_sb_info *sbi = JFS_SBI(sb);
 	struct inode *ipaimap = NULL;
 	struct inode *ipaimap2 = NULL;
@@ -169,7 +169,7 @@
 		sbi->ipaimap2 = NULL;
 
 	/*
-	 *      mount (the only/single) fileset
+	 *	mount (the only/single) fileset
 	 */
 	/*
 	 * open fileset inode allocation map (aka fileset inode)
@@ -195,7 +195,7 @@
 	goto out;
 
 	/*
-	 *      unwind on error
+	 *	unwind on error
 	 */
       errout41:		/* close fileset inode allocation map inode */
 	diFreeSpecial(ipimap);
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index 25430d0..7aa1f70 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -18,7 +18,7 @@
  */
 
 /*
- *      jfs_txnmgr.c: transaction manager
+ *	jfs_txnmgr.c: transaction manager
  *
  * notes:
  * transaction starts with txBegin() and ends with txCommit()
@@ -60,7 +60,7 @@
 #include "jfs_debug.h"
 
 /*
- *      transaction management structures
+ *	transaction management structures
  */
 static struct {
 	int freetid;		/* index of a free tid structure */
@@ -103,19 +103,19 @@
 MODULE_PARM_DESC(nTxLock,
 		 "Number of transaction locks (max:65536)");
 
-struct tblock *TxBlock;	        /* transaction block table */
-static int TxLockLWM;		/* Low water mark for number of txLocks used */
-static int TxLockHWM;		/* High water mark for number of txLocks used */
-static int TxLockVHWM;		/* Very High water mark */
-struct tlock *TxLock;           /* transaction lock table */
+struct tblock *TxBlock;	/* transaction block table */
+static int TxLockLWM;	/* Low water mark for number of txLocks used */
+static int TxLockHWM;	/* High water mark for number of txLocks used */
+static int TxLockVHWM;	/* Very High water mark */
+struct tlock *TxLock;	/* transaction lock table */
 
 /*
- *      transaction management lock
+ *	transaction management lock
  */
 static DEFINE_SPINLOCK(jfsTxnLock);
 
-#define TXN_LOCK()              spin_lock(&jfsTxnLock)
-#define TXN_UNLOCK()            spin_unlock(&jfsTxnLock)
+#define TXN_LOCK()		spin_lock(&jfsTxnLock)
+#define TXN_UNLOCK()		spin_unlock(&jfsTxnLock)
 
 #define LAZY_LOCK_INIT()	spin_lock_init(&TxAnchor.LazyLock);
 #define LAZY_LOCK(flags)	spin_lock_irqsave(&TxAnchor.LazyLock, flags)
@@ -148,7 +148,7 @@
 #define TXN_WAKEUP(event) wake_up_all(event)
 
 /*
- *      statistics
+ *	statistics
  */
 static struct {
 	tid_t maxtid;		/* 4: biggest tid ever used */
@@ -181,8 +181,8 @@
 static void LogSyncRelease(struct metapage * mp);
 
 /*
- *              transaction block/lock management
- *              ---------------------------------
+ *		transaction block/lock management
+ *		---------------------------------
  */
 
 /*
@@ -227,9 +227,9 @@
 }
 
 /*
- * NAME:        txInit()
+ * NAME:	txInit()
  *
- * FUNCTION:    initialize transaction management structures
+ * FUNCTION:	initialize transaction management structures
  *
  * RETURN:
  *
@@ -333,9 +333,9 @@
 }
 
 /*
- * NAME:        txExit()
+ * NAME:	txExit()
  *
- * FUNCTION:    clean up when module is unloaded
+ * FUNCTION:	clean up when module is unloaded
  */
 void txExit(void)
 {
@@ -346,12 +346,12 @@
 }
 
 /*
- * NAME:        txBegin()
+ * NAME:	txBegin()
  *
- * FUNCTION:    start a transaction.
+ * FUNCTION:	start a transaction.
  *
- * PARAMETER:   sb	- superblock
- *              flag	- force for nested tx;
+ * PARAMETER:	sb	- superblock
+ *		flag	- force for nested tx;
  *
  * RETURN:	tid	- transaction id
  *
@@ -447,13 +447,13 @@
 }
 
 /*
- * NAME:        txBeginAnon()
+ * NAME:	txBeginAnon()
  *
- * FUNCTION:    start an anonymous transaction.
+ * FUNCTION:	start an anonymous transaction.
  *		Blocks if logsync or available tlocks are low to prevent
  *		anonymous tlocks from depleting supply.
  *
- * PARAMETER:   sb	- superblock
+ * PARAMETER:	sb	- superblock
  *
  * RETURN:	none
  */
@@ -489,11 +489,11 @@
 }
 
 /*
- *      txEnd()
+ *	txEnd()
  *
  * function: free specified transaction block.
  *
- *      logsync barrier processing:
+ *	logsync barrier processing:
  *
  * serialization:
  */
@@ -577,13 +577,13 @@
 }
 
 /*
- *      txLock()
+ *	txLock()
  *
  * function: acquire a transaction lock on the specified <mp>
  *
  * parameter:
  *
- * return:      transaction lock id
+ * return:	transaction lock id
  *
  * serialization:
  */
@@ -829,12 +829,16 @@
 	/* Only locks on ipimap or ipaimap should reach here */
 	/* assert(jfs_ip->fileset == AGGREGATE_I); */
 	if (jfs_ip->fileset != AGGREGATE_I) {
-		jfs_err("txLock: trying to lock locked page!");
-		dump_mem("ip", ip, sizeof(struct inode));
-		dump_mem("mp", mp, sizeof(struct metapage));
-		dump_mem("Locker's tblk", tid_to_tblock(tid),
-			 sizeof(struct tblock));
-		dump_mem("Tlock", tlck, sizeof(struct tlock));
+		printk(KERN_ERR "txLock: trying to lock locked page!\n");
+		print_hex_dump(KERN_ERR, "ip: ", DUMP_PREFIX_ADDRESS, 16, 4,
+			       ip, sizeof(*ip), 0);
+		print_hex_dump(KERN_ERR, "mp: ", DUMP_PREFIX_ADDRESS, 16, 4,
+			       mp, sizeof(*mp), 0);
+		print_hex_dump(KERN_ERR, "Locker's tblock: ",
+			       DUMP_PREFIX_ADDRESS, 16, 4, tid_to_tblock(tid),
+			       sizeof(struct tblock), 0);
+		print_hex_dump(KERN_ERR, "Tlock: ", DUMP_PREFIX_ADDRESS, 16, 4,
+			       tlck, sizeof(*tlck), 0);
 		BUG();
 	}
 	INCREMENT(stattx.waitlock);	/* statistics */
@@ -857,17 +861,17 @@
 }
 
 /*
- * NAME:        txRelease()
+ * NAME:	txRelease()
  *
- * FUNCTION:    Release buffers associated with transaction locks, but don't
+ * FUNCTION:	Release buffers associated with transaction locks, but don't
  *		mark homeok yet.  This allows other transactions to modify
  *		buffers, but won't let them go to disk until commit record
  *		actually gets written.
  *
  * PARAMETER:
- *              tblk    -
+ *		tblk	-
  *
- * RETURN:      Errors from subroutines.
+ * RETURN:	Errors from subroutines.
  */
 static void txRelease(struct tblock * tblk)
 {
@@ -896,10 +900,10 @@
 }
 
 /*
- * NAME:        txUnlock()
+ * NAME:	txUnlock()
  *
- * FUNCTION:    Initiates pageout of pages modified by tid in journalled
- *              objects and frees their lockwords.
+ * FUNCTION:	Initiates pageout of pages modified by tid in journalled
+ *		objects and frees their lockwords.
  */
 static void txUnlock(struct tblock * tblk)
 {
@@ -983,10 +987,10 @@
 }
 
 /*
- *      txMaplock()
+ *	txMaplock()
  *
  * function: allocate a transaction lock for freed page/entry;
- *      for freed page, maplock is used as xtlock/dtlock type;
+ *	for freed page, maplock is used as xtlock/dtlock type;
  */
 struct tlock *txMaplock(tid_t tid, struct inode *ip, int type)
 {
@@ -1057,7 +1061,7 @@
 }
 
 /*
- *      txLinelock()
+ *	txLinelock()
  *
  * function: allocate a transaction lock for log vector list
  */
@@ -1092,39 +1096,39 @@
 }
 
 /*
- *              transaction commit management
- *              -----------------------------
+ *		transaction commit management
+ *		-----------------------------
  */
 
 /*
- * NAME:        txCommit()
+ * NAME:	txCommit()
  *
- * FUNCTION:    commit the changes to the objects specified in
- *              clist.  For journalled segments only the
- *              changes of the caller are committed, ie by tid.
- *              for non-journalled segments the data are flushed to
- *              disk and then the change to the disk inode and indirect
- *              blocks committed (so blocks newly allocated to the
- *              segment will be made a part of the segment atomically).
+ * FUNCTION:	commit the changes to the objects specified in
+ *		clist.  For journalled segments only the
+ *		changes of the caller are committed, i.e. by tid.
+ *		for non-journalled segments the data are flushed to
+ *		disk and then the change to the disk inode and indirect
+ *		blocks committed (so blocks newly allocated to the
+ *		segment will be made a part of the segment atomically).
  *
- *              all of the segments specified in clist must be in
- *              one file system. no more than 6 segments are needed
- *              to handle all unix svcs.
+ *		all of the segments specified in clist must be in
+ *		one file system. no more than 6 segments are needed
+ *		to handle all unix svcs.
  *
- *              if the i_nlink field (i.e. disk inode link count)
- *              is zero, and the type of inode is a regular file or
- *              directory, or symbolic link , the inode is truncated
- *              to zero length. the truncation is committed but the
- *              VM resources are unaffected until it is closed (see
- *              iput and iclose).
+ *		if the i_nlink field (i.e. disk inode link count)
+ *		is zero, and the type of inode is a regular file or
+ *		directory, or symbolic link , the inode is truncated
+ *		to zero length. the truncation is committed but the
+ *		VM resources are unaffected until it is closed (see
+ *		iput and iclose).
  *
  * PARAMETER:
  *
  * RETURN:
  *
  * serialization:
- *              on entry the inode lock on each segment is assumed
- *              to be held.
+ *		on entry the inode lock on each segment is assumed
+ *		to be held.
  *
  * i/o error:
  */
@@ -1175,7 +1179,7 @@
 	if ((flag & (COMMIT_FORCE | COMMIT_SYNC)) == 0)
 		tblk->xflag |= COMMIT_LAZY;
 	/*
-	 *      prepare non-journaled objects for commit
+	 *	prepare non-journaled objects for commit
 	 *
 	 * flush data pages of non-journaled file
 	 * to prevent the file getting non-initialized disk blocks
@@ -1186,7 +1190,7 @@
 	cd.nip = nip;
 
 	/*
-	 *      acquire transaction lock on (on-disk) inodes
+	 *	acquire transaction lock on (on-disk) inodes
 	 *
 	 * update on-disk inode from in-memory inode
 	 * acquiring transaction locks for AFTER records
@@ -1262,7 +1266,7 @@
 	}
 
 	/*
-	 *      write log records from transaction locks
+	 *	write log records from transaction locks
 	 *
 	 * txUpdateMap() resets XAD_NEW in XAD.
 	 */
@@ -1294,7 +1298,7 @@
 		!test_cflag(COMMIT_Nolink, tblk->u.ip)));
 
 	/*
-	 *      write COMMIT log record
+	 *	write COMMIT log record
 	 */
 	lrd->type = cpu_to_le16(LOG_COMMIT);
 	lrd->length = 0;
@@ -1303,7 +1307,7 @@
 	lmGroupCommit(log, tblk);
 
 	/*
-	 *      - transaction is now committed -
+	 *	- transaction is now committed -
 	 */
 
 	/*
@@ -1314,11 +1318,11 @@
 		txForce(tblk);
 
 	/*
-	 *      update allocation map.
+	 *	update allocation map.
 	 *
 	 * update inode allocation map and inode:
 	 * free pager lock on memory object of inode if any.
-	 * update  block allocation map.
+	 * update block allocation map.
 	 *
 	 * txUpdateMap() resets XAD_NEW in XAD.
 	 */
@@ -1326,7 +1330,7 @@
 		txUpdateMap(tblk);
 
 	/*
-	 *      free transaction locks and pageout/free pages
+	 *	free transaction locks and pageout/free pages
 	 */
 	txRelease(tblk);
 
@@ -1335,7 +1339,7 @@
 
 
 	/*
-	 *      reset in-memory object state
+	 *	reset in-memory object state
 	 */
 	for (k = 0; k < cd.nip; k++) {
 		ip = cd.iplist[k];
@@ -1358,11 +1362,11 @@
 }
 
 /*
- * NAME:        txLog()
+ * NAME:	txLog()
  *
- * FUNCTION:    Writes AFTER log records for all lines modified
- *              by tid for segments specified by inodes in comdata.
- *              Code assumes only WRITELOCKS are recorded in lockwords.
+ * FUNCTION:	Writes AFTER log records for all lines modified
+ *		by tid for segments specified by inodes in comdata.
+ *		Code assumes only WRITELOCKS are recorded in lockwords.
  *
  * PARAMETERS:
  *
@@ -1421,12 +1425,12 @@
 }
 
 /*
- *      diLog()
+ *	diLog()
  *
- * function:    log inode tlock and format maplock to update bmap;
+ * function:	log inode tlock and format maplock to update bmap;
  */
 static int diLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
-	  struct tlock * tlck, struct commit * cd)
+		 struct tlock * tlck, struct commit * cd)
 {
 	int rc = 0;
 	struct metapage *mp;
@@ -1442,7 +1446,7 @@
 	pxd = &lrd->log.redopage.pxd;
 
 	/*
-	 *      inode after image
+	 *	inode after image
 	 */
 	if (tlck->type & tlckENTRY) {
 		/* log after-image for logredo(): */
@@ -1456,7 +1460,7 @@
 		tlck->flag |= tlckWRITEPAGE;
 	} else if (tlck->type & tlckFREE) {
 		/*
-		 *      free inode extent
+		 *	free inode extent
 		 *
 		 * (pages of the freed inode extent have been invalidated and
 		 * a maplock for free of the extent has been formatted at
@@ -1498,7 +1502,7 @@
 		jfs_err("diLog: UFO type tlck:0x%p", tlck);
 #ifdef  _JFS_WIP
 	/*
-	 *      alloc/free external EA extent
+	 *	alloc/free external EA extent
 	 *
 	 * a maplock for txUpdateMap() to update bPWMAP for alloc/free
 	 * of the extent has been formatted at txLock() time;
@@ -1534,9 +1538,9 @@
 }
 
 /*
- *      dataLog()
+ *	dataLog()
  *
- * function:    log data tlock
+ * function:	log data tlock
  */
 static int dataLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
 	    struct tlock * tlck)
@@ -1580,9 +1584,9 @@
 }
 
 /*
- *      dtLog()
+ *	dtLog()
  *
- * function:    log dtree tlock and format maplock to update bmap;
+ * function:	log dtree tlock and format maplock to update bmap;
  */
 static void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
 	   struct tlock * tlck)
@@ -1603,10 +1607,10 @@
 		lrd->log.redopage.type |= cpu_to_le16(LOG_BTROOT);
 
 	/*
-	 *      page extension via relocation: entry insertion;
-	 *      page extension in-place: entry insertion;
-	 *      new right page from page split, reinitialized in-line
-	 *      root from root page split: entry insertion;
+	 *	page extension via relocation: entry insertion;
+	 *	page extension in-place: entry insertion;
+	 *	new right page from page split, reinitialized in-line
+	 *	root from root page split: entry insertion;
 	 */
 	if (tlck->type & (tlckNEW | tlckEXTEND)) {
 		/* log after-image of the new page for logredo():
@@ -1641,8 +1645,8 @@
 	}
 
 	/*
-	 *      entry insertion/deletion,
-	 *      sibling page link update (old right page before split);
+	 *	entry insertion/deletion,
+	 *	sibling page link update (old right page before split);
 	 */
 	if (tlck->type & (tlckENTRY | tlckRELINK)) {
 		/* log after-image for logredo(): */
@@ -1658,11 +1662,11 @@
 	}
 
 	/*
-	 *      page deletion: page has been invalidated
-	 *      page relocation: source extent
+	 *	page deletion: page has been invalidated
+	 *	page relocation: source extent
 	 *
-	 *      a maplock for free of the page has been formatted
-	 *      at txLock() time);
+	 *	a maplock for free of the page has been formatted
+	 *	at txLock() time);
 	 */
 	if (tlck->type & (tlckFREE | tlckRELOCATE)) {
 		/* log LOG_NOREDOPAGE of the deleted page for logredo()
@@ -1683,9 +1687,9 @@
 }
 
 /*
- *      xtLog()
+ *	xtLog()
  *
- * function:    log xtree tlock and format maplock to update bmap;
+ * function:	log xtree tlock and format maplock to update bmap;
  */
 static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
 	   struct tlock * tlck)
@@ -1725,8 +1729,8 @@
 	xadlock = (struct xdlistlock *) maplock;
 
 	/*
-	 *      entry insertion/extension;
-	 *      sibling page link update (old right page before split);
+	 *	entry insertion/extension;
+	 *	sibling page link update (old right page before split);
 	 */
 	if (tlck->type & (tlckNEW | tlckGROW | tlckRELINK)) {
 		/* log after-image for logredo():
@@ -1801,7 +1805,7 @@
 	}
 
 	/*
-	 *      page deletion: file deletion/truncation (ref. xtTruncate())
+	 *	page deletion: file deletion/truncation (ref. xtTruncate())
 	 *
 	 * (page will be invalidated after log is written and bmap
 	 * is updated from the page);
@@ -1908,13 +1912,13 @@
 	}
 
 	/*
-	 *      page/entry truncation: file truncation (ref. xtTruncate())
+	 *	page/entry truncation: file truncation (ref. xtTruncate())
 	 *
-	 *     |----------+------+------+---------------|
-	 *                |      |      |
-	 *                |      |     hwm - hwm before truncation
-	 *                |     next - truncation point
-	 *               lwm - lwm before truncation
+	 *	|----------+------+------+---------------|
+	 *		   |      |      |
+	 *		   |      |     hwm - hwm before truncation
+	 *		   |     next - truncation point
+	 *		  lwm - lwm before truncation
 	 * header ?
 	 */
 	if (tlck->type & tlckTRUNCATE) {
@@ -1937,7 +1941,7 @@
 		twm = xtlck->twm.offset;
 
 		/*
-		 *      write log records
+		 *	write log records
 		 */
 		/* log after-image for logredo():
 		 *
@@ -1997,7 +2001,7 @@
 		}
 
 		/*
-		 *      format maplock(s) for txUpdateMap() to update bmap
+		 *	format maplock(s) for txUpdateMap() to update bmap
 		 */
 		maplock->index = 0;
 
@@ -2069,9 +2073,9 @@
 }
 
 /*
- *      mapLog()
+ *	mapLog()
  *
- * function:    log from maplock of freed data extents;
+ * function:	log from maplock of freed data extents;
  */
 static void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
 		   struct tlock * tlck)
@@ -2081,7 +2085,7 @@
 	pxd_t *pxd;
 
 	/*
-	 *      page relocation: free the source page extent
+	 *	page relocation: free the source page extent
 	 *
 	 * a maplock for txUpdateMap() for free of the page
 	 * has been formatted at txLock() time saving the src
@@ -2155,10 +2159,10 @@
 }
 
 /*
- *      txEA()
+ *	txEA()
  *
- * function:    acquire maplock for EA/ACL extents or
- *              set COMMIT_INLINE flag;
+ * function:	acquire maplock for EA/ACL extents or
+ *		set COMMIT_INLINE flag;
  */
 void txEA(tid_t tid, struct inode *ip, dxd_t * oldea, dxd_t * newea)
 {
@@ -2207,10 +2211,10 @@
 }
 
 /*
- *      txForce()
+ *	txForce()
  *
  * function: synchronously write pages locked by transaction
- *              after txLog() but before txUpdateMap();
+ *	     after txLog() but before txUpdateMap();
  */
 static void txForce(struct tblock * tblk)
 {
@@ -2273,10 +2277,10 @@
 }
 
 /*
- *      txUpdateMap()
+ *	txUpdateMap()
  *
- * function:    update persistent allocation map (and working map
- *              if appropriate);
+ * function:	update persistent allocation map (and working map
+ *		if appropriate);
  *
  * parameter:
  */
@@ -2298,7 +2302,7 @@
 
 
 	/*
-	 *      update block allocation map
+	 *	update block allocation map
 	 *
 	 * update allocation state in pmap (and wmap) and
 	 * update lsn of the pmap page;
@@ -2382,7 +2386,7 @@
 		}
 	}
 	/*
-	 *      update inode allocation map
+	 *	update inode allocation map
 	 *
 	 * update allocation state in pmap and
 	 * update lsn of the pmap page;
@@ -2407,24 +2411,24 @@
 }
 
 /*
- *      txAllocPMap()
+ *	txAllocPMap()
  *
  * function: allocate from persistent map;
  *
  * parameter:
- *      ipbmap  -
- *      malock -
- *              xad list:
- *              pxd:
+ *	ipbmap	-
+ *	maplock	-
+ *		xad list:
+ *		pxd:
  *
- *      maptype -
- *              allocate from persistent map;
- *              free from persistent map;
- *              (e.g., tmp file - free from working map at releae
- *               of last reference);
- *              free from persistent and working map;
+ *	maptype -
+ *		allocate from persistent map;
+ *		free from persistent map;
+ *		(e.g., tmp file - free from working map at release
+ *		 of last reference);
+ *		free from persistent and working map;
  *
- *      lsn     - log sequence number;
+ *	lsn	- log sequence number;
  */
 static void txAllocPMap(struct inode *ip, struct maplock * maplock,
 			struct tblock * tblk)
@@ -2478,9 +2482,9 @@
 }
 
 /*
- *      txFreeMap()
+ *	txFreeMap()
  *
- * function:    free from persistent and/or working map;
+ * function:	free from persistent and/or working map;
  *
  * todo: optimization
  */
@@ -2579,9 +2583,9 @@
 }
 
 /*
- *      txFreelock()
+ *	txFreelock()
  *
- * function:    remove tlock from inode anonymous locklist
+ * function:	remove tlock from inode anonymous locklist
  */
 void txFreelock(struct inode *ip)
 {
@@ -2619,7 +2623,7 @@
 }
 
 /*
- *      txAbort()
+ *	txAbort()
  *
  * function: abort tx before commit;
  *
@@ -2679,7 +2683,7 @@
 }
 
 /*
- *      txLazyCommit(void)
+ *	txLazyCommit(void)
  *
  *	All transactions except those changing ipimap (COMMIT_FORCE) are
  *	processed by this routine.  This insures that the inode and block
@@ -2728,7 +2732,7 @@
 }
 
 /*
- *      jfs_lazycommit(void)
+ *	jfs_lazycommit(void)
  *
  *	To be run as a kernel daemon.  If lbmIODone is called in an interrupt
  *	context, or where blocking is not wanted, this routine will process
@@ -2913,7 +2917,7 @@
 }
 
 /*
- *      jfs_sync(void)
+ *	jfs_sync(void)
  *
  *	To be run as a kernel daemon.  This is awakened when tlocks run low.
  *	We write any inodes that have anonymous tlocks so they will become
diff --git a/fs/jfs/jfs_txnmgr.h b/fs/jfs/jfs_txnmgr.h
index 7863cf2..ab72889 100644
--- a/fs/jfs/jfs_txnmgr.h
+++ b/fs/jfs/jfs_txnmgr.h
@@ -94,7 +94,7 @@
  */
 struct tlock {
 	lid_t next;		/* 2: index next lockword on tid locklist
-				 *          next lockword on freelist
+				 *	    next lockword on freelist
 				 */
 	tid_t tid;		/* 2: transaction id holding lock */
 
diff --git a/fs/jfs/jfs_types.h b/fs/jfs/jfs_types.h
index 09b2529..649f981 100644
--- a/fs/jfs/jfs_types.h
+++ b/fs/jfs/jfs_types.h
@@ -21,7 +21,7 @@
 /*
  *	jfs_types.h:
  *
- * basic type/utility  definitions
+ * basic type/utility definitions
  *
  * note: this header file must be the 1st include file
  * of JFS include list in all JFS .c file.
@@ -54,8 +54,8 @@
  */
 
 #define LEFTMOSTONE	0x80000000
-#define	HIGHORDER	0x80000000u	/* high order bit on            */
-#define	ONES		0xffffffffu	/* all bit on                   */
+#define	HIGHORDER	0x80000000u	/* high order bit on	*/
+#define	ONES		0xffffffffu	/* all bit on		*/
 
 /*
  *	logical xd (lxd)
@@ -148,7 +148,7 @@
 #define sizeDXD(dxd)	le32_to_cpu((dxd)->size)
 
 /*
- *      directory entry argument
+ *	directory entry argument
  */
 struct component_name {
 	int namlen;
@@ -160,14 +160,14 @@
  *	DASD limit information - stored in directory inode
  */
 struct dasd {
-	u8 thresh;		/* Alert Threshold (in percent) */
-	u8 delta;		/* Alert Threshold delta (in percent)   */
+	u8 thresh;		/* Alert Threshold (in percent)		*/
+	u8 delta;		/* Alert Threshold delta (in percent)	*/
 	u8 rsrvd1;
-	u8 limit_hi;		/* DASD limit (in logical blocks)       */
-	__le32 limit_lo;	/* DASD limit (in logical blocks)       */
+	u8 limit_hi;		/* DASD limit (in logical blocks)	*/
+	__le32 limit_lo;	/* DASD limit (in logical blocks)	*/
 	u8 rsrvd2[3];
-	u8 used_hi;		/* DASD usage (in logical blocks)       */
-	__le32 used_lo;		/* DASD usage (in logical blocks)       */
+	u8 used_hi;		/* DASD usage (in logical blocks)	*/
+	__le32 used_lo;		/* DASD usage (in logical blocks)	*/
 };
 
 #define DASDLIMIT(dasdp) \
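The split hi/lo fields above reassemble into a single 40-bit block count. A hedged sketch of the combination (the DASDLIMIT() macro itself is cut off at the hunk boundary):

	static inline u64 dasd_limit(const struct dasd *d)
	{
		/* limit_hi holds bits 32-39, limit_lo the low 32 bits */
		return ((u64) d->limit_hi << 32) | le32_to_cpu(d->limit_lo);
	}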
diff --git a/fs/jfs/jfs_umount.c b/fs/jfs/jfs_umount.c
index a386f48..7971f37 100644
--- a/fs/jfs/jfs_umount.c
+++ b/fs/jfs/jfs_umount.c
@@ -60,7 +60,7 @@
 	jfs_info("UnMount JFS: sb:0x%p", sb);
 
 	/*
-	 *      update superblock and close log
+	 *	update superblock and close log
 	 *
 	 * if mounted read-write and log based recovery was enabled
 	 */
diff --git a/fs/jfs/jfs_xtree.c b/fs/jfs/jfs_xtree.c
index acc97c4..1543906 100644
--- a/fs/jfs/jfs_xtree.c
+++ b/fs/jfs/jfs_xtree.c
@@ -16,7 +16,7 @@
  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 /*
- *      jfs_xtree.c: extent allocation descriptor B+-tree manager
+ *	jfs_xtree.c: extent allocation descriptor B+-tree manager
  */
 
 #include <linux/fs.h>
@@ -32,30 +32,30 @@
 /*
  * xtree local flag
  */
-#define XT_INSERT       0x00000001
+#define XT_INSERT	0x00000001
 
 /*
- *       xtree key/entry comparison: extent offset
+ *	xtree key/entry comparison: extent offset
  *
  * return:
- *      -1: k < start of extent
- *       0: start_of_extent <= k <= end_of_extent
- *       1: k > end_of_extent
+ *	-1: k < start of extent
+ *	 0: start_of_extent <= k <= end_of_extent
+ *	 1: k > end_of_extent
  */
 #define XT_CMP(CMP, K, X, OFFSET64)\
 {\
-        OFFSET64 = offsetXAD(X);\
-        (CMP) = ((K) >= OFFSET64 + lengthXAD(X)) ? 1 :\
-              ((K) < OFFSET64) ? -1 : 0;\
+	OFFSET64 = offsetXAD(X);\
+	(CMP) = ((K) >= OFFSET64 + lengthXAD(X)) ? 1 :\
+		((K) < OFFSET64) ? -1 : 0;\
 }
 
 /* write a xad entry */
 #define XT_PUTENTRY(XAD, FLAG, OFF, LEN, ADDR)\
 {\
-        (XAD)->flag = (FLAG);\
-        XADoffset((XAD), (OFF));\
-        XADlength((XAD), (LEN));\
-        XADaddress((XAD), (ADDR));\
+	(XAD)->flag = (FLAG);\
+	XADoffset((XAD), (OFF));\
+	XADlength((XAD), (LEN));\
+	XADaddress((XAD), (ADDR));\
 }
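For reference, the XT_CMP() classification above as an inline function; a sketch only, since the macro form (which also exports OFFSET64 to its caller) is what the code uses. offsetXAD() and lengthXAD() are the accessors from the JFS headers.

	static inline int xt_cmp(s64 k, const xad_t *xad)
	{
		s64 off = offsetXAD(xad);

		if (k < off)
			return -1;	/* k precedes the extent */
		if (k >= off + lengthXAD(xad))
			return 1;	/* k lies past the extent's end */
		return 0;		/* k falls inside the extent */
	}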
 
 #define XT_PAGE(IP, MP) BT_PAGE(IP, MP, xtpage_t, i_xtroot)
@@ -76,13 +76,13 @@
 			MP = NULL;\
 			RC = -EIO;\
 		}\
-        }\
+	}\
 }
 
 /* for consistency */
 #define XT_PUTPAGE(MP) BT_PUTPAGE(MP)
 
-#define XT_GETSEARCH(IP, LEAF, BN, MP,  P, INDEX) \
+#define XT_GETSEARCH(IP, LEAF, BN, MP, P, INDEX) \
 	BT_GETSEARCH(IP, LEAF, BN, MP, xtpage_t, P, INDEX, i_xtroot)
 /* xtree entry parameter descriptor */
 struct xtsplit {
@@ -97,7 +97,7 @@
 
 
 /*
- *      statistics
+ *	statistics
  */
 #ifdef CONFIG_JFS_STATISTICS
 static struct {
@@ -136,7 +136,7 @@
 #endif				/*  _STILL_TO_PORT */
 
 /*
- *      xtLookup()
+ *	xtLookup()
  *
  * function: map a single page into a physical extent;
  */
@@ -179,7 +179,7 @@
 	}
 
 	/*
-	 *      compute the physical extent covering logical extent
+	 *	compute the physical extent covering logical extent
 	 *
 	 * N.B. search may have failed (e.g., hole in sparse file),
 	 * and returned the index of the next entry.
@@ -220,27 +220,27 @@
 
 
 /*
- *      xtLookupList()
+ *	xtLookupList()
  *
  * function: map a single logical extent into a list of physical extent;
  *
  * parameter:
- *      struct inode    *ip,
- *      struct lxdlist  *lxdlist,       lxd list (in)
- *      struct xadlist  *xadlist,       xad list (in/out)
- *      int		flag)
+ *	struct inode	*ip,
+ *	struct lxdlist	*lxdlist,	lxd list (in)
+ *	struct xadlist	*xadlist,	xad list (in/out)
+ *	int		flag)
  *
  * coverage of lxd by xad under assumption of
  * . lxd's are ordered and disjoint.
  * . xad's are ordered and disjoint.
  *
  * return:
- *      0:      success
+ *	0:	success
  *
  * note: a page being written (even a single byte) is backed fully,
- *      except the last page which is only backed with blocks
- *      required to cover the last byte;
- *      the extent backing a page is fully contained within an xad;
+ *	except the last page which is only backed with blocks
+ *	required to cover the last byte;
+ *	the extent backing a page is fully contained within an xad;
  */
 int xtLookupList(struct inode *ip, struct lxdlist * lxdlist,
 		 struct xadlist * xadlist, int flag)
@@ -284,7 +284,7 @@
 		return rc;
 
 	/*
-	 *      compute the physical extent covering logical extent
+	 *	compute the physical extent covering logical extent
 	 *
 	 * N.B. search may have failed (e.g., hole in sparse file),
 	 * and returned the index of the next entry.
@@ -343,7 +343,7 @@
 		if (lstart >= size)
 			goto mapend;
 
-		/* compare with the current xad  */
+		/* compare with the current xad */
 		goto compare1;
 	}
 	/* lxd is covered by xad */
@@ -430,7 +430,7 @@
 	/*
 	 * lxd is partially covered by xad
 	 */
-	else {			/* (xend < lend)  */
+	else {			/* (xend < lend) */
 
 		/*
 		 * get next xad
@@ -477,22 +477,22 @@
 
 
 /*
- *      xtSearch()
+ *	xtSearch()
  *
- * function:    search for the xad entry covering specified offset.
+ * function:	search for the xad entry covering specified offset.
  *
  * parameters:
- *      ip      - file object;
- *      xoff    - extent offset;
- *      nextp	- address of next extent (if any) for search miss
- *      cmpp    - comparison result:
- *      btstack - traverse stack;
- *      flag    - search process flag (XT_INSERT);
+ *	ip	- file object;
+ *	xoff	- extent offset;
+ *	nextp	- address of next extent (if any) for search miss
+ *	cmpp	- comparison result:
+ *	btstack - traverse stack;
+ *	flag	- search process flag (XT_INSERT);
  *
  * returns:
- *      btstack contains (bn, index) of search path traversed to the entry.
- *      *cmpp is set to result of comparison with the entry returned.
- *      the page containing the entry is pinned at exit.
+ *	btstack contains (bn, index) of search path traversed to the entry.
+ *	*cmpp is set to result of comparison with the entry returned.
+ *	the page containing the entry is pinned at exit.
  */
 static int xtSearch(struct inode *ip, s64 xoff,	s64 *nextp,
 		    int *cmpp, struct btstack * btstack, int flag)
@@ -517,7 +517,7 @@
 	btstack->nsplit = 0;
 
 	/*
-	 *      search down tree from root:
+	 *	search down tree from root:
 	 *
 	 * between two consecutive entries of <Ki, Pi> and <Kj, Pj> of
 	 * internal page, child page Pi contains entry with k, Ki <= K < Kj.
@@ -642,7 +642,7 @@
 			XT_CMP(cmp, xoff, &p->xad[index], t64);
 			if (cmp == 0) {
 				/*
-				 *      search hit
+				 *	search hit
 				 */
 				/* search hit - leaf page:
 				 * return the entry found
@@ -692,7 +692,7 @@
 		}
 
 		/*
-		 *      search miss
+		 *	search miss
 		 *
 		 * base is the smallest index with key (Kj) greater than
 		 * search key (K) and may be zero or maxentry index.
@@ -773,22 +773,22 @@
 }
 
 /*
- *      xtInsert()
+ *	xtInsert()
  *
  * function:
  *
  * parameter:
- *      tid     - transaction id;
- *      ip      - file object;
- *      xflag   - extent flag (XAD_NOTRECORDED):
- *      xoff    - extent offset;
- *      xlen    - extent length;
- *      xaddrp  - extent address pointer (in/out):
- *              if (*xaddrp)
- *                      caller allocated data extent at *xaddrp;
- *              else
- *                      allocate data extent and return its xaddr;
- *      flag    -
+ *	tid	- transaction id;
+ *	ip	- file object;
+ *	xflag	- extent flag (XAD_NOTRECORDED):
+ *	xoff	- extent offset;
+ *	xlen	- extent length;
+ *	xaddrp	- extent address pointer (in/out):
+ *		if (*xaddrp)
+ *			caller allocated data extent at *xaddrp;
+ *		else
+ *			allocate data extent and return its xaddr;
+ *	flag	-
  *
  * return:
  */
@@ -813,7 +813,7 @@
 	jfs_info("xtInsert: nxoff:0x%lx nxlen:0x%x", (ulong) xoff, xlen);
 
 	/*
-	 *      search for the entry location at which to insert:
+	 *	search for the entry location at which to insert:
 	 *
 	 * xtFastSearch() and xtSearch() both return (leaf page
 	 * pinned, index at which to insert).
@@ -853,13 +853,13 @@
 	}
 
 	/*
-	 *      insert entry for new extent
+	 *	insert entry for new extent
 	 */
 	xflag |= XAD_NEW;
 
 	/*
-	 *      if the leaf page is full, split the page and
-	 *      propagate up the router entry for the new page from split
+	 *	if the leaf page is full, split the page and
+	 *	propagate up the router entry for the new page from split
 	 *
 	 * The xtSplitUp() will insert the entry and unpin the leaf page.
 	 */
@@ -886,7 +886,7 @@
 	}
 
 	/*
-	 *      insert the new entry into the leaf page
+	 *	insert the new entry into the leaf page
 	 */
 	/*
 	 * acquire a transaction lock on the leaf page;
@@ -930,16 +930,16 @@
 
 
 /*
- *      xtSplitUp()
+ *	xtSplitUp()
  *
  * function:
- *      split full pages as propagating insertion up the tree
+ *	split full pages as propagating insertion up the tree
  *
  * parameter:
- *      tid     - transaction id;
- *      ip      - file object;
- *      split   - entry parameter descriptor;
- *      btstack - traverse stack from xtSearch()
+ *	tid	- transaction id;
+ *	ip	- file object;
+ *	split	- entry parameter descriptor;
+ *	btstack - traverse stack from xtSearch()
  *
  * return:
  */
@@ -1199,22 +1199,22 @@
 
 
 /*
- *      xtSplitPage()
+ *	xtSplitPage()
  *
  * function:
- *      split a full non-root page into
- *      original/split/left page and new right page
- *      i.e., the original/split page remains as left page.
+ *	split a full non-root page into
+ *	original/split/left page and new right page
+ *	i.e., the original/split page remains as left page.
  *
  * parameter:
- *      int		tid,
- *      struct inode    *ip,
- *      struct xtsplit  *split,
- *      struct metapage	**rmpp,
- *      u64		*rbnp,
+ *	int		tid,
+ *	struct inode	*ip,
+ *	struct xtsplit	*split,
+ *	struct metapage	**rmpp,
+ *	u64		*rbnp,
  *
  * return:
- *      Pointer to page in which to insert or NULL on error.
+ *	Pointer to page in which to insert or NULL on error.
  */
 static int
 xtSplitPage(tid_t tid, struct inode *ip,
@@ -1248,9 +1248,9 @@
 	rbn = addressPXD(pxd);
 
 	/* Allocate blocks to quota. */
-       if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
-	       rc = -EDQUOT;
-	       goto clean_up;
+	if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
+		rc = -EDQUOT;
+		goto clean_up;
 	}
 
 	quota_allocation += lengthPXD(pxd);
@@ -1304,7 +1304,7 @@
 	skip = split->index;
 
 	/*
-	 *      sequential append at tail (after last entry of last page)
+	 *	sequential append at tail (after last entry of last page)
 	 *
 	 * if splitting the last page on a level because of appending
 	 * an entry to it (skip is maxentry), it's likely that the access is
@@ -1342,7 +1342,7 @@
 	}
 
 	/*
-	 *      non-sequential insert (at possibly middle page)
+	 *	non-sequential insert (at possibly middle page)
 	 */
 
 	/*
@@ -1465,25 +1465,24 @@
 
 
 /*
- *      xtSplitRoot()
+ *	xtSplitRoot()
  *
  * function:
- *      split the full root page into
- *      original/root/split page and new right page
- *      i.e., root remains fixed in tree anchor (inode) and
- *      the root is copied to a single new right child page
- *      since root page << non-root page, and
- *      the split root page contains a single entry for the
- *      new right child page.
+ *	split the full root page into original/root/split page and new
+ *	right page
+ *	i.e., root remains fixed in tree anchor (inode) and the root is
+ *	copied to a single new right child page since root page <<
+ *	non-root page, and the split root page contains a single entry
+ *	for the new right child page.
  *
  * parameter:
- *      int		tid,
- *      struct inode    *ip,
- *      struct xtsplit  *split,
- *      struct metapage	**rmpp)
+ *	int		tid,
+ *	struct inode	*ip,
+ *	struct xtsplit	*split,
+ *	struct metapage	**rmpp)
  *
  * return:
- *      Pointer to page in which to insert or NULL on error.
+ *	Pointer to page in which to insert or NULL on error.
  */
 static int
 xtSplitRoot(tid_t tid,
@@ -1505,7 +1504,7 @@
 	INCREMENT(xtStat.split);
 
 	/*
-	 *      allocate a single (right) child page
+	 *	allocate a single (right) child page
 	 */
 	pxdlist = split->pxdlist;
 	pxd = &pxdlist->pxd[pxdlist->npxd];
@@ -1573,7 +1572,7 @@
 	}
 
 	/*
-	 *      reset the root
+	 *	reset the root
 	 *
 	 * init root with the single entry for the new right page
 	 * set the 1st entry offset to 0, which force the left-most key
@@ -1610,7 +1609,7 @@
 
 
 /*
- *      xtExtend()
+ *	xtExtend()
  *
  * function: extend in-place;
  *
@@ -1677,7 +1676,7 @@
 		goto extendOld;
 
 	/*
-	 *      extent overflow: insert entry for new extent
+	 *	extent overflow: insert entry for new extent
 	 */
 //insertNew:
 	xoff = offsetXAD(xad) + MAXXLEN;
@@ -1685,8 +1684,8 @@
 	nextindex = le16_to_cpu(p->header.nextindex);
 
 	/*
-	 *      if the leaf page is full, insert the new entry and
-	 *      propagate up the router entry for the new page from split
+	 *	if the leaf page is full, insert the new entry and
+	 *	propagate up the router entry for the new page from split
 	 *
 	 * The xtSplitUp() will insert the entry and unpin the leaf page.
 	 */
@@ -1731,7 +1730,7 @@
 		}
 	}
 	/*
-	 *      insert the new entry into the leaf page
+	 *	insert the new entry into the leaf page
 	 */
 	else {
 		/* insert the new entry: mark the entry NEW */
@@ -1771,11 +1770,11 @@
 
 #ifdef _NOTYET
 /*
- *      xtTailgate()
+ *	xtTailgate()
  *
  * function: split existing 'tail' extent
- *      (split offset >= start offset of tail extent), and
- *      relocate and extend the split tail half;
+ *	(split offset >= start offset of tail extent), and
+ *	relocate and extend the split tail half;
  *
  * note: existing extent may or may not have been committed.
  * caller is responsible for pager buffer cache update, and
@@ -1804,7 +1803,7 @@
 
 /*
 printf("xtTailgate: nxoff:0x%lx nxlen:0x%x nxaddr:0x%lx\n",
-        (ulong)xoff, xlen, (ulong)xaddr);
+	(ulong)xoff, xlen, (ulong)xaddr);
 */
 
 	/* there must exist extent to be tailgated */
@@ -1842,18 +1841,18 @@
 	xad = &p->xad[index];
 /*
 printf("xtTailgate: xoff:0x%lx xlen:0x%x xaddr:0x%lx\n",
-        (ulong)offsetXAD(xad), lengthXAD(xad), (ulong)addressXAD(xad));
+	(ulong)offsetXAD(xad), lengthXAD(xad), (ulong)addressXAD(xad));
 */
 	if ((llen = xoff - offsetXAD(xad)) == 0)
 		goto updateOld;
 
 	/*
-	 *      partially replace extent: insert entry for new extent
+	 *	partially replace extent: insert entry for new extent
 	 */
 //insertNew:
 	/*
-	 *      if the leaf page is full, insert the new entry and
-	 *      propagate up the router entry for the new page from split
+	 *	if the leaf page is full, insert the new entry and
+	 *	propagate up the router entry for the new page from split
 	 *
 	 * The xtSplitUp() will insert the entry and unpin the leaf page.
 	 */
@@ -1898,7 +1897,7 @@
 		}
 	}
 	/*
-	 *      insert the new entry into the leaf page
+	 *	insert the new entry into the leaf page
 	 */
 	else {
 		/* insert the new entry: mark the entry NEW */
@@ -1955,17 +1954,17 @@
 #endif /* _NOTYET */
 
 /*
- *      xtUpdate()
+ *	xtUpdate()
  *
  * function: update XAD;
  *
- *      update extent for allocated_but_not_recorded or
- *      compressed extent;
+ *	update extent for allocated_but_not_recorded or
+ *	compressed extent;
  *
  * parameter:
- *      nxad    - new XAD;
- *                logical extent of the specified XAD must be completely
- *                contained by an existing XAD;
+ *	nxad	- new XAD;
+ *		logical extent of the specified XAD must be completely
+ *		contained by an existing XAD;
  */
 int xtUpdate(tid_t tid, struct inode *ip, xad_t * nxad)
 {				/* new XAD */
@@ -2416,19 +2415,19 @@
 
 
 /*
- *      xtAppend()
+ *	xtAppend()
  *
  * function: grow in append mode from the specified contiguous region;
  *
  * parameter:
- *      tid             - transaction id;
- *      ip              - file object;
- *      xflag           - extent flag:
- *      xoff            - extent offset;
- *      maxblocks       - max extent length;
- *      xlen            - extent length (in/out);
- *      xaddrp          - extent address pointer (in/out):
- *      flag            -
+ *	tid		- transaction id;
+ *	ip		- file object;
+ *	xflag		- extent flag:
+ *	xoff		- extent offset;
+ *	maxblocks	- max extent length;
+ *	xlen		- extent length (in/out);
+ *	xaddrp		- extent address pointer (in/out):
+ *	flag		-
  *
  * return:
  */
@@ -2460,7 +2459,7 @@
 		 (ulong) xoff, maxblocks, xlen, (ulong) xaddr);
 
 	/*
-	 *      search for the entry location at which to insert:
+	 *	search for the entry location at which to insert:
 	 *
 	 * xtFastSearch() and xtSearch() both return (leaf page
 	 * pinned, index at which to insert).
@@ -2482,13 +2481,13 @@
 		xlen = min(xlen, (int)(next - xoff));
 //insert:
 	/*
-	 *      insert entry for new extent
+	 *	insert entry for new extent
 	 */
 	xflag |= XAD_NEW;
 
 	/*
-	 *      if the leaf page is full, split the page and
-	 *      propagate up the router entry for the new page from split
+	 *	if the leaf page is full, split the page and
+	 *	propagate up the router entry for the new page from split
 	 *
 	 * The xtSplitUp() will insert the entry and unpin the leaf page.
 	 */
@@ -2545,7 +2544,7 @@
 	return 0;
 
 	/*
-	 *      insert the new entry into the leaf page
+	 *	insert the new entry into the leaf page
 	 */
       insertLeaf:
 	/*
@@ -2589,17 +2588,17 @@
 
 /* - TBD for defragmentation/reorganization -
  *
- *      xtDelete()
+ *	xtDelete()
  *
  * function:
- *      delete the entry with the specified key.
+ *	delete the entry with the specified key.
  *
- *      N.B.: whole extent of the entry is assumed to be deleted.
+ *	N.B.: whole extent of the entry is assumed to be deleted.
  *
  * parameter:
  *
  * return:
- *       ENOENT: if the entry is not found.
+ *	ENOENT: if the entry is not found.
  *
  * exception:
  */
@@ -2665,10 +2664,10 @@
 
 /* - TBD for defragmentation/reorganization -
  *
- *      xtDeleteUp()
+ *	xtDeleteUp()
  *
  * function:
- *      free empty pages as propagating deletion up the tree
+ *	free empty pages as propagating deletion up the tree
  *
  * parameter:
  *
@@ -2815,15 +2814,15 @@
 
 
 /*
- * NAME:        xtRelocate()
+ * NAME:	xtRelocate()
  *
- * FUNCTION:    relocate xtpage or data extent of regular file;
- *              This function is mainly used by defragfs utility.
+ * FUNCTION:	relocate xtpage or data extent of regular file;
+ *		This function is mainly used by defragfs utility.
  *
- * NOTE:        This routine does not have the logic to handle
- *              uncommitted allocated extent. The caller should call
- *              txCommit() to commit all the allocation before call
- *              this routine.
+ *	NOTE:	This routine does not have the logic to handle an
+ *		uncommitted allocated extent. The caller should call
+ *		txCommit() to commit all the allocations before calling
+ *		this routine.
  */
 int
 xtRelocate(tid_t tid, struct inode * ip, xad_t * oxad,	/* old XAD */
@@ -2865,8 +2864,8 @@
 		 xtype, (ulong) xoff, xlen, (ulong) oxaddr, (ulong) nxaddr);
 
 	/*
-	 *      1. get and validate the parent xtpage/xad entry
-	 *      covering the source extent to be relocated;
+	 *	1. get and validate the parent xtpage/xad entry
+	 *	covering the source extent to be relocated;
 	 */
 	if (xtype == DATAEXT) {
 		/* search in leaf entry */
@@ -2910,7 +2909,7 @@
 	jfs_info("xtRelocate: parent xad entry validated.");
 
 	/*
-	 *      2. relocate the extent
+	 *	2. relocate the extent
 	 */
 	if (xtype == DATAEXT) {
 		/* if the extent is allocated-but-not-recorded
@@ -2923,7 +2922,7 @@
 			XT_PUTPAGE(pmp);
 
 		/*
-		 *      cmRelocate()
+		 *	cmRelocate()
 		 *
 		 * copy target data pages to be relocated;
 		 *
@@ -2945,8 +2944,8 @@
 		pno = offset >> CM_L2BSIZE;
 		npages = (nbytes + (CM_BSIZE - 1)) >> CM_L2BSIZE;
 /*
-                npages = ((offset + nbytes - 1) >> CM_L2BSIZE) -
-                         (offset >> CM_L2BSIZE) + 1;
+		npages = ((offset + nbytes - 1) >> CM_L2BSIZE) -
+			  (offset >> CM_L2BSIZE) + 1;
 */
 		sxaddr = oxaddr;
 		dxaddr = nxaddr;
@@ -2981,7 +2980,7 @@
 
 		XT_GETSEARCH(ip, btstack.top, bn, pmp, pp, index);
 		jfs_info("xtRelocate: target data extent relocated.");
-	} else {		/* (xtype  == XTPAGE) */
+	} else {		/* (xtype == XTPAGE) */
 
 		/*
 		 * read in the target xtpage from the source extent;
@@ -3026,16 +3025,14 @@
 		 */
 		if (lmp) {
 			BT_MARK_DIRTY(lmp, ip);
-			tlck =
-			    txLock(tid, ip, lmp, tlckXTREE | tlckRELINK);
+			tlck = txLock(tid, ip, lmp, tlckXTREE | tlckRELINK);
 			lp->header.next = cpu_to_le64(nxaddr);
 			XT_PUTPAGE(lmp);
 		}
 
 		if (rmp) {
 			BT_MARK_DIRTY(rmp, ip);
-			tlck =
-			    txLock(tid, ip, rmp, tlckXTREE | tlckRELINK);
+			tlck = txLock(tid, ip, rmp, tlckXTREE | tlckRELINK);
 			rp->header.prev = cpu_to_le64(nxaddr);
 			XT_PUTPAGE(rmp);
 		}
@@ -3062,7 +3059,7 @@
 		 * scan may be skipped by commit() and logredo();
 		 */
 		BT_MARK_DIRTY(mp, ip);
-		/* tlckNEW init  xtlck->lwm.offset = XTENTRYSTART; */
+		/* tlckNEW init xtlck->lwm.offset = XTENTRYSTART; */
 		tlck = txLock(tid, ip, mp, tlckXTREE | tlckNEW);
 		xtlck = (struct xtlock *) & tlck->lock;
 
@@ -3084,7 +3081,7 @@
 	}
 
 	/*
-	 *      3. acquire maplock for the source extent to be freed;
+	 *	3. acquire maplock for the source extent to be freed;
 	 *
 	 * acquire a maplock saving the src relocated extent address;
 	 * to free of the extent at commit time;
@@ -3105,7 +3102,7 @@
 	 *      is no buffer associated with this lock since the buffer
 	 *      has been redirected to the target location.
 	 */
-	else			/* (xtype  == XTPAGE) */
+	else			/* (xtype == XTPAGE) */
 		tlck = txMaplock(tid, ip, tlckMAP | tlckRELOCATE);
 
 	pxdlock = (struct pxd_lock *) & tlck->lock;
@@ -3115,7 +3112,7 @@
 	pxdlock->index = 1;
 
 	/*
-	 *      4. update the parent xad entry for relocation;
+	 *	4. update the parent xad entry for relocation;
 	 *
 	 * acquire tlck for the parent entry with XAD_NEW as entry
 	 * update which will write LOG_REDOPAGE and update bmap for
@@ -3143,22 +3140,22 @@
 
 
 /*
- *      xtSearchNode()
+ *	xtSearchNode()
  *
- * function:    search for the internal xad entry covering specified extent.
- *              This function is mainly used by defragfs utility.
+ * function:	search for the internal xad entry covering the specified extent.
+ *		This function is mainly used by defragfs utility.
  *
  * parameters:
- *      ip      - file object;
- *      xad     - extent to find;
- *      cmpp    - comparison result:
- *      btstack - traverse stack;
- *      flag    - search process flag;
+ *	ip	- file object;
+ *	xad	- extent to find;
+ *	cmpp	- comparison result:
+ *	btstack - traverse stack;
+ *	flag	- search process flag;
  *
  * returns:
- *      btstack contains (bn, index) of search path traversed to the entry.
- *      *cmpp is set to result of comparison with the entry returned.
- *      the page containing the entry is pinned at exit.
+ *	btstack contains (bn, index) of search path traversed to the entry.
+ *	*cmpp is set to result of comparison with the entry returned.
+ *	the page containing the entry is pinned at exit.
  */
 static int xtSearchNode(struct inode *ip, xad_t * xad,	/* required XAD entry */
 			int *cmpp, struct btstack * btstack, int flag)
@@ -3181,7 +3178,7 @@
 	xaddr = addressXAD(xad);
 
 	/*
-	 *      search down tree from root:
+	 *	search down tree from root:
 	 *
 	 * between two consecutive entries of <Ki, Pi> and <Kj, Pj> of
 	 * internal page, child page Pi contains entry with k, Ki <= K < Kj.
@@ -3217,7 +3214,7 @@
 			XT_CMP(cmp, xoff, &p->xad[index], t64);
 			if (cmp == 0) {
 				/*
-				 *      search hit
+				 *	search hit
 				 *
 				 * verify for exact match;
 				 */
@@ -3245,7 +3242,7 @@
 		}
 
 		/*
-		 *      search miss - non-leaf page:
+		 *	search miss - non-leaf page:
 		 *
 		 * base is the smallest index with key (Kj) greater than
 		 * search key (K) and may be zero or maxentry index.
@@ -3268,15 +3265,15 @@
 
 
 /*
- *      xtRelink()
+ *	xtRelink()
  *
  * function:
- *      link around a freed page.
+ *	link around a freed page.
  *
  * Parameter:
- *      int           tid,
- *      struct inode    *ip,
- *      xtpage_t        *p)
+ *	int		tid,
+ *	struct inode	*ip,
+ *	xtpage_t	*p)
  *
  * returns:
  */
@@ -3338,7 +3335,7 @@
 
 
 /*
- *      xtInitRoot()
+ *	xtInitRoot()
  *
  * initialize file root (inline in inode)
  */
@@ -3385,42 +3382,42 @@
 #define MAX_TRUNCATE_LEAVES 50
 
 /*
- *      xtTruncate()
+ *	xtTruncate()
  *
  * function:
- *      traverse for truncation logging backward bottom up;
- *      terminate at the last extent entry at the current subtree
- *      root page covering new down size.
- *      truncation may occur within the last extent entry.
+ *	traverse for truncation logging backward bottom up;
+ *	terminate at the last extent entry at the current subtree
+ *	root page covering new down size.
+ *	truncation may occur within the last extent entry.
  *
  * parameter:
- *      int           tid,
- *      struct inode    *ip,
- *      s64           newsize,
- *      int           type)   {PWMAP, PMAP, WMAP; DELETE, TRUNCATE}
+ *	int		tid,
+ *	struct inode	*ip,
+ *	s64		newsize,
+ *	int		type)	{PWMAP, PMAP, WMAP; DELETE, TRUNCATE}
  *
  * return:
  *
  * note:
- *      PWMAP:
- *       1. truncate (non-COMMIT_NOLINK file)
- *          by jfs_truncate() or jfs_open(O_TRUNC):
- *          xtree is updated;
+ *	PWMAP:
+ *	 1. truncate (non-COMMIT_NOLINK file)
+ *	    by jfs_truncate() or jfs_open(O_TRUNC):
+ *	    xtree is updated;
  *	 2. truncate index table of directory when last entry removed
- *       map update via tlock at commit time;
- *      PMAP:
+ *	map update via tlock at commit time;
+ *	PMAP:
  *	 Call xtTruncate_pmap instead
- *      WMAP:
- *       1. remove (free zero link count) on last reference release
- *          (pmap has been freed at commit zero link count);
- *       2. truncate (COMMIT_NOLINK file, i.e., tmp file):
- *          xtree is updated;
- *       map update directly at truncation time;
+ *	WMAP:
+ *	 1. remove (free zero link count) on last reference release
+ *	    (pmap has been freed at commit zero link count);
+ *	 2. truncate (COMMIT_NOLINK file, i.e., tmp file):
+ *	    xtree is updated;
+ *	 map update directly at truncation time;
  *
- *      if (DELETE)
- *              no LOG_NOREDOPAGE is required (NOREDOFILE is sufficient);
- *      else if (TRUNCATE)
- *              must write LOG_NOREDOPAGE for deleted index page;
+ *	if (DELETE)
+ *		no LOG_NOREDOPAGE is required (NOREDOFILE is sufficient);
+ *	else if (TRUNCATE)
+ *		must write LOG_NOREDOPAGE for deleted index page;
  *
  * pages may already have been tlocked by anonymous transactions
  * during file growth (i.e., write) before truncation;
@@ -3493,7 +3490,7 @@
 	 * retained in the new sized file.
 	 * if type is PMAP, the data and index pages are NOT
 	 * freed, and the data and index blocks are NOT freed
-	 * from  working map.
+	 * from working map.
 	 * (this will allow continued access of data/index of
 	 * temporary file (zerolink count file truncated to zero-length)).
 	 */
@@ -3542,7 +3539,7 @@
 		goto getChild;
 
 	/*
-	 *      leaf page
+	 *	leaf page
 	 */
 	freed = 0;
 
@@ -3916,7 +3913,7 @@
 	}
 
 	/*
-	 *      internal page: go down to child page of current entry
+	 *	internal page: go down to child page of current entry
 	 */
       getChild:
 	/* save current parent entry for the child page */
@@ -3965,7 +3962,7 @@
 
 
 /*
- *      xtTruncate_pmap()
+ *	xtTruncate_pmap()
  *
  * function:
  *	Perform truncate to zero length for a deleted file, leaving the
@@ -3974,9 +3971,9 @@
  *	is committed to disk.
  *
  * parameter:
- *      tid_t		tid,
- *      struct inode	*ip,
- *      s64		committed_size)
+ *	tid_t		tid,
+ *	struct inode	*ip,
+ *	s64		committed_size)
  *
  * return: new committed size
  *
@@ -4050,7 +4047,7 @@
 	}
 
 	/*
-	 *      leaf page
+	 *	leaf page
 	 */
 
 	if (++locked_leaves > MAX_TRUNCATE_LEAVES) {
@@ -4062,7 +4059,7 @@
 		xoff = offsetXAD(xad);
 		xlen = lengthXAD(xad);
 		XT_PUTPAGE(mp);
-		return  (xoff + xlen) << JFS_SBI(ip->i_sb)->l2bsize;
+		return (xoff + xlen) << JFS_SBI(ip->i_sb)->l2bsize;
 	}
 	tlck = txLock(tid, ip, mp, tlckXTREE);
 	tlck->type = tlckXTREE | tlckFREE;
@@ -4099,8 +4096,7 @@
 		 */
 		tlck = txLock(tid, ip, mp, tlckXTREE);
 		xtlck = (struct xtlock *) & tlck->lock;
-		xtlck->hwm.offset =
-		    le16_to_cpu(p->header.nextindex) - 1;
+		xtlck->hwm.offset = le16_to_cpu(p->header.nextindex) - 1;
 		tlck->type = tlckXTREE | tlckFREE;
 
 		XT_PUTPAGE(mp);
@@ -4118,7 +4114,7 @@
 	else
 		index--;
 	/*
-	 *      internal page: go down to child page of current entry
+	 *	internal page: go down to child page of current entry
 	 */
       getChild:
 	/* save current parent entry for the child page */
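
The xtSearch()/xtSearchNode() descent comments above both rest on the same
invariant: between consecutive internal entries <Ki, Pi> and <Kj, Pj>, child
Pi holds every key K with Ki <= K < Kj, and the split-root path forces the
first entry offset of a page to 0 so the left-most child always matches. A
minimal userspace sketch of that per-page step (illustrative only; the
struct and names below are simplified stand-ins, not the kernel's xtpage_t):

	#include <stdint.h>

	struct xt_level {
		int64_t	key[16];	/* Ki: first logical block of child i */
		int	nent;		/* number of valid entries */
	};

	/*
	 * Return the slot whose child covers k: the largest i with
	 * key[i] <= k.  This is the "Ki <= K < Kj" descent rule.
	 * Assumes key[] is sorted and key[0] <= k (key[0] is 0 in JFS).
	 */
	static int xt_pick_child(const struct xt_level *p, int64_t k)
	{
		int lo = 0, hi = p->nent - 1;

		while (lo < hi) {
			int mid = (lo + hi + 1) / 2;

			if (p->key[mid] <= k)
				lo = mid;	/* k may be in a later child */
			else
				hi = mid - 1;	/* k is before key[mid] */
		}
		return lo;
	}
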
diff --git a/fs/jfs/jfs_xtree.h b/fs/jfs/jfs_xtree.h
index 164f6f2..70815c8a3 100644
--- a/fs/jfs/jfs_xtree.h
+++ b/fs/jfs/jfs_xtree.h
@@ -19,14 +19,14 @@
 #define _H_JFS_XTREE
 
 /*
- *      jfs_xtree.h: extent allocation descriptor B+-tree manager
+ *	jfs_xtree.h: extent allocation descriptor B+-tree manager
  */
 
 #include "jfs_btree.h"
 
 
 /*
- *      extent allocation descriptor (xad)
+ *	extent allocation descriptor (xad)
  */
 typedef struct xad {
 	unsigned flag:8;	/* 1: flag */
@@ -38,30 +38,30 @@
 	__le32 addr2;		/* 4: address in unit of fsblksize */
 } xad_t;			/* (16) */
 
-#define MAXXLEN         ((1 << 24) - 1)
+#define MAXXLEN		((1 << 24) - 1)
 
-#define XTSLOTSIZE      16
-#define L2XTSLOTSIZE    4
+#define XTSLOTSIZE	16
+#define L2XTSLOTSIZE	4
 
 /* xad_t field construction */
 #define XADoffset(xad, offset64)\
 {\
-        (xad)->off1 = ((u64)offset64) >> 32;\
-        (xad)->off2 = __cpu_to_le32((offset64) & 0xffffffff);\
+	(xad)->off1 = ((u64)offset64) >> 32;\
+	(xad)->off2 = __cpu_to_le32((offset64) & 0xffffffff);\
 }
 #define XADaddress(xad, address64)\
 {\
-        (xad)->addr1 = ((u64)address64) >> 32;\
-        (xad)->addr2 = __cpu_to_le32((address64) & 0xffffffff);\
+	(xad)->addr1 = ((u64)address64) >> 32;\
+	(xad)->addr2 = __cpu_to_le32((address64) & 0xffffffff);\
 }
-#define XADlength(xad, length32)        (xad)->len = __cpu_to_le24(length32)
+#define XADlength(xad, length32)	(xad)->len = __cpu_to_le24(length32)
 
 /* xad_t field extraction */
 #define offsetXAD(xad)\
-        ( ((s64)((xad)->off1)) << 32 | __le32_to_cpu((xad)->off2))
+	( ((s64)((xad)->off1)) << 32 | __le32_to_cpu((xad)->off2))
 #define addressXAD(xad)\
-        ( ((s64)((xad)->addr1)) << 32 | __le32_to_cpu((xad)->addr2))
-#define lengthXAD(xad)  __le24_to_cpu((xad)->len)
+	( ((s64)((xad)->addr1)) << 32 | __le32_to_cpu((xad)->addr2))
+#define lengthXAD(xad)	__le24_to_cpu((xad)->len)
 
 /* xad list */
 struct xadlist {
@@ -71,22 +71,22 @@
 };
 
 /* xad_t flags */
-#define XAD_NEW         0x01	/* new */
-#define XAD_EXTENDED    0x02	/* extended */
-#define XAD_COMPRESSED  0x04	/* compressed with recorded length */
+#define XAD_NEW		0x01	/* new */
+#define XAD_EXTENDED	0x02	/* extended */
+#define XAD_COMPRESSED	0x04	/* compressed with recorded length */
 #define XAD_NOTRECORDED 0x08	/* allocated but not recorded */
-#define XAD_COW         0x10	/* copy-on-write */
+#define XAD_COW		0x10	/* copy-on-write */
 
 
 /* possible values for maxentry */
-#define XTROOTINITSLOT_DIR  6
-#define XTROOTINITSLOT  10
-#define XTROOTMAXSLOT   18
-#define XTPAGEMAXSLOT   256
-#define XTENTRYSTART    2
+#define XTROOTINITSLOT_DIR 6
+#define XTROOTINITSLOT	10
+#define XTROOTMAXSLOT	18
+#define XTPAGEMAXSLOT	256
+#define XTENTRYSTART	2
 
 /*
- *      xtree page:
+ *	xtree page:
  */
 typedef union {
 	struct xtheader {
@@ -106,7 +106,7 @@
 } xtpage_t;
 
 /*
- *      external declaration
+ *	external declaration
  */
 extern int xtLookup(struct inode *ip, s64 lstart, s64 llen,
 		    int *pflag, s64 * paddr, int *plen, int flag);
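
The retabbed accessor macros above pack the 40-bit offset and address of an
xad as an 8-bit high part (off1/addr1) plus a little-endian 32-bit low part
(off2/addr2). A standalone round-trip check of the same split (a sketch:
plain integers stand in for the __le32 fields, so the byte-swap helpers are
omitted):

	#include <assert.h>
	#include <stdint.h>

	struct xad_demo {
		uint8_t	 off1;	/* bits 39..32 of the offset */
		uint32_t off2;	/* bits 31..0 (__le32 on disk) */
	};

	static void xad_set_offset(struct xad_demo *x, uint64_t off)
	{
		x->off1 = off >> 32;		/* as XADoffset() does */
		x->off2 = (uint32_t)off;
	}

	static int64_t xad_get_offset(const struct xad_demo *x)
	{
		return ((int64_t)x->off1 << 32) | x->off2;  /* offsetXAD() */
	}

	int main(void)
	{
		struct xad_demo x;

		xad_set_offset(&x, 0x123456789aULL);	/* any 40-bit value */
		assert(xad_get_offset(&x) == 0x123456789aLL);
		return 0;
	}
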
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 41c2047..25161c4 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -328,7 +328,7 @@
  *		dentry	- child directory dentry
  *
  * RETURN:	-EINVAL	- if name is . or ..
- *		-EINVAL  - if . or .. exist but are invalid.
+ *		-EINVAL - if . or .. exist but are invalid.
  *		errors from subroutines
  *
  * note:
@@ -517,7 +517,7 @@
 	inode_dec_link_count(ip);
 
 	/*
-	 *      commit zero link count object
+	 *	commit zero link count object
 	 */
 	if (ip->i_nlink == 0) {
 		assert(!test_cflag(COMMIT_Nolink, ip));
@@ -596,7 +596,7 @@
 /*
  * NAME:	commitZeroLink()
  *
- * FUNCTION:    for non-directory, called by jfs_remove(),
+ * FUNCTION:	for non-directory, called by jfs_remove(),
  *		truncate a regular file, directory or symbolic
  *		link to zero length. return 0 if type is not
  *		one of these.
@@ -676,7 +676,7 @@
 /*
  * NAME:	jfs_free_zero_link()
  *
- * FUNCTION:    for non-directory, called by iClose(),
+ * FUNCTION:	for non-directory, called by iClose(),
  *		free resources of a file from cache and WORKING map
  *		for a file previously committed with zero link count
  *		while associated with a pager object,
@@ -855,12 +855,12 @@
  * NAME:	jfs_symlink(dip, dentry, name)
  *
  * FUNCTION:	creates a symbolic link to <symlink> by name <name>
- *		        in directory <dip>
+ *			in directory <dip>
  *
- * PARAMETER:	dip	    - parent directory vnode
- *		        dentry	- dentry of symbolic link
- *		        name    - the path name of the existing object
- *			              that will be the source of the link
+ * PARAMETER:	dip	- parent directory vnode
+ *		dentry	- dentry of symbolic link
+ *		name	- the path name of the existing object
+ *			  that will be the source of the link
  *
  * RETURN:	errors from subroutines
  *
@@ -1052,9 +1052,9 @@
 
 
 /*
- * NAME:        jfs_rename
+ * NAME:	jfs_rename
  *
- * FUNCTION:    rename a file or directory
+ * FUNCTION:	rename a file or directory
  */
 static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	       struct inode *new_dir, struct dentry *new_dentry)
@@ -1331,9 +1331,9 @@
 
 
 /*
- * NAME:        jfs_mknod
+ * NAME:	jfs_mknod
  *
- * FUNCTION:    Create a special file (device)
+ * FUNCTION:	Create a special file (device)
  */
 static int jfs_mknod(struct inode *dir, struct dentry *dentry,
 		int mode, dev_t rdev)
diff --git a/fs/jfs/resize.c b/fs/jfs/resize.c
index 79d625f..71984ee 100644
--- a/fs/jfs/resize.c
+++ b/fs/jfs/resize.c
@@ -29,17 +29,17 @@
 #include "jfs_txnmgr.h"
 #include "jfs_debug.h"
 
-#define BITSPERPAGE     (PSIZE << 3)
-#define L2MEGABYTE      20
-#define MEGABYTE        (1 << L2MEGABYTE)
-#define MEGABYTE32     (MEGABYTE << 5)
+#define BITSPERPAGE	(PSIZE << 3)
+#define L2MEGABYTE	20
+#define MEGABYTE	(1 << L2MEGABYTE)
+#define MEGABYTE32	(MEGABYTE << 5)
 
 /* convert block number to bmap file page number */
 #define BLKTODMAPN(b)\
-        (((b) >> 13) + ((b) >> 23) + ((b) >> 33) + 3 + 1)
+	(((b) >> 13) + ((b) >> 23) + ((b) >> 33) + 3 + 1)
 
 /*
- *      jfs_extendfs()
+ *	jfs_extendfs()
  *
  * function: extend file system;
  *
@@ -48,9 +48,9 @@
  *                                   workspace  space
  *
  * input:
- *      new LVSize: in LV blocks (required)
- *      new LogSize: in LV blocks (optional)
- *      new FSSize: in LV blocks (optional)
+ *	new LVSize: in LV blocks (required)
+ *	new LogSize: in LV blocks (optional)
+ *	new FSSize: in LV blocks (optional)
  *
  * new configuration:
  * 1. set new LogSize as specified or default from new LVSize;
@@ -125,8 +125,8 @@
 	}
 
 	/*
-	 *      reconfigure LV spaces
-	 *      ---------------------
+	 *	reconfigure LV spaces
+	 *	---------------------
 	 *
 	 * validate new size, or, if not specified, determine new size
 	 */
@@ -198,7 +198,7 @@
 		log_formatted = 1;
 	}
 	/*
-	 *      quiesce file system
+	 *	quiesce file system
 	 *
 	 * (prepare to move the inline log and to prevent map update)
 	 *
@@ -270,8 +270,8 @@
 	}
 
 	/*
-	 *      extend block allocation map
-	 *      ---------------------------
+	 *	extend block allocation map
+	 *	---------------------------
 	 *
 	 * extendfs() for new extension, retry after crash recovery;
 	 *
@@ -283,7 +283,7 @@
 	 *  s_size: aggregate size in physical blocks;
 	 */
 	/*
-	 *      compute the new block allocation map configuration
+	 *	compute the new block allocation map configuration
 	 *
 	 * map dinode:
 	 *  di_size: map file size in byte;
@@ -301,7 +301,7 @@
 	newNpages = BLKTODMAPN(t64) + 1;
 
 	/*
-	 *      extend map from current map (WITHOUT growing mapfile)
+	 *	extend map from current map (WITHOUT growing mapfile)
 	 *
 	 * map new extension with unmapped part of the last partial
 	 * dmap page, if applicable, and extra page(s) allocated
@@ -341,8 +341,8 @@
 	XSize -= nblocks;
 
 	/*
-	 *      grow map file to cover remaining extension
-	 *      and/or one extra dmap page for next extendfs();
+	 *	grow map file to cover remaining extension
+	 *	and/or one extra dmap page for next extendfs();
 	 *
 	 * allocate new map pages and its backing blocks, and
 	 * update map file xtree
@@ -422,8 +422,8 @@
 	dbFinalizeBmap(ipbmap);
 
 	/*
-	 *      update inode allocation map
-	 *      ---------------------------
+	 *	update inode allocation map
+	 *	---------------------------
 	 *
 	 * move iag lists from old to new iag;
 	 * agstart field is not updated for logredo() to reconstruct
@@ -442,8 +442,8 @@
 	}
 
 	/*
-	 *      finalize
-	 *      --------
+	 *	finalize
+	 *	--------
 	 *
 	 * extension is committed when on-disk super block is
 	 * updated with new descriptors: logredo will recover
@@ -480,7 +480,7 @@
 	diFreeSpecial(ipbmap2);
 
 	/*
-	 *      update superblock
+	 *	update superblock
 	 */
 	if ((rc = readSuper(sb, &bh)))
 		goto error_out;
@@ -530,7 +530,7 @@
 
       resume:
 	/*
-	 *      resume file system transactions
+	 *	resume file system transactions
 	 */
 	txResume(sb);
 
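
BLKTODMAPN(b) above sizes the bmap file for b blocks: roughly one dmap page
per 2^13 blocks plus higher-level control pages per 2^23 and 2^33 blocks,
plus a few fixed pages (reading the constant terms as fixed overhead is an
interpretation; the source does not spell it out). The arithmetic itself is
easy to sanity-check in a userspace sketch:

	#include <stdio.h>
	#include <stdint.h>

	/* same formula as BLKTODMAPN() in fs/jfs/resize.c */
	static int64_t blk_to_dmapn(int64_t b)
	{
		return (b >> 13) + (b >> 23) + (b >> 33) + 3 + 1;
	}

	int main(void)
	{
		/* 1M blocks: 128 + 0 + 0 + 4 = 132 bmap pages */
		printf("%lld\n", (long long)blk_to_dmapn(1 << 20));
		return 0;
	}
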
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index b753ba2..b2375f0 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -63,9 +63,9 @@
  *
  *   On-disk:
  *
- *     FEALISTs are stored on disk using blocks allocated by dbAlloc() and
- *     written directly. An EA list may be in-lined in the inode if there is
- *     sufficient room available.
+ *	FEALISTs are stored on disk using blocks allocated by dbAlloc() and
+ *	written directly. An EA list may be in-lined in the inode if there is
+ *	sufficient room available.
  */
 
 struct ea_buffer {
@@ -590,7 +590,8 @@
       size_check:
 	if (EALIST_SIZE(ea_buf->xattr) != ea_size) {
 		printk(KERN_ERR "ea_get: invalid extended attribute\n");
-		dump_mem("xattr", ea_buf->xattr, ea_size);
+		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1,
+				     ea_buf->xattr, ea_size, 1);
 		ea_release(inode, ea_buf);
 		rc = -EIO;
 		goto clean_up;
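
The xattr hunk swaps the JFS-private dump_mem() for the generic
print_hex_dump(). Its arguments, in order: log level, per-line prefix
string, prefix type (DUMP_PREFIX_ADDRESS prints the buffer address,
DUMP_PREFIX_OFFSET the offset), bytes per line, group size, buffer, length,
and whether to append an ASCII column. A comparable call from other kernel
code might look like this (a sketch; the "demo: " prefix and the wrapper
function are made up):

	#include <linux/kernel.h>

	static void demo_dump(const void *buf, size_t len)
	{
		/* 16 bytes per line, 1-byte groups, ASCII column on */
		print_hex_dump(KERN_DEBUG, "demo: ", DUMP_PREFIX_OFFSET,
			       16, 1, buf, len, true);
	}
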
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 74f30e0..98e78e2 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -165,7 +165,6 @@
 	rcu_read_lock();
 	buffer += sprintf(buffer,
 		"State:\t%s\n"
-		"SleepAVG:\t%lu%%\n"
 		"Tgid:\t%d\n"
 		"Pid:\t%d\n"
 		"PPid:\t%d\n"
@@ -173,7 +172,6 @@
 		"Uid:\t%d\t%d\t%d\t%d\n"
 		"Gid:\t%d\t%d\t%d\t%d\n",
 		get_task_state(p),
-		(p->sleep_avg/1024)*100/(1020000000/1024),
 	       	p->tgid, p->pid,
 	       	pid_alive(p) ? rcu_dereference(p->real_parent)->tgid : 0,
 		pid_alive(p) && p->ptrace ? rcu_dereference(p->parent)->pid : 0,
@@ -312,6 +310,41 @@
 	return buffer - orig;
 }
 
+static clock_t task_utime(struct task_struct *p)
+{
+	clock_t utime = cputime_to_clock_t(p->utime),
+		total = utime + cputime_to_clock_t(p->stime);
+	u64 temp;
+
+	/*
+	 * Use CFS's precise accounting:
+	 */
+	temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime);
+
+	if (total) {
+		temp *= utime;
+		do_div(temp, total);
+	}
+	utime = (clock_t)temp;
+
+	return utime;
+}
+
+static clock_t task_stime(struct task_struct *p)
+{
+	clock_t stime = cputime_to_clock_t(p->stime);
+
+	/*
+	 * Use CFS's precise accounting. (we subtract utime from
+	 * the total, to make sure the total observed by userspace
+	 * grows monotonically - apps rely on that):
+	 */
+	stime = nsec_to_clock_t(p->se.sum_exec_runtime) - task_utime(p);
+
+	return stime;
+}
+
+
 static int do_task_stat(struct task_struct *task, char * buffer, int whole)
 {
 	unsigned long vsize, eip, esp, wchan = ~0UL;
@@ -326,7 +359,8 @@
 	unsigned long long start_time;
 	unsigned long cmin_flt = 0, cmaj_flt = 0;
 	unsigned long  min_flt = 0,  maj_flt = 0;
-	cputime_t cutime, cstime, utime, stime;
+	cputime_t cutime, cstime;
+	clock_t utime, stime;
 	unsigned long rsslim = 0;
 	char tcomm[sizeof(task->comm)];
 	unsigned long flags;
@@ -344,7 +378,8 @@
 
 	sigemptyset(&sigign);
 	sigemptyset(&sigcatch);
-	cutime = cstime = utime = stime = cputime_zero;
+	cutime = cstime = cputime_zero;
+	utime = stime = 0;
 
 	rcu_read_lock();
 	if (lock_task_sighand(task, &flags)) {
@@ -370,15 +405,15 @@
 			do {
 				min_flt += t->min_flt;
 				maj_flt += t->maj_flt;
-				utime = cputime_add(utime, t->utime);
-				stime = cputime_add(stime, t->stime);
+				utime += task_utime(t);
+				stime += task_stime(t);
 				t = next_thread(t);
 			} while (t != task);
 
 			min_flt += sig->min_flt;
 			maj_flt += sig->maj_flt;
-			utime = cputime_add(utime, sig->utime);
-			stime = cputime_add(stime, sig->stime);
+			utime += cputime_to_clock_t(sig->utime);
+			stime += cputime_to_clock_t(sig->stime);
 		}
 
 		sid = signal_session(sig);
@@ -394,8 +429,8 @@
 	if (!whole) {
 		min_flt = task->min_flt;
 		maj_flt = task->maj_flt;
-		utime = task->utime;
-		stime = task->stime;
+		utime = task_utime(task);
+		stime = task_stime(task);
 	}
 
 	/* scale priority and nice values from timeslices to -20..20 */
@@ -426,8 +461,8 @@
 		cmin_flt,
 		maj_flt,
 		cmaj_flt,
-		cputime_to_clock_t(utime),
-		cputime_to_clock_t(stime),
+		utime,
+		stime,
 		cputime_to_clock_t(cutime),
 		cputime_to_clock_t(cstime),
 		priority,
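
task_utime() above projects the tick-sampled utime:stime ratio onto CFS's
precise se.sum_exec_runtime, and task_stime() takes total minus utime so the
sum seen by userspace stays exact and monotonic, as the comment notes. The
proportional split is plain integer arithmetic; a userspace sketch with
made-up numbers:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t utime = 30, stime = 10;  /* sampled ticks, 3:1 */
		uint64_t precise = 100;	/* sum_exec_runtime, in ticks */

		/* same split as task_utime()/task_stime() */
		uint64_t u = precise * utime / (utime + stime);	/* 75 */
		uint64_t s = precise - u;			/* 25 */

		printf("utime=%llu stime=%llu\n",
		       (unsigned long long)u, (unsigned long long)s);
		return 0;
	}
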
diff --git a/fs/proc/base.c b/fs/proc/base.c
index a5fa1fd..46ea5d5 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -296,7 +296,7 @@
  */
 static int proc_pid_schedstat(struct task_struct *task, char *buffer)
 {
-	return sprintf(buffer, "%lu %lu %lu\n",
+	return sprintf(buffer, "%llu %llu %lu\n",
 			task->sched_info.cpu_time,
 			task->sched_info.run_delay,
 			task->sched_info.pcnt);
@@ -929,6 +929,69 @@
 };
 #endif
 
+#ifdef CONFIG_SCHED_DEBUG
+/*
+ * Print out various scheduling related per-task fields:
+ */
+static int sched_show(struct seq_file *m, void *v)
+{
+	struct inode *inode = m->private;
+	struct task_struct *p;
+
+	WARN_ON(!inode);
+
+	p = get_proc_task(inode);
+	if (!p)
+		return -ESRCH;
+	proc_sched_show_task(p, m);
+
+	put_task_struct(p);
+
+	return 0;
+}
+
+static ssize_t
+sched_write(struct file *file, const char __user *buf,
+	    size_t count, loff_t *offset)
+{
+	struct inode *inode = file->f_path.dentry->d_inode;
+	struct task_struct *p;
+
+	WARN_ON(!inode);
+
+	p = get_proc_task(inode);
+	if (!p)
+		return -ESRCH;
+	proc_sched_set_task(p);
+
+	put_task_struct(p);
+
+	return count;
+}
+
+static int sched_open(struct inode *inode, struct file *filp)
+{
+	int ret;
+
+	ret = single_open(filp, sched_show, NULL);
+	if (!ret) {
+		struct seq_file *m = filp->private_data;
+
+		m->private = inode;
+	}
+	return ret;
+}
+
+static const struct file_operations proc_pid_sched_operations = {
+	.open		= sched_open,
+	.read		= seq_read,
+	.write		= sched_write,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+#endif
+
 static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
 	struct inode *inode = dentry->d_inode;
@@ -1963,6 +2026,9 @@
 	INF("environ",    S_IRUSR, pid_environ),
 	INF("auxv",       S_IRUSR, pid_auxv),
 	INF("status",     S_IRUGO, pid_status),
+#ifdef CONFIG_SCHED_DEBUG
+	REG("sched",      S_IRUGO|S_IWUSR, pid_sched),
+#endif
 	INF("cmdline",    S_IRUGO, pid_cmdline),
 	INF("stat",       S_IRUGO, tgid_stat),
 	INF("statm",      S_IRUGO, pid_statm),
@@ -2247,6 +2313,9 @@
 	INF("environ",   S_IRUSR, pid_environ),
 	INF("auxv",      S_IRUSR, pid_auxv),
 	INF("status",    S_IRUGO, pid_status),
+#ifdef CONFIG_SCHED_DEBUG
+	REG("sched",     S_IRUGO|S_IWUSR, pid_sched),
+#endif
 	INF("cmdline",   S_IRUGO, pid_cmdline),
 	INF("stat",      S_IRUGO, tid_stat),
 	INF("statm",     S_IRUGO, pid_statm),
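
sched_open() above calls single_open() with a NULL data pointer and then
stores the inode in the seq_file's ->private by hand. Since single_open()
already stashes its third argument there, the same hookup can be written as
a one-liner (a sketch of the equivalent, not the code in this patch):

	static int sched_open_alt(struct inode *inode, struct file *filp)
	{
		/*
		 * single_open() sets ((struct seq_file *)
		 * filp->private_data)->private = inode for sched_show().
		 */
		return single_open(filp, sched_show, inode);
	}
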
diff --git a/include/asm-generic/bitops/sched.h b/include/asm-generic/bitops/sched.h
index 815bb01..604fab7 100644
--- a/include/asm-generic/bitops/sched.h
+++ b/include/asm-generic/bitops/sched.h
@@ -6,28 +6,23 @@
 
 /*
  * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
+ * way of searching a 100-bit bitmap.  It's guaranteed that at least
+ * one of the 100 bits is cleared.
  */
 static inline int sched_find_first_bit(const unsigned long *b)
 {
 #if BITS_PER_LONG == 64
-	if (unlikely(b[0]))
+	if (b[0])
 		return __ffs(b[0]);
-	if (likely(b[1]))
-		return __ffs(b[1]) + 64;
-	return __ffs(b[2]) + 128;
+	return __ffs(b[1]) + 64;
 #elif BITS_PER_LONG == 32
-	if (unlikely(b[0]))
+	if (b[0])
 		return __ffs(b[0]);
-	if (unlikely(b[1]))
+	if (b[1])
 		return __ffs(b[1]) + 32;
-	if (unlikely(b[2]))
+	if (b[2])
 		return __ffs(b[2]) + 64;
-	if (b[3])
-		return __ffs(b[3]) + 96;
-	return __ffs(b[4]) + 128;
+	return __ffs(b[3]) + 96;
 #else
 #error BITS_PER_LONG not defined
 #endif
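
With the scheduler's 140-slot priority bitmap cut down to the MAX_RT_PRIO
(100) RT levels, the map fits in two longs on 64-bit and four on 32-bit,
which is exactly the set of branches the trimmed function above keeps. A
quick size check (illustrative):

	#include <stdio.h>

	#define MAX_RT_PRIO	100

	int main(void)
	{
		int bits_per_long = 8 * (int)sizeof(long);
		int nlongs = (MAX_RT_PRIO + bits_per_long - 1) / bits_per_long;

		/* 2 when BITS_PER_LONG == 64, 4 when it is 32 */
		printf("bitmap words: %d\n", nlongs);
		return 0;
	}
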
diff --git a/include/asm-mips/mach-au1x00/au1xxx_ide.h b/include/asm-mips/mach-au1x00/au1xxx_ide.h
index 8fcae21..4663e8b 100644
--- a/include/asm-mips/mach-au1x00/au1xxx_ide.h
+++ b/include/asm-mips/mach-au1x00/au1xxx_ide.h
@@ -88,26 +88,26 @@
 /*
  * Hitachi
  */
-        { "HITACHI_DK14FA-20"    ,       "ALL"           },
-        { "HTS726060M9AT00"      ,       "ALL"           },
+        { "HITACHI_DK14FA-20"    ,       NULL            },
+        { "HTS726060M9AT00"      ,       NULL            },
 /*
  * Maxtor
  */
-        { "Maxtor 6E040L0"      ,       "ALL"           },
-        { "Maxtor 6Y080P0"      ,       "ALL"           },
-        { "Maxtor 6Y160P0"      ,       "ALL"           },
+        { "Maxtor 6E040L0"      ,       NULL            },
+        { "Maxtor 6Y080P0"      ,       NULL            },
+        { "Maxtor 6Y160P0"      ,       NULL            },
 /*
  * Seagate
  */
-        { "ST3120026A"          ,       "ALL"           },
-        { "ST320014A"           ,       "ALL"           },
-        { "ST94011A"            ,       "ALL"           },
-        { "ST340016A"           ,       "ALL"           },
+        { "ST3120026A"          ,       NULL            },
+        { "ST320014A"           ,       NULL            },
+        { "ST94011A"            ,       NULL            },
+        { "ST340016A"           ,       NULL            },
 /*
  * Western Digital
  */
-        { "WDC WD400UE-00HCT0"  ,       "ALL"           },
-        { "WDC WD400JB-00JJC0"  ,       "ALL"           },
+        { "WDC WD400UE-00HCT0"  ,       NULL            },
+        { "WDC WD400JB-00JJC0"  ,       NULL            },
         { NULL                  ,       NULL            }
 };
 
@@ -116,9 +116,9 @@
 /*
  * Western Digital
  */
-        { "WDC WD100EB-00CGH0"  ,       "ALL"           },
-        { "WDC WD200BB-00AUA1"  ,       "ALL"           },
-        { "WDC AC24300L"        ,       "ALL"           },
+        { "WDC WD100EB-00CGH0"  ,       NULL            },
+        { "WDC WD200BB-00AUA1"  ,       NULL            },
+        { "WDC AC24300L"        ,       NULL            },
         { NULL                  ,       NULL            }
 };
 #endif
diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h
new file mode 100644
index 0000000..d774b77
--- /dev/null
+++ b/include/linux/eeprom_93cx6.h
@@ -0,0 +1,72 @@
+/*
+	Copyright (C) 2004 - 2006 rt2x00 SourceForge Project
+	<http://rt2x00.serialmonkey.com>
+
+	This program is free software; you can redistribute it and/or modify
+	it under the terms of the GNU General Public License as published by
+	the Free Software Foundation; either version 2 of the License, or
+	(at your option) any later version.
+
+	This program is distributed in the hope that it will be useful,
+	but WITHOUT ANY WARRANTY; without even the implied warranty of
+	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+	GNU General Public License for more details.
+
+	You should have received a copy of the GNU General Public License
+	along with this program; if not, write to the
+	Free Software Foundation, Inc.,
+	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+	Module: eeprom_93cx6
+	Abstract: EEPROM reader data structures for 93cx6 chipsets.
+	Supported chipsets: 93c46 & 93c66.
+ */
+
+/*
+ * EEPROM operation defines.
+ */
+#define PCI_EEPROM_WIDTH_93C46	6
+#define PCI_EEPROM_WIDTH_93C66	8
+#define PCI_EEPROM_WIDTH_OPCODE	3
+#define PCI_EEPROM_WRITE_OPCODE	0x05
+#define PCI_EEPROM_READ_OPCODE	0x06
+#define PCI_EEPROM_EWDS_OPCODE	0x10
+#define PCI_EEPROM_EWEN_OPCODE	0x13
+
+/**
+ * struct eeprom_93cx6 - control structure for setting the commands
+ * for reading the eeprom data.
+ * @data: private pointer for the driver.
+ * @register_read(struct eeprom_93cx6 *eeprom): handler to
+ * read the eeprom register; this function should set all reg_* fields.
+ * @register_write(struct eeprom_93cx6 *eeprom): handler to
+ * write to the eeprom register by using all reg_* fields.
+ * @width: eeprom width, should be one of the PCI_EEPROM_WIDTH_* defines
+ * @reg_data_in: register field to indicate data input
+ * @reg_data_out: register field to indicate data output
+ * @reg_data_clock: register field to set the data clock
+ * @reg_chip_select: register field to set the chip select
+ *
+ * This structure is used for the communication between the driver
+ * and the eeprom_93cx6 handlers for reading the eeprom.
+ */
+struct eeprom_93cx6 {
+	void *data;
+
+	void (*register_read)(struct eeprom_93cx6 *eeprom);
+	void (*register_write)(struct eeprom_93cx6 *eeprom);
+
+	int width;
+
+	char reg_data_in;
+	char reg_data_out;
+	char reg_data_clock;
+	char reg_chip_select;
+};
+
+extern void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom,
+	const u8 word, u16 *data);
+extern void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom,
+	const u8 word, __le16 *data, const u16 words);
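
A driver uses this header by pointing the two callbacks at its own bus
accessors, then calling eeprom_93cx6_read()/eeprom_93cx6_multiread(). A
minimal wiring sketch follows; struct demo_dev, demo_read_csr(),
demo_write_csr() and the DEMO_EEPROM_* bits are hypothetical placeholders
for a real driver's registers:

	static void demo_eeprom_read(struct eeprom_93cx6 *eeprom)
	{
		struct demo_dev *dev = eeprom->data;
		u32 reg = demo_read_csr(dev);	/* hypothetical MMIO read */

		/* mirror the hardware pins into the reg_* fields */
		eeprom->reg_data_in     = !!(reg & DEMO_EEPROM_DATA_IN);
		eeprom->reg_data_out    = !!(reg & DEMO_EEPROM_DATA_OUT);
		eeprom->reg_data_clock  = !!(reg & DEMO_EEPROM_CLOCK);
		eeprom->reg_chip_select = !!(reg & DEMO_EEPROM_CS);
	}

	static void demo_eeprom_write(struct eeprom_93cx6 *eeprom)
	{
		struct demo_dev *dev = eeprom->data;
		u32 reg = 0;

		/* drive the pins from the reg_* fields */
		reg |= eeprom->reg_data_in     ? DEMO_EEPROM_DATA_IN : 0;
		reg |= eeprom->reg_data_out    ? DEMO_EEPROM_DATA_OUT : 0;
		reg |= eeprom->reg_data_clock  ? DEMO_EEPROM_CLOCK : 0;
		reg |= eeprom->reg_chip_select ? DEMO_EEPROM_CS : 0;
		demo_write_csr(dev, reg);	/* hypothetical MMIO write */
	}

	static void demo_read_word(struct demo_dev *dev, u8 word, u16 *val)
	{
		struct eeprom_93cx6 eeprom = {
			.data		= dev,
			.register_read	= demo_eeprom_read,
			.register_write	= demo_eeprom_write,
			.width		= PCI_EEPROM_WIDTH_93C46,
		};

		eeprom_93cx6_read(&eeprom, word, val);
	}
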
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 7803014..8d30229 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -79,6 +79,19 @@
 #endif
 
 #ifdef CONFIG_PREEMPT
+# define PREEMPT_CHECK_OFFSET 1
+#else
+# define PREEMPT_CHECK_OFFSET 0
+#endif
+
+/*
+ * Check whether we were atomic before we did preempt_disable():
+ * (used by the scheduler)
+ */
+#define in_atomic_preempt_off() \
+		((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
+
+#ifdef CONFIG_PREEMPT
 # define preemptible()	(preempt_count() == 0 && !irqs_disabled())
 # define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
 #else
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 827ee74..898103b 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -263,19 +263,28 @@
 #define HID_QUIRK_2WHEEL_MOUSE_HACK_5		0x00000100
 #define HID_QUIRK_2WHEEL_MOUSE_HACK_ON		0x00000200
 #define HID_QUIRK_MIGHTYMOUSE			0x00000400
-#define HID_QUIRK_CYMOTION			0x00000800
-#define HID_QUIRK_POWERBOOK_HAS_FN		0x00001000
-#define HID_QUIRK_POWERBOOK_FN_ON		0x00002000
-#define HID_QUIRK_INVERT_HWHEEL			0x00004000
-#define HID_QUIRK_POWERBOOK_ISO_KEYBOARD        0x00008000
-#define HID_QUIRK_BAD_RELATIVE_KEYS		0x00010000
-#define HID_QUIRK_SKIP_OUTPUT_REPORTS		0x00020000
-#define HID_QUIRK_IGNORE_MOUSE			0x00040000
-#define HID_QUIRK_SONY_PS3_CONTROLLER		0x00080000
-#define HID_QUIRK_LOGITECH_DESCRIPTOR		0x00100000
-#define HID_QUIRK_DUPLICATE_USAGES		0x00200000
-#define HID_QUIRK_RESET_LEDS			0x00400000
-#define HID_QUIRK_SWAPPED_MIN_MAX		0x00800000
+#define HID_QUIRK_POWERBOOK_HAS_FN		0x00000800
+#define HID_QUIRK_POWERBOOK_FN_ON		0x00001000
+#define HID_QUIRK_INVERT_HWHEEL			0x00002000
+#define HID_QUIRK_POWERBOOK_ISO_KEYBOARD	0x00004000
+#define HID_QUIRK_BAD_RELATIVE_KEYS		0x00008000
+#define HID_QUIRK_SKIP_OUTPUT_REPORTS		0x00010000
+#define HID_QUIRK_IGNORE_MOUSE			0x00020000
+#define HID_QUIRK_SONY_PS3_CONTROLLER		0x00040000
+#define HID_QUIRK_DUPLICATE_USAGES		0x00080000
+#define HID_QUIRK_RESET_LEDS			0x00100000
+#define HID_QUIRK_HIDINPUT			0x00200000
+#define HID_QUIRK_LOGITECH_IGNORE_DOUBLED_WHEEL	0x00400000
+#define HID_QUIRK_LOGITECH_EXPANDED_KEYMAP	0x00800000
+
+/*
+ * Separate quirks for runtime report descriptor fixup
+ */
+
+#define HID_QUIRK_RDESC_CYMOTION		0x00000001
+#define HID_QUIRK_RDESC_LOGITECH		0x00000002
+#define HID_QUIRK_RDESC_SWAPPED_MIN_MAX		0x00000004
+#define HID_QUIRK_RDESC_PETALYNX		0x00000008
 
 /*
  * This is the global environment of the parser. This information is
@@ -488,6 +497,11 @@
 #define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001))
 
 /* HID core API */
+
+#ifdef CONFIG_HID_DEBUG
+extern int hid_debug;
+#endif
+
 extern void hidinput_hid_event(struct hid_device *, struct hid_field *, struct hid_usage *, __s32);
 extern void hidinput_report_event(struct hid_device *hid, struct hid_report *report);
 extern int hidinput_connect(struct hid_device *);
@@ -506,6 +520,7 @@
 int usbhid_modify_dquirk(const u16 idVendor, const u16 idProduct, const u32 quirks);
 int usbhid_quirks_init(char **quirks_param);
 void usbhid_quirks_exit(void);
+void usbhid_fixup_report_descriptor(const u16, const u16, char *, unsigned, char **);
 
 #ifdef CONFIG_HID_FF
 int hid_ff_init(struct hid_device *hid);
@@ -523,14 +538,19 @@
 #else
 static inline int hid_ff_init(struct hid_device *hid) { return -1; }
 #endif
-#ifdef DEBUG
-#define dbg(format, arg...) printk(KERN_DEBUG "%s: " format "\n" , \
-		__FILE__ , ## arg)
+
+#ifdef CONFIG_HID_DEBUG
+#define dbg_hid(format, arg...) if (hid_debug) \
+				printk(KERN_DEBUG "%s: " format ,\
+				__FILE__ , ## arg)
+#define dbg_hid_line(format, arg...) if (hid_debug) \
+				printk(format, ## arg)
 #else
-#define dbg(format, arg...) do {} while (0)
+#define dbg_hid(format, arg...) do {} while (0)
+#define dbg_hid_line dbg_hid
 #endif
 
-#define err(format, arg...) printk(KERN_ERR "%s: " format "\n" , \
+#define err_hid(format, arg...) printk(KERN_ERR "%s: " format "\n" , \
 		__FILE__ , ## arg)
 #endif
 
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 1e365ac..19ab258 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -25,6 +25,7 @@
 #include <asm/system.h>
 #include <asm/io.h>
 #include <asm/semaphore.h>
+#include <asm/mutex.h>
 
 /******************************************************************************
  * IDE driver configuration options (play with these as desired):
@@ -685,6 +686,8 @@
 	u8 mwdma_mask;
 	u8 swdma_mask;
 
+	u8 cbl;		/* cable type */
+
 	hwif_chipset_t chipset;	/* sub-module for tuning.. */
 
 	struct pci_dev  *pci_dev;	/* for pci chipsets */
@@ -735,8 +738,8 @@
 	void (*ide_dma_clear_irq)(ide_drive_t *drive);
 	void (*dma_host_on)(ide_drive_t *drive);
 	void (*dma_host_off)(ide_drive_t *drive);
-	int (*ide_dma_lostirq)(ide_drive_t *drive);
-	int (*ide_dma_timeout)(ide_drive_t *drive);
+	void (*dma_lost_irq)(ide_drive_t *drive);
+	void (*dma_timeout)(ide_drive_t *drive);
 
 	void (*OUTB)(u8 addr, unsigned long port);
 	void (*OUTBSYNC)(ide_drive_t *drive, u8 addr, unsigned long port);
@@ -791,7 +794,6 @@
 	unsigned	sharing_irq: 1;	/* 1 = sharing irq with another hwif */
 	unsigned	reset      : 1;	/* reset after probe */
 	unsigned	autodma    : 1;	/* auto-attempt using DMA at boot */
-	unsigned	udma_four  : 1;	/* 1=ATA-66 capable, 0=default */
 	unsigned	no_lba48   : 1; /* 1 = cannot do LBA48 */
 	unsigned	no_lba48_dma : 1; /* 1 = cannot do LBA48 DMA */
 	unsigned	auto_poll  : 1; /* supports nop auto-poll */
@@ -863,7 +865,7 @@
 
 typedef struct ide_driver_s ide_driver_t;
 
-extern struct semaphore ide_setting_sem;
+extern struct mutex ide_setting_mtx;
 
 int set_io_32bit(ide_drive_t *, int);
 int set_pio_mode(ide_drive_t *, int);
@@ -1304,8 +1306,8 @@
 extern int ide_dma_setup(ide_drive_t *);
 extern void ide_dma_start(ide_drive_t *);
 extern int __ide_dma_end(ide_drive_t *);
-extern int __ide_dma_lostirq(ide_drive_t *);
-extern int __ide_dma_timeout(ide_drive_t *);
+extern void ide_dma_lost_irq(ide_drive_t *);
+extern void ide_dma_timeout(ide_drive_t *);
 #endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
 
 #else
@@ -1382,11 +1384,11 @@
 
 
 extern spinlock_t ide_lock;
-extern struct semaphore ide_cfg_sem;
+extern struct mutex ide_cfg_mtx;
 /*
  * Structure locking:
  *
- * ide_cfg_sem and ide_lock together protect changes to
+ * ide_cfg_mtx and ide_lock together protect changes to
  * ide_hwif_t->{next,hwgroup}
  * ide_drive_t->next
  *
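
The locking change here is the stock semaphore-to-mutex conversion:
ide_setting_sem and ide_cfg_sem become ide_setting_mtx and ide_cfg_mtx, and
the matching down()/up() call sites (not shown in this hunk) become
mutex_lock()/mutex_unlock(). The shape of the conversion, sketched on a
made-up lock rather than a real IDE call site:

	#include <linux/mutex.h>

	static DEFINE_MUTEX(demo_mtx);	/* was: static DECLARE_MUTEX(demo_sem); */

	static void demo_update(void)
	{
		mutex_lock(&demo_mtx);		/* was: down(&demo_sem); */
		/* ... touch the protected state ... */
		mutex_unlock(&demo_mtx);	/* was: up(&demo_sem); */
	}
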
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 693f0e6..cfb6805 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -34,6 +34,8 @@
 #define SCHED_FIFO		1
 #define SCHED_RR		2
 #define SCHED_BATCH		3
+/* SCHED_ISO: reserved but not implemented yet */
+#define SCHED_IDLE		5
 
 #ifdef __KERNEL__
 
@@ -130,6 +132,26 @@
 extern unsigned long nr_iowait(void);
 extern unsigned long weighted_cpuload(const int cpu);
 
+struct seq_file;
+struct cfs_rq;
+#ifdef CONFIG_SCHED_DEBUG
+extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
+extern void proc_sched_set_task(struct task_struct *p);
+extern void
+print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq, u64 now);
+#else
+static inline void
+proc_sched_show_task(struct task_struct *p, struct seq_file *m)
+{
+}
+static inline void proc_sched_set_task(struct task_struct *p)
+{
+}
+static inline void
+print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq, u64 now)
+{
+}
+#endif
 
 /*
  * Task state bitmask. NOTE! These bits are also
@@ -193,6 +215,7 @@
 extern void sched_init(void);
 extern void sched_init_smp(void);
 extern void init_idle(struct task_struct *idle, int cpu);
+extern void init_idle_bootup_task(struct task_struct *idle);
 
 extern cpumask_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
@@ -479,7 +502,7 @@
 	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
 	 * other than jiffies.)
 	 */
-	unsigned long long sched_time;
+	unsigned long long sum_sched_runtime;
 
 	/*
 	 * We don't bother to synchronize most readers of this at all,
@@ -521,31 +544,6 @@
 #define SIGNAL_STOP_CONTINUED	0x00000004 /* SIGCONT since WCONTINUED reap */
 #define SIGNAL_GROUP_EXIT	0x00000008 /* group exit in progress */
 
-
-/*
- * Priority of a process goes from 0..MAX_PRIO-1, valid RT
- * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
- * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
- * values are inverted: lower p->prio value means higher priority.
- *
- * The MAX_USER_RT_PRIO value allows the actual maximum
- * RT priority to be separate from the value exported to
- * user-space.  This allows kernel threads to set their
- * priority to a value higher than any user task. Note:
- * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
- */
-
-#define MAX_USER_RT_PRIO	100
-#define MAX_RT_PRIO		MAX_USER_RT_PRIO
-
-#define MAX_PRIO		(MAX_RT_PRIO + 40)
-
-#define rt_prio(prio)		unlikely((prio) < MAX_RT_PRIO)
-#define rt_task(p)		rt_prio((p)->prio)
-#define batch_task(p)		(unlikely((p)->policy == SCHED_BATCH))
-#define is_rt_policy(p)		((p) != SCHED_NORMAL && (p) != SCHED_BATCH)
-#define has_rt_policy(p)	unlikely(is_rt_policy((p)->policy))
-
 /*
  * Some day this will be a full-fledged user tracking system..
  */
@@ -583,13 +581,13 @@
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 struct sched_info {
 	/* cumulative counters */
-	unsigned long	cpu_time,	/* time spent on the cpu */
-			run_delay,	/* time spent waiting on a runqueue */
-			pcnt;		/* # of timeslices run on this cpu */
+	unsigned long pcnt;	      /* # of times run on this cpu */
+	unsigned long long cpu_time,  /* time spent on the cpu */
+			   run_delay; /* time spent waiting on a runqueue */
 
 	/* timestamps */
-	unsigned long	last_arrival,	/* when we last ran on a cpu */
-			last_queued;	/* when we were last queued to run */
+	unsigned long long last_arrival,/* when we last ran on a cpu */
+			   last_queued;	/* when we were last queued to run */
 };
 #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
 
@@ -639,18 +637,24 @@
 #endif
 }
 
-enum idle_type
-{
-	SCHED_IDLE,
-	NOT_IDLE,
-	NEWLY_IDLE,
-	MAX_IDLE_TYPES
+enum cpu_idle_type {
+	CPU_IDLE,
+	CPU_NOT_IDLE,
+	CPU_NEWLY_IDLE,
+	CPU_MAX_IDLE_TYPES
 };
 
 /*
  * sched-domains (multiprocessor balancing) declarations:
  */
-#define SCHED_LOAD_SCALE	128UL	/* increase resolution of load */
+
+/*
+ * Increase resolution of nice-level calculations:
+ */
+#define SCHED_LOAD_SHIFT	10
+#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)
+
+#define SCHED_LOAD_SCALE_FUZZ	(SCHED_LOAD_SCALE >> 5)
 
 #ifdef CONFIG_SMP
 #define SD_LOAD_BALANCE		1	/* Do load balancing on this domain. */
@@ -719,14 +723,14 @@
 
 #ifdef CONFIG_SCHEDSTATS
 	/* load_balance() stats */
-	unsigned long lb_cnt[MAX_IDLE_TYPES];
-	unsigned long lb_failed[MAX_IDLE_TYPES];
-	unsigned long lb_balanced[MAX_IDLE_TYPES];
-	unsigned long lb_imbalance[MAX_IDLE_TYPES];
-	unsigned long lb_gained[MAX_IDLE_TYPES];
-	unsigned long lb_hot_gained[MAX_IDLE_TYPES];
-	unsigned long lb_nobusyg[MAX_IDLE_TYPES];
-	unsigned long lb_nobusyq[MAX_IDLE_TYPES];
+	unsigned long lb_cnt[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_failed[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_balanced[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_imbalance[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_gained[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_hot_gained[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_nobusyg[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_nobusyq[CPU_MAX_IDLE_TYPES];
 
 	/* Active load balancing */
 	unsigned long alb_cnt;
@@ -753,12 +757,6 @@
 extern int partition_sched_domains(cpumask_t *partition1,
 				    cpumask_t *partition2);
 
-/*
- * Maximum cache size the migration-costs auto-tuning code will
- * search from:
- */
-extern unsigned int max_cache_size;
-
 #endif	/* CONFIG_SMP */
 
 
@@ -809,14 +807,86 @@
 struct pipe_inode_info;
 struct uts_namespace;
 
-enum sleep_type {
-	SLEEP_NORMAL,
-	SLEEP_NONINTERACTIVE,
-	SLEEP_INTERACTIVE,
-	SLEEP_INTERRUPTED,
+struct rq;
+struct sched_domain;
+
+struct sched_class {
+	struct sched_class *next;
+
+	void (*enqueue_task) (struct rq *rq, struct task_struct *p,
+			      int wakeup, u64 now);
+	void (*dequeue_task) (struct rq *rq, struct task_struct *p,
+			      int sleep, u64 now);
+	void (*yield_task) (struct rq *rq, struct task_struct *p);
+
+	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
+
+	struct task_struct * (*pick_next_task) (struct rq *rq, u64 now);
+	void (*put_prev_task) (struct rq *rq, struct task_struct *p, u64 now);
+
+	int (*load_balance) (struct rq *this_rq, int this_cpu,
+			struct rq *busiest,
+			unsigned long max_nr_move, unsigned long max_load_move,
+			struct sched_domain *sd, enum cpu_idle_type idle,
+			int *all_pinned, unsigned long *total_load_moved);
+
+	void (*set_curr_task) (struct rq *rq);
+	void (*task_tick) (struct rq *rq, struct task_struct *p);
+	void (*task_new) (struct rq *rq, struct task_struct *p);
 };
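
The method table above is the heart of the modular-scheduler rework: kernel/sched.c dispatches through p->sched_class instead of hard-coding policy checks, and classes are chained via ->next in priority order. A minimal user-space sketch of that dispatch pattern (all names below are illustrative, not kernel API):

#include <stdio.h>

struct entity { const char *name; };

/* Illustrative method table mirroring the sched_class layout above;
 * none of these names are kernel API. */
struct class_ops {
	const struct class_ops *next;	/* next lower class in the chain */
	struct entity *(*pick_next)(void);
};

static struct entity a_fair_task = { "fair-task" };

static struct entity *rt_pick(void)   { return NULL; }	/* no RT work queued */
static struct entity *fair_pick(void) { return &a_fair_task; }

static const struct class_ops fair_class = { .next = NULL, .pick_next = fair_pick };
static const struct class_ops rt_class   = { .next = &fair_class, .pick_next = rt_pick };

int main(void)
{
	/* The core scheduler walks the classes, highest first, until one
	 * of them hands back a runnable entity: */
	for (const struct class_ops *c = &rt_class; c; c = c->next) {
		struct entity *e = c->pick_next();
		if (e) {
			printf("picked %s\n", e->name);
			break;
		}
	}
	return 0;
}
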
 
-struct prio_array;
+struct load_weight {
+	unsigned long weight, inv_weight;
+};
+
+/*
+ * CFS stats for a schedulable entity (task, task-group etc)
+ *
+ * Current field usage histogram:
+ *
+ *     4 se->block_start
+ *     4 se->run_node
+ *     4 se->sleep_start
+ *     4 se->sleep_start_fair
+ *     6 se->load.weight
+ *     7 se->delta_fair
+ *    15 se->wait_runtime
+ */
+struct sched_entity {
+	long			wait_runtime;
+	unsigned long		delta_fair_run;
+	unsigned long		delta_fair_sleep;
+	unsigned long		delta_exec;
+	s64			fair_key;
+	struct load_weight	load;		/* for load-balancing */
+	struct rb_node		run_node;
+	unsigned int		on_rq;
+
+	u64			wait_start_fair;
+	u64			wait_start;
+	u64			exec_start;
+	u64			sleep_start;
+	u64			sleep_start_fair;
+	u64			block_start;
+	u64			sleep_max;
+	u64			block_max;
+	u64			exec_max;
+	u64			wait_max;
+	u64			last_ran;
+
+	u64			sum_exec_runtime;
+	s64			sum_wait_runtime;
+	s64			sum_sleep_runtime;
+	unsigned long		wait_runtime_overruns;
+	unsigned long		wait_runtime_underruns;
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	struct sched_entity	*parent;
+	/* rq on which this entity is (to be) queued: */
+	struct cfs_rq		*cfs_rq;
+	/* rq "owned" by this entity/group: */
+	struct cfs_rq		*my_q;
+#endif
+};
 
 struct task_struct {
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
@@ -832,23 +902,20 @@
 	int oncpu;
 #endif
 #endif
-	int load_weight;	/* for niceness load balancing purposes */
+
 	int prio, static_prio, normal_prio;
 	struct list_head run_list;
-	struct prio_array *array;
+	struct sched_class *sched_class;
+	struct sched_entity se;
 
 	unsigned short ioprio;
 #ifdef CONFIG_BLK_DEV_IO_TRACE
 	unsigned int btrace_seq;
 #endif
-	unsigned long sleep_avg;
-	unsigned long long timestamp, last_ran;
-	unsigned long long sched_time; /* sched_clock time spent running */
-	enum sleep_type sleep_type;
 
 	unsigned int policy;
 	cpumask_t cpus_allowed;
-	unsigned int time_slice, first_time_slice;
+	unsigned int time_slice;
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	struct sched_info sched_info;
@@ -1078,6 +1145,37 @@
 #endif
 };
 
+/*
+ * Priority of a process goes from 0..MAX_PRIO-1, valid RT
+ * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
+ * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
+ * values are inverted: lower p->prio value means higher priority.
+ *
+ * The MAX_USER_RT_PRIO value allows the actual maximum
+ * RT priority to be separate from the value exported to
+ * user-space.  This allows kernel threads to set their
+ * priority to a value higher than any user task. Note:
+ * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
+ */
+
+#define MAX_USER_RT_PRIO	100
+#define MAX_RT_PRIO		MAX_USER_RT_PRIO
+
+#define MAX_PRIO		(MAX_RT_PRIO + 40)
+#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)
+
+static inline int rt_prio(int prio)
+{
+	if (unlikely(prio < MAX_RT_PRIO))
+		return 1;
+	return 0;
+}
+
+static inline int rt_task(struct task_struct *p)
+{
+	return rt_prio(p->prio);
+}
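
Since MAX_RT_PRIO is 100 and MAX_PRIO is 140, the forty nice levels -20..19 occupy static priorities 100..139, with DEFAULT_PRIO (120) corresponding to nice 0. A hedged sketch of the mapping; NICE_TO_PRIO/PRIO_TO_NICE are reconstructed here from their conventional definitions and are not part of this hunk:

#include <stdio.h>

#define MAX_USER_RT_PRIO	100
#define MAX_RT_PRIO		MAX_USER_RT_PRIO
#define MAX_PRIO		(MAX_RT_PRIO + 40)
#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)

/* Conventional nice<->prio mapping, reconstructed for illustration: */
#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)

int main(void)
{
	printf("RT priorities:     0..%d\n", MAX_RT_PRIO - 1);
	printf("normal priorities: %d..%d\n", MAX_RT_PRIO, MAX_PRIO - 1);
	printf("nice -20 -> %d, nice 0 -> %d, nice 19 -> %d\n",
	       NICE_TO_PRIO(-20), NICE_TO_PRIO(0), NICE_TO_PRIO(19));
	printf("DEFAULT_PRIO %d is nice %d\n",
	       DEFAULT_PRIO, PRIO_TO_NICE(DEFAULT_PRIO));
	return 0;
}
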
+
 static inline pid_t process_group(struct task_struct *tsk)
 {
 	return tsk->signal->pgrp;
@@ -1223,7 +1321,7 @@
 
 extern unsigned long long sched_clock(void);
 extern unsigned long long
-current_sched_time(const struct task_struct *current_task);
+task_sched_runtime(struct task_struct *task);
 
 /* sched_exec is called by processes performing an exec */
 #ifdef CONFIG_SMP
@@ -1232,6 +1330,8 @@
 #define sched_exec()   {}
 #endif
 
+extern void sched_clock_unstable_event(void);
+
 #ifdef CONFIG_HOTPLUG_CPU
 extern void idle_task_exit(void);
 #else
@@ -1240,6 +1340,14 @@
 
 extern void sched_idle_next(void);
 
+extern unsigned int sysctl_sched_granularity;
+extern unsigned int sysctl_sched_wakeup_granularity;
+extern unsigned int sysctl_sched_batch_wakeup_granularity;
+extern unsigned int sysctl_sched_stat_granularity;
+extern unsigned int sysctl_sched_runtime_limit;
+extern unsigned int sysctl_sched_child_runs_first;
+extern unsigned int sysctl_sched_features;
+
 #ifdef CONFIG_RT_MUTEXES
 extern int rt_mutex_getprio(struct task_struct *p);
 extern void rt_mutex_setprio(struct task_struct *p, int prio);
@@ -1317,8 +1425,8 @@
 #else
  static inline void kick_process(struct task_struct *tsk) { }
 #endif
-extern void FASTCALL(sched_fork(struct task_struct * p, int clone_flags));
-extern void FASTCALL(sched_exit(struct task_struct * p));
+extern void sched_fork(struct task_struct *p, int clone_flags);
+extern void sched_dead(struct task_struct *p);
 
 extern int in_group_p(gid_t);
 extern int in_egroup_p(gid_t);
@@ -1406,7 +1514,7 @@
 extern void FASTCALL(__mmdrop(struct mm_struct *));
 static inline void mmdrop(struct mm_struct * mm)
 {
-	if (atomic_dec_and_test(&mm->mm_count))
+	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
 		__mmdrop(mm);
 }
 
@@ -1638,10 +1746,7 @@
 	return task_thread_info(p)->cpu;
 }
 
-static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
-{
-	task_thread_info(p)->cpu = cpu;
-}
+extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
 
 #else
 
diff --git a/include/linux/topology.h b/include/linux/topology.h
index a9d1f04..da6c39b 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -98,7 +98,7 @@
 	.cache_nice_tries	= 0,			\
 	.busy_idx		= 0,			\
 	.idle_idx		= 0,			\
-	.newidle_idx		= 1,			\
+	.newidle_idx		= 0,			\
 	.wake_idx		= 0,			\
 	.forkexec_idx		= 0,			\
 	.flags			= SD_LOAD_BALANCE	\
@@ -128,14 +128,15 @@
 	.imbalance_pct		= 125,			\
 	.cache_nice_tries	= 1,			\
 	.busy_idx		= 2,			\
-	.idle_idx		= 1,			\
-	.newidle_idx		= 2,			\
+	.idle_idx		= 0,			\
+	.newidle_idx		= 0,			\
 	.wake_idx		= 1,			\
 	.forkexec_idx		= 1,			\
 	.flags			= SD_LOAD_BALANCE	\
 				| SD_BALANCE_NEWIDLE	\
 				| SD_BALANCE_EXEC	\
 				| SD_WAKE_AFFINE	\
+				| SD_WAKE_IDLE		\
 				| SD_SHARE_PKG_RESOURCES\
 				| BALANCE_FOR_MC_POWER,	\
 	.last_balance		= jiffies,		\
@@ -158,14 +159,15 @@
 	.imbalance_pct		= 125,			\
 	.cache_nice_tries	= 1,			\
 	.busy_idx		= 2,			\
-	.idle_idx		= 1,			\
-	.newidle_idx		= 2,			\
+	.idle_idx		= 0,			\
+	.newidle_idx		= 0,			\
 	.wake_idx		= 1,			\
 	.forkexec_idx		= 1,			\
 	.flags			= SD_LOAD_BALANCE	\
 				| SD_BALANCE_NEWIDLE	\
 				| SD_BALANCE_EXEC	\
 				| SD_WAKE_AFFINE	\
+				| SD_WAKE_IDLE		\
 				| BALANCE_FOR_PKG_POWER,\
 	.last_balance		= jiffies,		\
 	.balance_interval	= 1,			\
diff --git a/include/linux/wait.h b/include/linux/wait.h
index e820d00..0e68628 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -366,15 +366,15 @@
 
 /*
  * These are the old interfaces to sleep waiting for an event.
- * They are racy.  DO NOT use them, use the wait_event* interfaces above.  
- * We plan to remove these interfaces during 2.7.
+ * They are racy.  DO NOT use them, use the wait_event* interfaces above.
+ * We plan to remove these interfaces.
  */
-extern void FASTCALL(sleep_on(wait_queue_head_t *q));
-extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q,
-				      signed long timeout));
-extern void FASTCALL(interruptible_sleep_on(wait_queue_head_t *q));
-extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q,
-						    signed long timeout));
+extern void sleep_on(wait_queue_head_t *q);
+extern long sleep_on_timeout(wait_queue_head_t *q,
+				      signed long timeout);
+extern void interruptible_sleep_on(wait_queue_head_t *q);
+extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
+					   signed long timeout);
 
 /*
  * Waitqueues which are removed from the waitqueue_head at wakeup time
diff --git a/include/pcmcia/ciscode.h b/include/pcmcia/ciscode.h
index eae7e2e..ad6e278 100644
--- a/include/pcmcia/ciscode.h
+++ b/include/pcmcia/ciscode.h
@@ -126,4 +126,6 @@
 #define MANFID_POSSIO			0x030c
 #define PRODID_POSSIO_GCC		0x0003
 
+#define MANFID_NEC			0x0010
+
 #endif /* _LINUX_CISCODE_H */
diff --git a/init/main.c b/init/main.c
index eb8bdba..0eb1c74 100644
--- a/init/main.c
+++ b/init/main.c
@@ -436,15 +436,16 @@
 
 	/*
 	 * The boot idle thread must execute schedule()
-	 * at least one to get things moving:
+	 * at least once to get things moving:
 	 */
+	init_idle_bootup_task(current);
 	preempt_enable_no_resched();
 	schedule();
 	preempt_disable();
 
 	/* Call into cpu_idle with preempt disabled */
 	cpu_idle();
-} 
+}
 
 /* Check for early params. */
 static int __init do_early_param(char *param, char *val)
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index c0148ae..81e6978 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -99,9 +99,10 @@
 int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
 {
 	s64 tmp;
-	struct timespec ts;
-	unsigned long t1,t2,t3;
+	unsigned long t1;
+	unsigned long long t2, t3;
 	unsigned long flags;
+	struct timespec ts;
 
 	/* Though tsk->delays accessed later, early exit avoids
 	 * unnecessary returning of other data
@@ -124,11 +125,10 @@
 
 	d->cpu_count += t1;
 
-	jiffies_to_timespec(t2, &ts);
-	tmp = (s64)d->cpu_delay_total + timespec_to_ns(&ts);
+	tmp = (s64)d->cpu_delay_total + t2;
 	d->cpu_delay_total = (tmp < (s64)d->cpu_delay_total) ? 0 : tmp;
 
-	tmp = (s64)d->cpu_run_virtual_total + (s64)jiffies_to_usecs(t3) * 1000;
+	tmp = (s64)d->cpu_run_virtual_total + t3;
 	d->cpu_run_virtual_total =
 		(tmp < (s64)d->cpu_run_virtual_total) ?	0 : tmp;
 
diff --git a/kernel/exit.c b/kernel/exit.c
index 5c8ecba..ca6a11b 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -122,9 +122,9 @@
 		sig->maj_flt += tsk->maj_flt;
 		sig->nvcsw += tsk->nvcsw;
 		sig->nivcsw += tsk->nivcsw;
-		sig->sched_time += tsk->sched_time;
 		sig->inblock += task_io_get_inblock(tsk);
 		sig->oublock += task_io_get_oublock(tsk);
+		sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
 		sig = NULL; /* Marker for below. */
 	}
 
@@ -182,7 +182,6 @@
 		zap_leader = (leader->exit_signal == -1);
 	}
 
-	sched_exit(p);
 	write_unlock_irq(&tasklist_lock);
 	proc_flush_task(p);
 	release_thread(p);
@@ -291,7 +290,7 @@
 	/* Set the exit signal to SIGCHLD so we signal init on exit */
 	current->exit_signal = SIGCHLD;
 
-	if (!has_rt_policy(current) && (task_nice(current) < 0))
+	if (task_nice(current) < 0)
 		set_user_nice(current, 0);
 	/* cpus_allowed? */
 	/* rt_priority? */
diff --git a/kernel/fork.c b/kernel/fork.c
index 73ad5cd..da3a155 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -877,7 +877,7 @@
 	sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
 	sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
 	sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
-	sig->sched_time = 0;
+	sig->sum_sched_runtime = 0;
 	INIT_LIST_HEAD(&sig->cpu_timers[0]);
 	INIT_LIST_HEAD(&sig->cpu_timers[1]);
 	INIT_LIST_HEAD(&sig->cpu_timers[2]);
@@ -1040,7 +1040,7 @@
 
 	p->utime = cputime_zero;
 	p->stime = cputime_zero;
- 	p->sched_time = 0;
+
 #ifdef CONFIG_TASK_XACCT
 	p->rchar = 0;		/* I/O counter: bytes read */
 	p->wchar = 0;		/* I/O counter: bytes written */
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 1de710e1..b53c8fc 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -161,7 +161,7 @@
 }
 static inline unsigned long long sched_ns(struct task_struct *p)
 {
-	return (p == current) ? current_sched_time(p) : p->sched_time;
+	return task_sched_runtime(p);
 }
 
 int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
@@ -246,10 +246,10 @@
 		} while (t != p);
 		break;
 	case CPUCLOCK_SCHED:
-		cpu->sched = p->signal->sched_time;
+		cpu->sched = p->signal->sum_sched_runtime;
 		/* Add in each other live thread.  */
 		while ((t = next_thread(t)) != p) {
-			cpu->sched += t->sched_time;
+			cpu->sched += t->se.sum_exec_runtime;
 		}
 		cpu->sched += sched_ns(p);
 		break;
@@ -422,7 +422,7 @@
  */
 static void cleanup_timers(struct list_head *head,
 			   cputime_t utime, cputime_t stime,
-			   unsigned long long sched_time)
+			   unsigned long long sum_exec_runtime)
 {
 	struct cpu_timer_list *timer, *next;
 	cputime_t ptime = cputime_add(utime, stime);
@@ -451,10 +451,10 @@
 	++head;
 	list_for_each_entry_safe(timer, next, head, entry) {
 		list_del_init(&timer->entry);
-		if (timer->expires.sched < sched_time) {
+		if (timer->expires.sched < sum_exec_runtime) {
 			timer->expires.sched = 0;
 		} else {
-			timer->expires.sched -= sched_time;
+			timer->expires.sched -= sum_exec_runtime;
 		}
 	}
 }
@@ -467,7 +467,7 @@
 void posix_cpu_timers_exit(struct task_struct *tsk)
 {
 	cleanup_timers(tsk->cpu_timers,
-		       tsk->utime, tsk->stime, tsk->sched_time);
+		       tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);
 
 }
 void posix_cpu_timers_exit_group(struct task_struct *tsk)
@@ -475,7 +475,7 @@
 	cleanup_timers(tsk->signal->cpu_timers,
 		       cputime_add(tsk->utime, tsk->signal->utime),
 		       cputime_add(tsk->stime, tsk->signal->stime),
-		       tsk->sched_time + tsk->signal->sched_time);
+		     tsk->se.sum_exec_runtime + tsk->signal->sum_sched_runtime);
 }
 
 
@@ -536,7 +536,7 @@
 		nsleft = max_t(unsigned long long, nsleft, 1);
 		do {
 			if (likely(!(t->flags & PF_EXITING))) {
-				ns = t->sched_time + nsleft;
+				ns = t->se.sum_exec_runtime + nsleft;
 				if (t->it_sched_expires == 0 ||
 				    t->it_sched_expires > ns) {
 					t->it_sched_expires = ns;
@@ -1004,7 +1004,7 @@
 		struct cpu_timer_list *t = list_first_entry(timers,
 						      struct cpu_timer_list,
 						      entry);
-		if (!--maxfire || tsk->sched_time < t->expires.sched) {
+		if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) {
 			tsk->it_sched_expires = t->expires.sched;
 			break;
 		}
@@ -1024,7 +1024,7 @@
 	int maxfire;
 	struct signal_struct *const sig = tsk->signal;
 	cputime_t utime, stime, ptime, virt_expires, prof_expires;
-	unsigned long long sched_time, sched_expires;
+	unsigned long long sum_sched_runtime, sched_expires;
 	struct task_struct *t;
 	struct list_head *timers = sig->cpu_timers;
 
@@ -1044,12 +1044,12 @@
 	 */
 	utime = sig->utime;
 	stime = sig->stime;
-	sched_time = sig->sched_time;
+	sum_sched_runtime = sig->sum_sched_runtime;
 	t = tsk;
 	do {
 		utime = cputime_add(utime, t->utime);
 		stime = cputime_add(stime, t->stime);
-		sched_time += t->sched_time;
+		sum_sched_runtime += t->se.sum_exec_runtime;
 		t = next_thread(t);
 	} while (t != tsk);
 	ptime = cputime_add(utime, stime);
@@ -1090,7 +1090,7 @@
 		struct cpu_timer_list *t = list_first_entry(timers,
 						      struct cpu_timer_list,
 						      entry);
-		if (!--maxfire || sched_time < t->expires.sched) {
+		if (!--maxfire || sum_sched_runtime < t->expires.sched) {
 			sched_expires = t->expires.sched;
 			break;
 		}
@@ -1182,7 +1182,7 @@
 		virt_left = cputime_sub(virt_expires, utime);
 		virt_left = cputime_div_non_zero(virt_left, nthreads);
 		if (sched_expires) {
-			sched_left = sched_expires - sched_time;
+			sched_left = sched_expires - sum_sched_runtime;
 			do_div(sched_left, nthreads);
 			sched_left = max_t(unsigned long long, sched_left, 1);
 		} else {
@@ -1208,7 +1208,7 @@
 				t->it_virt_expires = ticks;
 			}
 
-			sched = t->sched_time + sched_left;
+			sched = t->se.sum_exec_runtime + sched_left;
 			if (sched_expires && (t->it_sched_expires == 0 ||
 					      t->it_sched_expires > sched)) {
 				t->it_sched_expires = sched;
@@ -1300,7 +1300,7 @@
 
 	if (UNEXPIRED(prof) && UNEXPIRED(virt) &&
 	    (tsk->it_sched_expires == 0 ||
-	     tsk->sched_time < tsk->it_sched_expires))
+	     tsk->se.sum_exec_runtime < tsk->it_sched_expires))
 		return;
 
 #undef	UNEXPIRED
diff --git a/kernel/sched.c b/kernel/sched.c
index 50e1a31..9fbced6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -16,13 +16,19 @@
  *		by Davide Libenzi, preemptible kernel bits by Robert Love.
  *  2003-09-03	Interactivity tuning by Con Kolivas.
  *  2004-04-02	Scheduler domains code by Nick Piggin
+ *  2007-04-15  Work begun on replacing all interactivity tuning with a
+ *              fair scheduling design by Con Kolivas.
+ *  2007-05-05  Load balancing (smp-nice) and other improvements
+ *              by Peter Williams
+ *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
+ *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
  */
 
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/nmi.h>
 #include <linux/init.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <linux/highmem.h>
 #include <linux/smp_lock.h>
 #include <asm/mmu_context.h>
@@ -53,9 +59,9 @@
 #include <linux/kprobes.h>
 #include <linux/delayacct.h>
 #include <linux/reciprocal_div.h>
+#include <linux/unistd.h>
 
 #include <asm/tlb.h>
-#include <asm/unistd.h>
 
 /*
  * Scheduler clock - returns current time in nanosec units.
@@ -91,6 +97,9 @@
 #define NS_TO_JIFFIES(TIME)	((TIME) / (1000000000 / HZ))
 #define JIFFIES_TO_NS(TIME)	((TIME) * (1000000000 / HZ))
 
+#define NICE_0_LOAD		SCHED_LOAD_SCALE
+#define NICE_0_SHIFT		SCHED_LOAD_SHIFT
+
 /*
  * These are the 'tuning knobs' of the scheduler:
  *
@@ -100,87 +109,6 @@
  */
 #define MIN_TIMESLICE		max(5 * HZ / 1000, 1)
 #define DEF_TIMESLICE		(100 * HZ / 1000)
-#define ON_RUNQUEUE_WEIGHT	 30
-#define CHILD_PENALTY		 95
-#define PARENT_PENALTY		100
-#define EXIT_WEIGHT		  3
-#define PRIO_BONUS_RATIO	 25
-#define MAX_BONUS		(MAX_USER_PRIO * PRIO_BONUS_RATIO / 100)
-#define INTERACTIVE_DELTA	  2
-#define MAX_SLEEP_AVG		(DEF_TIMESLICE * MAX_BONUS)
-#define STARVATION_LIMIT	(MAX_SLEEP_AVG)
-#define NS_MAX_SLEEP_AVG	(JIFFIES_TO_NS(MAX_SLEEP_AVG))
-
-/*
- * If a task is 'interactive' then we reinsert it in the active
- * array after it has expired its current timeslice. (it will not
- * continue to run immediately, it will still roundrobin with
- * other interactive tasks.)
- *
- * This part scales the interactivity limit depending on niceness.
- *
- * We scale it linearly, offset by the INTERACTIVE_DELTA delta.
- * Here are a few examples of different nice levels:
- *
- *  TASK_INTERACTIVE(-20): [1,1,1,1,1,1,1,1,1,0,0]
- *  TASK_INTERACTIVE(-10): [1,1,1,1,1,1,1,0,0,0,0]
- *  TASK_INTERACTIVE(  0): [1,1,1,1,0,0,0,0,0,0,0]
- *  TASK_INTERACTIVE( 10): [1,1,0,0,0,0,0,0,0,0,0]
- *  TASK_INTERACTIVE( 19): [0,0,0,0,0,0,0,0,0,0,0]
- *
- * (the X axis represents the possible -5 ... 0 ... +5 dynamic
- *  priority range a task can explore, a value of '1' means the
- *  task is rated interactive.)
- *
- * Ie. nice +19 tasks can never get 'interactive' enough to be
- * reinserted into the active array. And only heavily CPU-hog nice -20
- * tasks will be expired. Default nice 0 tasks are somewhere between,
- * it takes some effort for them to get interactive, but it's not
- * too hard.
- */
-
-#define CURRENT_BONUS(p) \
-	(NS_TO_JIFFIES((p)->sleep_avg) * MAX_BONUS / \
-		MAX_SLEEP_AVG)
-
-#define GRANULARITY	(10 * HZ / 1000 ? : 1)
-
-#ifdef CONFIG_SMP
-#define TIMESLICE_GRANULARITY(p)	(GRANULARITY * \
-		(1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)) * \
-			num_online_cpus())
-#else
-#define TIMESLICE_GRANULARITY(p)	(GRANULARITY * \
-		(1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)))
-#endif
-
-#define SCALE(v1,v1_max,v2_max) \
-	(v1) * (v2_max) / (v1_max)
-
-#define DELTA(p) \
-	(SCALE(TASK_NICE(p) + 20, 40, MAX_BONUS) - 20 * MAX_BONUS / 40 + \
-		INTERACTIVE_DELTA)
-
-#define TASK_INTERACTIVE(p) \
-	((p)->prio <= (p)->static_prio - DELTA(p))
-
-#define INTERACTIVE_SLEEP(p) \
-	(JIFFIES_TO_NS(MAX_SLEEP_AVG * \
-		(MAX_BONUS / 2 + DELTA((p)) + 1) / MAX_BONUS - 1))
-
-#define TASK_PREEMPTS_CURR(p, rq) \
-	((p)->prio < (rq)->curr->prio)
-
-#define SCALE_PRIO(x, prio) \
-	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)
-
-static unsigned int static_prio_timeslice(int static_prio)
-{
-	if (static_prio < NICE_TO_PRIO(0))
-		return SCALE_PRIO(DEF_TIMESLICE * 4, static_prio);
-	else
-		return SCALE_PRIO(DEF_TIMESLICE, static_prio);
-}
 
 #ifdef CONFIG_SMP
 /*
@@ -203,28 +131,87 @@
 }
 #endif
 
-/*
- * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
- * to time slice values: [800ms ... 100ms ... 5ms]
- *
- * The higher a thread's priority, the bigger timeslices
- * it gets during one round of execution. But even the lowest
- * priority thread gets MIN_TIMESLICE worth of execution time.
- */
+#define SCALE_PRIO(x, prio) \
+	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)
 
-static inline unsigned int task_timeslice(struct task_struct *p)
+/*
+ * static_prio_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
+ * to time slice values: [800ms ... 100ms ... 5ms]
+ */
+static unsigned int static_prio_timeslice(int static_prio)
 {
-	return static_prio_timeslice(p->static_prio);
+	if (static_prio == NICE_TO_PRIO(19))
+		return 1;
+
+	if (static_prio < NICE_TO_PRIO(0))
+		return SCALE_PRIO(DEF_TIMESLICE * 4, static_prio);
+	else
+		return SCALE_PRIO(DEF_TIMESLICE, static_prio);
+}
+
+static inline int rt_policy(int policy)
+{
+	if (unlikely(policy == SCHED_FIFO) || unlikely(policy == SCHED_RR))
+		return 1;
+	return 0;
+}
+
+static inline int task_has_rt_policy(struct task_struct *p)
+{
+	return rt_policy(p->policy);
 }
 
 /*
- * These are the runqueue data structures:
+ * This is the priority-queue data structure of the RT scheduling class:
  */
+struct rt_prio_array {
+	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
+	struct list_head queue[MAX_RT_PRIO];
+};
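
The bitmap mirrors queue occupancy, so picking the next RT task is a find-first-set-bit followed by taking the head of that priority's list, i.e. O(1) in the number of tasks. A standalone sketch of the idea, with a plain word standing in for the kernel's bitmap helpers (illustrative only):

#include <stdio.h>
#include <strings.h>		/* ffs() */

#define NPRIO 32		/* shrunken stand-in for MAX_RT_PRIO */

static unsigned int bitmap;	/* bit n set <=> queue[n] is non-empty */
static int queue_len[NPRIO];	/* stand-in for the per-prio list heads */

static void enqueue(int prio)
{
	queue_len[prio]++;
	bitmap |= 1u << prio;
}

static void dequeue(int prio)
{
	if (--queue_len[prio] == 0)
		bitmap &= ~(1u << prio);	/* clear bit when queue empties */
}

static int pick_next_prio(void)
{
	if (!bitmap)
		return -1;		/* nothing runnable */
	return ffs(bitmap) - 1;		/* lowest set bit = highest priority */
}

int main(void)
{
	enqueue(5);
	enqueue(2);
	printf("next: %d\n", pick_next_prio());	/* prints 2 */
	dequeue(2);
	printf("next: %d\n", pick_next_prio());	/* prints 5 */
	return 0;
}
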
 
-struct prio_array {
-	unsigned int nr_active;
-	DECLARE_BITMAP(bitmap, MAX_PRIO+1); /* include 1 bit for delimiter */
-	struct list_head queue[MAX_PRIO];
+struct load_stat {
+	struct load_weight load;
+	u64 load_update_start, load_update_last;
+	unsigned long delta_fair, delta_exec, delta_stat;
+};
+
+/* CFS-related fields in a runqueue */
+struct cfs_rq {
+	struct load_weight load;
+	unsigned long nr_running;
+
+	s64 fair_clock;
+	u64 exec_clock;
+	s64 wait_runtime;
+	u64 sleeper_bonus;
+	unsigned long wait_runtime_overruns, wait_runtime_underruns;
+
+	struct rb_root tasks_timeline;
+	struct rb_node *rb_leftmost;
+	struct rb_node *rb_load_balance_curr;
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	/* 'curr' points to currently running entity on this cfs_rq.
+	 * It is set to NULL otherwise (i.e. when none is currently running).
+	 */
+	struct sched_entity *curr;
+	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
+
+	/* leaf cfs_rqs are those that hold tasks (the lowest schedulable
+	 * entities in a hierarchy). Non-leaf cfs_rqs hold other, higher-level
+	 * schedulable entities (users, containers, etc.).
+	 *
+	 * leaf_cfs_rq_list ties together the leaf cfs_rqs of a CPU; the
+	 * list is used during load balancing.
+	 */
+	struct list_head leaf_cfs_rq_list; /* Better name: task_cfs_rq_list? */
+#endif
+};
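
tasks_timeline keeps runnable entities sorted by se->fair_key in a red-black tree, and rb_leftmost caches the tree minimum so the next task to run is found in O(1) rather than by walking down the tree. A toy rendition of the leftmost-caching idea on an unbalanced BST (the kernel uses its rbtree API; everything here is illustrative, and removal-side invalidation is omitted):

#include <stdio.h>
#include <stdlib.h>

struct node {
	long key;			/* stands in for se->fair_key */
	struct node *left, *right;
};

static struct node *root;
static struct node *leftmost;		/* cached minimum, like rb_leftmost */

static void bst_insert(struct node **link, struct node *new)
{
	while (*link)
		link = new->key < (*link)->key ? &(*link)->left
					       : &(*link)->right;
	*link = new;
}

static void enqueue(long key)
{
	struct node *new = calloc(1, sizeof(*new));

	new->key = key;
	bst_insert(&root, new);
	/* Keep the O(1) pick-next cache current on insertion. */
	if (!leftmost || key < leftmost->key)
		leftmost = new;
}

int main(void)
{
	enqueue(30);
	enqueue(10);
	enqueue(20);
	/* pick_next_task needs no tree walk: */
	printf("leftmost key: %ld\n", leftmost->key);	/* prints 10 */
	return 0;
}
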
+
+/* Real-Time classes' related field in a runqueue: */
+struct rt_rq {
+	struct rt_prio_array active;
+	int rt_load_balance_idx;
+	struct list_head *rt_load_balance_head, *rt_load_balance_curr;
 };
 
 /*
@@ -235,22 +222,28 @@
  * acquire operations must be ordered by ascending &runqueue.
  */
 struct rq {
-	spinlock_t lock;
+	spinlock_t lock;	/* runqueue lock */
 
 	/*
 	 * nr_running and cpu_load should be in the same cacheline because
 	 * remote CPUs use both these fields when doing load calculation.
 	 */
 	unsigned long nr_running;
-	unsigned long raw_weighted_load;
-#ifdef CONFIG_SMP
-	unsigned long cpu_load[3];
+	#define CPU_LOAD_IDX_MAX 5
+	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
 	unsigned char idle_at_tick;
 #ifdef CONFIG_NO_HZ
 	unsigned char in_nohz_recently;
 #endif
+	struct load_stat ls;	/* capture load from *all* tasks on this cpu */
+	unsigned long nr_load_updates;
+	u64 nr_switches;
+
+	struct cfs_rq cfs;
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	struct list_head leaf_cfs_rq_list; /* list of leaf cfs_rq on this cpu */
 #endif
-	unsigned long long nr_switches;
+	struct rt_rq  rt;
 
 	/*
 	 * This is part of a global counter where only the total sum
@@ -260,14 +253,18 @@
 	 */
 	unsigned long nr_uninterruptible;
 
-	unsigned long expired_timestamp;
-	/* Cached timestamp set by update_cpu_clock() */
-	unsigned long long most_recent_timestamp;
 	struct task_struct *curr, *idle;
 	unsigned long next_balance;
 	struct mm_struct *prev_mm;
-	struct prio_array *active, *expired, arrays[2];
-	int best_expired_prio;
+
+	u64 clock, prev_clock_raw;
+	s64 clock_max_delta;
+
+	unsigned int clock_warps, clock_overflows;
+	unsigned int clock_unstable_events;
+
+	struct sched_class *load_balance_class;
+
 	atomic_t nr_iowait;
 
 #ifdef CONFIG_SMP
@@ -307,6 +304,11 @@
 static DEFINE_PER_CPU(struct rq, runqueues) ____cacheline_aligned_in_smp;
 static DEFINE_MUTEX(sched_hotcpu_mutex);
 
+static inline void check_preempt_curr(struct rq *rq, struct task_struct *p)
+{
+	rq->curr->sched_class->check_preempt_curr(rq, p);
+}
+
 static inline int cpu_of(struct rq *rq)
 {
 #ifdef CONFIG_SMP
@@ -317,6 +319,52 @@
 }
 
 /*
+ * Per-runqueue clock, as fine-grained as the platform can give us:
+ */
+static unsigned long long __rq_clock(struct rq *rq)
+{
+	u64 prev_raw = rq->prev_clock_raw;
+	u64 now = sched_clock();
+	s64 delta = now - prev_raw;
+	u64 clock = rq->clock;
+
+	/*
+	 * Protect against sched_clock() occasionally going backwards:
+	 */
+	if (unlikely(delta < 0)) {
+		clock++;
+		rq->clock_warps++;
+	} else {
+		/*
+		 * Catch too large forward jumps too:
+		 */
+		if (unlikely(delta > 2*TICK_NSEC)) {
+			clock++;
+			rq->clock_overflows++;
+		} else {
+			if (unlikely(delta > rq->clock_max_delta))
+				rq->clock_max_delta = delta;
+			clock += delta;
+		}
+	}
+
+	rq->prev_clock_raw = now;
+	rq->clock = clock;
+
+	return clock;
+}
+
+static inline unsigned long long rq_clock(struct rq *rq)
+{
+	int this_cpu = smp_processor_id();
+
+	if (this_cpu == cpu_of(rq))
+		return __rq_clock(rq);
+
+	return rq->clock;
+}
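
The filtering above is what keeps rq->clock monotonic and roughly tick-bounded even when the underlying sched_clock() misbehaves. Its effect is easiest to see with a synthetic raw clock; a minimal user-space rendition, with an arbitrary stand-in value for TICK_NSEC (max-delta bookkeeping omitted):

#include <stdio.h>

#define TICK_NSEC 1000000ULL	/* stand-in value: a 1ms tick */

static unsigned long long clk, prev_raw;

static unsigned long long filter(unsigned long long raw)
{
	long long delta = (long long)(raw - prev_raw);

	if (delta < 0)
		clk++;			/* clock warp: creep forward by 1 */
	else if (delta > 2 * (long long)TICK_NSEC)
		clk++;			/* absurd forward jump: also creep */
	else
		clk += delta;		/* sane delta: advance normally */

	prev_raw = raw;
	return clk;
}

int main(void)
{
	unsigned long long raw[] = { 100, 600, 400 /* backwards */, 900,
				     900000000ULL /* huge jump */ };
	for (unsigned int i = 0; i < sizeof(raw) / sizeof(raw[0]); i++)
		printf("raw=%-10llu clock=%llu\n", raw[i], filter(raw[i]));
	return 0;
}
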
+
+/*
  * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
  * See detach_destroy_domains: synchronize_sched for details.
  *
@@ -331,6 +379,18 @@
 #define task_rq(p)		cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+/* Change a task's ->cfs_rq if it moves across CPUs */
+static inline void set_task_cfs_rq(struct task_struct *p)
+{
+	p->se.cfs_rq = &task_rq(p)->cfs;
+}
+#else
+static inline void set_task_cfs_rq(struct task_struct *p)
+{
+}
+#endif
+
 #ifndef prepare_arch_switch
 # define prepare_arch_switch(next)	do { } while (0)
 #endif
@@ -460,134 +520,6 @@
 	spin_unlock_irqrestore(&rq->lock, *flags);
 }
 
-#ifdef CONFIG_SCHEDSTATS
-/*
- * bump this up when changing the output format or the meaning of an existing
- * format, so that tools can adapt (or abort)
- */
-#define SCHEDSTAT_VERSION 14
-
-static int show_schedstat(struct seq_file *seq, void *v)
-{
-	int cpu;
-
-	seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
-	seq_printf(seq, "timestamp %lu\n", jiffies);
-	for_each_online_cpu(cpu) {
-		struct rq *rq = cpu_rq(cpu);
-#ifdef CONFIG_SMP
-		struct sched_domain *sd;
-		int dcnt = 0;
-#endif
-
-		/* runqueue-specific stats */
-		seq_printf(seq,
-		    "cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu",
-		    cpu, rq->yld_both_empty,
-		    rq->yld_act_empty, rq->yld_exp_empty, rq->yld_cnt,
-		    rq->sched_switch, rq->sched_cnt, rq->sched_goidle,
-		    rq->ttwu_cnt, rq->ttwu_local,
-		    rq->rq_sched_info.cpu_time,
-		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcnt);
-
-		seq_printf(seq, "\n");
-
-#ifdef CONFIG_SMP
-		/* domain-specific stats */
-		preempt_disable();
-		for_each_domain(cpu, sd) {
-			enum idle_type itype;
-			char mask_str[NR_CPUS];
-
-			cpumask_scnprintf(mask_str, NR_CPUS, sd->span);
-			seq_printf(seq, "domain%d %s", dcnt++, mask_str);
-			for (itype = SCHED_IDLE; itype < MAX_IDLE_TYPES;
-					itype++) {
-				seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu "
-						"%lu",
-				    sd->lb_cnt[itype],
-				    sd->lb_balanced[itype],
-				    sd->lb_failed[itype],
-				    sd->lb_imbalance[itype],
-				    sd->lb_gained[itype],
-				    sd->lb_hot_gained[itype],
-				    sd->lb_nobusyq[itype],
-				    sd->lb_nobusyg[itype]);
-			}
-			seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu"
-			    " %lu %lu %lu\n",
-			    sd->alb_cnt, sd->alb_failed, sd->alb_pushed,
-			    sd->sbe_cnt, sd->sbe_balanced, sd->sbe_pushed,
-			    sd->sbf_cnt, sd->sbf_balanced, sd->sbf_pushed,
-			    sd->ttwu_wake_remote, sd->ttwu_move_affine,
-			    sd->ttwu_move_balance);
-		}
-		preempt_enable();
-#endif
-	}
-	return 0;
-}
-
-static int schedstat_open(struct inode *inode, struct file *file)
-{
-	unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32);
-	char *buf = kmalloc(size, GFP_KERNEL);
-	struct seq_file *m;
-	int res;
-
-	if (!buf)
-		return -ENOMEM;
-	res = single_open(file, show_schedstat, NULL);
-	if (!res) {
-		m = file->private_data;
-		m->buf = buf;
-		m->size = size;
-	} else
-		kfree(buf);
-	return res;
-}
-
-const struct file_operations proc_schedstat_operations = {
-	.open    = schedstat_open,
-	.read    = seq_read,
-	.llseek  = seq_lseek,
-	.release = single_release,
-};
-
-/*
- * Expects runqueue lock to be held for atomicity of update
- */
-static inline void
-rq_sched_info_arrive(struct rq *rq, unsigned long delta_jiffies)
-{
-	if (rq) {
-		rq->rq_sched_info.run_delay += delta_jiffies;
-		rq->rq_sched_info.pcnt++;
-	}
-}
-
-/*
- * Expects runqueue lock to be held for atomicity of update
- */
-static inline void
-rq_sched_info_depart(struct rq *rq, unsigned long delta_jiffies)
-{
-	if (rq)
-		rq->rq_sched_info.cpu_time += delta_jiffies;
-}
-# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
-# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
-#else /* !CONFIG_SCHEDSTATS */
-static inline void
-rq_sched_info_arrive(struct rq *rq, unsigned long delta_jiffies)
-{}
-static inline void
-rq_sched_info_depart(struct rq *rq, unsigned long delta_jiffies)
-{}
-# define schedstat_inc(rq, field)	do { } while (0)
-# define schedstat_add(rq, field, amt)	do { } while (0)
-#endif
-
 /*
  * this_rq_lock - lock this runqueue and disable interrupts.
  */
@@ -603,444 +535,18 @@
 	return rq;
 }
 
-#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 /*
- * Called when a process is dequeued from the active array and given
- * the cpu.  We should note that with the exception of interactive
- * tasks, the expired queue will become the active queue after the active
- * queue is empty, without explicitly dequeuing and requeuing tasks in the
- * expired queue.  (Interactive tasks may be requeued directly to the
- * active queue, thus delaying tasks in the expired queue from running;
- * see scheduler_tick()).
- *
- * This function is only called from sched_info_arrive(), rather than
- * dequeue_task(). Even though a task may be queued and dequeued multiple
- * times as it is shuffled about, we're really interested in knowing how
- * long it was from the *first* time it was queued to the time that it
- * finally hit a cpu.
+ * CPU frequency is/was unstable - start anew by resetting prev_clock_raw:
  */
-static inline void sched_info_dequeued(struct task_struct *t)
+void sched_clock_unstable_event(void)
 {
-	t->sched_info.last_queued = 0;
-}
+	unsigned long flags;
+	struct rq *rq;
 
-/*
- * Called when a task finally hits the cpu.  We can now calculate how
- * long it was waiting to run.  We also note when it began so that we
- * can keep stats on how long its timeslice is.
- */
-static void sched_info_arrive(struct task_struct *t)
-{
-	unsigned long now = jiffies, delta_jiffies = 0;
-
-	if (t->sched_info.last_queued)
-		delta_jiffies = now - t->sched_info.last_queued;
-	sched_info_dequeued(t);
-	t->sched_info.run_delay += delta_jiffies;
-	t->sched_info.last_arrival = now;
-	t->sched_info.pcnt++;
-
-	rq_sched_info_arrive(task_rq(t), delta_jiffies);
-}
-
-/*
- * Called when a process is queued into either the active or expired
- * array.  The time is noted and later used to determine how long we
- * had to wait for us to reach the cpu.  Since the expired queue will
- * become the active queue after active queue is empty, without dequeuing
- * and requeuing any tasks, we are interested in queuing to either. It
- * is unusual but not impossible for tasks to be dequeued and immediately
- * requeued in the same or another array: this can happen in sched_yield(),
- * set_user_nice(), and even load_balance() as it moves tasks from runqueue
- * to runqueue.
- *
- * This function is only called from enqueue_task(), but also only updates
- * the timestamp if it is already not set.  It's assumed that
- * sched_info_dequeued() will clear that stamp when appropriate.
- */
-static inline void sched_info_queued(struct task_struct *t)
-{
-	if (unlikely(sched_info_on()))
-		if (!t->sched_info.last_queued)
-			t->sched_info.last_queued = jiffies;
-}
-
-/*
- * Called when a process ceases being the active-running process, either
- * voluntarily or involuntarily.  Now we can calculate how long we ran.
- */
-static inline void sched_info_depart(struct task_struct *t)
-{
-	unsigned long delta_jiffies = jiffies - t->sched_info.last_arrival;
-
-	t->sched_info.cpu_time += delta_jiffies;
-	rq_sched_info_depart(task_rq(t), delta_jiffies);
-}
-
-/*
- * Called when tasks are switched involuntarily due, typically, to expiring
- * their time slice.  (This may also be called when switching to or from
- * the idle task.)  We are only called when prev != next.
- */
-static inline void
-__sched_info_switch(struct task_struct *prev, struct task_struct *next)
-{
-	struct rq *rq = task_rq(prev);
-
-	/*
-	 * prev now departs the cpu.  It's not interesting to record
-	 * stats about how efficient we were at scheduling the idle
-	 * process, however.
-	 */
-	if (prev != rq->idle)
-		sched_info_depart(prev);
-
-	if (next != rq->idle)
-		sched_info_arrive(next);
-}
-static inline void
-sched_info_switch(struct task_struct *prev, struct task_struct *next)
-{
-	if (unlikely(sched_info_on()))
-		__sched_info_switch(prev, next);
-}
-#else
-#define sched_info_queued(t)		do { } while (0)
-#define sched_info_switch(t, next)	do { } while (0)
-#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
-
-/*
- * Adding/removing a task to/from a priority array:
- */
-static void dequeue_task(struct task_struct *p, struct prio_array *array)
-{
-	array->nr_active--;
-	list_del(&p->run_list);
-	if (list_empty(array->queue + p->prio))
-		__clear_bit(p->prio, array->bitmap);
-}
-
-static void enqueue_task(struct task_struct *p, struct prio_array *array)
-{
-	sched_info_queued(p);
-	list_add_tail(&p->run_list, array->queue + p->prio);
-	__set_bit(p->prio, array->bitmap);
-	array->nr_active++;
-	p->array = array;
-}
-
-/*
- * Put task to the end of the run list without the overhead of dequeue
- * followed by enqueue.
- */
-static void requeue_task(struct task_struct *p, struct prio_array *array)
-{
-	list_move_tail(&p->run_list, array->queue + p->prio);
-}
-
-static inline void
-enqueue_task_head(struct task_struct *p, struct prio_array *array)
-{
-	list_add(&p->run_list, array->queue + p->prio);
-	__set_bit(p->prio, array->bitmap);
-	array->nr_active++;
-	p->array = array;
-}
-
-/*
- * __normal_prio - return the priority that is based on the static
- * priority but is modified by bonuses/penalties.
- *
- * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
- * into the -5 ... 0 ... +5 bonus/penalty range.
- *
- * We use 25% of the full 0...39 priority range so that:
- *
- * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
- * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
- *
- * Both properties are important to certain workloads.
- */
-
-static inline int __normal_prio(struct task_struct *p)
-{
-	int bonus, prio;
-
-	bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;
-
-	prio = p->static_prio - bonus;
-	if (prio < MAX_RT_PRIO)
-		prio = MAX_RT_PRIO;
-	if (prio > MAX_PRIO-1)
-		prio = MAX_PRIO-1;
-	return prio;
-}
-
-/*
- * To aid in avoiding the subversion of "niceness" due to uneven distribution
- * of tasks with abnormal "nice" values across CPUs the contribution that
- * each task makes to its run queue's load is weighted according to its
- * scheduling class and "nice" value.  For SCHED_NORMAL tasks this is just a
- * scaled version of the new time slice allocation that they receive on time
- * slice expiry etc.
- */
-
-/*
- * Assume: static_prio_timeslice(NICE_TO_PRIO(0)) == DEF_TIMESLICE
- * If static_prio_timeslice() is ever changed to break this assumption then
- * this code will need modification
- */
-#define TIME_SLICE_NICE_ZERO DEF_TIMESLICE
-#define LOAD_WEIGHT(lp) \
-	(((lp) * SCHED_LOAD_SCALE) / TIME_SLICE_NICE_ZERO)
-#define PRIO_TO_LOAD_WEIGHT(prio) \
-	LOAD_WEIGHT(static_prio_timeslice(prio))
-#define RTPRIO_TO_LOAD_WEIGHT(rp) \
-	(PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp))
-
-static void set_load_weight(struct task_struct *p)
-{
-	if (has_rt_policy(p)) {
-#ifdef CONFIG_SMP
-		if (p == task_rq(p)->migration_thread)
-			/*
-			 * The migration thread does the actual balancing.
-			 * Giving its load any weight will skew balancing
-			 * adversely.
-			 */
-			p->load_weight = 0;
-		else
-#endif
-			p->load_weight = RTPRIO_TO_LOAD_WEIGHT(p->rt_priority);
-	} else
-		p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio);
-}
-
-static inline void
-inc_raw_weighted_load(struct rq *rq, const struct task_struct *p)
-{
-	rq->raw_weighted_load += p->load_weight;
-}
-
-static inline void
-dec_raw_weighted_load(struct rq *rq, const struct task_struct *p)
-{
-	rq->raw_weighted_load -= p->load_weight;
-}
-
-static inline void inc_nr_running(struct task_struct *p, struct rq *rq)
-{
-	rq->nr_running++;
-	inc_raw_weighted_load(rq, p);
-}
-
-static inline void dec_nr_running(struct task_struct *p, struct rq *rq)
-{
-	rq->nr_running--;
-	dec_raw_weighted_load(rq, p);
-}
-
-/*
- * Calculate the expected normal priority: i.e. priority
- * without taking RT-inheritance into account. Might be
- * boosted by interactivity modifiers. Changes upon fork,
- * setprio syscalls, and whenever the interactivity
- * estimator recalculates.
- */
-static inline int normal_prio(struct task_struct *p)
-{
-	int prio;
-
-	if (has_rt_policy(p))
-		prio = MAX_RT_PRIO-1 - p->rt_priority;
-	else
-		prio = __normal_prio(p);
-	return prio;
-}
-
-/*
- * Calculate the current priority, i.e. the priority
- * taken into account by the scheduler. This value might
- * be boosted by RT tasks, or might be boosted by
- * interactivity modifiers. Will be RT if the task got
- * RT-boosted. If not then it returns p->normal_prio.
- */
-static int effective_prio(struct task_struct *p)
-{
-	p->normal_prio = normal_prio(p);
-	/*
-	 * If we are RT tasks or we were boosted to RT priority,
-	 * keep the priority unchanged. Otherwise, update priority
-	 * to the normal priority:
-	 */
-	if (!rt_prio(p->prio))
-		return p->normal_prio;
-	return p->prio;
-}
-
-/*
- * __activate_task - move a task to the runqueue.
- */
-static void __activate_task(struct task_struct *p, struct rq *rq)
-{
-	struct prio_array *target = rq->active;
-
-	if (batch_task(p))
-		target = rq->expired;
-	enqueue_task(p, target);
-	inc_nr_running(p, rq);
-}
-
-/*
- * __activate_idle_task - move idle task to the _front_ of runqueue.
- */
-static inline void __activate_idle_task(struct task_struct *p, struct rq *rq)
-{
-	enqueue_task_head(p, rq->active);
-	inc_nr_running(p, rq);
-}
-
-/*
- * Recalculate p->normal_prio and p->prio after having slept,
- * updating the sleep-average too:
- */
-static int recalc_task_prio(struct task_struct *p, unsigned long long now)
-{
-	/* Caller must always ensure 'now >= p->timestamp' */
-	unsigned long sleep_time = now - p->timestamp;
-
-	if (batch_task(p))
-		sleep_time = 0;
-
-	if (likely(sleep_time > 0)) {
-		/*
-		 * This ceiling is set to the lowest priority that would allow
-		 * a task to be reinserted into the active array on timeslice
-		 * completion.
-		 */
-		unsigned long ceiling = INTERACTIVE_SLEEP(p);
-
-		if (p->mm && sleep_time > ceiling && p->sleep_avg < ceiling) {
-			/*
-			 * Prevents user tasks from achieving best priority
-			 * with one single large enough sleep.
-			 */
-			p->sleep_avg = ceiling;
-			/*
-			 * Using INTERACTIVE_SLEEP() as a ceiling places a
-			 * nice(0) task 1ms sleep away from promotion, and
-			 * gives it 700ms to round-robin with no chance of
-			 * being demoted.  This is more than generous, so
-			 * mark this sleep as non-interactive to prevent the
-			 * on-runqueue bonus logic from intervening should
-			 * this task not receive cpu immediately.
-			 */
-			p->sleep_type = SLEEP_NONINTERACTIVE;
-		} else {
-			/*
-			 * Tasks waking from uninterruptible sleep are
-			 * limited in their sleep_avg rise as they
-			 * are likely to be waiting on I/O
-			 */
-			if (p->sleep_type == SLEEP_NONINTERACTIVE && p->mm) {
-				if (p->sleep_avg >= ceiling)
-					sleep_time = 0;
-				else if (p->sleep_avg + sleep_time >=
-					 ceiling) {
-						p->sleep_avg = ceiling;
-						sleep_time = 0;
-				}
-			}
-
-			/*
-			 * This code gives a bonus to interactive tasks.
-			 *
-			 * The boost works by updating the 'average sleep time'
-			 * value here, based on ->timestamp. The more time a
-			 * task spends sleeping, the higher the average gets -
-			 * and the higher the priority boost gets as well.
-			 */
-			p->sleep_avg += sleep_time;
-
-		}
-		if (p->sleep_avg > NS_MAX_SLEEP_AVG)
-			p->sleep_avg = NS_MAX_SLEEP_AVG;
-	}
-
-	return effective_prio(p);
-}
-
-/*
- * activate_task - move a task to the runqueue and do priority recalculation
- *
- * Update all the scheduling statistics stuff. (sleep average
- * calculation, priority modifiers, etc.)
- */
-static void activate_task(struct task_struct *p, struct rq *rq, int local)
-{
-	unsigned long long now;
-
-	if (rt_task(p))
-		goto out;
-
-	now = sched_clock();
-#ifdef CONFIG_SMP
-	if (!local) {
-		/* Compensate for drifting sched_clock */
-		struct rq *this_rq = this_rq();
-		now = (now - this_rq->most_recent_timestamp)
-			+ rq->most_recent_timestamp;
-	}
-#endif
-
-	/*
-	 * Sleep time is in units of nanosecs, so shift by 20 to get a
-	 * milliseconds-range estimation of the amount of time that the task
-	 * spent sleeping:
-	 */
-	if (unlikely(prof_on == SLEEP_PROFILING)) {
-		if (p->state == TASK_UNINTERRUPTIBLE)
-			profile_hits(SLEEP_PROFILING, (void *)get_wchan(p),
-				     (now - p->timestamp) >> 20);
-	}
-
-	p->prio = recalc_task_prio(p, now);
-
-	/*
-	 * This checks to make sure it's not an uninterruptible task
-	 * that is now waking up.
-	 */
-	if (p->sleep_type == SLEEP_NORMAL) {
-		/*
-		 * Tasks which were woken up by interrupts (ie. hw events)
-		 * are most likely of interactive nature. So we give them
-		 * the credit of extending their sleep time to the period
-		 * of time they spend on the runqueue, waiting for execution
-		 * on a CPU, first time around:
-		 */
-		if (in_interrupt())
-			p->sleep_type = SLEEP_INTERRUPTED;
-		else {
-			/*
-			 * Normal first-time wakeups get a credit too for
-			 * on-runqueue time, but it will be weighted down:
-			 */
-			p->sleep_type = SLEEP_INTERACTIVE;
-		}
-	}
-	p->timestamp = now;
-out:
-	__activate_task(p, rq);
-}
-
-/*
- * deactivate_task - remove a task from the runqueue.
- */
-static void deactivate_task(struct task_struct *p, struct rq *rq)
-{
-	dec_nr_running(p, rq);
-	dequeue_task(p, p->array);
-	p->array = NULL;
+	rq = task_rq_lock(current, &flags);
+	rq->prev_clock_raw = sched_clock();
+	rq->clock_unstable_events++;
+	task_rq_unlock(rq, &flags);
 }
 
 /*
@@ -1095,6 +601,345 @@
 }
 #endif
 
+static u64 div64_likely32(u64 dividend, unsigned long divisor)
+{
+#if BITS_PER_LONG == 32
+	if (likely(dividend <= 0xffffffffULL))
+		return (u32)dividend / divisor;
+	do_div(dividend, divisor);
+
+	return dividend;
+#else
+	return dividend / divisor;
+#endif
+}
+
+#if BITS_PER_LONG == 32
+# define WMULT_CONST	(~0UL)
+#else
+# define WMULT_CONST	(1UL << 32)
+#endif
+
+#define WMULT_SHIFT	32
+
+static inline unsigned long
+calc_delta_mine(unsigned long delta_exec, unsigned long weight,
+		struct load_weight *lw)
+{
+	u64 tmp;
+
+	if (unlikely(!lw->inv_weight))
+		lw->inv_weight = WMULT_CONST / lw->weight;
+
+	tmp = (u64)delta_exec * weight;
+	/*
+	 * Check whether we'd overflow the 64-bit multiplication:
+	 */
+	if (unlikely(tmp > WMULT_CONST)) {
+		tmp = ((tmp >> WMULT_SHIFT/2) * lw->inv_weight)
+				>> (WMULT_SHIFT/2);
+	} else {
+		tmp = (tmp * lw->inv_weight) >> WMULT_SHIFT;
+	}
+
+	return (unsigned long)min(tmp, (u64)sysctl_sched_runtime_limit);
+}
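
calc_delta_mine() avoids a 64-bit division on the scheduler fast path: inv_weight caches 2^32/weight, so the scaled delta becomes a multiply and a shift. A self-contained check of the approximation (values invented; the kernel's branch guarding the 64-bit multiply against overflow is elided here):

#include <stdio.h>
#include <stdint.h>

#define WMULT_SHIFT 32

/* delta * weight / total, using the cached-inverse trick instead of
 * dividing on every call. */
static unsigned long scale(unsigned long delta, unsigned long weight,
			   unsigned long total)
{
	uint64_t inv = ((uint64_t)1 << WMULT_SHIFT) / total; /* cached per weight change */
	uint64_t tmp = (uint64_t)delta * weight;

	return (unsigned long)((tmp * inv) >> WMULT_SHIFT);
}

int main(void)
{
	/* 10ms of runtime for a nice-0 task (weight 1024) on a queue
	 * whose total weight is 3072: it earned one third of the time. */
	printf("approx: %lu\n", scale(10000000UL, 1024UL, 3072UL));
	printf("exact:  %llu\n", 10000000ULL * 1024 / 3072);
	return 0;
}
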
+
+static inline unsigned long
+calc_delta_fair(unsigned long delta_exec, struct load_weight *lw)
+{
+	return calc_delta_mine(delta_exec, NICE_0_LOAD, lw);
+}
+
+static void update_load_add(struct load_weight *lw, unsigned long inc)
+{
+	lw->weight += inc;
+	lw->inv_weight = 0;
+}
+
+static void update_load_sub(struct load_weight *lw, unsigned long dec)
+{
+	lw->weight -= dec;
+	lw->inv_weight = 0;
+}
+
+static void __update_curr_load(struct rq *rq, struct load_stat *ls)
+{
+	if (rq->curr != rq->idle && ls->load.weight) {
+		ls->delta_exec += ls->delta_stat;
+		ls->delta_fair += calc_delta_fair(ls->delta_stat, &ls->load);
+		ls->delta_stat = 0;
+	}
+}
+
+/*
+ * Update delta_exec, delta_fair fields for rq.
+ *
+ * delta_fair clock advances at a rate inversely proportional to
+ * total load (rq->ls.load.weight) on the runqueue, while
+ * delta_exec advances at the same rate as wall-clock (provided
+ * cpu is not idle).
+ *
+ * delta_exec / delta_fair is a measure of the (smoothed) load on this
+ * runqueue over any given interval. This (smoothed) load is used
+ * during load balance.
+ *
+ * This function is called /before/ updating rq->ls.load
+ * and when switching tasks.
+ */
+static void update_curr_load(struct rq *rq, u64 now)
+{
+	struct load_stat *ls = &rq->ls;
+	u64 start;
+
+	start = ls->load_update_start;
+	ls->load_update_start = now;
+	ls->delta_stat += now - start;
+	/*
+	 * Stagger updates to ls->delta_fair. Very frequent updates
+	 * can be expensive.
+	 */
+	if (ls->delta_stat >= sysctl_sched_stat_granularity)
+		__update_curr_load(rq, ls);
+}
+
+/*
+ * To aid in avoiding the subversion of "niceness" due to uneven distribution
+ * of tasks with abnormal "nice" values across CPUs the contribution that
+ * each task makes to its run queue's load is weighted according to its
+ * scheduling class and "nice" value.  For SCHED_NORMAL tasks this is just a
+ * scaled version of the new time slice allocation that they receive on time
+ * slice expiry etc.
+ */
+
+/*
+ * Assume: static_prio_timeslice(NICE_TO_PRIO(0)) == DEF_TIMESLICE
+ * If static_prio_timeslice() is ever changed to break this assumption then
+ * this code will need modification
+ */
+#define TIME_SLICE_NICE_ZERO DEF_TIMESLICE
+#define load_weight(lp) \
+	(((lp) * SCHED_LOAD_SCALE) / TIME_SLICE_NICE_ZERO)
+#define PRIO_TO_LOAD_WEIGHT(prio) \
+	load_weight(static_prio_timeslice(prio))
+#define RTPRIO_TO_LOAD_WEIGHT(rp) \
+	(PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + load_weight(rp))
+
+#define WEIGHT_IDLEPRIO		2
+#define WMULT_IDLEPRIO		(1 << 31)
+
+/*
+ * Nice levels are multiplicative, with a gentle 10% change for every
+ * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
+ * nice 1, it will get ~10% less CPU time than another CPU-bound task
+ * that remained on nice 0.
+ *
+ * The "10% effect" is relative and cumulative: from _any_ nice level,
+ * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
+ * it's +10% CPU usage.
+ */
+static const int prio_to_weight[40] = {
+/* -20 */ 88818, 71054, 56843, 45475, 36380, 29104, 23283, 18626, 14901, 11921,
+/* -10 */  9537,  7629,  6103,  4883,  3906,  3125,  2500,  2000,  1600,  1280,
+/*   0 */  NICE_0_LOAD /* 1024 */,
+/*   1 */          819,   655,   524,   419,   336,   268,   215,   172,   137,
+/*  10 */   110,    87,    70,    56,    45,    36,    29,    23,    18,    15,
+};
+
+static const u32 prio_to_wmult[40] = {
+	48356,   60446,   75558,   94446,  118058,  147573,
+	184467,  230589,  288233,  360285,  450347,
+	562979,  703746,  879575, 1099582, 1374389,
+	1717986, 2147483, 2684354, 3355443, 4194304,
+	5244160, 6557201, 8196502, 10250518, 12782640,
+	16025997, 19976592, 24970740, 31350126, 39045157,
+	49367440, 61356675, 76695844, 95443717, 119304647,
+	148102320, 186737708, 238609294, 286331153,
+};
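
Adjacent entries of prio_to_weight[] differ by roughly a factor of 1.25, which is where the ~10% relative CPU-share change per nice level comes from, and prio_to_wmult[] is simply 2^32/weight precomputed for calc_delta_mine(). A small check using values taken from the table above:

#include <stdio.h>

/* prio_to_weight[] entries for nice 0..4, from the table above. */
static const int w[] = { 1024, 819, 655, 524, 419 };

int main(void)
{
	for (int n = 0; n + 1 < (int)(sizeof(w) / sizeof(w[0])); n++) {
		/* Two competing CPU hogs, one nice level apart: */
		double lighter = 100.0 * w[n + 1] / (w[n] + w[n + 1]);
		printf("nice %d vs nice %d: %4.1f%% vs %4.1f%% of the CPU\n",
		       n, n + 1, 100.0 - lighter, lighter);
	}
	return 0;
}
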
+
+static inline void
+inc_load(struct rq *rq, const struct task_struct *p, u64 now)
+{
+	update_curr_load(rq, now);
+	update_load_add(&rq->ls.load, p->se.load.weight);
+}
+
+static inline void
+dec_load(struct rq *rq, const struct task_struct *p, u64 now)
+{
+	update_curr_load(rq, now);
+	update_load_sub(&rq->ls.load, p->se.load.weight);
+}
+
+static inline void inc_nr_running(struct task_struct *p, struct rq *rq, u64 now)
+{
+	rq->nr_running++;
+	inc_load(rq, p, now);
+}
+
+static inline void dec_nr_running(struct task_struct *p, struct rq *rq, u64 now)
+{
+	rq->nr_running--;
+	dec_load(rq, p, now);
+}
+
+static void activate_task(struct rq *rq, struct task_struct *p, int wakeup);
+
+/*
+ * runqueue iterator, to support SMP load-balancing between different
+ * scheduling classes, without having to expose their internal data
+ * structures to the load-balancing proper:
+ */
+struct rq_iterator {
+	void *arg;
+	struct task_struct *(*start)(void *);
+	struct task_struct *(*next)(void *);
+};
+
+static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
+		      unsigned long max_nr_move, unsigned long max_load_move,
+		      struct sched_domain *sd, enum cpu_idle_type idle,
+		      int *all_pinned, unsigned long *load_moved,
+		      int this_best_prio, int best_prio, int best_prio_seen,
+		      struct rq_iterator *iterator);
+
+#include "sched_stats.h"
+#include "sched_rt.c"
+#include "sched_fair.c"
+#include "sched_idletask.c"
+#ifdef CONFIG_SCHED_DEBUG
+# include "sched_debug.c"
+#endif
+
+#define sched_class_highest (&rt_sched_class)
+
+static void set_load_weight(struct task_struct *p)
+{
+	task_rq(p)->cfs.wait_runtime -= p->se.wait_runtime;
+	p->se.wait_runtime = 0;
+
+	if (task_has_rt_policy(p)) {
+		p->se.load.weight = prio_to_weight[0] * 2;
+		p->se.load.inv_weight = prio_to_wmult[0] >> 1;
+		return;
+	}
+
+	/*
+	 * SCHED_IDLE tasks get minimal weight:
+	 */
+	if (p->policy == SCHED_IDLE) {
+		p->se.load.weight = WEIGHT_IDLEPRIO;
+		p->se.load.inv_weight = WMULT_IDLEPRIO;
+		return;
+	}
+
+	p->se.load.weight = prio_to_weight[p->static_prio - MAX_RT_PRIO];
+	p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
+}
+
+static void
+enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, u64 now)
+{
+	sched_info_queued(p);
+	p->sched_class->enqueue_task(rq, p, wakeup, now);
+	p->se.on_rq = 1;
+}
+
+static void
+dequeue_task(struct rq *rq, struct task_struct *p, int sleep, u64 now)
+{
+	p->sched_class->dequeue_task(rq, p, sleep, now);
+	p->se.on_rq = 0;
+}
+
+/*
+ * __normal_prio - return the priority that is based on the static prio
+ */
+static inline int __normal_prio(struct task_struct *p)
+{
+	return p->static_prio;
+}
+
+/*
+ * Calculate the expected normal priority: i.e. priority
+ * without taking RT-inheritance into account. Might be
+ * boosted by interactivity modifiers. Changes upon fork,
+ * setprio syscalls, and whenever the interactivity
+ * estimator recalculates.
+ */
+static inline int normal_prio(struct task_struct *p)
+{
+	int prio;
+
+	if (task_has_rt_policy(p))
+		prio = MAX_RT_PRIO-1 - p->rt_priority;
+	else
+		prio = __normal_prio(p);
+	return prio;
+}
+
+/*
+ * Calculate the current priority, i.e. the priority
+ * taken into account by the scheduler. This value might
+ * be boosted by RT tasks, or might be boosted by
+ * interactivity modifiers. Will be RT if the task got
+ * RT-boosted. If not then it returns p->normal_prio.
+ */
+static int effective_prio(struct task_struct *p)
+{
+	p->normal_prio = normal_prio(p);
+	/*
+	 * If we are RT tasks or we were boosted to RT priority,
+	 * keep the priority unchanged. Otherwise, update priority
+	 * to the normal priority:
+	 */
+	if (!rt_prio(p->prio))
+		return p->normal_prio;
+	return p->prio;
+}
+
+/*
+ * activate_task - move a task to the runqueue.
+ */
+static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
+{
+	u64 now = rq_clock(rq);
+
+	if (p->state == TASK_UNINTERRUPTIBLE)
+		rq->nr_uninterruptible--;
+
+	enqueue_task(rq, p, wakeup, now);
+	inc_nr_running(p, rq, now);
+}
+
+/*
+ * activate_idle_task - move idle task to the _front_ of runqueue.
+ */
+static inline void activate_idle_task(struct task_struct *p, struct rq *rq)
+{
+	u64 now = rq_clock(rq);
+
+	if (p->state == TASK_UNINTERRUPTIBLE)
+		rq->nr_uninterruptible--;
+
+	enqueue_task(rq, p, 0, now);
+	inc_nr_running(p, rq, now);
+}
+
+/*
+ * deactivate_task - remove a task from the runqueue.
+ */
+static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
+{
+	u64 now = rq_clock(rq);
+
+	if (p->state == TASK_UNINTERRUPTIBLE)
+		rq->nr_uninterruptible++;
+
+	dequeue_task(rq, p, sleep, now);
+	dec_nr_running(p, rq, now);
+}
+
 /**
  * task_curr - is this task currently executing on a CPU?
  * @p: the task in question.
@@ -1107,10 +952,42 @@
 /* Used instead of source_load when we know the type == 0 */
 unsigned long weighted_cpuload(const int cpu)
 {
-	return cpu_rq(cpu)->raw_weighted_load;
+	return cpu_rq(cpu)->ls.load.weight;
+}
+
+static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
+{
+#ifdef CONFIG_SMP
+	task_thread_info(p)->cpu = cpu;
+	set_task_cfs_rq(p);
+#endif
 }
 
 #ifdef CONFIG_SMP
+
+void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
+{
+	int old_cpu = task_cpu(p);
+	struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu);
+	u64 clock_offset, fair_clock_offset;
+
+	clock_offset = old_rq->clock - new_rq->clock;
+	fair_clock_offset = old_rq->cfs.fair_clock -
+						 new_rq->cfs.fair_clock;
+	if (p->se.wait_start)
+		p->se.wait_start -= clock_offset;
+	if (p->se.wait_start_fair)
+		p->se.wait_start_fair -= fair_clock_offset;
+	if (p->se.sleep_start)
+		p->se.sleep_start -= clock_offset;
+	if (p->se.block_start)
+		p->se.block_start -= clock_offset;
+	if (p->se.sleep_start_fair)
+		p->se.sleep_start_fair -= fair_clock_offset;
+
+	__set_task_cpu(p, new_cpu);
+}
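
Each runqueue runs its own clock, so a migrating task's timestamps must be rebased into the destination CPU's timebase; subtracting the inter-runqueue clock offset, as set_task_cpu() does above, preserves elapsed intervals. A toy illustration of the arithmetic with invented values:

#include <stdio.h>

int main(void)
{
	/* Invented clock readings on two CPUs at the moment of migration: */
	unsigned long long old_clock = 5000, new_clock = 3000;
	unsigned long long wait_start = 4200;	/* stamped on the old CPU */

	unsigned long long clock_offset = old_clock - new_clock;

	wait_start -= clock_offset;	/* same rebasing as set_task_cpu() */

	/* Elapsed wait is preserved: 5000-4200 == 3000-2200 == 800. */
	printf("rebased wait_start=%llu elapsed=%llu\n",
	       wait_start, new_clock - wait_start);
	return 0;
}
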
+
 struct migration_req {
 	struct list_head list;
 
@@ -1133,7 +1010,7 @@
 	 * If the task is not on a runqueue (and not running), then
 	 * it is sufficient to simply update the task's cpu field.
 	 */
-	if (!p->array && !task_running(rq, p)) {
+	if (!p->se.on_rq && !task_running(rq, p)) {
 		set_task_cpu(p, dest_cpu);
 		return 0;
 	}
@@ -1158,9 +1035,8 @@
 void wait_task_inactive(struct task_struct *p)
 {
 	unsigned long flags;
+	int running, on_rq;
 	struct rq *rq;
-	struct prio_array *array;
-	int running;
 
 repeat:
 	/*
@@ -1192,7 +1068,7 @@
 	 */
 	rq = task_rq_lock(p, &flags);
 	running = task_running(rq, p);
-	array = p->array;
+	on_rq = p->se.on_rq;
 	task_rq_unlock(rq, &flags);
 
 	/*
@@ -1215,7 +1091,7 @@
 	 * running right now), it's preempted, and we should
 	 * yield - it could be a while.
 	 */
-	if (unlikely(array)) {
+	if (unlikely(on_rq)) {
 		yield();
 		goto repeat;
 	}
@@ -1261,11 +1137,12 @@
 static inline unsigned long source_load(int cpu, int type)
 {
 	struct rq *rq = cpu_rq(cpu);
+	unsigned long total = weighted_cpuload(cpu);
 
 	if (type == 0)
-		return rq->raw_weighted_load;
+		return total;
 
-	return min(rq->cpu_load[type-1], rq->raw_weighted_load);
+	return min(rq->cpu_load[type-1], total);
 }
 
 /*
@@ -1275,11 +1152,12 @@
 static inline unsigned long target_load(int cpu, int type)
 {
 	struct rq *rq = cpu_rq(cpu);
+	unsigned long total = weighted_cpuload(cpu);
 
 	if (type == 0)
-		return rq->raw_weighted_load;
+		return total;
 
-	return max(rq->cpu_load[type-1], rq->raw_weighted_load);
+	return max(rq->cpu_load[type-1], total);
 }
 
 /*
@@ -1288,9 +1166,10 @@
 static inline unsigned long cpu_avg_load_per_task(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
+	unsigned long total = weighted_cpuload(cpu);
 	unsigned long n = rq->nr_running;
 
-	return n ? rq->raw_weighted_load / n : SCHED_LOAD_SCALE;
+	return n ? total / n : SCHED_LOAD_SCALE;
 }
 
 /*
@@ -1392,9 +1271,9 @@
 	struct sched_domain *tmp, *sd = NULL;
 
 	for_each_domain(cpu, tmp) {
- 		/*
- 	 	 * If power savings logic is enabled for a domain, stop there.
- 	 	 */
+		/*
+		 * If power savings logic is enabled for a domain, stop there.
+		 */
 		if (tmp->flags & SD_POWERSAVINGS_BALANCE)
 			break;
 		if (tmp->flags & flag)
@@ -1477,9 +1356,9 @@
 				if (idle_cpu(i))
 					return i;
 			}
-		}
-		else
+		} else {
 			break;
+		}
 	}
 	return cpu;
 }
@@ -1521,7 +1400,7 @@
 	if (!(old_state & state))
 		goto out;
 
-	if (p->array)
+	if (p->se.on_rq)
 		goto out_running;
 
 	cpu = task_cpu(p);
@@ -1576,11 +1455,11 @@
 			 * of the current CPU:
 			 */
 			if (sync)
-				tl -= current->load_weight;
+				tl -= current->se.load.weight;
 
 			if ((tl <= load &&
 				tl + target_load(cpu, idx) <= tl_per_task) ||
-				100*(tl + p->load_weight) <= imbalance*load) {
+			       100*(tl + p->se.load.weight) <= imbalance*load) {
 				/*
 				 * This domain has SD_WAKE_AFFINE and
 				 * p is cache cold in this domain, and
@@ -1614,7 +1493,7 @@
 		old_state = p->state;
 		if (!(old_state & state))
 			goto out;
-		if (p->array)
+		if (p->se.on_rq)
 			goto out_running;
 
 		this_cpu = smp_processor_id();
@@ -1623,25 +1502,7 @@
 
 out_activate:
 #endif /* CONFIG_SMP */
-	if (old_state == TASK_UNINTERRUPTIBLE) {
-		rq->nr_uninterruptible--;
-		/*
-		 * Tasks on involuntary sleep don't earn
-		 * sleep_avg beyond just interactive state.
-		 */
-		p->sleep_type = SLEEP_NONINTERACTIVE;
-	} else
-
-	/*
-	 * Tasks that have marked their sleep as noninteractive get
-	 * woken up with their sleep average not weighted in an
-	 * interactive way.
-	 */
-		if (old_state & TASK_NONINTERACTIVE)
-			p->sleep_type = SLEEP_NONINTERACTIVE;
-
-
-	activate_task(p, rq, cpu == this_cpu);
+	activate_task(rq, p, 1);
 	/*
 	 * Sync wakeups (i.e. those types of wakeups where the waker
 	 * has indicated that it will leave the CPU in short order)
@@ -1650,10 +1511,8 @@
 	 * the waker guarantees that the freshly woken up task is going
 	 * to be considered on this CPU.)
 	 */
-	if (!sync || cpu != this_cpu) {
-		if (TASK_PREEMPTS_CURR(p, rq))
-			resched_task(rq->curr);
-	}
+	if (!sync || cpu != this_cpu)
+		check_preempt_curr(rq, p);
 	success = 1;
 
 out_running:
@@ -1676,19 +1535,36 @@
 	return try_to_wake_up(p, state, 0);
 }
 
-static void task_running_tick(struct rq *rq, struct task_struct *p);
 /*
  * Perform scheduler related setup for a newly forked process p.
  * p is forked by current.
+ *
+ * __sched_fork() is basic setup used by init_idle() too:
  */
-void fastcall sched_fork(struct task_struct *p, int clone_flags)
+static void __sched_fork(struct task_struct *p)
 {
-	int cpu = get_cpu();
+	p->se.wait_start_fair		= 0;
+	p->se.wait_start		= 0;
+	p->se.exec_start		= 0;
+	p->se.sum_exec_runtime		= 0;
+	p->se.delta_exec		= 0;
+	p->se.delta_fair_run		= 0;
+	p->se.delta_fair_sleep		= 0;
+	p->se.wait_runtime		= 0;
+	p->se.sum_wait_runtime		= 0;
+	p->se.sum_sleep_runtime		= 0;
+	p->se.sleep_start		= 0;
+	p->se.sleep_start_fair		= 0;
+	p->se.block_start		= 0;
+	p->se.sleep_max			= 0;
+	p->se.block_max			= 0;
+	p->se.exec_max			= 0;
+	p->se.wait_max			= 0;
+	p->se.wait_runtime_overruns	= 0;
+	p->se.wait_runtime_underruns	= 0;
 
-#ifdef CONFIG_SMP
-	cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
-#endif
-	set_task_cpu(p, cpu);
+	INIT_LIST_HEAD(&p->run_list);
+	p->se.on_rq = 0;
 
 	/*
 	 * We mark the process as running here, but have not actually
@@ -1697,16 +1573,29 @@
 	 * event cannot wake it up and insert it on the runqueue either.
 	 */
 	p->state = TASK_RUNNING;
+}
+
+/*
+ * fork()/clone()-time setup:
+ */
+void sched_fork(struct task_struct *p, int clone_flags)
+{
+	int cpu = get_cpu();
+
+	__sched_fork(p);
+
+#ifdef CONFIG_SMP
+	cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
+#endif
+	__set_task_cpu(p, cpu);
 
 	/*
 	 * Make sure we do not leak PI boosting priority to the child:
 	 */
 	p->prio = current->normal_prio;
 
-	INIT_LIST_HEAD(&p->run_list);
-	p->array = NULL;
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
-	if (unlikely(sched_info_on()))
+	if (likely(sched_info_on()))
 		memset(&p->sched_info, 0, sizeof(p->sched_info));
 #endif
 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
@@ -1716,34 +1605,16 @@
 	/* Want to start with kernel preemption disabled. */
 	task_thread_info(p)->preempt_count = 1;
 #endif
-	/*
-	 * Share the timeslice between parent and child, thus the
-	 * total amount of pending timeslices in the system doesn't change,
-	 * resulting in more scheduling fairness.
-	 */
-	local_irq_disable();
-	p->time_slice = (current->time_slice + 1) >> 1;
-	/*
-	 * The remainder of the first timeslice might be recovered by
-	 * the parent if the child exits early enough.
-	 */
-	p->first_time_slice = 1;
-	current->time_slice >>= 1;
-	p->timestamp = sched_clock();
-	if (unlikely(!current->time_slice)) {
-		/*
-		 * This case is rare, it happens when the parent has only
-		 * a single jiffy left from its timeslice. Taking the
-		 * runqueue lock is not a problem.
-		 */
-		current->time_slice = 1;
-		task_running_tick(cpu_rq(cpu), current);
-	}
-	local_irq_enable();
 	put_cpu();
 }
 
 /*
+ * After fork, the child runs first by default. If set to 0, the
+ * parent will (try to) run first.
+ */
+unsigned int __read_mostly sysctl_sched_child_runs_first = 1;
+
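A quick way to exercise this knob, assuming it is exported via sysctl as
kernel.sched_child_runs_first elsewhere in this series:

	/* from a root shell: make the parent run first after fork()  */
	/*   # echo 0 > /proc/sys/kernel/sched_child_runs_first       */
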
+/*
  * wake_up_new_task - wake up a newly created task for the first time.
  *
  * This function will do some initial scheduler statistics housekeeping
@@ -1752,107 +1623,27 @@
  */
 void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 {
-	struct rq *rq, *this_rq;
 	unsigned long flags;
-	int this_cpu, cpu;
+	struct rq *rq;
+	int this_cpu;
 
 	rq = task_rq_lock(p, &flags);
 	BUG_ON(p->state != TASK_RUNNING);
-	this_cpu = smp_processor_id();
-	cpu = task_cpu(p);
-
-	/*
-	 * We decrease the sleep average of forking parents
-	 * and children as well, to keep max-interactive tasks
-	 * from forking tasks that are max-interactive. The parent
-	 * (current) is done further down, under its lock.
-	 */
-	p->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(p) *
-		CHILD_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
+	this_cpu = smp_processor_id(); /* parent's CPU */
 
 	p->prio = effective_prio(p);
 
-	if (likely(cpu == this_cpu)) {
-		if (!(clone_flags & CLONE_VM)) {
-			/*
-			 * The VM isn't cloned, so we're in a good position to
-			 * do child-runs-first in anticipation of an exec. This
-			 * usually avoids a lot of COW overhead.
-			 */
-			if (unlikely(!current->array))
-				__activate_task(p, rq);
-			else {
-				p->prio = current->prio;
-				p->normal_prio = current->normal_prio;
-				list_add_tail(&p->run_list, &current->run_list);
-				p->array = current->array;
-				p->array->nr_active++;
-				inc_nr_running(p, rq);
-			}
-			set_need_resched();
-		} else
-			/* Run child last */
-			__activate_task(p, rq);
-		/*
-		 * We skip the following code due to cpu == this_cpu
-	 	 *
-		 *   task_rq_unlock(rq, &flags);
-		 *   this_rq = task_rq_lock(current, &flags);
-		 */
-		this_rq = rq;
+	if (!sysctl_sched_child_runs_first || (clone_flags & CLONE_VM) ||
+			task_cpu(p) != this_cpu || !current->se.on_rq) {
+		activate_task(rq, p, 0);
 	} else {
-		this_rq = cpu_rq(this_cpu);
-
 		/*
-		 * Not the local CPU - must adjust timestamp. This should
-		 * get optimised away in the !CONFIG_SMP case.
+		 * Let the scheduling class do new task startup
+		 * management (if any):
 		 */
-		p->timestamp = (p->timestamp - this_rq->most_recent_timestamp)
-					+ rq->most_recent_timestamp;
-		__activate_task(p, rq);
-		if (TASK_PREEMPTS_CURR(p, rq))
-			resched_task(rq->curr);
-
-		/*
-		 * Parent and child are on different CPUs, now get the
-		 * parent runqueue to update the parent's ->sleep_avg:
-		 */
-		task_rq_unlock(rq, &flags);
-		this_rq = task_rq_lock(current, &flags);
+		p->sched_class->task_new(rq, p);
 	}
-	current->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(current) *
-		PARENT_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
-	task_rq_unlock(this_rq, &flags);
-}
-
-/*
- * Potentially available exiting-child timeslices are
- * retrieved here - this way the parent does not get
- * penalized for creating too many threads.
- *
- * (this cannot be used to 'generate' timeslices
- * artificially, because any timeslice recovered here
- * was given away by the parent in the first place.)
- */
-void fastcall sched_exit(struct task_struct *p)
-{
-	unsigned long flags;
-	struct rq *rq;
-
-	/*
-	 * If the child was a (relative-) CPU hog then decrease
-	 * the sleep_avg of the parent as well.
-	 */
-	rq = task_rq_lock(p->parent, &flags);
-	if (p->first_time_slice && task_cpu(p) == task_cpu(p->parent)) {
-		p->parent->time_slice += p->time_slice;
-		if (unlikely(p->parent->time_slice > task_timeslice(p)))
-			p->parent->time_slice = task_timeslice(p);
-	}
-	if (p->sleep_avg < p->parent->sleep_avg)
-		p->parent->sleep_avg = p->parent->sleep_avg /
-		(EXIT_WEIGHT + 1) * EXIT_WEIGHT + p->sleep_avg /
-		(EXIT_WEIGHT + 1);
+	check_preempt_curr(rq, p);
 	task_rq_unlock(rq, &flags);
 }
 
@@ -1917,7 +1708,7 @@
 		/*
 		 * Remove function-return probe instances associated with this
 		 * task and put them back on the free list.
-	 	 */
+		 */
 		kprobe_flush_task(prev);
 		put_task_struct(prev);
 	}
@@ -1945,13 +1736,15 @@
  * context_switch - switch to the new MM and the new
  * thread's register state.
  */
-static inline struct task_struct *
+static inline void
 context_switch(struct rq *rq, struct task_struct *prev,
 	       struct task_struct *next)
 {
-	struct mm_struct *mm = next->mm;
-	struct mm_struct *oldmm = prev->active_mm;
+	struct mm_struct *mm, *oldmm;
 
+	prepare_task_switch(rq, next);
+	mm = next->mm;
+	oldmm = prev->active_mm;
 	/*
 	 * For paravirt, this is coupled with an exit in switch_to to
 	 * combine the page table reload and the switch backend into
@@ -1959,16 +1752,15 @@
 	 */
 	arch_enter_lazy_cpu_mode();
 
-	if (!mm) {
+	if (unlikely(!mm)) {
 		next->active_mm = oldmm;
 		atomic_inc(&oldmm->mm_count);
 		enter_lazy_tlb(oldmm, next);
 	} else
 		switch_mm(oldmm, mm, next);
 
-	if (!prev->mm) {
+	if (unlikely(!prev->mm)) {
 		prev->active_mm = NULL;
-		WARN_ON(rq->prev_mm);
 		rq->prev_mm = oldmm;
 	}
 	/*
@@ -1984,7 +1776,13 @@
 	/* Here we just switch the register state and the stack. */
 	switch_to(prev, next, prev);
 
-	return prev;
+	barrier();
+	/*
+	 * this_rq must be evaluated again because prev may have moved
+	 * CPUs since it called schedule(), thus the 'rq' on its stack
+	 * frame will be invalid.
+	 */
+	finish_task_switch(this_rq(), prev);
 }
 
 /*
@@ -2057,17 +1855,65 @@
 	return running + uninterruptible;
 }
 
-#ifdef CONFIG_SMP
-
 /*
- * Is this task likely cache-hot:
+ * Update rq->cpu_load[] statistics. This function is usually called every
+ * scheduler tick (TICK_NSEC).
  */
-static inline int
-task_hot(struct task_struct *p, unsigned long long now, struct sched_domain *sd)
+static void update_cpu_load(struct rq *this_rq)
 {
-	return (long long)(now - p->last_ran) < (long long)sd->cache_hot_time;
+	u64 fair_delta64, exec_delta64, idle_delta64, sample_interval64, tmp64;
+	unsigned long total_load = this_rq->ls.load.weight;
+	unsigned long this_load = total_load;
+	struct load_stat *ls = &this_rq->ls;
+	u64 now = __rq_clock(this_rq);
+	int i, scale;
+
+	this_rq->nr_load_updates++;
+	if (unlikely(!(sysctl_sched_features & SCHED_FEAT_PRECISE_CPU_LOAD)))
+		goto do_avg;
+
+	/* Update delta_fair/delta_exec fields first */
+	update_curr_load(this_rq, now);
+
+	fair_delta64 = ls->delta_fair + 1;
+	ls->delta_fair = 0;
+
+	exec_delta64 = ls->delta_exec + 1;
+	ls->delta_exec = 0;
+
+	sample_interval64 = now - ls->load_update_last;
+	ls->load_update_last = now;
+
+	if ((s64)sample_interval64 < (s64)TICK_NSEC)
+		sample_interval64 = TICK_NSEC;
+
+	if (exec_delta64 > sample_interval64)
+		exec_delta64 = sample_interval64;
+
+	idle_delta64 = sample_interval64 - exec_delta64;
+
+	tmp64 = div64_64(SCHED_LOAD_SCALE * exec_delta64, fair_delta64);
+	tmp64 = div64_64(tmp64 * exec_delta64, sample_interval64);
+
+	this_load = (unsigned long)tmp64;
+
+do_avg:
+
+	/* Update our load: */
+	for (i = 0, scale = 1; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
+		unsigned long old_load, new_load;
+
+		/* scale is effectively 1 << i now, and >> i divides by scale */
+
+		old_load = this_rq->cpu_load[i];
+		new_load = this_load;
+
+		this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
+	}
 }
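
The cpu_load[] update above is an exponentially weighted moving average:
index 0 tracks the instantaneous load and each higher index halves the
weight given to new samples. A standalone sketch of the decay behaviour
(illustrative only, same arithmetic as the loop above):

	#include <stdio.h>

	int main(void)
	{
		unsigned long load[3] = { 0, 0, 0 };
		const unsigned long this_load = 1024;	/* constant input */
		int t, i;

		for (t = 0; t < 8; t++) {
			for (i = 0; i < 3; i++) {
				unsigned long scale = 1UL << i;

				load[i] = (load[i] * (scale - 1) + this_load) >> i;
			}
			printf("t=%d: %lu %lu %lu\n",
			       t, load[0], load[1], load[2]);
		}
		/* load[0] locks on to 1024 instantly; load[2] approaches it */
		return 0;
	}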
 
+#ifdef CONFIG_SMP
+
 /*
  * double_rq_lock - safely lock two runqueues
  *
@@ -2184,23 +2030,17 @@
  * pull_task - move a task from a remote runqueue to the local runqueue.
  * Both runqueues must be locked.
  */
-static void pull_task(struct rq *src_rq, struct prio_array *src_array,
-		      struct task_struct *p, struct rq *this_rq,
-		      struct prio_array *this_array, int this_cpu)
+static void pull_task(struct rq *src_rq, struct task_struct *p,
+		      struct rq *this_rq, int this_cpu)
 {
-	dequeue_task(p, src_array);
-	dec_nr_running(p, src_rq);
+	deactivate_task(src_rq, p, 0);
 	set_task_cpu(p, this_cpu);
-	inc_nr_running(p, this_rq);
-	enqueue_task(p, this_array);
-	p->timestamp = (p->timestamp - src_rq->most_recent_timestamp)
-				+ this_rq->most_recent_timestamp;
+	activate_task(this_rq, p, 0);
 	/*
 	 * Note that idle threads have a prio of MAX_PRIO, for this test
 	 * to be always true for them.
 	 */
-	if (TASK_PREEMPTS_CURR(p, this_rq))
-		resched_task(this_rq->curr);
+	check_preempt_curr(this_rq, p);
 }
 
 /*
@@ -2208,7 +2048,7 @@
  */
 static
 int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
-		     struct sched_domain *sd, enum idle_type idle,
+		     struct sched_domain *sd, enum cpu_idle_type idle,
 		     int *all_pinned)
 {
 	/*
@@ -2225,132 +2065,67 @@
 		return 0;
 
 	/*
-	 * Aggressive migration if:
-	 * 1) task is cache cold, or
-	 * 2) too many balance attempts have failed.
+	 * Aggressive migration if too many balance attempts have failed:
 	 */
-
-	if (sd->nr_balance_failed > sd->cache_nice_tries) {
-#ifdef CONFIG_SCHEDSTATS
-		if (task_hot(p, rq->most_recent_timestamp, sd))
-			schedstat_inc(sd, lb_hot_gained[idle]);
-#endif
+	if (sd->nr_balance_failed > sd->cache_nice_tries)
 		return 1;
-	}
 
-	if (task_hot(p, rq->most_recent_timestamp, sd))
-		return 0;
 	return 1;
 }
 
-#define rq_best_prio(rq) min((rq)->curr->prio, (rq)->best_expired_prio)
-
-/*
- * move_tasks tries to move up to max_nr_move tasks and max_load_move weighted
- * load from busiest to this_rq, as part of a balancing operation within
- * "domain". Returns the number of tasks moved.
- *
- * Called with both runqueues locked.
- */
-static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
+static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		      unsigned long max_nr_move, unsigned long max_load_move,
-		      struct sched_domain *sd, enum idle_type idle,
-		      int *all_pinned)
+		      struct sched_domain *sd, enum cpu_idle_type idle,
+		      int *all_pinned, unsigned long *load_moved,
+		      int this_best_prio, int best_prio, int best_prio_seen,
+		      struct rq_iterator *iterator)
 {
-	int idx, pulled = 0, pinned = 0, this_best_prio, best_prio,
-	    best_prio_seen, skip_for_load;
-	struct prio_array *array, *dst_array;
-	struct list_head *head, *curr;
-	struct task_struct *tmp;
-	long rem_load_move;
+	int pulled = 0, pinned = 0, skip_for_load;
+	struct task_struct *p;
+	long rem_load_move = max_load_move;
 
 	if (max_nr_move == 0 || max_load_move == 0)
 		goto out;
 
-	rem_load_move = max_load_move;
 	pinned = 1;
-	this_best_prio = rq_best_prio(this_rq);
-	best_prio = rq_best_prio(busiest);
-	/*
-	 * Enable handling of the case where there is more than one task
-	 * with the best priority.   If the current running task is one
-	 * of those with prio==best_prio we know it won't be moved
-	 * and therefore it's safe to override the skip (based on load) of
-	 * any task we find with that prio.
-	 */
-	best_prio_seen = best_prio == busiest->curr->prio;
 
 	/*
-	 * We first consider expired tasks. Those will likely not be
-	 * executed in the near future, and they are most likely to
-	 * be cache-cold, thus switching CPUs has the least effect
-	 * on them.
+	 * Start the load-balancing iterator:
 	 */
-	if (busiest->expired->nr_active) {
-		array = busiest->expired;
-		dst_array = this_rq->expired;
-	} else {
-		array = busiest->active;
-		dst_array = this_rq->active;
-	}
-
-new_array:
-	/* Start searching at priority 0: */
-	idx = 0;
-skip_bitmap:
-	if (!idx)
-		idx = sched_find_first_bit(array->bitmap);
-	else
-		idx = find_next_bit(array->bitmap, MAX_PRIO, idx);
-	if (idx >= MAX_PRIO) {
-		if (array == busiest->expired && busiest->active->nr_active) {
-			array = busiest->active;
-			dst_array = this_rq->active;
-			goto new_array;
-		}
+	p = iterator->start(iterator->arg);
+next:
+	if (!p)
 		goto out;
-	}
-
-	head = array->queue + idx;
-	curr = head->prev;
-skip_queue:
-	tmp = list_entry(curr, struct task_struct, run_list);
-
-	curr = curr->prev;
-
 	/*
 	 * To help distribute high priority tasks across CPUs, we don't
 	 * skip a task if it will be the highest priority task (i.e. smallest
 	 * prio value) on its new queue, regardless of its load weight.
 	 */
-	skip_for_load = tmp->load_weight > rem_load_move;
-	if (skip_for_load && idx < this_best_prio)
-		skip_for_load = !best_prio_seen && idx == best_prio;
+	skip_for_load = (p->se.load.weight >> 1) > rem_load_move +
+							 SCHED_LOAD_SCALE_FUZZ;
+	if (skip_for_load && p->prio < this_best_prio)
+		skip_for_load = !best_prio_seen && p->prio == best_prio;
 	if (skip_for_load ||
-	    !can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) {
+	    !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
 
-		best_prio_seen |= idx == best_prio;
-		if (curr != head)
-			goto skip_queue;
-		idx++;
-		goto skip_bitmap;
+		best_prio_seen |= p->prio == best_prio;
+		p = iterator->next(iterator->arg);
+		goto next;
 	}
 
-	pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu);
+	pull_task(busiest, p, this_rq, this_cpu);
 	pulled++;
-	rem_load_move -= tmp->load_weight;
+	rem_load_move -= p->se.load.weight;
 
 	/*
 	 * We only want to steal up to the prescribed number of tasks
 	 * and the prescribed amount of weighted load.
 	 */
 	if (pulled < max_nr_move && rem_load_move > 0) {
-		if (idx < this_best_prio)
-			this_best_prio = idx;
-		if (curr != head)
-			goto skip_queue;
-		idx++;
-		goto skip_bitmap;
+		if (p->prio < this_best_prio)
+			this_best_prio = p->prio;
+		p = iterator->next(iterator->arg);
+		goto next;
 	}
 out:
 	/*
@@ -2362,18 +2137,48 @@
 
 	if (all_pinned)
 		*all_pinned = pinned;
+	*load_moved = max_load_move - rem_load_move;
 	return pulled;
 }
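
balance_tasks() is generic over an iterator supplied by the scheduling
class; the contract it relies on is small (shape inferred from the calls
above, the concrete iterators live in the per-class code):

	struct rq_iterator {
		void *arg;					/* iteration cursor */
		struct task_struct *(*start)(void *arg);	/* first candidate */
		struct task_struct *(*next)(void *arg);		/* NULL when done */
	};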
 
 /*
+ * move_tasks tries to move up to max_nr_move tasks and max_load_move weighted
+ * load from busiest to this_rq, as part of a balancing operation within
+ * "domain". Returns the number of tasks moved.
+ *
+ * Called with both runqueues locked.
+ */
+static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
+		      unsigned long max_nr_move, unsigned long max_load_move,
+		      struct sched_domain *sd, enum cpu_idle_type idle,
+		      int *all_pinned)
+{
+	struct sched_class *class = sched_class_highest;
+	unsigned long load_moved, total_nr_moved = 0, nr_moved;
+	long rem_load_move = max_load_move;
+
+	do {
+		nr_moved = class->load_balance(this_rq, this_cpu, busiest,
+				max_nr_move, (unsigned long)rem_load_move,
+				sd, idle, all_pinned, &load_moved);
+		total_nr_moved += nr_moved;
+		max_nr_move -= nr_moved;
+		rem_load_move -= load_moved;
+		class = class->next;
+	} while (class && max_nr_move && rem_load_move > 0);
+
+	return total_nr_moved;
+}
+
+/*
  * find_busiest_group finds and returns the busiest CPU group within the
  * domain. It calculates and returns the amount of weighted load which
  * should be moved to restore balance via the imbalance parameter.
  */
 static struct sched_group *
 find_busiest_group(struct sched_domain *sd, int this_cpu,
-		   unsigned long *imbalance, enum idle_type idle, int *sd_idle,
-		   cpumask_t *cpus, int *balance)
+		   unsigned long *imbalance, enum cpu_idle_type idle,
+		   int *sd_idle, cpumask_t *cpus, int *balance)
 {
 	struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
 	unsigned long max_load, avg_load, total_load, this_load, total_pwr;
@@ -2391,9 +2196,9 @@
 	max_load = this_load = total_load = total_pwr = 0;
 	busiest_load_per_task = busiest_nr_running = 0;
 	this_load_per_task = this_nr_running = 0;
-	if (idle == NOT_IDLE)
+	if (idle == CPU_NOT_IDLE)
 		load_idx = sd->busy_idx;
-	else if (idle == NEWLY_IDLE)
+	else if (idle == CPU_NEWLY_IDLE)
 		load_idx = sd->newidle_idx;
 	else
 		load_idx = sd->idle_idx;
@@ -2437,7 +2242,7 @@
 
 			avg_load += load;
 			sum_nr_running += rq->nr_running;
-			sum_weighted_load += rq->raw_weighted_load;
+			sum_weighted_load += weighted_cpuload(i);
 		}
 
 		/*
@@ -2477,8 +2282,9 @@
 		 * Busy processors will not participate in power savings
 		 * balance.
 		 */
- 		if (idle == NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
- 			goto group_next;
+		if (idle == CPU_NOT_IDLE ||
+				!(sd->flags & SD_POWERSAVINGS_BALANCE))
+			goto group_next;
 
 		/*
 		 * If the local group is idle or completely loaded
@@ -2488,42 +2294,42 @@
 				    !this_nr_running))
 			power_savings_balance = 0;
 
- 		/*
+		/*
 		 * If a group is already running at full capacity or idle,
 		 * don't include that group in power savings calculations
- 		 */
- 		if (!power_savings_balance || sum_nr_running >= group_capacity
+		 */
+		if (!power_savings_balance || sum_nr_running >= group_capacity
 		    || !sum_nr_running)
- 			goto group_next;
+			goto group_next;
 
- 		/*
+		/*
 		 * Calculate the group which has the least non-idle load.
- 		 * This is the group from where we need to pick up the load
- 		 * for saving power
- 		 */
- 		if ((sum_nr_running < min_nr_running) ||
- 		    (sum_nr_running == min_nr_running &&
+		 * This is the group from which we need to pick up load
+		 * to save power.
+		 */
+		if ((sum_nr_running < min_nr_running) ||
+		    (sum_nr_running == min_nr_running &&
 		     first_cpu(group->cpumask) <
 		     first_cpu(group_min->cpumask))) {
- 			group_min = group;
- 			min_nr_running = sum_nr_running;
+			group_min = group;
+			min_nr_running = sum_nr_running;
 			min_load_per_task = sum_weighted_load /
 						sum_nr_running;
- 		}
+		}
 
- 		/*
+		/*
 		 * Calculate the group which is nearly at its
- 		 * capacity but still has some space to pick up some load
- 		 * from other group and save more power
- 		 */
- 		if (sum_nr_running <= group_capacity - 1) {
- 			if (sum_nr_running > leader_nr_running ||
- 			    (sum_nr_running == leader_nr_running &&
- 			     first_cpu(group->cpumask) >
- 			      first_cpu(group_leader->cpumask))) {
- 				group_leader = group;
- 				leader_nr_running = sum_nr_running;
- 			}
+		 * capacity but still has some space to pick up some load
+		 * from another group and save more power
+		 */
+		if (sum_nr_running <= group_capacity - 1) {
+			if (sum_nr_running > leader_nr_running ||
+			    (sum_nr_running == leader_nr_running &&
+			     first_cpu(group->cpumask) >
+			      first_cpu(group_leader->cpumask))) {
+				group_leader = group;
+				leader_nr_running = sum_nr_running;
+			}
 		}
 group_next:
 #endif
@@ -2578,7 +2384,7 @@
 	 * a think about bumping its value to force at least one task to be
 	 * moved
 	 */
-	if (*imbalance < busiest_load_per_task) {
+	if (*imbalance + SCHED_LOAD_SCALE_FUZZ < busiest_load_per_task/2) {
 		unsigned long tmp, pwr_now, pwr_move;
 		unsigned int imbn;
 
@@ -2592,7 +2398,8 @@
 		} else
 			this_load_per_task = SCHED_LOAD_SCALE;
 
-		if (max_load - this_load >= busiest_load_per_task * imbn) {
+		if (max_load - this_load + SCHED_LOAD_SCALE_FUZZ >=
+					busiest_load_per_task * imbn) {
 			*imbalance = busiest_load_per_task;
 			return busiest;
 		}
@@ -2639,7 +2446,7 @@
 
 out_balanced:
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
-	if (idle == NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
+	if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
 		goto ret;
 
 	if (this == group_leader && group_leader != group_min) {
@@ -2656,7 +2463,7 @@
  * find_busiest_queue - find the busiest runqueue among the cpus in group.
  */
 static struct rq *
-find_busiest_queue(struct sched_group *group, enum idle_type idle,
+find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
 		   unsigned long imbalance, cpumask_t *cpus)
 {
 	struct rq *busiest = NULL, *rq;
@@ -2664,17 +2471,19 @@
 	int i;
 
 	for_each_cpu_mask(i, group->cpumask) {
+		unsigned long wl;
 
 		if (!cpu_isset(i, *cpus))
 			continue;
 
 		rq = cpu_rq(i);
+		wl = weighted_cpuload(i);
 
-		if (rq->nr_running == 1 && rq->raw_weighted_load > imbalance)
+		if (rq->nr_running == 1 && wl > imbalance)
 			continue;
 
-		if (rq->raw_weighted_load > max_load) {
-			max_load = rq->raw_weighted_load;
+		if (wl > max_load) {
+			max_load = wl;
 			busiest = rq;
 		}
 	}
@@ -2698,7 +2507,7 @@
  * tasks if there is an imbalance.
  */
 static int load_balance(int this_cpu, struct rq *this_rq,
-			struct sched_domain *sd, enum idle_type idle,
+			struct sched_domain *sd, enum cpu_idle_type idle,
 			int *balance)
 {
 	int nr_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
@@ -2711,10 +2520,10 @@
 	/*
 	 * When power savings policy is enabled for the parent domain, idle
 	 * sibling can pick up load irrespective of busy siblings. In this case,
-	 * let the state of idle sibling percolate up as IDLE, instead of
-	 * portraying it as NOT_IDLE.
+	 * let the state of idle sibling percolate up as CPU_IDLE, instead of
+	 * portraying it as CPU_NOT_IDLE.
 	 */
-	if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
+	if (idle != CPU_NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
 	    !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
 		sd_idle = 1;
 
@@ -2848,7 +2657,7 @@
  * Check this_cpu to ensure it is balanced within domain. Attempt to move
  * tasks if there is an imbalance.
  *
- * Called from schedule when this_rq is about to become idle (NEWLY_IDLE).
+ * Called from schedule when this_rq is about to become idle (CPU_NEWLY_IDLE).
  * this_rq is locked.
  */
 static int
@@ -2865,31 +2674,31 @@
 	 * When power savings policy is enabled for the parent domain, idle
 	 * sibling can pick up load irrespective of busy siblings. In this case,
 	 * let the state of idle sibling percolate up as CPU_IDLE, instead of
-	 * portraying it as NOT_IDLE.
+	 * portraying it as CPU_NOT_IDLE.
 	 */
 	if (sd->flags & SD_SHARE_CPUPOWER &&
 	    !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
 		sd_idle = 1;
 
-	schedstat_inc(sd, lb_cnt[NEWLY_IDLE]);
+	schedstat_inc(sd, lb_cnt[CPU_NEWLY_IDLE]);
 redo:
-	group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE,
+	group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
 				   &sd_idle, &cpus, NULL);
 	if (!group) {
-		schedstat_inc(sd, lb_nobusyg[NEWLY_IDLE]);
+		schedstat_inc(sd, lb_nobusyg[CPU_NEWLY_IDLE]);
 		goto out_balanced;
 	}
 
-	busiest = find_busiest_queue(group, NEWLY_IDLE, imbalance,
+	busiest = find_busiest_queue(group, CPU_NEWLY_IDLE, imbalance,
 				&cpus);
 	if (!busiest) {
-		schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]);
+		schedstat_inc(sd, lb_nobusyq[CPU_NEWLY_IDLE]);
 		goto out_balanced;
 	}
 
 	BUG_ON(busiest == this_rq);
 
-	schedstat_add(sd, lb_imbalance[NEWLY_IDLE], imbalance);
+	schedstat_add(sd, lb_imbalance[CPU_NEWLY_IDLE], imbalance);
 
 	nr_moved = 0;
 	if (busiest->nr_running > 1) {
@@ -2897,7 +2706,7 @@
 		double_lock_balance(this_rq, busiest);
 		nr_moved = move_tasks(this_rq, this_cpu, busiest,
 					minus_1_or_zero(busiest->nr_running),
-					imbalance, sd, NEWLY_IDLE, NULL);
+					imbalance, sd, CPU_NEWLY_IDLE, NULL);
 		spin_unlock(&busiest->lock);
 
 		if (!nr_moved) {
@@ -2908,7 +2717,7 @@
 	}
 
 	if (!nr_moved) {
-		schedstat_inc(sd, lb_failed[NEWLY_IDLE]);
+		schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]);
 		if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
 		    !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
 			return -1;
@@ -2918,7 +2727,7 @@
 	return nr_moved;
 
 out_balanced:
-	schedstat_inc(sd, lb_balanced[NEWLY_IDLE]);
+	schedstat_inc(sd, lb_balanced[CPU_NEWLY_IDLE]);
 	if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
 	    !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
 		return -1;
@@ -2934,8 +2743,8 @@
 static void idle_balance(int this_cpu, struct rq *this_rq)
 {
 	struct sched_domain *sd;
-	int pulled_task = 0;
-	unsigned long next_balance = jiffies + 60 *  HZ;
+	int pulled_task = -1;
+	unsigned long next_balance = jiffies + HZ;
 
 	for_each_domain(this_cpu, sd) {
 		unsigned long interval;
@@ -2954,12 +2763,13 @@
 		if (pulled_task)
 			break;
 	}
-	if (!pulled_task)
+	if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
 		/*
 		 * We are going idle. next_balance may be set based on
 		 * a busy processor. So reset next_balance.
 		 */
 		this_rq->next_balance = next_balance;
+	}
 }
 
 /*
@@ -3003,7 +2813,7 @@
 		schedstat_inc(sd, alb_cnt);
 
 		if (move_tasks(target_rq, target_cpu, busiest_rq, 1,
-			       RTPRIO_TO_LOAD_WEIGHT(100), sd, SCHED_IDLE,
+			       RTPRIO_TO_LOAD_WEIGHT(100), sd, CPU_IDLE,
 			       NULL))
 			schedstat_inc(sd, alb_pushed);
 		else
@@ -3012,32 +2822,6 @@
 	spin_unlock(&target_rq->lock);
 }
 
-static void update_load(struct rq *this_rq)
-{
-	unsigned long this_load;
-	unsigned int i, scale;
-
-	this_load = this_rq->raw_weighted_load;
-
-	/* Update our load: */
-	for (i = 0, scale = 1; i < 3; i++, scale += scale) {
-		unsigned long old_load, new_load;
-
-		/* scale is effectively 1 << i now, and >> i divides by scale */
-
-		old_load = this_rq->cpu_load[i];
-		new_load = this_load;
-		/*
-		 * Round up the averaging division if load is increasing. This
-		 * prevents us from getting stuck on 9 if the load is 10, for
-		 * example.
-		 */
-		if (new_load > old_load)
-			new_load += scale-1;
-		this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
-	}
-}
-
 #ifdef CONFIG_NO_HZ
 static struct {
 	atomic_t load_balancer;
@@ -3120,7 +2904,7 @@
  *
  * Balancing parameters are set up in arch_init_sched_domains.
  */
-static inline void rebalance_domains(int cpu, enum idle_type idle)
+static inline void rebalance_domains(int cpu, enum cpu_idle_type idle)
 {
 	int balance = 1;
 	struct rq *rq = cpu_rq(cpu);
@@ -3134,13 +2918,16 @@
 			continue;
 
 		interval = sd->balance_interval;
-		if (idle != SCHED_IDLE)
+		if (idle != CPU_IDLE)
 			interval *= sd->busy_factor;
 
 		/* scale ms to jiffies */
 		interval = msecs_to_jiffies(interval);
 		if (unlikely(!interval))
 			interval = 1;
+		if (interval > HZ*NR_CPUS/10)
+			interval = HZ*NR_CPUS/10;
 
 		if (sd->flags & SD_SERIALIZE) {
 			if (!spin_trylock(&balancing))
@@ -3154,7 +2941,7 @@
 				 * longer idle, or one of our SMT siblings is
 				 * not idle.
 				 */
-				idle = NOT_IDLE;
+				idle = CPU_NOT_IDLE;
 			}
 			sd->last_balance = jiffies;
 		}
@@ -3182,11 +2969,12 @@
  */
 static void run_rebalance_domains(struct softirq_action *h)
 {
-	int local_cpu = smp_processor_id();
-	struct rq *local_rq = cpu_rq(local_cpu);
-	enum idle_type idle = local_rq->idle_at_tick ? SCHED_IDLE : NOT_IDLE;
+	int this_cpu = smp_processor_id();
+	struct rq *this_rq = cpu_rq(this_cpu);
+	enum cpu_idle_type idle = this_rq->idle_at_tick ?
+						CPU_IDLE : CPU_NOT_IDLE;
 
-	rebalance_domains(local_cpu, idle);
+	rebalance_domains(this_cpu, idle);
 
 #ifdef CONFIG_NO_HZ
 	/*
@@ -3194,13 +2982,13 @@
 	 * balancing on behalf of the other idle cpus whose ticks are
 	 * stopped.
 	 */
-	if (local_rq->idle_at_tick &&
-	    atomic_read(&nohz.load_balancer) == local_cpu) {
+	if (this_rq->idle_at_tick &&
+	    atomic_read(&nohz.load_balancer) == this_cpu) {
 		cpumask_t cpus = nohz.cpu_mask;
 		struct rq *rq;
 		int balance_cpu;
 
-		cpu_clear(local_cpu, cpus);
+		cpu_clear(this_cpu, cpus);
 		for_each_cpu_mask(balance_cpu, cpus) {
 			/*
 			 * If this cpu gets work to do, stop the load balancing
@@ -3213,8 +3001,8 @@
 			rebalance_domains(balance_cpu, CPU_IDLE);
 
 			rq = cpu_rq(balance_cpu);
-			if (time_after(local_rq->next_balance, rq->next_balance))
-				local_rq->next_balance = rq->next_balance;
+			if (time_after(this_rq->next_balance, rq->next_balance))
+				this_rq->next_balance = rq->next_balance;
 		}
 	}
 #endif
@@ -3227,9 +3015,8 @@
  * idle load balancing owner or decide to stop the periodic load balancing,
  * if the whole system is idle.
  */
-static inline void trigger_load_balance(int cpu)
+static inline void trigger_load_balance(struct rq *rq, int cpu)
 {
-	struct rq *rq = cpu_rq(cpu);
 #ifdef CONFIG_NO_HZ
 	/*
 	 * If we were in the nohz mode recently and busy at the current
@@ -3281,13 +3068,29 @@
 	if (time_after_eq(jiffies, rq->next_balance))
 		raise_softirq(SCHED_SOFTIRQ);
 }
-#else
+
+#else	/* CONFIG_SMP */
+
 /*
  * on UP we do not need to balance between CPUs:
  */
 static inline void idle_balance(int cpu, struct rq *rq)
 {
 }
+
+/* Avoid "defined but not used" warning on UP */
+static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
+		      unsigned long max_nr_move, unsigned long max_load_move,
+		      struct sched_domain *sd, enum cpu_idle_type idle,
+		      int *all_pinned, unsigned long *load_moved,
+		      int this_best_prio, int best_prio, int best_prio_seen,
+		      struct rq_iterator *iterator)
+{
+	*load_moved = 0;
+
+	return 0;
+}
+
 #endif
 
 DEFINE_PER_CPU(struct kernel_stat, kstat);
@@ -3295,54 +3098,28 @@
 EXPORT_PER_CPU_SYMBOL(kstat);
 
 /*
- * This is called on clock ticks and on context switches.
- * Bank in p->sched_time the ns elapsed since the last tick or switch.
+ * Return p->sum_exec_runtime plus any more ns on the sched_clock
+ * that have not yet been banked in case the task is currently running.
  */
-static inline void
-update_cpu_clock(struct task_struct *p, struct rq *rq, unsigned long long now)
+unsigned long long task_sched_runtime(struct task_struct *p)
 {
-	p->sched_time += now - p->last_ran;
-	p->last_ran = rq->most_recent_timestamp = now;
-}
-
-/*
- * Return current->sched_time plus any more ns on the sched_clock
- * that have not yet been banked.
- */
-unsigned long long current_sched_time(const struct task_struct *p)
-{
-	unsigned long long ns;
 	unsigned long flags;
+	u64 ns, delta_exec;
+	struct rq *rq;
 
-	local_irq_save(flags);
-	ns = p->sched_time + sched_clock() - p->last_ran;
-	local_irq_restore(flags);
+	rq = task_rq_lock(p, &flags);
+	ns = p->se.sum_exec_runtime;
+	if (rq->curr == p) {
+		delta_exec = rq_clock(rq) - p->se.exec_start;
+		if ((s64)delta_exec > 0)
+			ns += delta_exec;
+	}
+	task_rq_unlock(rq, &flags);
 
 	return ns;
 }
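
For example, a task that has banked 5 ms in se.sum_exec_runtime and has
been on the CPU for another 2 ms since se.exec_start reads back 7 ms
here, without waiting for the next tick to bank the delta (hypothetical
figures):

	/* rq->curr == p, rq_clock(rq) - p->se.exec_start == 2000000 ns */
	ns = 5000000 + 2000000;		/* 7 ms */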
 
 /*
- * We place interactive tasks back into the active array, if possible.
- *
- * To guarantee that this does not starve expired tasks we ignore the
- * interactivity of a task if the first expired task had to wait more
- * than a 'reasonable' amount of time. This deadline timeout is
- * load-dependent, as the frequency of array switched decreases with
- * increasing number of running tasks. We also ignore the interactivity
- * if a better static_prio task has expired:
- */
-static inline int expired_starving(struct rq *rq)
-{
-	if (rq->curr->static_prio > rq->best_expired_prio)
-		return 1;
-	if (!STARVATION_LIMIT || !rq->expired_timestamp)
-		return 0;
-	if (jiffies - rq->expired_timestamp > STARVATION_LIMIT * rq->nr_running)
-		return 1;
-	return 0;
-}
-
-/*
  * Account user cpu time to a process.
  * @p: the process that the cpu time gets accounted to
  * @hardirq_offset: the offset to subtract from hardirq_count()
@@ -3415,81 +3192,6 @@
 		cpustat->steal = cputime64_add(cpustat->steal, tmp);
 }
 
-static void task_running_tick(struct rq *rq, struct task_struct *p)
-{
-	if (p->array != rq->active) {
-		/* Task has expired but was not scheduled yet */
-		set_tsk_need_resched(p);
-		return;
-	}
-	spin_lock(&rq->lock);
-	/*
-	 * The task was running during this tick - update the
-	 * time slice counter. Note: we do not update a thread's
-	 * priority until it either goes to sleep or uses up its
-	 * timeslice. This makes it possible for interactive tasks
-	 * to use up their timeslices at their highest priority levels.
-	 */
-	if (rt_task(p)) {
-		/*
-		 * RR tasks need a special form of timeslice management.
-		 * FIFO tasks have no timeslices.
-		 */
-		if ((p->policy == SCHED_RR) && !--p->time_slice) {
-			p->time_slice = task_timeslice(p);
-			p->first_time_slice = 0;
-			set_tsk_need_resched(p);
-
-			/* put it at the end of the queue: */
-			requeue_task(p, rq->active);
-		}
-		goto out_unlock;
-	}
-	if (!--p->time_slice) {
-		dequeue_task(p, rq->active);
-		set_tsk_need_resched(p);
-		p->prio = effective_prio(p);
-		p->time_slice = task_timeslice(p);
-		p->first_time_slice = 0;
-
-		if (!rq->expired_timestamp)
-			rq->expired_timestamp = jiffies;
-		if (!TASK_INTERACTIVE(p) || expired_starving(rq)) {
-			enqueue_task(p, rq->expired);
-			if (p->static_prio < rq->best_expired_prio)
-				rq->best_expired_prio = p->static_prio;
-		} else
-			enqueue_task(p, rq->active);
-	} else {
-		/*
-		 * Prevent a too long timeslice allowing a task to monopolize
-		 * the CPU. We do this by splitting up the timeslice into
-		 * smaller pieces.
-		 *
-		 * Note: this does not mean the task's timeslices expire or
-		 * get lost in any way, they just might be preempted by
-		 * another task of equal priority. (one with higher
-		 * priority would have preempted this task already.) We
-		 * requeue this task to the end of the list on this priority
-		 * level, which is in essence a round-robin of tasks with
-		 * equal priority.
-		 *
-		 * This only applies to tasks in the interactive
-		 * delta range with at least TIMESLICE_GRANULARITY to requeue.
-		 */
-		if (TASK_INTERACTIVE(p) && !((task_timeslice(p) -
-			p->time_slice) % TIMESLICE_GRANULARITY(p)) &&
-			(p->time_slice >= TIMESLICE_GRANULARITY(p)) &&
-			(p->array == rq->active)) {
-
-			requeue_task(p, rq->active);
-			set_tsk_need_resched(p);
-		}
-	}
-out_unlock:
-	spin_unlock(&rq->lock);
-}
-
 /*
  * This function gets called by the timer code, with HZ frequency.
  * We call it with interrupts disabled.
@@ -3499,20 +3201,19 @@
  */
 void scheduler_tick(void)
 {
-	unsigned long long now = sched_clock();
-	struct task_struct *p = current;
 	int cpu = smp_processor_id();
-	int idle_at_tick = idle_cpu(cpu);
 	struct rq *rq = cpu_rq(cpu);
+	struct task_struct *curr = rq->curr;
 
-	update_cpu_clock(p, rq, now);
+	spin_lock(&rq->lock);
+	if (curr != rq->idle) /* FIXME: needed? */
+		curr->sched_class->task_tick(rq, curr);
+	update_cpu_load(rq);
+	spin_unlock(&rq->lock);
 
-	if (!idle_at_tick)
-		task_running_tick(rq, p);
 #ifdef CONFIG_SMP
-	update_load(rq);
-	rq->idle_at_tick = idle_at_tick;
-	trigger_load_balance(cpu);
+	rq->idle_at_tick = idle_cpu(cpu);
+	trigger_load_balance(rq, cpu);
 #endif
 }
 
@@ -3554,10 +3255,67 @@
 
 #endif
 
-static inline int interactive_sleep(enum sleep_type sleep_type)
+/*
+ * Print scheduling while atomic bug:
+ */
+static noinline void __schedule_bug(struct task_struct *prev)
 {
-	return (sleep_type == SLEEP_INTERACTIVE ||
-		sleep_type == SLEEP_INTERRUPTED);
+	printk(KERN_ERR "BUG: scheduling while atomic: %s/0x%08x/%d\n",
+		prev->comm, preempt_count(), prev->pid);
+	debug_show_held_locks(prev);
+	if (irqs_disabled())
+		print_irqtrace_events(prev);
+	dump_stack();
+}
+
+/*
+ * Various schedule()-time debugging checks and statistics:
+ */
+static inline void schedule_debug(struct task_struct *prev)
+{
+	/*
+	 * Test if we are atomic.  Since do_exit() needs to call into
+	 * schedule() atomically, we ignore that path for now.
+	 * Otherwise, whine if we are scheduling when we should not be.
+	 */
+	if (unlikely(in_atomic_preempt_off()) && unlikely(!prev->exit_state))
+		__schedule_bug(prev);
+
+	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
+
+	schedstat_inc(this_rq(), sched_cnt);
+}
+
+/*
+ * Pick up the highest-prio task:
+ */
+static inline struct task_struct *
+pick_next_task(struct rq *rq, struct task_struct *prev, u64 now)
+{
+	struct sched_class *class;
+	struct task_struct *p;
+
+	/*
+	 * Optimization: we know that if all tasks are in
+	 * the fair class we can call that function directly:
+	 */
+	if (likely(rq->nr_running == rq->cfs.nr_running)) {
+		p = fair_sched_class.pick_next_task(rq, now);
+		if (likely(p))
+			return p;
+	}
+
+	class = sched_class_highest;
+	for ( ; ; ) {
+		p = class->pick_next_task(rq, now);
+		if (p)
+			return p;
+		/*
+		 * Will never be NULL as the idle class always
+		 * returns a non-NULL p:
+		 */
+		class = class->next;
+	}
 }
 
 /*
@@ -3566,158 +3324,60 @@
 asmlinkage void __sched schedule(void)
 {
 	struct task_struct *prev, *next;
-	struct prio_array *array;
-	struct list_head *queue;
-	unsigned long long now;
-	unsigned long run_time;
-	int cpu, idx, new_prio;
 	long *switch_count;
 	struct rq *rq;
-
-	/*
-	 * Test if we are atomic.  Since do_exit() needs to call into
-	 * schedule() atomically, we ignore that path for now.
-	 * Otherwise, whine if we are scheduling when we should not be.
-	 */
-	if (unlikely(in_atomic() && !current->exit_state)) {
-		printk(KERN_ERR "BUG: scheduling while atomic: "
-			"%s/0x%08x/%d\n",
-			current->comm, preempt_count(), current->pid);
-		debug_show_held_locks(current);
-		if (irqs_disabled())
-			print_irqtrace_events(current);
-		dump_stack();
-	}
-	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
+	u64 now;
+	int cpu;
 
 need_resched:
 	preempt_disable();
-	prev = current;
+	cpu = smp_processor_id();
+	rq = cpu_rq(cpu);
+	rcu_qsctr_inc(cpu);
+	prev = rq->curr;
+	switch_count = &prev->nivcsw;
+
 	release_kernel_lock(prev);
 need_resched_nonpreemptible:
-	rq = this_rq();
 
-	/*
-	 * The idle thread is not allowed to schedule!
-	 * Remove this check after it has been exercised a bit.
-	 */
-	if (unlikely(prev == rq->idle) && prev->state != TASK_RUNNING) {
-		printk(KERN_ERR "bad: scheduling from the idle thread!\n");
-		dump_stack();
-	}
-
-	schedstat_inc(rq, sched_cnt);
-	now = sched_clock();
-	if (likely((long long)(now - prev->timestamp) < NS_MAX_SLEEP_AVG)) {
-		run_time = now - prev->timestamp;
-		if (unlikely((long long)(now - prev->timestamp) < 0))
-			run_time = 0;
-	} else
-		run_time = NS_MAX_SLEEP_AVG;
-
-	/*
-	 * Tasks charged proportionately less run_time at high sleep_avg to
-	 * delay them losing their interactive status
-	 */
-	run_time /= (CURRENT_BONUS(prev) ? : 1);
+	schedule_debug(prev);
 
 	spin_lock_irq(&rq->lock);
-
-	switch_count = &prev->nivcsw;
-	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
-		switch_count = &prev->nvcsw;
-		if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
-				unlikely(signal_pending(prev))))
-			prev->state = TASK_RUNNING;
-		else {
-			if (prev->state == TASK_UNINTERRUPTIBLE)
-				rq->nr_uninterruptible++;
-			deactivate_task(prev, rq);
-		}
-	}
-
-	cpu = smp_processor_id();
-	if (unlikely(!rq->nr_running)) {
-		idle_balance(cpu, rq);
-		if (!rq->nr_running) {
-			next = rq->idle;
-			rq->expired_timestamp = 0;
-			goto switch_tasks;
-		}
-	}
-
-	array = rq->active;
-	if (unlikely(!array->nr_active)) {
-		/*
-		 * Switch the active and expired arrays.
-		 */
-		schedstat_inc(rq, sched_switch);
-		rq->active = rq->expired;
-		rq->expired = array;
-		array = rq->active;
-		rq->expired_timestamp = 0;
-		rq->best_expired_prio = MAX_PRIO;
-	}
-
-	idx = sched_find_first_bit(array->bitmap);
-	queue = array->queue + idx;
-	next = list_entry(queue->next, struct task_struct, run_list);
-
-	if (!rt_task(next) && interactive_sleep(next->sleep_type)) {
-		unsigned long long delta = now - next->timestamp;
-		if (unlikely((long long)(now - next->timestamp) < 0))
-			delta = 0;
-
-		if (next->sleep_type == SLEEP_INTERACTIVE)
-			delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;
-
-		array = next->array;
-		new_prio = recalc_task_prio(next, next->timestamp + delta);
-
-		if (unlikely(next->prio != new_prio)) {
-			dequeue_task(next, array);
-			next->prio = new_prio;
-			enqueue_task(next, array);
-		}
-	}
-	next->sleep_type = SLEEP_NORMAL;
-switch_tasks:
-	if (next == rq->idle)
-		schedstat_inc(rq, sched_goidle);
-	prefetch(next);
-	prefetch_stack(next);
 	clear_tsk_need_resched(prev);
-	rcu_qsctr_inc(task_cpu(prev));
 
-	update_cpu_clock(prev, rq, now);
+	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
+		if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
+				unlikely(signal_pending(prev)))) {
+			prev->state = TASK_RUNNING;
+		} else {
+			deactivate_task(rq, prev, 1);
+		}
+		switch_count = &prev->nvcsw;
+	}
 
-	prev->sleep_avg -= run_time;
-	if ((long)prev->sleep_avg <= 0)
-		prev->sleep_avg = 0;
-	prev->timestamp = prev->last_ran = now;
+	if (unlikely(!rq->nr_running))
+		idle_balance(cpu, rq);
+
+	now = __rq_clock(rq);
+	prev->sched_class->put_prev_task(rq, prev, now);
+	next = pick_next_task(rq, prev, now);
 
 	sched_info_switch(prev, next);
+
 	if (likely(prev != next)) {
-		next->timestamp = next->last_ran = now;
 		rq->nr_switches++;
 		rq->curr = next;
 		++*switch_count;
 
-		prepare_task_switch(rq, next);
-		prev = context_switch(rq, prev, next);
-		barrier();
-		/*
-		 * this_rq must be evaluated again because prev may have moved
-		 * CPUs since it called schedule(), thus the 'rq' on its stack
-		 * frame will be invalid.
-		 */
-		finish_task_switch(this_rq(), prev);
+		context_switch(rq, prev, next); /* unlocks the rq */
 	} else
 		spin_unlock_irq(&rq->lock);
 
-	prev = current;
-	if (unlikely(reacquire_kernel_lock(prev) < 0))
+	if (unlikely(reacquire_kernel_lock(current) < 0)) {
+		cpu = smp_processor_id();
+		rq = cpu_rq(cpu);
 		goto need_resched_nonpreemptible;
+	}
 	preempt_enable_no_resched();
 	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
 		goto need_resched;
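
In outline, the rewritten schedule() now does:

	/*
	 * schedule_debug(prev);          -- atomicity checks + stats
	 * deactivate_task(rq, prev, 1);  -- only if prev is going to sleep
	 * idle_balance(cpu, rq);         -- pull work if the rq drained
	 * put_prev_task(); pick_next_task();
	 * context_switch();              -- unlocks the rq and runs
	 *                                   finish_task_switch()
	 */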
@@ -4045,74 +3705,85 @@
 }
 EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
 
-
-#define	SLEEP_ON_VAR					\
-	unsigned long flags;				\
-	wait_queue_t wait;				\
-	init_waitqueue_entry(&wait, current);
-
-#define SLEEP_ON_HEAD					\
-	spin_lock_irqsave(&q->lock,flags);		\
-	__add_wait_queue(q, &wait);			\
-	spin_unlock(&q->lock);
-
-#define	SLEEP_ON_TAIL					\
-	spin_lock_irq(&q->lock);			\
-	__remove_wait_queue(q, &wait);			\
-	spin_unlock_irqrestore(&q->lock, flags);
-
-void fastcall __sched interruptible_sleep_on(wait_queue_head_t *q)
+static inline void
+sleep_on_head(wait_queue_head_t *q, wait_queue_t *wait, unsigned long *flags)
 {
-	SLEEP_ON_VAR
+	spin_lock_irqsave(&q->lock, *flags);
+	__add_wait_queue(q, wait);
+	spin_unlock(&q->lock);
+}
+
+static inline void
+sleep_on_tail(wait_queue_head_t *q, wait_queue_t *wait, unsigned long *flags)
+{
+	spin_lock_irq(&q->lock);
+	__remove_wait_queue(q, wait);
+	spin_unlock_irqrestore(&q->lock, *flags);
+}
+
+void __sched interruptible_sleep_on(wait_queue_head_t *q)
+{
+	unsigned long flags;
+	wait_queue_t wait;
+
+	init_waitqueue_entry(&wait, current);
 
 	current->state = TASK_INTERRUPTIBLE;
 
-	SLEEP_ON_HEAD
+	sleep_on_head(q, &wait, &flags);
 	schedule();
-	SLEEP_ON_TAIL
+	sleep_on_tail(q, &wait, &flags);
 }
 EXPORT_SYMBOL(interruptible_sleep_on);
 
-long fastcall __sched
+long __sched
 interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
 {
-	SLEEP_ON_VAR
+	unsigned long flags;
+	wait_queue_t wait;
+
+	init_waitqueue_entry(&wait, current);
 
 	current->state = TASK_INTERRUPTIBLE;
 
-	SLEEP_ON_HEAD
+	sleep_on_head(q, &wait, &flags);
 	timeout = schedule_timeout(timeout);
-	SLEEP_ON_TAIL
+	sleep_on_tail(q, &wait, &flags);
 
 	return timeout;
 }
 EXPORT_SYMBOL(interruptible_sleep_on_timeout);
 
-void fastcall __sched sleep_on(wait_queue_head_t *q)
+void __sched sleep_on(wait_queue_head_t *q)
 {
-	SLEEP_ON_VAR
+	unsigned long flags;
+	wait_queue_t wait;
+
+	init_waitqueue_entry(&wait, current);
 
 	current->state = TASK_UNINTERRUPTIBLE;
 
-	SLEEP_ON_HEAD
+	sleep_on_head(q, &wait, &flags);
 	schedule();
-	SLEEP_ON_TAIL
+	sleep_on_tail(q, &wait, &flags);
 }
 EXPORT_SYMBOL(sleep_on);
 
-long fastcall __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
+long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
 {
-	SLEEP_ON_VAR
+	unsigned long flags;
+	wait_queue_t wait;
+
+	init_waitqueue_entry(&wait, current);
 
 	current->state = TASK_UNINTERRUPTIBLE;
 
-	SLEEP_ON_HEAD
+	sleep_on_head(q, &wait, &flags);
 	timeout = schedule_timeout(timeout);
-	SLEEP_ON_TAIL
+	sleep_on_tail(q, &wait, &flags);
 
 	return timeout;
 }
-
 EXPORT_SYMBOL(sleep_on_timeout);
 
 #ifdef CONFIG_RT_MUTEXES
@@ -4129,29 +3800,30 @@
  */
 void rt_mutex_setprio(struct task_struct *p, int prio)
 {
-	struct prio_array *array;
 	unsigned long flags;
+	int oldprio, on_rq;
 	struct rq *rq;
-	int oldprio;
+	u64 now;
 
 	BUG_ON(prio < 0 || prio > MAX_PRIO);
 
 	rq = task_rq_lock(p, &flags);
+	now = rq_clock(rq);
 
 	oldprio = p->prio;
-	array = p->array;
-	if (array)
-		dequeue_task(p, array);
+	on_rq = p->se.on_rq;
+	if (on_rq)
+		dequeue_task(rq, p, 0, now);
+
+	if (rt_prio(prio))
+		p->sched_class = &rt_sched_class;
+	else
+		p->sched_class = &fair_sched_class;
+
 	p->prio = prio;
 
-	if (array) {
-		/*
-		 * If changing to an RT priority then queue it
-		 * in the active array!
-		 */
-		if (rt_task(p))
-			array = rq->active;
-		enqueue_task(p, array);
+	if (on_rq) {
+		enqueue_task(rq, p, 0, now);
 		/*
 		 * Reschedule if we are currently running on this runqueue and
 		 * our priority decreased, or if we are not currently running on
@@ -4160,8 +3832,9 @@
 		if (task_running(rq, p)) {
 			if (p->prio > oldprio)
 				resched_task(rq->curr);
-		} else if (TASK_PREEMPTS_CURR(p, rq))
-			resched_task(rq->curr);
+		} else {
+			check_preempt_curr(rq, p);
+		}
 	}
 	task_rq_unlock(rq, &flags);
 }
@@ -4170,10 +3843,10 @@
 
 void set_user_nice(struct task_struct *p, long nice)
 {
-	struct prio_array *array;
-	int old_prio, delta;
+	int old_prio, delta, on_rq;
 	unsigned long flags;
 	struct rq *rq;
+	u64 now;
 
 	if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
 		return;
@@ -4182,20 +3855,21 @@
 	 * the task might be in the middle of scheduling on another CPU.
 	 */
 	rq = task_rq_lock(p, &flags);
+	now = rq_clock(rq);
 	/*
 	 * The RT priorities are set via sched_setscheduler(), but we still
 	 * allow the 'normal' nice value to be set - as expected
 	 * it won't have any effect on scheduling until the task is
-	 * not SCHED_NORMAL/SCHED_BATCH:
+	 * SCHED_FIFO/SCHED_RR:
 	 */
-	if (has_rt_policy(p)) {
+	if (task_has_rt_policy(p)) {
 		p->static_prio = NICE_TO_PRIO(nice);
 		goto out_unlock;
 	}
-	array = p->array;
-	if (array) {
-		dequeue_task(p, array);
-		dec_raw_weighted_load(rq, p);
+	on_rq = p->se.on_rq;
+	if (on_rq) {
+		dequeue_task(rq, p, 0, now);
+		dec_load(rq, p, now);
 	}
 
 	p->static_prio = NICE_TO_PRIO(nice);
@@ -4204,9 +3878,9 @@
 	p->prio = effective_prio(p);
 	delta = p->prio - old_prio;
 
-	if (array) {
-		enqueue_task(p, array);
-		inc_raw_weighted_load(rq, p);
+	if (on_rq) {
+		enqueue_task(rq, p, 0, now);
+		inc_load(rq, p, now);
 		/*
 		 * If the task increased its priority or is running and
 		 * lowered its priority, then reschedule its CPU:
@@ -4326,20 +4000,28 @@
 }
 
 /* Actually do priority change: must hold rq lock. */
-static void __setscheduler(struct task_struct *p, int policy, int prio)
+static void
+__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
 {
-	BUG_ON(p->array);
+	BUG_ON(p->se.on_rq);
 
 	p->policy = policy;
+	switch (p->policy) {
+	case SCHED_NORMAL:
+	case SCHED_BATCH:
+	case SCHED_IDLE:
+		p->sched_class = &fair_sched_class;
+		break;
+	case SCHED_FIFO:
+	case SCHED_RR:
+		p->sched_class = &rt_sched_class;
+		break;
+	}
+
 	p->rt_priority = prio;
 	p->normal_prio = normal_prio(p);
 	/* we are holding p->pi_lock already */
 	p->prio = rt_mutex_getprio(p);
-	/*
-	 * SCHED_BATCH tasks are treated as perpetual CPU hogs:
-	 */
-	if (policy == SCHED_BATCH)
-		p->sleep_avg = 0;
 	set_load_weight(p);
 }
 
@@ -4354,8 +4036,7 @@
 int sched_setscheduler(struct task_struct *p, int policy,
 		       struct sched_param *param)
 {
-	int retval, oldprio, oldpolicy = -1;
-	struct prio_array *array;
+	int retval, oldprio, oldpolicy = -1, on_rq;
 	unsigned long flags;
 	struct rq *rq;
 
@@ -4366,27 +4047,27 @@
 	if (policy < 0)
 		policy = oldpolicy = p->policy;
 	else if (policy != SCHED_FIFO && policy != SCHED_RR &&
-			policy != SCHED_NORMAL && policy != SCHED_BATCH)
+			policy != SCHED_NORMAL && policy != SCHED_BATCH &&
+			policy != SCHED_IDLE)
 		return -EINVAL;
 	/*
 	 * Valid priorities for SCHED_FIFO and SCHED_RR are
-	 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL and
-	 * SCHED_BATCH is 0.
+	 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
+	 * SCHED_BATCH and SCHED_IDLE is 0.
 	 */
 	if (param->sched_priority < 0 ||
 	    (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
 	    (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
 		return -EINVAL;
-	if (is_rt_policy(policy) != (param->sched_priority != 0))
+	if (rt_policy(policy) != (param->sched_priority != 0))
 		return -EINVAL;
 
 	/*
 	 * Allow unprivileged RT tasks to decrease priority:
 	 */
 	if (!capable(CAP_SYS_NICE)) {
-		if (is_rt_policy(policy)) {
+		if (rt_policy(policy)) {
 			unsigned long rlim_rtprio;
-			unsigned long flags;
 
 			if (!lock_task_sighand(p, &flags))
 				return -ESRCH;
@@ -4402,6 +4083,12 @@
 			    param->sched_priority > rlim_rtprio)
 				return -EPERM;
 		}
+		/*
+		 * Like positive nice levels, don't allow tasks to
+		 * move out of SCHED_IDLE either:
+		 */
+		if (p->policy == SCHED_IDLE && policy != SCHED_IDLE)
+			return -EPERM;
 
 		/* can't change other user's priorities */
 		if ((current->euid != p->euid) &&
@@ -4429,13 +4116,13 @@
 		spin_unlock_irqrestore(&p->pi_lock, flags);
 		goto recheck;
 	}
-	array = p->array;
-	if (array)
-		deactivate_task(p, rq);
+	on_rq = p->se.on_rq;
+	if (on_rq)
+		deactivate_task(rq, p, 0);
 	oldprio = p->prio;
-	__setscheduler(p, policy, param->sched_priority);
-	if (array) {
-		__activate_task(p, rq);
+	__setscheduler(rq, p, policy, param->sched_priority);
+	if (on_rq) {
+		activate_task(rq, p, 0);
 		/*
 		 * Reschedule if we are currently running on this runqueue and
 		 * our priority decreased, or if we are not currently running on
@@ -4444,8 +4131,9 @@
 		if (task_running(rq, p)) {
 			if (p->prio > oldprio)
 				resched_task(rq->curr);
-		} else if (TASK_PREEMPTS_CURR(p, rq))
-			resched_task(rq->curr);
+		} else {
+			check_preempt_curr(rq, p);
+		}
 	}
 	__task_rq_unlock(rq);
 	spin_unlock_irqrestore(&p->pi_lock, flags);
@@ -4717,41 +4405,18 @@
 /**
  * sys_sched_yield - yield the current processor to other threads.
  *
- * This function yields the current CPU by moving the calling thread
- * to the expired array. If there are no other threads running on this
- * CPU then this function will return.
+ * This function yields the current CPU to other tasks. If there are no
+ * other threads running on this CPU then this function will return.
  */
 asmlinkage long sys_sched_yield(void)
 {
 	struct rq *rq = this_rq_lock();
-	struct prio_array *array = current->array, *target = rq->expired;
 
 	schedstat_inc(rq, yld_cnt);
-	/*
-	 * We implement yielding by moving the task into the expired
-	 * queue.
-	 *
-	 * (special rule: RT tasks will just roundrobin in the active
-	 *  array.)
-	 */
-	if (rt_task(current))
-		target = rq->active;
-
-	if (array->nr_active == 1) {
+	if (unlikely(rq->nr_running == 1))
 		schedstat_inc(rq, yld_act_empty);
-		if (!rq->expired->nr_active)
-			schedstat_inc(rq, yld_both_empty);
-	} else if (!rq->expired->nr_active)
-		schedstat_inc(rq, yld_exp_empty);
-
-	if (array != target) {
-		dequeue_task(current, array);
-		enqueue_task(current, target);
-	} else
-		/*
-		 * requeue_task is cheaper so perform that if possible.
-		 */
-		requeue_task(current, array);
+	else
+		current->sched_class->yield_task(rq, current);
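
	/*
	 * Each class supplies its own yield_task(); in this series the
	 * fair class implements it by requeueing the task within its
	 * rbtree (see sched_fair.c), replacing the expired-array trick.
	 */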
 
 	/*
 	 * Since we are going to call schedule() anyway, there's
@@ -4902,6 +4567,7 @@
 		break;
 	case SCHED_NORMAL:
 	case SCHED_BATCH:
+	case SCHED_IDLE:
 		ret = 0;
 		break;
 	}
@@ -4926,6 +4592,7 @@
 		break;
 	case SCHED_NORMAL:
 	case SCHED_BATCH:
+	case SCHED_IDLE:
 		ret = 0;
 	}
 	return ret;
@@ -4960,7 +4627,7 @@
 		goto out_unlock;
 
 	jiffies_to_timespec(p->policy == SCHED_FIFO ?
-				0 : task_timeslice(p), &t);
+				0 : static_prio_timeslice(p->static_prio), &t);
 	read_unlock(&tasklist_lock);
 	retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
 out_nounlock:
@@ -5035,6 +4702,9 @@
 
 	touch_all_softlockup_watchdogs();
 
+#ifdef CONFIG_SCHED_DEBUG
+	sysrq_sched_debug_show();
+#endif
 	read_unlock(&tasklist_lock);
 	/*
 	 * Only show locks if all tasks are dumped:
@@ -5043,6 +4713,11 @@
 		debug_show_all_locks();
 }
 
+void __cpuinit init_idle_bootup_task(struct task_struct *idle)
+{
+	idle->sched_class = &idle_sched_class;
+}
+
 /**
  * init_idle - set up an idle thread for a given CPU
  * @idle: task in question
@@ -5056,13 +4731,12 @@
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
 
-	idle->timestamp = sched_clock();
-	idle->sleep_avg = 0;
-	idle->array = NULL;
+	__sched_fork(idle);
+	idle->se.exec_start = sched_clock();
+
 	idle->prio = idle->normal_prio = MAX_PRIO;
-	idle->state = TASK_RUNNING;
 	idle->cpus_allowed = cpumask_of_cpu(cpu);
-	set_task_cpu(idle, cpu);
+	__set_task_cpu(idle, cpu);
 
 	spin_lock_irqsave(&rq->lock, flags);
 	rq->curr = rq->idle = idle;
@@ -5077,6 +4751,10 @@
 #else
 	task_thread_info(idle)->preempt_count = 0;
 #endif
+	/*
+	 * The idle tasks have their own, simple scheduling class:
+	 */
+	idle->sched_class = &idle_sched_class;
 }
 
 /*
@@ -5088,6 +4766,28 @@
  */
 cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
 
+/*
+ * Increase the granularity value when there are more CPUs,
+ * because with more CPUs the 'effective latency' as visible
+ * to users decreases. But the relationship is not linear,
+ * so pick a second-best guess by going with the log2 of the
+ * number of CPUs.
+ *
+ * This idea comes from the SD scheduler of Con Kolivas:
+ */
+static inline void sched_init_granularity(void)
+{
+	unsigned int factor = 1 + ilog2(num_online_cpus());
+	const unsigned long gran_limit = 10000000;
+
+	sysctl_sched_granularity *= factor;
+	if (sysctl_sched_granularity > gran_limit)
+		sysctl_sched_granularity = gran_limit;
+
+	sysctl_sched_runtime_limit = sysctl_sched_granularity * 4;
+	sysctl_sched_wakeup_granularity = sysctl_sched_granularity / 2;
+}
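+
+/*
+ * Worked example (illustrative, not part of this patch; assumes
+ * HZ=1000): on an 8-CPU box, ilog2(8) = 3, so factor = 4 and the
+ * default 2 msec granularity becomes 8 msec, still below the 10 msec
+ * gran_limit. That yields a runtime limit of 32 msec and a wakeup
+ * granularity of 4 msec.
+ */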
+
 #ifdef CONFIG_SMP
 /*
  * This is how migration works:
@@ -5161,7 +4861,7 @@
 static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 {
 	struct rq *rq_dest, *rq_src;
-	int ret = 0;
+	int ret = 0, on_rq;
 
 	if (unlikely(cpu_is_offline(dest_cpu)))
 		return ret;
@@ -5177,20 +4877,13 @@
 	if (!cpu_isset(dest_cpu, p->cpus_allowed))
 		goto out;
 
+	on_rq = p->se.on_rq;
+	if (on_rq)
+		deactivate_task(rq_src, p, 0);
 	set_task_cpu(p, dest_cpu);
-	if (p->array) {
-		/*
-		 * Sync timestamp with rq_dest's before activating.
-		 * The same thing could be achieved by doing this step
-		 * afterwards, and pretending it was a local activate.
-		 * This way is cleaner and logically correct.
-		 */
-		p->timestamp = p->timestamp - rq_src->most_recent_timestamp
-				+ rq_dest->most_recent_timestamp;
-		deactivate_task(p, rq_src);
-		__activate_task(p, rq_dest);
-		if (TASK_PREEMPTS_CURR(p, rq_dest))
-			resched_task(rq_dest->curr);
+	if (on_rq) {
+		activate_task(rq_dest, p, 0);
+		check_preempt_curr(rq_dest, p);
 	}
 	ret = 1;
 out:
@@ -5342,7 +5035,8 @@
 	write_unlock_irq(&tasklist_lock);
 }
 
-/* Schedules idle task to be the next runnable task on current CPU.
+/*
+ * Schedules idle task to be the next runnable task on current CPU.
  * It does so by boosting its priority to highest possible and adding it to
  * the _front_ of the runqueue. Used by CPU offline code.
  */
@@ -5362,10 +5056,10 @@
 	 */
 	spin_lock_irqsave(&rq->lock, flags);
 
-	__setscheduler(p, SCHED_FIFO, MAX_RT_PRIO-1);
+	__setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
 
 	/* Add idle task to the _front_ of its priority queue: */
-	__activate_idle_task(p, rq);
+	activate_idle_task(p, rq);
 
 	spin_unlock_irqrestore(&rq->lock, flags);
 }
@@ -5415,16 +5109,15 @@
 static void migrate_dead_tasks(unsigned int dead_cpu)
 {
 	struct rq *rq = cpu_rq(dead_cpu);
-	unsigned int arr, i;
+	struct task_struct *next;
 
-	for (arr = 0; arr < 2; arr++) {
-		for (i = 0; i < MAX_PRIO; i++) {
-			struct list_head *list = &rq->arrays[arr].queue[i];
-
-			while (!list_empty(list))
-				migrate_dead(dead_cpu, list_entry(list->next,
-					     struct task_struct, run_list));
-		}
+	for ( ; ; ) {
+		if (!rq->nr_running)
+			break;
+		next = pick_next_task(rq, rq->curr, rq_clock(rq));
+		if (!next)
+			break;
+		migrate_dead(dead_cpu, next);
 	}
 }
 #endif /* CONFIG_HOTPLUG_CPU */
@@ -5448,14 +5141,14 @@
 
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
-		p = kthread_create(migration_thread, hcpu, "migration/%d",cpu);
+		p = kthread_create(migration_thread, hcpu, "migration/%d", cpu);
 		if (IS_ERR(p))
 			return NOTIFY_BAD;
 		p->flags |= PF_NOFREEZE;
 		kthread_bind(p, cpu);
 		/* Must be high prio: stop_machine expects to yield to it. */
 		rq = task_rq_lock(p, &flags);
-		__setscheduler(p, SCHED_FIFO, MAX_RT_PRIO-1);
+		__setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
 		task_rq_unlock(rq, &flags);
 		cpu_rq(cpu)->migration_thread = p;
 		break;
@@ -5486,9 +5179,10 @@
 		rq->migration_thread = NULL;
 		/* Idle task back to normal (off runqueue, low prio) */
 		rq = task_rq_lock(rq->idle, &flags);
-		deactivate_task(rq->idle, rq);
+		deactivate_task(rq, rq->idle, 0);
 		rq->idle->static_prio = MAX_PRIO;
-		__setscheduler(rq->idle, SCHED_NORMAL, 0);
+		__setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
+		rq->idle->sched_class = &idle_sched_class;
 		migrate_dead_tasks(cpu);
 		task_rq_unlock(rq, &flags);
 		migrate_nr_uninterruptible(rq);
@@ -5797,483 +5491,6 @@
 
 #define SD_NODES_PER_DOMAIN 16
 
-/*
- * Self-tuning task migration cost measurement between source and target CPUs.
- *
- * This is done by measuring the cost of manipulating buffers of varying
- * sizes. For a given buffer-size here are the steps that are taken:
- *
- * 1) the source CPU reads+dirties a shared buffer
- * 2) the target CPU reads+dirties the same shared buffer
- *
- * We measure how long they take, in the following 4 scenarios:
- *
- *  - source: CPU1, target: CPU2 | cost1
- *  - source: CPU2, target: CPU1 | cost2
- *  - source: CPU1, target: CPU1 | cost3
- *  - source: CPU2, target: CPU2 | cost4
- *
- * We then calculate the cost3+cost4-cost1-cost2 difference - this is
- * the cost of migration.
- *
- * We then start off from a small buffer-size and iterate up to larger
- * buffer sizes, in 5% steps - measuring each buffer-size separately, and
- * doing a maximum search for the cost. (The maximum cost for a migration
- * normally occurs when the working set size is around the effective cache
- * size.)
- */
-#define SEARCH_SCOPE		2
-#define MIN_CACHE_SIZE		(64*1024U)
-#define DEFAULT_CACHE_SIZE	(5*1024*1024U)
-#define ITERATIONS		1
-#define SIZE_THRESH		130
-#define COST_THRESH		130
-
-/*
- * The migration cost is a function of 'domain distance'. Domain
- * distance is the number of steps a CPU has to iterate down its
- * domain tree to share a domain with the other CPU. The farther
- * two CPUs are from each other, the larger the distance gets.
- *
- * Note that we use the distance only to cache measurement results,
- * the distance value is not used numerically otherwise. When two
- * CPUs have the same distance it is assumed that the migration
- * cost is the same. (this is a simplification but quite practical)
- */
-#define MAX_DOMAIN_DISTANCE 32
-
-static unsigned long long migration_cost[MAX_DOMAIN_DISTANCE] =
-		{ [ 0 ... MAX_DOMAIN_DISTANCE-1 ] =
-/*
- * Architectures may override the migration cost and thus avoid
- * boot-time calibration. Unit is nanoseconds. Mostly useful for
- * virtualized hardware:
- */
-#ifdef CONFIG_DEFAULT_MIGRATION_COST
-			CONFIG_DEFAULT_MIGRATION_COST
-#else
-			-1LL
-#endif
-};
-
-/*
- * Allow override of migration cost - in units of microseconds.
- * E.g. migration_cost=1000,2000,3000 will set up a level-1 cost
- * of 1 msec, level-2 cost of 2 msecs and level3 cost of 3 msecs:
- */
-static int __init migration_cost_setup(char *str)
-{
-	int ints[MAX_DOMAIN_DISTANCE+1], i;
-
-	str = get_options(str, ARRAY_SIZE(ints), ints);
-
-	printk("#ints: %d\n", ints[0]);
-	for (i = 1; i <= ints[0]; i++) {
-		migration_cost[i-1] = (unsigned long long)ints[i]*1000;
-		printk("migration_cost[%d]: %Ld\n", i-1, migration_cost[i-1]);
-	}
-	return 1;
-}
-
-__setup ("migration_cost=", migration_cost_setup);
-
-/*
- * Global multiplier (divisor) for migration-cutoff values,
- * in percentiles. E.g. use a value of 150 to get 1.5 times
- * longer cache-hot cutoff times.
- *
- * (We scale it from 100 to 128 to long long handling easier.)
- */
-
-#define MIGRATION_FACTOR_SCALE 128
-
-static unsigned int migration_factor = MIGRATION_FACTOR_SCALE;
-
-static int __init setup_migration_factor(char *str)
-{
-	get_option(&str, &migration_factor);
-	migration_factor = migration_factor * MIGRATION_FACTOR_SCALE / 100;
-	return 1;
-}
-
-__setup("migration_factor=", setup_migration_factor);
-
-/*
- * Estimated distance of two CPUs, measured via the number of domains
- * we have to pass for the two CPUs to be in the same span:
- */
-static unsigned long domain_distance(int cpu1, int cpu2)
-{
-	unsigned long distance = 0;
-	struct sched_domain *sd;
-
-	for_each_domain(cpu1, sd) {
-		WARN_ON(!cpu_isset(cpu1, sd->span));
-		if (cpu_isset(cpu2, sd->span))
-			return distance;
-		distance++;
-	}
-	if (distance >= MAX_DOMAIN_DISTANCE) {
-		WARN_ON(1);
-		distance = MAX_DOMAIN_DISTANCE-1;
-	}
-
-	return distance;
-}
-
-static unsigned int migration_debug;
-
-static int __init setup_migration_debug(char *str)
-{
-	get_option(&str, &migration_debug);
-	return 1;
-}
-
-__setup("migration_debug=", setup_migration_debug);
-
-/*
- * Maximum cache-size that the scheduler should try to measure.
- * Architectures with larger caches should tune this up during
- * bootup. Gets used in the domain-setup code (i.e. during SMP
- * bootup).
- */
-unsigned int max_cache_size;
-
-static int __init setup_max_cache_size(char *str)
-{
-	get_option(&str, &max_cache_size);
-	return 1;
-}
-
-__setup("max_cache_size=", setup_max_cache_size);
-
-/*
- * Dirty a big buffer in a hard-to-predict (for the L2 cache) way. This
- * is the operation that is timed, so we try to generate unpredictable
- * cachemisses that still end up filling the L2 cache:
- */
-static void touch_cache(void *__cache, unsigned long __size)
-{
-	unsigned long size = __size / sizeof(long);
-	unsigned long chunk1 = size / 3;
-	unsigned long chunk2 = 2 * size / 3;
-	unsigned long *cache = __cache;
-	int i;
-
-	for (i = 0; i < size/6; i += 8) {
-		switch (i % 6) {
-			case 0: cache[i]++;
-			case 1: cache[size-1-i]++;
-			case 2: cache[chunk1-i]++;
-			case 3: cache[chunk1+i]++;
-			case 4: cache[chunk2-i]++;
-			case 5: cache[chunk2+i]++;
-		}
-	}
-}
-
-/*
- * Measure the cache-cost of one task migration. Returns in units of nsec.
- */
-static unsigned long long
-measure_one(void *cache, unsigned long size, int source, int target)
-{
-	cpumask_t mask, saved_mask;
-	unsigned long long t0, t1, t2, t3, cost;
-
-	saved_mask = current->cpus_allowed;
-
-	/*
-	 * Flush source caches to RAM and invalidate them:
-	 */
-	sched_cacheflush();
-
-	/*
-	 * Migrate to the source CPU:
-	 */
-	mask = cpumask_of_cpu(source);
-	set_cpus_allowed(current, mask);
-	WARN_ON(smp_processor_id() != source);
-
-	/*
-	 * Dirty the working set:
-	 */
-	t0 = sched_clock();
-	touch_cache(cache, size);
-	t1 = sched_clock();
-
-	/*
-	 * Migrate to the target CPU, dirty the L2 cache and access
-	 * the shared buffer. (which represents the working set
-	 * of a migrated task.)
-	 */
-	mask = cpumask_of_cpu(target);
-	set_cpus_allowed(current, mask);
-	WARN_ON(smp_processor_id() != target);
-
-	t2 = sched_clock();
-	touch_cache(cache, size);
-	t3 = sched_clock();
-
-	cost = t1-t0 + t3-t2;
-
-	if (migration_debug >= 2)
-		printk("[%d->%d]: %8Ld %8Ld %8Ld => %10Ld.\n",
-			source, target, t1-t0, t1-t0, t3-t2, cost);
-	/*
-	 * Flush target caches to RAM and invalidate them:
-	 */
-	sched_cacheflush();
-
-	set_cpus_allowed(current, saved_mask);
-
-	return cost;
-}
-
-/*
- * Measure a series of task migrations and return the average
- * result. Since this code runs early during bootup the system
- * is 'undisturbed' and the average latency makes sense.
- *
- * The algorithm in essence auto-detects the relevant cache-size,
- * so it will properly detect different cachesizes for different
- * cache-hierarchies, depending on how the CPUs are connected.
- *
- * Architectures can prime the upper limit of the search range via
- * max_cache_size, otherwise the search range defaults to 20MB...64K.
- */
-static unsigned long long
-measure_cost(int cpu1, int cpu2, void *cache, unsigned int size)
-{
-	unsigned long long cost1, cost2;
-	int i;
-
-	/*
-	 * Measure the migration cost of 'size' bytes, over an
-	 * average of 10 runs:
-	 *
-	 * (We perturb the cache size by a small (0..4k)
-	 *  value to compensate size/alignment related artifacts.
-	 *  We also subtract the cost of the operation done on
-	 *  the same CPU.)
-	 */
-	cost1 = 0;
-
-	/*
-	 * dry run, to make sure we start off cache-cold on cpu1,
-	 * and to get any vmalloc pagefaults in advance:
-	 */
-	measure_one(cache, size, cpu1, cpu2);
-	for (i = 0; i < ITERATIONS; i++)
-		cost1 += measure_one(cache, size - i * 1024, cpu1, cpu2);
-
-	measure_one(cache, size, cpu2, cpu1);
-	for (i = 0; i < ITERATIONS; i++)
-		cost1 += measure_one(cache, size - i * 1024, cpu2, cpu1);
-
-	/*
-	 * (We measure the non-migrating [cached] cost on both
-	 *  cpu1 and cpu2, to handle CPUs with different speeds)
-	 */
-	cost2 = 0;
-
-	measure_one(cache, size, cpu1, cpu1);
-	for (i = 0; i < ITERATIONS; i++)
-		cost2 += measure_one(cache, size - i * 1024, cpu1, cpu1);
-
-	measure_one(cache, size, cpu2, cpu2);
-	for (i = 0; i < ITERATIONS; i++)
-		cost2 += measure_one(cache, size - i * 1024, cpu2, cpu2);
-
-	/*
-	 * Get the per-iteration migration cost:
-	 */
-	do_div(cost1, 2 * ITERATIONS);
-	do_div(cost2, 2 * ITERATIONS);
-
-	return cost1 - cost2;
-}
-
-static unsigned long long measure_migration_cost(int cpu1, int cpu2)
-{
-	unsigned long long max_cost = 0, fluct = 0, avg_fluct = 0;
-	unsigned int max_size, size, size_found = 0;
-	long long cost = 0, prev_cost;
-	void *cache;
-
-	/*
-	 * Search from max_cache_size*5 down to 64K - the real relevant
-	 * cachesize has to lie somewhere inbetween.
-	 */
-	if (max_cache_size) {
-		max_size = max(max_cache_size * SEARCH_SCOPE, MIN_CACHE_SIZE);
-		size = max(max_cache_size / SEARCH_SCOPE, MIN_CACHE_SIZE);
-	} else {
-		/*
-		 * Since we have no estimation about the relevant
-		 * search range
-		 */
-		max_size = DEFAULT_CACHE_SIZE * SEARCH_SCOPE;
-		size = MIN_CACHE_SIZE;
-	}
-
-	if (!cpu_online(cpu1) || !cpu_online(cpu2)) {
-		printk("cpu %d and %d not both online!\n", cpu1, cpu2);
-		return 0;
-	}
-
-	/*
-	 * Allocate the working set:
-	 */
-	cache = vmalloc(max_size);
-	if (!cache) {
-		printk("could not vmalloc %d bytes for cache!\n", 2 * max_size);
-		return 1000000; /* return 1 msec on very small boxen */
-	}
-
-	while (size <= max_size) {
-		prev_cost = cost;
-		cost = measure_cost(cpu1, cpu2, cache, size);
-
-		/*
-		 * Update the max:
-		 */
-		if (cost > 0) {
-			if (max_cost < cost) {
-				max_cost = cost;
-				size_found = size;
-			}
-		}
-		/*
-		 * Calculate average fluctuation, we use this to prevent
-		 * noise from triggering an early break out of the loop:
-		 */
-		fluct = abs(cost - prev_cost);
-		avg_fluct = (avg_fluct + fluct)/2;
-
-		if (migration_debug)
-			printk("-> [%d][%d][%7d] %3ld.%ld [%3ld.%ld] (%ld): "
-				"(%8Ld %8Ld)\n",
-				cpu1, cpu2, size,
-				(long)cost / 1000000,
-				((long)cost / 100000) % 10,
-				(long)max_cost / 1000000,
-				((long)max_cost / 100000) % 10,
-				domain_distance(cpu1, cpu2),
-				cost, avg_fluct);
-
-		/*
-		 * If we iterated at least 20% past the previous maximum,
-		 * and the cost has dropped by more than 20% already,
-		 * (taking fluctuations into account) then we assume to
-		 * have found the maximum and break out of the loop early:
-		 */
-		if (size_found && (size*100 > size_found*SIZE_THRESH))
-			if (cost+avg_fluct <= 0 ||
-				max_cost*100 > (cost+avg_fluct)*COST_THRESH) {
-
-				if (migration_debug)
-					printk("-> found max.\n");
-				break;
-			}
-		/*
-		 * Increase the cachesize in 10% steps:
-		 */
-		size = size * 10 / 9;
-	}
-
-	if (migration_debug)
-		printk("[%d][%d] working set size found: %d, cost: %Ld\n",
-			cpu1, cpu2, size_found, max_cost);
-
-	vfree(cache);
-
-	/*
-	 * A task is considered 'cache cold' if at least 2 times
-	 * the worst-case cost of migration has passed.
-	 *
-	 * (this limit is only listened to if the load-balancing
-	 * situation is 'nice' - if there is a large imbalance we
-	 * ignore it for the sake of CPU utilization and
-	 * processing fairness.)
-	 */
-	return 2 * max_cost * migration_factor / MIGRATION_FACTOR_SCALE;
-}
-
-static void calibrate_migration_costs(const cpumask_t *cpu_map)
-{
-	int cpu1 = -1, cpu2 = -1, cpu, orig_cpu = raw_smp_processor_id();
-	unsigned long j0, j1, distance, max_distance = 0;
-	struct sched_domain *sd;
-
-	j0 = jiffies;
-
-	/*
-	 * First pass - calculate the cacheflush times:
-	 */
-	for_each_cpu_mask(cpu1, *cpu_map) {
-		for_each_cpu_mask(cpu2, *cpu_map) {
-			if (cpu1 == cpu2)
-				continue;
-			distance = domain_distance(cpu1, cpu2);
-			max_distance = max(max_distance, distance);
-			/*
-			 * No result cached yet?
-			 */
-			if (migration_cost[distance] == -1LL)
-				migration_cost[distance] =
-					measure_migration_cost(cpu1, cpu2);
-		}
-	}
-	/*
-	 * Second pass - update the sched domain hierarchy with
-	 * the new cache-hot-time estimations:
-	 */
-	for_each_cpu_mask(cpu, *cpu_map) {
-		distance = 0;
-		for_each_domain(cpu, sd) {
-			sd->cache_hot_time = migration_cost[distance];
-			distance++;
-		}
-	}
-	/*
-	 * Print the matrix:
-	 */
-	if (migration_debug)
-		printk("migration: max_cache_size: %d, cpu: %d MHz:\n",
-			max_cache_size,
-#ifdef CONFIG_X86
-			cpu_khz/1000
-#else
-			-1
-#endif
-		);
-	if (system_state == SYSTEM_BOOTING && num_online_cpus() > 1) {
-		printk("migration_cost=");
-		for (distance = 0; distance <= max_distance; distance++) {
-			if (distance)
-				printk(",");
-			printk("%ld", (long)migration_cost[distance] / 1000);
-		}
-		printk("\n");
-	}
-	j1 = jiffies;
-	if (migration_debug)
-		printk("migration: %ld seconds\n", (j1-j0) / HZ);
-
-	/*
-	 * Move back to the original CPU. NUMA-Q gets confused
-	 * if we migrate to another quad during bootup.
-	 */
-	if (raw_smp_processor_id() != orig_cpu) {
-		cpumask_t mask = cpumask_of_cpu(orig_cpu),
-			saved_mask = current->cpus_allowed;
-
-		set_cpus_allowed(current, mask);
-		set_cpus_allowed(current, saved_mask);
-	}
-}
-
 #ifdef CONFIG_NUMA
 
 /**
@@ -6574,7 +5791,6 @@
 static int build_sched_domains(const cpumask_t *cpu_map)
 {
 	int i;
-	struct sched_domain *sd;
 #ifdef CONFIG_NUMA
 	struct sched_group **sched_group_nodes = NULL;
 	int sd_allnodes = 0;
@@ -6582,7 +5798,7 @@
 	/*
 	 * Allocate the per-node list of sched groups
 	 */
-	sched_group_nodes = kzalloc(sizeof(struct sched_group*)*MAX_NUMNODES,
+	sched_group_nodes = kzalloc(sizeof(struct sched_group *)*MAX_NUMNODES,
 					   GFP_KERNEL);
 	if (!sched_group_nodes) {
 		printk(KERN_WARNING "Can not alloc sched group node list\n");
@@ -6601,8 +5817,8 @@
 		cpus_and(nodemask, nodemask, *cpu_map);
 
 #ifdef CONFIG_NUMA
-		if (cpus_weight(*cpu_map)
-				> SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) {
+		if (cpus_weight(*cpu_map) >
+				SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) {
 			sd = &per_cpu(allnodes_domains, i);
 			*sd = SD_ALLNODES_INIT;
 			sd->span = *cpu_map;
@@ -6661,7 +5877,8 @@
 		if (i != first_cpu(this_sibling_map))
 			continue;
 
-		init_sched_build_groups(this_sibling_map, cpu_map, &cpu_to_cpu_group);
+		init_sched_build_groups(this_sibling_map, cpu_map,
+					&cpu_to_cpu_group);
 	}
 #endif
 
@@ -6672,11 +5889,11 @@
 		cpus_and(this_core_map, this_core_map, *cpu_map);
 		if (i != first_cpu(this_core_map))
 			continue;
-		init_sched_build_groups(this_core_map, cpu_map, &cpu_to_core_group);
+		init_sched_build_groups(this_core_map, cpu_map,
+					&cpu_to_core_group);
 	}
 #endif
 
-
 	/* Set up physical groups */
 	for (i = 0; i < MAX_NUMNODES; i++) {
 		cpumask_t nodemask = node_to_cpumask(i);
@@ -6691,7 +5908,8 @@
 #ifdef CONFIG_NUMA
 	/* Set up node groups */
 	if (sd_allnodes)
-		init_sched_build_groups(*cpu_map, cpu_map, &cpu_to_allnodes_group);
+		init_sched_build_groups(*cpu_map, cpu_map,
+					&cpu_to_allnodes_group);
 
 	for (i = 0; i < MAX_NUMNODES; i++) {
 		/* Set up node groups */
@@ -6719,6 +5937,7 @@
 		sched_group_nodes[i] = sg;
 		for_each_cpu_mask(j, nodemask) {
 			struct sched_domain *sd;
+
 			sd = &per_cpu(node_domains, j);
 			sd->groups = sg;
 		}
@@ -6763,19 +5982,22 @@
 	/* Calculate CPU power for physical packages and nodes */
 #ifdef CONFIG_SCHED_SMT
 	for_each_cpu_mask(i, *cpu_map) {
-		sd = &per_cpu(cpu_domains, i);
+		struct sched_domain *sd = &per_cpu(cpu_domains, i);
+
 		init_sched_groups_power(i, sd);
 	}
 #endif
 #ifdef CONFIG_SCHED_MC
 	for_each_cpu_mask(i, *cpu_map) {
-		sd = &per_cpu(core_domains, i);
+		struct sched_domain *sd = &per_cpu(core_domains, i);
+
 		init_sched_groups_power(i, sd);
 	}
 #endif
 
 	for_each_cpu_mask(i, *cpu_map) {
-		sd = &per_cpu(phys_domains, i);
+		struct sched_domain *sd = &per_cpu(phys_domains, i);
+
 		init_sched_groups_power(i, sd);
 	}
 
@@ -6803,10 +6025,6 @@
 #endif
 		cpu_attach_domain(sd, i);
 	}
-	/*
-	 * Tune cache-hot values:
-	 */
-	calibrate_migration_costs(cpu_map);
 
 	return 0;
 
@@ -7013,10 +6231,12 @@
 	/* Move init over to a non-isolated CPU */
 	if (set_cpus_allowed(current, non_isolated_cpus) < 0)
 		BUG();
+	sched_init_granularity();
 }
 #else
 void __init sched_init_smp(void)
 {
+	sched_init_granularity();
 }
 #endif /* CONFIG_SMP */
 
@@ -7030,28 +6250,51 @@
 		&& addr < (unsigned long)__sched_text_end);
 }
 
+static inline void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
+{
+	cfs_rq->tasks_timeline = RB_ROOT;
+	cfs_rq->fair_clock = 1;
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	cfs_rq->rq = rq;
+#endif
+}
+
 void __init sched_init(void)
 {
-	int i, j, k;
+	u64 now = sched_clock();
 	int highest_cpu = 0;
+	int i, j;
+
+	/*
+	 * Link up the scheduling class hierarchy:
+	 */
+	rt_sched_class.next = &fair_sched_class;
+	fair_sched_class.next = &idle_sched_class;
+	idle_sched_class.next = NULL;
 
 	for_each_possible_cpu(i) {
-		struct prio_array *array;
+		struct rt_prio_array *array;
 		struct rq *rq;
 
 		rq = cpu_rq(i);
 		spin_lock_init(&rq->lock);
 		lockdep_set_class(&rq->lock, &rq->rq_lock_key);
 		rq->nr_running = 0;
-		rq->active = rq->arrays;
-		rq->expired = rq->arrays + 1;
-		rq->best_expired_prio = MAX_PRIO;
+		rq->clock = 1;
+		init_cfs_rq(&rq->cfs, rq);
+#ifdef CONFIG_FAIR_GROUP_SCHED
+		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
+		list_add(&rq->cfs.leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
+#endif
+		rq->ls.load_update_last = now;
+		rq->ls.load_update_start = now;
 
+		for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
+			rq->cpu_load[j] = 0;
 #ifdef CONFIG_SMP
 		rq->sd = NULL;
-		for (j = 1; j < 3; j++)
-			rq->cpu_load[j] = 0;
 		rq->active_balance = 0;
+		rq->next_balance = jiffies;
 		rq->push_cpu = 0;
 		rq->cpu = i;
 		rq->migration_thread = NULL;
@@ -7059,16 +6302,14 @@
 #endif
 		atomic_set(&rq->nr_iowait, 0);
 
-		for (j = 0; j < 2; j++) {
-			array = rq->arrays + j;
-			for (k = 0; k < MAX_PRIO; k++) {
-				INIT_LIST_HEAD(array->queue + k);
-				__clear_bit(k, array->bitmap);
-			}
-			// delimiter for bitsearch
-			__set_bit(MAX_PRIO, array->bitmap);
+		array = &rq->rt.active;
+		for (j = 0; j < MAX_RT_PRIO; j++) {
+			INIT_LIST_HEAD(array->queue + j);
+			__clear_bit(j, array->bitmap);
 		}
 		highest_cpu = i;
+		/* delimiter for bitsearch: */
+		__set_bit(MAX_RT_PRIO, array->bitmap);
 	}
 
 	set_load_weight(&init_task);
@@ -7095,6 +6336,10 @@
 	 * when this runqueue becomes "idle".
 	 */
 	init_idle(current, smp_processor_id());
+	/*
+	 * During early bootup we pretend to be a normal task:
+	 */
+	current->sched_class = &fair_sched_class;
 }
 
 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
@@ -7125,29 +6370,55 @@
 #ifdef CONFIG_MAGIC_SYSRQ
 void normalize_rt_tasks(void)
 {
-	struct prio_array *array;
 	struct task_struct *g, *p;
 	unsigned long flags;
 	struct rq *rq;
+	int on_rq;
 
 	read_lock_irq(&tasklist_lock);
-
 	do_each_thread(g, p) {
-		if (!rt_task(p))
+		p->se.fair_key			= 0;
+		p->se.wait_runtime		= 0;
+		p->se.wait_start_fair		= 0;
+		p->se.wait_start		= 0;
+		p->se.exec_start		= 0;
+		p->se.sleep_start		= 0;
+		p->se.sleep_start_fair		= 0;
+		p->se.block_start		= 0;
+		task_rq(p)->cfs.fair_clock	= 0;
+		task_rq(p)->clock		= 0;
+
+		if (!rt_task(p)) {
+			/*
+			 * Renice negative nice level userspace
+			 * tasks back to 0:
+			 */
+			if (TASK_NICE(p) < 0 && p->mm)
+				set_user_nice(p, 0);
 			continue;
+		}
 
 		spin_lock_irqsave(&p->pi_lock, flags);
 		rq = __task_rq_lock(p);
+#ifdef CONFIG_SMP
+		/*
+		 * Do not touch the migration thread:
+		 */
+		if (p == rq->migration_thread)
+			goto out_unlock;
+#endif
 
-		array = p->array;
-		if (array)
-			deactivate_task(p, task_rq(p));
-		__setscheduler(p, SCHED_NORMAL, 0);
-		if (array) {
-			__activate_task(p, task_rq(p));
+		on_rq = p->se.on_rq;
+		if (on_rq)
+			deactivate_task(task_rq(p), p, 0);
+		__setscheduler(rq, p, SCHED_NORMAL, 0);
+		if (on_rq) {
+			activate_task(task_rq(p), p, 0);
 			resched_task(rq->curr);
 		}
-
+#ifdef CONFIG_SMP
+ out_unlock:
+#endif
 		__task_rq_unlock(rq);
 		spin_unlock_irqrestore(&p->pi_lock, flags);
 	} while_each_thread(g, p);
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
new file mode 100644
index 0000000..1baf87c
--- /dev/null
+++ b/kernel/sched_debug.c
@@ -0,0 +1,275 @@
+/*
+ * kernel/sched_debug.c
+ *
+ * Print the CFS rbtree
+ *
+ * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/kallsyms.h>
+#include <linux/utsname.h>
+
+/*
+ * This allows printing both to /proc/sched_debug and
+ * to the console
+ */
+#define SEQ_printf(m, x...)			\
+ do {						\
+	if (m)					\
+		seq_printf(m, x);		\
+	else					\
+		printk(x);			\
+ } while (0)
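+
+/*
+ * Usage note (illustrative): sysrq_sched_debug_show() below calls
+ * sched_debug_show(NULL, NULL), so with m == NULL every SEQ_printf()
+ * falls through to printk() and the same output reaches the console.
+ */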
+
+static void
+print_task(struct seq_file *m, struct rq *rq, struct task_struct *p, u64 now)
+{
+	if (rq->curr == p)
+		SEQ_printf(m, "R");
+	else
+		SEQ_printf(m, " ");
+
+	SEQ_printf(m, "%15s %5d %15Ld %13Ld %13Ld %9Ld %5d "
+		      "%15Ld %15Ld %15Ld %15Ld %15Ld\n",
+		p->comm, p->pid,
+		(long long)p->se.fair_key,
+		(long long)(p->se.fair_key - rq->cfs.fair_clock),
+		(long long)p->se.wait_runtime,
+		(long long)(p->nvcsw + p->nivcsw),
+		p->prio,
+		(long long)p->se.sum_exec_runtime,
+		(long long)p->se.sum_wait_runtime,
+		(long long)p->se.sum_sleep_runtime,
+		(long long)p->se.wait_runtime_overruns,
+		(long long)p->se.wait_runtime_underruns);
+}
+
+static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu, u64 now)
+{
+	struct task_struct *g, *p;
+
+	SEQ_printf(m,
+	"\nrunnable tasks:\n"
+	"            task   PID        tree-key         delta       waiting"
+	"  switches  prio"
+	"        sum-exec        sum-wait       sum-sleep"
+	"    wait-overrun   wait-underrun\n"
+	"------------------------------------------------------------------"
+	"----------------"
+	"------------------------------------------------"
+	"--------------------------------\n");
+
+	read_lock_irq(&tasklist_lock);
+
+	do_each_thread(g, p) {
+		if (!p->se.on_rq || task_cpu(p) != rq_cpu)
+			continue;
+
+		print_task(m, rq, p, now);
+	} while_each_thread(g, p);
+
+	read_unlock_irq(&tasklist_lock);
+}
+
+static void
+print_cfs_rq_runtime_sum(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
+{
+	s64 wait_runtime_rq_sum = 0;
+	struct task_struct *p;
+	struct rb_node *curr;
+	unsigned long flags;
+	struct rq *rq = &per_cpu(runqueues, cpu);
+
+	spin_lock_irqsave(&rq->lock, flags);
+	curr = first_fair(cfs_rq);
+	while (curr) {
+		p = rb_entry(curr, struct task_struct, se.run_node);
+		wait_runtime_rq_sum += p->se.wait_runtime;
+
+		curr = rb_next(curr);
+	}
+	spin_unlock_irqrestore(&rq->lock, flags);
+
+	SEQ_printf(m, "  .%-30s: %Ld\n", "wait_runtime_rq_sum",
+		(long long)wait_runtime_rq_sum);
+}
+
+void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq, u64 now)
+{
+	SEQ_printf(m, "\ncfs_rq %p\n", cfs_rq);
+
+#define P(x) \
+	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(cfs_rq->x))
+
+	P(fair_clock);
+	P(exec_clock);
+	P(wait_runtime);
+	P(wait_runtime_overruns);
+	P(wait_runtime_underruns);
+	P(sleeper_bonus);
+#undef P
+
+	print_cfs_rq_runtime_sum(m, cpu, cfs_rq);
+}
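+
+/*
+ * Note on the P(x) helper above (illustrative): #x stringizes the
+ * field name, so e.g. P(fair_clock) prints ".fair_clock", left
+ * justified in a 30-character column, followed by its value; the
+ * field name never has to be repeated by hand.
+ */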
+
+static void print_cpu(struct seq_file *m, int cpu, u64 now)
+{
+	struct rq *rq = &per_cpu(runqueues, cpu);
+
+#ifdef CONFIG_X86
+	{
+		unsigned int freq = cpu_khz ? : 1;
+
+		SEQ_printf(m, "\ncpu#%d, %u.%03u MHz\n",
+			   cpu, freq / 1000, (freq % 1000));
+	}
+#else
+	SEQ_printf(m, "\ncpu#%d\n", cpu);
+#endif
+
+#define P(x) \
+	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x))
+
+	P(nr_running);
+	SEQ_printf(m, "  .%-30s: %lu\n", "load",
+		   rq->ls.load.weight);
+	P(ls.delta_fair);
+	P(ls.delta_exec);
+	P(nr_switches);
+	P(nr_load_updates);
+	P(nr_uninterruptible);
+	SEQ_printf(m, "  .%-30s: %lu\n", "jiffies", jiffies);
+	P(next_balance);
+	P(curr->pid);
+	P(clock);
+	P(prev_clock_raw);
+	P(clock_warps);
+	P(clock_overflows);
+	P(clock_unstable_events);
+	P(clock_max_delta);
+	P(cpu_load[0]);
+	P(cpu_load[1]);
+	P(cpu_load[2]);
+	P(cpu_load[3]);
+	P(cpu_load[4]);
+#undef P
+
+	print_cfs_stats(m, cpu, now);
+
+	print_rq(m, rq, cpu, now);
+}
+
+static int sched_debug_show(struct seq_file *m, void *v)
+{
+	u64 now = ktime_to_ns(ktime_get());
+	int cpu;
+
+	SEQ_printf(m, "Sched Debug Version: v0.04, cfs-v20, %s %.*s\n",
+		init_utsname()->release,
+		(int)strcspn(init_utsname()->version, " "),
+		init_utsname()->version);
+
+	SEQ_printf(m, "now at %Lu nsecs\n", (unsigned long long)now);
+
+	for_each_online_cpu(cpu)
+		print_cpu(m, cpu, now);
+
+	SEQ_printf(m, "\n");
+
+	return 0;
+}
+
+void sysrq_sched_debug_show(void)
+{
+	sched_debug_show(NULL, NULL);
+}
+
+static int sched_debug_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, sched_debug_show, NULL);
+}
+
+static struct file_operations sched_debug_fops = {
+	.open		= sched_debug_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static int __init init_sched_debug_procfs(void)
+{
+	struct proc_dir_entry *pe;
+
+	pe = create_proc_entry("sched_debug", 0644, NULL);
+	if (!pe)
+		return -ENOMEM;
+
+	pe->proc_fops = &sched_debug_fops;
+
+	return 0;
+}
+
+__initcall(init_sched_debug_procfs);
+
+void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
+{
+	unsigned long flags;
+	int num_threads = 1;
+
+	rcu_read_lock();
+	if (lock_task_sighand(p, &flags)) {
+		num_threads = atomic_read(&p->signal->count);
+		unlock_task_sighand(p, &flags);
+	}
+	rcu_read_unlock();
+
+	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid, num_threads);
+	SEQ_printf(m, "----------------------------------------------\n");
+#define P(F) \
+	SEQ_printf(m, "%-25s:%20Ld\n", #F, (long long)p->F)
+
+	P(se.wait_start);
+	P(se.wait_start_fair);
+	P(se.exec_start);
+	P(se.sleep_start);
+	P(se.sleep_start_fair);
+	P(se.block_start);
+	P(se.sleep_max);
+	P(se.block_max);
+	P(se.exec_max);
+	P(se.wait_max);
+	P(se.wait_runtime);
+	P(se.wait_runtime_overruns);
+	P(se.wait_runtime_underruns);
+	P(se.sum_wait_runtime);
+	P(se.sum_exec_runtime);
+	SEQ_printf(m, "%-25s:%20Ld\n",
+		   "nr_switches", (long long)(p->nvcsw + p->nivcsw));
+	P(se.load.weight);
+	P(policy);
+	P(prio);
+#undef P
+
+	{
+		u64 t0, t1;
+
+		t0 = sched_clock();
+		t1 = sched_clock();
+		SEQ_printf(m, "%-25s:%20Ld\n",
+			   "clock-delta", (long long)(t1-t0));
+	}
+}
+
+void proc_sched_set_task(struct task_struct *p)
+{
+	p->se.sleep_max = p->se.block_max = p->se.exec_max = p->se.wait_max = 0;
+	p->se.wait_runtime_overruns = p->se.wait_runtime_underruns = 0;
+	p->se.sum_exec_runtime = 0;
+}
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
new file mode 100644
index 0000000..6971db0
--- /dev/null
+++ b/kernel/sched_fair.c
@@ -0,0 +1,1131 @@
+/*
+ * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
+ *
+ *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ *  Interactivity improvements by Mike Galbraith
+ *  (C) 2007 Mike Galbraith <efault@gmx.de>
+ *
+ *  Various enhancements by Dmitry Adamushko.
+ *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
+ *
+ *  Group scheduling enhancements by Srivatsa Vaddagiri
+ *  Copyright IBM Corporation, 2007
+ *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
+ *
+ *  Scaled math optimizations by Thomas Gleixner
+ *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
+ */
+
+/*
+ * Preemption granularity:
+ * (default: 2 msec, units: nanoseconds)
+ *
+ * NOTE: this granularity value is not the same as the concept of
+ * 'timeslice length' - timeslices in CFS will typically be somewhat
+ * larger than this value. (to see the precise effective timeslice
+ * length of your workload, run vmstat and monitor the context-switches
+ * field)
+ *
+ * On SMP systems the value of this is multiplied by the log2 of the
+ * number of CPUs. (i.e. factor 2x on 2-way systems, 3x on 4-way
+ * systems, 4x on 8-way systems, 5x on 16-way systems, etc.)
+ */
+unsigned int sysctl_sched_granularity __read_mostly = 2000000000ULL/HZ;
+
+/*
+ * SCHED_BATCH wake-up granularity.
+ * (default: 10 msec, units: nanoseconds)
+ *
+ * This option delays the preemption effects of decoupled workloads
+ * and reduces their over-scheduling. Synchronous workloads will still
+ * have immediate wakeup/sleep latencies.
+ */
+unsigned int sysctl_sched_batch_wakeup_granularity __read_mostly =
+							10000000000ULL/HZ;
+
+/*
+ * SCHED_OTHER wake-up granularity.
+ * (default: 1 msec, units: nanoseconds)
+ *
+ * This option delays the preemption effects of decoupled workloads
+ * and reduces their over-scheduling. Synchronous workloads will still
+ * have immediate wakeup/sleep latencies.
+ */
+unsigned int sysctl_sched_wakeup_granularity __read_mostly = 1000000000ULL/HZ;
+
+unsigned int sysctl_sched_stat_granularity __read_mostly;
+
+/*
+ * Initialized in sched_init_granularity():
+ */
+unsigned int sysctl_sched_runtime_limit __read_mostly;
+
+/*
+ * Debugging: various feature bits
+ */
+enum {
+	SCHED_FEAT_FAIR_SLEEPERS	= 1,
+	SCHED_FEAT_SLEEPER_AVG		= 2,
+	SCHED_FEAT_SLEEPER_LOAD_AVG	= 4,
+	SCHED_FEAT_PRECISE_CPU_LOAD	= 8,
+	SCHED_FEAT_START_DEBIT		= 16,
+	SCHED_FEAT_SKIP_INITIAL		= 32,
+};
+
+unsigned int sysctl_sched_features __read_mostly =
+		SCHED_FEAT_FAIR_SLEEPERS	*1 |
+		SCHED_FEAT_SLEEPER_AVG		*1 |
+		SCHED_FEAT_SLEEPER_LOAD_AVG	*1 |
+		SCHED_FEAT_PRECISE_CPU_LOAD	*1 |
+		SCHED_FEAT_START_DEBIT		*1 |
+		SCHED_FEAT_SKIP_INITIAL		*0;
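+
+/*
+ * Worked example (illustrative): the *1/*0 multipliers above act as
+ * per-feature on/off switches, so the default mask is
+ * 1 + 2 + 4 + 8 + 16 = 31, i.e. every feature except SKIP_INITIAL.
+ */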
+
+extern struct sched_class fair_sched_class;
+
+/**************************************************************
+ * CFS operations on generic schedulable entities:
+ */
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+
+/* cpu runqueue to which this cfs_rq is attached */
+static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
+{
+	return cfs_rq->rq;
+}
+
+/* currently running entity (if any) on this cfs_rq */
+static inline struct sched_entity *cfs_rq_curr(struct cfs_rq *cfs_rq)
+{
+	return cfs_rq->curr;
+}
+
+/* An entity is a task if it doesn't "own" a runqueue */
+#define entity_is_task(se)	(!se->my_q)
+
+static inline void
+set_cfs_rq_curr(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	cfs_rq->curr = se;
+}
+
+#else	/* CONFIG_FAIR_GROUP_SCHED */
+
+static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
+{
+	return container_of(cfs_rq, struct rq, cfs);
+}
+
+static inline struct sched_entity *cfs_rq_curr(struct cfs_rq *cfs_rq)
+{
+	struct rq *rq = rq_of(cfs_rq);
+
+	if (unlikely(rq->curr->sched_class != &fair_sched_class))
+		return NULL;
+
+	return &rq->curr->se;
+}
+
+#define entity_is_task(se)	1
+
+static inline void
+set_cfs_rq_curr(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
+
+#endif	/* CONFIG_FAIR_GROUP_SCHED */
+
+static inline struct task_struct *task_of(struct sched_entity *se)
+{
+	return container_of(se, struct task_struct, se);
+}
+
+
+/**************************************************************
+ * Scheduling class tree data structure manipulation methods:
+ */
+
+/*
+ * Enqueue an entity into the rb-tree:
+ */
+static inline void
+__enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
+	struct rb_node *parent = NULL;
+	struct sched_entity *entry;
+	s64 key = se->fair_key;
+	int leftmost = 1;
+
+	/*
+	 * Find the right place in the rbtree:
+	 */
+	while (*link) {
+		parent = *link;
+		entry = rb_entry(parent, struct sched_entity, run_node);
+		/*
+		 * We don't care about collisions. Nodes with
+		 * the same key stay together.
+		 */
+		if (key - entry->fair_key < 0) {
+			link = &parent->rb_left;
+		} else {
+			link = &parent->rb_right;
+			leftmost = 0;
+		}
+	}
+
+	/*
+	 * Maintain a cache of leftmost tree entries (it is frequently
+	 * used):
+	 */
+	if (leftmost)
+		cfs_rq->rb_leftmost = &se->run_node;
+
+	rb_link_node(&se->run_node, parent, link);
+	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
+	update_load_add(&cfs_rq->load, se->load.weight);
+	cfs_rq->nr_running++;
+	se->on_rq = 1;
+}
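+
+/*
+ * Illustrative example (not part of the patch): enqueueing entities
+ * with keys 5, 3 and 9 in that order leaves the key-3 node leftmost;
+ * because an insert that never descends right updates rb_leftmost,
+ * the next-entity lookup via first_fair() stays O(1).
+ */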
+
+static inline void
+__dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	if (cfs_rq->rb_leftmost == &se->run_node)
+		cfs_rq->rb_leftmost = rb_next(&se->run_node);
+	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
+	update_load_sub(&cfs_rq->load, se->load.weight);
+	cfs_rq->nr_running--;
+	se->on_rq = 0;
+}
+
+static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
+{
+	return cfs_rq->rb_leftmost;
+}
+
+static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
+{
+	return rb_entry(first_fair(cfs_rq), struct sched_entity, run_node);
+}
+
+/**************************************************************
+ * Scheduling class statistics methods:
+ */
+
+/*
+ * We rescale the rescheduling granularity of tasks according to their
+ * nice level, but only linearly, not exponentially:
+ */
+static long
+niced_granularity(struct sched_entity *curr, unsigned long granularity)
+{
+	u64 tmp;
+
+	/*
+	 * Negative nice levels get the same granularity as nice-0:
+	 */
+	if (likely(curr->load.weight >= NICE_0_LOAD))
+		return granularity;
+	/*
+	 * Positive nice level tasks get linearly finer
+	 * granularity:
+	 */
+	tmp = curr->load.weight * (u64)granularity;
+
+	/*
+	 * It will always fit into 'long':
+	 */
+	return (long) (tmp >> NICE_0_SHIFT);
+}
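+
+/*
+ * Worked example (illustrative, assuming NICE_0_LOAD == 1 <<
+ * NICE_0_SHIFT): a task whose load.weight is half of NICE_0_LOAD gets
+ * (weight * granularity) >> NICE_0_SHIFT, i.e. half the granularity,
+ * so positive nice levels are preempted sooner.
+ */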
+
+static inline void
+limit_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	long limit = sysctl_sched_runtime_limit;
+
+	/*
+	 * Niced tasks have the same history dynamic range as
+	 * non-niced tasks:
+	 */
+	if (unlikely(se->wait_runtime > limit)) {
+		se->wait_runtime = limit;
+		schedstat_inc(se, wait_runtime_overruns);
+		schedstat_inc(cfs_rq, wait_runtime_overruns);
+	}
+	if (unlikely(se->wait_runtime < -limit)) {
+		se->wait_runtime = -limit;
+		schedstat_inc(se, wait_runtime_underruns);
+		schedstat_inc(cfs_rq, wait_runtime_underruns);
+	}
+}
+
+static inline void
+__add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
+{
+	se->wait_runtime += delta;
+	schedstat_add(se, sum_wait_runtime, delta);
+	limit_wait_runtime(cfs_rq, se);
+}
+
+static void
+add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
+{
+	schedstat_add(cfs_rq, wait_runtime, -se->wait_runtime);
+	__add_wait_runtime(cfs_rq, se, delta);
+	schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
+}
+
+/*
+ * Update the current task's runtime statistics. Skip current tasks that
+ * are not in our scheduling class.
+ */
+static inline void
+__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr, u64 now)
+{
+	unsigned long delta, delta_exec, delta_fair;
+	long delta_mine;
+	struct load_weight *lw = &cfs_rq->load;
+	unsigned long load = lw->weight;
+
+	if (unlikely(!load))
+		return;
+
+	delta_exec = curr->delta_exec;
+#ifdef CONFIG_SCHEDSTATS
+	if (unlikely(delta_exec > curr->exec_max))
+		curr->exec_max = delta_exec;
+#endif
+
+	curr->sum_exec_runtime += delta_exec;
+	cfs_rq->exec_clock += delta_exec;
+
+	delta_fair = calc_delta_fair(delta_exec, lw);
+	delta_mine = calc_delta_mine(delta_exec, curr->load.weight, lw);
+
+	if (cfs_rq->sleeper_bonus > sysctl_sched_stat_granularity) {
+		delta = calc_delta_mine(cfs_rq->sleeper_bonus,
+					curr->load.weight, lw);
+		if (unlikely(delta > cfs_rq->sleeper_bonus))
+			delta = cfs_rq->sleeper_bonus;
+
+		cfs_rq->sleeper_bonus -= delta;
+		delta_mine -= delta;
+	}
+
+	cfs_rq->fair_clock += delta_fair;
+	/*
+	 * We executed delta_exec amount of time on the CPU,
+	 * but we were only entitled to delta_mine amount of
+	 * time during that period (if nr_running == 1 then
+	 * the two values are equal)
+	 * [Note: delta_mine - delta_exec is negative]:
+	 */
+	add_wait_runtime(cfs_rq, curr, delta_mine - delta_exec);
+}
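+
+/*
+ * Illustrative example: with two runnable nice-0 tasks, curr is
+ * entitled to half the CPU, so delta_mine ~= delta_exec/2 and
+ * add_wait_runtime() charges curr about -delta_exec/2, while the
+ * fair clock advances by delta_fair = delta_exec/2.
+ */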
+
+static void update_curr(struct cfs_rq *cfs_rq, u64 now)
+{
+	struct sched_entity *curr = cfs_rq_curr(cfs_rq);
+	unsigned long delta_exec;
+
+	if (unlikely(!curr))
+		return;
+
+	/*
+	 * Get the amount of time the current task was running
+	 * since the last time we changed load (this cannot
+	 * overflow on 32 bits):
+	 */
+	delta_exec = (unsigned long)(now - curr->exec_start);
+
+	curr->delta_exec += delta_exec;
+
+	if (unlikely(curr->delta_exec > sysctl_sched_stat_granularity)) {
+		__update_curr(cfs_rq, curr, now);
+		curr->delta_exec = 0;
+	}
+	curr->exec_start = now;
+}
+
+static inline void
+update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
+{
+	se->wait_start_fair = cfs_rq->fair_clock;
+	se->wait_start = now;
+}
+
+/*
+ * We calculate fair deltas here, so protect against the random effects
+ * of a multiplication overflow by capping it to the runtime limit:
+ */
+#if BITS_PER_LONG == 32
+static inline unsigned long
+calc_weighted(unsigned long delta, unsigned long weight, int shift)
+{
+	u64 tmp = (u64)delta * weight >> shift;
+
+	if (unlikely(tmp > sysctl_sched_runtime_limit*2))
+		return sysctl_sched_runtime_limit*2;
+	return tmp;
+}
+#else
+static inline unsigned long
+calc_weighted(unsigned long delta, unsigned long weight, int shift)
+{
+	return delta * weight >> shift;
+}
+#endif
+
+/*
+ * Task is being enqueued - update stats:
+ */
+static void
+update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
+{
+	s64 key;
+
+	/*
+	 * Are we enqueueing a waiting task? (for current tasks
+	 * a dequeue/enqueue event is a NOP)
+	 */
+	if (se != cfs_rq_curr(cfs_rq))
+		update_stats_wait_start(cfs_rq, se, now);
+	/*
+	 * Update the key:
+	 */
+	key = cfs_rq->fair_clock;
+
+	/*
+	 * Optimize the common nice 0 case:
+	 */
+	if (likely(se->load.weight == NICE_0_LOAD)) {
+		key -= se->wait_runtime;
+	} else {
+		u64 tmp;
+
+		if (se->wait_runtime < 0) {
+			tmp = -se->wait_runtime;
+			key += (tmp * se->load.inv_weight) >>
+					(WMULT_SHIFT - NICE_0_SHIFT);
+		} else {
+			tmp = se->wait_runtime;
+			key -= (tmp * se->load.weight) >> NICE_0_SHIFT;
+		}
+	}
+
+	se->fair_key = key;
+}
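+
+/*
+ * Worked example (illustrative): a nice-0 task that is owed 1 msec
+ * (wait_runtime = +1000000 nsec) gets fair_key = fair_clock - 1000000,
+ * i.e. it sorts further to the left in the rbtree and is picked
+ * earlier than a task with no runtime owed.
+ */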
+
+/*
+ * Note: must be called with a freshly updated rq->fair_clock.
+ */
+static inline void
+__update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
+{
+	unsigned long delta_fair = se->delta_fair_run;
+
+#ifdef CONFIG_SCHEDSTATS
+	{
+		s64 delta_wait = now - se->wait_start;
+		if (unlikely(delta_wait > se->wait_max))
+			se->wait_max = delta_wait;
+	}
+#endif
+
+	if (unlikely(se->load.weight != NICE_0_LOAD))
+		delta_fair = calc_weighted(delta_fair, se->load.weight,
+							NICE_0_SHIFT);
+
+	add_wait_runtime(cfs_rq, se, delta_fair);
+}
+
+static void
+update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
+{
+	unsigned long delta_fair;
+
+	delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
+			(u64)(cfs_rq->fair_clock - se->wait_start_fair));
+
+	se->delta_fair_run += delta_fair;
+	if (unlikely(abs(se->delta_fair_run) >=
+				sysctl_sched_stat_granularity)) {
+		__update_stats_wait_end(cfs_rq, se, now);
+		se->delta_fair_run = 0;
+	}
+
+	se->wait_start_fair = 0;
+	se->wait_start = 0;
+}
+
+static inline void
+update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
+{
+	update_curr(cfs_rq, now);
+	/*
+	 * Mark the end of the wait period if dequeueing a
+	 * waiting task:
+	 */
+	if (se != cfs_rq_curr(cfs_rq))
+		update_stats_wait_end(cfs_rq, se, now);
+}
+
+/*
+ * We are picking a new current task - update its stats:
+ */
+static inline void
+update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
+{
+	/*
+	 * We are starting a new run period:
+	 */
+	se->exec_start = now;
+}
+
+/*
+ * We are descheduling a task - update its stats:
+ */
+static inline void
+update_stats_curr_end(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
+{
+	se->exec_start = 0;
+}
+
+/**************************************************
+ * Scheduling class queueing methods:
+ */
+
+static void
+__enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
+{
+	unsigned long load = cfs_rq->load.weight, delta_fair;
+	long prev_runtime;
+
+	if (sysctl_sched_features & SCHED_FEAT_SLEEPER_LOAD_AVG)
+		load = rq_of(cfs_rq)->cpu_load[2];
+
+	delta_fair = se->delta_fair_sleep;
+
+	/*
+	 * Fix up delta_fair with the effect of us running
+	 * during the whole sleep period:
+	 */
+	if (sysctl_sched_features & SCHED_FEAT_SLEEPER_AVG)
+		delta_fair = div64_likely32((u64)delta_fair * load,
+						load + se->load.weight);
+
+	if (unlikely(se->load.weight != NICE_0_LOAD))
+		delta_fair = calc_weighted(delta_fair, se->load.weight,
+							NICE_0_SHIFT);
+
+	prev_runtime = se->wait_runtime;
+	__add_wait_runtime(cfs_rq, se, delta_fair);
+	delta_fair = se->wait_runtime - prev_runtime;
+
+	/*
+	 * Track the amount of bonus we've given to sleepers:
+	 */
+	cfs_rq->sleeper_bonus += delta_fair;
+
+	schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
+}
+
+static void
+enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
+{
+	struct task_struct *tsk = task_of(se);
+	unsigned long delta_fair;
+
+	if ((entity_is_task(se) && tsk->policy == SCHED_BATCH) ||
+			 !(sysctl_sched_features & SCHED_FEAT_FAIR_SLEEPERS))
+		return;
+
+	delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
+		(u64)(cfs_rq->fair_clock - se->sleep_start_fair));
+
+	se->delta_fair_sleep += delta_fair;
+	if (unlikely(abs(se->delta_fair_sleep) >=
+				sysctl_sched_stat_granularity)) {
+		__enqueue_sleeper(cfs_rq, se, now);
+		se->delta_fair_sleep = 0;
+	}
+
+	se->sleep_start_fair = 0;
+
+#ifdef CONFIG_SCHEDSTATS
+	if (se->sleep_start) {
+		u64 delta = now - se->sleep_start;
+
+		if ((s64)delta < 0)
+			delta = 0;
+
+		if (unlikely(delta > se->sleep_max))
+			se->sleep_max = delta;
+
+		se->sleep_start = 0;
+		se->sum_sleep_runtime += delta;
+	}
+	if (se->block_start) {
+		u64 delta = now - se->block_start;
+
+		if ((s64)delta < 0)
+			delta = 0;
+
+		if (unlikely(delta > se->block_max))
+			se->block_max = delta;
+
+		se->block_start = 0;
+		se->sum_sleep_runtime += delta;
+	}
+#endif
+}
+
+static void
+enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
+	       int wakeup, u64 now)
+{
+	/*
+	 * Update the fair clock.
+	 */
+	update_curr(cfs_rq, now);
+
+	if (wakeup)
+		enqueue_sleeper(cfs_rq, se, now);
+
+	update_stats_enqueue(cfs_rq, se, now);
+	__enqueue_entity(cfs_rq, se);
+}
+
+static void
+dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
+	       int sleep, u64 now)
+{
+	update_stats_dequeue(cfs_rq, se, now);
+	if (sleep) {
+		se->sleep_start_fair = cfs_rq->fair_clock;
+#ifdef CONFIG_SCHEDSTATS
+		if (entity_is_task(se)) {
+			struct task_struct *tsk = task_of(se);
+
+			if (tsk->state & TASK_INTERRUPTIBLE)
+				se->sleep_start = now;
+			if (tsk->state & TASK_UNINTERRUPTIBLE)
+				se->block_start = now;
+		}
+		cfs_rq->wait_runtime -= se->wait_runtime;
+#endif
+	}
+	__dequeue_entity(cfs_rq, se);
+}
+
+/*
+ * Preempt the current task with a newly woken task if needed:
+ */
+static void
+__check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
+			  struct sched_entity *curr, unsigned long granularity)
+{
+	s64 __delta = curr->fair_key - se->fair_key;
+
+	/*
+	 * Take scheduling granularity into account - do not
+	 * preempt the current task unless the best task has
+	 * a larger than sched_granularity fairness advantage:
+	 */
+	if (__delta > niced_granularity(curr, granularity))
+		resched_task(rq_of(cfs_rq)->curr);
+}
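+
+/*
+ * Illustrative example: if the woken entity's fair_key trails curr's
+ * by 3 msec worth of fair time and the (nice-scaled) granularity is
+ * 2 msec, curr is rescheduled; with __delta at or below the
+ * granularity the wakeup does not preempt.
+ */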
+
+static inline void
+set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
+{
+	/*
+	 * Any task has to be enqueued before it gets to execute on
+	 * a CPU. So account for the time it spent waiting on the
+	 * runqueue. (note, here we rely on pick_next_task() having
+	 * done a put_prev_task_fair() shortly before this, which
+	 * updated rq->fair_clock - used by update_stats_wait_end())
+	 */
+	update_stats_wait_end(cfs_rq, se, now);
+	update_stats_curr_start(cfs_rq, se, now);
+	set_cfs_rq_curr(cfs_rq, se);
+}
+
+static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq, u64 now)
+{
+	struct sched_entity *se = __pick_next_entity(cfs_rq);
+
+	set_next_entity(cfs_rq, se, now);
+
+	return se;
+}
+
+static void
+put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev, u64 now)
+{
+	/*
+	 * If still on the runqueue then deactivate_task()
+	 * was not called and update_curr() has to be done:
+	 */
+	if (prev->on_rq)
+		update_curr(cfs_rq, now);
+
+	update_stats_curr_end(cfs_rq, prev, now);
+
+	if (prev->on_rq)
+		update_stats_wait_start(cfs_rq, prev, now);
+	set_cfs_rq_curr(cfs_rq, NULL);
+}
+
+static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+{
+	struct rq *rq = rq_of(cfs_rq);
+	struct sched_entity *next;
+	u64 now = __rq_clock(rq);
+
+	/*
+	 * Dequeue and enqueue the task to update its
+	 * position within the tree:
+	 */
+	dequeue_entity(cfs_rq, curr, 0, now);
+	enqueue_entity(cfs_rq, curr, 0, now);
+
+	/*
+	 * Reschedule if another task tops the current one.
+	 */
+	next = __pick_next_entity(cfs_rq);
+	if (next == curr)
+		return;
+
+	__check_preempt_curr_fair(cfs_rq, next, curr, sysctl_sched_granularity);
+}
+
+/**************************************************
+ * CFS operations on tasks:
+ */
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+
+/* Walk up scheduling entities hierarchy */
+#define for_each_sched_entity(se) \
+		for (; se; se = se->parent)
+
+static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
+{
+	return p->se.cfs_rq;
+}
+
+/* runqueue on which this entity is (to be) queued */
+static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
+{
+	return se->cfs_rq;
+}
+
+/* runqueue "owned" by this group */
+static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
+{
+	return grp->my_q;
+}
+
+/*
+ * Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
+ * another cpu ('this_cpu'):
+ */
+static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
+{
+	/* A later patch will take group into account */
+	return &cpu_rq(this_cpu)->cfs;
+}
+
+/* Iterate through all leaf cfs_rq's on a runqueue */
+#define for_each_leaf_cfs_rq(rq, cfs_rq) \
+	list_for_each_entry(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
+
+/* Do the two (enqueued) tasks belong to the same group? */
+static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
+{
+	if (curr->se.cfs_rq == p->se.cfs_rq)
+		return 1;
+
+	return 0;
+}
+
+#else	/* CONFIG_FAIR_GROUP_SCHED */
+
+#define for_each_sched_entity(se) \
+		for (; se; se = NULL)
+
+static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
+{
+	return &task_rq(p)->cfs;
+}
+
+static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
+{
+	struct task_struct *p = task_of(se);
+	struct rq *rq = task_rq(p);
+
+	return &rq->cfs;
+}
+
+/* runqueue "owned" by this group */
+static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
+{
+	return NULL;
+}
+
+static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
+{
+	return &cpu_rq(this_cpu)->cfs;
+}
+
+#define for_each_leaf_cfs_rq(rq, cfs_rq) \
+		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
+
+static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
+{
+	return 1;
+}
+
+#endif	/* CONFIG_FAIR_GROUP_SCHED */
+
+/*
+ * The enqueue_task method is called before nr_running is
+ * increased. Here we update the fair scheduling stats and
+ * then put the task into the rbtree:
+ */
+static void
+enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup, u64 now)
+{
+	struct cfs_rq *cfs_rq;
+	struct sched_entity *se = &p->se;
+
+	for_each_sched_entity(se) {
+		if (se->on_rq)
+			break;
+		cfs_rq = cfs_rq_of(se);
+		enqueue_entity(cfs_rq, se, wakeup, now);
+	}
+}
+
+/*
+ * The dequeue_task method is called before nr_running is
+ * decreased. We remove the task from the rbtree and
+ * update the fair scheduling stats:
+ */
+static void
+dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep, u64 now)
+{
+	struct cfs_rq *cfs_rq;
+	struct sched_entity *se = &p->se;
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+		dequeue_entity(cfs_rq, se, sleep, now);
+		/* Don't dequeue parent if it has other entities besides us */
+		if (cfs_rq->load.weight)
+			break;
+	}
+}
+
+/*
+ * sched_yield() support is very simple - we dequeue and enqueue
+ */
+static void yield_task_fair(struct rq *rq, struct task_struct *p)
+{
+	struct cfs_rq *cfs_rq = task_cfs_rq(p);
+	u64 now = __rq_clock(rq);
+
+	/*
+	 * Dequeue and enqueue the task to update its
+	 * position within the tree:
+	 */
+	dequeue_entity(cfs_rq, &p->se, 0, now);
+	enqueue_entity(cfs_rq, &p->se, 0, now);
+}
+
+/*
+ * Preempt the current task with a newly woken task if needed:
+ */
+static void check_preempt_curr_fair(struct rq *rq, struct task_struct *p)
+{
+	struct task_struct *curr = rq->curr;
+	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
+	unsigned long gran;
+
+	if (unlikely(rt_prio(p->prio))) {
+		update_curr(cfs_rq, rq_clock(rq));
+		resched_task(curr);
+		return;
+	}
+
+	gran = sysctl_sched_wakeup_granularity;
+	/*
+	 * Batch tasks prefer throughput over latency:
+	 */
+	if (unlikely(p->policy == SCHED_BATCH))
+		gran = sysctl_sched_batch_wakeup_granularity;
+
+	if (is_same_group(curr, p))
+		__check_preempt_curr_fair(cfs_rq, &p->se, &curr->se, gran);
+}
+
+static struct task_struct *pick_next_task_fair(struct rq *rq, u64 now)
+{
+	struct cfs_rq *cfs_rq = &rq->cfs;
+	struct sched_entity *se;
+
+	if (unlikely(!cfs_rq->nr_running))
+		return NULL;
+
+	do {
+		se = pick_next_entity(cfs_rq, now);
+		cfs_rq = group_cfs_rq(se);
+	} while (cfs_rq);
+
+	return task_of(se);
+}
+
+/*
+ * Account for a descheduled task:
+ */
+static void put_prev_task_fair(struct rq *rq, struct task_struct *prev, u64 now)
+{
+	struct sched_entity *se = &prev->se;
+	struct cfs_rq *cfs_rq;
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+		put_prev_entity(cfs_rq, se, now);
+	}
+}
+
+/**************************************************
+ * Fair scheduling class load-balancing methods:
+ */
+
+/*
+ * Load-balancing iterator. Note: while the runqueue stays locked
+ * during the whole iteration, the current task might be
+ * dequeued so the iterator has to be dequeue-safe. Here we
+ * achieve that by always pre-iterating before returning
+ * the current task:
+ */
+static inline struct task_struct *
+__load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
+{
+	struct task_struct *p;
+
+	if (!curr)
+		return NULL;
+
+	p = rb_entry(curr, struct task_struct, se.run_node);
+	cfs_rq->rb_load_balance_curr = rb_next(curr);
+
+	return p;
+}
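+
+/*
+ * Note (illustrative): rb_next() is taken before p is returned, so
+ * even if the balancer migrates p and erases its run_node, the saved
+ * rb_load_balance_curr cursor still points at a valid tree position.
+ */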
+
+static struct task_struct *load_balance_start_fair(void *arg)
+{
+	struct cfs_rq *cfs_rq = arg;
+
+	return __load_balance_iterator(cfs_rq, first_fair(cfs_rq));
+}
+
+static struct task_struct *load_balance_next_fair(void *arg)
+{
+	struct cfs_rq *cfs_rq = arg;
+
+	return __load_balance_iterator(cfs_rq, cfs_rq->rb_load_balance_curr);
+}
+
+static int cfs_rq_best_prio(struct cfs_rq *cfs_rq)
+{
+	struct sched_entity *curr;
+	struct task_struct *p;
+
+	if (!cfs_rq->nr_running)
+		return MAX_PRIO;
+
+	curr = __pick_next_entity(cfs_rq);
+	p = task_of(curr);
+
+	return p->prio;
+}
+
+static int
+load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
+			unsigned long max_nr_move, unsigned long max_load_move,
+			struct sched_domain *sd, enum cpu_idle_type idle,
+			int *all_pinned, unsigned long *total_load_moved)
+{
+	struct cfs_rq *busy_cfs_rq;
+	unsigned long load_moved, total_nr_moved = 0, nr_moved;
+	long rem_load_move = max_load_move;
+	struct rq_iterator cfs_rq_iterator;
+
+	cfs_rq_iterator.start = load_balance_start_fair;
+	cfs_rq_iterator.next = load_balance_next_fair;
+
+	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
+		struct cfs_rq *this_cfs_rq;
+		long imbalance;
+		unsigned long maxload;
+		int this_best_prio, best_prio, best_prio_seen = 0;
+
+		this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);
+
+		imbalance = busy_cfs_rq->load.weight -
+						 this_cfs_rq->load.weight;
+		/* Don't pull if this_cfs_rq has more load than busy_cfs_rq */
+		if (imbalance <= 0)
+			continue;
+
+		/* Don't pull more than imbalance/2 */
+		imbalance /= 2;
+		maxload = min(rem_load_move, imbalance);
+
+		this_best_prio = cfs_rq_best_prio(this_cfs_rq);
+		best_prio = cfs_rq_best_prio(busy_cfs_rq);
+
+		/*
+		 * Enable handling of the case where there is more than one task
+		 * with the best priority. If the current running task is one
+		 * of those with prio==best_prio we know it won't be moved
+		 * and therefore it's safe to override the skip (based on load)
+		 * of any task we find with that prio.
+		 */
+		if (cfs_rq_curr(busy_cfs_rq) == &busiest->curr->se)
+			best_prio_seen = 1;
+
+		/*
+		 * Pass the busy_cfs_rq argument into the
+		 * load_balance_[start|next]_fair iterators:
+		 */
+		cfs_rq_iterator.arg = busy_cfs_rq;
+		nr_moved = balance_tasks(this_rq, this_cpu, busiest,
+				max_nr_move, maxload, sd, idle, all_pinned,
+				&load_moved, this_best_prio, best_prio,
+				best_prio_seen, &cfs_rq_iterator);
+
+		total_nr_moved += nr_moved;
+		max_nr_move -= nr_moved;
+		rem_load_move -= load_moved;
+
+		if (max_nr_move <= 0 || rem_load_move <= 0)
+			break;
+	}
+
+	*total_load_moved = max_load_move - rem_load_move;
+
+	return total_nr_moved;
+}
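
Worked numbers may help with the arithmetic above. A small illustrative sketch (the weights and budget are made up):

	#include <stdio.h>

	/* how much load to pull from a busier queue, per the logic above */
	static long demo_max_pull(long busy_weight, long this_weight, long budget)
	{
		long imbalance = busy_weight - this_weight;

		if (imbalance <= 0)
			return 0;		/* never pull from a lighter queue */
		imbalance /= 2;			/* pull at most half of the gap */
		return imbalance < budget ? imbalance : budget;
	}

	int main(void)
	{
		/* (3072 - 1024) / 2 = 1024, within the budget of 2048 */
		printf("%ld\n", demo_max_pull(3072, 1024, 2048));	/* 1024 */
		return 0;
	}
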
+
+/*
+ * scheduler tick hitting a task of our scheduling class:
+ */
+static void task_tick_fair(struct rq *rq, struct task_struct *curr)
+{
+	struct cfs_rq *cfs_rq;
+	struct sched_entity *se = &curr->se;
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+		entity_tick(cfs_rq, se);
+	}
+}
+
+/*
+ * Share the fairness runtime between parent and child, so that the
+ * total amount of CPU pressure stays constant - new tasks get a
+ * chance to run, but frequent forkers are not allowed to monopolize
+ * the CPU. Note: the parent runqueue is locked and the child is not
+ * running yet.
+ */
+static void task_new_fair(struct rq *rq, struct task_struct *p)
+{
+	struct cfs_rq *cfs_rq = task_cfs_rq(p);
+	struct sched_entity *se = &p->se;
+	u64 now = rq_clock(rq);
+
+	sched_info_queued(p);
+
+	update_stats_enqueue(cfs_rq, se, now);
+	/*
+	 * Child runs first: we let it run before the parent
+	 * until it reschedules once. We set up the key so that
+	 * it will preempt the parent:
+	 */
+	p->se.fair_key = rq->curr->se.fair_key -
+		niced_granularity(&rq->curr->se, sysctl_sched_granularity) - 1;
+	/*
+	 * The first wait is dominated by the child-runs-first logic,
+	 * so do not credit it with that waiting time yet:
+	 */
+	if (sysctl_sched_features & SCHED_FEAT_SKIP_INITIAL)
+		p->se.wait_start_fair = 0;
+
+	/*
+	 * The statistical average of wait_runtime is about
+	 * -granularity/2, so initialize the task with that:
+	 */
+	if (sysctl_sched_features & SCHED_FEAT_START_DEBIT)
+		p->se.wait_runtime = -(sysctl_sched_granularity / 2);
+
+	__enqueue_entity(cfs_rq, se);
+	inc_nr_running(p, rq, now);
+}
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+/*
+ * Account for a task changing its policy or group.
+ *
+ * This routine is mostly called to set the cfs_rq->curr field when a
+ * task migrates between groups/classes.
+ */
+static void set_curr_task_fair(struct rq *rq)
+{
+	struct task_struct *curr = rq->curr;
+	struct sched_entity *se = &curr->se;
+	u64 now = rq_clock(rq);
+	struct cfs_rq *cfs_rq;
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+		set_next_entity(cfs_rq, se, now);
+	}
+}
+#else
+static void set_curr_task_fair(struct rq *rq)
+{
+}
+#endif
+
+/*
+ * All the scheduling class methods:
+ */
+struct sched_class fair_sched_class __read_mostly = {
+	.enqueue_task		= enqueue_task_fair,
+	.dequeue_task		= dequeue_task_fair,
+	.yield_task		= yield_task_fair,
+
+	.check_preempt_curr	= check_preempt_curr_fair,
+
+	.pick_next_task		= pick_next_task_fair,
+	.put_prev_task		= put_prev_task_fair,
+
+	.load_balance		= load_balance_fair,
+
+	.set_curr_task		= set_curr_task_fair,
+	.task_tick		= task_tick_fair,
+	.task_new		= task_new_fair,
+};
+
+#ifdef CONFIG_SCHED_DEBUG
+void print_cfs_stats(struct seq_file *m, int cpu, u64 now)
+{
+	struct rq *rq = cpu_rq(cpu);
+	struct cfs_rq *cfs_rq;
+
+	for_each_leaf_cfs_rq(rq, cfs_rq)
+		print_cfs_rq(m, cpu, cfs_rq, now);
+}
+#endif
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
new file mode 100644
index 0000000..41841e7
--- /dev/null
+++ b/kernel/sched_idletask.c
@@ -0,0 +1,71 @@
+/*
+ * idle-task scheduling class.
+ *
+ * (NOTE: these are not related to SCHED_IDLE tasks, which are
+ *  handled in sched_fair.c)
+ */
+
+/*
+ * Idle tasks are unconditionally rescheduled:
+ */
+static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p)
+{
+	resched_task(rq->idle);
+}
+
+static struct task_struct *pick_next_task_idle(struct rq *rq, u64 now)
+{
+	schedstat_inc(rq, sched_goidle);
+
+	return rq->idle;
+}
+
+/*
+ * It is not legal to sleep in the idle task - print a warning
+ * message if some code attempts to do it:
+ */
+static void
+dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep, u64 now)
+{
+	spin_unlock_irq(&rq->lock);
+	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
+	dump_stack();
+	spin_lock_irq(&rq->lock);
+}
+
+static void put_prev_task_idle(struct rq *rq, struct task_struct *prev, u64 now)
+{
+}
+
+static int
+load_balance_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
+			unsigned long max_nr_move, unsigned long max_load_move,
+			struct sched_domain *sd, enum cpu_idle_type idle,
+			int *all_pinned, unsigned long *total_load_moved)
+{
+	return 0;
+}
+
+static void task_tick_idle(struct rq *rq, struct task_struct *curr)
+{
+}
+
+/*
+ * Simple, special scheduling class for the per-CPU idle tasks:
+ */
+static struct sched_class idle_sched_class __read_mostly = {
+	/* no enqueue/yield_task for idle tasks */
+
+	/* dequeue is not valid; we print a debug message there: */
+	.dequeue_task		= dequeue_task_idle,
+
+	.check_preempt_curr	= check_preempt_curr_idle,
+
+	.pick_next_task		= pick_next_task_idle,
+	.put_prev_task		= put_prev_task_idle,
+
+	.load_balance		= load_balance_idle,
+
+	.task_tick		= task_tick_idle,
+	/* no .task_new for idle tasks */
+};
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
new file mode 100644
index 0000000..1192a27
--- /dev/null
+++ b/kernel/sched_rt.c
@@ -0,0 +1,255 @@
+/*
+ * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
+ * policies)
+ */
+
+/*
+ * Update the current task's runtime statistics. Skip current tasks that
+ * are not in our scheduling class.
+ */
+static inline void update_curr_rt(struct rq *rq, u64 now)
+{
+	struct task_struct *curr = rq->curr;
+	u64 delta_exec;
+
+	if (!task_has_rt_policy(curr))
+		return;
+
+	delta_exec = now - curr->se.exec_start;
+	if (unlikely((s64)delta_exec < 0))
+		delta_exec = 0;
+	if (unlikely(delta_exec > curr->se.exec_max))
+		curr->se.exec_max = delta_exec;
+
+	curr->se.sum_exec_runtime += delta_exec;
+	curr->se.exec_start = now;
+}
+
+/*
+ * Adding/removing a task to/from a priority array:
+ */
+static void
+enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup, u64 now)
+{
+	struct rt_prio_array *array = &rq->rt.active;
+
+	list_add_tail(&p->run_list, array->queue + p->prio);
+	__set_bit(p->prio, array->bitmap);
+}
+
+static void
+dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep, u64 now)
+{
+	struct rt_prio_array *array = &rq->rt.active;
+
+	update_curr_rt(rq, now);
+
+	list_del(&p->run_list);
+	if (list_empty(array->queue + p->prio))
+		__clear_bit(p->prio, array->bitmap);
+}
+
+/*
+ * Put a task at the end of the run list without the overhead of a
+ * dequeue followed by an enqueue.
+ */
+static void requeue_task_rt(struct rq *rq, struct task_struct *p)
+{
+	struct rt_prio_array *array = &rq->rt.active;
+
+	list_move_tail(&p->run_list, array->queue + p->prio);
+}
+
+static void
+yield_task_rt(struct rq *rq, struct task_struct *p)
+{
+	requeue_task_rt(rq, p);
+}
+
+/*
+ * Preempt the current task with a newly woken task if needed:
+ */
+static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
+{
+	if (p->prio < rq->curr->prio)
+		resched_task(rq->curr);
+}
+
+static struct task_struct *pick_next_task_rt(struct rq *rq, u64 now)
+{
+	struct rt_prio_array *array = &rq->rt.active;
+	struct task_struct *next;
+	struct list_head *queue;
+	int idx;
+
+	idx = sched_find_first_bit(array->bitmap);
+	if (idx >= MAX_RT_PRIO)
+		return NULL;
+
+	queue = array->queue + idx;
+	next = list_entry(queue->next, struct task_struct, run_list);
+
+	next->se.exec_start = now;
+
+	return next;
+}
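
The pick is O(1) because it is a find-first-bit plus a list-head dereference. A hedged userspace sketch of the same structure, shrunk to one 32-bit word of priority levels (all demo_* names are illustrative):

	#include <strings.h>		/* ffs() */

	#define DEMO_NR_PRIO 32		/* one word of priority levels, for brevity */

	struct demo_array {
		unsigned int bitmap;		/* bit i set => queue[i] is non-empty */
		void *queue[DEMO_NR_PRIO];	/* per-priority FIFO list heads */
	};

	/* highest-priority (lowest-index) runnable entry, in O(1) */
	static void *demo_pick(struct demo_array *a)
	{
		int idx = ffs((int)a->bitmap);	/* 1-based index of lowest set bit */

		if (!idx)
			return NULL;		/* nothing runnable at any priority */
		return a->queue[idx - 1];
	}
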
+
+static void put_prev_task_rt(struct rq *rq, struct task_struct *p, u64 now)
+{
+	update_curr_rt(rq, now);
+	p->se.exec_start = 0;
+}
+
+/*
+ * Load-balancing iterator. Note: while the runqueue stays locked
+ * during the whole iteration, the current task might be
+ * dequeued so the iterator has to be dequeue-safe. Here we
+ * achieve that by always pre-iterating before returning
+ * the current task:
+ */
+static struct task_struct *load_balance_start_rt(void *arg)
+{
+	struct rq *rq = arg;
+	struct rt_prio_array *array = &rq->rt.active;
+	struct list_head *head, *curr;
+	struct task_struct *p;
+	int idx;
+
+	idx = sched_find_first_bit(array->bitmap);
+	if (idx >= MAX_RT_PRIO)
+		return NULL;
+
+	head = array->queue + idx;
+	curr = head->prev;
+
+	p = list_entry(curr, struct task_struct, run_list);
+
+	curr = curr->prev;
+
+	rq->rt.rt_load_balance_idx = idx;
+	rq->rt.rt_load_balance_head = head;
+	rq->rt.rt_load_balance_curr = curr;
+
+	return p;
+}
+
+static struct task_struct *load_balance_next_rt(void *arg)
+{
+	struct rq *rq = arg;
+	struct rt_prio_array *array = &rq->rt.active;
+	struct list_head *head, *curr;
+	struct task_struct *p;
+	int idx;
+
+	idx = rq->rt.rt_load_balance_idx;
+	head = rq->rt.rt_load_balance_head;
+	curr = rq->rt.rt_load_balance_curr;
+
+	/*
+	 * If we have arrived back at the head again then
+	 * iterate to the next queue (if any):
+	 */
+	if (unlikely(head == curr)) {
+		int next_idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
+
+		if (next_idx >= MAX_RT_PRIO)
+			return NULL;
+
+		idx = next_idx;
+		head = array->queue + idx;
+		curr = head->prev;
+
+		rq->rt.rt_load_balance_idx = idx;
+		rq->rt.rt_load_balance_head = head;
+	}
+
+	p = list_entry(curr, struct task_struct, run_list);
+
+	curr = curr->prev;
+
+	rq->rt.rt_load_balance_curr = curr;
+
+	return p;
+}
+
+static int
+load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
+			unsigned long max_nr_move, unsigned long max_load_move,
+			struct sched_domain *sd, enum cpu_idle_type idle,
+			int *all_pinned, unsigned long *load_moved)
+{
+	int this_best_prio, best_prio, best_prio_seen = 0;
+	int nr_moved;
+	struct rq_iterator rt_rq_iterator;
+
+	best_prio = sched_find_first_bit(busiest->rt.active.bitmap);
+	this_best_prio = sched_find_first_bit(this_rq->rt.active.bitmap);
+
+	/*
+	 * Enable handling of the case where there is more than one task
+	 * with the best priority. If the current running task is one
+	 * of those with prio==best_prio we know it won't be moved
+	 * and therefore it's safe to override the skip (based on load)
+	 * of any task we find with that prio.
+	 */
+	if (busiest->curr->prio == best_prio)
+		best_prio_seen = 1;
+
+	rt_rq_iterator.start = load_balance_start_rt;
+	rt_rq_iterator.next = load_balance_next_rt;
+	/*
+	 * Pass the 'busiest' rq argument into the
+	 * load_balance_[start|next]_rt iterators:
+	 */
+	rt_rq_iterator.arg = busiest;
+
+	nr_moved = balance_tasks(this_rq, this_cpu, busiest, max_nr_move,
+			max_load_move, sd, idle, all_pinned, load_moved,
+			this_best_prio, best_prio, best_prio_seen,
+			&rt_rq_iterator);
+
+	return nr_moved;
+}
+
+static void task_tick_rt(struct rq *rq, struct task_struct *p)
+{
+	/*
+	 * RR tasks need a special form of timeslice management.
+	 * FIFO tasks have no timeslices.
+	 */
+	if (p->policy != SCHED_RR)
+		return;
+
+	if (--p->time_slice)
+		return;
+
+	p->time_slice = static_prio_timeslice(p->static_prio);
+	set_tsk_need_resched(p);
+
+	/* put it at the end of the queue: */
+	requeue_task_rt(rq, p);
+}
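
Concretely, with a hypothetical 100-tick slice the task is requeued behind its priority peers on every 100th tick, and never in between. A minimal model of that tick logic:

	/* returns 1 when the slice expired and the task should be requeued */
	static int demo_rr_tick(int *time_slice, int default_slice)
	{
		if (--(*time_slice))
			return 0;	/* slice not yet used up */
		*time_slice = default_slice;
		return 1;		/* refill, requeue, resched */
	}
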
+
+/*
+ * No parent/child timeslice management necessary for RT tasks,
+ * just activate them:
+ */
+static void task_new_rt(struct rq *rq, struct task_struct *p)
+{
+	activate_task(rq, p, 1);
+}
+
+static struct sched_class rt_sched_class __read_mostly = {
+	.enqueue_task		= enqueue_task_rt,
+	.dequeue_task		= dequeue_task_rt,
+	.yield_task		= yield_task_rt,
+
+	.check_preempt_curr	= check_preempt_curr_rt,
+
+	.pick_next_task		= pick_next_task_rt,
+	.put_prev_task		= put_prev_task_rt,
+
+	.load_balance		= load_balance_rt,
+
+	.task_tick		= task_tick_rt,
+	.task_new		= task_new_rt,
+};
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
new file mode 100644
index 0000000..c63c38f
--- /dev/null
+++ b/kernel/sched_stats.h
@@ -0,0 +1,235 @@
+
+#ifdef CONFIG_SCHEDSTATS
+/*
+ * Bump this up when changing the output format or the meaning of an
+ * existing field, so that tools can adapt (or abort).
+ */
+#define SCHEDSTAT_VERSION 14
+
+static int show_schedstat(struct seq_file *seq, void *v)
+{
+	int cpu;
+
+	seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
+	seq_printf(seq, "timestamp %lu\n", jiffies);
+	for_each_online_cpu(cpu) {
+		struct rq *rq = cpu_rq(cpu);
+#ifdef CONFIG_SMP
+		struct sched_domain *sd;
+		int dcnt = 0;
+#endif
+
+		/* runqueue-specific stats */
+		seq_printf(seq,
+		    "cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %llu %llu %lu",
+		    cpu, rq->yld_both_empty,
+		    rq->yld_act_empty, rq->yld_exp_empty, rq->yld_cnt,
+		    rq->sched_switch, rq->sched_cnt, rq->sched_goidle,
+		    rq->ttwu_cnt, rq->ttwu_local,
+		    rq->rq_sched_info.cpu_time,
+		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcnt);
+
+		seq_printf(seq, "\n");
+
+#ifdef CONFIG_SMP
+		/* domain-specific stats */
+		preempt_disable();
+		for_each_domain(cpu, sd) {
+			enum cpu_idle_type itype;
+			char mask_str[NR_CPUS];
+
+			cpumask_scnprintf(mask_str, NR_CPUS, sd->span);
+			seq_printf(seq, "domain%d %s", dcnt++, mask_str);
+			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
+					itype++) {
+				seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu "
+						"%lu",
+				    sd->lb_cnt[itype],
+				    sd->lb_balanced[itype],
+				    sd->lb_failed[itype],
+				    sd->lb_imbalance[itype],
+				    sd->lb_gained[itype],
+				    sd->lb_hot_gained[itype],
+				    sd->lb_nobusyq[itype],
+				    sd->lb_nobusyg[itype]);
+			}
+			seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu"
+			    " %lu %lu %lu\n",
+			    sd->alb_cnt, sd->alb_failed, sd->alb_pushed,
+			    sd->sbe_cnt, sd->sbe_balanced, sd->sbe_pushed,
+			    sd->sbf_cnt, sd->sbf_balanced, sd->sbf_pushed,
+			    sd->ttwu_wake_remote, sd->ttwu_move_affine,
+			    sd->ttwu_move_balance);
+		}
+		preempt_enable();
+#endif
+	}
+	return 0;
+}
+
+static int schedstat_open(struct inode *inode, struct file *file)
+{
+	unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32);
+	char *buf = kmalloc(size, GFP_KERNEL);
+	struct seq_file *m;
+	int res;
+
+	if (!buf)
+		return -ENOMEM;
+	res = single_open(file, show_schedstat, NULL);
+	if (!res) {
+		m = file->private_data;
+		m->buf = buf;
+		m->size = size;
+	} else
+		kfree(buf);
+	return res;
+}
+
+const struct file_operations proc_schedstat_operations = {
+	.open    = schedstat_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = single_release,
+};
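
Since the version line exists precisely so tools can adapt or abort, a consumer should check it first. A hypothetical userspace reader (parsing of the remaining lines is left out):

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		int version = 0;
		FILE *f = fopen("/proc/schedstat", "r");

		if (!f)
			return EXIT_FAILURE;
		if (fscanf(f, "version %d", &version) != 1 || version != 14) {
			fprintf(stderr, "unsupported schedstat version %d\n", version);
			fclose(f);
			return EXIT_FAILURE;
		}
		/* ... parse the timestamp, cpuN and domainN lines here ... */
		fclose(f);
		return 0;
	}
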
+
+/*
+ * Expects runqueue lock to be held for atomicity of update
+ */
+static inline void
+rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
+{
+	if (rq) {
+		rq->rq_sched_info.run_delay += delta;
+		rq->rq_sched_info.pcnt++;
+	}
+}
+
+/*
+ * Expects runqueue lock to be held for atomicity of update
+ */
+static inline void
+rq_sched_info_depart(struct rq *rq, unsigned long long delta)
+{
+	if (rq)
+		rq->rq_sched_info.cpu_time += delta;
+}
+# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
+# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
+#else /* !CONFIG_SCHEDSTATS */
+static inline void
+rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
+{}
+static inline void
+rq_sched_info_depart(struct rq *rq, unsigned long long delta)
+{}
+# define schedstat_inc(rq, field)	do { } while (0)
+# define schedstat_add(rq, field, amt)	do { } while (0)
+#endif
+
+#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+/*
+ * Called when a process is dequeued from the active array and given
+ * the cpu.  We should note that with the exception of interactive
+ * tasks, the expired queue will become the active queue after the active
+ * queue is empty, without explicitly dequeuing and requeuing tasks in the
+ * expired queue.  (Interactive tasks may be requeued directly to the
+ * active queue, thus delaying tasks in the expired queue from running;
+ * see scheduler_tick()).
+ *
+ * This function is only called from sched_info_arrive(), rather than
+ * dequeue_task(). Even though a task may be queued and dequeued multiple
+ * times as it is shuffled about, we're really interested in knowing how
+ * long it was from the *first* time it was queued to the time that it
+ * finally hit a cpu.
+ */
+static inline void sched_info_dequeued(struct task_struct *t)
+{
+	t->sched_info.last_queued = 0;
+}
+
+/*
+ * Called when a task finally hits the cpu.  We can now calculate how
+ * long it was waiting to run.  We also note when it began so that we
+ * can keep stats on how long its timeslice is.
+ */
+static void sched_info_arrive(struct task_struct *t)
+{
+	unsigned long long now = sched_clock(), delta = 0;
+
+	if (t->sched_info.last_queued)
+		delta = now - t->sched_info.last_queued;
+	sched_info_dequeued(t);
+	t->sched_info.run_delay += delta;
+	t->sched_info.last_arrival = now;
+	t->sched_info.pcnt++;
+
+	rq_sched_info_arrive(task_rq(t), delta);
+}
+
+/*
+ * Called when a process is queued into either the active or expired
+ * array.  The time is noted and later used to determine how long the
+ * task had to wait before reaching the cpu.  Since the expired queue
+ * will become the active queue after the active queue is empty, without
+ * dequeuing and requeuing any tasks, we are interested in queuing to
+ * either.  It is unusual but not impossible for tasks to be dequeued
+ * and immediately requeued in the same or another array: this can
+ * happen in sched_yield(), set_user_nice(), and even load_balance()
+ * as it moves tasks from runqueue to runqueue.
+ *
+ * This function is only called from enqueue_task(), but it only updates
+ * the timestamp if one is not already set.  It is assumed that
+ * sched_info_dequeued() will clear that stamp when appropriate.
+ */
+static inline void sched_info_queued(struct task_struct *t)
+{
+	if (unlikely(sched_info_on()))
+		if (!t->sched_info.last_queued)
+			t->sched_info.last_queued = sched_clock();
+}
+
+/*
+ * Called when a process ceases being the active-running process, either
+ * voluntarily or involuntarily.  Now we can calculate how long it ran.
+ */
+static inline void sched_info_depart(struct task_struct *t)
+{
+	unsigned long long delta = sched_clock() - t->sched_info.last_arrival;
+
+	t->sched_info.cpu_time += delta;
+	rq_sched_info_depart(task_rq(t), delta);
+}
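
Put together, these hooks implement a simple two-timestamp scheme: wait time accrues from last_queued to arrival, run time from last_arrival to departure. A compact userspace model with hypothetical demo_* names:

	struct demo_sched_info {
		unsigned long long last_queued;		/* 0 means "not queued" */
		unsigned long long last_arrival;
		unsigned long long run_delay, cpu_time;
	};

	static void demo_queued(struct demo_sched_info *si, unsigned long long now)
	{
		if (!si->last_queued)		/* only the *first* enqueue counts */
			si->last_queued = now;
	}

	static void demo_arrive(struct demo_sched_info *si, unsigned long long now)
	{
		if (si->last_queued)
			si->run_delay += now - si->last_queued;
		si->last_queued = 0;
		si->last_arrival = now;
	}

	static void demo_depart(struct demo_sched_info *si, unsigned long long now)
	{
		si->cpu_time += now - si->last_arrival;
	}

With illustrative timestamps: queued at t=100 and arriving at t=130 adds 30 to run_delay; departing at t=170 then adds 40 to cpu_time.
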
+
+/*
+ * Called when tasks are switched involuntarily, typically because
+ * their time slice expired.  (This may also be called when switching
+ * to or from the idle task.)  We are only called when prev != next.
+ */
+static inline void
+__sched_info_switch(struct task_struct *prev, struct task_struct *next)
+{
+	struct rq *rq = task_rq(prev);
+
+	/*
+	 * prev now departs the cpu.  It's not interesting to record
+	 * stats about how efficient we were at scheduling the idle
+	 * process, however.
+	 */
+	if (prev != rq->idle)
+		sched_info_depart(prev);
+
+	if (next != rq->idle)
+		sched_info_arrive(next);
+}
+static inline void
+sched_info_switch(struct task_struct *prev, struct task_struct *next)
+{
+	if (unlikely(sched_info_on()))
+		__sched_info_switch(prev, next);
+}
+#else
+#define sched_info_queued(t)		do { } while (0)
+#define sched_info_switch(t, next)	do { } while (0)
+#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
+
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 0b9886a..73217a9 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -488,7 +488,6 @@
 
 static int ksoftirqd(void * __bind_cpu)
 {
-	set_user_nice(current, 19);
 	current->flags |= PF_NOFREEZE;
 
 	set_current_state(TASK_INTERRUPTIBLE);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 30ee462..51f5dac 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -206,7 +206,87 @@
 	{ .ctl_name = 0 }
 };
 
+#ifdef CONFIG_SCHED_DEBUG
+static int min_sched_granularity_ns = 100000;			/* 100 usecs */
+static int max_sched_granularity_ns = 1000000000;		/* 1 second */
+static int min_wakeup_granularity_ns;				/* 0 usecs */
+static int max_wakeup_granularity_ns = 1000000000;		/* 1 second */
+#endif
+
 static ctl_table kern_table[] = {
+#ifdef CONFIG_SCHED_DEBUG
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "sched_granularity_ns",
+		.data		= &sysctl_sched_granularity,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.strategy	= &sysctl_intvec,
+		.extra1		= &min_sched_granularity_ns,
+		.extra2		= &max_sched_granularity_ns,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "sched_wakeup_granularity_ns",
+		.data		= &sysctl_sched_wakeup_granularity,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.strategy	= &sysctl_intvec,
+		.extra1		= &min_wakeup_granularity_ns,
+		.extra2		= &max_wakeup_granularity_ns,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "sched_batch_wakeup_granularity_ns",
+		.data		= &sysctl_sched_batch_wakeup_granularity,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.strategy	= &sysctl_intvec,
+		.extra1		= &min_wakeup_granularity_ns,
+		.extra2		= &max_wakeup_granularity_ns,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "sched_stat_granularity_ns",
+		.data		= &sysctl_sched_stat_granularity,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.strategy	= &sysctl_intvec,
+		.extra1		= &min_wakeup_granularity_ns,
+		.extra2		= &max_wakeup_granularity_ns,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "sched_runtime_limit_ns",
+		.data		= &sysctl_sched_runtime_limit,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.strategy	= &sysctl_intvec,
+		.extra1		= &min_sched_granularity_ns,
+		.extra2		= &max_sched_granularity_ns,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "sched_child_runs_first",
+		.data		= &sysctl_sched_child_runs_first,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "sched_features",
+		.data		= &sysctl_sched_features,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
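
Because the entries use CTL_UNNUMBERED, these knobs are reachable by name under /proc/sys/kernel rather than by a binary sysctl number. A hypothetical userspace reader of one of them:

	#include <stdio.h>

	int main(void)
	{
		unsigned int ns = 0;
		FILE *f = fopen("/proc/sys/kernel/sched_wakeup_granularity_ns", "r");

		if (!f)
			return 1;
		if (fscanf(f, "%u", &ns) == 1)
			printf("wakeup granularity: %u ns\n", ns);
		fclose(f);
		return 0;
	}
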
 	{
 		.ctl_name	= KERN_PANIC,
 		.procname	= "panic",
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index da95e10..fab32a2 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -105,6 +105,15 @@
 	   can be detected via the NMI-watchdog, on platforms that
 	   support it.)
 
+config SCHED_DEBUG
+	bool "Collect scheduler debugging info"
+	depends on DEBUG_KERNEL && PROC_FS
+	default y
+	help
+	  If you say Y here, a /proc/sched_debug file will be provided,
+	  which can help in debugging the scheduler. The runtime overhead
+	  of this option is minimal.
+
 config SCHEDSTATS
 	bool "Collect scheduler statistics"
 	depends on DEBUG_KERNEL && PROC_FS
diff --git a/net/ieee80211/softmac/ieee80211softmac_module.c b/net/ieee80211/softmac/ieee80211softmac_module.c
index c308756..6398e6e 100644
--- a/net/ieee80211/softmac/ieee80211softmac_module.c
+++ b/net/ieee80211/softmac/ieee80211softmac_module.c
@@ -456,18 +456,13 @@
 ieee80211softmac_add_network_locked(struct ieee80211softmac_device *mac,
 	struct ieee80211softmac_network *add_net)
 {
-	struct list_head *list_ptr;
-	struct ieee80211softmac_network *softmac_net = NULL;
+	struct ieee80211softmac_network *softmac_net;
 
-	list_for_each(list_ptr, &mac->network_list) {
-		softmac_net = list_entry(list_ptr, struct ieee80211softmac_network, list);
+	list_for_each_entry(softmac_net, &mac->network_list, list) {
 		if(!memcmp(softmac_net->bssid, add_net->bssid, ETH_ALEN))
-			break;
-		else
-			softmac_net = NULL;
+			return;
 	}
-	if(softmac_net == NULL)
-		list_add(&(add_net->list), &mac->network_list);
+	list_add(&(add_net->list), &mac->network_list);
 }
 
 /* Add a network to the list, with locking */
@@ -506,16 +501,13 @@
 ieee80211softmac_get_network_by_bssid_locked(struct ieee80211softmac_device *mac,
 	u8 *bssid)
 {
-	struct list_head *list_ptr;
-	struct ieee80211softmac_network *softmac_net = NULL;
-	list_for_each(list_ptr, &mac->network_list) {
-		softmac_net = list_entry(list_ptr, struct ieee80211softmac_network, list);
+	struct ieee80211softmac_network *softmac_net;
+
+	list_for_each_entry(softmac_net, &mac->network_list, list) {
 		if(!memcmp(softmac_net->bssid, bssid, ETH_ALEN))
-			break;
-		else
-			softmac_net = NULL;
+			return softmac_net;
 	}
-	return softmac_net;
+	return NULL;
 }
 
 /* Get a network from the list by BSSID with locking */
@@ -537,11 +529,9 @@
 ieee80211softmac_get_network_by_essid_locked(struct ieee80211softmac_device *mac,
 	struct ieee80211softmac_essid *essid)
 {
-	struct list_head *list_ptr;
-	struct ieee80211softmac_network *softmac_net = NULL;
+	struct ieee80211softmac_network *softmac_net;
 
-	list_for_each(list_ptr, &mac->network_list) {
-		softmac_net = list_entry(list_ptr, struct ieee80211softmac_network, list);
+	list_for_each_entry(softmac_net, &mac->network_list, list) {
 		if (softmac_net->essid.len == essid->len &&
 			!memcmp(softmac_net->essid.data, essid->data, essid->len))
 			return softmac_net;