Merge branch 'lockdep-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/peterz/linux-2.6-lockdep

* 'lockdep-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/peterz/linux-2.6-lockdep:
  lockdep: Fix lockdep annotation for pipe_double_lock()
diff --git a/CREDITS b/CREDITS
index 2b88fb3..e76d300 100644
--- a/CREDITS
+++ b/CREDITS
@@ -1856,7 +1856,7 @@
 D: The Linux Support Team Erlangen
 
 N: Andreas Koensgen
-E: ajk@iehk.rwth-aachen.de
+E: ajk@comnets.uni-bremen.de
 D: 6pack driver for AX.25
 
 N: Harald Koerfgen
diff --git a/Documentation/RCU/rculist_nulls.txt b/Documentation/RCU/rculist_nulls.txt
index 93cb28d..18f9651 100644
--- a/Documentation/RCU/rculist_nulls.txt
+++ b/Documentation/RCU/rculist_nulls.txt
@@ -83,11 +83,12 @@
 obj = kmem_cache_alloc(...);
 lock_chain(); // typically a spin_lock()
 obj->key = key;
-atomic_inc(&obj->refcnt);
 /*
  * we need to make sure obj->key is updated before obj->next
+ * or obj->refcnt
  */
 smp_wmb();
+atomic_set(&obj->refcnt, 1);
 hlist_add_head_rcu(&obj->obj_node, list);
 unlock_chain(); // typically a spin_unlock()
 
@@ -159,6 +160,10 @@
 obj = kmem_cache_alloc(cachep);
 lock_chain(); // typically a spin_lock()
 obj->key = key;
+/*
+ * changes to obj->key must be visible before the refcnt change
+ */
+smp_wmb();
 atomic_set(&obj->refcnt, 1);
 /*
  * insert obj in RCU way (readers might be traversing chain)
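
The two hunks above make the same point: the store to obj->key has to be
ordered (by smp_wmb()) before the refcnt store and before the object is
inserted into the list, so a lockless reader can never find a reachable
object with a stale key. As a rough userspace C11 analogue of that publish
ordering (illustrative only; the names, malloc() use and release store merely
stand in for the kernel's smp_wmb()/atomic_set()/hlist_add_head_rcu()):

/* Hedged sketch: publish a fully initialised object with a release store. */
#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	int key;
	atomic_int refcnt;
	struct obj *next;
};

static void publish(struct obj * _Atomic *head, int key)
{
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return;
	o->key = key;
	atomic_init(&o->refcnt, 1);
	o->next = atomic_load_explicit(head, memory_order_relaxed);
	/* The release store makes key, refcnt and next visible before the
	 * object becomes reachable, the role smp_wmb() plays above. */
	atomic_store_explicit(head, o, memory_order_release);
}
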
diff --git a/Documentation/connector/cn_test.c b/Documentation/connector/cn_test.c
index f688eba..6a5be5d 100644
--- a/Documentation/connector/cn_test.c
+++ b/Documentation/connector/cn_test.c
@@ -1,7 +1,7 @@
 /*
  * 	cn_test.c
  * 
- * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
  * All rights reserved.
  * 
  * This program is free software; you can redistribute it and/or modify
@@ -194,5 +194,5 @@
 module_exit(cn_test_fini);
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>");
+MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
 MODULE_DESCRIPTION("Connector's test module");
diff --git a/Documentation/connector/ucon.c b/Documentation/connector/ucon.c
index d738cde..c5092ad 100644
--- a/Documentation/connector/ucon.c
+++ b/Documentation/connector/ucon.c
@@ -1,7 +1,7 @@
 /*
  * 	ucon.c
  *
- * Copyright (c) 2004+ Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * Copyright (c) 2004+ Evgeniy Polyakov <zbr@ioremap.net>
  *
  *
  * This program is free software; you can redistribute it and/or modify
diff --git a/Documentation/networking/6pack.txt b/Documentation/networking/6pack.txt
index d0777a1..8f33942 100644
--- a/Documentation/networking/6pack.txt
+++ b/Documentation/networking/6pack.txt
@@ -1,7 +1,7 @@
 This is the 6pack-mini-HOWTO, written by
 
 Andreas Könsgen DG3KQ
-Internet: ajk@iehk.rwth-aachen.de
+Internet: ajk@comnets.uni-bremen.de
 AMPR-net: dg3kq@db0pra.ampr.org
 AX.25:    dg3kq@db0ach.#nrw.deu.eu
 
diff --git a/MAINTAINERS b/MAINTAINERS
index 18c3f0c..ebc2691 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -150,7 +150,7 @@
 
 6PACK NETWORK DRIVER FOR AX.25
 P:	Andreas Koensgen
-M:	ajk@iehk.rwth-aachen.de
+M:	ajk@comnets.uni-bremen.de
 L:	linux-hams@vger.kernel.org
 S:	Maintained
 F:	drivers/net/hamradio/6pack.c
@@ -1612,6 +1612,13 @@
 F:	fs/configfs/
 F:	include/linux/configfs.h
 
+CONNECTOR
+P:	Evgeniy Polyakov
+M:	zbr@ioremap.net
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	drivers/connector/
+
 CONTROL GROUPS (CGROUPS)
 P:	Paul Menage
 M:	menage@google.com
@@ -4089,6 +4096,7 @@
 L:	coreteam@netfilter.org
 W:	http://www.netfilter.org/
 W:	http://www.iptables.org/
+T:	git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-2.6.git
 S:	Supported
 F:	include/linux/netfilter*
 F:	include/linux/netfilter/
@@ -5586,7 +5594,6 @@
 F:	drivers/net/starfire*
 
 STARMODE RADIO IP (STRIP) PROTOCOL DRIVER
-W:	http://mosquitonet.Stanford.EDU/strip.html
 S:	Orphan
 F:	drivers/net/wireless/strip.c
 F:	include/linux/if_strip.h
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 36c3dc7..a7aa8f9 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -66,6 +66,52 @@
 };
 
 /*
+ * Not sure about some of these
+ */
+static const u64 p6_perfmon_event_map[] =
+{
+  [PERF_COUNT_HW_CPU_CYCLES]		= 0x0079,
+  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
+  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0000,
+  [PERF_COUNT_HW_CACHE_MISSES]		= 0x0000,
+  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
+  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
+  [PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,
+};
+
+static u64 p6_pmu_event_map(int event)
+{
+	return p6_perfmon_event_map[event];
+}
+
+/*
+ * Counter setting that is specified not to count anything.
+ * We use this to effectively disable a counter.
+ *
+ * L2_RQSTS with 0 MESI unit mask.
+ */
+#define P6_NOP_COUNTER			0x0000002EULL
+
+static u64 p6_pmu_raw_event(u64 event)
+{
+#define P6_EVNTSEL_EVENT_MASK		0x000000FFULL
+#define P6_EVNTSEL_UNIT_MASK		0x0000FF00ULL
+#define P6_EVNTSEL_EDGE_MASK		0x00040000ULL
+#define P6_EVNTSEL_INV_MASK		0x00800000ULL
+#define P6_EVNTSEL_COUNTER_MASK		0xFF000000ULL
+
+#define P6_EVNTSEL_MASK			\
+	(P6_EVNTSEL_EVENT_MASK |	\
+	 P6_EVNTSEL_UNIT_MASK  |	\
+	 P6_EVNTSEL_EDGE_MASK  |	\
+	 P6_EVNTSEL_INV_MASK   |	\
+	 P6_EVNTSEL_COUNTER_MASK)
+
+	return event & P6_EVNTSEL_MASK;
+}
+
+
+/*
  * Intel PerfMon v3. Used on Core2 and later.
  */
 static const u64 intel_perfmon_event_map[] =
@@ -666,6 +712,7 @@
 {
 	struct perf_counter_attr *attr = &counter->attr;
 	struct hw_perf_counter *hwc = &counter->hw;
+	u64 config;
 	int err;
 
 	if (!x86_pmu_initialized())
@@ -718,14 +765,40 @@
 
 	if (attr->config >= x86_pmu.max_events)
 		return -EINVAL;
+
 	/*
 	 * The generic map:
 	 */
-	hwc->config |= x86_pmu.event_map(attr->config);
+	config = x86_pmu.event_map(attr->config);
+
+	if (config == 0)
+		return -ENOENT;
+
+	if (config == -1LL)
+		return -EINVAL;
+
+	hwc->config |= config;
 
 	return 0;
 }
 
+static void p6_pmu_disable_all(void)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	u64 val;
+
+	if (!cpuc->enabled)
+		return;
+
+	cpuc->enabled = 0;
+	barrier();
+
+	/* p6 only has one enable register */
+	rdmsrl(MSR_P6_EVNTSEL0, val);
+	val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+	wrmsrl(MSR_P6_EVNTSEL0, val);
+}
+
 static void intel_pmu_disable_all(void)
 {
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
@@ -767,6 +840,23 @@
 	return x86_pmu.disable_all();
 }
 
+static void p6_pmu_enable_all(void)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	unsigned long val;
+
+	if (cpuc->enabled)
+		return;
+
+	cpuc->enabled = 1;
+	barrier();
+
+	/* p6 only has one enable register */
+	rdmsrl(MSR_P6_EVNTSEL0, val);
+	val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+	wrmsrl(MSR_P6_EVNTSEL0, val);
+}
+
 static void intel_pmu_enable_all(void)
 {
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
@@ -784,13 +874,13 @@
 	barrier();
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+		struct perf_counter *counter = cpuc->counters[idx];
 		u64 val;
 
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
-		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
-		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
-			continue;
+
+		val = counter->hw.config;
 		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
 		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
 	}
@@ -819,16 +909,13 @@
 
 static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 {
-	int err;
-	err = checking_wrmsrl(hwc->config_base + idx,
+	(void)checking_wrmsrl(hwc->config_base + idx,
 			      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
 }
 
 static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
 {
-	int err;
-	err = checking_wrmsrl(hwc->config_base + idx,
-			      hwc->config);
+	(void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
 }
 
 static inline void
@@ -836,13 +923,24 @@
 {
 	int idx = __idx - X86_PMC_IDX_FIXED;
 	u64 ctrl_val, mask;
-	int err;
 
 	mask = 0xfULL << (idx * 4);
 
 	rdmsrl(hwc->config_base, ctrl_val);
 	ctrl_val &= ~mask;
-	err = checking_wrmsrl(hwc->config_base, ctrl_val);
+	(void)checking_wrmsrl(hwc->config_base, ctrl_val);
+}
+
+static inline void
+p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	u64 val = P6_NOP_COUNTER;
+
+	if (cpuc->enabled)
+		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+
+	(void)checking_wrmsrl(hwc->config_base + idx, val);
 }
 
 static inline void
@@ -943,6 +1041,19 @@
 	err = checking_wrmsrl(hwc->config_base, ctrl_val);
 }
 
+static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	u64 val;
+
+	val = hwc->config;
+	if (cpuc->enabled)
+		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+
+	(void)checking_wrmsrl(hwc->config_base + idx, val);
+}
+
+
 static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 {
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
@@ -959,8 +1070,6 @@
 
 	if (cpuc->enabled)
 		x86_pmu_enable_counter(hwc, idx);
-	else
-		x86_pmu_disable_counter(hwc, idx);
 }
 
 static int
@@ -1176,6 +1285,49 @@
 	local_irq_restore(flags);
 }
 
+static int p6_pmu_handle_irq(struct pt_regs *regs)
+{
+	struct perf_sample_data data;
+	struct cpu_hw_counters *cpuc;
+	struct perf_counter *counter;
+	struct hw_perf_counter *hwc;
+	int idx, handled = 0;
+	u64 val;
+
+	data.regs = regs;
+	data.addr = 0;
+
+	cpuc = &__get_cpu_var(cpu_hw_counters);
+
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+		if (!test_bit(idx, cpuc->active_mask))
+			continue;
+
+		counter = cpuc->counters[idx];
+		hwc = &counter->hw;
+
+		val = x86_perf_counter_update(counter, hwc, idx);
+		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
+			continue;
+
+		/*
+		 * counter overflow
+		 */
+		handled		= 1;
+		data.period	= counter->hw.last_period;
+
+		if (!x86_perf_counter_set_period(counter, hwc, idx))
+			continue;
+
+		if (perf_counter_overflow(counter, 1, &data))
+			p6_pmu_disable_counter(hwc, idx);
+	}
+
+	if (handled)
+		inc_irq_stat(apic_perf_irqs);
+
+	return handled;
+}
 
 /*
  * This handler is triggered by the local APIC, so the APIC IRQ handling
@@ -1185,14 +1337,13 @@
 {
 	struct perf_sample_data data;
 	struct cpu_hw_counters *cpuc;
-	int bit, cpu, loops;
+	int bit, loops;
 	u64 ack, status;
 
 	data.regs = regs;
 	data.addr = 0;
 
-	cpu = smp_processor_id();
-	cpuc = &per_cpu(cpu_hw_counters, cpu);
+	cpuc = &__get_cpu_var(cpu_hw_counters);
 
 	perf_disable();
 	status = intel_pmu_get_status();
@@ -1249,14 +1400,13 @@
 	struct cpu_hw_counters *cpuc;
 	struct perf_counter *counter;
 	struct hw_perf_counter *hwc;
-	int cpu, idx, handled = 0;
+	int idx, handled = 0;
 	u64 val;
 
 	data.regs = regs;
 	data.addr = 0;
 
-	cpu = smp_processor_id();
-	cpuc = &per_cpu(cpu_hw_counters, cpu);
+	cpuc = &__get_cpu_var(cpu_hw_counters);
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		if (!test_bit(idx, cpuc->active_mask))
@@ -1353,6 +1503,32 @@
 	.priority		= 1
 };
 
+static struct x86_pmu p6_pmu = {
+	.name			= "p6",
+	.handle_irq		= p6_pmu_handle_irq,
+	.disable_all		= p6_pmu_disable_all,
+	.enable_all		= p6_pmu_enable_all,
+	.enable			= p6_pmu_enable_counter,
+	.disable		= p6_pmu_disable_counter,
+	.eventsel		= MSR_P6_EVNTSEL0,
+	.perfctr		= MSR_P6_PERFCTR0,
+	.event_map		= p6_pmu_event_map,
+	.raw_event		= p6_pmu_raw_event,
+	.max_events		= ARRAY_SIZE(p6_perfmon_event_map),
+	.max_period		= (1ULL << 31) - 1,
+	.version		= 0,
+	.num_counters		= 2,
+	/*
+	 * Counters have 40 bits implemented. However they are designed such
+	 * that bits [32-39] are sign extensions of bit 31. As such the
+	 * effective width of a counter for P6-like PMU is 32 bits only.
+	 *
+	 * See IA-32 Intel Architecture Software developer manual Vol 3B
+	 */
+	.counter_bits		= 32,
+	.counter_mask		= (1ULL << 32) - 1,
+};
+
 static struct x86_pmu intel_pmu = {
 	.name			= "Intel",
 	.handle_irq		= intel_pmu_handle_irq,
@@ -1392,6 +1568,37 @@
 	.max_period		= (1ULL << 47) - 1,
 };
 
+static int p6_pmu_init(void)
+{
+	switch (boot_cpu_data.x86_model) {
+	case 1:
+	case 3:  /* Pentium Pro */
+	case 5:
+	case 6:  /* Pentium II */
+	case 7:
+	case 8:
+	case 11: /* Pentium III */
+		break;
+	case 9:
+	case 13:
+		/* Pentium M */
+		break;
+	default:
+		pr_cont("unsupported p6 CPU model %d ",
+			boot_cpu_data.x86_model);
+		return -ENODEV;
+	}
+
+	if (!cpu_has_apic) {
+		pr_info("no Local APIC, try rebooting with lapic");
+		return -ENODEV;
+	}
+
+	x86_pmu				= p6_pmu;
+
+	return 0;
+}
+
 static int intel_pmu_init(void)
 {
 	union cpuid10_edx edx;
@@ -1400,8 +1607,14 @@
 	unsigned int ebx;
 	int version;
 
-	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
+	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
+		/* check for P6 processor family */
+	   if (boot_cpu_data.x86 == 6) {
+		return p6_pmu_init();
+	   } else {
 		return -ENODEV;
+	   }
+	}
 
 	/*
 	 * Check whether the Architectural PerfMon supports
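
The p6_pmu definition above sets counter_bits to 32 because bits 32-39 of the
hardware counter merely sign-extend bit 31, and p6_pmu_handle_irq() treats a
cleared bit 31 as a pending overflow. A hedged userspace sketch of that width
arithmetic (helper names and values are illustrative, not kernel code):

/* Bits 32-39 of the 40-bit counter mirror bit 31, so overflow can be
 * judged from bit 31 alone. */
#include <assert.h>
#include <stdint.h>

#define COUNTER_BITS	32
#define COUNTER_MASK	((1ULL << COUNTER_BITS) - 1)

static uint64_t p6_hw_value(uint64_t v32)
{
	uint64_t v = v32 & COUNTER_MASK;

	if (v & (1ULL << (COUNTER_BITS - 1)))
		v |= 0xFFULL << COUNTER_BITS;	/* sign-extend into bits 32-39 */
	return v & ((1ULL << 40) - 1);
}

int main(void)
{
	/* A freshly armed counter sits in the "negative" half of the range. */
	uint64_t armed = p6_hw_value((uint64_t)-1000 & COUNTER_MASK);
	/* Once it has counted past zero, bit 31 is clear: an overflow. */
	uint64_t wrapped = p6_hw_value(500);

	assert(armed & (1ULL << (COUNTER_BITS - 1)));
	assert(!(wrapped & (1ULL << (COUNTER_BITS - 1))));
	return 0;
}
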
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index b1cd040..418d636 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -16,9 +16,9 @@
 };
 
 static ssize_t
-queue_var_show(unsigned int var, char *page)
+queue_var_show(unsigned long var, char *page)
 {
-	return sprintf(page, "%d\n", var);
+	return sprintf(page, "%lu\n", var);
 }
 
 static ssize_t
@@ -77,7 +77,8 @@
 
 static ssize_t queue_ra_show(struct request_queue *q, char *page)
 {
-	int ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
+	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
+					(PAGE_CACHE_SHIFT - 10);
 
 	return queue_var_show(ra_kb, (page));
 }
@@ -189,9 +190,9 @@
 
 static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
 {
-	unsigned int set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
+	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
 
-	return queue_var_show(set != 0, page);
+	return queue_var_show(set, page);
 }
 
 static ssize_t
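
queue_var_show() is widened to unsigned long above because values such as the
readahead size in KB no longer reliably fit the old unsigned int parameter. A
hedged illustration of the silent truncation the old signature allowed (the
value is deliberately exaggerated):

/* Assigning a large count to unsigned int reduces it modulo 2^32,
 * which is why the helper now takes unsigned long. */
#include <assert.h>

int main(void)
{
	unsigned long long ra_kb = 5ULL << 30;	/* hypothetical, oversized value */
	unsigned int narrow = ra_kb;		/* old queue_var_show() parameter type */

	assert(narrow != ra_kb);		/* silently truncated */
	return 0;
}
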
diff --git a/block/elevator.c b/block/elevator.c
index 6f23753..2d511f9 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -101,11 +101,16 @@
 		return 0;
 
 	/*
-	 * Don't merge if failfast settings don't match
+	 * Don't merge if failfast settings don't match.
+	 *
+	 * FIXME: The negation in front of each condition is necessary
+	 * because bio and request flags use different bit positions
+	 * and the accessors return those bits directly.  This
+	 * ugliness will soon go away.
 	 */
-	if (bio_failfast_dev(bio)	!= blk_failfast_dev(rq)		||
-	    bio_failfast_transport(bio)	!= blk_failfast_transport(rq)	||
-	    bio_failfast_driver(bio)	!= blk_failfast_driver(rq))
+	if (!bio_failfast_dev(bio)	 != !blk_failfast_dev(rq)	||
+	    !bio_failfast_transport(bio) != !blk_failfast_transport(rq)	||
+	    !bio_failfast_driver(bio)	 != !blk_failfast_driver(rq))
 		return 0;
 
 	if (!elv_iosched_allow_merge(rq, bio))
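
The replacement lines above compare normalised booleans because the bio and
request accessors return the raw flag bits, which sit at different positions
even when both flags are logically set. A small hedged illustration with
made-up bit positions:

/* The raw flag words differ even though both flags are set, so only the
 * boolean-normalised comparison matches. */
#include <assert.h>

int main(void)
{
	unsigned int bio_flag = 1u << 3;	/* "failfast" bit as the bio stores it */
	unsigned int rq_flag  = 1u << 7;	/* same flag at the request's position */

	assert(bio_flag != rq_flag);		/* old test: spurious mismatch */
	assert(!bio_flag == !rq_flag);		/* new test: both are "set" */
	return 0;
}
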
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index f5e7180..3ff0294 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1627,7 +1627,7 @@
 				drive, dtp->blocks, dtp->spt, dtp->stretch);
 
 		/* sanity check */
-		if (!dtp || setprm.track != dtp->blocks/dtp->spt/2 ||
+		if (setprm.track != dtp->blocks/dtp->spt/2 ||
 		    setprm.head != 2) {
 			redo_fd_request();
 			return -EINVAL;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index fbeefb6..aa1a3d5 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -427,7 +427,12 @@
 	VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_IDENTIFY
 };
 
-static struct virtio_driver virtio_blk = {
+/*
+ * virtio_blk causes spurious section mismatch warning by
+ * simultaneously referring to a __devinit and a __devexit function.
+ * Use __refdata to avoid this warning.
+ */
+static struct virtio_driver __refdata virtio_blk = {
 	.feature_table = features,
 	.feature_table_size = ARRAY_SIZE(features),
 	.driver.name =	KBUILD_MODNAME,
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 4575171..b259040 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -374,7 +374,7 @@
 static void __exit z2_exit(void)
 {
     int i, j;
-    blk_unregister_region(MKDEV(Z2RAM_MAJOR, 0), 256);
+    blk_unregister_region(MKDEV(Z2RAM_MAJOR, 0), Z2MINOR_COUNT);
     unregister_blkdev(Z2RAM_MAJOR, DEVICE_NAME);
     del_gendisk(z2ram_gendisk);
     put_disk(z2ram_gendisk);
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
index c769ef2..408c2af 100644
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -1,7 +1,7 @@
 /*
  * 	cn_queue.c
  *
- * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index fd336c5..08b2500 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -1,7 +1,7 @@
 /*
  * 	connector.c
  *
- * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
@@ -33,7 +33,7 @@
 #include <net/sock.h>
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>");
+MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
 MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector.");
 
 static u32 cn_idx = CN_IDX_CONNECTOR;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index f2c21d5..5eb10c2 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1075,14 +1075,16 @@
  */
 int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int interrupt)
 {
-	struct hid_report_enum *report_enum = hid->report_enum + type;
-	struct hid_driver *hdrv = hid->driver;
+	struct hid_report_enum *report_enum;
+	struct hid_driver *hdrv;
 	struct hid_report *report;
 	unsigned int i;
 	int ret;
 
 	if (!hid || !hid->driver)
 		return -ENODEV;
+	report_enum = hid->report_enum + type;
+	hdrv = hid->driver;
 
 	if (!size) {
 		dbg_hid("empty report\n");
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 9e94215..215b2ad 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -527,8 +527,10 @@
 			goto goodreturn;
 
 		case HIDIOCGCOLLECTIONINDEX:
+			i = field->usage[uref->usage_index].collection_index;
+			unlock_kernel();
 			kfree(uref_multi);
-			return field->usage[uref->usage_index].collection_index;
+			return i;
 		case HIDIOCGUSAGES:
 			for (i = 0; i < uref_multi->num_values; i++)
 				uref_multi->values[i] =
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 6951811..7f87801 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -455,6 +455,7 @@
 
 	rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
 	rq->special = cmd;
+	cmd->rq = rq;
 }
 
 ide_devset_get(multcount, mult_count);
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 013dc59..bc5fb12 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -1064,6 +1064,7 @@
 		tape->best_dsc_rw_freq = config.dsc_rw_frequency;
 		break;
 	case 0x0350:
+		memset(&config, 0, sizeof(config));
 		config.dsc_rw_frequency = (int) tape->best_dsc_rw_freq;
 		config.nr_stages = 1;
 		if (copy_to_user(argp, &config, sizeof(config)))
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 114efd8..1148140 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -608,8 +608,7 @@
 						    p, compat_mode);
 
 			if (_IOC_NR(cmd) == _IOC_NR(EVIOCGNAME(0)))
-				return str_to_user(dev_name(&evdev->dev),
-						   _IOC_SIZE(cmd), p);
+				return str_to_user(dev->name, _IOC_SIZE(cmd), p);
 
 			if (_IOC_NR(cmd) == _IOC_NR(EVIOCGPHYS(0)))
 				return str_to_user(dev->phys, _IOC_SIZE(cmd), p);
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 0e12f89..4cfd084f 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -536,7 +536,7 @@
 	default:
 		if ((cmd & ~IOCSIZE_MASK) == JSIOCGNAME(0)) {
 			int len;
-			const char *name = dev_name(&dev->dev);
+			const char *name = dev->name;
 
 			if (!name)
 				return 0;
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index b868b8d..f155ad8 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -470,20 +470,20 @@
 	status = urb->status;
 
 	switch (status) {
-		case 0:
+	case 0:
 		/* success */
-		break;
-		case -ECONNRESET:
-		case -ENOENT:
-		case -ESHUTDOWN:
-			/* this urb is terminated, clean up */
-			dbg("%s - urb shutting down with status: %d",
-				__func__, status);
-			return;
-		default:
-			dbg("%s - nonzero urb status received: %d",
-				__func__, status);
-			goto exit;
+		return;
+
+	case -ECONNRESET:
+	case -ENOENT:
+	case -ESHUTDOWN:
+		/* this urb is terminated, clean up */
+		dbg("%s - urb shutting down with status: %d", __func__, status);
+		return;
+
+	default:
+		dbg("%s - nonzero urb status received: %d", __func__, status);
+		goto exit;
 	}
 
 exit:
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index df3f8aa..95fe045 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -895,6 +895,13 @@
 };
 
 /*
+ * Amilo Pi 3525 key release for Fn+Volume keys not working
+ */
+static unsigned int atkbd_amilo_pi3525_forced_release_keys[] = {
+	0x20, 0xa0, 0x2e, 0xae, 0x30, 0xb0, -1U
+};
+
+/*
  * Amilo Xi 3650 key release for light touch bar not working
  */
 static unsigned int atkbd_amilo_xi3650_forced_release_keys[] = {
@@ -902,6 +909,13 @@
 };
 
 /*
+ * Soltech TA12 system with broken key release on volume keys and mute key
+ */
+static unsigned int atkdb_soltech_ta12_forced_release_keys[] = {
+	0xa0, 0xae, 0xb0, -1U
+};
+
+/*
  * atkbd_set_keycode_table() initializes keyboard's keycode table
  * according to the selected scancode set
  */
@@ -1568,6 +1582,15 @@
 		.driver_data = atkbd_amilo_pa1510_forced_release_keys,
 	},
 	{
+		.ident = "Fujitsu Amilo Pi 3525",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pi 3525"),
+		},
+		.callback = atkbd_setup_forced_release,
+		.driver_data = atkbd_amilo_pi3525_forced_release_keys,
+	},
+	{
 		.ident = "Fujitsu Amilo Xi 3650",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
@@ -1576,6 +1599,15 @@
 		.callback = atkbd_setup_forced_release,
 		.driver_data = atkbd_amilo_xi3650_forced_release_keys,
 	},
+	{
+		.ident = "Soltech Corporation TA12",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Soltech Corporation"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "TA12"),
+		},
+		.callback = atkbd_setup_forced_release,
+		.driver_data = atkdb_soltech_ta12_forced_release_keys,
+	},
 	{ }
 };
 
diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c
index 6d67af5..21cb755 100644
--- a/drivers/input/misc/pcspkr.c
+++ b/drivers/input/misc/pcspkr.c
@@ -114,7 +114,7 @@
 	return 0;
 }
 
-static int pcspkr_suspend(struct platform_device *dev, pm_message_t state)
+static int pcspkr_suspend(struct device *dev)
 {
 	pcspkr_event(NULL, EV_SND, SND_BELL, 0);
 
@@ -127,14 +127,18 @@
 	pcspkr_event(NULL, EV_SND, SND_BELL, 0);
 }
 
+static struct dev_pm_ops pcspkr_pm_ops = {
+	.suspend = pcspkr_suspend,
+};
+
 static struct platform_driver pcspkr_platform_driver = {
 	.driver		= {
 		.name	= "pcspkr",
 		.owner	= THIS_MODULE,
+		.pm	= &pcspkr_pm_ops,
 	},
 	.probe		= pcspkr_probe,
 	.remove		= __devexit_p(pcspkr_remove),
-	.suspend	= pcspkr_suspend,
 	.shutdown	= pcspkr_shutdown,
 };
 
diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c
index 7c8957d..26e17a9 100644
--- a/drivers/input/misc/wistron_btns.c
+++ b/drivers/input/misc/wistron_btns.c
@@ -646,6 +646,15 @@
 	},
 	{
 		.callback = dmi_matched,
+		.ident = "Maxdata Pro 7000 DX",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "MAXDATA"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Pro 7000"),
+		},
+		.driver_data = keymap_fs_amilo_pro_v2000
+	},
+	{
+		.callback = dmi_matched,
 		.ident = "Fujitsu N3510",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index 95aaac0..b5e478f 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -332,6 +332,7 @@
 	}
 
 	gluebi->vol_id = vi->vol_id;
+	gluebi->ubi_num = vi->ubi_num;
 	mtd->type = MTD_UBIVOLUME;
 	if (!di->ro_mode)
 		mtd->flags = MTD_WRITEABLE;
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index f60895e..a423131 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -781,7 +781,7 @@
 			return -EINVAL;
 		}
 
-		image_seq = be32_to_cpu(ech->ec);
+		image_seq = be32_to_cpu(ech->image_seq);
 		if (!si->image_seq_set) {
 			ubi->image_seq = image_seq;
 			si->image_seq_set = 1;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index c155bd3..b5a7513 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1729,6 +1729,12 @@
 	help
 	  This platform driver is for Micrel KSZ8842 chip.
 
+config KS8851
+	tristate "Micrel KS8851 SPI"
+	depends on SPI
+	help
+	  SPI driver for Micrel KS8851 SPI attached network chip.
+
 config VIA_RHINE
 	tristate "VIA Rhine support"
 	depends on NET_PCI && PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 4b58a59..ead8cab 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -88,6 +88,7 @@
 obj-$(CONFIG_SKY2) += sky2.o
 obj-$(CONFIG_SKFP) += skfp/
 obj-$(CONFIG_KS8842)	+= ks8842.o
+obj-$(CONFIG_KS8851)	+= ks8851.o
 obj-$(CONFIG_VIA_RHINE) += via-rhine.o
 obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
 obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
diff --git a/drivers/net/arm/Kconfig b/drivers/net/arm/Kconfig
index 2895db1..c37ee9e 100644
--- a/drivers/net/arm/Kconfig
+++ b/drivers/net/arm/Kconfig
@@ -63,3 +63,11 @@
 	help
 	  Say Y here if you want to use built-in Ethernet ports
 	  on IXP4xx processor.
+
+config W90P910_ETH
+	tristate "Nuvoton w90p910 Ethernet support"
+	depends on ARM && ARCH_W90X900
+	select PHYLIB
+	help
+	  Say Y here if you want to use built-in Ethernet ports
+	  on w90p910 processor.
diff --git a/drivers/net/arm/Makefile b/drivers/net/arm/Makefile
index 811a3cc..303171f 100644
--- a/drivers/net/arm/Makefile
+++ b/drivers/net/arm/Makefile
@@ -11,3 +11,4 @@
 obj-$(CONFIG_ARM_KS8695_ETHER)	+= ks8695net.o
 obj-$(CONFIG_EP93XX_ETH)	+= ep93xx_eth.o
 obj-$(CONFIG_IXP4XX_ETH)	+= ixp4xx_eth.o
+obj-$(CONFIG_W90P910_ETH)	+= w90p910_ether.o
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c
new file mode 100644
index 0000000..616fb79
--- /dev/null
+++ b/drivers/net/arm/w90p910_ether.c
@@ -0,0 +1,1105 @@
+/*
+ * Copyright (c) 2008-2009 Nuvoton technology corporation.
+ *
+ * Wan ZongShun <mcuos.com@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mii.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#define DRV_MODULE_NAME		"w90p910-emc"
+#define DRV_MODULE_VERSION	"0.1"
+
+/* Ethernet MAC Registers */
+#define REG_CAMCMR		0x00
+#define REG_CAMEN		0x04
+#define REG_CAMM_BASE		0x08
+#define REG_CAML_BASE		0x0c
+#define REG_TXDLSA		0x88
+#define REG_RXDLSA		0x8C
+#define REG_MCMDR		0x90
+#define REG_MIID		0x94
+#define REG_MIIDA		0x98
+#define REG_FFTCR		0x9C
+#define REG_TSDR		0xa0
+#define REG_RSDR		0xa4
+#define REG_DMARFC		0xa8
+#define REG_MIEN		0xac
+#define REG_MISTA		0xb0
+#define REG_CTXDSA		0xcc
+#define REG_CTXBSA		0xd0
+#define REG_CRXDSA		0xd4
+#define REG_CRXBSA		0xd8
+
+/* mac controller bit */
+#define MCMDR_RXON		0x01
+#define MCMDR_ACP		(0x01 << 3)
+#define MCMDR_SPCRC		(0x01 << 5)
+#define MCMDR_TXON		(0x01 << 8)
+#define MCMDR_FDUP		(0x01 << 18)
+#define MCMDR_ENMDC		(0x01 << 19)
+#define MCMDR_OPMOD		(0x01 << 20)
+#define SWR			(0x01 << 24)
+
+/* cam command register */
+#define CAMCMR_AUP		0x01
+#define CAMCMR_AMP		(0x01 << 1)
+#define CAMCMR_ABP		(0x01 << 2)
+#define CAMCMR_CCAM		(0x01 << 3)
+#define CAMCMR_ECMP		(0x01 << 4)
+#define CAM0EN			0x01
+
+/* mac mii controller bit */
+#define MDCCR			(0x0a << 20)
+#define PHYAD			(0x01 << 8)
+#define PHYWR			(0x01 << 16)
+#define PHYBUSY			(0x01 << 17)
+#define PHYPRESP		(0x01 << 18)
+#define CAM_ENTRY_SIZE		0x08
+
+/* rx and tx status */
+#define TXDS_TXCP		(0x01 << 19)
+#define RXDS_CRCE		(0x01 << 17)
+#define RXDS_PTLE		(0x01 << 19)
+#define RXDS_RXGD		(0x01 << 20)
+#define RXDS_ALIE		(0x01 << 21)
+#define RXDS_RP			(0x01 << 22)
+
+/* mac interrupt status */
+#define MISTA_EXDEF		(0x01 << 19)
+#define MISTA_TXBERR		(0x01 << 24)
+#define MISTA_TDU		(0x01 << 23)
+#define MISTA_RDU		(0x01 << 10)
+#define MISTA_RXBERR		(0x01 << 11)
+
+#define ENSTART			0x01
+#define ENRXINTR		0x01
+#define ENRXGD			(0x01 << 4)
+#define ENRXBERR		(0x01 << 11)
+#define ENTXINTR		(0x01 << 16)
+#define ENTXCP			(0x01 << 18)
+#define ENTXABT			(0x01 << 21)
+#define ENTXBERR		(0x01 << 24)
+#define ENMDC			(0x01 << 19)
+#define PHYBUSY			(0x01 << 17)
+#define MDCCR_VAL		0xa00000
+
+/* rx and tx owner bit */
+#define RX_OWEN_DMA		(0x01 << 31)
+#define RX_OWEN_CPU		(~(0x03 << 30))
+#define TX_OWEN_DMA		(0x01 << 31)
+#define TX_OWEN_CPU		(~(0x01 << 31))
+
+/* tx frame desc controller bit */
+#define MACTXINTEN		0x04
+#define CRCMODE			0x02
+#define PADDINGMODE		0x01
+
+/* fftcr controller bit */
+#define TXTHD 			(0x03 << 8)
+#define BLENGTH			(0x01 << 20)
+
+/* global setting for driver */
+#define RX_DESC_SIZE		50
+#define TX_DESC_SIZE		10
+#define MAX_RBUFF_SZ		0x600
+#define MAX_TBUFF_SZ		0x600
+#define TX_TIMEOUT		50
+#define DELAY			1000
+#define CAM0			0x0
+
+static int w90p910_mdio_read(struct net_device *dev, int phy_id, int reg);
+
+struct w90p910_rxbd {
+	unsigned int sl;
+	unsigned int buffer;
+	unsigned int reserved;
+	unsigned int next;
+};
+
+struct w90p910_txbd {
+	unsigned int mode;
+	unsigned int buffer;
+	unsigned int sl;
+	unsigned int next;
+};
+
+struct recv_pdesc {
+	struct w90p910_rxbd desclist[RX_DESC_SIZE];
+	char recv_buf[RX_DESC_SIZE][MAX_RBUFF_SZ];
+};
+
+struct tran_pdesc {
+	struct w90p910_txbd desclist[TX_DESC_SIZE];
+	char tran_buf[TX_DESC_SIZE][MAX_TBUFF_SZ];
+};
+
+struct  w90p910_ether {
+	struct recv_pdesc *rdesc;
+	struct recv_pdesc *rdesc_phys;
+	struct tran_pdesc *tdesc;
+	struct tran_pdesc *tdesc_phys;
+	struct net_device_stats stats;
+	struct platform_device *pdev;
+	struct sk_buff *skb;
+	struct clk *clk;
+	struct clk *rmiiclk;
+	struct mii_if_info mii;
+	struct timer_list check_timer;
+	void __iomem *reg;
+	unsigned int rxirq;
+	unsigned int txirq;
+	unsigned int cur_tx;
+	unsigned int cur_rx;
+	unsigned int finish_tx;
+	unsigned int rx_packets;
+	unsigned int rx_bytes;
+	unsigned int start_tx_ptr;
+	unsigned int start_rx_ptr;
+	unsigned int linkflag;
+	spinlock_t lock;
+};
+
+static void update_linkspeed_register(struct net_device *dev,
+				unsigned int speed, unsigned int duplex)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	unsigned int val;
+
+	val = __raw_readl(ether->reg + REG_MCMDR);
+
+	if (speed == SPEED_100) {
+		/* 100 full/half duplex */
+		if (duplex == DUPLEX_FULL) {
+			val |= (MCMDR_OPMOD | MCMDR_FDUP);
+		} else {
+			val |= MCMDR_OPMOD;
+			val &= ~MCMDR_FDUP;
+		}
+	} else {
+		/* 10 full/half duplex */
+		if (duplex == DUPLEX_FULL) {
+			val |= MCMDR_FDUP;
+			val &= ~MCMDR_OPMOD;
+		} else {
+			val &= ~(MCMDR_FDUP | MCMDR_OPMOD);
+		}
+	}
+
+	__raw_writel(val, ether->reg + REG_MCMDR);
+}
+
+static void update_linkspeed(struct net_device *dev)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	struct platform_device *pdev;
+	unsigned int bmsr, bmcr, lpa, speed, duplex;
+
+	pdev = ether->pdev;
+
+	if (!mii_link_ok(&ether->mii)) {
+		ether->linkflag = 0x0;
+		netif_carrier_off(dev);
+		dev_warn(&pdev->dev, "%s: Link down.\n", dev->name);
+		return;
+	}
+
+	if (ether->linkflag == 1)
+		return;
+
+	bmsr = w90p910_mdio_read(dev, ether->mii.phy_id, MII_BMSR);
+	bmcr = w90p910_mdio_read(dev, ether->mii.phy_id, MII_BMCR);
+
+	if (bmcr & BMCR_ANENABLE) {
+		if (!(bmsr & BMSR_ANEGCOMPLETE))
+			return;
+
+		lpa = w90p910_mdio_read(dev, ether->mii.phy_id, MII_LPA);
+
+		if ((lpa & LPA_100FULL) || (lpa & LPA_100HALF))
+			speed = SPEED_100;
+		else
+			speed = SPEED_10;
+
+		if ((lpa & LPA_100FULL) || (lpa & LPA_10FULL))
+			duplex = DUPLEX_FULL;
+		else
+			duplex = DUPLEX_HALF;
+
+	} else {
+		speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
+		duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+	}
+
+	update_linkspeed_register(dev, speed, duplex);
+
+	dev_info(&pdev->dev, "%s: Link now %i-%s\n", dev->name, speed,
+			(duplex == DUPLEX_FULL) ? "FullDuplex" : "HalfDuplex");
+	ether->linkflag = 0x01;
+
+	netif_carrier_on(dev);
+}
+
+static void w90p910_check_link(unsigned long dev_id)
+{
+	struct net_device *dev = (struct net_device *) dev_id;
+	struct w90p910_ether *ether = netdev_priv(dev);
+
+	update_linkspeed(dev);
+	mod_timer(&ether->check_timer, jiffies + msecs_to_jiffies(1000));
+}
+
+static void w90p910_write_cam(struct net_device *dev,
+				unsigned int x, unsigned char *pval)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	unsigned int msw, lsw;
+
+	msw = (pval[0] << 24) | (pval[1] << 16) | (pval[2] << 8) | pval[3];
+
+	lsw = (pval[4] << 24) | (pval[5] << 16);
+
+	__raw_writel(lsw, ether->reg + REG_CAML_BASE + x * CAM_ENTRY_SIZE);
+	__raw_writel(msw, ether->reg + REG_CAMM_BASE + x * CAM_ENTRY_SIZE);
+}
+
+static void w90p910_init_desc(struct net_device *dev)
+{
+	struct w90p910_ether *ether;
+	struct w90p910_txbd  *tdesc, *tdesc_phys;
+	struct w90p910_rxbd  *rdesc, *rdesc_phys;
+	unsigned int i, j;
+
+	ether = netdev_priv(dev);
+
+	ether->tdesc = (struct tran_pdesc *)
+			dma_alloc_coherent(NULL, sizeof(struct tran_pdesc),
+				(dma_addr_t *) &ether->tdesc_phys, GFP_KERNEL);
+
+	ether->rdesc = (struct recv_pdesc *)
+			dma_alloc_coherent(NULL, sizeof(struct recv_pdesc),
+				(dma_addr_t *) &ether->rdesc_phys, GFP_KERNEL);
+
+	for (i = 0; i < TX_DESC_SIZE; i++) {
+		tdesc = &(ether->tdesc->desclist[i]);
+
+		j = ((i + 1) / TX_DESC_SIZE);
+
+		if (j != 0) {
+			tdesc_phys = &(ether->tdesc_phys->desclist[0]);
+			ether->start_tx_ptr = (unsigned int)tdesc_phys;
+			tdesc->next = (unsigned int)ether->start_tx_ptr;
+		} else {
+			tdesc_phys = &(ether->tdesc_phys->desclist[i+1]);
+			tdesc->next = (unsigned int)tdesc_phys;
+		}
+
+		tdesc->buffer = (unsigned int)ether->tdesc_phys->tran_buf[i];
+		tdesc->sl = 0;
+		tdesc->mode = 0;
+	}
+
+	for (i = 0; i < RX_DESC_SIZE; i++) {
+		rdesc = &(ether->rdesc->desclist[i]);
+
+		j = ((i + 1) / RX_DESC_SIZE);
+
+		if (j != 0) {
+			rdesc_phys = &(ether->rdesc_phys->desclist[0]);
+			ether->start_rx_ptr = (unsigned int)rdesc_phys;
+			rdesc->next = (unsigned int)ether->start_rx_ptr;
+		} else {
+			rdesc_phys = &(ether->rdesc_phys->desclist[i+1]);
+			rdesc->next = (unsigned int)rdesc_phys;
+		}
+
+		rdesc->sl = RX_OWEN_DMA;
+		rdesc->buffer = (unsigned int)ether->rdesc_phys->recv_buf[i];
+	  }
+}
+
+static void w90p910_set_fifo_threshold(struct net_device *dev)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	unsigned int val;
+
+	val = TXTHD | BLENGTH;
+	__raw_writel(val, ether->reg + REG_FFTCR);
+}
+
+static void w90p910_return_default_idle(struct net_device *dev)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	unsigned int val;
+
+	val = __raw_readl(ether->reg + REG_MCMDR);
+	val |= SWR;
+	__raw_writel(val, ether->reg + REG_MCMDR);
+}
+
+static void w90p910_trigger_rx(struct net_device *dev)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+
+	__raw_writel(ENSTART, ether->reg + REG_RSDR);
+}
+
+static void w90p910_trigger_tx(struct net_device *dev)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+
+	__raw_writel(ENSTART, ether->reg + REG_TSDR);
+}
+
+static void w90p910_enable_mac_interrupt(struct net_device *dev)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	unsigned int val;
+
+	val = ENTXINTR | ENRXINTR | ENRXGD | ENTXCP;
+	val |= ENTXBERR | ENRXBERR | ENTXABT;
+
+	__raw_writel(val, ether->reg + REG_MIEN);
+}
+
+static void w90p910_get_and_clear_int(struct net_device *dev,
+							unsigned int *val)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+
+	*val = __raw_readl(ether->reg + REG_MISTA);
+	__raw_writel(*val, ether->reg + REG_MISTA);
+}
+
+static void w90p910_set_global_maccmd(struct net_device *dev)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	unsigned int val;
+
+	val = __raw_readl(ether->reg + REG_MCMDR);
+	val |= MCMDR_SPCRC | MCMDR_ENMDC | MCMDR_ACP | ENMDC;
+	__raw_writel(val, ether->reg + REG_MCMDR);
+}
+
+static void w90p910_enable_cam(struct net_device *dev)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	unsigned int val;
+
+	w90p910_write_cam(dev, CAM0, dev->dev_addr);
+
+	val = __raw_readl(ether->reg + REG_CAMEN);
+	val |= CAM0EN;
+	__raw_writel(val, ether->reg + REG_CAMEN);
+}
+
+static void w90p910_enable_cam_command(struct net_device *dev)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	unsigned int val;
+
+	val = CAMCMR_ECMP | CAMCMR_ABP | CAMCMR_AMP;
+	__raw_writel(val, ether->reg + REG_CAMCMR);
+}
+
+static void w90p910_enable_tx(struct net_device *dev, unsigned int enable)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	unsigned int val;
+
+	val = __raw_readl(ether->reg + REG_MCMDR);
+
+	if (enable)
+		val |= MCMDR_TXON;
+	else
+		val &= ~MCMDR_TXON;
+
+	__raw_writel(val, ether->reg + REG_MCMDR);
+}
+
+static void w90p910_enable_rx(struct net_device *dev, unsigned int enable)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	unsigned int val;
+
+	val = __raw_readl(ether->reg + REG_MCMDR);
+
+	if (enable)
+		val |= MCMDR_RXON;
+	else
+		val &= ~MCMDR_RXON;
+
+	__raw_writel(val, ether->reg + REG_MCMDR);
+}
+
+static void w90p910_set_curdest(struct net_device *dev)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+
+	__raw_writel(ether->start_rx_ptr, ether->reg + REG_RXDLSA);
+	__raw_writel(ether->start_tx_ptr, ether->reg + REG_TXDLSA);
+}
+
+static void w90p910_reset_mac(struct net_device *dev)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+
+	spin_lock(&ether->lock);
+
+	w90p910_enable_tx(dev, 0);
+	w90p910_enable_rx(dev, 0);
+	w90p910_set_fifo_threshold(dev);
+	w90p910_return_default_idle(dev);
+
+	if (!netif_queue_stopped(dev))
+		netif_stop_queue(dev);
+
+	w90p910_init_desc(dev);
+
+	dev->trans_start = jiffies;
+	ether->cur_tx = 0x0;
+	ether->finish_tx = 0x0;
+	ether->cur_rx = 0x0;
+
+	w90p910_set_curdest(dev);
+	w90p910_enable_cam(dev);
+	w90p910_enable_cam_command(dev);
+	w90p910_enable_mac_interrupt(dev);
+	w90p910_enable_tx(dev, 1);
+	w90p910_enable_rx(dev, 1);
+	w90p910_trigger_tx(dev);
+	w90p910_trigger_rx(dev);
+
+	dev->trans_start = jiffies;
+
+	if (netif_queue_stopped(dev))
+		netif_wake_queue(dev);
+
+	spin_unlock(&ether->lock);
+}
+
+static void w90p910_mdio_write(struct net_device *dev,
+					int phy_id, int reg, int data)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	struct platform_device *pdev;
+	unsigned int val, i;
+
+	pdev = ether->pdev;
+
+	__raw_writel(data, ether->reg + REG_MIID);
+
+	val = (phy_id << 0x08) | reg;
+	val |= PHYBUSY | PHYWR | MDCCR_VAL;
+	__raw_writel(val, ether->reg + REG_MIIDA);
+
+	for (i = 0; i < DELAY; i++) {
+		if ((__raw_readl(ether->reg + REG_MIIDA) & PHYBUSY) == 0)
+			break;
+	}
+
+	if (i == DELAY)
+		dev_warn(&pdev->dev, "mdio write timed out\n");
+}
+
+static int w90p910_mdio_read(struct net_device *dev, int phy_id, int reg)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	struct platform_device *pdev;
+	unsigned int val, i, data;
+
+	pdev = ether->pdev;
+
+	val = (phy_id << 0x08) | reg;
+	val |= PHYBUSY | MDCCR_VAL;
+	__raw_writel(val, ether->reg + REG_MIIDA);
+
+	for (i = 0; i < DELAY; i++) {
+		if ((__raw_readl(ether->reg + REG_MIIDA) & PHYBUSY) == 0)
+			break;
+	}
+
+	if (i == DELAY) {
+		dev_warn(&pdev->dev, "mdio read timed out\n");
+		data = 0xffff;
+	} else {
+		data = __raw_readl(ether->reg + REG_MIID);
+	}
+
+	return data;
+}
+
+static int set_mac_address(struct net_device *dev, void *addr)
+{
+	struct sockaddr *address = addr;
+
+	if (!is_valid_ether_addr(address->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
+	w90p910_write_cam(dev, CAM0, dev->dev_addr);
+
+	return 0;
+}
+
+static int w90p910_ether_close(struct net_device *dev)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+
+	dma_free_writecombine(NULL, sizeof(struct w90p910_rxbd),
+				ether->rdesc, (dma_addr_t)ether->rdesc_phys);
+	dma_free_writecombine(NULL, sizeof(struct w90p910_txbd),
+				ether->tdesc, (dma_addr_t)ether->tdesc_phys);
+
+	netif_stop_queue(dev);
+
+	del_timer_sync(&ether->check_timer);
+	clk_disable(ether->rmiiclk);
+	clk_disable(ether->clk);
+
+	free_irq(ether->txirq, dev);
+	free_irq(ether->rxirq, dev);
+
+	return 0;
+}
+
+static struct net_device_stats *w90p910_ether_stats(struct net_device *dev)
+{
+	struct w90p910_ether *ether;
+
+	ether = netdev_priv(dev);
+
+	return &ether->stats;
+}
+
+static int w90p910_send_frame(struct net_device *dev,
+					unsigned char *data, int length)
+{
+	struct w90p910_ether *ether;
+	struct w90p910_txbd *txbd;
+	struct platform_device *pdev;
+	unsigned char *buffer;
+
+	ether = netdev_priv(dev);
+	pdev = ether->pdev;
+
+	txbd = &ether->tdesc->desclist[ether->cur_tx];
+	buffer = ether->tdesc->tran_buf[ether->cur_tx];
+	if (length > 1514) {
+		dev_err(&pdev->dev, "send data %d bytes, check it\n", length);
+		length = 1514;
+	}
+
+	txbd->sl = length & 0xFFFF;
+
+	memcpy(buffer, data, length);
+
+	txbd->mode = TX_OWEN_DMA | PADDINGMODE | CRCMODE | MACTXINTEN;
+
+	w90p910_enable_tx(dev, 1);
+
+	w90p910_trigger_tx(dev);
+
+	ether->cur_tx = (ether->cur_tx+1) % TX_DESC_SIZE;
+	txbd = &ether->tdesc->desclist[ether->cur_tx];
+
+	dev->trans_start = jiffies;
+
+	if (txbd->mode & TX_OWEN_DMA)
+		netif_stop_queue(dev);
+
+	return 0;
+}
+
+static int w90p910_ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+
+	if (!(w90p910_send_frame(dev, skb->data, skb->len))) {
+		ether->skb = skb;
+		dev_kfree_skb_irq(skb);
+		return 0;
+	}
+	return -1;
+}
+
+static irqreturn_t w90p910_tx_interrupt(int irq, void *dev_id)
+{
+	struct w90p910_ether *ether;
+	struct w90p910_txbd  *txbd;
+	struct platform_device *pdev;
+	struct tran_pdesc *tran_pdesc;
+	struct net_device *dev;
+	unsigned int cur_entry, entry, status;
+
+	dev = (struct net_device *)dev_id;
+	ether = netdev_priv(dev);
+	pdev = ether->pdev;
+
+	spin_lock(&ether->lock);
+
+	w90p910_get_and_clear_int(dev, &status);
+
+	cur_entry = __raw_readl(ether->reg + REG_CTXDSA);
+
+	tran_pdesc = ether->tdesc_phys;
+	entry = (unsigned int)(&tran_pdesc->desclist[ether->finish_tx]);
+
+	while (entry != cur_entry) {
+		txbd = &ether->tdesc->desclist[ether->finish_tx];
+
+		ether->finish_tx = (ether->finish_tx + 1) % TX_DESC_SIZE;
+
+		if (txbd->sl & TXDS_TXCP) {
+			ether->stats.tx_packets++;
+			ether->stats.tx_bytes += txbd->sl & 0xFFFF;
+		} else {
+			ether->stats.tx_errors++;
+		}
+
+		txbd->sl = 0x0;
+		txbd->mode = 0x0;
+
+		if (netif_queue_stopped(dev))
+			netif_wake_queue(dev);
+
+		entry = (unsigned int)(&tran_pdesc->desclist[ether->finish_tx]);
+	}
+
+	if (status & MISTA_EXDEF) {
+		dev_err(&pdev->dev, "emc defer exceed interrupt\n");
+	} else if (status & MISTA_TXBERR) {
+		dev_err(&pdev->dev, "emc bus error interrupt\n");
+		w90p910_reset_mac(dev);
+	} else if (status & MISTA_TDU) {
+		if (netif_queue_stopped(dev))
+			netif_wake_queue(dev);
+	}
+
+	spin_unlock(&ether->lock);
+
+	return IRQ_HANDLED;
+}
+
+static void netdev_rx(struct net_device *dev)
+{
+	struct w90p910_ether *ether;
+	struct w90p910_rxbd *rxbd;
+	struct platform_device *pdev;
+	struct recv_pdesc *rdesc_phys;
+	struct sk_buff *skb;
+	unsigned char *data;
+	unsigned int length, status, val, entry;
+
+	ether = netdev_priv(dev);
+	pdev = ether->pdev;
+	rdesc_phys = ether->rdesc_phys;
+
+	rxbd = &ether->rdesc->desclist[ether->cur_rx];
+
+	do {
+		val = __raw_readl(ether->reg + REG_CRXDSA);
+		entry = (unsigned int)&rdesc_phys->desclist[ether->cur_rx];
+
+		if (val == entry)
+			break;
+
+		status = rxbd->sl;
+		length = status & 0xFFFF;
+
+		if (status & RXDS_RXGD) {
+			data = ether->rdesc->recv_buf[ether->cur_rx];
+			skb = dev_alloc_skb(length+2);
+			if (!skb) {
+				dev_err(&pdev->dev, "get skb buffer error\n");
+				ether->stats.rx_dropped++;
+				return;
+			}
+
+			skb->dev = dev;
+			skb_reserve(skb, 2);
+			skb_put(skb, length);
+			skb_copy_to_linear_data(skb, data, length);
+			skb->protocol = eth_type_trans(skb, dev);
+			ether->stats.rx_packets++;
+			ether->stats.rx_bytes += length;
+			netif_rx(skb);
+		} else {
+			ether->stats.rx_errors++;
+
+			if (status & RXDS_RP) {
+				dev_err(&pdev->dev, "rx runt err\n");
+				ether->stats.rx_length_errors++;
+			} else if (status & RXDS_CRCE) {
+				dev_err(&pdev->dev, "rx crc err\n");
+				ether->stats.rx_crc_errors++;
+			}
+
+			if (status & RXDS_ALIE) {
+				dev_err(&pdev->dev, "rx alignment err\n");
+				ether->stats.rx_frame_errors++;
+			} else if (status & RXDS_PTLE) {
+				dev_err(&pdev->dev, "rx longer err\n");
+				ether->stats.rx_over_errors++;
+			}
+		}
+
+		rxbd->sl = RX_OWEN_DMA;
+		rxbd->reserved = 0x0;
+		ether->cur_rx = (ether->cur_rx+1) % RX_DESC_SIZE;
+		rxbd = &ether->rdesc->desclist[ether->cur_rx];
+
+		dev->last_rx = jiffies;
+	} while (1);
+}
+
+static irqreturn_t w90p910_rx_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev;
+	struct w90p910_ether  *ether;
+	struct platform_device *pdev;
+	unsigned int status;
+
+	dev = (struct net_device *)dev_id;
+	ether = netdev_priv(dev);
+	pdev = ether->pdev;
+
+	spin_lock(&ether->lock);
+
+	w90p910_get_and_clear_int(dev, &status);
+
+	if (status & MISTA_RDU) {
+		netdev_rx(dev);
+
+		w90p910_trigger_rx(dev);
+
+		spin_unlock(&ether->lock);
+		return IRQ_HANDLED;
+	} else if (status & MISTA_RXBERR) {
+		dev_err(&pdev->dev, "emc rx bus error\n");
+		w90p910_reset_mac(dev);
+	}
+
+	netdev_rx(dev);
+	spin_unlock(&ether->lock);
+	return IRQ_HANDLED;
+}
+
+static int w90p910_ether_open(struct net_device *dev)
+{
+	struct w90p910_ether *ether;
+	struct platform_device *pdev;
+
+	ether = netdev_priv(dev);
+	pdev = ether->pdev;
+
+	w90p910_reset_mac(dev);
+	w90p910_set_fifo_threshold(dev);
+	w90p910_set_curdest(dev);
+	w90p910_enable_cam(dev);
+	w90p910_enable_cam_command(dev);
+	w90p910_enable_mac_interrupt(dev);
+	w90p910_set_global_maccmd(dev);
+	w90p910_enable_rx(dev, 1);
+
+	ether->rx_packets = 0x0;
+	ether->rx_bytes = 0x0;
+
+	if (request_irq(ether->txirq, w90p910_tx_interrupt,
+						0x0, pdev->name, dev)) {
+		dev_err(&pdev->dev, "register irq tx failed\n");
+		return -EAGAIN;
+	}
+
+	if (request_irq(ether->rxirq, w90p910_rx_interrupt,
+						0x0, pdev->name, dev)) {
+		dev_err(&pdev->dev, "register irq rx failed\n");
+		return -EAGAIN;
+	}
+
+	mod_timer(&ether->check_timer, jiffies + msecs_to_jiffies(1000));
+	netif_start_queue(dev);
+	w90p910_trigger_rx(dev);
+
+	dev_info(&pdev->dev, "%s is OPENED\n", dev->name);
+
+	return 0;
+}
+
+static void w90p910_ether_set_multicast_list(struct net_device *dev)
+{
+	struct w90p910_ether *ether;
+	unsigned int rx_mode;
+
+	ether = netdev_priv(dev);
+
+	if (dev->flags & IFF_PROMISC)
+		rx_mode = CAMCMR_AUP | CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP;
+	else if ((dev->flags & IFF_ALLMULTI) || dev->mc_list)
+		rx_mode = CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP;
+	else
+		rx_mode = CAMCMR_ECMP | CAMCMR_ABP;
+	__raw_writel(rx_mode, ether->reg + REG_CAMCMR);
+}
+
+static int w90p910_ether_ioctl(struct net_device *dev,
+						struct ifreq *ifr, int cmd)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	struct mii_ioctl_data *data = if_mii(ifr);
+
+	return generic_mii_ioctl(&ether->mii, data, cmd, NULL);
+}
+
+static void w90p910_get_drvinfo(struct net_device *dev,
+					struct ethtool_drvinfo *info)
+{
+	strcpy(info->driver, DRV_MODULE_NAME);
+	strcpy(info->version, DRV_MODULE_VERSION);
+}
+
+static int w90p910_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	return mii_ethtool_gset(&ether->mii, cmd);
+}
+
+static int w90p910_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	return mii_ethtool_sset(&ether->mii, cmd);
+}
+
+static int w90p910_nway_reset(struct net_device *dev)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	return mii_nway_restart(&ether->mii);
+}
+
+static u32 w90p910_get_link(struct net_device *dev)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	return mii_link_ok(&ether->mii);
+}
+
+static const struct ethtool_ops w90p910_ether_ethtool_ops = {
+	.get_settings	= w90p910_get_settings,
+	.set_settings	= w90p910_set_settings,
+	.get_drvinfo	= w90p910_get_drvinfo,
+	.nway_reset	= w90p910_nway_reset,
+	.get_link	= w90p910_get_link,
+};
+
+static const struct net_device_ops w90p910_ether_netdev_ops = {
+	.ndo_open		= w90p910_ether_open,
+	.ndo_stop		= w90p910_ether_close,
+	.ndo_start_xmit		= w90p910_ether_start_xmit,
+	.ndo_get_stats		= w90p910_ether_stats,
+	.ndo_set_multicast_list	= w90p910_ether_set_multicast_list,
+	.ndo_set_mac_address	= set_mac_address,
+	.ndo_do_ioctl		= w90p910_ether_ioctl,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_change_mtu		= eth_change_mtu,
+};
+
+static void __init get_mac_address(struct net_device *dev)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+	struct platform_device *pdev;
+	char addr[6];
+
+	pdev = ether->pdev;
+
+	addr[0] = 0x00;
+	addr[1] = 0x02;
+	addr[2] = 0xac;
+	addr[3] = 0x55;
+	addr[4] = 0x88;
+	addr[5] = 0xa8;
+
+	if (is_valid_ether_addr(addr))
+		memcpy(dev->dev_addr, &addr, 0x06);
+	else
+		dev_err(&pdev->dev, "invalid mac address\n");
+}
+
+static int w90p910_ether_setup(struct net_device *dev)
+{
+	struct w90p910_ether *ether = netdev_priv(dev);
+
+	ether_setup(dev);
+	dev->netdev_ops = &w90p910_ether_netdev_ops;
+	dev->ethtool_ops = &w90p910_ether_ethtool_ops;
+
+	dev->tx_queue_len = 16;
+	dev->dma = 0x0;
+	dev->watchdog_timeo = TX_TIMEOUT;
+
+	get_mac_address(dev);
+
+	spin_lock_init(&ether->lock);
+
+	ether->cur_tx = 0x0;
+	ether->cur_rx = 0x0;
+	ether->finish_tx = 0x0;
+	ether->linkflag = 0x0;
+	ether->mii.phy_id = 0x01;
+	ether->mii.phy_id_mask = 0x1f;
+	ether->mii.reg_num_mask = 0x1f;
+	ether->mii.dev = dev;
+	ether->mii.mdio_read = w90p910_mdio_read;
+	ether->mii.mdio_write = w90p910_mdio_write;
+
+	setup_timer(&ether->check_timer, w90p910_check_link,
+						(unsigned long)dev);
+
+	return 0;
+}
+
+static int __devinit w90p910_ether_probe(struct platform_device *pdev)
+{
+	struct w90p910_ether *ether;
+	struct net_device *dev;
+	struct resource *res;
+	int error;
+
+	dev = alloc_etherdev(sizeof(struct w90p910_ether));
+	if (!dev)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "failed to get I/O memory\n");
+		error = -ENXIO;
+		goto failed_free;
+	}
+
+	res = request_mem_region(res->start, resource_size(res), pdev->name);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "failed to request I/O memory\n");
+		error = -EBUSY;
+		goto failed_free;
+	}
+
+	ether = netdev_priv(dev);
+
+	ether->reg = ioremap(res->start, resource_size(res));
+	if (ether->reg == NULL) {
+		dev_err(&pdev->dev, "failed to remap I/O memory\n");
+		error = -ENXIO;
+		goto failed_free_mem;
+	}
+
+	ether->txirq = platform_get_irq(pdev, 0);
+	if (ether->txirq < 0) {
+		dev_err(&pdev->dev, "failed to get ether tx irq\n");
+		error = -ENXIO;
+		goto failed_free_io;
+	}
+
+	ether->rxirq = platform_get_irq(pdev, 1);
+	if (ether->rxirq < 0) {
+		dev_err(&pdev->dev, "failed to get ether rx irq\n");
+		error = -ENXIO;
+		goto failed_free_txirq;
+	}
+
+	platform_set_drvdata(pdev, dev);
+
+	ether->clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(ether->clk)) {
+		dev_err(&pdev->dev, "failed to get ether clock\n");
+		error = PTR_ERR(ether->clk);
+		goto failed_free_rxirq;
+	}
+
+	ether->rmiiclk = clk_get(&pdev->dev, "RMII");
+	if (IS_ERR(ether->rmiiclk)) {
+		dev_err(&pdev->dev, "failed to get ether clock\n");
+		error = PTR_ERR(ether->rmiiclk);
+		goto failed_put_clk;
+	}
+
+	ether->pdev = pdev;
+
+	w90p910_ether_setup(dev);
+
+	error = register_netdev(dev);
+	if (error != 0) {
+		dev_err(&pdev->dev, "Register EMC w90p910 FAILED\n");
+		error = -ENODEV;
+		goto failed_put_rmiiclk;
+	}
+
+	return 0;
+failed_put_rmiiclk:
+	clk_put(ether->rmiiclk);
+failed_put_clk:
+	clk_put(ether->clk);
+failed_free_rxirq:
+	free_irq(ether->rxirq, pdev);
+	platform_set_drvdata(pdev, NULL);
+failed_free_txirq:
+	free_irq(ether->txirq, pdev);
+failed_free_io:
+	iounmap(ether->reg);
+failed_free_mem:
+	release_mem_region(res->start, resource_size(res));
+failed_free:
+	free_netdev(dev);
+	return error;
+}
+
+static int __devexit w90p910_ether_remove(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct w90p910_ether *ether = netdev_priv(dev);
+
+	unregister_netdev(dev);
+	clk_put(ether->rmiiclk);
+	clk_put(ether->clk);
+	del_timer_sync(&ether->check_timer);
+	platform_set_drvdata(pdev, NULL);
+	free_netdev(dev);
+	return 0;
+}
+
+static struct platform_driver w90p910_ether_driver = {
+	.probe		= w90p910_ether_probe,
+	.remove		= __devexit_p(w90p910_ether_remove),
+	.driver		= {
+		.name	= "w90p910-emc",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init w90p910_ether_init(void)
+{
+	return platform_driver_register(&w90p910_ether_driver);
+}
+
+static void __exit w90p910_ether_exit(void)
+{
+	platform_driver_unregister(&w90p910_ether_driver);
+}
+
+module_init(w90p910_ether_init);
+module_exit(w90p910_ether_exit);
+
+MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
+MODULE_DESCRIPTION("w90p910 MAC driver!");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:w90p910-emc");
+
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
index e1658ef..2a1120a 100644
--- a/drivers/net/atl1c/atl1c.h
+++ b/drivers/net/atl1c/atl1c.h
@@ -188,14 +188,14 @@
 #define RRS_HDS_TYPE_DATA	2
 
 #define RRS_IS_NO_HDS_TYPE(flag) \
-	(((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK == 0)
+	((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == 0)
 
 #define RRS_IS_HDS_HEAD(flag) \
-	(((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK == \
+	((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == \
 			RRS_HDS_TYPE_HEAD)
 
 #define RRS_IS_HDS_DATA(flag) \
-	(((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK == \
+	((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == \
 			RRS_HDS_TYPE_DATA)
 
 /* rrs word 3 bit 0:31 */
@@ -245,7 +245,7 @@
 #define RRS_PACKET_TYPE_802_3  	1
 #define RRS_PACKET_TYPE_ETH	0
 #define RRS_PACKET_IS_ETH(word) \
-	(((word) >> RRS_PACKET_TYPE_SHIFT) & RRS_PACKET_TYPE_MASK == \
+	((((word) >> RRS_PACKET_TYPE_SHIFT) & RRS_PACKET_TYPE_MASK) == \
 			RRS_PACKET_TYPE_ETH)
 #define RRS_RXD_IS_VALID(word) \
 	((((word) >> RRS_RXD_UPDATED_SHIFT) & RRS_RXD_UPDATED_MASK) == 1)
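
The extra parentheses matter because == binds tighter than & in C, so the old
macro bodies masked against the result of the comparison instead of comparing
the masked value. A minimal hedged demonstration (the mask and values are
simplified stand-ins for the RRS definitions above):

/* `==` binds tighter than `&`, so the old form evaluated flag & (MASK == value). */
#include <assert.h>

#define TYPE_MASK	0x3

int main(void)
{
	unsigned int flag = 0x2;		/* type field holds 2 ("data") */

	/* Old form parses as flag & (TYPE_MASK == 0), i.e. flag & 0. */
	assert((flag & TYPE_MASK == 0) == 0);
	/* Fixed form masks first, then compares, as the new macros do. */
	assert((flag & TYPE_MASK) == 2);
	return 0;
}

Most compilers flag the old form with a parentheses warning, which is
typically how such precedence slips are caught.
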
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index cd547a2..a383122 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -1689,7 +1689,7 @@
 		if (likely(RRS_RXD_IS_VALID(rrs->word3))) {
 			rfd_num = (rrs->word0 >> RRS_RX_RFD_CNT_SHIFT) &
 				RRS_RX_RFD_CNT_MASK;
-			if (unlikely(rfd_num) != 1)
+			if (unlikely(rfd_num != 1))
 				/* TODO support mul rfd*/
 				if (netif_msg_rx_err(adapter))
 					dev_warn(&pdev->dev,
diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c
index ed648ac..2ee581a 100644
--- a/drivers/net/bnx2x_link.c
+++ b/drivers/net/bnx2x_link.c
@@ -4212,13 +4212,14 @@
 u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
 			      u8 *version, u16 len)
 {
-	struct bnx2x *bp = params->bp;
+	struct bnx2x *bp;
 	u32 ext_phy_type = 0;
 	u32 spirom_ver = 0;
 	u8 status = 0 ;
 
 	if (version == NULL || params == NULL)
 		return -EINVAL;
+	bp = params->bp;
 
 	spirom_ver = REG_RD(bp, params->shmem_base +
 		   offsetof(struct shmem_region,
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index d927f71..aa1be1f 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1459,8 +1459,16 @@
 	 * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond
 	 */
 	if (bond->slave_cnt == 0) {
-		if (slave_dev->type != ARPHRD_ETHER)
-			bond_setup_by_slave(bond_dev, slave_dev);
+		if (bond_dev->type != slave_dev->type) {
+			dev_close(bond_dev);
+			pr_debug("%s: change device type from %d to %d\n",
+				bond_dev->name, bond_dev->type, slave_dev->type);
+			if (slave_dev->type != ARPHRD_ETHER)
+				bond_setup_by_slave(bond_dev, slave_dev);
+			else
+				ether_setup(bond_dev);
+			dev_open(bond_dev);
+		}
 	} else if (bond_dev->type != slave_dev->type) {
 		pr_err(DRV_NAME ": %s ether type (%d) is different "
 			"from other slaves (%d), can not enslave it.\n",
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 574dadd..9e4283a 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -346,7 +346,7 @@
 	skb = dev_alloc_skb(sizeof(struct can_frame));
 	if (skb == NULL) {
 		err = -ENOMEM;
-		goto out;
+		goto restart;
 	}
 	skb->dev = dev;
 	skb->protocol = htons(ETH_P_CAN);
@@ -361,13 +361,13 @@
 	stats->rx_packets++;
 	stats->rx_bytes += cf->can_dlc;
 
+restart:
 	dev_dbg(dev->dev.parent, "restarted\n");
 	priv->can_stats.restarts++;
 
 	/* Now restart the device */
 	err = priv->do_set_mode(dev, CAN_MODE_START);
 
-out:
 	netif_carrier_on(dev);
 	if (err)
 		dev_err(dev->dev.parent, "Error %d during restart", err);
@@ -473,6 +473,10 @@
 		return -EINVAL;
 	}
 
+	/* Switch carrier on if device was stopped while in bus-off state */
+	if (!netif_carrier_ok(dev))
+		netif_carrier_on(dev);
+
 	setup_timer(&priv->restart_timer, can_restart, (unsigned long)dev);
 
 	return 0;
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 571f133..08ebee7 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -63,7 +63,6 @@
 #include <linux/can.h>
 #include <linux/can/dev.h>
 #include <linux/can/error.h>
-#include <linux/can/dev.h>
 
 #include "sja1000.h"
 
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index efa680f..41b648a 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1897,6 +1897,9 @@
 
 			if (ioread8(&nic->csr->scb.status) & rus_no_res)
 				nic->ru_running = RU_SUSPENDED;
+		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
+					       sizeof(struct rfd),
+					       PCI_DMA_BIDIRECTIONAL);
 		return -ENODATA;
 	}
 
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 1551600..981ab53 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -3,7 +3,7 @@
  *		devices like TTY. It interfaces between a raw TTY and the
  *		kernel's AX.25 protocol layers.
  *
- * Authors:	Andreas Könsgen <ajk@iehk.rwth-aachen.de>
+ * Authors:	Andreas Könsgen <ajk@comnets.uni-bremen.de>
  *              Ralf Baechle DL5RB <ralf@linux-mips.org>
  *
  * Quite a lot of stuff "stolen" by Joerg Reuter from slip.c, written by
diff --git a/drivers/net/ibm_newemac/rgmii.c b/drivers/net/ibm_newemac/rgmii.c
index 1d5379d..8d76cb8 100644
--- a/drivers/net/ibm_newemac/rgmii.c
+++ b/drivers/net/ibm_newemac/rgmii.c
@@ -188,11 +188,12 @@
 void rgmii_detach(struct of_device *ofdev, int input)
 {
 	struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
-	struct rgmii_regs __iomem *p = dev->base;
-
-	mutex_lock(&dev->lock);
+	struct rgmii_regs __iomem *p;
 
 	BUG_ON(!dev || dev->users == 0);
+	p = dev->base;
+
+	mutex_lock(&dev->lock);
 
 	RGMII_DBG(dev, "detach(%d)" NL, input);
 
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index 7c5978a..da2c851 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -106,8 +106,6 @@
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-	DPRINTK(DRV, INFO, "Get DCB Admin Mode.\n");
-
 	return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED);
 }
 
@@ -116,8 +114,6 @@
 	u8 err = 0;
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-	DPRINTK(DRV, INFO, "Set DCB Admin Mode.\n");
-
 	if (state > 0) {
 		/* Turn on DCB */
 		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
@@ -175,6 +171,8 @@
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	int i, j;
 
+	memset(perm_addr, 0xff, MAX_ADDR_LEN);
+
 	for (i = 0; i < netdev->addr_len; i++)
 		perm_addr[i] = adapter->hw.mac.perm_addr[i];
 
diff --git a/drivers/net/jazzsonic.c b/drivers/net/jazzsonic.c
index d12106b..2f28609 100644
--- a/drivers/net/jazzsonic.c
+++ b/drivers/net/jazzsonic.c
@@ -229,6 +229,7 @@
 	lp = netdev_priv(dev);
 	lp->device = &pdev->dev;
 	SET_NETDEV_DEV(dev, &pdev->dev);
+	platform_set_drvdata(pdev, dev);
 
 	netdev_boot_setup_check(dev);
 
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
new file mode 100644
index 0000000..9a1dea6
--- /dev/null
+++ b/drivers/net/ks8851.c
@@ -0,0 +1,1322 @@
+/* drivers/net/ks8851.c
+ *
+ * Copyright 2009 Simtec Electronics
+ *	http://www.simtec.co.uk/
+ *	Ben Dooks <ben@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define DEBUG
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/cache.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+
+#include <linux/spi/spi.h>
+
+#include "ks8851.h"
+
+/**
+ * struct ks8851_rxctrl - KS8851 driver rx control
+ * @mchash: Multicast hash-table data.
+ * @rxcr1: KS_RXCR1 register setting
+ * @rxcr2: KS_RXCR2 register setting
+ *
+ * Representation of the settings needed to control the receive filtering,
+ * such as the multicast hash-filter and the receive register settings. It
+ * is used to work out whether the receive settings have changed and to hand
+ * the new settings to the worker that will send the necessary commands to
+ * the chip.
+ */
+struct ks8851_rxctrl {
+	u16	mchash[4];
+	u16	rxcr1;
+	u16	rxcr2;
+};
+
+/**
+ * union ks8851_tx_hdr - tx header data
+ * @txb: The header as bytes
+ * @txw: The header as 16bit, little-endian words
+ *
+ * A dual representation of the tx header data to allow
+ * access to individual bytes, and to allow 16bit accesses
+ * with 16bit alignment.
+ */
+union ks8851_tx_hdr {
+	u8	txb[6];
+	__le16	txw[3];
+};
+
+/**
+ * struct ks8851_net - KS8851 driver private data
+ * @netdev: The network device we're bound to
+ * @spidev: The spi device we're bound to.
+ * @lock: Lock to ensure that the device is not accessed when busy.
+ * @statelock: Lock on this structure for tx list.
+ * @mii: The MII state information for the mii calls.
+ * @rxctrl: RX settings for @rxctrl_work.
+ * @tx_work: Work queue for tx packets
+ * @irq_work: Work queue for servicing interrupts
+ * @rxctrl_work: Work queue for updating RX mode and multicast lists
+ * @txq: Queue of packets for transmission.
+ * @spi_msg1: pre-setup SPI transfer with one message, @spi_xfer1.
+ * @spi_msg2: pre-setup SPI transfer with two messages, @spi_xfer2.
+ * @txh: Space for generating packet TX header in DMA-able data
+ * @rxd: Space for receiving SPI data, in DMA-able space.
+ * @txd: Space for transmitting SPI data, in DMA-able space.
+ * @msg_enable: The message flags controlling driver output (see ethtool).
+ * @fid: Incrementing frame id tag.
+ * @rc_ier: Cached copy of KS_IER.
+ * @rc_rxqcr: Cached copy of KS_RXQCR.
+ *
+ * The @lock ensures that the chip is protected when certain operations are
+ * in progress. When the read or write packet transfer is in progress, most
+ * of the chip registers are not accessible until the transfer is finished and
+ * the DMA has been de-asserted.
+ *
+ * The @statelock is used to protect information in the structure which may
+ * need to be accessed via several sources, such as the network driver layer
+ * or one of the work queues.
+ *
+ * We align the buffers we may use for rx/tx to ensure that if the SPI driver
+ * wants to DMA map them, it will not have any problems with data the driver
+ * modifies.
+ */
+struct ks8851_net {
+	struct net_device	*netdev;
+	struct spi_device	*spidev;
+	struct mutex		lock;
+	spinlock_t		statelock;
+
+	union ks8851_tx_hdr	txh ____cacheline_aligned;
+	u8			rxd[8];
+	u8			txd[8];
+
+	u32			msg_enable ____cacheline_aligned;
+	u16			tx_space;
+	u8			fid;
+
+	u16			rc_ier;
+	u16			rc_rxqcr;
+
+	struct mii_if_info	mii;
+	struct ks8851_rxctrl	rxctrl;
+
+	struct work_struct	tx_work;
+	struct work_struct	irq_work;
+	struct work_struct	rxctrl_work;
+
+	struct sk_buff_head	txq;
+
+	struct spi_message	spi_msg1;
+	struct spi_message	spi_msg2;
+	struct spi_transfer	spi_xfer1;
+	struct spi_transfer	spi_xfer2[2];
+};
+
+static int msg_enable;
+
+#define ks_info(_ks, _msg...) dev_info(&(_ks)->spidev->dev, _msg)
+#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->spidev->dev, _msg)
+#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->spidev->dev, _msg)
+#define ks_err(_ks, _msg...) dev_err(&(_ks)->spidev->dev, _msg)
+
+/* shift for byte-enable data */
+#define BYTE_EN(_x)	((_x) << 2)
+
+/* turn register number and byte-enable mask into data for start of packet */
+#define MK_OP(_byteen, _reg) (BYTE_EN(_byteen) | (_reg)  << (8+2) | (_reg) >> 6)
+
+/* SPI register read/write calls.
+ *
+ * All these calls issue SPI transactions to access the chip's registers. They
+ * all require that the necessary lock is held to prevent accesses when the
+ * chip is busy transferring packet data (RX/TX FIFO accesses).
+ */
+
+/**
+ * ks8851_wrreg16 - write 16bit register value to chip
+ * @ks: The chip state
+ * @reg: The register address
+ * @val: The value to write
+ *
+ * Issue a write to put the value @val into the register specified in @reg.
+ */
+static void ks8851_wrreg16(struct ks8851_net *ks, unsigned reg, unsigned val)
+{
+	struct spi_transfer *xfer = &ks->spi_xfer1;
+	struct spi_message *msg = &ks->spi_msg1;
+	__le16 txb[2];
+	int ret;
+
+	txb[0] = cpu_to_le16(MK_OP(reg & 2 ? 0xC : 0x03, reg) | KS_SPIOP_WR);
+	txb[1] = cpu_to_le16(val);
+
+	xfer->tx_buf = txb;
+	xfer->rx_buf = NULL;
+	xfer->len = 4;
+
+	ret = spi_sync(ks->spidev, msg);
+	if (ret < 0)
+		ks_err(ks, "spi_sync() failed\n");
+}
+
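For reference, a standalone worked example of the command word that MK_OP() and ks8851_wrreg16() build. The macros are copied from the driver above; the register value (KS_MARL, 0x10) is just an example:

#include <stdio.h>

/* Macros copied from the driver, for the worked example only. */
#define BYTE_EN(_x)	((_x) << 2)
#define MK_OP(_byteen, _reg) (BYTE_EN(_byteen) | (_reg) << (8 + 2) | (_reg) >> 6)
#define KS_SPIOP_WR	(0x40)

int main(void)
{
	unsigned reg = 0x10;	/* KS_MARL */
	unsigned op = MK_OP(reg & 2 ? 0xC : 0x3, reg) | KS_SPIOP_WR;

	/* reg 0x10 is word-aligned, so the low-word byte enables (0x3)
	 * are used; the driver then sends cpu_to_le16(op), i.e. bytes
	 * 4c 40, followed by the little-endian register value */
	printf("op = 0x%04x\n", op);	/* prints: op = 0x404c */

	return 0;
}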
+/**
+ * ks8851_rx_1msg - select whether to use one or two messages for spi read
+ * @ks: The device structure
+ *
+ * Return whether to generate a single message with a tx and rx buffer
+ * supplied to spi_sync(), or alternatively send the tx and rx buffers
+ * as separate messages.
+ *
+ * Depending on the hardware in use, a single message may be more efficient
+ * on interrupts or work done by the driver.
+ *
+ * This currently always returns true until we add some per-device data passed
+ * from the platform code to specify which mode is better.
+ */
+static inline bool ks8851_rx_1msg(struct ks8851_net *ks)
+{
+	return true;
+}
+
+/**
+ * ks8851_rdreg - issue read register command and return the data
+ * @ks: The device state
+ * @op: The register address and byte enables in message format.
+ * @rxb: The RX buffer to return the result into
+ * @rxl: The length of data expected.
+ *
+ * This is the low level read call that issues the necessary spi message(s)
+ * to read data from the register specified in @op.
+ */
+static void ks8851_rdreg(struct ks8851_net *ks, unsigned op,
+			 u8 *rxb, unsigned rxl)
+{
+	struct spi_transfer *xfer;
+	struct spi_message *msg;
+	__le16 *txb = (__le16 *)ks->txd;
+	u8 *trx = ks->rxd;
+	int ret;
+
+	txb[0] = cpu_to_le16(op | KS_SPIOP_RD);
+
+	if (ks8851_rx_1msg(ks)) {
+		msg = &ks->spi_msg1;
+		xfer = &ks->spi_xfer1;
+
+		xfer->tx_buf = txb;
+		xfer->rx_buf = trx;
+		xfer->len = rxl + 2;
+	} else {
+		msg = &ks->spi_msg2;
+		xfer = ks->spi_xfer2;
+
+		xfer->tx_buf = txb;
+		xfer->rx_buf = NULL;
+		xfer->len = 2;
+
+		xfer++;
+		xfer->tx_buf = NULL;
+		xfer->rx_buf = trx;
+		xfer->len = rxl;
+	}
+
+	ret = spi_sync(ks->spidev, msg);
+	if (ret < 0)
+		ks_err(ks, "read: spi_sync() failed\n");
+	else if (ks8851_rx_1msg(ks))
+		memcpy(rxb, trx + 2, rxl);
+	else
+		memcpy(rxb, trx, rxl);
+}
+
+/**
+ * ks8851_rdreg8 - read 8 bit register from device
+ * @ks: The chip information
+ * @reg: The register address
+ *
+ * Read an 8bit register from the chip, returning the result
+*/
+static unsigned ks8851_rdreg8(struct ks8851_net *ks, unsigned reg)
+{
+	u8 rxb[1];
+
+	ks8851_rdreg(ks, MK_OP(1 << (reg & 3), reg), rxb, 1);
+	return rxb[0];
+}
+
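A quick standalone check of the byte-enable selection used by ks8851_rdreg8(): bit (reg & 3) picks one byte out of the 32-bit register window, per the MK_OP() framing above. KS_RXFC (0x9D) is used as the example register:

#include <stdio.h>

int main(void)
{
	unsigned reg = 0x9D;			/* KS_RXFC */
	unsigned byteen = 1 << (reg & 3);	/* 1 << 1 = 0x2 */

	printf("reg 0x%02x -> byte enable 0x%x\n", reg, byteen);
	/* prints: reg 0x9d -> byte enable 0x2 */
	return 0;
}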
+/**
+ * ks8851_rdreg16 - read 16 bit register from device
+ * @ks: The chip information
+ * @reg: The register address
+ *
+ * Read a 16bit register from the chip, returning the result
+*/
+static unsigned ks8851_rdreg16(struct ks8851_net *ks, unsigned reg)
+{
+	__le16 rx = 0;
+
+	ks8851_rdreg(ks, MK_OP(reg & 2 ? 0xC : 0x3, reg), (u8 *)&rx, 2);
+	return le16_to_cpu(rx);
+}
+
+/**
+ * ks8851_rdreg32 - read 32 bit register from device
+ * @ks: The chip information
+ * @reg: The register address
+ *
+ * Read a 32bit register from the chip.
+ *
+ * Note, this read requires the address be aligned to 4 bytes.
+*/
+static unsigned ks8851_rdreg32(struct ks8851_net *ks, unsigned reg)
+{
+	__le32 rx = 0;
+
+	WARN_ON(reg & 3);
+
+	ks8851_rdreg(ks, MK_OP(0xf, reg), (u8 *)&rx, 4);
+	return le32_to_cpu(rx);
+}
+
+/**
+ * ks8851_soft_reset - issue one of the soft resets to the device
+ * @ks: The device state.
+ * @op: The bit(s) to set in the GRR
+ *
+ * Issue the relevant soft-reset command to the device's GRR register
+ * specified by @op.
+ *
+ * Note, the delays are in there as a caution to ensure that the reset
+ * has time to take effect and then complete. Since the datasheet does
+ * not currently specify the exact sequence, we have chosen something
+ * that seems to work with our device.
+ */
+static void ks8851_soft_reset(struct ks8851_net *ks, unsigned op)
+{
+	ks8851_wrreg16(ks, KS_GRR, op);
+	mdelay(1);	/* wait a short time to effect reset */
+	ks8851_wrreg16(ks, KS_GRR, 0);
+	mdelay(1);	/* wait for condition to clear */
+}
+
+/**
+ * ks8851_write_mac_addr - write mac address to device registers
+ * @dev: The network device
+ *
+ * Update the KS8851 MAC address registers from the address in @dev.
+ *
+ * This call assumes that the chip is not running, so there is no need to
+ * shut down the RXQ process whilst setting this.
+*/
+static int ks8851_write_mac_addr(struct net_device *dev)
+{
+	struct ks8851_net *ks = netdev_priv(dev);
+	u16 *mcp = (u16 *)dev->dev_addr;
+
+	mutex_lock(&ks->lock);
+
+	ks8851_wrreg16(ks, KS_MARL, mcp[0]);
+	ks8851_wrreg16(ks, KS_MARM, mcp[1]);
+	ks8851_wrreg16(ks, KS_MARH, mcp[2]);
+
+	mutex_unlock(&ks->lock);
+
+	return 0;
+}
+
+/**
+ * ks8851_init_mac - initialise the mac address
+ * @ks: The device structure
+ *
+ * Get or create the initial mac address for the device and then set that
+ * into the station address register. Currently we assume that the device
+ * does not have a valid mac address in it, and so we use random_ether_addr()
+ * to create a new one.
+ *
+ * In future, the driver should check to see if the device has an EEPROM
+ * attached and whether that has a valid ethernet address in it.
+ */
+static void ks8851_init_mac(struct ks8851_net *ks)
+{
+	struct net_device *dev = ks->netdev;
+
+	random_ether_addr(dev->dev_addr);
+	ks8851_write_mac_addr(dev);
+}
+
+/**
+ * ks8851_irq - device interrupt handler
+ * @irq: Interrupt number passed from the IRQ handler.
+ * @pw: The private word passed to request_irq(), our struct ks8851_net.
+ *
+ * Disable the interrupt from happening again until we've processed the
+ * current status by scheduling ks8851_irq_work().
+ */
+static irqreturn_t ks8851_irq(int irq, void *pw)
+{
+	struct ks8851_net *ks = pw;
+
+	disable_irq_nosync(irq);
+	schedule_work(&ks->irq_work);
+	return IRQ_HANDLED;
+}
+
+/**
+ * ks8851_rdfifo - read data from the receive fifo
+ * @ks: The device state.
+ * @buff: The buffer address
+ * @len: The length of the data to read
+ *
+ * Issue an RXQ FIFO read command and read the @len amount of data from
+ * the FIFO into the buffer specified by @buff.
+ */
+static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len)
+{
+	struct spi_transfer *xfer = ks->spi_xfer2;
+	struct spi_message *msg = &ks->spi_msg2;
+	u8 txb[1];
+	int ret;
+
+	if (netif_msg_rx_status(ks))
+		ks_dbg(ks, "%s: %d@%p\n", __func__, len, buff);
+
+	/* set the operation we're issuing */
+	txb[0] = KS_SPIOP_RXFIFO;
+
+	xfer->tx_buf = txb;
+	xfer->rx_buf = NULL;
+	xfer->len = 1;
+
+	xfer++;
+	xfer->rx_buf = buff;
+	xfer->tx_buf = NULL;
+	xfer->len = len;
+
+	ret = spi_sync(ks->spidev, msg);
+	if (ret < 0)
+		ks_err(ks, "%s: spi_sync() failed\n", __func__);
+}
+
+/**
+ * ks8851_dbg_dumpkkt - dump initial packet contents to debug
+ * @ks: The device state
+ * @rxpkt: The data for the received packet
+ *
+ * Dump the initial data from the packet to dev_dbg().
+*/
+static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt)
+{
+	ks_dbg(ks, "pkt %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
+	       rxpkt[4], rxpkt[5], rxpkt[6], rxpkt[7],
+	       rxpkt[8], rxpkt[9], rxpkt[10], rxpkt[11],
+	       rxpkt[12], rxpkt[13], rxpkt[14], rxpkt[15]);
+}
+
+/**
+ * ks8851_rx_pkts - receive packets from the host
+ * @ks: The device information.
+ *
+ * This is called from the IRQ work queue when the system detects that there
+ * are packets in the receive queue. Find out how many packets there are and
+ * read them from the FIFO.
+ */
+static void ks8851_rx_pkts(struct ks8851_net *ks)
+{
+	struct sk_buff *skb;
+	unsigned rxfc;
+	unsigned rxlen;
+	unsigned rxstat;
+	u32 rxh;
+	u8 *rxpkt;
+
+	rxfc = ks8851_rdreg8(ks, KS_RXFC);
+
+	if (netif_msg_rx_status(ks))
+		ks_dbg(ks, "%s: %d packets\n", __func__, rxfc);
+
+	/* Currently we're issuing a read per packet, but we could possibly
+	 * improve the code by issuing a single read, getting the receive
+	 * header, allocating the packet and then reading the packet data
+	 * out in one go.
+	 *
+	 * This form of operation would require us to hold the SPI bus'
+	 * chipselect low during the entire transaction to avoid any
+	 * reset to the data stream coming from the chip.
+	 */
+
+	for (; rxfc != 0; rxfc--) {
+		rxh = ks8851_rdreg32(ks, KS_RXFHSR);
+		rxstat = rxh & 0xffff;
+		rxlen = rxh >> 16;
+
+		if (netif_msg_rx_status(ks))
+			ks_dbg(ks, "rx: stat 0x%04x, len 0x%04x\n",
+				rxstat, rxlen);
+
+		/* the length of the packet includes the 32bit CRC */
+
+		/* set dma read address */
+		ks8851_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI | 0x00);
+
+		/* start the packet dma process, and set auto-dequeue rx */
+		ks8851_wrreg16(ks, KS_RXQCR,
+			       ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE);
+
+		if (rxlen > 0) {
+			skb = netdev_alloc_skb(ks->netdev, rxlen + 2 + 8);
+			if (!skb) {
+				/* todo - dump frame and move on */
+			}
+
+			/* two bytes to ensure ip is aligned, and four bytes
+			 * for the status header and 4 bytes of garbage */
+			skb_reserve(skb, 2 + 4 + 4);
+
+			rxpkt = skb_put(skb, rxlen - 4) - 8;
+
+			/* align the packet length to 4 bytes, and add 4 bytes
+			 * as we're getting the rx status header as well */
+			ks8851_rdfifo(ks, rxpkt, ALIGN(rxlen, 4) + 8);
+
+			if (netif_msg_pktdata(ks))
+				ks8851_dbg_dumpkkt(ks, rxpkt);
+
+			skb->protocol = eth_type_trans(skb, ks->netdev);
+			netif_rx(skb);
+
+			ks->netdev->stats.rx_packets++;
+			ks->netdev->stats.rx_bytes += rxlen - 4;
+		}
+
+		ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
+	}
+}
+
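The 32-bit KS_RXFHSR read above packs the frame status in the low 16 bits and the frame byte count (including the CRC) in the high 16 bits. A standalone illustration with a made-up register value:

#include <stdio.h>

int main(void)
{
	unsigned int rxh = 0x004a8000;		/* made-up RXFHSR readback */
	unsigned int rxstat = rxh & 0xffff;	/* status flags */
	unsigned int rxlen = rxh >> 16;		/* byte count incl. CRC */

	printf("rx: stat 0x%04x, len 0x%04x (%u bytes)\n",
	       rxstat, rxlen, rxlen);
	/* prints: rx: stat 0x8000, len 0x004a (74 bytes) */
	return 0;
}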
+/**
+ * ks8851_irq_work - work queue handler for dealing with interrupt requests
+ * @work: The work structure that was scheduled by schedule_work()
+ *
+ * This is the handler invoked when ks8851_irq() fires to find out what
+ * happened, run from a work queue since we cannot sleep in interrupt
+ * context whilst waiting for another process to release the chip's lock.
+ *
+ * Read the interrupt status, work out what needs to be done and then clear
+ * any of the interrupts that are not needed.
+ */
+static void ks8851_irq_work(struct work_struct *work)
+{
+	struct ks8851_net *ks = container_of(work, struct ks8851_net, irq_work);
+	unsigned status;
+	unsigned handled = 0;
+
+	mutex_lock(&ks->lock);
+
+	status = ks8851_rdreg16(ks, KS_ISR);
+
+	if (netif_msg_intr(ks))
+		dev_dbg(&ks->spidev->dev, "%s: status 0x%04x\n",
+			__func__, status);
+
+	if (status & IRQ_LCI) {
+		/* should do something about checking link status */
+		handled |= IRQ_LCI;
+	}
+
+	if (status & IRQ_LDI) {
+		u16 pmecr = ks8851_rdreg16(ks, KS_PMECR);
+		pmecr &= ~PMECR_WKEVT_MASK;
+		ks8851_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
+
+		handled |= IRQ_LDI;
+	}
+
+	if (status & IRQ_RXPSI)
+		handled |= IRQ_RXPSI;
+
+	if (status & IRQ_TXI) {
+		handled |= IRQ_TXI;
+
+		/* no lock here, tx queue should have been stopped */
+
+		/* update our idea of how much tx space is available to the
+		 * system */
+		ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR);
+
+		if (netif_msg_intr(ks))
+			ks_dbg(ks, "%s: txspace %d\n", __func__, ks->tx_space);
+	}
+
+	if (status & IRQ_RXI)
+		handled |= IRQ_RXI;
+
+	if (status & IRQ_SPIBEI) {
+		dev_err(&ks->spidev->dev, "%s: spi bus error\n", __func__);
+		handled |= IRQ_SPIBEI;
+	}
+
+	ks8851_wrreg16(ks, KS_ISR, handled);
+
+	if (status & IRQ_RXI) {
+		/* the datasheet says to disable the rx interrupt during
+		 * packet read-out, however the chip's interrupt is already
+		 * masked at the host, so there is no need to mask just the
+		 * RX interrupt on the device. */
+
+		ks8851_rx_pkts(ks);
+	}
+
+	/* if something stopped the rx process, probably due to wanting
+	 * to change the rx settings, then do something about restarting
+	 * it. */
+	if (status & IRQ_RXPSI) {
+		struct ks8851_rxctrl *rxc = &ks->rxctrl;
+
+		/* update the multicast hash table */
+		ks8851_wrreg16(ks, KS_MAHTR0, rxc->mchash[0]);
+		ks8851_wrreg16(ks, KS_MAHTR1, rxc->mchash[1]);
+		ks8851_wrreg16(ks, KS_MAHTR2, rxc->mchash[2]);
+		ks8851_wrreg16(ks, KS_MAHTR3, rxc->mchash[3]);
+
+		ks8851_wrreg16(ks, KS_RXCR2, rxc->rxcr2);
+		ks8851_wrreg16(ks, KS_RXCR1, rxc->rxcr1);
+	}
+
+	mutex_unlock(&ks->lock);
+
+	if (status & IRQ_TXI)
+		netif_wake_queue(ks->netdev);
+
+	enable_irq(ks->netdev->irq);
+}
+
+/**
+ * calc_txlen - calculate size of message to send packet
+ * @len: Length of data
+ *
+ * Returns the size of the TXFIFO message needed to send
+ * this packet.
+ */
+static inline unsigned calc_txlen(unsigned len)
+{
+	return ALIGN(len + 4, 4);
+}
+
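A standalone restatement of the calc_txlen() arithmetic, ALIGN() re-declared here only so the example stands alone: add 4 bytes of framing overhead, then round up to a 4-byte boundary:

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static unsigned calc_txlen(unsigned len)
{
	return ALIGN(len + 4, 4);
}

int main(void)
{
	printf("%u %u %u\n", calc_txlen(60), calc_txlen(61), calc_txlen(64));
	/* prints: 64 68 68 */
	return 0;
}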
+/**
+ * ks8851_wrpkt - write packet to TX FIFO
+ * @ks: The device state.
+ * @txp: The sk_buff to transmit.
+ * @irq: IRQ on completion of the packet.
+ *
+ * Send the @txp to the chip. This means creating the relevant packet header
+ * specifying the length of the packet and the other information the chip
+ * needs, such as IRQ on completion. Send the header and the packet data to
+ * the device.
+ */
+static void ks8851_wrpkt(struct ks8851_net *ks, struct sk_buff *txp, bool irq)
+{
+	struct spi_transfer *xfer = ks->spi_xfer2;
+	struct spi_message *msg = &ks->spi_msg2;
+	unsigned fid = 0;
+	int ret;
+
+	if (netif_msg_tx_queued(ks))
+		dev_dbg(&ks->spidev->dev, "%s: skb %p, %d@%p, irq %d\n",
+			__func__, txp, txp->len, txp->data, irq);
+
+	fid = ks->fid++;
+	fid &= TXFR_TXFID_MASK;
+
+	if (irq)
+		fid |= TXFR_TXIC;	/* irq on completion */
+
+	/* start header at txb[1] to align txw entries */
+	ks->txh.txb[1] = KS_SPIOP_TXFIFO;
+	ks->txh.txw[1] = cpu_to_le16(fid);
+	ks->txh.txw[2] = cpu_to_le16(txp->len);
+
+	xfer->tx_buf = &ks->txh.txb[1];
+	xfer->rx_buf = NULL;
+	xfer->len = 5;
+
+	xfer++;
+	xfer->tx_buf = txp->data;
+	xfer->rx_buf = NULL;
+	xfer->len = ALIGN(txp->len, 4);
+
+	ret = spi_sync(ks->spidev, msg);
+	if (ret < 0)
+		ks_err(ks, "%s: spi_sync() failed\n", __func__);
+}
+
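A userspace sketch of the 5-byte TX header that ks8851_wrpkt() assembles: the TXFIFO command byte, a little-endian frame-ID/control word and a little-endian length word. The fid and length values are invented, and the byte layout shown assumes a little-endian host:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define KS_SPIOP_TXFIFO	(0xC0)
#define TXFR_TXIC	(1 << 15)
#define TXFR_TXFID_MASK	(0x3f << 0)

int main(void)
{
	uint8_t txb[5] = { 0 };
	uint16_t fid = 5 & TXFR_TXFID_MASK;	/* invented frame id */
	uint16_t len = 60;			/* invented packet length */
	uint16_t w;

	fid |= TXFR_TXIC;			/* ask for an IRQ on completion */

	txb[0] = KS_SPIOP_TXFIFO;
	w = fid;
	memcpy(&txb[1], &w, 2);			/* little-endian host assumed */
	w = len;
	memcpy(&txb[3], &w, 2);

	printf("%02x %02x%02x %02x%02x\n",
	       txb[0], txb[1], txb[2], txb[3], txb[4]);
	/* prints: c0 0580 3c00 */
	return 0;
}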
+/**
+ * ks8851_done_tx - update and then free skbuff after transmitting
+ * @ks: The device state
+ * @txb: The buffer transmitted
+ */
+static void ks8851_done_tx(struct ks8851_net *ks, struct sk_buff *txb)
+{
+	struct net_device *dev = ks->netdev;
+
+	dev->stats.tx_bytes += txb->len;
+	dev->stats.tx_packets++;
+
+	dev_kfree_skb(txb);
+}
+
+/**
+ * ks8851_tx_work - process tx packet(s)
+ * @work: The work strucutre what was scheduled.
+ *
+ * This is called when a number of packets have been scheduled for
+ * transmission and need to be sent to the device.
+ */
+static void ks8851_tx_work(struct work_struct *work)
+{
+	struct ks8851_net *ks = container_of(work, struct ks8851_net, tx_work);
+	struct sk_buff *txb;
+	bool last = false;
+
+	mutex_lock(&ks->lock);
+
+	while (!last) {
+		txb = skb_dequeue(&ks->txq);
+		last = skb_queue_empty(&ks->txq);
+
+		ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
+		ks8851_wrpkt(ks, txb, last);
+		ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
+		ks8851_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
+
+		ks8851_done_tx(ks, txb);
+	}
+
+	mutex_unlock(&ks->lock);
+}
+
+/**
+ * ks8851_set_powermode - set power mode of the device
+ * @ks: The device state
+ * @pwrmode: The power mode value to write to KS_PMECR.
+ *
+ * Change the power mode of the chip.
+ */
+static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode)
+{
+	unsigned pmecr;
+
+	if (netif_msg_hw(ks))
+		ks_dbg(ks, "setting power mode %d\n", pwrmode);
+
+	pmecr = ks8851_rdreg16(ks, KS_PMECR);
+	pmecr &= ~PMECR_PM_MASK;
+	pmecr |= pwrmode;
+
+	ks8851_wrreg16(ks, KS_PMECR, pmecr);
+}
+
+/**
+ * ks8851_net_open - open network device
+ * @dev: The network device being opened.
+ *
+ * Called when the network device is marked active, such as a user executing
+ * 'ifconfig up' on the device.
+ */
+static int ks8851_net_open(struct net_device *dev)
+{
+	struct ks8851_net *ks = netdev_priv(dev);
+
+	/* lock the card, even if we may not actually be doing anything
+	 * else at the moment */
+	mutex_lock(&ks->lock);
+
+	if (netif_msg_ifup(ks))
+		ks_dbg(ks, "opening %s\n", dev->name);
+
+	/* bring chip out of any power saving mode it was in */
+	ks8851_set_powermode(ks, PMECR_PM_NORMAL);
+
+	/* issue a soft reset to the RX/TX QMU to put it into a known
+	 * state. */
+	ks8851_soft_reset(ks, GRR_QMU);
+
+	/* setup transmission parameters */
+
+	ks8851_wrreg16(ks, KS_TXCR, (TXCR_TXE | /* enable transmit process */
+				     TXCR_TXPE | /* pad to min length */
+				     TXCR_TXCRC | /* add CRC */
+				     TXCR_TXFCE)); /* enable flow control */
+
+	/* auto-increment tx data, reset tx pointer */
+	ks8851_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI);
+
+	/* setup receiver control */
+
+	ks8851_wrreg16(ks, KS_RXCR1, (RXCR1_RXPAFMA | /*  from mac filter */
+				      RXCR1_RXFCE | /* enable flow control */
+				      RXCR1_RXBE | /* broadcast enable */
+				      RXCR1_RXUE | /* unicast enable */
+				      RXCR1_RXE)); /* enable rx block */
+
+	/* transfer entire frames out in one go */
+	ks8851_wrreg16(ks, KS_RXCR2, RXCR2_SRDBL_FRAME);
+
+	/* set receive counter timeouts */
+	ks8851_wrreg16(ks, KS_RXDTTR, 1000); /* 1ms after first frame to IRQ */
+	ks8851_wrreg16(ks, KS_RXDBCTR, 4096); /* >4Kbytes in buffer to IRQ */
+	ks8851_wrreg16(ks, KS_RXFCTR, 10);  /* 10 frames to IRQ */
+
+	ks->rc_rxqcr = (RXQCR_RXFCTE |  /* IRQ on frame count exceeded */
+			RXQCR_RXDBCTE | /* IRQ on byte count exceeded */
+			RXQCR_RXDTTE);  /* IRQ on time exceeded */
+
+	ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
+
+	/* clear then enable interrupts */
+
+#define STD_IRQ (IRQ_LCI |	/* Link Change */	\
+		 IRQ_TXI |	/* TX done */		\
+		 IRQ_RXI |	/* RX done */		\
+		 IRQ_SPIBEI |	/* SPI bus error */	\
+		 IRQ_TXPSI |	/* TX process stop */	\
+		 IRQ_RXPSI)	/* RX process stop */
+
+	ks->rc_ier = STD_IRQ;
+	ks8851_wrreg16(ks, KS_ISR, STD_IRQ);
+	ks8851_wrreg16(ks, KS_IER, STD_IRQ);
+
+	netif_start_queue(ks->netdev);
+
+	if (netif_msg_ifup(ks))
+		ks_dbg(ks, "network device %s up\n", dev->name);
+
+	mutex_unlock(&ks->lock);
+	return 0;
+}
+
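For reference, the value that the STD_IRQ composition above writes to KS_ISR and KS_IER can be checked standalone, with the bit values copied from ks8851.h:

#include <stdio.h>

#define IRQ_LCI		(1 << 15)
#define IRQ_TXI		(1 << 14)
#define IRQ_RXI		(1 << 13)
#define IRQ_TXPSI	(1 << 9)
#define IRQ_RXPSI	(1 << 8)
#define IRQ_SPIBEI	(1 << 1)

int main(void)
{
	unsigned ier = IRQ_LCI | IRQ_TXI | IRQ_RXI |
		       IRQ_SPIBEI | IRQ_TXPSI | IRQ_RXPSI;

	printf("IER/ISR mask written at open: 0x%04x\n", ier);
	/* prints: IER/ISR mask written at open: 0xe302 */
	return 0;
}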
+/**
+ * ks8851_net_stop - close network device
+ * @dev: The device being closed.
+ *
+ * Called to close down a network device which has been active. Cancel any
+ * work, shut down the RX and TX processes and then place the chip into a low
+ * power state whilst it is not being used.
+ */
+static int ks8851_net_stop(struct net_device *dev)
+{
+	struct ks8851_net *ks = netdev_priv(dev);
+
+	if (netif_msg_ifdown(ks))
+		ks_info(ks, "%s: shutting down\n", dev->name);
+
+	netif_stop_queue(dev);
+
+	mutex_lock(&ks->lock);
+
+	/* wait for any outstanding work to finish */
+	flush_work(&ks->irq_work);
+	flush_work(&ks->tx_work);
+	flush_work(&ks->rxctrl_work);
+
+	/* turn off the IRQs and ack any outstanding */
+	ks8851_wrreg16(ks, KS_IER, 0x0000);
+	ks8851_wrreg16(ks, KS_ISR, 0xffff);
+
+	/* shutdown RX process */
+	ks8851_wrreg16(ks, KS_RXCR1, 0x0000);
+
+	/* shutdown TX process */
+	ks8851_wrreg16(ks, KS_TXCR, 0x0000);
+
+	/* set powermode to soft power down to save power */
+	ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN);
+
+	/* ensure any queued tx buffers are dumped */
+	while (!skb_queue_empty(&ks->txq)) {
+		struct sk_buff *txb = skb_dequeue(&ks->txq);
+
+		if (netif_msg_ifdown(ks))
+			ks_dbg(ks, "%s: freeing txb %p\n", __func__, txb);
+
+		dev_kfree_skb(txb);
+	}
+
+	mutex_unlock(&ks->lock);
+	return 0;
+}
+
+/**
+ * ks8851_start_xmit - transmit packet
+ * @skb: The buffer to transmit
+ * @dev: The device used to transmit the packet.
+ *
+ * Called by the network layer to transmit the @skb. Queue the packet for
+ * the device and schedule the necessary work to transmit the packet when
+ * it is free.
+ *
+ * We do this firstly to avoid sleeping with the network device locked,
+ * and secondly so that we can batch up more than one packet to transmit,
+ * which helps avoid generating too many transmit-done interrupts.
+ */
+static int ks8851_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct ks8851_net *ks = netdev_priv(dev);
+	unsigned needed = calc_txlen(skb->len);
+	int ret = NETDEV_TX_OK;
+
+	if (netif_msg_tx_queued(ks))
+		ks_dbg(ks, "%s: skb %p, %d@%p\n", __func__,
+		       skb, skb->len, skb->data);
+
+	spin_lock(&ks->statelock);
+
+	if (needed > ks->tx_space) {
+		netif_stop_queue(dev);
+		ret = NETDEV_TX_BUSY;
+	} else {
+		ks->tx_space -= needed;
+		skb_queue_tail(&ks->txq, skb);
+	}
+
+	spin_unlock(&ks->statelock);
+	schedule_work(&ks->tx_work);
+
+	return ret;
+}
+
+/**
+ * ks8851_rxctrl_work - work handler to change rx mode
+ * @work: The work structure this belongs to.
+ *
+ * Lock the device and issue the receive-mode changes requested by the
+ * network device layer. This is done from a work queue so that the changes
+ * can be made without having to sleep whilst holding the network device
+ * lock.
+ *
+ * Since the recommendation from Micrel is that the RXQ is shut down whilst the
+ * receive parameters are programmed, we issue a write to disable the RXQ and
+ * then wait for the interrupt handler to be triggered once the RXQ shutdown is
+ * complete. The interrupt handler then writes the new values into the chip.
+ */
+static void ks8851_rxctrl_work(struct work_struct *work)
+{
+	struct ks8851_net *ks = container_of(work, struct ks8851_net, rxctrl_work);
+
+	mutex_lock(&ks->lock);
+
+	/* need to shutdown RXQ before modifying filter parameters */
+	ks8851_wrreg16(ks, KS_RXCR1, 0x00);
+
+	mutex_unlock(&ks->lock);
+}
+
+static void ks8851_set_rx_mode(struct net_device *dev)
+{
+	struct ks8851_net *ks = netdev_priv(dev);
+	struct ks8851_rxctrl rxctrl;
+
+	memset(&rxctrl, 0, sizeof(rxctrl));
+
+	if (dev->flags & IFF_PROMISC) {
+		/* interface to receive everything */
+
+		rxctrl.rxcr1 = RXCR1_RXAE | RXCR1_RXINVF;
+	} else if (dev->flags & IFF_ALLMULTI) {
+		/* accept all multicast packets */
+
+		rxctrl.rxcr1 = (RXCR1_RXME | RXCR1_RXAE |
+				RXCR1_RXPAFMA | RXCR1_RXMAFMA);
+	} else if (dev->flags & IFF_MULTICAST && dev->mc_count > 0) {
+		struct dev_mc_list *mcptr = dev->mc_list;
+		u32 crc;
+		int i;
+
+		/* accept some multicast */
+
+		for (i = dev->mc_count; i > 0; i--) {
+			crc = ether_crc(ETH_ALEN, mcptr->dmi_addr);
+			crc >>= (32 - 6);  /* get top six bits */
+
+			rxctrl.mchash[crc >> 4] |= (1 << (crc & 0xf));
+			mcptr = mcptr->next;
+		}
+
+		rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXAE | RXCR1_RXPAFMA;
+	} else {
+		/* just accept broadcast / unicast */
+		rxctrl.rxcr1 = RXCR1_RXPAFMA;
+	}
+
+	rxctrl.rxcr1 |= (RXCR1_RXUE | /* unicast enable */
+			 RXCR1_RXBE | /* broadcast enable */
+			 RXCR1_RXE | /* RX process enable */
+			 RXCR1_RXFCE); /* enable flow control */
+
+	rxctrl.rxcr2 |= RXCR2_SRDBL_FRAME;
+
+	/* schedule work to do the actual set of the data if needed */
+
+	spin_lock(&ks->statelock);
+
+	if (memcmp(&rxctrl, &ks->rxctrl, sizeof(rxctrl)) != 0) {
+		memcpy(&ks->rxctrl, &rxctrl, sizeof(ks->rxctrl));
+		schedule_work(&ks->rxctrl_work);
+	}
+
+	spin_unlock(&ks->statelock);
+}
+
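A standalone illustration of the multicast hash bucketing used in ks8851_set_rx_mode(): the top six bits of the ether_crc() value select one bit in the 4 x 16-bit hash table. The CRC value below is invented purely to show the index arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned short mchash[4] = { 0, 0, 0, 0 };
	unsigned int crc = 0xd2c3a9d0;	/* pretend ether_crc() result */

	crc >>= (32 - 6);		/* keep the top six bits: 0x34 */
	mchash[crc >> 4] |= 1 << (crc & 0xf);

	printf("word %u, bit %u, mchash[%u] = 0x%04x\n",
	       crc >> 4, crc & 0xf, crc >> 4, (unsigned)mchash[crc >> 4]);
	/* prints: word 3, bit 4, mchash[3] = 0x0010 */
	return 0;
}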
+static int ks8851_set_mac_address(struct net_device *dev, void *addr)
+{
+	struct sockaddr *sa = addr;
+
+	if (netif_running(dev))
+		return -EBUSY;
+
+	if (!is_valid_ether_addr(sa->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
+	return ks8851_write_mac_addr(dev);
+}
+
+static int ks8851_net_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+	struct ks8851_net *ks = netdev_priv(dev);
+
+	if (!netif_running(dev))
+		return -EINVAL;
+
+	return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL);
+}
+
+static const struct net_device_ops ks8851_netdev_ops = {
+	.ndo_open		= ks8851_net_open,
+	.ndo_stop		= ks8851_net_stop,
+	.ndo_do_ioctl		= ks8851_net_ioctl,
+	.ndo_start_xmit		= ks8851_start_xmit,
+	.ndo_set_mac_address	= ks8851_set_mac_address,
+	.ndo_set_rx_mode	= ks8851_set_rx_mode,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
+/* ethtool support */
+
+static void ks8851_get_drvinfo(struct net_device *dev,
+			       struct ethtool_drvinfo *di)
+{
+	strlcpy(di->driver, "KS8851", sizeof(di->driver));
+	strlcpy(di->version, "1.00", sizeof(di->version));
+	strlcpy(di->bus_info, dev_name(dev->dev.parent), sizeof(di->bus_info));
+}
+
+static u32 ks8851_get_msglevel(struct net_device *dev)
+{
+	struct ks8851_net *ks = netdev_priv(dev);
+	return ks->msg_enable;
+}
+
+static void ks8851_set_msglevel(struct net_device *dev, u32 to)
+{
+	struct ks8851_net *ks = netdev_priv(dev);
+	ks->msg_enable = to;
+}
+
+static int ks8851_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct ks8851_net *ks = netdev_priv(dev);
+	return mii_ethtool_gset(&ks->mii, cmd);
+}
+
+static int ks8851_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct ks8851_net *ks = netdev_priv(dev);
+	return mii_ethtool_sset(&ks->mii, cmd);
+}
+
+static u32 ks8851_get_link(struct net_device *dev)
+{
+	struct ks8851_net *ks = netdev_priv(dev);
+	return mii_link_ok(&ks->mii);
+}
+
+static int ks8851_nway_reset(struct net_device *dev)
+{
+	struct ks8851_net *ks = netdev_priv(dev);
+	return mii_nway_restart(&ks->mii);
+}
+
+static const struct ethtool_ops ks8851_ethtool_ops = {
+	.get_drvinfo	= ks8851_get_drvinfo,
+	.get_msglevel	= ks8851_get_msglevel,
+	.set_msglevel	= ks8851_set_msglevel,
+	.get_settings	= ks8851_get_settings,
+	.set_settings	= ks8851_set_settings,
+	.get_link	= ks8851_get_link,
+	.nway_reset	= ks8851_nway_reset,
+};
+
+/* MII interface controls */
+
+/**
+ * ks8851_phy_reg - convert MII register into a KS8851 register
+ * @reg: MII register number.
+ *
+ * Return the KS8851 register number for the corresponding MII PHY register
+ * if possible. Return zero if the MII register has no direct mapping to the
+ * KS8851 register set.
+ */
+static int ks8851_phy_reg(int reg)
+{
+	switch (reg) {
+	case MII_BMCR:
+		return KS_P1MBCR;
+	case MII_BMSR:
+		return KS_P1MBSR;
+	case MII_PHYSID1:
+		return KS_PHY1ILR;
+	case MII_PHYSID2:
+		return KS_PHY1IHR;
+	case MII_ADVERTISE:
+		return KS_P1ANAR;
+	case MII_LPA:
+		return KS_P1ANLPR;
+	}
+
+	return 0x0;
+}
+
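A quick standalone check of the MII-to-KS8851 register mapping; the constants mirror linux/mii.h and ks8851.h, and unsupported registers map to zero as described above:

#include <stdio.h>

#define MII_BMCR	0x00
#define MII_BMSR	0x01
#define KS_P1MBCR	0xE4
#define KS_P1MBSR	0xE6

static int phy_reg(int reg)
{
	switch (reg) {
	case MII_BMCR:	return KS_P1MBCR;
	case MII_BMSR:	return KS_P1MBSR;
	}
	return 0;		/* unsupported MII register */
}

int main(void)
{
	printf("BMCR -> 0x%02x, BMSR -> 0x%02x, unknown -> 0x%02x\n",
	       phy_reg(MII_BMCR), phy_reg(MII_BMSR), phy_reg(0x10));
	/* prints: BMCR -> 0xe4, BMSR -> 0xe6, unknown -> 0x00 */
	return 0;
}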
+/**
+ * ks8851_phy_read - MII interface PHY register read.
+ * @dev: The network device the PHY is on.
+ * @phy_addr: Address of PHY (ignored as we only have one)
+ * @reg: The register to read.
+ *
+ * This call reads data from the PHY register specified in @reg. Since the
+ * device does not support all the MII registers, the non-existent values
+ * are always returned as zero.
+ *
+ * We return zero for unsupported registers as the MII code does not check
+ * the value returned for any error status, and simply returns it to the
+ * caller. The mii-tool that the driver was tested with takes any negative error
+ * as real PHY capabilities, thus displaying incorrect data to the user.
+ */
+static int ks8851_phy_read(struct net_device *dev, int phy_addr, int reg)
+{
+	struct ks8851_net *ks = netdev_priv(dev);
+	int ksreg;
+	int result;
+
+	ksreg = ks8851_phy_reg(reg);
+	if (!ksreg)
+		return 0x0;	/* no error return allowed, so use zero */
+
+	mutex_lock(&ks->lock);
+	result = ks8851_rdreg16(ks, ksreg);
+	mutex_unlock(&ks->lock);
+
+	return result;
+}
+
+static void ks8851_phy_write(struct net_device *dev,
+			     int phy, int reg, int value)
+{
+	struct ks8851_net *ks = netdev_priv(dev);
+	int ksreg;
+
+	ksreg = ks8851_phy_reg(reg);
+	if (ksreg) {
+		mutex_lock(&ks->lock);
+		ks8851_wrreg16(ks, ksreg, value);
+		mutex_unlock(&ks->lock);
+	}
+}
+
+/**
+ * ks8851_read_selftest - read the selftest memory info.
+ * @ks: The device state
+ *
+ * Read and check the TX/RX memory selftest information.
+ */
+static int ks8851_read_selftest(struct ks8851_net *ks)
+{
+	unsigned both_done = MBIR_TXMBF | MBIR_RXMBF;
+	int ret = 0;
+	unsigned rd;
+
+	rd = ks8851_rdreg16(ks, KS_MBIR);
+
+	if ((rd & both_done) != both_done) {
+		ks_warn(ks, "Memory selftest not finished\n");
+		return 0;
+	}
+
+	if (rd & MBIR_TXMBFA) {
+		ks_err(ks, "TX memory selftest fail\n");
+		ret |= 1;
+	}
+
+	if (rd & MBIR_RXMBFA) {
+		ks_err(ks, "RX memory selftest fail\n");
+		ret |= 2;
+	}
+
+	return ret;
+}
+
+/* driver bus management functions */
+
+static int __devinit ks8851_probe(struct spi_device *spi)
+{
+	struct net_device *ndev;
+	struct ks8851_net *ks;
+	int ret;
+
+	ndev = alloc_etherdev(sizeof(struct ks8851_net));
+	if (!ndev) {
+		dev_err(&spi->dev, "failed to alloc ethernet device\n");
+		return -ENOMEM;
+	}
+
+	spi->bits_per_word = 8;
+
+	ks = netdev_priv(ndev);
+
+	ks->netdev = ndev;
+	ks->spidev = spi;
+	ks->tx_space = 6144;
+
+	mutex_init(&ks->lock);
+	spin_lock_init(&ks->statelock);
+
+	INIT_WORK(&ks->tx_work, ks8851_tx_work);
+	INIT_WORK(&ks->irq_work, ks8851_irq_work);
+	INIT_WORK(&ks->rxctrl_work, ks8851_rxctrl_work);
+
+	/* initialise pre-made spi transfer messages */
+
+	spi_message_init(&ks->spi_msg1);
+	spi_message_add_tail(&ks->spi_xfer1, &ks->spi_msg1);
+
+	spi_message_init(&ks->spi_msg2);
+	spi_message_add_tail(&ks->spi_xfer2[0], &ks->spi_msg2);
+	spi_message_add_tail(&ks->spi_xfer2[1], &ks->spi_msg2);
+
+	/* setup mii state */
+	ks->mii.dev		= ndev;
+	ks->mii.phy_id		= 1;
+	ks->mii.phy_id_mask	= 1;
+	ks->mii.reg_num_mask	= 0xf;
+	ks->mii.mdio_read	= ks8851_phy_read;
+	ks->mii.mdio_write	= ks8851_phy_write;
+
+	dev_info(&spi->dev, "message enable is %d\n", msg_enable);
+
+	/* set the default message enable */
+	ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
+						     NETIF_MSG_PROBE |
+						     NETIF_MSG_LINK));
+
+	skb_queue_head_init(&ks->txq);
+
+	SET_ETHTOOL_OPS(ndev, &ks8851_ethtool_ops);
+	SET_NETDEV_DEV(ndev, &spi->dev);
+
+	dev_set_drvdata(&spi->dev, ks);
+
+	ndev->if_port = IF_PORT_100BASET;
+	ndev->netdev_ops = &ks8851_netdev_ops;
+	ndev->irq = spi->irq;
+
+	/* simple check for a valid chip being connected to the bus */
+
+	if ((ks8851_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
+		dev_err(&spi->dev, "failed to read device ID\n");
+		ret = -ENODEV;
+		goto err_id;
+	}
+
+	ks8851_read_selftest(ks);
+	ks8851_init_mac(ks);
+
+	ret = request_irq(spi->irq, ks8851_irq, IRQF_TRIGGER_LOW,
+			  ndev->name, ks);
+	if (ret < 0) {
+		dev_err(&spi->dev, "failed to get irq\n");
+		goto err_irq;
+	}
+
+	ret = register_netdev(ndev);
+	if (ret) {
+		dev_err(&spi->dev, "failed to register network device\n");
+		goto err_netdev;
+	}
+
+	dev_info(&spi->dev, "revision %d, MAC %pM, IRQ %d\n",
+		 CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)),
+		 ndev->dev_addr, ndev->irq);
+
+	return 0;
+
+
+err_netdev:
+	free_irq(ndev->irq, ndev);
+
+err_id:
+err_irq:
+	free_netdev(ndev);
+	return ret;
+}
+
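The probe-time identity check above masks off the revision field before comparing against CIDER_ID. A standalone worked example using the CIDER bit layout from ks8851.h and a hypothetical readback value:

#include <stdio.h>

#define CIDER_ID	0x8870
#define CIDER_REV_MASK	(0x7 << 1)
#define CIDER_REV_GET(_v)	(((_v) >> 1) & 0x7)

int main(void)
{
	unsigned cider = 0x8872;	/* hypothetical readback: ID 0x8870, rev 1 */

	printf("id ok: %d, revision %d\n",
	       (cider & ~CIDER_REV_MASK) == CIDER_ID, CIDER_REV_GET(cider));
	/* prints: id ok: 1, revision 1 */
	return 0;
}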
+static int __devexit ks8851_remove(struct spi_device *spi)
+{
+	struct ks8851_net *priv = dev_get_drvdata(&spi->dev);
+
+	if (netif_msg_drv(priv))
+		dev_info(&spi->dev, "remove");
+
+	unregister_netdev(priv->netdev);
+	free_irq(spi->irq, priv);
+	free_netdev(priv->netdev);
+
+	return 0;
+}
+
+static struct spi_driver ks8851_driver = {
+	.driver = {
+		.name = "ks8851",
+		.owner = THIS_MODULE,
+	},
+	.probe = ks8851_probe,
+	.remove = __devexit_p(ks8851_remove),
+};
+
+static int __init ks8851_init(void)
+{
+	return spi_register_driver(&ks8851_driver);
+}
+
+static void __exit ks8851_exit(void)
+{
+	spi_unregister_driver(&ks8851_driver);
+}
+
+module_init(ks8851_init);
+module_exit(ks8851_exit);
+
+MODULE_DESCRIPTION("KS8851 Network driver");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_LICENSE("GPL");
+
+module_param_named(message, msg_enable, int, 0);
+MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
diff --git a/drivers/net/ks8851.h b/drivers/net/ks8851.h
new file mode 100644
index 0000000..85abe14
--- /dev/null
+++ b/drivers/net/ks8851.h
@@ -0,0 +1,296 @@
+/* drivers/net/ks8851.h
+ *
+ * Copyright 2009 Simtec Electronics
+ *      Ben Dooks <ben@simtec.co.uk>
+ *
+ * KS8851 register definitions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#define KS_CCR					0x08
+#define CCR_EEPROM				(1 << 9)
+#define CCR_SPI					(1 << 8)
+#define CCR_32PIN				(1 << 0)
+
+/* MAC address registers */
+#define KS_MARL					0x10
+#define KS_MARM					0x12
+#define KS_MARH					0x14
+
+#define KS_OBCR					0x20
+#define OBCR_ODS_16mA				(1 << 6)
+
+#define KS_EEPCR				0x22
+#define EEPCR_EESA				(1 << 4)
+#define EEPCR_EESB				(1 << 3)
+#define EEPCR_EEDO				(1 << 2)
+#define EEPCR_EESCK				(1 << 1)
+#define EEPCR_EECS				(1 << 0)
+
+#define KS_MBIR					0x24
+#define MBIR_TXMBF				(1 << 12)
+#define MBIR_TXMBFA				(1 << 11)
+#define MBIR_RXMBF				(1 << 4)
+#define MBIR_RXMBFA				(1 << 3)
+
+#define KS_GRR					0x26
+#define GRR_QMU					(1 << 1)
+#define GRR_GSR					(1 << 0)
+
+#define KS_WFCR					0x2A
+#define WFCR_MPRXE				(1 << 7)
+#define WFCR_WF3E				(1 << 3)
+#define WFCR_WF2E				(1 << 2)
+#define WFCR_WF1E				(1 << 1)
+#define WFCR_WF0E				(1 << 0)
+
+#define KS_WF0CRC0				0x30
+#define KS_WF0CRC1				0x32
+#define KS_WF0BM0				0x34
+#define KS_WF0BM1				0x36
+#define KS_WF0BM2				0x38
+#define KS_WF0BM3				0x3A
+
+#define KS_WF1CRC0				0x40
+#define KS_WF1CRC1				0x42
+#define KS_WF1BM0				0x44
+#define KS_WF1BM1				0x46
+#define KS_WF1BM2				0x48
+#define KS_WF1BM3				0x4A
+
+#define KS_WF2CRC0				0x50
+#define KS_WF2CRC1				0x52
+#define KS_WF2BM0				0x54
+#define KS_WF2BM1				0x56
+#define KS_WF2BM2				0x58
+#define KS_WF2BM3				0x5A
+
+#define KS_WF3CRC0				0x60
+#define KS_WF3CRC1				0x62
+#define KS_WF3BM0				0x64
+#define KS_WF3BM1				0x66
+#define KS_WF3BM2				0x68
+#define KS_WF3BM3				0x6A
+
+#define KS_TXCR					0x70
+#define TXCR_TCGICMP				(1 << 8)
+#define TXCR_TCGUDP				(1 << 7)
+#define TXCR_TCGTCP				(1 << 6)
+#define TXCR_TCGIP				(1 << 5)
+#define TXCR_FTXQ				(1 << 4)
+#define TXCR_TXFCE				(1 << 3)
+#define TXCR_TXPE				(1 << 2)
+#define TXCR_TXCRC				(1 << 1)
+#define TXCR_TXE				(1 << 0)
+
+#define KS_TXSR					0x72
+#define TXSR_TXLC				(1 << 13)
+#define TXSR_TXMC				(1 << 12)
+#define TXSR_TXFID_MASK				(0x3f << 0)
+#define TXSR_TXFID_SHIFT			(0)
+#define TXSR_TXFID_GET(_v)			(((_v) >> 0) & 0x3f)
+
+#define KS_RXCR1				0x74
+#define RXCR1_FRXQ				(1 << 15)
+#define RXCR1_RXUDPFCC				(1 << 14)
+#define RXCR1_RXTCPFCC				(1 << 13)
+#define RXCR1_RXIPFCC				(1 << 12)
+#define RXCR1_RXPAFMA				(1 << 11)
+#define RXCR1_RXFCE				(1 << 10)
+#define RXCR1_RXEFE				(1 << 9)
+#define RXCR1_RXMAFMA				(1 << 8)
+#define RXCR1_RXBE				(1 << 7)
+#define RXCR1_RXME				(1 << 6)
+#define RXCR1_RXUE				(1 << 5)
+#define RXCR1_RXAE				(1 << 4)
+#define RXCR1_RXINVF				(1 << 1)
+#define RXCR1_RXE				(1 << 0)
+
+#define KS_RXCR2				0x76
+#define RXCR2_SRDBL_MASK			(0x7 << 5)
+#define RXCR2_SRDBL_SHIFT			(5)
+#define RXCR2_SRDBL_4B				(0x0 << 5)
+#define RXCR2_SRDBL_8B				(0x1 << 5)
+#define RXCR2_SRDBL_16B				(0x2 << 5)
+#define RXCR2_SRDBL_32B				(0x3 << 5)
+#define RXCR2_SRDBL_FRAME			(0x4 << 5)
+#define RXCR2_IUFFP				(1 << 4)
+#define RXCR2_RXIUFCEZ				(1 << 3)
+#define RXCR2_UDPLFE				(1 << 2)
+#define RXCR2_RXICMPFCC				(1 << 1)
+#define RXCR2_RXSAF				(1 << 0)
+
+#define KS_TXMIR				0x78
+
+#define KS_RXFHSR				0x7C
+#define RXFSHR_RXFV				(1 << 15)
+#define RXFSHR_RXICMPFCS			(1 << 13)
+#define RXFSHR_RXIPFCS				(1 << 12)
+#define RXFSHR_RXTCPFCS				(1 << 11)
+#define RXFSHR_RXUDPFCS				(1 << 10)
+#define RXFSHR_RXBF				(1 << 7)
+#define RXFSHR_RXMF				(1 << 6)
+#define RXFSHR_RXUF				(1 << 5)
+#define RXFSHR_RXMR				(1 << 4)
+#define RXFSHR_RXFT				(1 << 3)
+#define RXFSHR_RXFTL				(1 << 2)
+#define RXFSHR_RXRF				(1 << 1)
+#define RXFSHR_RXCE				(1 << 0)
+
+#define KS_RXFHBCR				0x7E
+#define KS_TXQCR				0x80
+#define TXQCR_AETFE				(1 << 2)
+#define TXQCR_TXQMAM				(1 << 1)
+#define TXQCR_METFE				(1 << 0)
+
+#define KS_RXQCR				0x82
+#define RXQCR_RXDTTS				(1 << 12)
+#define RXQCR_RXDBCTS				(1 << 11)
+#define RXQCR_RXFCTS				(1 << 10)
+#define RXQCR_RXIPHTOE				(1 << 9)
+#define RXQCR_RXDTTE				(1 << 7)
+#define RXQCR_RXDBCTE				(1 << 6)
+#define RXQCR_RXFCTE				(1 << 5)
+#define RXQCR_ADRFE				(1 << 4)
+#define RXQCR_SDA				(1 << 3)
+#define RXQCR_RRXEF				(1 << 0)
+
+#define KS_TXFDPR				0x84
+#define TXFDPR_TXFPAI				(1 << 14)
+#define TXFDPR_TXFP_MASK			(0x7ff << 0)
+#define TXFDPR_TXFP_SHIFT			(0)
+
+#define KS_RXFDPR				0x86
+#define RXFDPR_RXFPAI				(1 << 14)
+
+#define KS_RXDTTR				0x8C
+#define KS_RXDBCTR				0x8E
+
+#define KS_IER					0x90
+#define KS_ISR					0x92
+#define IRQ_LCI					(1 << 15)
+#define IRQ_TXI					(1 << 14)
+#define IRQ_RXI					(1 << 13)
+#define IRQ_RXOI				(1 << 11)
+#define IRQ_TXPSI				(1 << 9)
+#define IRQ_RXPSI				(1 << 8)
+#define IRQ_TXSAI				(1 << 6)
+#define IRQ_RXWFDI				(1 << 5)
+#define IRQ_RXMPDI				(1 << 4)
+#define IRQ_LDI					(1 << 3)
+#define IRQ_EDI					(1 << 2)
+#define IRQ_SPIBEI				(1 << 1)
+#define IRQ_DEDI				(1 << 0)
+
+#define KS_RXFCTR				0x9C
+#define KS_RXFC					0x9D
+#define RXFCTR_RXFC_MASK			(0xff << 8)
+#define RXFCTR_RXFC_SHIFT			(8)
+#define RXFCTR_RXFC_GET(_v)			(((_v) >> 8) & 0xff)
+#define RXFCTR_RXFCT_MASK			(0xff << 0)
+#define RXFCTR_RXFCT_SHIFT			(0)
+
+#define KS_TXNTFSR				0x9E
+
+#define KS_MAHTR0				0xA0
+#define KS_MAHTR1				0xA2
+#define KS_MAHTR2				0xA4
+#define KS_MAHTR3				0xA6
+
+#define KS_FCLWR				0xB0
+#define KS_FCHWR				0xB2
+#define KS_FCOWR				0xB4
+
+#define KS_CIDER				0xC0
+#define CIDER_ID				0x8870
+#define CIDER_REV_MASK				(0x7 << 1)
+#define CIDER_REV_SHIFT				(1)
+#define CIDER_REV_GET(_v)			(((_v) >> 1) & 0x7)
+
+#define KS_CGCR					0xC6
+
+#define KS_IACR					0xC8
+#define IACR_RDEN				(1 << 12)
+#define IACR_TSEL_MASK				(0x3 << 10)
+#define IACR_TSEL_SHIFT				(10)
+#define IACR_TSEL_MIB				(0x3 << 10)
+#define IACR_ADDR_MASK				(0x1f << 0)
+#define IACR_ADDR_SHIFT				(0)
+
+#define KS_IADLR				0xD0
+#define KS_IAHDR				0xD2
+
+#define KS_PMECR				0xD4
+#define PMECR_PME_DELAY				(1 << 14)
+#define PMECR_PME_POL				(1 << 12)
+#define PMECR_WOL_WAKEUP			(1 << 11)
+#define PMECR_WOL_MAGICPKT			(1 << 10)
+#define PMECR_WOL_LINKUP			(1 << 9)
+#define PMECR_WOL_ENERGY			(1 << 8)
+#define PMECR_AUTO_WAKE_EN			(1 << 7)
+#define PMECR_WAKEUP_NORMAL			(1 << 6)
+#define PMECR_WKEVT_MASK			(0xf << 2)
+#define PMECR_WKEVT_SHIFT			(2)
+#define PMECR_WKEVT_GET(_v)			(((_v) >> 2) & 0xf)
+#define PMECR_WKEVT_ENERGY			(0x1 << 2)
+#define PMECR_WKEVT_LINK			(0x2 << 2)
+#define PMECR_WKEVT_MAGICPKT			(0x4 << 2)
+#define PMECR_WKEVT_FRAME			(0x8 << 2)
+#define PMECR_PM_MASK				(0x3 << 0)
+#define PMECR_PM_SHIFT				(0)
+#define PMECR_PM_NORMAL				(0x0 << 0)
+#define PMECR_PM_ENERGY				(0x1 << 0)
+#define PMECR_PM_SOFTDOWN			(0x2 << 0)
+#define PMECR_PM_POWERSAVE			(0x3 << 0)
+
+/* Standard MII PHY data */
+#define KS_P1MBCR				0xE4
+#define KS_P1MBSR				0xE6
+#define KS_PHY1ILR				0xE8
+#define KS_PHY1IHR				0xEA
+#define KS_P1ANAR				0xEC
+#define KS_P1ANLPR				0xEE
+
+#define KS_P1SCLMD				0xF4
+#define P1SCLMD_LEDOFF				(1 << 15)
+#define P1SCLMD_TXIDS				(1 << 14)
+#define P1SCLMD_RESTARTAN			(1 << 13)
+#define P1SCLMD_DISAUTOMDIX			(1 << 10)
+#define P1SCLMD_FORCEMDIX			(1 << 9)
+#define P1SCLMD_AUTONEGEN			(1 << 7)
+#define P1SCLMD_FORCE100			(1 << 6)
+#define P1SCLMD_FORCEFDX			(1 << 5)
+#define P1SCLMD_ADV_FLOW			(1 << 4)
+#define P1SCLMD_ADV_100BT_FDX			(1 << 3)
+#define P1SCLMD_ADV_100BT_HDX			(1 << 2)
+#define P1SCLMD_ADV_10BT_FDX			(1 << 1)
+#define P1SCLMD_ADV_10BT_HDX			(1 << 0)
+
+#define KS_P1CR					0xF6
+#define P1CR_HP_MDIX				(1 << 15)
+#define P1CR_REV_POL				(1 << 13)
+#define P1CR_OP_100M				(1 << 10)
+#define P1CR_OP_FDX				(1 << 9)
+#define P1CR_OP_MDI				(1 << 7)
+#define P1CR_AN_DONE				(1 << 6)
+#define P1CR_LINK_GOOD				(1 << 5)
+#define P1CR_PNTR_FLOW				(1 << 4)
+#define P1CR_PNTR_100BT_FDX			(1 << 3)
+#define P1CR_PNTR_100BT_HDX			(1 << 2)
+#define P1CR_PNTR_10BT_FDX			(1 << 1)
+#define P1CR_PNTR_10BT_HDX			(1 << 0)
+
+/* TX Frame control */
+
+#define TXFR_TXIC				(1 << 15)
+#define TXFR_TXFID_MASK				(0x3f << 0)
+#define TXFR_TXFID_SHIFT			(0)
+
+/* SPI frame opcodes */
+#define KS_SPIOP_RD				(0x00)
+#define KS_SPIOP_WR				(0x40)
+#define KS_SPIOP_RXFIFO				(0x80)
+#define KS_SPIOP_TXFIFO				(0xC0)
diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c
index acd143d..61eabca 100644
--- a/drivers/net/macsonic.c
+++ b/drivers/net/macsonic.c
@@ -179,7 +179,7 @@
 	.ndo_set_mac_address	= eth_mac_addr,
 };
 
-static int __init macsonic_init(struct net_device *dev)
+static int __devinit macsonic_init(struct net_device *dev)
 {
 	struct sonic_local* lp = netdev_priv(dev);
 
@@ -223,7 +223,7 @@
 	return 0;
 }
 
-static int __init mac_onboard_sonic_ethernet_addr(struct net_device *dev)
+static int __devinit mac_onboard_sonic_ethernet_addr(struct net_device *dev)
 {
 	struct sonic_local *lp = netdev_priv(dev);
 	const int prom_addr = ONBOARD_SONIC_PROM_BASE;
@@ -288,7 +288,7 @@
 	} else return 0;
 }
 
-static int __init mac_onboard_sonic_probe(struct net_device *dev)
+static int __devinit mac_onboard_sonic_probe(struct net_device *dev)
 {
 	/* Bwahahaha */
 	static int once_is_more_than_enough;
@@ -409,7 +409,7 @@
 	return macsonic_init(dev);
 }
 
-static int __init mac_nubus_sonic_ethernet_addr(struct net_device *dev,
+static int __devinit mac_nubus_sonic_ethernet_addr(struct net_device *dev,
 						unsigned long prom_addr,
 						int id)
 {
@@ -424,7 +424,7 @@
 	return 0;
 }
 
-static int __init macsonic_ident(struct nubus_dev *ndev)
+static int __devinit macsonic_ident(struct nubus_dev *ndev)
 {
 	if (ndev->dr_hw == NUBUS_DRHW_ASANTE_LC &&
 	    ndev->dr_sw == NUBUS_DRSW_SONIC_LC)
@@ -449,7 +449,7 @@
 	return -1;
 }
 
-static int __init mac_nubus_sonic_probe(struct net_device *dev)
+static int __devinit mac_nubus_sonic_probe(struct net_device *dev)
 {
 	static int slots;
 	struct nubus_dev* ndev = NULL;
@@ -562,7 +562,7 @@
 	return macsonic_init(dev);
 }
 
-static int __init mac_sonic_probe(struct platform_device *pdev)
+static int __devinit mac_sonic_probe(struct platform_device *pdev)
 {
 	struct net_device *dev;
 	struct sonic_local *lp;
@@ -575,6 +575,7 @@
 	lp = netdev_priv(dev);
 	lp->device = &pdev->dev;
 	SET_NETDEV_DEV(dev, &pdev->dev);
+	platform_set_drvdata(pdev, dev);
 
 	/* This will catch fatal stuff like -ENOMEM as well as success */
 	err = mac_onboard_sonic_probe(dev);
diff --git a/drivers/net/mlx4/en_ethtool.c b/drivers/net/mlx4/en_ethtool.c
index 091f990..86467b4 100644
--- a/drivers/net/mlx4/en_ethtool.c
+++ b/drivers/net/mlx4/en_ethtool.c
@@ -220,7 +220,7 @@
 {
 	cmd->autoneg = AUTONEG_DISABLE;
 	cmd->supported = SUPPORTED_10000baseT_Full;
-	cmd->advertising = SUPPORTED_10000baseT_Full;
+	cmd->advertising = ADVERTISED_10000baseT_Full;
 	if (netif_carrier_ok(dev)) {
 		cmd->speed = SPEED_10000;
 		cmd->duplex = DUPLEX_FULL;
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index e1cdba7..f86e050 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -210,6 +210,7 @@
 #define NETXEN_CTX_SIGNATURE	0xdee0
 #define NETXEN_CTX_SIGNATURE_V2	0x0002dee0
 #define NETXEN_CTX_RESET	0xbad0
+#define NETXEN_CTX_D3_RESET	0xacc0
 #define NETXEN_RCV_PRODUCER(ringid)	(ringid)
 
 #define PHAN_PEG_RCV_INITIALIZED	0xff01
@@ -773,6 +774,8 @@
 	u32 crb_cmd_consumer;
 	u32 num_desc;
 
+	struct netdev_queue *txq;
+
 	struct netxen_cmd_buffer *cmd_buf_arr;
 	struct cmd_desc_type0 *desc_head;
 	dma_addr_t phys_addr;
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index 4754f5c..9f8ae47 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -684,10 +684,8 @@
 			goto err_out_free;
 	} else {
 		err = netxen_init_old_ctx(adapter);
-		if (err) {
-			netxen_free_hw_resources(adapter);
-			return err;
-		}
+		if (err)
+			goto err_out_free;
 	}
 
 	return 0;
@@ -708,15 +706,18 @@
 	int port = adapter->portnum;
 
 	if (adapter->fw_major >= 4) {
-		nx_fw_cmd_destroy_tx_ctx(adapter);
 		nx_fw_cmd_destroy_rx_ctx(adapter);
+		nx_fw_cmd_destroy_tx_ctx(adapter);
 	} else {
 		netxen_api_lock(adapter);
 		NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
-				NETXEN_CTX_RESET | port);
+				NETXEN_CTX_D3_RESET | port);
 		netxen_api_unlock(adapter);
 	}
 
+	/* Allow dma queues to drain after context reset */
+	msleep(20);
+
 	recv_ctx = &adapter->recv_ctx;
 
 	if (recv_ctx->hwctx != NULL) {
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index ce3b89d..b9123d4 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -461,13 +461,14 @@
 	i = 0;
 
 	tx_ring = adapter->tx_ring;
-	netif_tx_lock_bh(adapter->netdev);
+	__netif_tx_lock_bh(tx_ring->txq);
 
 	producer = tx_ring->producer;
 	consumer = tx_ring->sw_consumer;
 
-	if (nr_desc >= find_diff_among(producer, consumer, tx_ring->num_desc)) {
-		netif_tx_unlock_bh(adapter->netdev);
+	if (nr_desc >= netxen_tx_avail(tx_ring)) {
+		netif_tx_stop_queue(tx_ring->txq);
+		__netif_tx_unlock_bh(tx_ring->txq);
 		return -EBUSY;
 	}
 
@@ -490,7 +491,7 @@
 
 	netxen_nic_update_cmd_producer(adapter, tx_ring);
 
-	netif_tx_unlock_bh(adapter->netdev);
+	__netif_tx_unlock_bh(tx_ring->txq);
 
 	return 0;
 }
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index b899bd5..5d3343e 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -214,6 +214,7 @@
 	adapter->tx_ring = tx_ring;
 
 	tx_ring->num_desc = adapter->num_txd;
+	tx_ring->txq = netdev_get_tx_queue(netdev, 0);
 
 	cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring));
 	if (cmd_buf_arr == NULL) {
@@ -1400,10 +1401,10 @@
 		smp_mb();
 
 		if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
-			netif_tx_lock(netdev);
+			__netif_tx_lock(tx_ring->txq, smp_processor_id());
 			if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
 				netif_wake_queue(netdev);
-			netif_tx_unlock(netdev);
+			__netif_tx_unlock(tx_ring->txq);
 		}
 	}
 	/*
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 27539dd..637ac8b 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -215,9 +215,9 @@
 
 	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
 		sds_ring = &recv_ctx->sds_rings[ring];
-		napi_disable(&sds_ring->napi);
 		netxen_nic_disable_int(sds_ring);
-		synchronize_irq(sds_ring->irq);
+		napi_synchronize(&sds_ring->napi);
+		napi_disable(&sds_ring->napi);
 	}
 }
 
@@ -833,11 +833,11 @@
 
 	adapter->ahw.linkup = 0;
 
-	netxen_napi_enable(adapter);
-
 	if (adapter->max_sds_rings > 1)
 		netxen_config_rss(adapter, 1);
 
+	netxen_napi_enable(adapter);
+
 	if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)
 		netxen_linkevent_request(adapter, 1);
 	else
@@ -851,8 +851,9 @@
 static void
 netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
 {
+	spin_lock(&adapter->tx_clean_lock);
 	netif_carrier_off(netdev);
-	netif_stop_queue(netdev);
+	netif_tx_disable(netdev);
 
 	if (adapter->stop_port)
 		adapter->stop_port(adapter);
@@ -863,9 +864,10 @@
 	netxen_napi_disable(adapter);
 
 	netxen_release_tx_buffers(adapter);
+	spin_unlock(&adapter->tx_clean_lock);
 
-	FLUSH_SCHEDULED_WORK();
 	del_timer_sync(&adapter->watchdog_timer);
+	FLUSH_SCHEDULED_WORK();
 }
 
 
@@ -943,8 +945,8 @@
 static void
 netxen_nic_detach(struct netxen_adapter *adapter)
 {
-	netxen_release_rx_buffers(adapter);
 	netxen_free_hw_resources(adapter);
+	netxen_release_rx_buffers(adapter);
 	netxen_nic_free_irq(adapter);
 	netxen_free_sw_resources(adapter);
 
@@ -1533,10 +1535,12 @@
 		printk(KERN_ALERT
 		       "%s: Device temperature %d degrees C exceeds"
 		       " maximum allowed. Hardware has been shut down.\n",
-		       netxen_nic_driver_name, temp_val);
+		       netdev->name, temp_val);
 
-		netif_carrier_off(netdev);
-		netif_stop_queue(netdev);
+		netif_device_detach(netdev);
+		netxen_nic_down(adapter, netdev);
+		netxen_nic_detach(adapter);
+
 		rv = 1;
 	} else if (temp_state == NX_TEMP_WARN) {
 		if (adapter->temp == NX_TEMP_NORMAL) {
@@ -1544,13 +1548,13 @@
 			       "%s: Device temperature %d degrees C "
 			       "exceeds operating range."
 			       " Immediate action needed.\n",
-			       netxen_nic_driver_name, temp_val);
+			       netdev->name, temp_val);
 		}
 	} else {
 		if (adapter->temp == NX_TEMP_WARN) {
 			printk(KERN_INFO
 			       "%s: Device temperature is now %d degrees C"
-			       " in normal range.\n", netxen_nic_driver_name,
+			       " in normal range.\n", netdev->name,
 			       temp_val);
 		}
 	}
@@ -1623,7 +1627,7 @@
 	struct netxen_adapter *adapter =
 		container_of(work, struct netxen_adapter, watchdog_task);
 
-	if ((adapter->portnum  == 0) && netxen_nic_check_temp(adapter))
+	if (netxen_nic_check_temp(adapter))
 		return;
 
 	if (!adapter->has_link_events)
@@ -1645,6 +1649,9 @@
 	struct netxen_adapter *adapter =
 		container_of(work, struct netxen_adapter, tx_timeout_task);
 
+	if (!netif_running(adapter->netdev))
+		return;
+
 	printk(KERN_ERR "%s %s: transmit timeout, resetting.\n",
 	       netxen_nic_driver_name, adapter->netdev->name);
 
@@ -1757,7 +1764,8 @@
 
 	if ((work_done < budget) && tx_complete) {
 		napi_complete(&sds_ring->napi);
-		netxen_nic_enable_int(sds_ring);
+		if (netif_running(adapter->netdev))
+			netxen_nic_enable_int(sds_ring);
 	}
 
 	return work_done;
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index ec7cf5a..690b9c7 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -156,6 +156,7 @@
 static int el3_rx(struct net_device *dev);
 static int el3_close(struct net_device *dev);
 static void el3_tx_timeout(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
 static void set_multicast_list(struct net_device *dev);
 static const struct ethtool_ops netdev_ethtool_ops;
 
@@ -488,8 +489,7 @@
     /* Switch to register set 1 for normal use. */
     EL3WINDOW(1);
 
-    /* Accept b-cast and phys addr only. */
-    outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
+    set_rx_mode(dev);
     outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
     outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
     outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
@@ -700,7 +700,7 @@
 		if (fifo_diag & 0x2000) {
 		    /* Rx underrun */
 		    tc589_wait_for_completion(dev, RxReset);
-		    set_multicast_list(dev);
+		    set_rx_mode(dev);
 		    outw(RxEnable, ioaddr + EL3_CMD);
 		}
 		outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
@@ -905,14 +905,11 @@
     return 0;
 }
 
-static void set_multicast_list(struct net_device *dev)
+static void set_rx_mode(struct net_device *dev)
 {
-    struct el3_private *lp = netdev_priv(dev);
-    struct pcmcia_device *link = lp->p_dev;
     unsigned int ioaddr = dev->base_addr;
     u16 opts = SetRxFilter | RxStation | RxBroadcast;
 
-    if (!pcmcia_dev_present(link)) return;
     if (dev->flags & IFF_PROMISC)
 	opts |= RxMulticast | RxProm;
     else if (dev->mc_count || (dev->flags & IFF_ALLMULTI))
@@ -920,6 +917,16 @@
     outw(opts, ioaddr + EL3_CMD);
 }
 
+static void set_multicast_list(struct net_device *dev)
+{
+	struct el3_private *priv = netdev_priv(dev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	set_rx_mode(dev);
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
 static int el3_close(struct net_device *dev)
 {
     struct el3_private *lp = netdev_priv(dev);
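The 3c589 change above splits the receive-filter update in two: a lock-free worker, set_rx_mode(), for callers that already run under the driver lock (such as the Rx-underrun recovery in the interrupt handler), and a locking wrapper, set_multicast_list(), exposed to the network core. A generic sketch of that split, with hypothetical names and types:

/* Generic "locked wrapper around a lock-free worker" sketch; the
 * struct and function names here are hypothetical, not driver code. */
#include <linux/spinlock.h>

struct demo_priv {
	spinlock_t lock;
	/* ... device state ... */
};

/* Caller must already hold priv->lock (e.g. an interrupt handler). */
static void demo_apply_rx_mode(struct demo_priv *priv)
{
	/* program the hardware receive filter here */
}

/* Entry point for callers that do not hold the lock. */
static void demo_set_multicast_list(struct demo_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	demo_apply_rx_mode(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}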
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index 18821f2..e3156c9 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -1593,6 +1593,7 @@
 static struct pci_device_id sc92031_pci_device_id_table[] __devinitdata = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x2031) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x8139) },
+	{ PCI_DEVICE(0x1088, 0x2031) },
 	{ 0, }
 };
 MODULE_DEVICE_TABLE(pci, sc92031_pci_device_id_table);
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index daf961a..3550c5d 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1151,14 +1151,7 @@
 
 	/* reset the Rx prefetch unit */
 	sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
-
-	/* Reset the RAM Buffer receive queue */
-	sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_RST_SET);
-
-	/* Reset Rx MAC FIFO */
-	sky2_write8(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), GMF_RST_SET);
-
-	sky2_read8(hw, B0_CTST);
+	mmiowb();
 }
 
 /* Clean out receive buffer area, assumes receiver hardware stopped */
@@ -1825,12 +1818,6 @@
 	if (netif_msg_ifdown(sky2))
 		printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
 
-	/* Disable port IRQ */
-	imask = sky2_read32(hw, B0_IMSK);
-	imask &= ~portirq_msk[port];
-	sky2_write32(hw, B0_IMSK, imask);
-	sky2_read32(hw, B0_IMSK);
-
 	/* Force flow control off */
 	sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
 
@@ -1870,8 +1857,6 @@
 
 	sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
 
-	sky2_rx_stop(sky2);
-
 	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
 	sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
 
@@ -1881,6 +1866,14 @@
 	sky2_write32(hw, STAT_ISR_TIMER_CNT, 0);
 	sky2_read8(hw, STAT_ISR_TIMER_CTRL);
 
+	sky2_rx_stop(sky2);
+
+	/* Disable port IRQ */
+	imask = sky2_read32(hw, B0_IMSK);
+	imask &= ~portirq_msk[port];
+	sky2_write32(hw, B0_IMSK, imask);
+	sky2_read32(hw, B0_IMSK);
+
 	synchronize_irq(hw->pdev->irq);
 	napi_synchronize(&hw->napi);
 
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index a906d39..c47237c 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -369,4 +369,12 @@
 	  (Powerline Communications) solution with an Intellon
 	  INT51x1/INT5200 chip, like the "devolo dLan duo".
 
+config USB_CDC_PHONET
+	tristate "CDC Phonet support"
+	depends on PHONET
+	help
+	  Choose this option to support the Phonet interface to a Nokia
+	  cellular modem, as found on most Nokia handsets with the
+	  "PC suite" USB profile.
+
 endmenu
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index b870b0b..e17afb7 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -21,4 +21,5 @@
 obj-$(CONFIG_USB_NET_MCS7830)	+= mcs7830.o
 obj-$(CONFIG_USB_USBNET)	+= usbnet.o
 obj-$(CONFIG_USB_NET_INT51X1)	+= int51x1.o
+obj-$(CONFIG_USB_CDC_PHONET)	+= cdc-phonet.o
 
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
new file mode 100644
index 0000000..792af72
--- /dev/null
+++ b/drivers/net/usb/cdc-phonet.c
@@ -0,0 +1,461 @@
+/*
+ * phonet.c -- USB CDC Phonet host driver
+ *
+ * Copyright (C) 2008-2009 Nokia Corporation. All rights reserved.
+ *
+ * Author: Rémi Denis-Courmont
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/usb/cdc.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_phonet.h>
+
+#define PN_MEDIA_USB	0x1B
+
+static const unsigned rxq_size = 17;
+
+struct usbpn_dev {
+	struct net_device	*dev;
+
+	struct usb_interface	*intf, *data_intf;
+	struct usb_device	*usb;
+	unsigned int		tx_pipe, rx_pipe;
+	u8 active_setting;
+	u8 disconnected;
+
+	unsigned		tx_queue;
+	spinlock_t		tx_lock;
+
+	spinlock_t		rx_lock;
+	struct sk_buff		*rx_skb;
+	struct urb		*urbs[0];
+};
+
+static void tx_complete(struct urb *req);
+static void rx_complete(struct urb *req);
+
+/*
+ * Network device callbacks
+ */
+static int usbpn_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct usbpn_dev *pnd = netdev_priv(dev);
+	struct urb *req = NULL;
+	unsigned long flags;
+	int err;
+
+	if (skb->protocol != htons(ETH_P_PHONET))
+		goto drop;
+
+	req = usb_alloc_urb(0, GFP_ATOMIC);
+	if (!req)
+		goto drop;
+	usb_fill_bulk_urb(req, pnd->usb, pnd->tx_pipe, skb->data, skb->len,
+				tx_complete, skb);
+	req->transfer_flags = URB_ZERO_PACKET;
+	err = usb_submit_urb(req, GFP_ATOMIC);
+	if (err) {
+		usb_free_urb(req);
+		goto drop;
+	}
+
+	spin_lock_irqsave(&pnd->tx_lock, flags);
+	pnd->tx_queue++;
+	if (pnd->tx_queue >= dev->tx_queue_len)
+		netif_stop_queue(dev);
+	spin_unlock_irqrestore(&pnd->tx_lock, flags);
+	return 0;
+
+drop:
+	dev_kfree_skb(skb);
+	dev->stats.tx_dropped++;
+	return 0;
+}
+
+static void tx_complete(struct urb *req)
+{
+	struct sk_buff *skb = req->context;
+	struct net_device *dev = skb->dev;
+	struct usbpn_dev *pnd = netdev_priv(dev);
+
+	switch (req->status) {
+	case 0:
+		dev->stats.tx_bytes += skb->len;
+		break;
+
+	case -ENOENT:
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		dev->stats.tx_aborted_errors++;
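+		/* fall through */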
+	default:
+		dev->stats.tx_errors++;
+		dev_dbg(&dev->dev, "TX error (%d)\n", req->status);
+	}
+	dev->stats.tx_packets++;
+
+	spin_lock(&pnd->tx_lock);
+	pnd->tx_queue--;
+	netif_wake_queue(dev);
+	spin_unlock(&pnd->tx_lock);
+
+	dev_kfree_skb_any(skb);
+	usb_free_urb(req);
+}
+
+static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags)
+{
+	struct net_device *dev = pnd->dev;
+	struct page *page;
+	int err;
+
+	page = __netdev_alloc_page(dev, gfp_flags);
+	if (!page)
+		return -ENOMEM;
+
+	usb_fill_bulk_urb(req, pnd->usb, pnd->rx_pipe, page_address(page),
+				PAGE_SIZE, rx_complete, dev);
+	req->transfer_flags = 0;
+	err = usb_submit_urb(req, gfp_flags);
+	if (unlikely(err)) {
+		dev_dbg(&dev->dev, "RX submit error (%d)\n", err);
+		netdev_free_page(dev, page);
+	}
+	return err;
+}
+
+static void rx_complete(struct urb *req)
+{
+	struct net_device *dev = req->context;
+	struct usbpn_dev *pnd = netdev_priv(dev);
+	struct page *page = virt_to_page(req->transfer_buffer);
+	struct sk_buff *skb;
+	unsigned long flags;
+
+	switch (req->status) {
+	case 0:
+		spin_lock_irqsave(&pnd->rx_lock, flags);
+		skb = pnd->rx_skb;
+		if (!skb) {
+			skb = pnd->rx_skb = netdev_alloc_skb(dev, 12);
+			if (likely(skb)) {
+				/* Can't use pskb_pull() on page in IRQ */
+				memcpy(skb_put(skb, 1), page_address(page), 1);
+				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+						page, 1, req->actual_length);
+				page = NULL;
+			}
+		} else {
+			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+					page, 0, req->actual_length);
+			page = NULL;
+		}
+		if (req->actual_length < PAGE_SIZE)
+			pnd->rx_skb = NULL; /* Last fragment */
+		else
+			skb = NULL;
+		spin_unlock_irqrestore(&pnd->rx_lock, flags);
+		if (skb) {
+			skb->protocol = htons(ETH_P_PHONET);
+			skb_reset_mac_header(skb);
+			__skb_pull(skb, 1);
+			skb->dev = dev;
+			dev->stats.rx_packets++;
+			dev->stats.rx_bytes += skb->len;
+
+			netif_rx(skb);
+		}
+		goto resubmit;
+
+	case -ENOENT:
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		req = NULL;
+		break;
+
+	case -EOVERFLOW:
+		dev->stats.rx_over_errors++;
+		dev_dbg(&dev->dev, "RX overflow\n");
+		break;
+
+	case -EILSEQ:
+		dev->stats.rx_crc_errors++;
+		break;
+	}
+
+	dev->stats.rx_errors++;
+resubmit:
+	if (page)
+		netdev_free_page(dev, page);
+	if (req)
+		rx_submit(pnd, req, GFP_ATOMIC);
+}
+
+static int usbpn_close(struct net_device *dev);
+
+static int usbpn_open(struct net_device *dev)
+{
+	struct usbpn_dev *pnd = netdev_priv(dev);
+	int err;
+	unsigned i;
+	unsigned num = pnd->data_intf->cur_altsetting->desc.bInterfaceNumber;
+
+	err = usb_set_interface(pnd->usb, num, pnd->active_setting);
+	if (err)
+		return err;
+
+	for (i = 0; i < rxq_size; i++) {
+		struct urb *req = usb_alloc_urb(0, GFP_KERNEL);
+
+		if (!req || rx_submit(pnd, req, GFP_KERNEL)) {
+			usbpn_close(dev);
+			return -ENOMEM;
+		}
+		pnd->urbs[i] = req;
+	}
+
+	netif_wake_queue(dev);
+	return 0;
+}
+
+static int usbpn_close(struct net_device *dev)
+{
+	struct usbpn_dev *pnd = netdev_priv(dev);
+	unsigned i;
+	unsigned num = pnd->data_intf->cur_altsetting->desc.bInterfaceNumber;
+
+	netif_stop_queue(dev);
+
+	for (i = 0; i < rxq_size; i++) {
+		struct urb *req = pnd->urbs[i];
+
+		if (!req)
+			continue;
+		usb_kill_urb(req);
+		usb_free_urb(req);
+		pnd->urbs[i] = NULL;
+	}
+
+	return usb_set_interface(pnd->usb, num, !pnd->active_setting);
+}
+
+static int usbpn_set_mtu(struct net_device *dev, int new_mtu)
+{
+	if ((new_mtu < PHONET_MIN_MTU) || (new_mtu > PHONET_MAX_MTU))
+		return -EINVAL;
+
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+static const struct net_device_ops usbpn_ops = {
+	.ndo_open	= usbpn_open,
+	.ndo_stop	= usbpn_close,
+	.ndo_start_xmit = usbpn_xmit,
+	.ndo_change_mtu = usbpn_set_mtu,
+};
+
+static void usbpn_setup(struct net_device *dev)
+{
+	dev->features		= 0;
+	dev->netdev_ops		= &usbpn_ops,
+	dev->header_ops		= &phonet_header_ops;
+	dev->type		= ARPHRD_PHONET;
+	dev->flags		= IFF_POINTOPOINT | IFF_NOARP;
+	dev->mtu		= PHONET_MAX_MTU;
+	dev->hard_header_len	= 1;
+	dev->dev_addr[0]	= PN_MEDIA_USB;
+	dev->addr_len		= 1;
+	dev->tx_queue_len	= 3;
+
+	dev->destructor		= free_netdev;
+}
+
+/*
+ * USB driver callbacks
+ */
+static struct usb_device_id usbpn_ids[] = {
+	{
+		.match_flags = USB_DEVICE_ID_MATCH_VENDOR
+			| USB_DEVICE_ID_MATCH_INT_CLASS
+			| USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+		.idVendor = 0x0421, /* Nokia */
+		.bInterfaceClass = USB_CLASS_COMM,
+		.bInterfaceSubClass = 0xFE,
+	},
+	{ },
+};
+
+MODULE_DEVICE_TABLE(usb, usbpn_ids);
+
+static struct usb_driver usbpn_driver;
+
+int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
+{
+	static const char ifname[] = "usbpn%d";
+	const struct usb_cdc_union_desc *union_header = NULL;
+	const struct usb_cdc_header_desc *phonet_header = NULL;
+	const struct usb_host_interface *data_desc;
+	struct usb_interface *data_intf;
+	struct usb_device *usbdev = interface_to_usbdev(intf);
+	struct net_device *dev;
+	struct usbpn_dev *pnd;
+	u8 *data;
+	int len, err;
+
+	data = intf->altsetting->extra;
+	len = intf->altsetting->extralen;
+	while (len >= 3) {
+		u8 dlen = data[0];
+		if (dlen < 3)
+			return -EINVAL;
+
+		/* bDescriptorType */
+		if (data[1] == USB_DT_CS_INTERFACE) {
+			/* bDescriptorSubType */
+			switch (data[2]) {
+			case USB_CDC_UNION_TYPE:
+				if (union_header || dlen < 5)
+					break;
+				union_header =
+					(struct usb_cdc_union_desc *)data;
+				break;
+			case 0xAB:
+				if (phonet_header || dlen < 5)
+					break;
+				phonet_header =
+					(struct usb_cdc_header_desc *)data;
+				break;
+			}
+		}
+		data += dlen;
+		len -= dlen;
+	}
+
+	if (!union_header || !phonet_header)
+		return -EINVAL;
+
+	data_intf = usb_ifnum_to_if(usbdev, union_header->bSlaveInterface0);
+	if (data_intf == NULL)
+		return -ENODEV;
+	/* Data interface has one inactive and one active setting */
+	if (data_intf->num_altsetting != 2)
+		return -EINVAL;
+	if (data_intf->altsetting[0].desc.bNumEndpoints == 0
+	 && data_intf->altsetting[1].desc.bNumEndpoints == 2)
+		data_desc = data_intf->altsetting + 1;
+	else
+	if (data_intf->altsetting[0].desc.bNumEndpoints == 2
+	 && data_intf->altsetting[1].desc.bNumEndpoints == 0)
+		data_desc = data_intf->altsetting;
+	else
+		return -EINVAL;
+
+	dev = alloc_netdev(sizeof(*pnd) + sizeof(pnd->urbs[0]) * rxq_size,
+				ifname, usbpn_setup);
+	if (!dev)
+		return -ENOMEM;
+
+	pnd = netdev_priv(dev);
+	SET_NETDEV_DEV(dev, &intf->dev);
+	netif_stop_queue(dev);
+
+	pnd->dev = dev;
+	pnd->usb = usb_get_dev(usbdev);
+	pnd->intf = intf;
+	pnd->data_intf = data_intf;
+	spin_lock_init(&pnd->tx_lock);
+	spin_lock_init(&pnd->rx_lock);
+	/* Endpoints */
+	if (usb_pipein(data_desc->endpoint[0].desc.bEndpointAddress)) {
+		pnd->rx_pipe = usb_rcvbulkpipe(usbdev,
+			data_desc->endpoint[0].desc.bEndpointAddress);
+		pnd->tx_pipe = usb_sndbulkpipe(usbdev,
+			data_desc->endpoint[1].desc.bEndpointAddress);
+	} else {
+		pnd->rx_pipe = usb_rcvbulkpipe(usbdev,
+			data_desc->endpoint[1].desc.bEndpointAddress);
+		pnd->tx_pipe = usb_sndbulkpipe(usbdev,
+			data_desc->endpoint[0].desc.bEndpointAddress);
+	}
+	pnd->active_setting = data_desc - data_intf->altsetting;
+
+	err = usb_driver_claim_interface(&usbpn_driver, data_intf, pnd);
+	if (err)
+		goto out;
+
+	/* Force inactive mode until the network device is brought UP */
+	usb_set_interface(usbdev, union_header->bSlaveInterface0,
+				!pnd->active_setting);
+	usb_set_intfdata(intf, pnd);
+
+	err = register_netdev(dev);
+	if (err) {
+		usb_driver_release_interface(&usbpn_driver, data_intf);
+		goto out;
+	}
+
+	dev_dbg(&dev->dev, "USB CDC Phonet device found\n");
+	return 0;
+
+out:
+	usb_set_intfdata(intf, NULL);
+	free_netdev(dev);
+	return err;
+}
+
+static void usbpn_disconnect(struct usb_interface *intf)
+{
+	struct usbpn_dev *pnd = usb_get_intfdata(intf);
+	struct usb_device *usb = pnd->usb;
+
+	if (pnd->disconnected)
+		return;
+
+	pnd->disconnected = 1;
+	usb_driver_release_interface(&usbpn_driver,
+			(pnd->intf == intf) ? pnd->data_intf : pnd->intf);
+	unregister_netdev(pnd->dev);
+	usb_put_dev(usb);
+}
+
+static struct usb_driver usbpn_driver = {
+	.name =		"cdc_phonet",
+	.probe =	usbpn_probe,
+	.disconnect =	usbpn_disconnect,
+	.id_table =	usbpn_ids,
+};
+
+static int __init usbpn_init(void)
+{
+	return usb_register(&usbpn_driver);
+}
+
+static void __exit usbpn_exit(void)
+{
+	usb_deregister(&usbpn_driver);
+}
+
+module_init(usbpn_init);
+module_exit(usbpn_exit);
+
+MODULE_AUTHOR("Remi Denis-Courmont");
+MODULE_DESCRIPTION("USB CDC Phonet host interface");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index cd35d50..45cebfb 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -311,7 +311,7 @@
 			 *	bmCRC = 0	: CRC = 0xDEADBEEF
 			 */
 			if (header & BIT(14))
-				crc2 = ~crc32_le(~0, skb2->data, len);
+				crc2 = ~crc32_le(~0, skb2->data, skb2->len);
 			else
 				crc2 = 0xdeadbeef;
 
diff --git a/drivers/video/fbmon.c b/drivers/video/fbmon.c
index 5c1a2c0..9ae9cd3 100644
--- a/drivers/video/fbmon.c
+++ b/drivers/video/fbmon.c
@@ -256,8 +256,8 @@
 
 static int edid_checksum(unsigned char *edid)
 {
-	unsigned char i, csum = 0, all_null = 0;
-	int err = 0, fix = check_edid(edid);
+	unsigned char csum = 0, all_null = 0;
+	int i, err = 0, fix = check_edid(edid);
 
 	if (fix)
 		fix_edid(edid, fix);
diff --git a/fs/Kconfig b/fs/Kconfig
index a97263b..0e7da7b 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -186,32 +186,7 @@
 source "fs/sysv/Kconfig"
 source "fs/ufs/Kconfig"
 source "fs/exofs/Kconfig"
-
-config NILFS2_FS
-	tristate "NILFS2 file system support (EXPERIMENTAL)"
-	depends on BLOCK && EXPERIMENTAL
-	select CRC32
-	help
-	  NILFS2 is a log-structured file system (LFS) supporting continuous
-	  snapshotting.  In addition to versioning capability of the entire
-	  file system, users can even restore files mistakenly overwritten or
-	  destroyed just a few seconds ago.  Since this file system can keep
-	  consistency like conventional LFS, it achieves quick recovery after
-	  system crashes.
-
-	  NILFS2 creates a number of checkpoints every few seconds or per
-	  synchronous write basis (unless there is no change).  Users can
-	  select significant versions among continuously created checkpoints,
-	  and can change them into snapshots which will be preserved for long
-	  periods until they are changed back to checkpoints.  Each
-	  snapshot is mountable as a read-only file system concurrently with
-	  its writable mount, and this feature is convenient for online backup.
-
-	  Some features including atime, extended attributes, and POSIX ACLs,
-	  are not supported yet.
-
-	  To compile this file system support as a module, choose M here: the
-	  module will be called nilfs2.  If unsure, say N.
+source "fs/nilfs2/Kconfig"
 
 endif # MISC_FILESYSTEMS
 
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index c2d0616..8d25ccb 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -1242,20 +1242,6 @@
 	return error;
 }
 
-/*
- * Initialize a session.
- * Note: save the mount rsize and wsize for create_server negotiation.
- */
-static void nfs4_init_session(struct nfs_client *clp,
-			      unsigned int wsize, unsigned int rsize)
-{
-#if defined(CONFIG_NFS_V4_1)
-	if (nfs4_has_session(clp)) {
-		clp->cl_session->fc_attrs.max_rqst_sz = wsize;
-		clp->cl_session->fc_attrs.max_resp_sz = rsize;
-	}
-#endif /* CONFIG_NFS_V4_1 */
-}
 
 /*
  * Session has been established, and the client marked ready.
@@ -1350,7 +1336,9 @@
 	BUG_ON(!server->nfs_client->rpc_ops);
 	BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
 
-	nfs4_init_session(server->nfs_client, server->wsize, server->rsize);
+	error = nfs4_init_session(server);
+	if (error < 0)
+		goto error;
 
 	/* Probe the root fh to retrieve its FSID */
 	error = nfs4_path_walk(server, mntfh, data->nfs_server.export_path);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 38d42c2..32062c3 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1025,12 +1025,12 @@
 				res = NULL;
 				goto out;
 			/* This turned out not to be a regular file */
-			case -EISDIR:
 			case -ENOTDIR:
 				goto no_open;
 			case -ELOOP:
 				if (!(nd->intent.open.flags & O_NOFOLLOW))
 					goto no_open;
+			/* case -EISDIR: */
 			/* case -EINVAL: */
 			default:
 				goto out;
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 61bc3a3..6ea07a3 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -220,6 +220,7 @@
 extern struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp);
 extern int nfs4_proc_create_session(struct nfs_client *, int reset);
 extern int nfs4_proc_destroy_session(struct nfs4_session *);
+extern int nfs4_init_session(struct nfs_server *server);
 #else /* CONFIG_NFS_v4_1 */
 static inline int nfs4_setup_sequence(struct nfs_client *clp,
 		struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
@@ -227,6 +228,11 @@
 {
 	return 0;
 }
+
+static inline int nfs4_init_session(struct nfs_server *server)
+{
+	return 0;
+}
 #endif /* CONFIG_NFS_V4_1 */
 
 extern struct nfs4_state_maintenance_ops *nfs4_state_renewal_ops[];
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ff0c080..6917311 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2040,15 +2040,9 @@
 		.rpc_argp = &args,
 		.rpc_resp = &res,
 	};
-	int status;
 
 	nfs_fattr_init(info->fattr);
-	status = nfs4_recover_expired_lease(server);
-	if (!status)
-		status = nfs4_check_client_ready(server->nfs_client);
-	if (!status)
-		status = nfs4_call_sync(server, &msg, &args, &res, 0);
-	return status;
+	return nfs4_call_sync(server, &msg, &args, &res, 0);
 }
 
 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
@@ -4099,15 +4093,23 @@
 	if (request->fl_start < 0 || request->fl_end < 0)
 		return -EINVAL;
 
-	if (IS_GETLK(cmd))
-		return nfs4_proc_getlk(state, F_GETLK, request);
+	if (IS_GETLK(cmd)) {
+		if (state != NULL)
+			return nfs4_proc_getlk(state, F_GETLK, request);
+		return 0;
+	}
 
 	if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
 		return -EINVAL;
 
-	if (request->fl_type == F_UNLCK)
-		return nfs4_proc_unlck(state, cmd, request);
+	if (request->fl_type == F_UNLCK) {
+		if (state != NULL)
+			return nfs4_proc_unlck(state, cmd, request);
+		return 0;
+	}
 
+	if (state == NULL)
+		return -ENOLCK;
 	do {
 		status = nfs4_proc_setlk(state, cmd, request);
 		if ((status != -EAGAIN) || IS_SETLK(cmd))
@@ -4793,6 +4795,22 @@
 	return status;
 }
 
+int nfs4_init_session(struct nfs_server *server)
+{
+	struct nfs_client *clp = server->nfs_client;
+	int ret;
+
+	if (!nfs4_has_session(clp))
+		return 0;
+
+	clp->cl_session->fc_attrs.max_rqst_sz = server->wsize;
+	clp->cl_session->fc_attrs.max_resp_sz = server->rsize;
+	ret = nfs4_recover_expired_lease(server);
+	if (!ret)
+		ret = nfs4_check_client_ready(clp);
+	return ret;
+}
+
 /*
  * Renew the cl_session lease.
  */
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index b73c5a7..65ca8c1 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -553,6 +553,7 @@
 	INIT_LIST_HEAD(&lsp->ls_sequence.list);
 	lsp->ls_seqid.sequence = &lsp->ls_sequence;
 	atomic_set(&lsp->ls_count, 1);
+	lsp->ls_state = state;
 	lsp->ls_owner = fl_owner;
 	spin_lock(&clp->cl_lock);
 	nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64);
@@ -587,7 +588,6 @@
 		if (lsp != NULL)
 			break;
 		if (new != NULL) {
-			new->ls_state = state;
 			list_add(&new->ls_locks, &state->lock_states);
 			set_bit(LK_STATE_IN_USE, &state->flags);
 			lsp = new;
diff --git a/fs/nilfs2/Kconfig b/fs/nilfs2/Kconfig
new file mode 100644
index 0000000..72da095
--- /dev/null
+++ b/fs/nilfs2/Kconfig
@@ -0,0 +1,25 @@
+config NILFS2_FS
+	tristate "NILFS2 file system support (EXPERIMENTAL)"
+	depends on BLOCK && EXPERIMENTAL
+	select CRC32
+	help
+	  NILFS2 is a log-structured file system (LFS) supporting continuous
+	  snapshotting.  In addition to versioning capability of the entire
+	  file system, users can even restore files mistakenly overwritten or
+	  destroyed just a few seconds ago.  Since this file system can keep
+	  consistency like conventional LFS, it achieves quick recovery after
+	  system crashes.
+
+	  NILFS2 creates a number of checkpoints every few seconds or per
+	  synchronous write basis (unless there is no change).  Users can
+	  select significant versions among continuously created checkpoints,
+	  and can change them into snapshots which will be preserved for long
+	  periods until they are changed back to checkpoints.  Each
+	  snapshot is mountable as a read-only file system concurrently with
+	  its writable mount, and this feature is convenient for online backup.
+
+	  Some features including atime, extended attributes, and POSIX ACLs,
+	  are not supported yet.
+
+	  To compile this file system support as a module, choose M here: the
+	  module will be called nilfs2.  If unsure, say N.
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 2721f07..35e7df1 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -14,6 +14,7 @@
 #include <linux/irqflags.h>
 #include <linux/smp.h>
 #include <linux/percpu.h>
+#include <linux/hrtimer.h>
 
 #include <asm/atomic.h>
 #include <asm/ptrace.h>
@@ -64,11 +65,13 @@
  * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
  * IRQTF_DIED      - handler thread died
  * IRQTF_WARNED    - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
+ * IRQTF_AFFINITY  - irq thread is requested to adjust affinity
  */
 enum {
 	IRQTF_RUNTHREAD,
 	IRQTF_DIED,
 	IRQTF_WARNED,
+	IRQTF_AFFINITY,
 };
 
 typedef irqreturn_t (*irq_handler_t)(int, void *);
@@ -517,6 +520,31 @@
 extern void tasklet_init(struct tasklet_struct *t,
 			 void (*func)(unsigned long), unsigned long data);
 
+struct tasklet_hrtimer {
+	struct hrtimer		timer;
+	struct tasklet_struct	tasklet;
+	enum hrtimer_restart	(*function)(struct hrtimer *);
+};
+
+extern void
+tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
+		     enum hrtimer_restart (*function)(struct hrtimer *),
+		     clockid_t which_clock, enum hrtimer_mode mode);
+
+static inline
+int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
+			  const enum hrtimer_mode mode)
+{
+	return hrtimer_start(&ttimer->timer, time, mode);
+}
+
+static inline
+void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
+{
+	hrtimer_cancel(&ttimer->timer);
+	tasklet_kill(&ttimer->tasklet);
+}
+
 /*
  * Autoprobing for irqs:
  *
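The tasklet_hrtimer helpers declared above pair an hrtimer with a tasklet so the callback runs in softirq context even when high-resolution timers fire from hard interrupt context. A minimal usage sketch follows; the driver names and the 10 ms interval are hypothetical, not part of this patch:

/* Hypothetical periodic poller built on the tasklet_hrtimer API above. */
#include <linux/interrupt.h>
#include <linux/hrtimer.h>

static struct tasklet_hrtimer demo_timer;

/* Runs in tasklet (softirq) context, so BH-safe locks may be taken. */
static enum hrtimer_restart demo_timer_fn(struct hrtimer *t)
{
	/* ... periodic work ... */
	hrtimer_forward_now(t, ktime_set(0, 10 * NSEC_PER_MSEC));
	return HRTIMER_RESTART;
}

static void demo_start(void)
{
	tasklet_hrtimer_init(&demo_timer, demo_timer_fn,
			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	tasklet_hrtimer_start(&demo_timer, ktime_set(0, 10 * NSEC_PER_MSEC),
			      HRTIMER_MODE_REL);
}

static void demo_stop(void)
{
	tasklet_hrtimer_cancel(&demo_timer);
}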
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 5e970c7..bd15d7a 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -120,8 +120,9 @@
 	PERF_SAMPLE_ID				= 1U << 6,
 	PERF_SAMPLE_CPU				= 1U << 7,
 	PERF_SAMPLE_PERIOD			= 1U << 8,
+	PERF_SAMPLE_STREAM_ID			= 1U << 9,
 
-	PERF_SAMPLE_MAX = 1U << 9,		/* non-ABI */
+	PERF_SAMPLE_MAX = 1U << 10,		/* non-ABI */
 };
 
 /*
@@ -312,16 +313,7 @@
 	 *	struct perf_event_header	header;
 	 *	u64				time;
 	 *	u64				id;
-	 *	u64				sample_period;
-	 * };
-	 */
-	PERF_EVENT_PERIOD		= 4,
-
-	/*
-	 * struct {
-	 *	struct perf_event_header	header;
-	 *	u64				time;
-	 *	u64				id;
+	 *	u64				stream_id;
 	 * };
 	 */
 	PERF_EVENT_THROTTLE		= 5,
@@ -356,6 +348,7 @@
 	 *	{ u64			time;     } && PERF_SAMPLE_TIME
 	 *	{ u64			addr;     } && PERF_SAMPLE_ADDR
 	 *	{ u64			id;	  } && PERF_SAMPLE_ID
+	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
 	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
 	 * 	{ u64			period;   } && PERF_SAMPLE_PERIOD
 	 *
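For reference, a hypothetical userspace sketch of pulling the two identifiers out of a sample record, following the field order documented in the comment block above. The caller is assumed to have already consumed any earlier PERF_SAMPLE_* fields; this is an illustration, not tools/perf code:

/* Illustration only: PERF_SAMPLE_ID now carries the primary (parent)
 * counter id, PERF_SAMPLE_STREAM_ID the id of the counter instance
 * that actually overflowed. */
#include <stdint.h>

struct sample_ids {
	uint64_t id;		/* PERF_SAMPLE_ID */
	uint64_t stream_id;	/* PERF_SAMPLE_STREAM_ID */
};

static const uint64_t *parse_ids(const uint64_t *p, uint64_t sample_type,
				 struct sample_ids *out)
{
	if (sample_type & (1ULL << 6))		/* PERF_SAMPLE_ID */
		out->id = *p++;
	if (sample_type & (1ULL << 9))		/* PERF_SAMPLE_STREAM_ID */
		out->stream_id = *p++;
	return p;
}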
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 16a982e..3ab08e4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -209,7 +209,7 @@
 			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 #define task_contributes_to_load(task)	\
 				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
-				 (task->flags & PF_FROZEN) == 0)
+				 (task->flags & PF_FREEZING) == 0)
 
 #define __set_task_state(tsk, state_value)		\
 	do { (tsk)->state = (state_value); } while (0)
@@ -1680,6 +1680,7 @@
 #define PF_MEMALLOC	0x00000800	/* Allocating memory */
 #define PF_FLUSHER	0x00001000	/* responsible for disk writeback */
 #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
+#define PF_FREEZING	0x00004000	/* freeze in progress. do not account to load */
 #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
 #define PF_FROZEN	0x00010000	/* frozen for system suspend */
 #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
diff --git a/include/net/sock.h b/include/net/sock.h
index 2c0da92..950409d 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -104,15 +104,15 @@
 
 /**
  *	struct sock_common - minimal network layer representation of sockets
+ *	@skc_node: main hash linkage for various protocol lookup tables
+ *	@skc_nulls_node: main hash linkage for UDP/UDP-Lite protocol
+ *	@skc_refcnt: reference count
+ *	@skc_hash: hash value used with various protocol lookup tables
  *	@skc_family: network address family
  *	@skc_state: Connection state
  *	@skc_reuse: %SO_REUSEADDR setting
  *	@skc_bound_dev_if: bound device index if != 0
- *	@skc_node: main hash linkage for various protocol lookup tables
- *	@skc_nulls_node: main hash linkage for UDP/UDP-Lite protocol
  *	@skc_bind_node: bind hash linkage for various protocol lookup tables
- *	@skc_refcnt: reference count
- *	@skc_hash: hash value used with various protocol lookup tables
  *	@skc_prot: protocol handlers inside a network family
  *	@skc_net: reference to the network namespace of this socket
  *
@@ -120,17 +120,21 @@
  *	for struct sock and struct inet_timewait_sock.
  */
 struct sock_common {
-	unsigned short		skc_family;
-	volatile unsigned char	skc_state;
-	unsigned char		skc_reuse;
-	int			skc_bound_dev_if;
+	/*
+	 * first fields are not copied in sock_copy()
+	 */
 	union {
 		struct hlist_node	skc_node;
 		struct hlist_nulls_node skc_nulls_node;
 	};
-	struct hlist_node	skc_bind_node;
 	atomic_t		skc_refcnt;
+
 	unsigned int		skc_hash;
+	unsigned short		skc_family;
+	volatile unsigned char	skc_state;
+	unsigned char		skc_reuse;
+	int			skc_bound_dev_if;
+	struct hlist_node	skc_bind_node;
 	struct proto		*skc_prot;
 #ifdef CONFIG_NET_NS
 	struct net	 	*skc_net;
@@ -208,15 +212,17 @@
 	 * don't add nothing before this first member (__sk_common) --acme
 	 */
 	struct sock_common	__sk_common;
+#define sk_node			__sk_common.skc_node
+#define sk_nulls_node		__sk_common.skc_nulls_node
+#define sk_refcnt		__sk_common.skc_refcnt
+
+#define sk_copy_start		__sk_common.skc_hash
+#define sk_hash			__sk_common.skc_hash
 #define sk_family		__sk_common.skc_family
 #define sk_state		__sk_common.skc_state
 #define sk_reuse		__sk_common.skc_reuse
 #define sk_bound_dev_if		__sk_common.skc_bound_dev_if
-#define sk_node			__sk_common.skc_node
-#define sk_nulls_node		__sk_common.skc_nulls_node
 #define sk_bind_node		__sk_common.skc_bind_node
-#define sk_refcnt		__sk_common.skc_refcnt
-#define sk_hash			__sk_common.skc_hash
 #define sk_prot			__sk_common.skc_prot
 #define sk_net			__sk_common.skc_net
 	kmemcheck_bitfield_begin(flags);
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 19f4150..88af843 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1425,6 +1425,11 @@
 #ifdef CONFIG_TCP_MD5SIG
 	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
 						struct request_sock *req);
+	int			(*calc_md5_hash) (char *location,
+						  struct tcp_md5sig_key *md5,
+						  struct sock *sk,
+						  struct request_sock *req,
+						  struct sk_buff *skb);
 #endif
 };
 
diff --git a/init/Kconfig b/init/Kconfig
index 1ce05a4..cb2c092 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -962,7 +962,7 @@
 
 config EVENT_PROFILE
 	bool "Tracepoint profile sources"
-	depends on PERF_COUNTERS && EVENT_TRACER
+	depends on PERF_COUNTERS && EVENT_TRACING
 	default y
 
 endmenu
diff --git a/kernel/fork.c b/kernel/fork.c
index bd29592..9b42695 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1407,14 +1407,11 @@
 		if (clone_flags & CLONE_VFORK) {
 			p->vfork_done = &vfork;
 			init_completion(&vfork);
-		} else if (!(clone_flags & CLONE_VM)) {
-			/*
-			 * vfork will do an exec which will call
-			 * set_task_comm()
-			 */
-			perf_counter_fork(p);
 		}
 
+		if (!(clone_flags & CLONE_THREAD))
+			perf_counter_fork(p);
+
 		audit_finish_fork(p);
 		tracehook_report_clone(regs, clone_flags, nr, p);
 
diff --git a/kernel/freezer.c b/kernel/freezer.c
index 2f4936c..bd1d42b 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -44,12 +44,19 @@
 	recalc_sigpending(); /* We sent fake signal, clean it up */
 	spin_unlock_irq(&current->sighand->siglock);
 
+	/* prevent accounting of that task to load */
+	current->flags |= PF_FREEZING;
+
 	for (;;) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		if (!frozen(current))
 			break;
 		schedule();
 	}
+
+	/* Remove the accounting blocker */
+	current->flags &= ~PF_FREEZING;
+
 	pr_debug("%s left refrigerator\n", current->comm);
 	__set_current_state(save);
 }
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 7346825..e70ed55 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -42,8 +42,7 @@
 
 extern int irq_select_affinity_usr(unsigned int irq);
 
-extern void
-irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask);
+extern void irq_set_thread_affinity(struct irq_desc *desc);
 
 /*
  * Debugging printout:
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 50da676..f0de36f 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -80,14 +80,22 @@
 	return 1;
 }
 
-void
-irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask)
+/**
+ *	irq_set_thread_affinity - Notify irq threads to adjust affinity
+ *	@desc:		irq descriptor whose affinity has changed
+ *
+ *	We just set IRQTF_AFFINITY and delegate the affinity setting
+ *	to the interrupt thread itself. We can not call
+ *	set_cpus_allowed_ptr() here as we hold desc->lock and this
+ *	code can be called from hard interrupt context.
+ */
+void irq_set_thread_affinity(struct irq_desc *desc)
 {
 	struct irqaction *action = desc->action;
 
 	while (action) {
 		if (action->thread)
-			set_cpus_allowed_ptr(action->thread, cpumask);
+			set_bit(IRQTF_AFFINITY, &action->thread_flags);
 		action = action->next;
 	}
 }
@@ -112,7 +120,7 @@
 	if (desc->status & IRQ_MOVE_PCNTXT) {
 		if (!desc->chip->set_affinity(irq, cpumask)) {
 			cpumask_copy(desc->affinity, cpumask);
-			irq_set_thread_affinity(desc, cpumask);
+			irq_set_thread_affinity(desc);
 		}
 	}
 	else {
@@ -122,7 +130,7 @@
 #else
 	if (!desc->chip->set_affinity(irq, cpumask)) {
 		cpumask_copy(desc->affinity, cpumask);
-		irq_set_thread_affinity(desc, cpumask);
+		irq_set_thread_affinity(desc);
 	}
 #endif
 	desc->status |= IRQ_AFFINITY_SET;
@@ -176,7 +184,7 @@
 	spin_lock_irqsave(&desc->lock, flags);
 	ret = setup_affinity(irq, desc);
 	if (!ret)
-		irq_set_thread_affinity(desc, desc->affinity);
+		irq_set_thread_affinity(desc);
 	spin_unlock_irqrestore(&desc->lock, flags);
 
 	return ret;
@@ -444,6 +452,34 @@
 }
 
 /*
+ * Check whether we need to change the affinity of the interrupt thread.
+ */
+static void
+irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
+{
+	cpumask_var_t mask;
+
+	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
+		return;
+
+	/*
+	 * In case we are out of memory we set IRQTF_AFFINITY again and
+	 * try again next time
+	 */
+	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+		set_bit(IRQTF_AFFINITY, &action->thread_flags);
+		return;
+	}
+
+	spin_lock_irq(&desc->lock);
+	cpumask_copy(mask, desc->affinity);
+	spin_unlock_irq(&desc->lock);
+
+	set_cpus_allowed_ptr(current, mask);
+	free_cpumask_var(mask);
+}
+
+/*
  * Interrupt handler thread
  */
 static int irq_thread(void *data)
@@ -458,6 +494,8 @@
 
 	while (!irq_wait_for_interrupt(action)) {
 
+		irq_thread_check_affinity(desc, action);
+
 		atomic_inc(&desc->threads_active);
 
 		spin_lock_irq(&desc->lock);
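The manage.c changes above follow a common deferral pattern: code that runs with desc->lock held, possibly in hard interrupt context, only sets a flag bit, and the sleeping call (set_cpus_allowed_ptr()) is made later by the interrupt thread itself. A generic sketch of that pattern, with hypothetical names:

/* Generic "flag in atomic context, act in thread context" sketch
 * mirroring the IRQTF_AFFINITY handling above; names are hypothetical. */
#include <linux/bitops.h>

#define DEMO_WORK_PENDING	0

static unsigned long demo_flags;

/* May be called with spinlocks held or in hard interrupt context:
 * only record that work is needed. */
static void demo_request_work(void)
{
	set_bit(DEMO_WORK_PENDING, &demo_flags);
}

/* Called from a kernel thread's main loop, where sleeping is allowed. */
static void demo_do_work(void)
{
	if (!test_and_clear_bit(DEMO_WORK_PENDING, &demo_flags))
		return;
	/* sleeping operations (e.g. set_cpus_allowed_ptr()) go here */
}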
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index cfe767c..fcb6c96 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -45,7 +45,7 @@
 		   < nr_cpu_ids))
 		if (!desc->chip->set_affinity(irq, desc->pending_mask)) {
 			cpumask_copy(desc->affinity, desc->pending_mask);
-			irq_set_thread_affinity(desc, desc->pending_mask);
+			irq_set_thread_affinity(desc);
 		}
 
 	cpumask_clear(desc->pending_mask);
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index a641eb7..9509310 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -146,6 +146,28 @@
 	}
 }
 
+static void unclone_ctx(struct perf_counter_context *ctx)
+{
+	if (ctx->parent_ctx) {
+		put_ctx(ctx->parent_ctx);
+		ctx->parent_ctx = NULL;
+	}
+}
+
+/*
+ * If we inherit counters we want to return the parent counter id
+ * to userspace.
+ */
+static u64 primary_counter_id(struct perf_counter *counter)
+{
+	u64 id = counter->id;
+
+	if (counter->parent)
+		id = counter->parent->id;
+
+	return id;
+}
+
 /*
  * Get the perf_counter_context for a task and lock it.
  * This has to cope with with the fact that until it is locked,
@@ -1288,7 +1310,6 @@
 #define MAX_INTERRUPTS (~0ULL)
 
 static void perf_log_throttle(struct perf_counter *counter, int enable);
-static void perf_log_period(struct perf_counter *counter, u64 period);
 
 static void perf_adjust_period(struct perf_counter *counter, u64 events)
 {
@@ -1307,8 +1328,6 @@
 	if (!sample_period)
 		sample_period = 1;
 
-	perf_log_period(counter, sample_period);
-
 	hwc->sample_period = sample_period;
 }
 
@@ -1463,10 +1482,8 @@
 	/*
 	 * Unclone this context if we enabled any counter.
 	 */
-	if (enabled && ctx->parent_ctx) {
-		put_ctx(ctx->parent_ctx);
-		ctx->parent_ctx = NULL;
-	}
+	if (enabled)
+		unclone_ctx(ctx);
 
 	spin_unlock(&ctx->lock);
 
@@ -1526,7 +1543,6 @@
 
 static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
 {
-	struct perf_counter_context *parent_ctx;
 	struct perf_counter_context *ctx;
 	struct perf_cpu_context *cpuctx;
 	struct task_struct *task;
@@ -1586,11 +1602,7 @@
  retry:
 	ctx = perf_lock_task_context(task, &flags);
 	if (ctx) {
-		parent_ctx = ctx->parent_ctx;
-		if (parent_ctx) {
-			put_ctx(parent_ctx);
-			ctx->parent_ctx = NULL;		/* no longer a clone */
-		}
+		unclone_ctx(ctx);
 		spin_unlock_irqrestore(&ctx->lock, flags);
 	}
 
@@ -1704,7 +1716,7 @@
 		values[n++] = counter->total_time_running +
 			atomic64_read(&counter->child_total_time_running);
 	if (counter->attr.read_format & PERF_FORMAT_ID)
-		values[n++] = counter->id;
+		values[n++] = primary_counter_id(counter);
 	mutex_unlock(&counter->child_mutex);
 
 	if (count < n * sizeof(u64))
@@ -1811,8 +1823,6 @@
 
 		counter->attr.sample_freq = value;
 	} else {
-		perf_log_period(counter, value);
-
 		counter->attr.sample_period = value;
 		counter->hw.sample_period = value;
 	}
@@ -2661,10 +2671,14 @@
 	if (sample_type & PERF_SAMPLE_ID)
 		header.size += sizeof(u64);
 
+	if (sample_type & PERF_SAMPLE_STREAM_ID)
+		header.size += sizeof(u64);
+
 	if (sample_type & PERF_SAMPLE_CPU) {
 		header.size += sizeof(cpu_entry);
 
 		cpu_entry.cpu = raw_smp_processor_id();
+		cpu_entry.reserved = 0;
 	}
 
 	if (sample_type & PERF_SAMPLE_PERIOD)
@@ -2703,7 +2717,13 @@
 	if (sample_type & PERF_SAMPLE_ADDR)
 		perf_output_put(&handle, data->addr);
 
-	if (sample_type & PERF_SAMPLE_ID)
+	if (sample_type & PERF_SAMPLE_ID) {
+		u64 id = primary_counter_id(counter);
+
+		perf_output_put(&handle, id);
+	}
+
+	if (sample_type & PERF_SAMPLE_STREAM_ID)
 		perf_output_put(&handle, counter->id);
 
 	if (sample_type & PERF_SAMPLE_CPU)
@@ -2726,7 +2746,7 @@
 			if (sub != counter)
 				sub->pmu->read(sub);
 
-			group_entry.id = sub->id;
+			group_entry.id = primary_counter_id(sub);
 			group_entry.counter = atomic64_read(&sub->count);
 
 			perf_output_put(&handle, group_entry);
@@ -2786,15 +2806,8 @@
 	}
 
 	if (counter->attr.read_format & PERF_FORMAT_ID) {
-		u64 id;
-
 		event.header.size += sizeof(u64);
-		if (counter->parent)
-			id = counter->parent->id;
-		else
-			id = counter->id;
-
-		event.format[i++] = id;
+		event.format[i++] = primary_counter_id(counter);
 	}
 
 	ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
@@ -2895,8 +2908,11 @@
 		.event  = {
 			.header = {
 				.type = PERF_EVENT_FORK,
+				.misc = 0,
 				.size = sizeof(fork_event.event),
 			},
+			/* .pid  */
+			/* .ppid */
 		},
 	};
 
@@ -2968,8 +2984,10 @@
 	struct perf_cpu_context *cpuctx;
 	struct perf_counter_context *ctx;
 	unsigned int size;
-	char *comm = comm_event->task->comm;
+	char comm[TASK_COMM_LEN];
 
+	memset(comm, 0, sizeof(comm));
+	strncpy(comm, comm_event->task->comm, sizeof(comm));
 	size = ALIGN(strlen(comm)+1, sizeof(u64));
 
 	comm_event->comm = comm;
@@ -3004,8 +3022,16 @@
 
 	comm_event = (struct perf_comm_event){
 		.task	= task,
+		/* .comm      */
+		/* .comm_size */
 		.event  = {
-			.header = { .type = PERF_EVENT_COMM, },
+			.header = {
+				.type = PERF_EVENT_COMM,
+				.misc = 0,
+				/* .size */
+			},
+			/* .pid */
+			/* .tid */
 		},
 	};
 
@@ -3088,8 +3114,15 @@
 	char *buf = NULL;
 	const char *name;
 
+	memset(tmp, 0, sizeof(tmp));
+
 	if (file) {
-		buf = kzalloc(PATH_MAX, GFP_KERNEL);
+		/*
+		 * d_path works from the end of the buffer backwards, so we
+		 * need to add enough zero bytes after the string to handle
+		 * the 64bit alignment we do later.
+		 */
+		buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
 		if (!buf) {
 			name = strncpy(tmp, "//enomem", sizeof(tmp));
 			goto got_name;
@@ -3100,9 +3133,11 @@
 			goto got_name;
 		}
 	} else {
-		name = arch_vma_name(mmap_event->vma);
-		if (name)
+		if (arch_vma_name(mmap_event->vma)) {
+			name = strncpy(tmp, arch_vma_name(mmap_event->vma),
+				       sizeof(tmp));
 			goto got_name;
+		}
 
 		if (!vma->vm_mm) {
 			name = strncpy(tmp, "[vdso]", sizeof(tmp));
@@ -3147,8 +3182,16 @@
 
 	mmap_event = (struct perf_mmap_event){
 		.vma	= vma,
+		/* .file_name */
+		/* .file_size */
 		.event  = {
-			.header = { .type = PERF_EVENT_MMAP, },
+			.header = {
+				.type = PERF_EVENT_MMAP,
+				.misc = 0,
+				/* .size */
+			},
+			/* .pid */
+			/* .tid */
 			.start  = vma->vm_start,
 			.len    = vma->vm_end - vma->vm_start,
 			.pgoff  = vma->vm_pgoff,
@@ -3159,49 +3202,6 @@
 }
 
 /*
- * Log sample_period changes so that analyzing tools can re-normalize the
- * event flow.
- */
-
-struct freq_event {
-	struct perf_event_header	header;
-	u64				time;
-	u64				id;
-	u64				period;
-};
-
-static void perf_log_period(struct perf_counter *counter, u64 period)
-{
-	struct perf_output_handle handle;
-	struct freq_event event;
-	int ret;
-
-	if (counter->hw.sample_period == period)
-		return;
-
-	if (counter->attr.sample_type & PERF_SAMPLE_PERIOD)
-		return;
-
-	event = (struct freq_event) {
-		.header = {
-			.type = PERF_EVENT_PERIOD,
-			.misc = 0,
-			.size = sizeof(event),
-		},
-		.time = sched_clock(),
-		.id = counter->id,
-		.period = period,
-	};
-
-	ret = perf_output_begin(&handle, counter, sizeof(event), 1, 0);
-	if (ret)
-		return;
-
-	perf_output_put(&handle, event);
-	perf_output_end(&handle);
-}
-
-/*
  * IRQ throttle logging
  */
 
@@ -3214,16 +3214,21 @@
 		struct perf_event_header	header;
 		u64				time;
 		u64				id;
+		u64				stream_id;
 	} throttle_event = {
 		.header = {
-			.type = PERF_EVENT_THROTTLE + 1,
+			.type = PERF_EVENT_THROTTLE,
 			.misc = 0,
 			.size = sizeof(throttle_event),
 		},
-		.time	= sched_clock(),
-		.id	= counter->id,
+		.time		= sched_clock(),
+		.id		= primary_counter_id(counter),
+		.stream_id	= counter->id,
 	};
 
+	if (enable)
+		throttle_event.header.type = PERF_EVENT_UNTHROTTLE;
+
 	ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
 	if (ret)
 		return;
@@ -3671,7 +3676,7 @@
 void perf_tpcounter_event(int event_id)
 {
 	struct perf_sample_data data = {
-		.regs = get_irq_regs();
+		.regs = get_irq_regs(),
 		.addr = 0,
 	};
 
@@ -3687,16 +3692,12 @@
 
 static void tp_perf_counter_destroy(struct perf_counter *counter)
 {
-	ftrace_profile_disable(perf_event_id(&counter->attr));
+	ftrace_profile_disable(counter->attr.config);
 }
 
 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
 {
-	int event_id = perf_event_id(&counter->attr);
-	int ret;
-
-	ret = ftrace_profile_enable(event_id);
-	if (ret)
+	if (ftrace_profile_enable(counter->attr.config))
 		return NULL;
 
 	counter->destroy = tp_perf_counter_destroy;
@@ -4255,15 +4256,12 @@
 	 */
 	spin_lock(&child_ctx->lock);
 	child->perf_counter_ctxp = NULL;
-	if (child_ctx->parent_ctx) {
-		/*
-		 * This context is a clone; unclone it so it can't get
-		 * swapped to another process while we're removing all
-		 * the counters from it.
-		 */
-		put_ctx(child_ctx->parent_ctx);
-		child_ctx->parent_ctx = NULL;
-	}
+	/*
+	 * If this context is a clone; unclone it so it can't get
+	 * swapped to another process while we're removing all
+	 * the counters from it.
+	 */
+	unclone_ctx(child_ctx);
 	spin_unlock(&child_ctx->lock);
 	local_irq_restore(flags);
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 98972d3..1b59e26 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7289,6 +7289,7 @@
 static void calc_global_load_remove(struct rq *rq)
 {
 	atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
+	rq->calc_load_active = 0;
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
@@ -7515,6 +7516,7 @@
 		task_rq_unlock(rq, &flags);
 		get_task_struct(p);
 		cpu_rq(cpu)->migration_thread = p;
+		rq->calc_load_update = calc_load_update;
 		break;
 
 	case CPU_ONLINE:
@@ -7525,8 +7527,6 @@
 		/* Update our root-domain */
 		rq = cpu_rq(cpu);
 		spin_lock_irqsave(&rq->lock, flags);
-		rq->calc_load_update = calc_load_update;
-		rq->calc_load_active = 0;
 		if (rq->rd) {
 			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 7c248dc..9ffb2b2 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -266,6 +266,12 @@
 	return min_vruntime;
 }
 
+static inline int entity_before(struct sched_entity *a,
+				struct sched_entity *b)
+{
+	return (s64)(a->vruntime - b->vruntime) < 0;
+}
+
 static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	return se->vruntime - cfs_rq->min_vruntime;
@@ -1017,7 +1023,7 @@
 	/*
 	 * Already in the rightmost position?
 	 */
-	if (unlikely(!rightmost || rightmost->vruntime < se->vruntime))
+	if (unlikely(!rightmost || entity_before(rightmost, se)))
 		return;
 
 	/*
@@ -1713,7 +1719,7 @@
 
 	/* 'curr' will be NULL if the child belongs to a different group */
 	if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
-			curr && curr->vruntime < se->vruntime) {
+			curr && entity_before(curr, se)) {
 		/*
 		 * Upon rescheduling, sched_class::put_prev_task() will place
 		 * 'current' within the tree based on its new key value.
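The entity_before() helper introduced above compares vruntimes through a signed difference, so the ordering stays correct even after the unsigned vruntime counter wraps. A standalone userspace illustration of why the cast works (not kernel code):

/* Illustration (hypothetical, not kernel code): why (s64)(a - b) < 0
 * keeps ordering across wraparound. */
#include <stdio.h>
#include <stdint.h>

static int before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

int main(void)
{
	uint64_t near_max = UINT64_MAX - 5;	/* about to wrap */
	uint64_t wrapped  = 10;			/* already wrapped past zero */

	/* A plain "<" claims near_max is not before wrapped; the signed
	 * difference still orders near_max first.  Prints "1 0". */
	printf("%d %d\n", before(near_max, wrapped), near_max < wrapped);
	return 0;
}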
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 3a94905..eb5e131 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -345,7 +345,9 @@
 	softirq_vec[nr].action = action;
 }
 
-/* Tasklets */
+/*
+ * Tasklets
+ */
 struct tasklet_head
 {
 	struct tasklet_struct *head;
@@ -493,6 +495,66 @@
 
 EXPORT_SYMBOL(tasklet_kill);
 
+/*
+ * tasklet_hrtimer
+ */
+
+/*
+ * The trampoline is called when the hrtimer expires. If this is
+ * called from the hrtimer interrupt then we schedule the tasklet as
+ * the timer callback function expects to run in softirq context. If
+ * it's called in softirq context anyway (i.e. high resolution timers
+ * disabled) then the hrtimer callback is called right away.
+ */
+static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
+{
+	struct tasklet_hrtimer *ttimer =
+		container_of(timer, struct tasklet_hrtimer, timer);
+
+	if (hrtimer_is_hres_active(timer)) {
+		tasklet_hi_schedule(&ttimer->tasklet);
+		return HRTIMER_NORESTART;
+	}
+	return ttimer->function(timer);
+}
+
+/*
+ * Helper function which calls the hrtimer callback from
+ * tasklet/softirq context
+ */
+static void __tasklet_hrtimer_trampoline(unsigned long data)
+{
+	struct tasklet_hrtimer *ttimer = (void *)data;
+	enum hrtimer_restart restart;
+
+	restart = ttimer->function(&ttimer->timer);
+	if (restart != HRTIMER_NORESTART)
+		hrtimer_restart(&ttimer->timer);
+}
+
+/**
+ * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
+ * @ttimer:	 tasklet_hrtimer which is initialized
+ * @function:	 hrtimer callback function which gets called from softirq context
+ * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
+ * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
+ */
+void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
+			  enum hrtimer_restart (*function)(struct hrtimer *),
+			  clockid_t which_clock, enum hrtimer_mode mode)
+{
+	hrtimer_init(&ttimer->timer, which_clock, mode);
+	ttimer->timer.function = __hrtimer_tasklet_trampoline;
+	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
+		     (unsigned long)ttimer);
+	ttimer->function = function;
+}
+EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
+
+/*
+ * Remote softirq bits
+ */
+
 DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
 EXPORT_PER_CPU_SYMBOL(softirq_work_list);
 
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 592bf58..7466cb8 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -513,7 +513,7 @@
 	 * Check to make sure we don't switch to a non-highres capable
 	 * clocksource if the tick code is in oneshot mode (highres or nohz)
 	 */
-	if (tick_oneshot_mode_active() &&
+	if (tick_oneshot_mode_active() && ovr &&
 	    !(ovr->flags & CLOCK_SOURCE_VALID_FOR_HRES)) {
 		printk(KERN_WARNING "%s clocksource is not HRT compatible. "
 			"Cannot switch while in HRT/NOHZ mode\n", ovr->name);
diff --git a/kernel/timer.c b/kernel/timer.c
index 0b36b9e..a7f07d5 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -714,7 +714,7 @@
 	 * networking code - if the timer is re-modified
 	 * to be the same thing then just return:
 	 */
-	if (timer->expires == expires && timer_pending(timer))
+	if (timer_pending(timer) && timer->expires == expires)
 		return 1;
 
 	return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 95d7f32..72720c7 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -75,6 +75,7 @@
 MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
+MODULE_ALIAS("can-proto-2");
 
 /* easy access to can_frame payload */
 static inline u64 GET_U64(const struct can_frame *cp)
@@ -1469,6 +1470,9 @@
 		bo->ifindex = 0;
 	}
 
+	sock_orphan(sk);
+	sock->sk = NULL;
+
 	release_sock(sk);
 	sock_put(sk);
 
diff --git a/net/can/raw.c b/net/can/raw.c
index 6aa154e..f4cc445 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -62,6 +62,7 @@
 MODULE_DESCRIPTION("PF_CAN raw protocol");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
+MODULE_ALIAS("can-proto-1");
 
 #define MASK_ALL 0
 
@@ -306,6 +307,9 @@
 	ro->bound   = 0;
 	ro->count   = 0;
 
+	sock_orphan(sk);
+	sock->sk = NULL;
+
 	release_sock(sk);
 	sock_put(sk);
 
diff --git a/net/core/sock.c b/net/core/sock.c
index ba5d211..bbb25be 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -631,7 +631,7 @@
 
 	case SO_TIMESTAMPING:
 		if (val & ~SOF_TIMESTAMPING_MASK) {
-			ret = EINVAL;
+			ret = -EINVAL;
 			break;
 		}
 		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
@@ -919,13 +919,19 @@
 			af_family_keys + sk->sk_family);
 }
 
+/*
+ * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
+ * even temporarily, because of RCU lookups. sk_node should also be left as is.
+ */
 static void sock_copy(struct sock *nsk, const struct sock *osk)
 {
 #ifdef CONFIG_SECURITY_NETWORK
 	void *sptr = nsk->sk_security;
 #endif
-
-	memcpy(nsk, osk, osk->sk_prot->obj_size);
+	BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) !=
+		     sizeof(osk->sk_node) + sizeof(osk->sk_refcnt));
+	memcpy(&nsk->sk_copy_start, &osk->sk_copy_start,
+	       osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start));
 #ifdef CONFIG_SECURITY_NETWORK
 	nsk->sk_security = sptr;
 	security_sk_clone(osk, nsk);
@@ -1140,6 +1146,11 @@
 
 		newsk->sk_err	   = 0;
 		newsk->sk_priority = 0;
+		/*
+		 * Before updating sk_refcnt, we must commit prior changes to memory
+		 * (Documentation/RCU/rculist_nulls.txt for details)
+		 */
+		smp_wmb();
 		atomic_set(&newsk->sk_refcnt, 2);
 
 		/*
@@ -1855,6 +1866,11 @@
 
 	sk->sk_stamp = ktime_set(-1L, 0);
 
+	/*
+	 * Before updating sk_refcnt, we must commit prior changes to memory
+	 * (Documentation/RCU/rculist_nulls.txt for details)
+	 */
+	smp_wmb();
 	atomic_set(&sk->sk_refcnt, 1);
 	atomic_set(&sk->sk_wmem_alloc, 1);
 	atomic_set(&sk->sk_drops, 0);
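sock_copy() above now starts its memcpy() at sk_copy_start, so the hash-linkage node and the reference count at the head of struct sock are never rewritten while lockless readers may still hold the object. A generic sketch of that offsetof()-based partial copy, using a hypothetical structure:

/* Hypothetical sketch of copying everything except the RCU-visible
 * head fields of an object, as sock_copy() does above. */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/string.h>
#include <asm/atomic.h>

struct demo_obj {
	struct hlist_node	node;	/* visible to lockless readers */
	atomic_t		refcnt;	/* must not be touched by the copy */
	unsigned int		hash;	/* first copied field */
	/* ... remaining fields ... */
};

static void demo_copy(struct demo_obj *dst, const struct demo_obj *src,
		      size_t obj_size)
{
	BUILD_BUG_ON(offsetof(struct demo_obj, hash) !=
		     sizeof(dst->node) + sizeof(dst->refcnt));
	memcpy(&dst->hash, &src->hash,
	       obj_size - offsetof(struct demo_obj, hash));
}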
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 5a1ca26..6d88219 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1160,6 +1160,7 @@
 #ifdef CONFIG_TCP_MD5SIG
 static struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
 	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
+	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
 };
 #endif
 
@@ -1373,7 +1374,7 @@
 		 */
 		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
 		if (newkey != NULL)
-			tcp_v4_md5_do_add(newsk, inet_sk(sk)->daddr,
+			tcp_v4_md5_do_add(newsk, newinet->daddr,
 					  newkey, key->keylen);
 		newsk->sk_route_caps &= ~NETIF_F_GSO_MASK;
 	}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 5bdf08d..bd62712 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2261,7 +2261,7 @@
 #ifdef CONFIG_TCP_MD5SIG
 	/* Okay, we have all we need - do the md5 hash if needed */
 	if (md5) {
-		tp->af_specific->calc_md5_hash(md5_hash_location,
+		tcp_rsk(req)->af_specific->calc_md5_hash(md5_hash_location,
 					       md5, NULL, req, skb);
 	}
 #endif
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 58810c6..d849dd5 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -896,6 +896,7 @@
 #ifdef CONFIG_TCP_MD5SIG
 static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
 	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
+	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
 };
 #endif
 
@@ -1441,7 +1442,7 @@
 		 */
 		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
 		if (newkey != NULL)
-			tcp_v6_md5_do_add(newsk, &inet6_sk(sk)->daddr,
+			tcp_v6_md5_do_add(newsk, &newnp->daddr,
 					  newkey, key->keylen);
 	}
 #endif
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 7508f11..b5869b9 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -561,23 +561,38 @@
 		}
 	}
 
-	ct = kmem_cache_zalloc(nf_conntrack_cachep, gfp);
+	/*
+	 * Do not use kmem_cache_zalloc(), as this cache uses
+	 * SLAB_DESTROY_BY_RCU.
+	 */
+	ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
 	if (ct == NULL) {
 		pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
 		atomic_dec(&net->ct.count);
 		return ERR_PTR(-ENOMEM);
 	}
-
+	/*
+	 * Leave ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.next
+	 * and ct->tuplehash[IP_CT_DIR_REPLY].hnnode.next unchanged.
+	 */
+	memset(&ct->tuplehash[IP_CT_DIR_MAX], 0,
+	       sizeof(*ct) - offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX]));
 	spin_lock_init(&ct->lock);
-	atomic_set(&ct->ct_general.use, 1);
 	ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
+	ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
 	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
+	ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev = NULL;
 	/* Don't set timer yet: wait for confirmation */
 	setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
 #ifdef CONFIG_NET_NS
 	ct->ct_net = net;
 #endif
 
+	/*
+	 * changes to lookup keys must be done before setting refcnt to 1
+	 */
+	smp_wmb();
+	atomic_set(&ct->ct_general.use, 1);
 	return ct;
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
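
The memset() above intentionally starts at tuplehash[IP_CT_DIR_MAX] so that the hnnode.next pointers, which concurrent RCU readers may still be chasing on a recycled SLAB_DESTROY_BY_RCU object, are never overwritten. A small standalone sketch of that clear-only-the-tail idiom, with made-up types standing in for struct nf_conn:

#include <stdio.h>
#include <stddef.h>
#include <string.h>

struct node { struct node *next; };

struct conn {
	struct node hash_link;	/* must survive reuse, like ct->tuplehash[].hnnode */
	int state;		/* everything from here on may be re-initialised   */
	char name[16];
};

int main(void)
{
	struct conn c = { .hash_link.next = (struct node *)0x1, .state = 7 };

	/* zero the object from 'state' onwards, leaving hash_link untouched */
	memset(&c.state, 0, sizeof(c) - offsetof(struct conn, state));

	printf("next=%p state=%d\n", (void *)c.hash_link.next, c.state);
	return 0;
}
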
diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
index 863e409..0f482e2 100644
--- a/net/netfilter/xt_osf.c
+++ b/net/netfilter/xt_osf.c
@@ -330,7 +330,8 @@
 			fcount++;
 
 			if (info->flags & XT_OSF_LOG)
-				nf_log_packet(p->hooknum, 0, skb, p->in, p->out, NULL,
+				nf_log_packet(p->family, p->hooknum, skb,
+					p->in, p->out, NULL,
 					"%s [%s:%s] : %pi4:%d -> %pi4:%d hops=%d\n",
 					f->genre, f->version, f->subtype,
 					&ip->saddr, ntohs(tcp->source),
@@ -345,7 +346,7 @@
 	rcu_read_unlock();
 
 	if (!fcount && (info->flags & XT_OSF_LOG))
-		nf_log_packet(p->hooknum, 0, skb, p->in, p->out, NULL,
+		nf_log_packet(p->family, p->hooknum, skb, p->in, p->out, NULL,
 			"Remote OS is not known: %pi4:%u -> %pi4:%u\n",
 				&ip->saddr, ntohs(tcp->source),
 				&ip->daddr, ntohs(tcp->dest));
diff --git a/sound/core/seq/Makefile b/sound/core/seq/Makefile
index 1bcb360..941f64a 100644
--- a/sound/core/seq/Makefile
+++ b/sound/core/seq/Makefile
@@ -3,10 +3,6 @@
 # Copyright (c) 1999 by Jaroslav Kysela <perex@perex.cz>
 #
 
-ifeq ($(CONFIG_SND_SEQUENCER_OSS),y)
-  obj-$(CONFIG_SND_SEQUENCER) += oss/
-endif
-
 snd-seq-device-objs := seq_device.o
 snd-seq-objs := seq.o seq_lock.o seq_clientmgr.o seq_memory.o seq_queue.o \
                 seq_fifo.o seq_prioq.o seq_timer.o \
@@ -19,7 +15,8 @@
 
 obj-$(CONFIG_SND_SEQUENCER) += snd-seq.o snd-seq-device.o
 ifeq ($(CONFIG_SND_SEQUENCER_OSS),y)
-obj-$(CONFIG_SND_SEQUENCER) += snd-seq-midi-event.o
+  obj-$(CONFIG_SND_SEQUENCER) += snd-seq-midi-event.o
+  obj-$(CONFIG_SND_SEQUENCER) += oss/
 endif
 obj-$(CONFIG_SND_SEQ_DUMMY) += snd-seq-dummy.o
 
diff --git a/sound/isa/gus/gus_pcm.c b/sound/isa/gus/gus_pcm.c
index edb11ee..2dcf45b 100644
--- a/sound/isa/gus/gus_pcm.c
+++ b/sound/isa/gus/gus_pcm.c
@@ -795,13 +795,13 @@
 		if (!(pcmp->flags & SNDRV_GF1_PCM_PFLG_ACTIVE))
 			continue;
 		/* load real volume - better precision */
-		spin_lock_irqsave(&gus->reg_lock, flags);
+		spin_lock(&gus->reg_lock);
 		snd_gf1_select_voice(gus, pvoice->number);
 		snd_gf1_ctrl_stop(gus, SNDRV_GF1_VB_VOLUME_CONTROL);
 		vol = pvoice == pcmp->pvoices[0] ? gus->gf1.pcm_volume_level_left : gus->gf1.pcm_volume_level_right;
 		snd_gf1_write16(gus, SNDRV_GF1_VW_VOLUME, vol);
 		pcmp->final_volume = 1;
-		spin_unlock_irqrestore(&gus->reg_lock, flags);
+		spin_unlock(&gus->reg_lock);
 	}
 	spin_unlock_irqrestore(&gus->voice_alloc, flags);
 	return change;
diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c
index f24bf1e..15e4138 100644
--- a/sound/pci/ca0106/ca0106_main.c
+++ b/sound/pci/ca0106/ca0106_main.c
@@ -325,9 +325,9 @@
 	.rate_max =		192000,
 	.channels_min =		2,
 	.channels_max =		2,
-	.buffer_bytes_max =	((65536 - 64) * 8),
+	.buffer_bytes_max =	65536 - 128,
 	.period_bytes_min =	64,
-	.period_bytes_max =	(65536 - 64),
+	.period_bytes_max =	32768 - 64,
 	.periods_min =		2,
 	.periods_max =		2,
 	.fifo_size =		0,
diff --git a/sound/pci/ctxfi/ctdaio.c b/sound/pci/ctxfi/ctdaio.c
index 082e35c..deb6cfa 100644
--- a/sound/pci/ctxfi/ctdaio.c
+++ b/sound/pci/ctxfi/ctdaio.c
@@ -57,9 +57,9 @@
 
 struct daio_rsc_idx idx_20k2[NUM_DAIOTYP] = {
 	[LINEO1] = {.left = 0x40, .right = 0x41},
-	[LINEO2] = {.left = 0x70, .right = 0x71},
+	[LINEO2] = {.left = 0x60, .right = 0x61},
 	[LINEO3] = {.left = 0x50, .right = 0x51},
-	[LINEO4] = {.left = 0x60, .right = 0x61},
+	[LINEO4] = {.left = 0x70, .right = 0x71},
 	[LINEIM] = {.left = 0x45, .right = 0xc5},
 	[SPDIFOO] = {.left = 0x00, .right = 0x01},
 	[SPDIFIO] = {.left = 0x05, .right = 0x85},
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 26d255d..88480c0 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -332,6 +332,12 @@
 						  AC_VERB_GET_CONNECT_LIST, i);
 		range_val = !!(parm & (1 << (shift-1))); /* ranges */
 		val = parm & mask;
+		if (val == 0) {
+			snd_printk(KERN_WARNING "hda_codec: "
+				   "invalid CONNECT_LIST verb %x[%i]:%x\n",
+				    nid, i, parm);
+			return 0;
+		}
 		parm >>= shift;
 		if (range_val) {
 			/* ranges between the previous and this one */
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 41b5b3a..da7f9f6 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -2378,6 +2378,7 @@
 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0228,
 		      "Dell Vostro 1500", STAC_9205_DELL_M42),
 	/* Gateway */
+	SND_PCI_QUIRK(0x107b, 0x0560, "Gateway T6834c", STAC_9205_EAPD),
 	SND_PCI_QUIRK(0x107b, 0x0565, "Gateway T1616", STAC_9205_EAPD),
 	{} /* terminator */
 };
@@ -5854,6 +5855,8 @@
 };
 
 static struct snd_pci_quirk stac9872_cfg_tbl[] = {
+	SND_PCI_QUIRK_MASK(0x104d, 0xfff0, 0x81e0,
+			   "Sony VAIO F/S", STAC_9872_VAIO),
 	{} /* terminator */
 };
 
@@ -5866,6 +5869,8 @@
 	if (spec == NULL)
 		return -ENOMEM;
 	codec->spec = spec;
+	spec->num_pins = ARRAY_SIZE(stac9872_pin_nids);
+	spec->pin_nids = stac9872_pin_nids;
 
 	spec->board_config = snd_hda_check_board_config(codec, STAC_9872_MODELS,
 							stac9872_models,
@@ -5877,8 +5882,6 @@
 		stac92xx_set_config_regs(codec,
 					 stac9872_brd_tbl[spec->board_config]);
 
-	spec->num_pins = ARRAY_SIZE(stac9872_pin_nids);
-	spec->pin_nids = stac9872_pin_nids;
 	spec->multiout.dac_nids = spec->dac_nids;
 	spec->num_adcs = ARRAY_SIZE(stac9872_adc_nids);
 	spec->adc_nids = stac9872_adc_nids;
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 8aa3f8c..e72e931 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -24,6 +24,9 @@
 --dsos=::
 	Only consider symbols in these dsos. CSV that understands
 	file://filename entries.
+-n::
+--show-nr-samples::
+	Show the number of samples for each symbol.
 -C::
 --comms=::
 	Only consider symbols in these comms. CSV that understands
@@ -33,6 +36,18 @@
 	Only consider these symbols. CSV that understands
 	file://filename entries.
 
+-w::
+--field-width=::
+	Force each column width to the value given in the provided list,
+	for readability on wide terminals.
+
+-t::
+--field-separator=::
+
+	Use a special separator character and don't pad with spaces, replacing
+	all occurrences of this separator in symbol names (and other output)
+	with a '.' character, which is therefore the only invalid separator.
+
 SEE ALSO
 --------
 linkperf:perf-stat[1]
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 7822b3d..a5e9b87 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -345,7 +345,7 @@
 BUILTIN_OBJS += builtin-top.o
 
 PERFLIBS = $(LIB_FILE)
-EXTLIBS =
+EXTLIBS = -lbfd
 
 #
 # Platform specific tweaks
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 5f9eefe..1dba568 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -74,20 +74,12 @@
 	u32 pid, ppid;
 };
 
-struct period_event {
-	struct perf_event_header header;
-	u64 time;
-	u64 id;
-	u64 sample_period;
-};
-
 typedef union event_union {
 	struct perf_event_header	header;
 	struct ip_event			ip;
 	struct mmap_event		mmap;
 	struct comm_event		comm;
 	struct fork_event		fork;
-	struct period_event		period;
 } event_t;
 
 
@@ -998,19 +990,6 @@
 }
 
 static int
-process_period_event(event_t *event, unsigned long offset, unsigned long head)
-{
-	dprintf("%p [%p]: PERF_EVENT_PERIOD: time:%Ld, id:%Ld: period:%Ld\n",
-		(void *)(offset + head),
-		(void *)(long)(event->header.size),
-		event->period.time,
-		event->period.id,
-		event->period.sample_period);
-
-	return 0;
-}
-
-static int
 process_event(event_t *event, unsigned long offset, unsigned long head)
 {
 	switch (event->header.type) {
@@ -1025,9 +1004,6 @@
 
 	case PERF_EVENT_FORK:
 		return process_fork_event(event, offset, head);
-
-	case PERF_EVENT_PERIOD:
-		return process_period_event(event, offset, head);
 	/*
 	 * We dont process them right now but they are fine:
 	 */
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 4ef78a5..6da0992 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -43,6 +43,7 @@
 static int			verbose				= 0;
 static int			inherit_stat			= 0;
 static int			no_samples			= 0;
+static int			sample_address			= 0;
 
 static long			samples;
 static struct timeval		last_read;
@@ -313,6 +314,10 @@
 		if (*pbf == 'x') { /* vm_exec */
 			char *execname = strchr(bf, '/');
 
+			/* Catch VDSO */
+			if (execname == NULL)
+				execname = strstr(bf, "[vdso]");
+
 			if (execname == NULL)
 				continue;
 
@@ -401,6 +406,9 @@
 	if (inherit_stat)
 		attr->inherit_stat = 1;
 
+	if (sample_address)
+		attr->sample_type	|= PERF_SAMPLE_ADDR;
+
 	if (call_graph)
 		attr->sample_type	|= PERF_SAMPLE_CALLCHAIN;
 
@@ -645,6 +653,8 @@
 		    "be more verbose (show counter open errors, etc)"),
 	OPT_BOOLEAN('s', "stat", &inherit_stat,
 		    "per thread counts"),
+	OPT_BOOLEAN('d', "data", &sample_address,
+		    "Sample addresses"),
 	OPT_BOOLEAN('n', "no-samples", &no_samples,
 		    "don't sample"),
 	OPT_END()
@@ -654,7 +664,8 @@
 {
 	int counter;
 
-	argc = parse_options(argc, argv, options, record_usage, 0);
+	argc = parse_options(argc, argv, options, record_usage,
+		PARSE_OPT_STOP_AT_NON_OPTION);
 	if (!argc && target_pid == -1 && !system_wide)
 		usage_with_options(record_usage, options);
 
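
The maps-parsing hunk above now falls back to the literal "[vdso]" name when an executable mapping has no path component. A rough standalone sketch of that scan over /proc/self/maps (simplified: perf inspects the perms column directly instead of matching " r-xp " as a substring):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[4096];
	FILE *f = fopen("/proc/self/maps", "r");

	if (f == NULL)
		return 1;

	while (fgets(line, sizeof(line), f)) {
		char *execname;

		if (!strstr(line, " r-xp "))		/* executable mappings only */
			continue;

		execname = strchr(line, '/');		/* file-backed DSOs */
		if (execname == NULL)
			execname = strstr(line, "[vdso]");	/* catch the vDSO */
		if (execname == NULL)
			continue;

		fputs(execname, stdout);
	}
	fclose(f);
	return 0;
}
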
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 4e5cc26..b20a4b6 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -33,8 +33,10 @@
 
 static char		default_sort_order[] = "comm,dso";
 static char		*sort_order = default_sort_order;
-static char		*dso_list_str, *comm_list_str, *sym_list_str;
+static char		*dso_list_str, *comm_list_str, *sym_list_str,
+			*col_width_list_str;
 static struct strlist	*dso_list, *comm_list, *sym_list;
+static char		*field_sep;
 
 static int		input;
 static int		show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;
@@ -49,6 +51,7 @@
 static int		modules;
 
 static int		full_paths;
+static int		show_nr_samples;
 
 static unsigned long	page_size;
 static unsigned long	mmap_window = 32;
@@ -98,13 +101,6 @@
 	u32 pid, ppid;
 };
 
-struct period_event {
-	struct perf_event_header header;
-	u64 time;
-	u64 id;
-	u64 sample_period;
-};
-
 struct lost_event {
 	struct perf_event_header header;
 	u64 id;
@@ -124,11 +120,37 @@
 	struct mmap_event		mmap;
 	struct comm_event		comm;
 	struct fork_event		fork;
-	struct period_event		period;
 	struct lost_event		lost;
 	struct read_event		read;
 } event_t;
 
+static int repsep_fprintf(FILE *fp, const char *fmt, ...)
+{
+	int n;
+	va_list ap;
+
+	va_start(ap, fmt);
+	if (!field_sep)
+		n = vfprintf(fp, fmt, ap);
+	else {
+		char *bf = NULL;
+		n = vasprintf(&bf, fmt, ap);
+		if (n > 0) {
+			char *sep = bf;
+			while (1) {
+				sep = strchr(sep, *field_sep);
+				if (sep == NULL)
+					break;
+				*sep = '.';
+			}
+		}
+		fputs(bf, fp);
+		free(bf);
+	}
+	va_end(ap);
+	return n;
+}
+
 static LIST_HEAD(dsos);
 static struct dso *kernel_dso;
 static struct dso *vdso;
@@ -360,12 +382,28 @@
 	return self;
 }
 
+static unsigned int dsos__col_width,
+		    comms__col_width,
+		    threads__col_width;
+
 static int thread__set_comm(struct thread *self, const char *comm)
 {
 	if (self->comm)
 		free(self->comm);
 	self->comm = strdup(comm);
-	return self->comm ? 0 : -ENOMEM;
+	if (!self->comm)
+		return -ENOMEM;
+
+	if (!col_width_list_str && !field_sep &&
+	    (!comm_list || strlist__has_entry(comm_list, comm))) {
+		unsigned int slen = strlen(comm);
+		if (slen > comms__col_width) {
+			comms__col_width = slen;
+			threads__col_width = slen + 6;
+		}
+	}
+
+	return 0;
 }
 
 static size_t thread__fprintf(struct thread *self, FILE *fp)
@@ -536,7 +574,9 @@
 
 	int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
 	int64_t (*collapse)(struct hist_entry *, struct hist_entry *);
-	size_t	(*print)(FILE *fp, struct hist_entry *);
+	size_t	(*print)(FILE *fp, struct hist_entry *, unsigned int width);
+	unsigned int *width;
+	bool	elide;
 };
 
 static int64_t cmp_null(void *l, void *r)
@@ -558,15 +598,17 @@
 }
 
 static size_t
-sort__thread_print(FILE *fp, struct hist_entry *self)
+sort__thread_print(FILE *fp, struct hist_entry *self, unsigned int width)
 {
-	return fprintf(fp, "%16s:%5d", self->thread->comm ?: "", self->thread->pid);
+	return repsep_fprintf(fp, "%*s:%5d", width - 6,
+			      self->thread->comm ?: "", self->thread->pid);
 }
 
 static struct sort_entry sort_thread = {
-	.header = "         Command:  Pid",
+	.header = "Command:  Pid",
 	.cmp	= sort__thread_cmp,
 	.print	= sort__thread_print,
+	.width	= &threads__col_width,
 };
 
 /* --sort comm */
@@ -590,16 +632,17 @@
 }
 
 static size_t
-sort__comm_print(FILE *fp, struct hist_entry *self)
+sort__comm_print(FILE *fp, struct hist_entry *self, unsigned int width)
 {
-	return fprintf(fp, "%16s", self->thread->comm);
+	return repsep_fprintf(fp, "%*s", width, self->thread->comm);
 }
 
 static struct sort_entry sort_comm = {
-	.header		= "         Command",
+	.header		= "Command",
 	.cmp		= sort__comm_cmp,
 	.collapse	= sort__comm_collapse,
 	.print		= sort__comm_print,
+	.width		= &comms__col_width,
 };
 
 /* --sort dso */
@@ -617,18 +660,19 @@
 }
 
 static size_t
-sort__dso_print(FILE *fp, struct hist_entry *self)
+sort__dso_print(FILE *fp, struct hist_entry *self, unsigned int width)
 {
 	if (self->dso)
-		return fprintf(fp, "%-25s", self->dso->name);
+		return repsep_fprintf(fp, "%-*s", width, self->dso->name);
 
-	return fprintf(fp, "%016llx         ", (u64)self->ip);
+	return repsep_fprintf(fp, "%*llx", width, (u64)self->ip);
 }
 
 static struct sort_entry sort_dso = {
-	.header = "Shared Object            ",
+	.header = "Shared Object",
 	.cmp	= sort__dso_cmp,
 	.print	= sort__dso_print,
+	.width	= &dsos__col_width,
 };
 
 /* --sort symbol */
@@ -648,22 +692,22 @@
 }
 
 static size_t
-sort__sym_print(FILE *fp, struct hist_entry *self)
+sort__sym_print(FILE *fp, struct hist_entry *self, unsigned int width __used)
 {
 	size_t ret = 0;
 
 	if (verbose)
-		ret += fprintf(fp, "%#018llx  ", (u64)self->ip);
+		ret += repsep_fprintf(fp, "%#018llx  ", (u64)self->ip);
 
+	ret += repsep_fprintf(fp, "[%c] ", self->level);
 	if (self->sym) {
-		ret += fprintf(fp, "[%c] %s",
-			self->dso == kernel_dso ? 'k' :
-			self->dso == hypervisor_dso ? 'h' : '.', self->sym->name);
+		ret += repsep_fprintf(fp, "%s", self->sym->name);
 
 		if (self->sym->module)
-			ret += fprintf(fp, "\t[%s]", self->sym->module->name);
+			ret += repsep_fprintf(fp, "\t[%s]",
+					     self->sym->module->name);
 	} else {
-		ret += fprintf(fp, "%#016llx", (u64)self->ip);
+		ret += repsep_fprintf(fp, "%#016llx", (u64)self->ip);
 	}
 
 	return ret;
@@ -690,19 +734,19 @@
 }
 
 static size_t
-sort__parent_print(FILE *fp, struct hist_entry *self)
+sort__parent_print(FILE *fp, struct hist_entry *self, unsigned int width)
 {
-	size_t ret = 0;
-
-	ret += fprintf(fp, "%-20s", self->parent ? self->parent->name : "[other]");
-
-	return ret;
+	return repsep_fprintf(fp, "%-*s", width,
+			      self->parent ? self->parent->name : "[other]");
 }
 
+static unsigned int parent_symbol__col_width;
+
 static struct sort_entry sort_parent = {
-	.header = "Parent symbol       ",
+	.header = "Parent symbol",
 	.cmp	= sort__parent_cmp,
 	.print	= sort__parent_print,
+	.width	= &parent_symbol__col_width,
 };
 
 static int sort__need_collapse = 0;
@@ -967,17 +1011,25 @@
 		return 0;
 
 	if (total_samples)
-		ret = percent_color_fprintf(fp, "   %6.2f%%",
-				(self->count * 100.0) / total_samples);
+		ret = percent_color_fprintf(fp,
+					    field_sep ? "%.2f" : "   %6.2f%%",
+					(self->count * 100.0) / total_samples);
 	else
-		ret = fprintf(fp, "%12Ld ", self->count);
+		ret = fprintf(fp, field_sep ? "%lld" : "%12lld ", self->count);
+
+	if (show_nr_samples) {
+		if (field_sep)
+			fprintf(fp, "%c%lld", *field_sep, self->count);
+		else
+			fprintf(fp, "%11lld", self->count);
+	}
 
 	list_for_each_entry(se, &hist_entry__sort_list, list) {
-		if (exclude_other && (se == &sort_parent))
+		if (se->elide)
 			continue;
 
-		fprintf(fp, "  ");
-		ret += se->print(fp, self);
+		fprintf(fp, "%s", field_sep ?: "  ");
+		ret += se->print(fp, self, se->width ? *se->width : 0);
 	}
 
 	ret += fprintf(fp, "\n");
@@ -992,6 +1044,18 @@
  *
  */
 
+static void dso__calc_col_width(struct dso *self)
+{
+	if (!col_width_list_str && !field_sep &&
+	    (!dso_list || strlist__has_entry(dso_list, self->name))) {
+		unsigned int slen = strlen(self->name);
+		if (slen > dsos__col_width)
+			dsos__col_width = slen;
+	}
+
+	self->slen_calculated = 1;
+}
+
 static struct symbol *
 resolve_symbol(struct thread *thread, struct map **mapp,
 	       struct dso **dsop, u64 *ipp)
@@ -1011,6 +1075,14 @@
 
 	map = thread__find_map(thread, ip);
 	if (map != NULL) {
+		/*
+		 * We have to do this here as we may have a dso
+		 * with no symbol hit that has a name longer than
+		 * the ones with symbols sampled.
+		 */
+		if (!sort_dso.elide && !map->dso->slen_calculated)
+			dso__calc_col_width(map->dso);
+
 		if (mapp)
 			*mapp = map;
 got_map:
@@ -1282,35 +1354,67 @@
 	struct sort_entry *se;
 	struct rb_node *nd;
 	size_t ret = 0;
+	unsigned int width;
+	char *col_width = col_width_list_str;
 
-	fprintf(fp, "\n");
-	fprintf(fp, "#\n");
-	fprintf(fp, "# (%Ld samples)\n", (u64)total_samples);
+	fprintf(fp, "# Samples: %Ld\n", (u64)total_samples);
 	fprintf(fp, "#\n");
 
 	fprintf(fp, "# Overhead");
+	if (show_nr_samples) {
+		if (field_sep)
+			fprintf(fp, "%cSamples", *field_sep);
+		else
+			fputs("  Samples  ", fp);
+	}
 	list_for_each_entry(se, &hist_entry__sort_list, list) {
-		if (exclude_other && (se == &sort_parent))
+		if (se->elide)
 			continue;
-		fprintf(fp, "  %s", se->header);
+		if (field_sep) {
+			fprintf(fp, "%c%s", *field_sep, se->header);
+			continue;
+		}
+		width = strlen(se->header);
+		if (se->width) {
+			if (col_width_list_str) {
+				if (col_width) {
+					*se->width = atoi(col_width);
+					col_width = strchr(col_width, ',');
+					if (col_width)
+						++col_width;
+				}
+			}
+			width = *se->width = max(*se->width, width);
+		}
+		fprintf(fp, "  %*s", width, se->header);
 	}
 	fprintf(fp, "\n");
 
+	if (field_sep)
+		goto print_entries;
+
 	fprintf(fp, "# ........");
+	if (show_nr_samples)
+		fprintf(fp, " ..........");
 	list_for_each_entry(se, &hist_entry__sort_list, list) {
 		unsigned int i;
 
-		if (exclude_other && (se == &sort_parent))
+		if (se->elide)
 			continue;
 
 		fprintf(fp, "  ");
-		for (i = 0; i < strlen(se->header); i++)
+		if (se->width)
+			width = *se->width;
+		else
+			width = strlen(se->header);
+		for (i = 0; i < width; i++)
 			fprintf(fp, ".");
 	}
 	fprintf(fp, "\n");
 
 	fprintf(fp, "#\n");
 
+print_entries:
 	for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) {
 		pos = rb_entry(nd, struct hist_entry, rb_node);
 		ret += hist_entry__fprintf(fp, pos, total_samples);
@@ -1524,19 +1628,6 @@
 }
 
 static int
-process_period_event(event_t *event, unsigned long offset, unsigned long head)
-{
-	dprintf("%p [%p]: PERF_EVENT_PERIOD: time:%Ld, id:%Ld: period:%Ld\n",
-		(void *)(offset + head),
-		(void *)(long)(event->header.size),
-		event->period.time,
-		event->period.id,
-		event->period.sample_period);
-
-	return 0;
-}
-
-static int
 process_lost_event(event_t *event, unsigned long offset, unsigned long head)
 {
 	dprintf("%p [%p]: PERF_EVENT_LOST: id:%Ld: lost:%Ld\n",
@@ -1617,9 +1708,6 @@
 	case PERF_EVENT_FORK:
 		return process_fork_event(event, offset, head);
 
-	case PERF_EVENT_PERIOD:
-		return process_period_event(event, offset, head);
-
 	case PERF_EVENT_LOST:
 		return process_lost_event(event, offset, head);
 
@@ -1883,6 +1971,8 @@
 	OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
 	OPT_BOOLEAN('m', "modules", &modules,
 		    "load module symbols - WARNING: use only with -k and LIVE kernel"),
+	OPT_BOOLEAN('n', "show-nr-samples", &show_nr_samples,
+		    "Show a column with the number of samples"),
 	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
 		   "sort by key(s): pid, comm, dso, symbol, parent"),
 	OPT_BOOLEAN('P', "full-paths", &full_paths,
@@ -1891,15 +1981,21 @@
 		   "regex filter to identify parent, see: '--sort parent'"),
 	OPT_BOOLEAN('x', "exclude-other", &exclude_other,
 		    "Only display entries with parent-match"),
-	OPT_CALLBACK_DEFAULT('c', "callchain", NULL, "output_type,min_percent",
+	OPT_CALLBACK_DEFAULT('g', "call-graph", NULL, "output_type,min_percent",
 		     "Display callchains using output_type and min percent threshold. "
-		     "Default: flat,0", &parse_callchain_opt, callchain_default_opt),
+		     "Default: fractal,0.5", &parse_callchain_opt, callchain_default_opt),
 	OPT_STRING('d', "dsos", &dso_list_str, "dso[,dso...]",
 		   "only consider symbols in these dsos"),
 	OPT_STRING('C', "comms", &comm_list_str, "comm[,comm...]",
 		   "only consider symbols in these comms"),
 	OPT_STRING('S', "symbols", &sym_list_str, "symbol[,symbol...]",
 		   "only consider these symbols"),
+	OPT_STRING('w', "column-widths", &col_width_list_str,
+		   "width[,width...]",
+		   "don't try to adjust column width, use these fixed values"),
+	OPT_STRING('t', "field-separator", &field_sep, "separator",
+		   "separator for columns, no spaces will be added between "
+		   "columns '.' is reserved."),
 	OPT_END()
 };
 
@@ -1919,7 +2015,8 @@
 }
 
 static void setup_list(struct strlist **list, const char *list_str,
-		       const char *list_name)
+		       struct sort_entry *se, const char *list_name,
+		       FILE *fp)
 {
 	if (list_str) {
 		*list = strlist__new(true, list_str);
@@ -1928,6 +2025,11 @@
 				list_name);
 			exit(129);
 		}
+		if (strlist__nr_entries(*list) == 1) {
+			fprintf(fp, "# %s: %s\n", list_name,
+				strlist__entry(*list, 0)->s);
+			se->elide = true;
+		}
 	}
 }
 
@@ -1941,9 +2043,10 @@
 
 	setup_sorting();
 
-	if (parent_pattern != default_parent_pattern)
+	if (parent_pattern != default_parent_pattern) {
 		sort_dimension__add("parent");
-	else
+		sort_parent.elide = 1;
+	} else
 		exclude_other = 0;
 
 	/*
@@ -1952,11 +2055,17 @@
 	if (argc)
 		usage_with_options(report_usage, options);
 
-	setup_list(&dso_list, dso_list_str, "dso");
-	setup_list(&comm_list, comm_list_str, "comm");
-	setup_list(&sym_list, sym_list_str, "symbol");
-
 	setup_pager();
 
+	setup_list(&dso_list, dso_list_str, &sort_dso, "dso", stdout);
+	setup_list(&comm_list, comm_list_str, &sort_comm, "comm", stdout);
+	setup_list(&sym_list, sym_list_str, &sort_sym, "symbol", stdout);
+
+	if (field_sep && *field_sep == '.') {
+		fputs("'.' is the only non valid --field-separator argument\n",
+		      stderr);
+		exit(129);
+	}
+
 	return __cmd_report();
 }
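
repsep_fprintf() above is what keeps -t/--field-separator output unambiguous: field contents are formatted normally and any embedded separator character is rewritten as '.', while the separator between columns is emitted verbatim elsewhere. A standalone sketch of the same idea (glibc-specific because of vasprintf(); sep_printf is an invented name):

#define _GNU_SOURCE		/* for vasprintf() */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Format as usual, then turn every occurrence of the separator character
 * inside the formatted text into '.' so downstream scripts can still split
 * columns reliably. */
static int sep_printf(FILE *fp, char sep, const char *fmt, ...)
{
	va_list ap;
	char *buf = NULL;
	int n;

	va_start(ap, fmt);
	n = vasprintf(&buf, fmt, ap);
	va_end(ap);

	if (n > 0) {
		char *p = buf;

		while ((p = strchr(p, sep)) != NULL)
			*p = '.';
		fputs(buf, fp);
	}
	free(buf);
	return n;
}

int main(void)
{
	/* field contents go through sep_printf(), the column separator
	 * itself is printed verbatim in between */
	sep_printf(stdout, ',', "%s", "comm,with,commas");
	fputc(',', stdout);
	sep_printf(stdout, ',', "%d\n", 42);	/* prints: comm.with.commas,42 */
	return 0;
}
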
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 27921a8..f9510ee 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -511,7 +511,8 @@
 {
 	int status;
 
-	argc = parse_options(argc, argv, options, stat_usage, 0);
+	argc = parse_options(argc, argv, options, stat_usage,
+		PARSE_OPT_STOP_AT_NON_OPTION);
 	if (!argc)
 		usage_with_options(stat_usage, options);
 	if (run_count <= 0 || run_count > MAX_RUN)
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 95d5c0a..c0a4230 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -58,6 +58,7 @@
 static int			print_entries			= 15;
 
 static int			target_pid			= -1;
+static int			inherit				=  0;
 static int			profile_cpu			= -1;
 static int			nr_cpus				=  0;
 static unsigned int		realtime_prio			=  0;
@@ -549,7 +550,7 @@
 static void start_counter(int i, int counter)
 {
 	struct perf_counter_attr *attr;
-	unsigned int cpu;
+	int cpu;
 
 	cpu = profile_cpu;
 	if (target_pid == -1 && profile_cpu == -1)
@@ -559,6 +560,7 @@
 
 	attr->sample_type	= PERF_SAMPLE_IP | PERF_SAMPLE_TID;
 	attr->freq		= freq;
+	attr->inherit		= (cpu < 0) && inherit;
 
 try_again:
 	fd[i][counter] = sys_perf_counter_open(attr, target_pid, cpu, group_fd, 0);
@@ -685,6 +687,8 @@
 		    "only display functions with more events than this"),
 	OPT_BOOLEAN('g', "group", &group,
 			    "put the counters into a counter group"),
+	OPT_BOOLEAN('i', "inherit", &inherit,
+		    "child tasks inherit counters"),
 	OPT_STRING('s', "sym-filter", &sym_filter, "pattern",
 		    "only display symbols matchig this pattern"),
 	OPT_BOOLEAN('z', "zero", &zero,
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index c565678..31982ad 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -12,6 +12,8 @@
 #include "util/cache.h"
 #include "util/quote.h"
 #include "util/run-command.h"
+#include "util/parse-events.h"
+#include "util/string.h"
 
 const char perf_usage_string[] =
 	"perf [--version] [--help] COMMAND [ARGS]";
@@ -25,6 +27,8 @@
 	int val;
 };
 
+static char debugfs_mntpt[MAXPATHLEN];
+
 static int pager_command_config(const char *var, const char *value, void *data)
 {
 	struct pager_config *c = data;
@@ -56,6 +60,15 @@
 	}
 }
 
+static void set_debugfs_path(void)
+{
+	char *path;
+
+	path = getenv(PERF_DEBUGFS_ENVIRONMENT);
+	snprintf(debugfs_path, MAXPATHLEN, "%s/%s", path ?: debugfs_mntpt,
+		 "tracing/events");
+}
+
 static int handle_options(const char*** argv, int* argc, int* envchanged)
 {
 	int handled = 0;
@@ -122,6 +135,22 @@
 			setenv(PERF_WORK_TREE_ENVIRONMENT, cmd + 12, 1);
 			if (envchanged)
 				*envchanged = 1;
+		} else if (!strcmp(cmd, "--debugfs-dir")) {
+			if (*argc < 2) {
+				fprintf(stderr, "No directory given for --debugfs-dir.\n");
+				usage(perf_usage_string);
+			}
+			strncpy(debugfs_mntpt, (*argv)[1], MAXPATHLEN);
+			debugfs_mntpt[MAXPATHLEN - 1] = '\0';
+			if (envchanged)
+				*envchanged = 1;
+			(*argv)++;
+			(*argc)--;
+		} else if (!prefixcmp(cmd, "--debugfs-dir=")) {
+			strncpy(debugfs_mntpt, cmd + 14, MAXPATHLEN);
+			debugfs_mntpt[MAXPATHLEN - 1] = '\0';
+			if (envchanged)
+				*envchanged = 1;
 		} else {
 			fprintf(stderr, "Unknown option: %s\n", cmd);
 			usage(perf_usage_string);
@@ -228,6 +257,7 @@
 	if (use_pager == -1 && p->option & USE_PAGER)
 		use_pager = 1;
 	commit_pager_choice();
+	set_debugfs_path();
 
 	status = p->fn(argc, argv, prefix);
 	if (status)
@@ -346,6 +376,49 @@
 	return done_alias;
 }
 
+/* mini /proc/mounts parser: searching for "^blah /mount/point debugfs" */
+static void get_debugfs_mntpt(void)
+{
+	FILE *file;
+	char fs_type[100];
+	char debugfs[MAXPATHLEN];
+
+	/*
+	 * try the standard location
+	 */
+	if (valid_debugfs_mount("/sys/kernel/debug/") == 0) {
+		strcpy(debugfs_mntpt, "/sys/kernel/debug/");
+		return;
+	}
+
+	/*
+	 * try the sane location
+	 */
+	if (valid_debugfs_mount("/debug/") == 0) {
+		strcpy(debugfs_mntpt, "/debug/");
+		return;
+	}
+
+	/*
+	 * give up and parse /proc/mounts
+	 */
+	file = fopen("/proc/mounts", "r");
+	if (file == NULL)
+		return;
+
+	while (fscanf(file, "%*s %"
+		      STR(MAXPATHLEN)
+		      "s %99s %*s %*d %*d\n",
+		      debugfs, fs_type) == 2) {
+		if (strcmp(fs_type, "debugfs") == 0)
+			break;
+	}
+	fclose(file);
+	if (strcmp(fs_type, "debugfs") == 0) {
+		strncpy(debugfs_mntpt, debugfs, MAXPATHLEN);
+		debugfs_mntpt[MAXPATHLEN - 1] = '\0';
+	}
+}
 
 int main(int argc, const char **argv)
 {
@@ -354,7 +427,8 @@
 	cmd = perf_extract_argv0_path(argv[0]);
 	if (!cmd)
 		cmd = "perf-help";
-
+	/* get debugfs mount point from /proc/mounts */
+	get_debugfs_mntpt();
 	/*
 	 * "perf-xxxx" is the same as "perf xxxx", but we obviously:
 	 *
@@ -377,6 +451,7 @@
 	argc--;
 	handle_options(&argv, &argc, NULL);
 	commit_pager_choice();
+	set_debugfs_path();
 	if (argc > 0) {
 		if (!prefixcmp(argv[0], "--"))
 			argv[0] += 2;
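
get_debugfs_mntpt() above tries the conventional mount points first and only then parses /proc/mounts. A stripped-down, runnable sketch of that /proc/mounts fallback:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char mnt[4096], type[100];
	FILE *f = fopen("/proc/mounts", "r");

	if (f == NULL)
		return 1;

	/* each line: device mountpoint fstype options dump pass */
	while (fscanf(f, "%*s %4095s %99s %*s %*d %*d\n", mnt, type) == 2) {
		if (strcmp(type, "debugfs") == 0) {
			printf("debugfs mounted at %s\n", mnt);
			break;
		}
	}
	fclose(f);
	return 0;
}
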
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 63e67cc..e5148e2 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -1,7 +1,13 @@
 #ifndef _PERF_PERF_H
 #define _PERF_PERF_H
 
-#if defined(__x86_64__) || defined(__i386__)
+#if defined(__i386__)
+#include "../../arch/x86/include/asm/unistd.h"
+#define rmb()		asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
+#define cpu_relax()	asm volatile("rep; nop" ::: "memory");
+#endif
+
+#if defined(__x86_64__)
 #include "../../arch/x86/include/asm/unistd.h"
 #define rmb()		asm volatile("lfence" ::: "memory")
 #define cpu_relax()	asm volatile("rep; nop" ::: "memory");
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h
index 161d5f4..4b50c41 100644
--- a/tools/perf/util/cache.h
+++ b/tools/perf/util/cache.h
@@ -18,6 +18,7 @@
 #define PERFATTRIBUTES_FILE ".perfattributes"
 #define INFOATTRIBUTES_FILE "info/attributes"
 #define ATTRIBUTE_MACRO_PREFIX "[attr]"
+#define PERF_DEBUGFS_ENVIRONMENT "PERF_DEBUGFS_DIR"
 
 typedef int (*config_fn_t)(const char *, const char *, void *);
 extern int perf_default_config(const char *, const char *, void *);
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index b5ef53a..bf28044 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -16,7 +16,7 @@
 	int frozen;
 	int attrs, size;
 	struct perf_header_attr **attr;
-	off_t attr_offset;
+	s64 attr_offset;
 	u64 data_offset;
 	u64 data_size;
 };
diff --git a/tools/perf/util/include/linux/kernel.h b/tools/perf/util/include/linux/kernel.h
index 99c1b3d..a6b8739 100644
--- a/tools/perf/util/include/linux/kernel.h
+++ b/tools/perf/util/include/linux/kernel.h
@@ -18,4 +18,12 @@
 	(type *)((char *)__mptr - offsetof(type, member)); })
 #endif
 
+#ifndef max
+#define max(x, y) ({				\
+	typeof(x) _max1 = (x);			\
+	typeof(y) _max2 = (y);			\
+	(void) (&_max1 == &_max2);		\
+	_max1 > _max2 ? _max1 : _max2; })
+#endif
+
 #endif
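
The max() copied into the perf headers above keeps the kernel's type check: the otherwise useless (void)(&_max1 == &_max2) makes the compiler warn when the two arguments have different types, while each argument is still evaluated only once. A tiny usage sketch (gcc/clang only, since it relies on typeof and statement expressions):

#include <stdio.h>

/* same type-checked max() as the perf copy of linux/kernel.h */
#define max(x, y) ({				\
	typeof(x) _max1 = (x);			\
	typeof(y) _max2 = (y);			\
	(void) (&_max1 == &_max2);		\
	_max1 > _max2 ? _max1 : _max2; })

int main(void)
{
	unsigned int width = 7;

	width = max(width, 11u);	/* fine: both operands are unsigned int */
	/* width = max(width, 11L); would draw a pointer-comparison warning   */
	printf("width = %u\n", width);
	return 0;
}
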
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 5184959..7bdad8d 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -5,6 +5,7 @@
 #include "parse-events.h"
 #include "exec_cmd.h"
 #include "string.h"
+#include "cache.h"
 
 extern char *strcasestr(const char *haystack, const char *needle);
 
@@ -19,6 +20,8 @@
 	char	*alias;
 };
 
+char debugfs_path[MAXPATHLEN];
+
 #define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x
 #define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x
 
@@ -71,8 +74,8 @@
 #define MAX_ALIASES 8
 
 static char *hw_cache[][MAX_ALIASES] = {
- { "L1-d$",	"l1-d",		"l1d",		"L1-data",		},
- { "L1-i$",	"l1-i",		"l1i",		"L1-instruction",	},
+ { "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
+ { "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
  { "LLC",	"L2"							},
  { "dTLB",	"d-tlb",	"Data-TLB",				},
  { "iTLB",	"i-tlb",	"Instruction-TLB",			},
@@ -110,6 +113,88 @@
  [C(BPU)]	= (CACHE_READ),
 };
 
+#define for_each_subsystem(sys_dir, sys_dirent, sys_next, file, st)	       \
+	while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next)	       \
+	if (snprintf(file, MAXPATHLEN, "%s/%s", debugfs_path,	       	       \
+			sys_dirent.d_name) &&		       		       \
+	   (!stat(file, &st)) && (S_ISDIR(st.st_mode)) &&		       \
+	   (strcmp(sys_dirent.d_name, ".")) &&				       \
+	   (strcmp(sys_dirent.d_name, "..")))
+
+#define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next, file, st)    \
+	while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next)        \
+	if (snprintf(file, MAXPATHLEN, "%s/%s/%s", debugfs_path,	       \
+		     sys_dirent.d_name, evt_dirent.d_name) &&		       \
+	   (!stat(file, &st)) && (S_ISDIR(st.st_mode)) &&		       \
+	   (strcmp(evt_dirent.d_name, ".")) &&				       \
+	   (strcmp(evt_dirent.d_name, "..")))
+
+#define MAX_EVENT_LENGTH 30
+
+int valid_debugfs_mount(const char *debugfs)
+{
+	struct statfs st_fs;
+
+	if (statfs(debugfs, &st_fs) < 0)
+		return -ENOENT;
+	else if (st_fs.f_type != (long) DEBUGFS_MAGIC)
+		return -ENOENT;
+	return 0;
+}
+
+static char *tracepoint_id_to_name(u64 config)
+{
+	static char tracepoint_name[2 * MAX_EVENT_LENGTH];
+	DIR *sys_dir, *evt_dir;
+	struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
+	struct stat st;
+	char id_buf[4];
+	int fd;
+	u64 id;
+	char evt_path[MAXPATHLEN];
+
+	if (valid_debugfs_mount(debugfs_path))
+		return "unkown";
+
+	sys_dir = opendir(debugfs_path);
+	if (!sys_dir)
+		goto cleanup;
+
+	for_each_subsystem(sys_dir, sys_dirent, sys_next, evt_path, st) {
+		evt_dir = opendir(evt_path);
+		if (!evt_dir)
+			goto cleanup;
+		for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next,
+								evt_path, st) {
+			snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id",
+				 debugfs_path, sys_dirent.d_name,
+				 evt_dirent.d_name);
+			fd = open(evt_path, O_RDONLY);
+			if (fd < 0)
+				continue;
+			if (read(fd, id_buf, sizeof(id_buf)) < 0) {
+				close(fd);
+				continue;
+			}
+			close(fd);
+			id = atoll(id_buf);
+			if (id == config) {
+				closedir(evt_dir);
+				closedir(sys_dir);
+				snprintf(tracepoint_name, 2 * MAX_EVENT_LENGTH,
+					"%s:%s", sys_dirent.d_name,
+					evt_dirent.d_name);
+				return tracepoint_name;
+			}
+		}
+		closedir(evt_dir);
+	}
+
+cleanup:
+	closedir(sys_dir);
+	return "unkown";
+}
+
 static int is_cache_op_valid(u8 cache_type, u8 cache_op)
 {
 	if (hw_cache_stat[cache_type] & COP(cache_op))
@@ -177,6 +262,9 @@
 			return sw_event_names[config];
 		return "unknown-software";
 
+	case PERF_TYPE_TRACEPOINT:
+		return tracepoint_id_to_name(config);
+
 	default:
 		break;
 	}
@@ -265,6 +353,53 @@
 	return 1;
 }
 
+static int parse_tracepoint_event(const char **strp,
+				    struct perf_counter_attr *attr)
+{
+	const char *evt_name;
+	char sys_name[MAX_EVENT_LENGTH];
+	char id_buf[4];
+	int fd;
+	unsigned int sys_length, evt_length;
+	u64 id;
+	char evt_path[MAXPATHLEN];
+
+	if (valid_debugfs_mount(debugfs_path))
+		return 0;
+
+	evt_name = strchr(*strp, ':');
+	if (!evt_name)
+		return 0;
+
+	sys_length = evt_name - *strp;
+	if (sys_length >= MAX_EVENT_LENGTH)
+		return 0;
+
+	strncpy(sys_name, *strp, sys_length);
+	sys_name[sys_length] = '\0';
+	evt_name = evt_name + 1;
+	evt_length = strlen(evt_name);
+	if (evt_length >= MAX_EVENT_LENGTH)
+		return 0;
+
+	snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path,
+		 sys_name, evt_name);
+	fd = open(evt_path, O_RDONLY);
+	if (fd < 0)
+		return 0;
+
+	if (read(fd, id_buf, sizeof(id_buf)) < 0) {
+		close(fd);
+		return 0;
+	}
+	close(fd);
+	id = atoll(id_buf);
+	attr->config = id;
+	attr->type = PERF_TYPE_TRACEPOINT;
+	*strp = evt_name + evt_length;
+	return 1;
+}
+
 static int check_events(const char *str, unsigned int i)
 {
 	int n;
@@ -374,7 +509,8 @@
  */
 static int parse_event_symbols(const char **str, struct perf_counter_attr *attr)
 {
-	if (!(parse_raw_event(str, attr) ||
+	if (!(parse_tracepoint_event(str, attr) ||
+	      parse_raw_event(str, attr) ||
 	      parse_numeric_event(str, attr) ||
 	      parse_symbolic_event(str, attr) ||
 	      parse_generic_hw_event(str, attr)))
@@ -423,6 +559,42 @@
 };
 
 /*
+ * Print the events from <debugfs_mount_point>/tracing/events
+ */
+
+static void print_tracepoint_events(void)
+{
+	DIR *sys_dir, *evt_dir;
+	struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
+	struct stat st;
+	char evt_path[MAXPATHLEN];
+
+	if (valid_debugfs_mount(debugfs_path))
+		return;
+
+	sys_dir = opendir(debugfs_path);
+	if (!sys_dir)
+		goto cleanup;
+
+	for_each_subsystem(sys_dir, sys_dirent, sys_next, evt_path, st) {
+		evt_dir = opendir(evt_path);
+		if (!evt_dir)
+			goto cleanup;
+		for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next,
+								evt_path, st) {
+			snprintf(evt_path, MAXPATHLEN, "%s:%s",
+				 sys_dirent.d_name, evt_dirent.d_name);
+			fprintf(stderr, "  %-40s [%s]\n", evt_path,
+				event_type_descriptors[PERF_TYPE_TRACEPOINT+1]);
+		}
+		closedir(evt_dir);
+	}
+
+cleanup:
+	closedir(sys_dir);
+}
+
+/*
  * Print the help text for the event symbols:
  */
 void print_events(void)
@@ -436,7 +608,7 @@
 
 	for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) {
 		type = syms->type + 1;
-		if (type > ARRAY_SIZE(event_type_descriptors))
+		if (type >= ARRAY_SIZE(event_type_descriptors))
 			type = 0;
 
 		if (type != prev_type)
@@ -472,5 +644,7 @@
 		"rNNN");
 	fprintf(stderr, "\n");
 
+	print_tracepoint_events();
+
 	exit(129);
 }
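
Both tracepoint_id_to_name() and parse_tracepoint_event() above revolve around the per-event id file at <debugfs>/tracing/events/<sys>/<event>/id, whose decimal content becomes attr->config. A minimal sketch of that lookup; the sched:sched_switch event and the /sys/kernel/debug mount point are assumptions for illustration:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	char buf[16];
	ssize_t n;
	/* assumed paths: debugfs on /sys/kernel/debug, sched:sched_switch event */
	int fd = open("/sys/kernel/debug/tracing/events/sched/sched_switch/id",
		      O_RDONLY);

	if (fd < 0)
		return 1;

	n = read(fd, buf, sizeof(buf) - 1);
	close(fd);
	if (n <= 0)
		return 1;
	buf[n] = '\0';

	/* this decimal id is what ends up in perf_counter_attr.config */
	printf("config = %lld\n", atoll(buf));
	return 0;
}
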
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index e3d5529..1ea5d09 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -3,6 +3,8 @@
  * Parse symbolic events/counts passed in as options:
  */
 
+struct option;
+
 extern int			nr_counters;
 
 extern struct perf_counter_attr attrs[MAX_COUNTERS];
@@ -15,3 +17,6 @@
 
 extern void print_events(void);
 
+extern char debugfs_path[];
+extern int valid_debugfs_mount(const char *debugfs);
+
diff --git a/tools/perf/util/string.h b/tools/perf/util/string.h
index 3dca2f6..bf39dfa 100644
--- a/tools/perf/util/string.h
+++ b/tools/perf/util/string.h
@@ -5,4 +5,7 @@
 
 int hex2u64(const char *ptr, u64 *val);
 
+#define _STR(x) #x
+#define STR(x) _STR(x)
+
 #endif
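
_STR()/STR() is the usual two-level stringification trick: the inner macro stringifies, the outer one forces its argument to be macro-expanded first, which is how perf.c can build a bounded fscanf() conversion such as "%" STR(MAXPATHLEN) "s". A small sketch (NAMELEN is an arbitrary illustrative constant; note the buffer reserves room for the terminating NUL):

#include <stdio.h>

#define _STR(x) #x
#define STR(x)  _STR(x)

#define NAMELEN 63

int main(void)
{
	char name[NAMELEN + 1];

	/* STR(NAMELEN) expands to "63", so the format below is "%63s",
	 * the same trick perf.c uses to bound its /proc/mounts fscanf() */
	if (sscanf("debugfs-mount leftover", "%" STR(NAMELEN) "s", name) == 1)
		printf("read with \"%s\": %s\n", "%" STR(NAMELEN) "s", name);
	return 0;
}
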
diff --git a/tools/perf/util/strlist.c b/tools/perf/util/strlist.c
index 025a78e..7ad3817 100644
--- a/tools/perf/util/strlist.c
+++ b/tools/perf/util/strlist.c
@@ -64,6 +64,7 @@
 
 	rb_link_node(&sn->rb_node, parent, p);
 	rb_insert_color(&sn->rb_node, &self->entries);
+	++self->nr_entries;
 
 	return 0;
 }
@@ -155,8 +156,9 @@
 	struct strlist *self = malloc(sizeof(*self));
 
 	if (self != NULL) {
-		self->entries = RB_ROOT;
-		self->dupstr = dupstr;
+		self->entries	 = RB_ROOT;
+		self->dupstr	 = dupstr;
+		self->nr_entries = 0;
 		if (slist && strlist__parse_list(self, slist) != 0)
 			goto out_error;
 	}
@@ -182,3 +184,17 @@
 		free(self);
 	}
 }
+
+struct str_node *strlist__entry(const struct strlist *self, unsigned int idx)
+{
+	struct rb_node *nd;
+
+	for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
+		struct str_node *pos = rb_entry(nd, struct str_node, rb_node);
+
+		if (!idx--)
+			return pos;
+	}
+
+	return NULL;
+}
diff --git a/tools/perf/util/strlist.h b/tools/perf/util/strlist.h
index 2fdcfee..921818e 100644
--- a/tools/perf/util/strlist.h
+++ b/tools/perf/util/strlist.h
@@ -11,7 +11,8 @@
 
 struct strlist {
 	struct rb_root entries;
-	bool dupstr;
+	unsigned int   nr_entries;
+	bool	       dupstr;
 };
 
 struct strlist *strlist__new(bool dupstr, const char *slist);
@@ -21,11 +22,17 @@
 int strlist__load(struct strlist *self, const char *filename);
 int strlist__add(struct strlist *self, const char *str);
 
+struct str_node *strlist__entry(const struct strlist *self, unsigned int idx);
 bool strlist__has_entry(struct strlist *self, const char *entry);
 
 static inline bool strlist__empty(const struct strlist *self)
 {
-	return rb_first(&self->entries) == NULL;
+	return self->nr_entries == 0;
+}
+
+static inline unsigned int strlist__nr_entries(const struct strlist *self)
+{
+	return self->nr_entries;
 }
 
 int strlist__parse_list(struct strlist *self, const char *s);
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 4683b67..2810605 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -6,9 +6,15 @@
 #include <libelf.h>
 #include <gelf.h>
 #include <elf.h>
+#include <bfd.h>
 
 const char *sym_hist_filter;
 
+#ifndef DMGL_PARAMS
+#define DMGL_PARAMS      (1 << 0)       /* Include function args */
+#define DMGL_ANSI        (1 << 1)       /* Include const, volatile, etc */
+#endif
+
 static struct symbol *symbol__new(u64 start, u64 len,
 				  const char *name, unsigned int priv_size,
 				  u64 obj_start, int verbose)
@@ -65,6 +71,7 @@
 		self->syms = RB_ROOT;
 		self->sym_priv_size = sym_priv_size;
 		self->find_symbol = dso__find_symbol;
+		self->slen_calculated = 0;
 	}
 
 	return self;
@@ -373,36 +380,61 @@
 	     idx < nr_entries; \
 	     ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))
 
-static int dso__synthesize_plt_symbols(struct  dso *self, Elf *elf,
-				       GElf_Ehdr *ehdr, Elf_Scn *scn_dynsym,
-				       GElf_Shdr *shdr_dynsym,
-				       size_t dynsym_idx, int verbose)
+/*
+ * We need to check if we have a .dynsym, so that we can handle the
+ * .plt, synthesizing its symbols, which aren't in the symtabs (be it
+ * .dynsym or .symtab).
+ * Always look at the original dso, not at debuginfo packages, which
+ * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
+ */
+static int dso__synthesize_plt_symbols(struct  dso *self, int verbose)
 {
 	uint32_t nr_rel_entries, idx;
 	GElf_Sym sym;
 	u64 plt_offset;
 	GElf_Shdr shdr_plt;
 	struct symbol *f;
-	GElf_Shdr shdr_rel_plt;
+	GElf_Shdr shdr_rel_plt, shdr_dynsym;
 	Elf_Data *reldata, *syms, *symstrs;
-	Elf_Scn *scn_plt_rel, *scn_symstrs;
+	Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
+	size_t dynsym_idx;
+	GElf_Ehdr ehdr;
 	char sympltname[1024];
-	int nr = 0, symidx;
+	Elf *elf;
+	int nr = 0, symidx, fd, err = 0;
 
-	scn_plt_rel = elf_section_by_name(elf, ehdr, &shdr_rel_plt,
+	fd = open(self->name, O_RDONLY);
+	if (fd < 0)
+		goto out;
+
+	elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
+	if (elf == NULL)
+		goto out_close;
+
+	if (gelf_getehdr(elf, &ehdr) == NULL)
+		goto out_elf_end;
+
+	scn_dynsym = elf_section_by_name(elf, &ehdr, &shdr_dynsym,
+					 ".dynsym", &dynsym_idx);
+	if (scn_dynsym == NULL)
+		goto out_elf_end;
+
+	scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
 					  ".rela.plt", NULL);
 	if (scn_plt_rel == NULL) {
-		scn_plt_rel = elf_section_by_name(elf, ehdr, &shdr_rel_plt,
+		scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
 						  ".rel.plt", NULL);
 		if (scn_plt_rel == NULL)
-			return 0;
+			goto out_elf_end;
 	}
 
-	if (shdr_rel_plt.sh_link != dynsym_idx)
-		return 0;
+	err = -1;
 
-	if (elf_section_by_name(elf, ehdr, &shdr_plt, ".plt", NULL) == NULL)
-		return 0;
+	if (shdr_rel_plt.sh_link != dynsym_idx)
+		goto out_elf_end;
+
+	if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL)
+		goto out_elf_end;
 
 	/*
 	 * Fetch the relocation section to find the indexes to the GOT
@@ -410,19 +442,19 @@
 	 */
 	reldata = elf_getdata(scn_plt_rel, NULL);
 	if (reldata == NULL)
-		return -1;
+		goto out_elf_end;
 
 	syms = elf_getdata(scn_dynsym, NULL);
 	if (syms == NULL)
-		return -1;
+		goto out_elf_end;
 
-	scn_symstrs = elf_getscn(elf, shdr_dynsym->sh_link);
+	scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
 	if (scn_symstrs == NULL)
-		return -1;
+		goto out_elf_end;
 
 	symstrs = elf_getdata(scn_symstrs, NULL);
 	if (symstrs == NULL)
-		return -1;
+		goto out_elf_end;
 
 	nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
 	plt_offset = shdr_plt.sh_offset;
@@ -441,7 +473,7 @@
 			f = symbol__new(plt_offset, shdr_plt.sh_entsize,
 					sympltname, self->sym_priv_size, 0, verbose);
 			if (!f)
-				return -1;
+				goto out_elf_end;
 
 			dso__insert_symbol(self, f);
 			++nr;
@@ -459,19 +491,25 @@
 			f = symbol__new(plt_offset, shdr_plt.sh_entsize,
 					sympltname, self->sym_priv_size, 0, verbose);
 			if (!f)
-				return -1;
+				goto out_elf_end;
 
 			dso__insert_symbol(self, f);
 			++nr;
 		}
-	} else {
-		/*
-		 * TODO: There are still one more shdr_rel_plt.sh_type
-		 * I have to investigate, but probably should be ignored.
-		 */
 	}
 
-	return nr;
+	err = 0;
+out_elf_end:
+	elf_end(elf);
+out_close:
+	close(fd);
+
+	if (err == 0)
+		return nr;
+out:
+	fprintf(stderr, "%s: problems reading %s PLT info.\n",
+		__func__, self->name);
+	return 0;
 }
 
 static int dso__load_sym(struct dso *self, int fd, const char *name,
@@ -485,10 +523,9 @@
 	GElf_Shdr shdr;
 	Elf_Data *syms;
 	GElf_Sym sym;
-	Elf_Scn *sec, *sec_dynsym, *sec_strndx;
+	Elf_Scn *sec, *sec_strndx;
 	Elf *elf;
-	size_t dynsym_idx;
-	int nr = 0;
+	int nr = 0, kernel = !strcmp("[kernel]", self->name);
 
 	elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
 	if (elf == NULL) {
@@ -504,32 +541,11 @@
 		goto out_elf_end;
 	}
 
-	/*
-	 * We need to check if we have a .dynsym, so that we can handle the
-	 * .plt, synthesizing its symbols, that aren't on the symtabs (be it
-	 * .dynsym or .symtab)
-	 */
-	sec_dynsym = elf_section_by_name(elf, &ehdr, &shdr,
-					 ".dynsym", &dynsym_idx);
-	if (sec_dynsym != NULL) {
-		nr = dso__synthesize_plt_symbols(self, elf, &ehdr,
-						 sec_dynsym, &shdr,
-						 dynsym_idx, verbose);
-		if (nr < 0)
-			goto out_elf_end;
-	}
-
-	/*
-	 * But if we have a full .symtab (that is a superset of .dynsym) we
-	 * should add the symbols not in the .dynsyn
-	 */
 	sec = elf_section_by_name(elf, &ehdr, &shdr, ".symtab", NULL);
 	if (sec == NULL) {
-		if (sec_dynsym == NULL)
+		sec = elf_section_by_name(elf, &ehdr, &shdr, ".dynsym", NULL);
+		if (sec == NULL)
 			goto out_elf_end;
-
-		sec = sec_dynsym;
-		gelf_getshdr(sec, &shdr);
 	}
 
 	syms = elf_getdata(sec, NULL);
@@ -555,12 +571,17 @@
 	nr_syms = shdr.sh_size / shdr.sh_entsize;
 
 	memset(&sym, 0, sizeof(sym));
-	self->adjust_symbols = (ehdr.e_type == ET_EXEC ||
+	if (!kernel) {
+		self->adjust_symbols = (ehdr.e_type == ET_EXEC ||
 				elf_section_by_name(elf, &ehdr, &shdr,
 						     ".gnu.prelink_undo",
 						     NULL) != NULL);
+	} else self->adjust_symbols = 0;
+
 	elf_symtab__for_each_symbol(syms, nr_syms, index, sym) {
 		struct symbol *f;
+		const char *name;
+		char *demangled;
 		u64 obj_start;
 		struct section *section = NULL;
 		int is_label = elf_sym__is_label(&sym);
@@ -599,10 +620,19 @@
 				goto out_elf_end;
 			}
 		}
+		/*
+		 * We need to figure out if the object was created from C++ sources;
+		 * DWARF DW_compile_unit has this, but we don't always have access
+		 * to it...
+		 */
+		name = elf_sym__name(&sym, symstrs);
+		demangled = bfd_demangle(NULL, name, DMGL_PARAMS | DMGL_ANSI);
+		if (demangled != NULL)
+			name = demangled;
 
-		f = symbol__new(sym.st_value, sym.st_size,
-				elf_sym__name(&sym, symstrs),
+		f = symbol__new(sym.st_value, sym.st_size, name,
 				self->sym_priv_size, obj_start, verbose);
+		free(demangled);
 		if (!f)
 			goto out_elf_end;
 
@@ -668,6 +698,11 @@
 	if (!ret)
 		goto more;
 
+	if (ret > 0) {
+		int nr_plt = dso__synthesize_plt_symbols(self, verbose);
+		if (nr_plt > 0)
+			ret += nr_plt;
+	}
 out:
 	free(name);
 	return ret;
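
dso__load_sym() now runs every symbol name through bfd_demangle(), which is why the Makefile change above adds -lbfd. A minimal sketch of that call, reusing the same DMGL fallback defines as the patch; the mangled string is only an example, and the PACKAGE defines are a concession to newer bfd.h versions that refuse to be included without them:

#define PACKAGE "demo"			/* newer bfd.h insists on these      */
#define PACKAGE_VERSION "0"
#include <bfd.h>
#include <stdio.h>
#include <stdlib.h>

#ifndef DMGL_PARAMS
#define DMGL_PARAMS	(1 << 0)	/* include function arguments        */
#define DMGL_ANSI	(1 << 1)	/* include const, volatile, etc      */
#endif

/* Build with: gcc demangle-demo.c -lbfd (matching EXTLIBS = -lbfd above). */
int main(void)
{
	/* a typical mangled C++ name as it would appear in an ELF symtab */
	const char *mangled = "_ZNSt6vectorIiSaIiEE9push_backERKi";
	char *plain = bfd_demangle(NULL, mangled, DMGL_PARAMS | DMGL_ANSI);

	printf("%s -> %s\n", mangled, plain ? plain : mangled);
	free(plain);
	return 0;
}
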
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 7918cff..2f92b21 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -25,6 +25,7 @@
 	struct symbol    *(*find_symbol)(struct dso *, u64 ip);
 	unsigned int	 sym_priv_size;
 	unsigned char	 adjust_symbols;
+	unsigned char	 slen_calculated;
 	char		 name[0];
 };
 
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index b4be607..68fe157 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -50,6 +50,7 @@
 #include <unistd.h>
 #include <stdio.h>
 #include <sys/stat.h>
+#include <sys/statfs.h>
 #include <fcntl.h>
 #include <stddef.h>
 #include <stdlib.h>
@@ -80,6 +81,7 @@
 #include <netdb.h>
 #include <pwd.h>
 #include <inttypes.h>
+#include "../../../include/linux/magic.h"
 
 #ifndef NO_ICONV
 #include <iconv.h>