Merge master.kernel.org:/pub/scm/linux/kernel/git/lethal/sh-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/lethal/sh-2.6:
sh: Kill off I/O cruft for R7780RP.
sh: Revert lazy dcache writeback changes.
sh: Enable SM501 support for RTS7751R2D.
sh: Use L1_CACHE_BYTES for .data.cacheline_aligned.
sysctl: Support vdso_enabled sysctl on SH.
sh: Fix kernel thread stack corruption with preempt.
doc: Add SH to vdso and earlyprintk in kernel-parameters.txt
sh: Fix sigmask trampling in signal delivery.
sh: Clear UBC when not in use.
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 6a451f4..c3b1430 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -304,3 +304,15 @@
Who: Richard Purdie <rpurdie@rpsys.net>
---------------------------
+
+What: Wireless extensions over netlink (CONFIG_NET_WIRELESS_RTNETLINK)
+When: with the merge of wireless-dev, 2.6.22 or later
+Why: The option/code is
+ * not enabled on most kernels
+ * not required by any userspace tools (except an experimental one,
+ and even there only for some parts, others use ioctl)
+ * pointless since wext is no longer evolving and the ioctl
+ interface needs to be kept
+Who: Johannes Berg <johannes@sipsolutions.net>
+
+---------------------------
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 72af5de..5484ab5 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -41,6 +41,7 @@
2.11 /proc/sys/fs/mqueue - POSIX message queues filesystem
2.12 /proc/<pid>/oom_adj - Adjust the oom-killer score
2.13 /proc/<pid>/oom_score - Display current oom-killer score
+ 2.14 /proc/<pid>/io - Display the IO accounting fields
------------------------------------------------------------------------------
Preface
@@ -1990,3 +1991,107 @@
command to write value into these files, thereby changing the default settings
of the kernel.
------------------------------------------------------------------------------
+
+2.14 /proc/<pid>/io - Display the IO accounting fields
+-------------------------------------------------------
+
+This file contains IO statistics for each running process.
+
+Example
+-------
+
+test:/tmp # dd if=/dev/zero of=/tmp/test.dat &
+[1] 3828
+
+test:/tmp # cat /proc/3828/io
+rchar: 323934931
+wchar: 323929600
+syscr: 632687
+syscw: 632675
+read_bytes: 0
+write_bytes: 323932160
+cancelled_write_bytes: 0
+
+
+Description
+-----------
+
+rchar
+-----
+
+I/O counter: chars read
+The number of bytes which this task has caused to be read from storage. This
+is simply the sum of bytes which this process passed to read() and pread().
+It includes things like tty IO and it is unaffected by whether or not actual
+physical disk IO was required (the read might have been satisfied from
+pagecache)
+
+
+wchar
+-----
+
+I/O counter: chars written
+The number of bytes which this task has caused, or shall cause, to be written
+to disk. Similar caveats apply here as with rchar.
+
+
+syscr
+-----
+
+I/O counter: read syscalls
+Attempt to count the number of read I/O operations, i.e. syscalls like read()
+and pread().
+
+
+syscw
+-----
+
+I/O counter: write syscalls
+Attempt to count the number of write I/O operations, i.e. syscalls like
+write() and pwrite().
+
+
+read_bytes
+----------
+
+I/O counter: bytes read
+Attempt to count the number of bytes which this process really did cause to
+be fetched from the storage layer. Done at the submit_bio() level, so it is
+accurate for block-backed filesystems. <please add status regarding NFS and
+CIFS at a later time>
+
+
+write_bytes
+-----------
+
+I/O counter: bytes written
+Attempt to count the number of bytes which this process caused to be sent to
+the storage layer. This is done at page-dirtying time.
+
+
+cancelled_write_bytes
+---------------------
+
+The big inaccuracy here is truncate. If a process writes 1MB to a file and
+then deletes the file, it will in fact perform no writeout. But it will have
+been accounted as having caused 1MB of write.
+In other words: The number of bytes which this process caused to not happen,
+by truncating pagecache. A task can cause "negative" IO too. If this task
+truncates some dirty pagecache, some IO which another task has been accounted
+for (in its write_bytes) will not be happening. We _could_ just subtract that
+from the truncating task's write_bytes, but there is information loss in doing
+that.
+
+
+Note
+----
+
+At its current implementation state, this is a bit racy on 32-bit machines: if
+process A reads process B's /proc/pid/io while process B is updating one of
+those 64-bit counters, process A could see an intermediate result.
+
+
+More information about this can be found within the taskstats documentation in
+Documentation/accounting.
+
+------------------------------------------------------------------------------
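
As a rough userspace illustration of the counters documented above (a minimal
sketch, not part of this patch; the program name and argument handling are
assumptions, only the "<name>: <value>" per-line format comes from the text
above):

	/* print_pid_io.c - dump the per-task I/O accounting counters */
	#include <stdio.h>

	int main(int argc, char **argv)
	{
		char path[64], key[32];
		unsigned long long val;
		FILE *f;

		if (argc != 2) {
			fprintf(stderr, "usage: %s <pid>\n", argv[0]);
			return 1;
		}
		snprintf(path, sizeof(path), "/proc/%s/io", argv[1]);
		f = fopen(path, "r");
		if (!f) {
			perror(path);
			return 1;
		}
		/* each line is "<name>: <64-bit counter>" */
		while (fscanf(f, "%31[^:]: %llu\n", key, &val) == 2)
			printf("%-22s %llu\n", key, val);
		fclose(f);
		return 0;
	}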
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 757dd99..9141193 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1686,6 +1686,22 @@
stifb= [HW]
Format: bpp:<bpp1>[:<bpp2>[:<bpp3>...]]
+ sunrpc.pool_mode=
+ [NFS]
+ Control how the NFS server code allocates CPUs to
+ service thread pools. Depending on how many NICs
+ you have and where their interrupts are bound, this
+ option will affect which CPUs will do NFS serving.
+ Note: this parameter cannot be changed while the
+ NFS server is running.
+
+ auto the server chooses an appropriate mode
+ automatically using heuristics
+ global a single global pool contains all CPUs
+ percpu one pool for each CPU
+ pernode one pool for each NUMA node (equivalent
+ to global on non-NUMA machines)
+
swiotlb= [IA-64] Number of I/O TLB slabs
switches= [HW,M68k]
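
As a usage sketch (not part of this patch; it assumes sunrpc is built as a
module), booting with

	sunrpc.pool_mode=pernode

on a NUMA machine gives one NFS service thread pool per node; the same
setting can be applied at module load time:

	modprobe sunrpc pool_mode=pernode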
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
index c30ff1b..db398a64 100644
--- a/Documentation/sound/alsa/ALSA-Configuration.txt
+++ b/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -370,7 +370,9 @@
mpu_port - 0x300,0x310,0x320,0x330 = legacy port,
1 = integrated PCI port,
0 = disable (default)
- fm_port - 0x388 (default), 0 = disable (default)
+ fm_port - 0x388 = legacy port,
+ 1 = integrated PCI port (default),
+ 0 = disable
soft_ac3 - Software-conversion of raw SPDIF packets (model 033 only)
(default = 1)
joystick_port - Joystick port address (0 = disable, 1 = auto-detect)
@@ -895,10 +897,16 @@
can be adjusted. Appearing only when compiled with
$CONFIG_SND_DEBUG=y
- STAC9200/9205/9220/9221/9254
+ STAC9200/9205/9254
+ ref Reference board
+
+ STAC9220/9221
ref Reference board
3stack D945 3stack
5stack D945 5stack + SPDIF
+ macmini Intel Mac Mini
+ macbook Intel Mac Book
+ macbook-pro Intel Mac Book Pro
STAC9202/9250/9251
ref Reference board, base config
diff --git a/MAINTAINERS b/MAINTAINERS
index 1dfba85..9993b90 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2553,16 +2553,8 @@
S: Maintained
PARALLEL PORT SUPPORT
-P: Phil Blundell
-M: philb@gnu.org
-P: Tim Waugh
-M: tim@cyberelk.net
-P: David Campbell
-P: Andrea Arcangeli
-M: andrea@suse.de
L: linux-parport@lists.infradead.org
-W: http://people.redhat.com/twaugh/parport/
-S: Maintained
+S: Orphan
PARIDE DRIVERS FOR PARALLEL PORT IDE DEVICES
P: Tim Waugh
diff --git a/Makefile b/Makefile
index 66ee8b8..6393738 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 21
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
NAME = Homicidal Dwarf Hamster
# *DOCUMENTATION*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index ac2ffdc..e7baca2 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -21,6 +21,10 @@
config SYS_SUPPORTS_APM_EMULATION
bool
+config GENERIC_GPIO
+ bool
+ default n
+
config GENERIC_TIME
bool
default n
@@ -163,6 +167,7 @@
config ARCH_AT91
bool "Atmel AT91"
+ select GENERIC_GPIO
help
This enables support for systems based on the Atmel AT91RM9200
and AT91SAM9xxx processors.
@@ -304,6 +309,7 @@
bool "PXA2xx-based"
depends on MMU
select ARCH_MTD_XIP
+ select GENERIC_GPIO
select GENERIC_TIME
help
Support for Intel's PXA2XX processor line.
@@ -325,11 +331,13 @@
select ISA
select ARCH_DISCONTIGMEM_ENABLE
select ARCH_MTD_XIP
+ select GENERIC_GPIO
help
Support for StrongARM 11x0 based boards.
config ARCH_S3C2410
bool "Samsung S3C2410, S3C2412, S3C2413, S3C2440, S3C2442, S3C2443"
+ select GENERIC_GPIO
help
Samsung S3C2410X CPU based systems, such as the Simtec Electronics
BAST (<http://www.simtec.co.uk/products/EB110ITX/>), the IPAQ 1940 or
@@ -354,6 +362,7 @@
config ARCH_OMAP
bool "TI OMAP"
+ select GENERIC_GPIO
help
Support for TI's OMAP platform (OMAP1 and OMAP2).
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index bb059a4..ce4013a 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -22,6 +22,10 @@
config UID16
bool
+config GENERIC_GPIO
+ bool
+ default y
+
config GENERIC_HARDIRQS
bool
default y
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 2f76725..27e8453 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -220,11 +220,11 @@
config VMI
bool "VMI Paravirt-ops support"
- depends on PARAVIRT && !NO_HZ
- default y
+ depends on PARAVIRT
help
- VMI provides a paravirtualized interface to multiple hypervisors
- include VMware ESX server and Xen by connecting to a ROM module
+ VMI provides a paravirtualized interface to the VMware ESX server
+ (it could be used by other hypervisors in theory too, but is not
+ at the moment), by linking the kernel to a GPL-ed ROM module
provided by the hypervisor.
config ACPI_SRAT
@@ -893,7 +893,6 @@
config COMPAT_VDSO
bool "Compat VDSO support"
default y
- depends on !PARAVIRT
help
Map the VDSO to the predictable old-style address too.
---help---
@@ -1287,12 +1286,3 @@
config KTIME_SCALAR
bool
default y
-
-config NO_IDLE_HZ
- bool
- depends on PARAVIRT
- default y
- help
- Switches the regular HZ timer off when the system is going idle.
- This helps a hypervisor detect that the Linux system is idle,
- reducing the overhead of idle systems.
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 7a2c9cb..2383bcf 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -493,8 +493,15 @@
/* No broadcast on UP ! */
if (num_possible_cpus() == 1)
return;
- } else
- lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
+ } else {
+ /*
+ * If nmi_watchdog is set to IO_APIC, we need the
+ * PIT/HPET going. Otherwise register lapic as a dummy
+ * device.
+ */
+ if (nmi_watchdog != NMI_IO_APIC)
+ lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
+ }
/* Setup the lapic or request the broadcast */
setup_APIC_timer();
diff --git a/arch/i386/kernel/hpet.c b/arch/i386/kernel/hpet.c
index e1006b7..f3ab61e 100644
--- a/arch/i386/kernel/hpet.c
+++ b/arch/i386/kernel/hpet.c
@@ -201,12 +201,30 @@
}
/*
+ * Clock source related code
+ */
+static cycle_t read_hpet(void)
+{
+ return (cycle_t)hpet_readl(HPET_COUNTER);
+}
+
+static struct clocksource clocksource_hpet = {
+ .name = "hpet",
+ .rating = 250,
+ .read = read_hpet,
+ .mask = HPET_MASK,
+ .shift = HPET_SHIFT,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+/*
* Try to setup the HPET timer
*/
int __init hpet_enable(void)
{
unsigned long id;
uint64_t hpet_freq;
+ u64 tmp;
if (!is_hpet_capable())
return 0;
@@ -253,6 +271,25 @@
/* Start the counter */
hpet_start_counter();
+ /* Initialize and register HPET clocksource
+ *
+ * hpet period is in femtoseconds per cycle
+ * so we need to convert this to ns/cyc units
+ * approximated by mult/2^shift
+ *
+ * fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift
+ * fsec/cyc * 1ns/1000000fsec * 2^shift = mult
+ * fsec/cyc * 2^shift * 1nsec/1000000fsec = mult
+ * (fsec/cyc << shift)/1000000 = mult
+ * (hpet_period << shift)/FSEC_PER_NSEC = mult
+ */
+ tmp = (u64)hpet_period << HPET_SHIFT;
+ do_div(tmp, FSEC_PER_NSEC);
+ clocksource_hpet.mult = (u32)tmp;
+
+ clocksource_register(&clocksource_hpet);
+
+
if (id & HPET_ID_LEGSUP) {
hpet_enable_int();
hpet_reserve_platform_timers(id);
@@ -273,49 +310,6 @@
return 0;
}
-/*
- * Clock source related code
- */
-static cycle_t read_hpet(void)
-{
- return (cycle_t)hpet_readl(HPET_COUNTER);
-}
-
-static struct clocksource clocksource_hpet = {
- .name = "hpet",
- .rating = 250,
- .read = read_hpet,
- .mask = HPET_MASK,
- .shift = HPET_SHIFT,
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-static int __init init_hpet_clocksource(void)
-{
- u64 tmp;
-
- if (!hpet_virt_address)
- return -ENODEV;
-
- /*
- * hpet period is in femto seconds per cycle
- * so we need to convert this to ns/cyc units
- * aproximated by mult/2^shift
- *
- * fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift
- * fsec/cyc * 1ns/1000000fsec * 2^shift = mult
- * fsec/cyc * 2^shift * 1nsec/1000000fsec = mult
- * (fsec/cyc << shift)/1000000 = mult
- * (hpet_period << shift)/FSEC_PER_NSEC = mult
- */
- tmp = (u64)hpet_period << HPET_SHIFT;
- do_div(tmp, FSEC_PER_NSEC);
- clocksource_hpet.mult = (u32)tmp;
-
- return clocksource_register(&clocksource_hpet);
-}
-
-module_init(init_hpet_clocksource);
#ifdef CONFIG_HPET_EMULATE_RTC
diff --git a/arch/i386/kernel/i8253.c b/arch/i386/kernel/i8253.c
index a6bc7bb..5cbb776 100644
--- a/arch/i386/kernel/i8253.c
+++ b/arch/i386/kernel/i8253.c
@@ -195,4 +195,4 @@
clocksource_pit.mult = clocksource_hz2mult(CLOCK_TICK_RATE, 20);
return clocksource_register(&clocksource_pit);
}
-module_init(init_pit_clocksource);
+arch_initcall(init_pit_clocksource);
diff --git a/arch/i386/kernel/paravirt.c b/arch/i386/kernel/paravirt.c
index c156ecf..2ec331e 100644
--- a/arch/i386/kernel/paravirt.c
+++ b/arch/i386/kernel/paravirt.c
@@ -32,6 +32,7 @@
#include <asm/fixmap.h>
#include <asm/apic.h>
#include <asm/tlbflush.h>
+#include <asm/timer.h>
/* nop stub */
static void native_nop(void)
@@ -493,7 +494,7 @@
.memory_setup = machine_specific_memory_setup,
.get_wallclock = native_get_wallclock,
.set_wallclock = native_set_wallclock,
- .time_init = time_init_hook,
+ .time_init = hpet_time_init,
.init_IRQ = native_init_IRQ,
.cpuid = native_cpuid,
@@ -520,6 +521,8 @@
.write_msr = native_write_msr,
.read_tsc = native_read_tsc,
.read_pmc = native_read_pmc,
+ .get_scheduled_cycles = native_read_tsc,
+ .get_cpu_khz = native_calculate_cpu_khz,
.load_tr_desc = native_load_tr_desc,
.set_ldt = native_set_ldt,
.load_gdt = native_load_gdt,
@@ -535,7 +538,6 @@
.set_iopl_mask = native_set_iopl_mask,
.io_delay = native_io_delay,
- .const_udelay = __const_udelay,
#ifdef CONFIG_X86_LOCAL_APIC
.apic_write = native_apic_write,
@@ -550,6 +552,8 @@
.flush_tlb_kernel = native_flush_tlb_global,
.flush_tlb_single = native_flush_tlb_single,
+ .map_pt_hook = (void *)native_nop,
+
.alloc_pt = (void *)native_nop,
.alloc_pd = (void *)native_nop,
.alloc_pd_clone = (void *)native_nop,
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 122623d..698c24f 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -657,5 +657,4 @@
conswitchp = &dummy_con;
#endif
#endif
- tsc_init();
}
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 48bfcaa..4ff55e6 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -33,11 +33,6 @@
* Dave Jones : Report invalid combinations of Athlon CPUs.
* Rusty Russell : Hacked into shape for new "hotplug" boot process. */
-
-/* SMP boot always wants to use real time delay to allow sufficient time for
- * the APs to come online */
-#define USE_REAL_TIME_DELAY
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -50,6 +45,7 @@
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
+#include <linux/nmi.h>
#include <linux/delay.h>
#include <linux/mc146818rtc.h>
@@ -1283,8 +1279,9 @@
int __cpuinit __cpu_up(unsigned int cpu)
{
+ unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
- int ret=0;
+ int ret = 0;
/*
* We do warm boot only on cpus that had booted earlier
@@ -1302,23 +1299,25 @@
/* In case one didn't come up */
if (!cpu_isset(cpu, cpu_callin_map)) {
printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu);
- local_irq_enable();
return -EIO;
}
- local_irq_enable();
-
per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
/* Unleash the CPU! */
cpu_set(cpu, smp_commenced_mask);
/*
- * Check TSC synchronization with the AP:
+ * Check TSC synchronization with the AP (keep irqs disabled
+ * while doing so):
*/
+ local_irq_save(flags);
check_tsc_sync_source(cpu);
+ local_irq_restore(flags);
- while (!cpu_isset(cpu, cpu_online_map))
+ while (!cpu_isset(cpu, cpu_online_map)) {
cpu_relax();
+ touch_nmi_watchdog();
+ }
#ifdef CONFIG_X86_GENERICARCH
if (num_online_cpus() > 8 && genapic == &apic_default)
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index a535005..94e5cb0 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -262,14 +262,23 @@
extern void (*late_time_init)(void);
/* Duplicate of time_init() below, with hpet_enable part added */
-static void __init hpet_time_init(void)
+void __init hpet_time_init(void)
{
if (!hpet_enable())
setup_pit_timer();
- do_time_init();
+ time_init_hook();
}
+/*
+ * This is called directly from init code; we must delay timer setup in the
+ * HPET case as we can't make the decision to turn on HPET this early in the
+ * boot process.
+ *
+ * The chosen time_init function will usually be hpet_time_init, above, but
+ * in the case of virtual hardware, an alternative function may be substituted.
+ */
void __init time_init(void)
{
- late_time_init = hpet_time_init;
+ tsc_init();
+ late_time_init = choose_time_init();
}
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index 3082a41..602660d 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -14,6 +14,7 @@
#include <asm/delay.h>
#include <asm/tsc.h>
#include <asm/io.h>
+#include <asm/timer.h>
#include "mach_timer.h"
@@ -23,7 +24,6 @@
* an extra value to store the TSC freq
*/
unsigned int tsc_khz;
-unsigned long long (*custom_sched_clock)(void);
int tsc_disable;
@@ -102,9 +102,6 @@
{
unsigned long long this_offset;
- if (unlikely(custom_sched_clock))
- return (*custom_sched_clock)();
-
/*
* Fall back to jiffies if there's no TSC available:
*/
@@ -113,13 +110,13 @@
return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
/* read the Time Stamp Counter: */
- rdtscll(this_offset);
+ get_scheduled_cycles(this_offset);
/* return the value in ns */
return cycles_2_ns(this_offset);
}
-static unsigned long calculate_cpu_khz(void)
+unsigned long native_calculate_cpu_khz(void)
{
unsigned long long start, end;
unsigned long count;
@@ -186,34 +183,6 @@
EXPORT_SYMBOL(recalibrate_cpu_khz);
-void __init tsc_init(void)
-{
- if (!cpu_has_tsc || tsc_disable)
- goto out_no_tsc;
-
- cpu_khz = calculate_cpu_khz();
- tsc_khz = cpu_khz;
-
- if (!cpu_khz)
- goto out_no_tsc;
-
- printk("Detected %lu.%03lu MHz processor.\n",
- (unsigned long)cpu_khz / 1000,
- (unsigned long)cpu_khz % 1000);
-
- set_cyc2ns_scale(cpu_khz);
- use_tsc_delay();
- return;
-
-out_no_tsc:
- /*
- * Set the tsc_disable flag if there's no TSC support, this
- * makes it a fast flag for the kernel to see whether it
- * should be using the TSC.
- */
- tsc_disable = 1;
-}
-
#ifdef CONFIG_CPU_FREQ
/*
@@ -383,28 +352,47 @@
static inline void check_geode_tsc_reliable(void) { }
#endif
-static int __init init_tsc_clocksource(void)
+
+void __init tsc_init(void)
{
+ if (!cpu_has_tsc || tsc_disable)
+ goto out_no_tsc;
- if (cpu_has_tsc && tsc_khz && !tsc_disable) {
- /* check blacklist */
- dmi_check_system(bad_tsc_dmi_table);
+ cpu_khz = calculate_cpu_khz();
+ tsc_khz = cpu_khz;
- unsynchronized_tsc();
- check_geode_tsc_reliable();
- current_tsc_khz = tsc_khz;
- clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
+ if (!cpu_khz)
+ goto out_no_tsc;
+
+ printk("Detected %lu.%03lu MHz processor.\n",
+ (unsigned long)cpu_khz / 1000,
+ (unsigned long)cpu_khz % 1000);
+
+ set_cyc2ns_scale(cpu_khz);
+ use_tsc_delay();
+
+ /* Check and install the TSC clocksource */
+ dmi_check_system(bad_tsc_dmi_table);
+
+ unsynchronized_tsc();
+ check_geode_tsc_reliable();
+ current_tsc_khz = tsc_khz;
+ clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
clocksource_tsc.shift);
- /* lower the rating if we already know its unstable: */
- if (check_tsc_unstable()) {
- clocksource_tsc.rating = 0;
- clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
- }
-
- return clocksource_register(&clocksource_tsc);
+ /* lower the rating if we already know it's unstable: */
+ if (check_tsc_unstable()) {
+ clocksource_tsc.rating = 0;
+ clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
}
+ clocksource_register(&clocksource_tsc);
- return 0;
+ return;
+
+out_no_tsc:
+ /*
+ * Set the tsc_disable flag if there's no TSC support, this
+ * makes it a fast flag for the kernel to see whether it
+ * should be using the TSC.
+ */
+ tsc_disable = 1;
}
-
-module_init(init_tsc_clocksource);
diff --git a/arch/i386/kernel/vmi.c b/arch/i386/kernel/vmi.c
index bb5a7ab..fbf45fa 100644
--- a/arch/i386/kernel/vmi.c
+++ b/arch/i386/kernel/vmi.c
@@ -35,6 +35,7 @@
#include <asm/processor.h>
#include <asm/timer.h>
#include <asm/vmi_time.h>
+#include <asm/kmap_types.h>
/* Convenient for calling VMI functions indirectly in the ROM */
typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
@@ -48,12 +49,13 @@
static struct vrom_header *vmi_rom;
static int license_gplok;
-static int disable_nodelay;
static int disable_pge;
static int disable_pse;
static int disable_sep;
static int disable_tsc;
static int disable_mtrr;
+static int disable_noidle;
+static int disable_vmi_timer;
/* Cached VMI operations */
struct {
@@ -255,7 +257,6 @@
}
/* For NO_IDLE_HZ, we stop the clock when halting the kernel */
-#ifdef CONFIG_NO_IDLE_HZ
static fastcall void vmi_safe_halt(void)
{
int idle = vmi_stop_hz_timer();
@@ -266,7 +267,6 @@
local_irq_enable();
}
}
-#endif
#ifdef CONFIG_DEBUG_PAGE_TYPE
@@ -371,6 +371,24 @@
#define vmi_check_page_type(p,t) do { } while (0)
#endif
+static void vmi_map_pt_hook(int type, pte_t *va, u32 pfn)
+{
+ /*
+ * Internally, the VMI ROM must map virtual addresses to physical
+ * addresses for processing MMU updates. By the time MMU updates
+ * are issued, this information is typically already lost.
+ * Fortunately, the VMI provides a cache of mapping slots for active
+ * page tables.
+ *
+ * We use slot zero for the linear mapping of physical memory, and
+ * in HIGHPTE kernels, slot 1 and 2 for KM_PTE0 and KM_PTE1.
+ *
+ * args: SLOT VA COUNT PFN
+ */
+ BUG_ON(type != KM_PTE0 && type != KM_PTE1);
+ vmi_ops.set_linear_mapping((type - KM_PTE0)+1, (u32)va, 1, pfn);
+}
+
static void vmi_allocate_pt(u32 pfn)
{
vmi_set_page_type(pfn, VMI_PAGE_L1);
@@ -508,13 +526,14 @@
#endif
#ifdef CONFIG_SMP
-struct vmi_ap_state ap;
extern void setup_pda(void);
-static void __init /* XXX cpu hotplug */
+static void __devinit
vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
unsigned long start_esp)
{
+ struct vmi_ap_state ap;
+
/* Default everything to zero. This is fine for most GPRs. */
memset(&ap, 0, sizeof(struct vmi_ap_state));
@@ -553,7 +572,7 @@
/* Protected mode, paging, AM, WP, NE, MP. */
ap.cr0 = 0x80050023;
ap.cr4 = mmu_cr4_features;
- vmi_ops.set_initial_ap_state(__pa(&ap), phys_apicid);
+ vmi_ops.set_initial_ap_state((u32)&ap, phys_apicid);
}
#endif
@@ -645,12 +664,12 @@
void vmi_bringup(void)
{
/* We must establish the lowmem mapping for MMU ops to work */
- if (vmi_rom)
+ if (vmi_ops.set_linear_mapping)
vmi_ops.set_linear_mapping(0, __PAGE_OFFSET, max_low_pfn, 0);
}
/*
- * Return a pointer to the VMI function or a NOP stub
+ * Return a pointer to a VMI function or NULL if unimplemented
*/
static void *vmi_get_function(int vmicall)
{
@@ -661,12 +680,13 @@
if (rel->type == VMI_RELOCATION_CALL_REL)
return (void *)rel->eip;
else
- return (void *)vmi_nop;
+ return NULL;
}
/*
* Helper macro for making the VMI paravirt-ops fill code readable.
- * For unimplemented operations, fall back to default.
+ * For unimplemented operations, fall back to default, unless nop
+ * is returned by the ROM.
*/
#define para_fill(opname, vmicall) \
do { \
@@ -675,9 +695,29 @@
if (rel->type != VMI_RELOCATION_NONE) { \
BUG_ON(rel->type != VMI_RELOCATION_CALL_REL); \
paravirt_ops.opname = (void *)rel->eip; \
+ } else if (rel->type == VMI_RELOCATION_NOP) \
+ paravirt_ops.opname = (void *)vmi_nop; \
+} while (0)
+
+/*
+ * Helper macro for making the VMI paravirt-ops fill code readable.
+ * For cached operations which do not match the VMI ROM ABI and must
+ * go through a translation stub. Ignore NOPs, since it is not clear
+ * a NOP VMI function corresponds to a NOP paravirt-op when the
+ * functions are not in 1-1 correspondence.
+ */
+#define para_wrap(opname, wrapper, cache, vmicall) \
+do { \
+ reloc = call_vrom_long_func(vmi_rom, get_reloc, \
+ VMI_CALL_##vmicall); \
+ BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL); \
+ if (rel->type == VMI_RELOCATION_CALL_REL) { \
+ paravirt_ops.opname = wrapper; \
+ vmi_ops.cache = (void *)rel->eip; \
} \
} while (0)
+
/*
* Activate the VMI interface and switch into paravirtualized mode
*/
@@ -714,13 +754,8 @@
* rdpmc is not yet used in Linux
*/
- /* CPUID is special, so very special */
- reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_CPUID);
- if (rel->type != VMI_RELOCATION_NONE) {
- BUG_ON(rel->type != VMI_RELOCATION_CALL_REL);
- vmi_ops.cpuid = (void *)rel->eip;
- paravirt_ops.cpuid = vmi_cpuid;
- }
+ /* CPUID is special, so very special it gets wrapped like a present */
+ para_wrap(cpuid, vmi_cpuid, cpuid, CPUID);
para_fill(clts, CLTS);
para_fill(get_debugreg, GetDR);
@@ -737,38 +772,26 @@
para_fill(restore_fl, SetInterruptMask);
para_fill(irq_disable, DisableInterrupts);
para_fill(irq_enable, EnableInterrupts);
+
/* irq_save_disable !!! sheer pain */
patch_offset(&irq_save_disable_callout[IRQ_PATCH_INT_MASK],
(char *)paravirt_ops.save_fl);
patch_offset(&irq_save_disable_callout[IRQ_PATCH_DISABLE],
(char *)paravirt_ops.irq_disable);
-#ifndef CONFIG_NO_IDLE_HZ
- para_fill(safe_halt, Halt);
-#else
- vmi_ops.halt = vmi_get_function(VMI_CALL_Halt);
- paravirt_ops.safe_halt = vmi_safe_halt;
-#endif
+
para_fill(wbinvd, WBINVD);
+ para_fill(read_tsc, RDTSC);
+
+ /* The following we emulate with trap and emulate for now */
/* paravirt_ops.read_msr = vmi_rdmsr */
/* paravirt_ops.write_msr = vmi_wrmsr */
- para_fill(read_tsc, RDTSC);
/* paravirt_ops.rdpmc = vmi_rdpmc */
- /* TR interface doesn't pass TR value */
- reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_SetTR);
- if (rel->type != VMI_RELOCATION_NONE) {
- BUG_ON(rel->type != VMI_RELOCATION_CALL_REL);
- vmi_ops.set_tr = (void *)rel->eip;
- paravirt_ops.load_tr_desc = vmi_set_tr;
- }
+ /* TR interface doesn't pass TR value, wrap */
+ para_wrap(load_tr_desc, vmi_set_tr, set_tr, SetTR);
/* LDT is special, too */
- reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_SetLDT);
- if (rel->type != VMI_RELOCATION_NONE) {
- BUG_ON(rel->type != VMI_RELOCATION_CALL_REL);
- vmi_ops._set_ldt = (void *)rel->eip;
- paravirt_ops.set_ldt = vmi_set_ldt;
- }
+ para_wrap(set_ldt, vmi_set_ldt, _set_ldt, SetLDT);
para_fill(load_gdt, SetGDT);
para_fill(load_idt, SetIDT);
@@ -779,28 +802,14 @@
para_fill(write_ldt_entry, WriteLDTEntry);
para_fill(write_gdt_entry, WriteGDTEntry);
para_fill(write_idt_entry, WriteIDTEntry);
- reloc = call_vrom_long_func(vmi_rom, get_reloc,
- VMI_CALL_UpdateKernelStack);
- if (rel->type != VMI_RELOCATION_NONE) {
- BUG_ON(rel->type != VMI_RELOCATION_CALL_REL);
- vmi_ops.set_kernel_stack = (void *)rel->eip;
- paravirt_ops.load_esp0 = vmi_load_esp0;
- }
-
+ para_wrap(load_esp0, vmi_load_esp0, set_kernel_stack, UpdateKernelStack);
para_fill(set_iopl_mask, SetIOPLMask);
- paravirt_ops.io_delay = (void *)vmi_nop;
- if (!disable_nodelay) {
- paravirt_ops.const_udelay = (void *)vmi_nop;
- }
-
+ para_fill(io_delay, IODelay);
para_fill(set_lazy_mode, SetLazyMode);
- reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_FlushTLB);
- if (rel->type != VMI_RELOCATION_NONE) {
- vmi_ops.flush_tlb = (void *)rel->eip;
- paravirt_ops.flush_tlb_user = vmi_flush_tlb_user;
- paravirt_ops.flush_tlb_kernel = vmi_flush_tlb_kernel;
- }
+ /* user and kernel flush are just handled with different flags to FlushTLB */
+ para_wrap(flush_tlb_user, vmi_flush_tlb_user, flush_tlb, FlushTLB);
+ para_wrap(flush_tlb_kernel, vmi_flush_tlb_kernel, flush_tlb, FlushTLB);
para_fill(flush_tlb_single, InvalPage);
/*
@@ -815,27 +824,40 @@
vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxE);
vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxE);
#endif
- vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);
- vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
- vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage);
- paravirt_ops.alloc_pt = vmi_allocate_pt;
- paravirt_ops.alloc_pd = vmi_allocate_pd;
- paravirt_ops.alloc_pd_clone = vmi_allocate_pd_clone;
- paravirt_ops.release_pt = vmi_release_pt;
- paravirt_ops.release_pd = vmi_release_pd;
- paravirt_ops.set_pte = vmi_set_pte;
- paravirt_ops.set_pte_at = vmi_set_pte_at;
- paravirt_ops.set_pmd = vmi_set_pmd;
- paravirt_ops.pte_update = vmi_update_pte;
- paravirt_ops.pte_update_defer = vmi_update_pte_defer;
+ if (vmi_ops.set_pte) {
+ paravirt_ops.set_pte = vmi_set_pte;
+ paravirt_ops.set_pte_at = vmi_set_pte_at;
+ paravirt_ops.set_pmd = vmi_set_pmd;
#ifdef CONFIG_X86_PAE
- paravirt_ops.set_pte_atomic = vmi_set_pte_atomic;
- paravirt_ops.set_pte_present = vmi_set_pte_present;
- paravirt_ops.set_pud = vmi_set_pud;
- paravirt_ops.pte_clear = vmi_pte_clear;
- paravirt_ops.pmd_clear = vmi_pmd_clear;
+ paravirt_ops.set_pte_atomic = vmi_set_pte_atomic;
+ paravirt_ops.set_pte_present = vmi_set_pte_present;
+ paravirt_ops.set_pud = vmi_set_pud;
+ paravirt_ops.pte_clear = vmi_pte_clear;
+ paravirt_ops.pmd_clear = vmi_pmd_clear;
#endif
+ }
+
+ if (vmi_ops.update_pte) {
+ paravirt_ops.pte_update = vmi_update_pte;
+ paravirt_ops.pte_update_defer = vmi_update_pte_defer;
+ }
+
+ vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
+ if (vmi_ops.allocate_page) {
+ paravirt_ops.alloc_pt = vmi_allocate_pt;
+ paravirt_ops.alloc_pd = vmi_allocate_pd;
+ paravirt_ops.alloc_pd_clone = vmi_allocate_pd_clone;
+ }
+
+ vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage);
+ if (vmi_ops.release_page) {
+ paravirt_ops.release_pt = vmi_release_pt;
+ paravirt_ops.release_pd = vmi_release_pd;
+ }
+ para_wrap(map_pt_hook, vmi_map_pt_hook, set_linear_mapping,
+ SetLinearMapping);
+
/*
* These MUST always be patched. Don't support indirect jumps
* through these operations, as the VMI interface may use either
@@ -847,21 +869,20 @@
paravirt_ops.iret = (void *)0xbadbab0;
#ifdef CONFIG_SMP
- paravirt_ops.startup_ipi_hook = vmi_startup_ipi_hook;
- vmi_ops.set_initial_ap_state = vmi_get_function(VMI_CALL_SetInitialAPState);
+ para_wrap(startup_ipi_hook, vmi_startup_ipi_hook, set_initial_ap_state, SetInitialAPState);
#endif
#ifdef CONFIG_X86_LOCAL_APIC
- paravirt_ops.apic_read = vmi_get_function(VMI_CALL_APICRead);
- paravirt_ops.apic_write = vmi_get_function(VMI_CALL_APICWrite);
- paravirt_ops.apic_write_atomic = vmi_get_function(VMI_CALL_APICWrite);
+ para_fill(apic_read, APICRead);
+ para_fill(apic_write, APICWrite);
+ para_fill(apic_write_atomic, APICWrite);
#endif
/*
* Check for VMI timer functionality by probing for a cycle frequency method
*/
reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_GetCycleFrequency);
- if (rel->type != VMI_RELOCATION_NONE) {
+ if (!disable_vmi_timer && rel->type != VMI_RELOCATION_NONE) {
vmi_timer_ops.get_cycle_frequency = (void *)rel->eip;
vmi_timer_ops.get_cycle_counter =
vmi_get_function(VMI_CALL_GetCycleCounter);
@@ -879,9 +900,22 @@
paravirt_ops.setup_boot_clock = vmi_timer_setup_boot_alarm;
paravirt_ops.setup_secondary_clock = vmi_timer_setup_secondary_alarm;
#endif
- custom_sched_clock = vmi_sched_clock;
+ paravirt_ops.get_scheduled_cycles = vmi_get_sched_cycles;
+ paravirt_ops.get_cpu_khz = vmi_cpu_khz;
+
+ /* We have true wallclock functions; disable CMOS clock sync */
+ no_sync_cmos_clock = 1;
+ } else {
+ disable_noidle = 1;
+ disable_vmi_timer = 1;
}
+ /* No idle HZ mode only works if VMI timer and no idle is enabled */
+ if (disable_noidle || disable_vmi_timer)
+ para_fill(safe_halt, Halt);
+ else
+ para_wrap(safe_halt, vmi_safe_halt, halt, Halt);
+
/*
* Alternative instruction rewriting doesn't happen soon enough
* to convert VMI_IRET to a call instead of a jump; so we have
@@ -914,7 +948,9 @@
local_irq_save(flags);
activate_vmi();
-#ifdef CONFIG_SMP
+
+#ifdef CONFIG_X86_IO_APIC
+ /* This is virtual hardware; timer routing is wired correctly */
no_timer_check = 1;
#endif
local_irq_restore(flags & X86_EFLAGS_IF);
@@ -925,9 +961,7 @@
if (!arg)
return -EINVAL;
- if (!strcmp(arg, "disable_nodelay"))
- disable_nodelay = 1;
- else if (!strcmp(arg, "disable_pge")) {
+ if (!strcmp(arg, "disable_pge")) {
clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability);
disable_pge = 1;
} else if (!strcmp(arg, "disable_pse")) {
@@ -942,7 +976,11 @@
} else if (!strcmp(arg, "disable_mtrr")) {
clear_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability);
disable_mtrr = 1;
- }
+ } else if (!strcmp(arg, "disable_timer")) {
+ disable_vmi_timer = 1;
+ disable_noidle = 1;
+ } else if (!strcmp(arg, "disable_noidle"))
+ disable_noidle = 1;
return 0;
}
diff --git a/arch/i386/kernel/vmitime.c b/arch/i386/kernel/vmitime.c
index 76d2adc..9dfb177 100644
--- a/arch/i386/kernel/vmitime.c
+++ b/arch/i386/kernel/vmitime.c
@@ -123,12 +123,10 @@
static irqreturn_t vmi_timer_interrupt(int irq, void *dev_id);
static struct irqaction vmi_timer_irq = {
- vmi_timer_interrupt,
- SA_INTERRUPT,
- CPU_MASK_NONE,
- "VMI-alarm",
- NULL,
- NULL
+ .handler = vmi_timer_interrupt,
+ .flags = IRQF_DISABLED,
+ .mask = CPU_MASK_NONE,
+ .name = "VMI-alarm",
};
/* Alarm rate */
@@ -153,13 +151,6 @@
ts->tv_sec = wallclock;
}
-static void update_xtime_from_wallclock(void)
-{
- struct timespec ts;
- vmi_get_wallclock_ts(&ts);
- do_settimeofday(&ts);
-}
-
unsigned long vmi_get_wallclock(void)
{
struct timespec ts;
@@ -172,11 +163,20 @@
return -1;
}
-unsigned long long vmi_sched_clock(void)
+unsigned long long vmi_get_sched_cycles(void)
{
return read_available_cycles();
}
+unsigned long vmi_cpu_khz(void)
+{
+ unsigned long long khz;
+
+ khz = vmi_timer_ops.get_cycle_frequency();
+ (void)do_div(khz, 1000);
+ return khz;
+}
+
void __init vmi_time_init(void)
{
unsigned long long cycles_per_sec, cycles_per_msec;
@@ -188,25 +188,16 @@
set_intr_gate(LOCAL_TIMER_VECTOR, apic_vmi_timer_interrupt);
#endif
- no_sync_cmos_clock = 1;
-
- vmi_get_wallclock_ts(&xtime);
- set_normalized_timespec(&wall_to_monotonic,
- -xtime.tv_sec, -xtime.tv_nsec);
-
real_cycles_accounted_system = read_real_cycles();
- update_xtime_from_wallclock();
per_cpu(process_times_cycles_accounted_cpu, 0) = read_available_cycles();
cycles_per_sec = vmi_timer_ops.get_cycle_frequency();
-
cycles_per_jiffy = cycles_per_sec;
(void)do_div(cycles_per_jiffy, HZ);
cycles_per_alarm = cycles_per_sec;
(void)do_div(cycles_per_alarm, alarm_hz);
cycles_per_msec = cycles_per_sec;
(void)do_div(cycles_per_msec, 1000);
- cpu_khz = cycles_per_msec;
printk(KERN_WARNING "VMI timer cycles/sec = %llu ; cycles/jiffy = %llu ;"
"cycles/alarm = %llu\n", cycles_per_sec, cycles_per_jiffy,
@@ -250,7 +241,7 @@
/* Initialize the time accounting variables for an AP on an SMP system.
* Also, set the local alarm for the AP. */
-void __init vmi_timer_setup_secondary_alarm(void)
+void __devinit vmi_timer_setup_secondary_alarm(void)
{
int cpu = smp_processor_id();
@@ -276,16 +267,13 @@
cycles_not_accounted = cur_real_cycles - real_cycles_accounted_system;
while (cycles_not_accounted >= cycles_per_jiffy) {
- /* systems wide jiffies and wallclock. */
+ /* system-wide jiffies. */
do_timer(1);
cycles_not_accounted -= cycles_per_jiffy;
real_cycles_accounted_system += cycles_per_jiffy;
}
- if (vmi_timer_ops.wallclock_updated())
- update_xtime_from_wallclock();
-
write_sequnlock(&xtime_lock);
}
@@ -380,7 +368,6 @@
unsigned long seq, next;
unsigned long long real_cycles_expiry;
int cpu = smp_processor_id();
- int idle;
BUG_ON(!irqs_disabled());
if (sysctl_hz_timer != 0)
@@ -388,13 +375,13 @@
cpu_set(cpu, nohz_cpu_mask);
smp_mb();
+
if (rcu_needs_cpu(cpu) || local_softirq_pending() ||
- (next = next_timer_interrupt(), time_before_eq(next, jiffies))) {
+ (next = next_timer_interrupt(),
+ time_before_eq(next, jiffies + HZ/CONFIG_VMI_ALARM_HZ))) {
cpu_clear(cpu, nohz_cpu_mask);
- next = jiffies;
- idle = 0;
- } else
- idle = 1;
+ return 0;
+ }
/* Convert jiffies to the real cycle counter. */
do {
@@ -404,17 +391,13 @@
} while (read_seqretry(&xtime_lock, seq));
/* This cpu is going idle. Disable the periodic alarm. */
- if (idle) {
- vmi_timer_ops.cancel_alarm(VMI_CYCLES_AVAILABLE);
- per_cpu(idle_start_jiffies, cpu) = jiffies;
- }
-
+ vmi_timer_ops.cancel_alarm(VMI_CYCLES_AVAILABLE);
+ per_cpu(idle_start_jiffies, cpu) = jiffies;
/* Set the real time alarm to expire at the next event. */
vmi_timer_ops.set_alarm(
- VMI_ALARM_WIRING | VMI_ALARM_IS_ONESHOT | VMI_CYCLES_REAL,
- real_cycles_expiry, 0);
-
- return idle;
+ VMI_ALARM_WIRING | VMI_ALARM_IS_ONESHOT | VMI_CYCLES_REAL,
+ real_cycles_expiry, 0);
+ return 1;
}
static void vmi_reenable_hz_timer(int cpu)
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index d430d36..0afb4fe 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -1267,6 +1267,10 @@
struct getdents32_callback buf;
int error;
+ error = -EFAULT;
+ if (!access_ok(VERIFY_WRITE, dirent, count))
+ goto out;
+
error = -EBADF;
file = fget(fd);
if (!file)
@@ -1283,10 +1287,10 @@
error = buf.error;
lastdirent = buf.previous;
if (lastdirent) {
- error = -EINVAL;
if (put_user(file->f_pos, &lastdirent->d_off))
- goto out_putf;
- error = count - buf.count;
+ error = -EFAULT;
+ else
+ error = count - buf.count;
}
out_putf:
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 772ba6f..4061593 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -21,6 +21,7 @@
* Skip non-WB memory and ignore empty memory ranges.
*/
#include <linux/module.h>
+#include <linux/bootmem.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -1009,6 +1010,11 @@
} else
ae = efi_md_end(md);
+#ifdef CONFIG_CRASH_DUMP
+ /* saved_max_pfn should ignore max_addr= command line arg */
+ if (saved_max_pfn < (ae >> PAGE_SHIFT))
+ saved_max_pfn = (ae >> PAGE_SHIFT);
+#endif
/* keep within max_addr= and min_addr= command line arg */
as = max(as, min_addr);
ae = min(ae, max_addr);
@@ -1177,3 +1183,33 @@
return ~0UL;
}
#endif
+
+#ifdef CONFIG_PROC_VMCORE
+/* locate the size of the descriptor at a certain address */
+unsigned long
+vmcore_find_descriptor_size (unsigned long address)
+{
+ void *efi_map_start, *efi_map_end, *p;
+ efi_memory_desc_t *md;
+ u64 efi_desc_size;
+ unsigned long ret = 0;
+
+ efi_map_start = __va(ia64_boot_param->efi_memmap);
+ efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
+ efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+ for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+ md = p;
+ if (efi_wb(md) && md->type == EFI_LOADER_DATA
+ && md->phys_addr == address) {
+ ret = efi_md_size(md);
+ break;
+ }
+ }
+
+ if (ret == 0)
+ printk(KERN_WARNING "Cannot locate EFI vmcore descriptor\n");
+
+ return ret;
+}
+#endif
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 9ddf896..abc7ad0 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2299,7 +2299,7 @@
* allocate a sampling buffer and remaps it into the user address space of the task
*/
static int
-pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
+pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
{
struct mm_struct *mm = task->mm;
struct vm_area_struct *vma = NULL;
@@ -2349,6 +2349,7 @@
* partially initialize the vma for the sampling buffer
*/
vma->vm_mm = mm;
+ vma->vm_file = filp;
vma->vm_flags = VM_READ| VM_MAYREAD |VM_RESERVED;
vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */
@@ -2387,6 +2388,8 @@
goto error;
}
+ get_file(filp);
+
/*
* now insert the vma in the vm list for the process, must be
* done with mmap lock held
@@ -2464,7 +2467,7 @@
}
static int
-pfm_setup_buffer_fmt(struct task_struct *task, pfm_context_t *ctx, unsigned int ctx_flags,
+pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned int ctx_flags,
unsigned int cpu, pfarg_context_t *arg)
{
pfm_buffer_fmt_t *fmt = NULL;
@@ -2505,7 +2508,7 @@
/*
* buffer is always remapped into the caller's address space
*/
- ret = pfm_smpl_buffer_alloc(current, ctx, size, &uaddr);
+ ret = pfm_smpl_buffer_alloc(current, filp, ctx, size, &uaddr);
if (ret) goto error;
/* keep track of user address of buffer */
@@ -2716,7 +2719,7 @@
* does the user want to sample?
*/
if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) {
- ret = pfm_setup_buffer_fmt(current, ctx, ctx_flags, 0, req);
+ ret = pfm_setup_buffer_fmt(current, filp, ctx, ctx_flags, 0, req);
if (ret) goto buffer_error;
}
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 5fa09d1..7d6fe65 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -251,6 +251,12 @@
}
#endif
+#ifdef CONFIG_PROC_VMCORE
+ if (reserve_elfcorehdr(&rsvd_region[n].start,
+ &rsvd_region[n].end) == 0)
+ n++;
+#endif
+
efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
n++;
@@ -453,6 +459,30 @@
return 0;
}
early_param("elfcorehdr", parse_elfcorehdr);
+
+int __init reserve_elfcorehdr(unsigned long *start, unsigned long *end)
+{
+ unsigned long length;
+
+ /* We get the address using the kernel command line,
+ * but the size is extracted from the EFI tables.
+ * Both address and size are required for reservation
+ * to work properly.
+ */
+
+ if (elfcorehdr_addr >= ELFCORE_ADDR_MAX)
+ return -EINVAL;
+
+ if ((length = vmcore_find_descriptor_size(elfcorehdr_addr)) == 0) {
+ elfcorehdr_addr = ELFCORE_ADDR_MAX;
+ return -EINVAL;
+ }
+
+ *start = (unsigned long)__va(elfcorehdr_addr);
+ *end = *start + length;
+ return 0;
+}
+
#endif /* CONFIG_PROC_VMCORE */
void __init
diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile
index 38fa6e4..46edf84 100644
--- a/arch/ia64/lib/Makefile
+++ b/arch/ia64/lib/Makefile
@@ -9,12 +9,11 @@
checksum.o clear_page.o csum_partial_copy.o \
clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o \
flush.o ip_fast_csum.o do_csum.o \
- memset.o strlen.o
+ memset.o strlen.o xor.o
lib-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o
lib-$(CONFIG_MCKINLEY) += copy_page_mck.o memcpy_mck.o
lib-$(CONFIG_PERFMON) += carta_random.o
-lib-$(CONFIG_MD_RAID456) += xor.o
AFLAGS___divdi3.o =
AFLAGS___udivdi3.o = -DUNSIGNED
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index ca4d41e..fb0f469 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -197,11 +197,6 @@
find_initrd();
-#ifdef CONFIG_CRASH_DUMP
- /* If we are doing a crash dump, we still need to know the real mem
- * size before original memory map is reset. */
- saved_max_pfn = max_pfn;
-#endif
}
#ifdef CONFIG_SMP
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 1683510..11a2d88 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -480,12 +480,6 @@
max_pfn = max_low_pfn;
find_initrd();
-
-#ifdef CONFIG_CRASH_DUMP
- /* If we are doing a crash dump, we still need to know the real mem
- * size before original memory map is reset. */
- saved_max_pfn = max_pfn;
-#endif
}
#ifdef CONFIG_SMP
diff --git a/arch/m68knommu/kernel/setup.c b/arch/m68knommu/kernel/setup.c
index d5c25d2..8133b10 100644
--- a/arch/m68knommu/kernel/setup.c
+++ b/arch/m68knommu/kernel/setup.c
@@ -51,7 +51,7 @@
{
}
-void (*mach_sched_init) (irqreturn_t (*handler)(int, void *, struct pt_regs *));
+void (*mach_sched_init) (irq_handler_t handler);
void (*mach_tick)( void );
/* machine dependent keyboard functions */
int (*mach_keyb_init) (void);
@@ -66,7 +66,7 @@
/* machine dependent timer functions */
unsigned long (*mach_gettimeoffset) (void);
void (*mach_gettod) (int*, int*, int*, int*, int*, int*);
-int (*mach_hwclk) (int, struct hwclk_time*);
+int (*mach_hwclk) (int, struct rtc_time*);
int (*mach_set_clock_mmss) (unsigned long);
void (*mach_mksound)( unsigned int count, unsigned int ticks );
void (*mach_reset)( void );
diff --git a/arch/m68knommu/platform/5307/ints.c b/arch/m68knommu/platform/5307/ints.c
index 20f12a1..7516330 100644
--- a/arch/m68knommu/platform/5307/ints.c
+++ b/arch/m68knommu/platform/5307/ints.c
@@ -42,7 +42,6 @@
/* The number of spurious interrupts */
volatile unsigned int num_spurious;
-unsigned int local_bh_count[NR_CPUS];
unsigned int local_irq_count[NR_CPUS];
static irqreturn_t default_irq_handler(int irq, void *ptr)
diff --git a/arch/m68knommu/platform/68328/ints.c b/arch/m68knommu/platform/68328/ints.c
index 2dda733..3de6e33 100644
--- a/arch/m68knommu/platform/68328/ints.c
+++ b/arch/m68knommu/platform/68328/ints.c
@@ -15,6 +15,7 @@
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
+#include <linux/interrupt.h>
#include <asm/system.h>
#include <asm/irq.h>
@@ -64,7 +65,7 @@
asmlinkage void trap45(void);
asmlinkage void trap46(void);
asmlinkage void trap47(void);
-asmlinkage irqreturn_t bad_interrupt(int, void *, struct pt_regs *);
+asmlinkage irqreturn_t bad_interrupt(int, void *);
asmlinkage irqreturn_t inthandler(void);
asmlinkage irqreturn_t inthandler1(void);
asmlinkage irqreturn_t inthandler2(void);
@@ -121,7 +122,7 @@
int request_irq(
unsigned int irq,
- irqreturn_t (*handler)(int, void *, struct pt_regs *),
+ irq_handler_t handler,
unsigned long flags,
const char *devname,
void *dev_id)
diff --git a/arch/m68knommu/platform/68328/timers.c b/arch/m68knommu/platform/68328/timers.c
index 438ef6e..ef067f4 100644
--- a/arch/m68knommu/platform/68328/timers.c
+++ b/arch/m68knommu/platform/68328/timers.c
@@ -17,6 +17,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/interrupt.h>
#include <asm/setup.h>
#include <asm/system.h>
#include <asm/pgtable.h>
@@ -52,7 +53,7 @@
/***************************************************************************/
-void m68328_timer_init(irqreturn_t (*timer_routine) (int, void *, struct pt_regs *))
+void m68328_timer_init(irq_handler_t timer_routine)
{
/* disable timer 1 */
TCTL = 0;
diff --git a/arch/m68knommu/platform/68360/config.c b/arch/m68knommu/platform/68360/config.c
index 1b36f62..4ff13bd 100644
--- a/arch/m68knommu/platform/68360/config.c
+++ b/arch/m68knommu/platform/68360/config.c
@@ -16,6 +16,7 @@
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/console.h>
+#include <linux/interrupt.h>
#include <asm/setup.h>
#include <asm/system.h>
@@ -50,7 +51,7 @@
extern void config_M68360_irq(void);
-void BSP_sched_init(void (*timer_routine)(int, void *, struct pt_regs *))
+void BSP_sched_init(irq_handler_t timer_routine)
{
unsigned char prescaler;
unsigned short tgcr_save;
diff --git a/arch/m68knommu/platform/68EZ328/config.c b/arch/m68knommu/platform/68EZ328/config.c
index 659b80a..ab36551 100644
--- a/arch/m68knommu/platform/68EZ328/config.c
+++ b/arch/m68knommu/platform/68EZ328/config.c
@@ -19,6 +19,7 @@
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/console.h>
+#include <linux/interrupt.h>
#include <asm/setup.h>
#include <asm/system.h>
@@ -31,7 +32,7 @@
/***************************************************************************/
-void m68328_timer_init(irqreturn_t (*timer_routine) (int, void *, struct pt_regs *));
+void m68328_timer_init(irq_handler_t timer_routine);
void m68328_timer_tick(void);
unsigned long m68328_timer_gettimeoffset(void);
void m68328_timer_gettod(int *year, int *mon, int *day, int *hour, int *min, int *sec);
diff --git a/arch/m68knommu/platform/68VZ328/config.c b/arch/m68knommu/platform/68VZ328/config.c
index fcd100b..8abe0f6 100644
--- a/arch/m68knommu/platform/68VZ328/config.c
+++ b/arch/m68knommu/platform/68VZ328/config.c
@@ -21,6 +21,7 @@
#include <linux/console.h>
#include <linux/kd.h>
#include <linux/netdevice.h>
+#include <linux/interrupt.h>
#include <asm/setup.h>
#include <asm/system.h>
@@ -36,7 +37,7 @@
/***************************************************************************/
-void m68328_timer_init(irqreturn_t (*timer_routine) (int, void *, struct pt_regs *));
+void m68328_timer_init(irq_handler_t timer_routine);
void m68328_timer_tick(void);
unsigned long m68328_timer_gettimeoffset(void);
void m68328_timer_gettod(int *year, int *mon, int *day, int *hour, int *min, int *sec);
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 4ec2dd5..a1cd84f 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -167,6 +167,7 @@
select IRQ_CPU
select MIPS_GT64111
select SYS_HAS_CPU_NEVADA
+ select SYS_HAS_EARLY_PRINTK
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_64BIT_KERNEL if EXPERIMENTAL
select SYS_SUPPORTS_LITTLE_ENDIAN
@@ -837,7 +838,6 @@
source "arch/mips/tx4938/Kconfig"
source "arch/mips/vr41xx/Kconfig"
source "arch/mips/philips/pnx8550/common/Kconfig"
-source "arch/mips/cobalt/Kconfig"
endmenu
diff --git a/arch/mips/cobalt/Kconfig b/arch/mips/cobalt/Kconfig
deleted file mode 100644
index 7c42b08..0000000
--- a/arch/mips/cobalt/Kconfig
+++ /dev/null
@@ -1,7 +0,0 @@
-config EARLY_PRINTK
- bool "Early console support"
- depends on MIPS_COBALT
- help
- Provide early console support by direct access to the
- on board UART. The UART must have been previously
- initialised by the boot loader.
diff --git a/arch/mips/cobalt/console.c b/arch/mips/cobalt/console.c
index fff20d2..ca56b41 100644
--- a/arch/mips/cobalt/console.c
+++ b/arch/mips/cobalt/console.c
@@ -9,11 +9,8 @@
#include <asm/addrspace.h>
#include <asm/mach-cobalt/cobalt.h>
-static void putchar(int c)
+void prom_putchar(char c)
{
- if(c == '\n')
- putchar('\r');
-
while(!(COBALT_UART[UART_LSR] & UART_LSR_THRE))
;
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
index 2ef857c..225755d 100644
--- a/arch/mips/kernel/mips_ksyms.c
+++ b/arch/mips/kernel/mips_ksyms.c
@@ -37,6 +37,7 @@
* Userspace access stuff.
*/
EXPORT_SYMBOL(__copy_user);
+EXPORT_SYMBOL(__copy_user_inatomic);
EXPORT_SYMBOL(__bzero);
EXPORT_SYMBOL(__strncpy_from_user_nocheck_asm);
EXPORT_SYMBOL(__strncpy_from_user_asm);
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 7c0b393..0c9a9ff 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -656,6 +656,8 @@
sys sys_kexec_load 4
sys sys_getcpu 3
sys sys_epoll_pwait 6
+ sys sys_ioprio_set 3
+ sys sys_ioprio_get 2
.endm
/* We pre-compute the number of _instruction_ bytes needed to
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index e569b84..23f3b11 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -471,3 +471,6 @@
PTR sys_kexec_load /* 5270 */
PTR sys_getcpu
PTR sys_epoll_pwait
+ PTR sys_ioprio_set
+ PTR sys_ioprio_get
+ .size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index f17e31e..6eac283 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -395,5 +395,8 @@
PTR compat_sys_set_robust_list
PTR compat_sys_get_robust_list
PTR compat_sys_kexec_load
- PTR sys_getcpu
+ PTR sys_getcpu /* 6275 */
PTR compat_sys_epoll_pwait
+ PTR sys_ioprio_set
+ PTR sys_ioprio_get
+ .size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 142c9b7..7e74b41 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -519,4 +519,6 @@
PTR compat_sys_kexec_load
PTR sys_getcpu
PTR compat_sys_epoll_pwait
+ PTR sys_ioprio_set
+ PTR sys_ioprio_get /* 4315 */
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/mips-boards/generic/init.c b/arch/mips/mips-boards/generic/init.c
index b113376..1acdf09 100644
--- a/arch/mips/mips-boards/generic/init.c
+++ b/arch/mips/mips-boards/generic/init.c
@@ -251,8 +251,6 @@
void __init prom_init(void)
{
- u32 start, map, mask, data;
-
prom_argc = fw_arg0;
_prom_argv = (int *) fw_arg1;
_prom_envp = (int *) fw_arg2;
@@ -278,6 +276,8 @@
mips_revision_corid = MIPS_REVISION_CORID_CORE_EMUL_MSC;
}
switch(mips_revision_corid) {
+ u32 start, map, mask, data;
+
case MIPS_REVISION_CORID_QED_RM5261:
case MIPS_REVISION_CORID_CORE_LV:
case MIPS_REVISION_CORID_CORE_FPGA:
diff --git a/arch/mips/mips-boards/malta/Makefile b/arch/mips/mips-boards/malta/Makefile
index cb7f349b..377d9e8 100644
--- a/arch/mips/mips-boards/malta/Makefile
+++ b/arch/mips/mips-boards/malta/Makefile
@@ -21,4 +21,4 @@
obj-y := malta_int.o malta_setup.o
obj-$(CONFIG_MTD) += malta_mtd.o
-obj-$(CONFIG_SMP) += malta_smp.o
+obj-$(CONFIG_MIPS_MT_SMTC) += malta_smtc.o
diff --git a/arch/mips/mips-boards/malta/malta_smp.c b/arch/mips/mips-boards/malta/malta_smtc.c
similarity index 67%
rename from arch/mips/mips-boards/malta/malta_smp.c
rename to arch/mips/mips-boards/malta/malta_smtc.c
index cf96717..d1c80f6 100644
--- a/arch/mips/mips-boards/malta/malta_smp.c
+++ b/arch/mips/mips-boards/malta/malta_smtc.c
@@ -1,25 +1,14 @@
/*
* Malta Platform-specific hooks for SMP operation
*/
+#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/cpumask.h>
-#include <linux/interrupt.h>
-
-#include <asm/atomic.h>
-#include <asm/cpu.h>
-#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/hardirq.h>
-#include <asm/mmu_context.h>
-#include <asm/smp.h>
-#ifdef CONFIG_MIPS_MT_SMTC
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/smtc.h>
#include <asm/smtc_ipi.h>
-#endif /* CONFIG_MIPS_MT_SMTC */
/* VPE/SMP Prototype implements platform interfaces directly */
-#if !defined(CONFIG_MIPS_MT_SMP)
/*
* Cause the specified action to be performed on a targeted "CPU"
@@ -27,10 +16,8 @@
void core_send_ipi(int cpu, unsigned int action)
{
-/* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
-#ifdef CONFIG_MIPS_MT_SMTC
+ /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
-#endif /* CONFIG_MIPS_MT_SMTC */
}
/*
@@ -39,9 +26,7 @@
void prom_boot_secondary(int cpu, struct task_struct *idle)
{
-#ifdef CONFIG_MIPS_MT_SMTC
smtc_boot_secondary(cpu, idle);
-#endif /* CONFIG_MIPS_MT_SMTC */
}
/*
@@ -50,7 +35,6 @@
void prom_init_secondary(void)
{
-#ifdef CONFIG_MIPS_MT_SMTC
void smtc_init_secondary(void);
int myvpe;
@@ -65,7 +49,6 @@
}
smtc_init_secondary();
-#endif /* CONFIG_MIPS_MT_SMTC */
}
/*
@@ -93,9 +76,7 @@
void prom_smp_finish(void)
{
-#ifdef CONFIG_MIPS_MT_SMTC
smtc_smp_finish();
-#endif /* CONFIG_MIPS_MT_SMTC */
}
/*
@@ -105,5 +86,3 @@
void prom_cpus_done(void)
{
}
-
-#endif /* CONFIG_MIPS32R2_MT_SMP */
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index f32ebde..560a6de 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -128,7 +128,6 @@
return;
tx39_blast_dcache();
- tx39_blast_icache();
}
static inline void tx39___flush_cache_all(void)
@@ -142,24 +141,19 @@
if (!cpu_has_dc_aliases)
return;
- if (cpu_context(smp_processor_id(), mm) != 0) {
- tx39_flush_cache_all();
- }
+ if (cpu_context(smp_processor_id(), mm) != 0)
+ tx39_blast_dcache();
}
static void tx39_flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
- int exec;
-
+ if (!cpu_has_dc_aliases)
+ return;
if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
return;
- exec = vma->vm_flags & VM_EXEC;
- if (cpu_has_dc_aliases || exec)
- tx39_blast_dcache();
- if (exec)
- tx39_blast_icache();
+ tx39_blast_dcache();
}
static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)
@@ -218,7 +212,7 @@
static void local_tx39_flush_data_cache_page(void * addr)
{
- tx39_blast_dcache_page(addr);
+ tx39_blast_dcache_page((unsigned long)addr);
}
static void tx39_flush_data_cache_page(unsigned long addr)
diff --git a/arch/mips/momentum/jaguar_atx/platform.c b/arch/mips/momentum/jaguar_atx/platform.c
index 771e55f..5618448 100644
--- a/arch/mips/momentum/jaguar_atx/platform.c
+++ b/arch/mips/momentum/jaguar_atx/platform.c
@@ -48,6 +48,8 @@
};
static struct mv643xx_eth_platform_data eth0_pd = {
+ .port_number = 0,
+
.tx_sram_addr = MV_SRAM_BASE_ETH0,
.tx_sram_size = MV_SRAM_TXRING_SIZE,
.tx_queue_size = MV_SRAM_TXRING_SIZE / 16,
@@ -77,6 +79,8 @@
};
static struct mv643xx_eth_platform_data eth1_pd = {
+ .port_number = 1,
+
.tx_sram_addr = MV_SRAM_BASE_ETH1,
.tx_sram_size = MV_SRAM_TXRING_SIZE,
.tx_queue_size = MV_SRAM_TXRING_SIZE / 16,
@@ -105,7 +109,9 @@
},
};
-static struct mv643xx_eth_platform_data eth2_pd;
+static struct mv643xx_eth_platform_data eth2_pd = {
+ .port_number = 2,
+};
static struct platform_device eth2_device = {
.name = MV643XX_ETH_NAME,
diff --git a/arch/mips/momentum/ocelot_3/platform.c b/arch/mips/momentum/ocelot_3/platform.c
index b80733f..44e4c3f 100644
--- a/arch/mips/momentum/ocelot_3/platform.c
+++ b/arch/mips/momentum/ocelot_3/platform.c
@@ -48,6 +48,8 @@
};
static struct mv643xx_eth_platform_data eth0_pd = {
+ .port_number = 0,
+
.tx_sram_addr = MV_SRAM_BASE_ETH0,
.tx_sram_size = MV_SRAM_TXRING_SIZE,
.tx_queue_size = MV_SRAM_TXRING_SIZE / 16,
@@ -77,6 +79,8 @@
};
static struct mv643xx_eth_platform_data eth1_pd = {
+ .port_number = 1,
+
.tx_sram_addr = MV_SRAM_BASE_ETH1,
.tx_sram_size = MV_SRAM_TXRING_SIZE,
.tx_queue_size = MV_SRAM_TXRING_SIZE / 16,
@@ -105,7 +109,9 @@
},
};
-static struct mv643xx_eth_platform_data eth2_pd;
+static struct mv643xx_eth_platform_data eth2_pd = {
+ .port_number = 2,
+};
static struct platform_device eth2_device = {
.name = MV643XX_ETH_NAME,
diff --git a/arch/mips/momentum/ocelot_c/platform.c b/arch/mips/momentum/ocelot_c/platform.c
index f7cd303..7780aa0 100644
--- a/arch/mips/momentum/ocelot_c/platform.c
+++ b/arch/mips/momentum/ocelot_c/platform.c
@@ -47,6 +47,8 @@
};
static struct mv643xx_eth_platform_data eth0_pd = {
+ .port_number = 0,
+
.tx_sram_addr = MV_SRAM_BASE_ETH0,
.tx_sram_size = MV_SRAM_TXRING_SIZE,
.tx_queue_size = MV_SRAM_TXRING_SIZE / 16,
@@ -76,6 +78,8 @@
};
static struct mv643xx_eth_platform_data eth1_pd = {
+ .port_number = 1,
+
.tx_sram_addr = MV_SRAM_BASE_ETH1,
.tx_sram_size = MV_SRAM_TXRING_SIZE,
.tx_queue_size = MV_SRAM_TXRING_SIZE / 16,
diff --git a/arch/mips/sgi-ip27/ip27-init.c b/arch/mips/sgi-ip27/ip27-init.c
index 9094baf..74158d3 100644
--- a/arch/mips/sgi-ip27/ip27-init.c
+++ b/arch/mips/sgi-ip27/ip27-init.c
@@ -191,7 +191,6 @@
ioc3->eier = 0;
}
-extern void ip27_setup_console(void);
extern void ip27_time_init(void);
extern void ip27_reboot_setup(void);
@@ -200,7 +199,6 @@
hubreg_t p, e, n_mode;
nasid_t nid;
- ip27_setup_console();
ip27_reboot_setup();
/*
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 919fbf5..1009308 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -968,7 +968,6 @@
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) {return -1;}
void pci_disable_msix(struct pci_dev *dev) {}
void msi_remove_pci_irq_vectors(struct pci_dev *dev) {}
-void disable_msi_mode(struct pci_dev *dev, int pos, int type) {}
void pci_no_msi(void) {}
EXPORT_SYMBOL(pci_enable_msix);
EXPORT_SYMBOL(pci_disable_msix);
diff --git a/arch/powerpc/platforms/chrp/pegasos_eth.c b/arch/powerpc/platforms/chrp/pegasos_eth.c
index 6ad4b1a..7104567 100644
--- a/arch/powerpc/platforms/chrp/pegasos_eth.c
+++ b/arch/powerpc/platforms/chrp/pegasos_eth.c
@@ -58,6 +58,7 @@
static struct mv643xx_eth_platform_data eth0_pd = {
+ .port_number = 0,
.tx_sram_addr = PEGASOS2_SRAM_BASE_ETH0,
.tx_sram_size = PEGASOS2_SRAM_TXRING_SIZE,
.tx_queue_size = PEGASOS2_SRAM_TXRING_SIZE/16,
@@ -87,6 +88,7 @@
};
static struct mv643xx_eth_platform_data eth1_pd = {
+ .port_number = 1,
.tx_sram_addr = PEGASOS2_SRAM_BASE_ETH1,
.tx_sram_size = PEGASOS2_SRAM_TXRING_SIZE,
.tx_queue_size = PEGASOS2_SRAM_TXRING_SIZE/16,
diff --git a/arch/ppc/syslib/mv64x60.c b/arch/ppc/syslib/mv64x60.c
index 3b039c3..a6f8b68 100644
--- a/arch/ppc/syslib/mv64x60.c
+++ b/arch/ppc/syslib/mv64x60.c
@@ -339,7 +339,9 @@
},
};
-static struct mv643xx_eth_platform_data eth0_pd;
+static struct mv643xx_eth_platform_data eth0_pd = {
+ .port_number = 0,
+};
static struct platform_device eth0_device = {
.name = MV643XX_ETH_NAME,
@@ -362,7 +364,9 @@
},
};
-static struct mv643xx_eth_platform_data eth1_pd;
+static struct mv643xx_eth_platform_data eth1_pd = {
+ .port_number = 1,
+};
static struct platform_device eth1_device = {
.name = MV643XX_ETH_NAME,
@@ -385,7 +389,9 @@
},
};
-static struct mv643xx_eth_platform_data eth2_pd;
+static struct mv643xx_eth_platform_data eth2_pd = {
+ .port_number = 2,
+};
static struct platform_device eth2_device = {
.name = MV643XX_ETH_NAME,
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index d9425f5..0f293aa 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -376,6 +376,8 @@
Select this option, if you want to share the text segment of the
Linux kernel between different VM guests. This reduces memory
usage with lots of guests but greatly increases kernel size.
+ Also if a kernel was IPL'ed from a shared segment the kexec system
+ call will not work.
You should only select this option if you know what you are
doing and want to exploit this feature.
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index da7c8bb..dc364c1 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -121,7 +121,7 @@
.long .Lduct # cr2: dispatchable unit control table
.long 0 # cr3: instruction authorization
.long 0 # cr4: instruction authorization
- .long 0xffffffff # cr5: primary-aste origin
+ .long .Lduct # cr5: primary-aste origin
.long 0 # cr6: I/O interrupts
.long 0 # cr7: secondary space segment table
.long 0 # cr8: access registers translation
@@ -132,8 +132,6 @@
.long 0 # cr13: home space segment table
.long 0xc0000000 # cr14: machine check handling off
.long 0 # cr15: linkage stack operations
-.Lduct: .long 0,0,0,0,0,0,0,0
- .long 0,0,0,0,0,0,0,0
.Lpcfpu:.long 0x00080000,0x80000000 + .Lchkfpu
.Lpccsp:.long 0x00080000,0x80000000 + .Lchkcsp
.Lpcmvpg:.long 0x00080000,0x80000000 + .Lchkmvpg
@@ -147,6 +145,13 @@
.Linittu: .long init_thread_union
.Lstartup_init:
.long startup_init
+ .align 64
+.Lduct: .long 0,0,0,0,.Lduald,0,0,0
+ .long 0,0,0,0,0,0,0,0
+ .align 128
+.Lduald:.rept 8
+ .long 0x80000000,0,0,0 # invalid access-list entries
+ .endr
.org 0x12000
.globl _ehead
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index af09e18..3701070 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -134,7 +134,7 @@
.quad .Lduct # cr2: dispatchable unit control table
.quad 0 # cr3: instruction authorization
.quad 0 # cr4: instruction authorization
- .quad 0xffffffffffffffff # cr5: primary-aste origin
+ .quad .Lduct # cr5: primary-aste origin
.quad 0 # cr6: I/O interrupts
.quad 0 # cr7: secondary space segment table
.quad 0 # cr8: access registers translation
@@ -145,14 +145,19 @@
.quad 0 # cr13: home space segment table
.quad 0xc0000000 # cr14: machine check handling off
.quad 0 # cr15: linkage stack operations
-.Lduct: .long 0,0,0,0,0,0,0,0
- .long 0,0,0,0,0,0,0,0
.Lpcmsk:.quad 0x0000000180000000
.L4malign:.quad 0xffffffffffc00000
.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
.Lnop: .long 0x07000700
.Lparmaddr:
.quad PARMAREA
+ .align 64
+.Lduct: .long 0,0,0,0,.Lduald,0,0,0
+ .long 0,0,0,0,0,0,0,0
+ .align 128
+.Lduald:.rept 8
+ .long 0x80000000,0,0,0 # invalid access-list entries
+ .endr
.org 0x12000
.globl _ehead
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 5a863a3..d125a4e 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -1066,7 +1066,7 @@
reset->fn();
}
-extern __u32 dump_prefix_page;
+u32 dump_prefix_page;
void s390_reset_system(void)
{
@@ -1078,7 +1078,7 @@
lc->panic_stack = S390_lowcore.panic_stack;
/* Save prefix page address for dump case */
- dump_prefix_page = (unsigned long) lc;
+ dump_prefix_page = (u32)(unsigned long) lc;
/* Disable prefixing */
set_prefix(0);
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index a466bab..8af549e 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -337,21 +337,14 @@
}
p = get_kprobe(addr);
- if (!p) {
- if (*addr != BREAKPOINT_INSTRUCTION) {
- /*
- * The breakpoint instruction was removed right
- * after we hit it. Another cpu has removed
- * either a probepoint or a debugger breakpoint
- * at this address. In either case, no further
- * handling of this interrupt is appropriate.
- *
- */
- ret = 1;
- }
- /* Not one of ours: let kernel handle it */
+ if (!p)
+ /*
+ * No kprobe at this address. The fault has not been
+ * caused by a kprobe breakpoint. The race of breakpoint
+ * vs. kprobe remove does not exist because on s390 we
+ * use stop_machine_run to arm/disarm the breakpoints.
+ */
goto no_kprobe;
- }
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
set_current_kprobe(p, regs, kcb);
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 52f57af..3c77dd3 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -19,6 +19,7 @@
#include <asm/system.h>
#include <asm/smp.h>
#include <asm/reset.h>
+#include <asm/ipl.h>
typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);
@@ -29,6 +30,10 @@
{
void *reboot_code_buffer;
+ /* Can't replace kernel image since it is read-only. */
+ if (ipl_flags & IPL_NSS_VALID)
+ return -ENOSYS;
+
/* We don't support anything but the default image type for now. */
if (image->type != KEXEC_TYPE_DEFAULT)
return -EINVAL;
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index c3f4d9b..2f481cc 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -8,6 +8,10 @@
#include <asm/lowcore.h>
+#
+# do_reipl_asm
+# Parameter: r2 = schid of reipl device
+#
.globl do_reipl_asm
do_reipl_asm: basr %r13,0
.Lpg0: lpsw .Lnewpsw-.Lpg0(%r13)
@@ -16,12 +20,12 @@
stm %r0,%r15,__LC_GPREGS_SAVE_AREA
stctl %c0,%c15,__LC_CREGS_SAVE_AREA
stam %a0,%a15,__LC_AREGS_SAVE_AREA
- mvc __LC_PREFIX_SAVE_AREA(4),dump_prefix_page-.Lpg0(%r13)
+ l %r10,.Ldump_pfx-.Lpg0(%r13)
+ mvc __LC_PREFIX_SAVE_AREA(4),0(%r10)
stckc .Lclkcmp-.Lpg0(%r13)
mvc __LC_CLOCK_COMP_SAVE_AREA(8),.Lclkcmp-.Lpg0(%r13)
stpt __LC_CPU_TIMER_SAVE_AREA
st %r13, __LC_PSW_SAVE_AREA+4
-
lctl %c6,%c6,.Lall-.Lpg0(%r13)
lr %r1,%r2
mvc __LC_PGM_NEW_PSW(8),.Lpcnew-.Lpg0(%r13)
@@ -55,6 +59,7 @@
.align 8
.Lclkcmp: .quad 0x0000000000000000
.Lall: .long 0xff000000
+.Ldump_pfx: .long dump_prefix_page
.align 8
.Lnewpsw: .long 0x00080000,0x80000000+.Lpg1
.Lpcnew: .long 0x00080000,0x80000000+.Lecs
@@ -79,7 +84,3 @@
.long 0x00000000,0x00000000
.long 0x00000000,0x00000000
.long 0x00000000,0x00000000
- .globl dump_prefix_page
-dump_prefix_page:
- .long 0x00000000
-
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
index dbb3eed..c419304 100644
--- a/arch/s390/kernel/reipl64.S
+++ b/arch/s390/kernel/reipl64.S
@@ -8,6 +8,12 @@
*/
#include <asm/lowcore.h>
+
+#
+# do_reipl_asm
+# Parameter: r2 = schid of reipl device
+#
+
.globl do_reipl_asm
do_reipl_asm: basr %r13,0
.Lpg0: lpswe .Lnewpsw-.Lpg0(%r13)
@@ -20,7 +26,8 @@
stg %r0,__LC_GPREGS_SAVE_AREA-0x1000+8(%r1)
stctg %c0,%c15,__LC_CREGS_SAVE_AREA-0x1000(%r1)
stam %a0,%a15,__LC_AREGS_SAVE_AREA-0x1000(%r1)
- mvc __LC_PREFIX_SAVE_AREA-0x1000(4,%r1),dump_prefix_page-.Lpg0(%r13)
+ lg %r10,.Ldump_pfx-.Lpg0(%r13)
+ mvc __LC_PREFIX_SAVE_AREA-0x1000(4,%r1),0(%r10)
stfpc __LC_FP_CREG_SAVE_AREA-0x1000(%r1)
stckc .Lclkcmp-.Lpg0(%r13)
mvc __LC_CLOCK_COMP_SAVE_AREA-0x1000(8,%r1),.Lclkcmp-.Lpg0(%r13)
@@ -64,6 +71,7 @@
.align 8
.Lclkcmp: .quad 0x0000000000000000
.Lall: .quad 0x00000000ff000000
+.Ldump_pfx: .quad dump_prefix_page
.Lregsave: .quad 0x0000000000000000
.align 16
/*
@@ -103,6 +111,3 @@
.long 0x00000000,0x00000000
.long 0x00000000,0x00000000
.long 0x00000000,0x00000000
- .globl dump_prefix_page
-dump_prefix_page:
- .long 0x00000000
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index ecaa432..97764f7 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -94,10 +94,9 @@
int cpu, local = 0;
/*
- * Can deadlock when interrupts are disabled or if in wrong context,
- * caller must disable preemption
+ * Can deadlock when interrupts are disabled or if in wrong context.
*/
- WARN_ON(irqs_disabled() || in_irq() || preemptible());
+ WARN_ON(irqs_disabled() || in_irq());
/*
* Check for local function call. We have to have the same call order
@@ -152,17 +151,18 @@
* Run a function on all other CPUs.
*
* You must not call this function with disabled interrupts or from a
- * hardware interrupt handler. Must be called with preemption disabled.
- * You may call it from a bottom half.
+ * hardware interrupt handler. You may call it from a bottom half.
*/
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
int wait)
{
cpumask_t map;
+ preempt_disable();
map = cpu_online_map;
cpu_clear(smp_processor_id(), map);
__smp_call_function_map(func, info, nonatomic, wait, map);
+ preempt_enable();
return 0;
}
EXPORT_SYMBOL(smp_call_function);
@@ -178,16 +178,17 @@
* Run a function on one processor.
*
* You must not call this function with disabled interrupts or from a
- * hardware interrupt handler. Must be called with preemption disabled.
- * You may call it from a bottom half.
+ * hardware interrupt handler. You may call it from a bottom half.
*/
int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic,
int wait, int cpu)
{
cpumask_t map = CPU_MASK_NONE;
+ preempt_disable();
cpu_set(cpu, map);
__smp_call_function_map(func, info, nonatomic, wait, map);
+ preempt_enable();
return 0;
}
EXPORT_SYMBOL(smp_call_function_on);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 641aef3..7462aeb 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -108,53 +108,40 @@
}
/*
- * Check which address space is addressed by the access
- * register in S390_lowcore.exc_access_id.
- * Returns 1 for user space and 0 for kernel space.
+ * Returns the address space associated with the fault.
+ * Returns 0 for kernel space, 1 for user space and
+ * 2 for code execution in user space with noexec=on.
*/
-static int __check_access_register(struct pt_regs *regs, int error_code)
-{
- int areg = S390_lowcore.exc_access_id;
-
- if (areg == 0)
- /* Access via access register 0 -> kernel address */
- return 0;
- save_access_regs(current->thread.acrs);
- if (regs && areg < NUM_ACRS && current->thread.acrs[areg] <= 1)
- /*
- * access register contains 0 -> kernel address,
- * access register contains 1 -> user space address
- */
- return current->thread.acrs[areg];
-
- /* Something unhealthy was done with the access registers... */
- die("page fault via unknown access register", regs, error_code);
- do_exit(SIGKILL);
- return 0;
-}
-
-/*
- * Check which address space the address belongs to.
- * May return 1 or 2 for user space and 0 for kernel space.
- * Returns 2 for user space in primary addressing mode with
- * CONFIG_S390_EXEC_PROTECT on and kernel parameter noexec=on.
- */
-static inline int check_user_space(struct pt_regs *regs, int error_code)
+static inline int check_space(struct task_struct *tsk)
{
/*
- * The lowest two bits of S390_lowcore.trans_exc_code indicate
- * which paging table was used:
- * 0: Primary Segment Table Descriptor
- * 1: STD determined via access register
- * 2: Secondary Segment Table Descriptor
- * 3: Home Segment Table Descriptor
+ * The lowest two bits of S390_lowcore.trans_exc_code
+ * indicate which paging table was used.
*/
- int descriptor = S390_lowcore.trans_exc_code & 3;
- if (unlikely(descriptor == 1))
- return __check_access_register(regs, error_code);
- if (descriptor == 2)
- return current->thread.mm_segment.ar4;
- return ((descriptor != 0) ^ (switch_amode)) << s390_noexec;
+ int desc = S390_lowcore.trans_exc_code & 3;
+
+ if (desc == 3) /* Home Segment Table Descriptor */
+ return switch_amode == 0;
+ if (desc == 2) /* Secondary Segment Table Descriptor */
+ return tsk->thread.mm_segment.ar4;
+#ifdef CONFIG_S390_SWITCH_AMODE
+ if (unlikely(desc == 1)) { /* STD determined via access register */
+ /* %a0 always indicates primary space. */
+ if (S390_lowcore.exc_access_id != 0) {
+ save_access_regs(tsk->thread.acrs);
+ /*
+ * An alet of 0 indicates primary space.
+ * An alet of 1 indicates secondary space.
+ * Any other alet values generate an
+ * alen-translation exception.
+ */
+ if (tsk->thread.acrs[S390_lowcore.exc_access_id])
+ return tsk->thread.mm_segment.ar4;
+ }
+ }
+#endif
+ /* Primary Segment Table Descriptor */
+ return switch_amode << s390_noexec;
}
/*
@@ -265,16 +252,16 @@
* 11 Page translation -> Not present (nullification)
* 3b Region third trans. -> Not present (nullification)
*/
-static inline void __kprobes
+static inline void
do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
{
struct task_struct *tsk;
struct mm_struct *mm;
struct vm_area_struct * vma;
unsigned long address;
- int user_address;
const struct exception_table_entry *fixup;
- int si_code = SEGV_MAPERR;
+ int si_code;
+ int space;
tsk = current;
mm = tsk->mm;
@@ -294,7 +281,7 @@
NULL pointer write access in kernel mode. */
if (!(regs->psw.mask & PSW_MASK_PSTATE)) {
address = 0;
- user_address = 0;
+ space = 0;
goto no_context;
}
@@ -309,15 +296,15 @@
* the address
*/
address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
- user_address = check_user_space(regs, error_code);
+ space = check_space(tsk);
/*
* Verify that the fault happened in user space, that
* we are not in an interrupt and that there is a
* user context.
*/
- if (user_address == 0 || in_atomic() || !mm)
- goto no_context;
+ if (unlikely(space == 0 || in_atomic() || !mm))
+ goto no_context;
/*
* When we get here, the fault happened in the current
@@ -328,12 +315,13 @@
down_read(&mm->mmap_sem);
- vma = find_vma(mm, address);
- if (!vma)
- goto bad_area;
+ si_code = SEGV_MAPERR;
+ vma = find_vma(mm, address);
+ if (!vma)
+ goto bad_area;
#ifdef CONFIG_S390_EXEC_PROTECT
- if (unlikely((user_address == 2) && !(vma->vm_flags & VM_EXEC)))
+ if (unlikely((space == 2) && !(vma->vm_flags & VM_EXEC)))
if (!signal_return(mm, regs, address, error_code))
/*
* signal_return() has done an up_read(&mm->mmap_sem)
@@ -389,7 +377,7 @@
* The instruction that caused the program check will
* be repeated. Don't signal single step via SIGTRAP.
*/
- clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
+ clear_tsk_thread_flag(tsk, TIF_SINGLE_STEP);
return;
/*
@@ -419,7 +407,7 @@
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
- if (user_address == 0)
+ if (space == 0)
printk(KERN_ALERT "Unable to handle kernel pointer dereference"
" at virtual kernel address %p\n", (void *)address);
else
@@ -462,13 +450,14 @@
goto no_context;
}
-void do_protection_exception(struct pt_regs *regs, unsigned long error_code)
+void __kprobes do_protection_exception(struct pt_regs *regs,
+ unsigned long error_code)
{
regs->psw.addr -= (error_code >> 16);
do_exception(regs, 4, 1);
}
-void do_dat_exception(struct pt_regs *regs, unsigned long error_code)
+void __kprobes do_dat_exception(struct pt_regs *regs, unsigned long error_code)
{
do_exception(regs, error_code & 0xff, 0);
}
diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c
index 2a32e5e..3c798cd 100644
--- a/arch/um/kernel/signal.c
+++ b/arch/um/kernel/signal.c
@@ -158,12 +158,12 @@
clear_thread_flag(TIF_RESTORE_SIGMASK);
sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
}
- return(handled_sig);
+ return handled_sig;
}
int do_signal(void)
{
- return(kern_do_signal(&current->thread.regs));
+ return kern_do_signal(&current->thread.regs);
}
/*
@@ -186,5 +186,5 @@
long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
{
- return(do_sigaltstack(uss, uoss, PT_REGS_SP(&current->thread.regs)));
+ return do_sigaltstack(uss, uoss, PT_REGS_SP(&current->thread.regs));
}
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index 9b34fe6..dda0678 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -419,9 +419,12 @@
.offset = code_offset
} } });
n = os_write_file(fd, &mmop, sizeof(mmop));
- if(n != sizeof(mmop))
+ if(n != sizeof(mmop)){
+ printk("mmap args - addr = 0x%lx, fd = %d, offset = %llx\n",
+ code, code_fd, (unsigned long long) code_offset);
panic("map_stub_pages : /proc/mm map for code failed, "
"err = %d\n", -n);
+ }
if ( stack ) {
__u64 map_offset;
diff --git a/arch/um/os-Linux/trap.c b/arch/um/os-Linux/trap.c
index 1df231a..d221214 100644
--- a/arch/um/os-Linux/trap.c
+++ b/arch/um/os-Linux/trap.c
@@ -16,6 +16,7 @@
CHOOSE_MODE(syscall_handler_tt(sig, regs), (void) 0);
}
+/* Initialized from linux_main() */
void (*sig_info[NSIG])(int, union uml_pt_regs *);
void os_fill_handlinfo(struct kern_handlers h)
diff --git a/arch/x86_64/kernel/hpet.c b/arch/x86_64/kernel/hpet.c
index 65a0edd..8cf0b8a 100644
--- a/arch/x86_64/kernel/hpet.c
+++ b/arch/x86_64/kernel/hpet.c
@@ -12,6 +12,12 @@
#include <asm/timex.h>
#include <asm/hpet.h>
+#define HPET_MASK 0xFFFFFFFF
+#define HPET_SHIFT 22
+
+/* FSEC = 10^-15 NSEC = 10^-9 */
+#define FSEC_PER_NSEC 1000000
+
int nohpet __initdata;
unsigned long hpet_address;
@@ -106,9 +112,31 @@
return 0;
}
+static cycle_t read_hpet(void)
+{
+ return (cycle_t)hpet_readl(HPET_COUNTER);
+}
+
+static cycle_t __vsyscall_fn vread_hpet(void)
+{
+ return readl((void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
+}
+
+struct clocksource clocksource_hpet = {
+ .name = "hpet",
+ .rating = 250,
+ .read = read_hpet,
+ .mask = (cycle_t)HPET_MASK,
+ .mult = 0, /* set below */
+ .shift = HPET_SHIFT,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .vread = vread_hpet,
+};
+
int hpet_arch_init(void)
{
unsigned int id;
+ u64 tmp;
if (!hpet_address)
return -1;
@@ -132,6 +160,22 @@
hpet_use_timer = (id & HPET_ID_LEGSUP);
+ /*
+ * hpet period is in femto seconds per cycle
+ * so we need to convert this to ns/cyc units
+ * aproximated by mult/2^shift
+ *
+ * fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift
+ * fsec/cyc * 1ns/1000000fsec * 2^shift = mult
+ * fsec/cyc * 2^shift * 1nsec/1000000fsec = mult
+ * (fsec/cyc << shift)/1000000 = mult
+ * (hpet_period << shift)/FSEC_PER_NSEC = mult
+ */
+ tmp = (u64)hpet_period << HPET_SHIFT;
+ do_div(tmp, FSEC_PER_NSEC);
+ clocksource_hpet.mult = (u32)tmp;
+ clocksource_register(&clocksource_hpet);
+
return hpet_timer_stop_set_go(hpet_tick);
}
@@ -444,68 +488,3 @@
}
__setup("nohpet", nohpet_setup);
-
-#define HPET_MASK 0xFFFFFFFF
-#define HPET_SHIFT 22
-
-/* FSEC = 10^-15 NSEC = 10^-9 */
-#define FSEC_PER_NSEC 1000000
-
-static void *hpet_ptr;
-
-static cycle_t read_hpet(void)
-{
- return (cycle_t)readl(hpet_ptr);
-}
-
-static cycle_t __vsyscall_fn vread_hpet(void)
-{
- return readl((void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
-}
-
-struct clocksource clocksource_hpet = {
- .name = "hpet",
- .rating = 250,
- .read = read_hpet,
- .mask = (cycle_t)HPET_MASK,
- .mult = 0, /* set below */
- .shift = HPET_SHIFT,
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
- .vread = vread_hpet,
-};
-
-static int __init init_hpet_clocksource(void)
-{
- unsigned long hpet_period;
- void __iomem *hpet_base;
- u64 tmp;
-
- if (!hpet_address)
- return -ENODEV;
-
- /* calculate the hpet address: */
- hpet_base = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
- hpet_ptr = hpet_base + HPET_COUNTER;
-
- /* calculate the frequency: */
- hpet_period = readl(hpet_base + HPET_PERIOD);
-
- /*
- * hpet period is in femto seconds per cycle
- * so we need to convert this to ns/cyc units
- * aproximated by mult/2^shift
- *
- * fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift
- * fsec/cyc * 1ns/1000000fsec * 2^shift = mult
- * fsec/cyc * 2^shift * 1nsec/1000000fsec = mult
- * (fsec/cyc << shift)/1000000 = mult
- * (hpet_period << shift)/FSEC_PER_NSEC = mult
- */
- tmp = (u64)hpet_period << HPET_SHIFT;
- do_div(tmp, FSEC_PER_NSEC);
- clocksource_hpet.mult = (u32)tmp;
-
- return clocksource_register(&clocksource_hpet);
-}
-
-module_init(init_hpet_clocksource);
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index 0a91368..c6a5bc7 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -789,7 +789,6 @@
struct irq_cfg *cfg = irq_cfg + irq;
struct IO_APIC_route_entry entry;
cpumask_t mask;
- unsigned long flags;
if (!IO_APIC_IRQ(irq))
return;
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 3544372..cd4643a 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -923,8 +923,9 @@
*/
int __cpuinit __cpu_up(unsigned int cpu)
{
- int err;
int apicid = cpu_present_to_apicid(cpu);
+ unsigned long flags;
+ int err;
WARN_ON(irqs_disabled());
@@ -958,7 +959,9 @@
/*
* Make sure and check TSC sync:
*/
+ local_irq_save(flags);
check_tsc_sync_source(cpu);
+ local_irq_restore(flags);
while (!cpu_isset(cpu, cpu_online_map))
cpu_relax();
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index c9addcf..75d73a9 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -358,6 +358,8 @@
set_cyc2ns_scale(cpu_khz);
printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
cpu_khz / 1000, cpu_khz % 1000);
+ init_tsc_clocksource();
+
setup_irq(0, &irq0);
}
diff --git a/arch/x86_64/kernel/tsc.c b/arch/x86_64/kernel/tsc.c
index 8958318..1a0edbb 100644
--- a/arch/x86_64/kernel/tsc.c
+++ b/arch/x86_64/kernel/tsc.c
@@ -210,7 +210,7 @@
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);
-static int __init init_tsc_clocksource(void)
+void __init init_tsc_clocksource(void)
{
if (!notsc) {
clocksource_tsc.mult = clocksource_khz2mult(cpu_khz,
@@ -218,9 +218,6 @@
if (check_tsc_unstable())
clocksource_tsc.rating = 0;
- return clocksource_register(&clocksource_tsc);
+ clocksource_register(&clocksource_tsc);
}
- return 0;
}
-
-module_init(init_tsc_clocksource);
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 43cc43d..dc7b562 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -389,6 +389,7 @@
{ PCI_VDEVICE(INTEL, 0x2929), board_ahci_pi }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x292a), board_ahci_pi }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x292b), board_ahci_pi }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292c), board_ahci_pi }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x292f), board_ahci_pi }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x294d), board_ahci_pi }, /* ICH9 */
{ PCI_VDEVICE(INTEL, 0x294e), board_ahci_pi }, /* ICH9M */
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index fc5b73d..86fbcd6 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -69,7 +69,7 @@
#define NR_HOST 6
static int legacy_port[NR_HOST] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 };
-static int legacy_irq[NR_HOST] = { 15, 14, 11, 10, 8, 12 };
+static int legacy_irq[NR_HOST] = { 14, 15, 11, 10, 8, 12 };
struct legacy_data {
unsigned long timing;
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
index 3fb4177..acdc52c 100644
--- a/drivers/ata/pata_pdc202xx_old.c
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -2,13 +2,14 @@
* pata_pdc202xx_old.c - Promise PDC202xx PATA for new ATA layer
* (C) 2005 Red Hat Inc
* Alan Cox <alan@redhat.com>
+ * (C) 2007 Bartlomiej Zolnierkiewicz
*
* Based in part on linux/drivers/ide/pci/pdc202xx_old.c
*
* First cut with LBA48/ATAPI
*
* TODO:
- * Channel interlock/reset on both required ?
+ * Channel interlock/reset on both required
*/
#include <linux/kernel.h>
@@ -21,7 +22,7 @@
#include <linux/libata.h>
#define DRV_NAME "pata_pdc202xx_old"
-#define DRV_VERSION "0.3.0"
+#define DRV_VERSION "0.4.0"
/**
* pdc2024x_pre_reset - probe begin
@@ -76,7 +77,7 @@
static void pdc202xx_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- int port = 0x60 + 4 * ap->port_no + 2 * adev->devno;
+ int port = 0x60 + 8 * ap->port_no + 4 * adev->devno;
static u16 pio_timing[5] = {
0x0913, 0x050C , 0x0308, 0x0206, 0x0104
};
@@ -85,7 +86,7 @@
pci_read_config_byte(pdev, port, &r_ap);
pci_read_config_byte(pdev, port + 1, &r_bp);
r_ap &= ~0x3F; /* Preserve ERRDY_EN, SYNC_IN */
- r_bp &= ~0x07;
+ r_bp &= ~0x1F;
r_ap |= (pio_timing[pio] >> 8);
r_bp |= (pio_timing[pio] & 0xFF);
@@ -123,7 +124,7 @@
static void pdc202xx_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- int port = 0x60 + 4 * ap->port_no + 2 * adev->devno;
+ int port = 0x60 + 8 * ap->port_no + 4 * adev->devno;
static u8 udma_timing[6][2] = {
{ 0x60, 0x03 }, /* 33 Mhz Clock */
{ 0x40, 0x02 },
@@ -132,12 +133,17 @@
{ 0x20, 0x01 },
{ 0x20, 0x01 }
};
+ static u8 mdma_timing[3][2] = {
+ { 0x60, 0x03 },
+ { 0x60, 0x04 },
+ { 0xe0, 0x0f },
+ };
u8 r_bp, r_cp;
pci_read_config_byte(pdev, port + 1, &r_bp);
pci_read_config_byte(pdev, port + 2, &r_cp);
- r_bp &= ~0xF0;
+ r_bp &= ~0xE0;
r_cp &= ~0x0F;
if (adev->dma_mode >= XFER_UDMA_0) {
@@ -147,8 +153,8 @@
} else {
int speed = adev->dma_mode - XFER_MW_DMA_0;
- r_bp |= 0x60;
- r_cp |= (5 - speed);
+ r_bp |= mdma_timing[speed][0];
+ r_cp |= mdma_timing[speed][1];
}
pci_write_config_byte(pdev, port + 1, r_bp);
pci_write_config_byte(pdev, port + 2, r_cp);
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index cacb1c8..17ee97f 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -406,22 +406,6 @@
setups function - apparently needed by the rd_load_image routine
that supposes the filesystem in the image uses a 1024 blocksize.
-config BLK_DEV_INITRD
- bool "Initial RAM filesystem and RAM disk (initramfs/initrd) support"
- depends on BROKEN || !FRV
- help
- The initial RAM filesystem is a ramfs which is loaded by the
- boot loader (loadlin or lilo) and that is mounted as root
- before the normal boot procedure. It is typically used to
- load modules needed to mount the "real" root file system,
- etc. See <file:Documentation/initrd.txt> for details.
-
- If RAM disk support (BLK_DEV_RAM) is also included, this
- also enables initial RAM disk (initrd) support and adds
- 15 Kbytes (more on some other architectures) to the kernel size.
-
- If unsure say Y.
-
config CDROM_PKTCDVD
tristate "Packet writing on CD/DVD media"
depends on !UML
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 05dfe35..0c716ee 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1291,13 +1291,19 @@
if (inq_buff == NULL)
goto mem_msg;
+ /* testing to see if 16-byte CDBs are already being used */
+ if (h->cciss_read == CCISS_READ_16) {
+ cciss_read_capacity_16(h->ctlr, drv_index, 1,
+ &total_size, &block_size);
+ goto geo_inq;
+ }
+
cciss_read_capacity(ctlr, drv_index, 1,
&total_size, &block_size);
- /* total size = last LBA + 1 */
- /* FFFFFFFF + 1 = 0, cannot have a logical volume of size 0 */
- /* so we assume this volume this must be >2TB in size */
- if (total_size == (__u32) 0) {
+ /* if read_capacity returns all F's this volume is >2TB in size */
+ /* so we switch to 16-byte CDB's for all read/write ops */
+ if (total_size == 0xFFFFFFFFULL) {
cciss_read_capacity_16(ctlr, drv_index, 1,
&total_size, &block_size);
h->cciss_read = CCISS_READ_16;
@@ -1306,6 +1312,7 @@
h->cciss_read = CCISS_READ_10;
h->cciss_write = CCISS_WRITE_10;
}
+geo_inq:
cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
inq_buff, &h->drv[drv_index]);
@@ -1917,13 +1924,14 @@
drv->raid_level = inq_buff->data_byte[8];
}
drv->block_size = block_size;
- drv->nr_blocks = total_size;
+ drv->nr_blocks = total_size + 1;
t = drv->heads * drv->sectors;
if (t > 1) {
- unsigned rem = sector_div(total_size, t);
+ sector_t real_size = total_size + 1;
+ unsigned long rem = sector_div(real_size, t);
if (rem)
- total_size++;
- drv->cylinders = total_size;
+ real_size++;
+ drv->cylinders = real_size;
}
} else { /* Get geometry failed */
printk(KERN_WARNING "cciss: reading geometry failed\n");
@@ -1953,16 +1961,16 @@
ctlr, buf, sizeof(ReadCapdata_struct),
1, logvol, 0, NULL, TYPE_CMD);
if (return_code == IO_OK) {
- *total_size = be32_to_cpu(*(__u32 *) buf->total_size)+1;
+ *total_size = be32_to_cpu(*(__u32 *) buf->total_size);
*block_size = be32_to_cpu(*(__u32 *) buf->block_size);
} else { /* read capacity command failed */
printk(KERN_WARNING "cciss: read capacity failed\n");
*total_size = 0;
*block_size = BLOCK_SIZE;
}
- if (*total_size != (__u32) 0)
+ if (*total_size != 0)
printk(KERN_INFO " blocks= %llu block_size= %d\n",
- (unsigned long long)*total_size, *block_size);
+ (unsigned long long)*total_size+1, *block_size);
kfree(buf);
return;
}
@@ -1989,7 +1997,7 @@
1, logvol, 0, NULL, TYPE_CMD);
}
if (return_code == IO_OK) {
- *total_size = be64_to_cpu(*(__u64 *) buf->total_size)+1;
+ *total_size = be64_to_cpu(*(__u64 *) buf->total_size);
*block_size = be32_to_cpu(*(__u32 *) buf->block_size);
} else { /* read capacity command failed */
printk(KERN_WARNING "cciss: read capacity failed\n");
@@ -1997,7 +2005,7 @@
*block_size = BLOCK_SIZE;
}
printk(KERN_INFO " blocks= %llu block_size= %d\n",
- (unsigned long long)*total_size, *block_size);
+ (unsigned long long)*total_size+1, *block_size);
kfree(buf);
return;
}
@@ -3119,8 +3127,9 @@
}
cciss_read_capacity(cntl_num, i, 0, &total_size, &block_size);
- /* total_size = last LBA + 1 */
- if(total_size == (__u32) 0) {
+ /* If read_capacity returns all F's the logical is >2TB */
+ /* so we switch to 16-byte CDBs for all read/write ops */
+ if(total_size == 0xFFFFFFFFULL) {
cciss_read_capacity_16(cntl_num, i, 0,
&total_size, &block_size);
hba[cntl_num]->cciss_read = CCISS_READ_16;
@@ -3395,7 +3404,7 @@
return -1;
}
-static void __devexit cciss_remove_one(struct pci_dev *pdev)
+static void cciss_remove_one(struct pci_dev *pdev)
{
ctlr_info_t *tmp_ptr;
int i, j;
@@ -3419,9 +3428,10 @@
memset(flush_buf, 0, 4);
return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
TYPE_CMD);
- if (return_code != IO_OK) {
- printk(KERN_WARNING "Error Flushing cache on controller %d\n",
- i);
+ if (return_code == IO_OK) {
+ printk(KERN_INFO "Completed flushing cache on controller %d\n", i);
+ } else {
+ printk(KERN_WARNING "Error flushing cache on controller %d\n", i);
}
free_irq(hba[i]->intr[2], hba[i]);
@@ -3472,6 +3482,7 @@
.probe = cciss_init_one,
.remove = __devexit_p(cciss_remove_one),
.id_table = cciss_pci_device_id, /* id_table */
+ .shutdown = cciss_remove_one,
};
/*
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index dc13eba..44cd7b2 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -376,6 +376,25 @@
return 0;
}
+static void viocd_end_request(struct request *req, int uptodate)
+{
+ int nsectors = req->hard_nr_sectors;
+
+ /*
+ * Make sure it's fully ended, and ensure that we process
+ * at least one sector.
+ */
+ if (blk_pc_request(req))
+ nsectors = (req->data_len + 511) >> 9;
+ if (!nsectors)
+ nsectors = 1;
+
+ if (end_that_request_first(req, uptodate, nsectors))
+ BUG();
+ add_disk_randomness(req->rq_disk);
+ blkdev_dequeue_request(req);
+ end_that_request_last(req, uptodate);
+}
static int rwreq;
@@ -385,11 +404,11 @@
while ((rwreq == 0) && ((req = elv_next_request(q)) != NULL)) {
if (!blk_fs_request(req))
- end_request(req, 0);
+ viocd_end_request(req, 0);
else if (send_request(req) < 0) {
printk(VIOCD_KERN_WARNING
"unable to send message to OS/400!");
- end_request(req, 0);
+ viocd_end_request(req, 0);
} else
rwreq++;
}
@@ -601,9 +620,9 @@
"with rc %d:0x%04X: %s\n",
req, event->xRc,
bevent->sub_result, err->msg);
- end_request(req, 0);
+ viocd_end_request(req, 0);
} else
- end_request(req, 1);
+ viocd_end_request(req, 1);
/* restart handling of incoming requests */
spin_unlock_irqrestore(&viocd_reqlock, flags);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index d0a6dc5..3429ece 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -1026,16 +1026,17 @@
source "drivers/char/tpm/Kconfig"
config TELCLOCK
- tristate "Telecom clock driver for MPBL0010 ATCA SBC"
+ tristate "Telecom clock driver for ATCA SBC"
depends on EXPERIMENTAL && X86
default n
help
- The telecom clock device is specific to the MPBL0010 ATCA computer and
- allows direct userspace access to the configuration of the telecom clock
- configuration settings. This device is used for hardware synchronization
- across the ATCA backplane fabric. Upon loading, the driver exports a
- sysfs directory, /sys/devices/platform/telco_clock, with a number of
- files for controlling the behavior of this hardware.
+ The telecom clock device is specific to the MPCBL0010 and MPCBL0050
+ ATCA computers and allows direct userspace access to the
+ configuration of the telecom clock configuration settings. This
+ device is used for hardware synchronization across the ATCA backplane
+ fabric. Upon loading, the driver exports a sysfs directory,
+ /sys/devices/platform/telco_clock, with a number of files for
+ controlling the behavior of this hardware.
endmenu
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index 54df355..16dc5d1 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -3501,6 +3501,7 @@
tmp.irq = cinfo->irq;
tmp.flags = info->flags;
tmp.close_delay = info->close_delay;
+ tmp.closing_wait = info->closing_wait;
tmp.baud_base = info->baud;
tmp.custom_divisor = info->custom_divisor;
tmp.hub6 = 0; /*!!! */
diff --git a/drivers/char/epca.c b/drivers/char/epca.c
index 88fc24f..de5be30 100644
--- a/drivers/char/epca.c
+++ b/drivers/char/epca.c
@@ -209,7 +209,6 @@
static void setup_empty_event(struct tty_struct *tty, struct channel *ch);
void epca_setup(char *, int *);
-static int get_termio(struct tty_struct *, struct termio __user *);
static int pc_write(struct tty_struct *, const unsigned char *, int);
static int pc_init(void);
static int init_PCI(void);
@@ -2362,15 +2361,6 @@
switch (cmd)
{ /* Begin switch cmd */
-
-#if 0 /* Handled by calling layer properly */
- case TCGETS:
- if (copy_to_user(argp, tty->termios, sizeof(struct ktermios)))
- return -EFAULT;
- return 0;
- case TCGETA:
- return get_termio(tty, argp);
-#endif
case TCSBRK: /* SVID version: non-zero arg --> no break */
retval = tty_check_change(tty);
if (retval)
@@ -2735,13 +2725,6 @@
memoff(ch);
} /* End setup_empty_event */
-/* --------------------- Begin get_termio ----------------------- */
-
-static int get_termio(struct tty_struct * tty, struct termio __user * termio)
-{ /* Begin get_termio */
- return kernel_termios_to_user_termio(termio, tty->termios);
-} /* End get_termio */
-
/* ---------------------- Begin epca_setup -------------------------- */
void epca_setup(char *str, int *ints)
{ /* Begin epca_setup */
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index a7b33d2..e221465 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2478,6 +2478,11 @@
if (!info)
return;
+#ifdef CONFIG_PPC_MERGE
+ if (check_legacy_ioport(ipmi_defaults[i].port))
+ continue;
+#endif
+
info->addr_source = NULL;
info->si_type = ipmi_defaults[i].type;
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index 0e82968..f2e4ec4 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -273,6 +273,7 @@
DEBUGP(6, dev, "BytesToRead=%lu\n", bytes_to_read);
min_bytes_to_read = min(count, bytes_to_read + 5);
+ min_bytes_to_read = min_t(size_t, min_bytes_to_read, READ_WRITE_BUFFER_SIZE);
DEBUGP(6, dev, "Min=%lu\n", min_bytes_to_read);
@@ -340,7 +341,7 @@
return 0;
}
- if (count < 5) {
+ if ((count < 5) || (count > READ_WRITE_BUFFER_SIZE)) {
DEBUGP(2, dev, "<- cm4040_write buffersize=%Zd < 5\n", count);
return -EIO;
}
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index ccaa6a3..d42060e 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -214,4 +214,7 @@
return clocksource_register(&clocksource_acpi_pm);
}
-module_init(init_acpi_pm_clocksource);
+/* We use fs_initcall because we want the PCI fixups to have run
+ * but we still need to load before device_initcall
+ */
+fs_initcall(init_acpi_pm_clocksource);
diff --git a/drivers/clocksource/cyclone.c b/drivers/clocksource/cyclone.c
index 4f3925c..1bde303 100644
--- a/drivers/clocksource/cyclone.c
+++ b/drivers/clocksource/cyclone.c
@@ -116,4 +116,4 @@
return clocksource_register(&clocksource_cyclone);
}
-module_init(init_cyclone_clocksource);
+arch_initcall(init_cyclone_clocksource);
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index 0eb6284..6d3840e 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -99,9 +99,8 @@
static unsigned int
geode_aes_crypt(struct geode_aes_op *op)
{
-
u32 flags = 0;
- int iflags;
+ unsigned long iflags;
if (op->len == 0 || op->src == op->dst)
return 0;
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 6450968..f17e9c7 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -215,14 +215,16 @@
module will be called aaed2000_kbd.
config KEYBOARD_GPIO
- tristate "Buttons on CPU GPIOs (PXA)"
- depends on (ARCH_SA1100 || ARCH_PXA || ARCH_S3C2410)
+ tristate "GPIO Buttons"
+ depends on GENERIC_GPIO
help
This driver implements support for buttons connected
- directly to GPIO pins of SA1100, PXA or S3C24xx CPUs.
+ to GPIO pins of various CPUs (and some other chips).
Say Y here if your device has buttons connected
- directly to GPIO pins of the CPU.
+ directly to such GPIO pins. Your board-specific
+ setup logic must also provide a platform device,
+ with configuration data saying which GPIOs are used.
To compile this driver as a module, choose M here: the
module will be called gpio-keys.
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index fa03a00..ccf6df3 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -23,11 +23,9 @@
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/irq.h>
+#include <linux/gpio_keys.h>
#include <asm/gpio.h>
-#include <asm/arch/hardware.h>
-
-#include <asm/hardware/gpio_keys.h>
static irqreturn_t gpio_keys_isr(int irq, void *dev_id)
{
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index d247429..54a1ad5 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3071,7 +3071,7 @@
release_stripe(sh);
}
spin_lock_irq(&conf->device_lock);
- conf->expand_progress = (sector_nr + i)*(conf->raid_disks-1);
+ conf->expand_progress = (sector_nr + i) * new_data_disks;
spin_unlock_irq(&conf->device_lock);
/* Ok, those stripe are ready. We can start scheduling
* reads on the source stripes.
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index 5046a16..4a73e8b 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -376,10 +376,11 @@
{
struct mmc_ios *ios = &host->ios;
- pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u width %u\n",
+ pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
+ "width %u timing %u\n",
mmc_hostname(host), ios->clock, ios->bus_mode,
ios->power_mode, ios->chip_select, ios->vdd,
- ios->bus_width);
+ ios->bus_width, ios->timing);
host->ops->set_ios(host, ios);
}
@@ -809,6 +810,7 @@
host->ios.chip_select = MMC_CS_DONTCARE;
host->ios.power_mode = MMC_POWER_UP;
host->ios.bus_width = MMC_BUS_WIDTH_1;
+ host->ios.timing = MMC_TIMING_LEGACY;
mmc_set_ios(host);
mmc_delay(1);
@@ -828,6 +830,7 @@
host->ios.chip_select = MMC_CS_DONTCARE;
host->ios.power_mode = MMC_POWER_OFF;
host->ios.bus_width = MMC_BUS_WIDTH_1;
+ host->ios.timing = MMC_TIMING_LEGACY;
mmc_set_ios(host);
}
@@ -1112,46 +1115,50 @@
continue;
}
- /* Activate highspeed support. */
- cmd.opcode = MMC_SWITCH;
- cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
- (EXT_CSD_HS_TIMING << 16) |
- (1 << 8) |
- EXT_CSD_CMD_SET_NORMAL;
- cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+ if (host->caps & MMC_CAP_MMC_HIGHSPEED) {
+ /* Activate highspeed support. */
+ cmd.opcode = MMC_SWITCH;
+ cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
+ (EXT_CSD_HS_TIMING << 16) |
+ (1 << 8) |
+ EXT_CSD_CMD_SET_NORMAL;
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
- err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
- if (err != MMC_ERR_NONE) {
- printk("%s: failed to switch card to mmc v4 "
- "high-speed mode.\n",
- mmc_hostname(card->host));
- continue;
+ err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
+ if (err != MMC_ERR_NONE) {
+ printk("%s: failed to switch card to mmc v4 "
+ "high-speed mode.\n",
+ mmc_hostname(card->host));
+ continue;
+ }
+
+ mmc_card_set_highspeed(card);
+
+ host->ios.timing = MMC_TIMING_SD_HS;
+ mmc_set_ios(host);
}
- mmc_card_set_highspeed(card);
-
/* Check for host support for wide-bus modes. */
- if (!(host->caps & MMC_CAP_4_BIT_DATA)) {
- continue;
+ if (host->caps & MMC_CAP_4_BIT_DATA) {
+ /* Activate 4-bit support. */
+ cmd.opcode = MMC_SWITCH;
+ cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
+ (EXT_CSD_BUS_WIDTH << 16) |
+ (EXT_CSD_BUS_WIDTH_4 << 8) |
+ EXT_CSD_CMD_SET_NORMAL;
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+
+ err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
+ if (err != MMC_ERR_NONE) {
+ printk("%s: failed to switch card to "
+ "mmc v4 4-bit bus mode.\n",
+ mmc_hostname(card->host));
+ continue;
+ }
+
+ host->ios.bus_width = MMC_BUS_WIDTH_4;
+ mmc_set_ios(host);
}
-
- /* Activate 4-bit support. */
- cmd.opcode = MMC_SWITCH;
- cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
- (EXT_CSD_BUS_WIDTH << 16) |
- (EXT_CSD_BUS_WIDTH_4 << 8) |
- EXT_CSD_CMD_SET_NORMAL;
- cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
-
- err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
- if (err != MMC_ERR_NONE) {
- printk("%s: failed to switch card to "
- "mmc v4 4-bit bus mode.\n",
- mmc_hostname(card->host));
- continue;
- }
-
- host->ios.bus_width = MMC_BUS_WIDTH_4;
}
kfree(ext_csd);
@@ -1241,6 +1248,9 @@
unsigned char *status;
struct scatterlist sg;
+ if (!(host->caps & MMC_CAP_SD_HIGHSPEED))
+ return;
+
status = kmalloc(64, GFP_KERNEL);
if (!status) {
printk(KERN_WARNING "%s: Unable to allocate buffer for "
@@ -1332,6 +1342,9 @@
}
mmc_card_set_highspeed(card);
+
+ host->ios.timing = MMC_TIMING_SD_HS;
+ mmc_set_ios(host);
}
kfree(status);
diff --git a/drivers/mmc/sdhci.c b/drivers/mmc/sdhci.c
index 7522f76..d749f08 100644
--- a/drivers/mmc/sdhci.c
+++ b/drivers/mmc/sdhci.c
@@ -606,7 +606,6 @@
static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
int div;
- u8 ctrl;
u16 clk;
unsigned long timeout;
@@ -615,13 +614,6 @@
writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL);
- ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
- if (clock > 25000000)
- ctrl |= SDHCI_CTRL_HISPD;
- else
- ctrl &= ~SDHCI_CTRL_HISPD;
- writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
-
if (clock == 0)
goto out;
@@ -761,10 +753,17 @@
sdhci_set_power(host, ios->vdd);
ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
+
if (ios->bus_width == MMC_BUS_WIDTH_4)
ctrl |= SDHCI_CTRL_4BITBUS;
else
ctrl &= ~SDHCI_CTRL_4BITBUS;
+
+ if (ios->timing == MMC_TIMING_SD_HS)
+ ctrl |= SDHCI_CTRL_HISPD;
+ else
+ ctrl &= ~SDHCI_CTRL_HISPD;
+
writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
mmiowb();
@@ -994,7 +993,7 @@
intmask = readl(host->ioaddr + SDHCI_INT_STATUS);
- if (!intmask) {
+ if (!intmask || intmask == 0xffffffff) {
result = IRQ_NONE;
goto out;
}
@@ -1080,6 +1079,13 @@
pci_save_state(pdev);
pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
+
+ for (i = 0;i < chip->num_slots;i++) {
+ if (!chip->hosts[i])
+ continue;
+ free_irq(chip->hosts[i]->irq, chip->hosts[i]);
+ }
+
pci_disable_device(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -1108,6 +1114,11 @@
continue;
if (chip->hosts[i]->flags & SDHCI_USE_DMA)
pci_set_master(pdev);
+ ret = request_irq(chip->hosts[i]->irq, sdhci_irq,
+ IRQF_SHARED, chip->hosts[i]->slot_descr,
+ chip->hosts[i]);
+ if (ret)
+ return ret;
sdhci_init(chip->hosts[i]);
mmiowb();
ret = mmc_resume_host(chip->hosts[i]->mmc);
@@ -1274,6 +1285,9 @@
mmc->f_max = host->max_clk;
mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE | MMC_CAP_BYTEBLOCK;
+ if (caps & SDHCI_CAN_DO_HISPD)
+ mmc->caps |= MMC_CAP_SD_HIGHSPEED;
+
mmc->ocr_avail = 0;
if (caps & SDHCI_CAN_VDD_330)
mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34;
@@ -1282,13 +1296,6 @@
if (caps & SDHCI_CAN_VDD_180)
mmc->ocr_avail |= MMC_VDD_17_18|MMC_VDD_18_19;
- if ((host->max_clk > 25000000) && !(caps & SDHCI_CAN_DO_HISPD)) {
- printk(KERN_ERR "%s: Controller reports > 25 MHz base clock,"
- " but no high speed support.\n",
- host->slot_descr);
- mmc->f_max = 25000000;
- }
-
if (mmc->ocr_avail == 0) {
printk(KERN_ERR "%s: Hardware doesn't report any "
"support voltages.\n", host->slot_descr);
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 716a472..7299577 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -822,11 +822,17 @@
{
struct net_device *dev = pci_get_drvdata(pdev);
struct vortex_private *vp = netdev_priv(dev);
+ int err;
if (dev && vp) {
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- pci_enable_device(pdev);
+ err = pci_enable_device(pdev);
+ if (err) {
+ printk(KERN_WARNING "%s: Could not enable device \n",
+ dev->name);
+ return err;
+ }
pci_set_master(pdev);
if (request_irq(dev->irq, vp->full_bus_master_rx ?
&boomerang_interrupt : &vortex_interrupt, IRQF_SHARED, dev->name, dev)) {
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index ea73ebf..e4724d8 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -60,6 +60,7 @@
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
+#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
@@ -861,6 +862,28 @@
}
}
+
+/*
+ * Retrieve the list of registered multicast addresses for the bonding
+ * device and retransmit an IGMP JOIN request to the current active
+ * slave.
+ */
+static void bond_resend_igmp_join_requests(struct bonding *bond)
+{
+ struct in_device *in_dev;
+ struct ip_mc_list *im;
+
+ rcu_read_lock();
+ in_dev = __in_dev_get_rcu(bond->dev);
+ if (in_dev) {
+ for (im = in_dev->mc_list; im; im = im->next) {
+ ip_mc_rejoin_group(im);
+ }
+ }
+
+ rcu_read_unlock();
+}
+
/*
* Totally destroys the mc_list in bond
*/
@@ -874,6 +897,7 @@
kfree(dmi);
dmi = bond->mc_list;
}
+ bond->mc_list = NULL;
}
/*
@@ -967,6 +991,7 @@
for (dmi = bond->dev->mc_list; dmi; dmi = dmi->next) {
dev_mc_add(new_active->dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
}
+ bond_resend_igmp_join_requests(bond);
}
}
@@ -3423,15 +3448,21 @@
{
struct packet_type *pt = &bond->arp_mon_pt;
+ if (pt->type)
+ return;
+
pt->type = htons(ETH_P_ARP);
- pt->dev = NULL; /*bond->dev;XXX*/
+ pt->dev = bond->dev;
pt->func = bond_arp_rcv;
dev_add_pack(pt);
}
void bond_unregister_arp(struct bonding *bond)
{
- dev_remove_pack(&bond->arp_mon_pt);
+ struct packet_type *pt = &bond->arp_mon_pt;
+
+ dev_remove_pack(pt);
+ pt->type = 0;
}
/*---------------------------- Hashing Policies -----------------------------*/
@@ -4011,42 +4042,6 @@
return 0;
}
-static void bond_activebackup_xmit_copy(struct sk_buff *skb,
- struct bonding *bond,
- struct slave *slave)
-{
- struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC);
- struct ethhdr *eth_data;
- u8 *hwaddr;
- int res;
-
- if (!skb2) {
- printk(KERN_ERR DRV_NAME ": Error: "
- "bond_activebackup_xmit_copy(): skb_copy() failed\n");
- return;
- }
-
- skb2->mac.raw = (unsigned char *)skb2->data;
- eth_data = eth_hdr(skb2);
-
- /* Pick an appropriate source MAC address
- * -- use slave's perm MAC addr, unless used by bond
- * -- otherwise, borrow active slave's perm MAC addr
- * since that will not be used
- */
- hwaddr = slave->perm_hwaddr;
- if (!memcmp(eth_data->h_source, hwaddr, ETH_ALEN))
- hwaddr = bond->curr_active_slave->perm_hwaddr;
-
- /* Set source MAC address appropriately */
- memcpy(eth_data->h_source, hwaddr, ETH_ALEN);
-
- res = bond_dev_queue_xmit(bond, skb2, slave->dev);
- if (res)
- dev_kfree_skb(skb2);
-
- return;
-}
/*
* in active-backup mode, we know that bond->curr_active_slave is always valid if
@@ -4067,21 +4062,6 @@
if (!bond->curr_active_slave)
goto out;
- /* Xmit IGMP frames on all slaves to ensure rapid fail-over
- for multicast traffic on snooping switches */
- if (skb->protocol == __constant_htons(ETH_P_IP) &&
- skb->nh.iph->protocol == IPPROTO_IGMP) {
- struct slave *slave, *active_slave;
- int i;
-
- active_slave = bond->curr_active_slave;
- bond_for_each_slave_from_to(bond, slave, i, active_slave->next,
- active_slave->prev)
- if (IS_UP(slave->dev) &&
- (slave->link == BOND_LINK_UP))
- bond_activebackup_xmit_copy(skb, bond, slave);
- }
-
res = bond_dev_queue_xmit(bond, skb, bond->curr_active_slave->dev);
out:
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 02b61b8..d981d4c 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1132,7 +1132,7 @@
spin_lock_irqsave(&priv->rxlock, flags);
- vlan_group_set_device(priv->vgrp, vid, NULL);
+ vlan_group_set_device(priv->vlgrp, vid, NULL);
spin_unlock_irqrestore(&priv->rxlock, flags);
}
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index be2ddbb..9ba21e0 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1309,7 +1309,7 @@
static int mv643xx_eth_probe(struct platform_device *pdev)
{
struct mv643xx_eth_platform_data *pd;
- int port_num = pdev->id;
+ int port_num;
struct mv643xx_private *mp;
struct net_device *dev;
u8 *p;
@@ -1319,6 +1319,12 @@
int duplex = DUPLEX_HALF;
int speed = 0; /* default to auto-negotiation */
+ pd = pdev->dev.platform_data;
+ if (pd == NULL) {
+ printk(KERN_ERR "No mv643xx_eth_platform_data\n");
+ return -ENODEV;
+ }
+
dev = alloc_etherdev(sizeof(struct mv643xx_private));
if (!dev)
return -ENOMEM;
@@ -1331,8 +1337,6 @@
BUG_ON(!res);
dev->irq = res->start;
- mp->port_num = port_num;
-
dev->open = mv643xx_eth_open;
dev->stop = mv643xx_eth_stop;
dev->hard_start_xmit = mv643xx_eth_start_xmit;
@@ -1373,39 +1377,40 @@
spin_lock_init(&mp->lock);
+ port_num = pd->port_number;
+
/* set default config values */
eth_port_uc_addr_get(dev, dev->dev_addr);
mp->rx_ring_size = MV643XX_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE;
mp->tx_ring_size = MV643XX_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE;
- pd = pdev->dev.platform_data;
- if (pd) {
- if (is_valid_ether_addr(pd->mac_addr))
- memcpy(dev->dev_addr, pd->mac_addr, 6);
+ if (is_valid_ether_addr(pd->mac_addr))
+ memcpy(dev->dev_addr, pd->mac_addr, 6);
- if (pd->phy_addr || pd->force_phy_addr)
- ethernet_phy_set(port_num, pd->phy_addr);
+ if (pd->phy_addr || pd->force_phy_addr)
+ ethernet_phy_set(port_num, pd->phy_addr);
- if (pd->rx_queue_size)
- mp->rx_ring_size = pd->rx_queue_size;
+ if (pd->rx_queue_size)
+ mp->rx_ring_size = pd->rx_queue_size;
- if (pd->tx_queue_size)
- mp->tx_ring_size = pd->tx_queue_size;
+ if (pd->tx_queue_size)
+ mp->tx_ring_size = pd->tx_queue_size;
- if (pd->tx_sram_size) {
- mp->tx_sram_size = pd->tx_sram_size;
- mp->tx_sram_addr = pd->tx_sram_addr;
- }
-
- if (pd->rx_sram_size) {
- mp->rx_sram_size = pd->rx_sram_size;
- mp->rx_sram_addr = pd->rx_sram_addr;
- }
-
- duplex = pd->duplex;
- speed = pd->speed;
+ if (pd->tx_sram_size) {
+ mp->tx_sram_size = pd->tx_sram_size;
+ mp->tx_sram_addr = pd->tx_sram_addr;
}
+ if (pd->rx_sram_size) {
+ mp->rx_sram_size = pd->rx_sram_size;
+ mp->rx_sram_addr = pd->rx_sram_addr;
+ }
+
+ duplex = pd->duplex;
+ speed = pd->speed;
+
+ mp->port_num = port_num;
+
/* Hook up MII support for ethtool */
mp->mii.dev = dev;
mp->mii.mdio_read = mv643xx_mdio_read;
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 5c57433..c6172a7 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -2024,6 +2024,7 @@
struct netdev_private *np = netdev_priv(dev);
void __iomem * ioaddr = ns_ioaddr(dev);
unsigned entry;
+ unsigned long flags;
/* Note: Ordering is important here, set the field with the
"ownership" bit last, and only then increment cur_tx. */
@@ -2037,7 +2038,7 @@
np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]);
- spin_lock_irq(&np->lock);
+ spin_lock_irqsave(&np->lock, flags);
if (!np->hands_off) {
np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len);
@@ -2056,7 +2057,7 @@
dev_kfree_skb_irq(skb);
np->stats.tx_dropped++;
}
- spin_unlock_irq(&np->lock);
+ spin_unlock_irqrestore(&np->lock, flags);
dev->trans_start = jiffies;
@@ -2222,6 +2223,8 @@
pkt_len = (desc_status & DescSizeMask) - 4;
if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){
if (desc_status & DescMore) {
+ unsigned long flags;
+
if (netif_msg_rx_err(np))
printk(KERN_WARNING
"%s: Oversized(?) Ethernet "
@@ -2236,12 +2239,12 @@
* reset procedure documented in
* AN-1287. */
- spin_lock_irq(&np->lock);
+ spin_lock_irqsave(&np->lock, flags);
reset_rx(dev);
reinit_rx(dev);
writel(np->ring_dma, ioaddr + RxRingPtr);
check_link(dev);
- spin_unlock_irq(&np->lock);
+ spin_unlock_irqrestore(&np->lock, flags);
/* We'll enable RX on exit from this
* function. */
@@ -2396,8 +2399,19 @@
#ifdef CONFIG_NET_POLL_CONTROLLER
static void natsemi_poll_controller(struct net_device *dev)
{
+ struct netdev_private *np = netdev_priv(dev);
+
disable_irq(dev->irq);
- intr_handler(dev->irq, dev);
+
+ /*
+ * A real interrupt might have already reached us at this point
+ * but NAPI might still haven't called us back. As the interrupt
+ * status register is cleared by reading, we should prevent an
+ * interrupt loss in this case...
+ */
+ if (!np->intr_status)
+ intr_handler(dev->irq, dev);
+
enable_irq(dev->irq);
}
#endif
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 36f9d98..4d94ba7 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -1234,14 +1234,14 @@
skb_put(skb, pkt_len); /* Make room */
pci_dma_sync_single_for_cpu(lp->pci_dev,
lp->rx_dma_addr[entry],
- PKT_BUF_SZ - 2,
+ pkt_len,
PCI_DMA_FROMDEVICE);
eth_copy_and_sum(skb,
(unsigned char *)(lp->rx_skbuff[entry]->data),
pkt_len, 0);
pci_dma_sync_single_for_device(lp->pci_dev,
lp->rx_dma_addr[entry],
- PKT_BUF_SZ - 2,
+ pkt_len,
PCI_DMA_FROMDEVICE);
}
lp->stats.rx_bytes += skb->len;
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 86e56f1..ebfa296 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -140,7 +140,7 @@
ret = item_hash_table[hash];
- while (ret && !(cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_dev->ifindex == ifindex))
+ while (ret && !(cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_ifindex == ifindex))
ret = ret->next;
return ret;
@@ -153,7 +153,7 @@
ret = item_hash_table[hash];
while (ret) {
- if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa) && ret->pppoe_dev->ifindex == po->pppoe_dev->ifindex)
+ if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa) && ret->pppoe_ifindex == po->pppoe_ifindex)
return -EALREADY;
ret = ret->next;
@@ -174,7 +174,7 @@
src = &item_hash_table[hash];
while (ret) {
- if (cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_dev->ifindex == ifindex) {
+ if (cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_ifindex == ifindex) {
*src = ret->next;
break;
}
@@ -529,7 +529,7 @@
po = pppox_sk(sk);
if (po->pppoe_pa.sid) {
- delete_item(po->pppoe_pa.sid, po->pppoe_pa.remote, po->pppoe_dev->ifindex);
+ delete_item(po->pppoe_pa.sid, po->pppoe_pa.remote, po->pppoe_ifindex);
}
if (po->pppoe_dev)
@@ -577,7 +577,7 @@
pppox_unbind_sock(sk);
/* Delete the old binding */
- delete_item(po->pppoe_pa.sid,po->pppoe_pa.remote,po->pppoe_dev->ifindex);
+ delete_item(po->pppoe_pa.sid,po->pppoe_pa.remote,po->pppoe_ifindex);
if(po->pppoe_dev)
dev_put(po->pppoe_dev);
@@ -597,6 +597,7 @@
goto end;
po->pppoe_dev = dev;
+ po->pppoe_ifindex = dev->ifindex;
if (!(dev->flags & IFF_UP))
goto err_put;
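
The pppoe change stops chasing po->pppoe_dev->ifindex on every hash lookup and compares against an ifindex cached at connect time instead, so the lookups no longer depend on the device pointer still being valid. A small userspace sketch of the caching idea (struct and field names are illustrative, not the kernel's):

#include <stdio.h>
#include <string.h>

struct net_device_toy {
	int  ifindex;
	char name[16];
};

struct pppoe_sock_toy {
	struct net_device_toy *dev;   /* may go away under us */
	int ifindex;                  /* cached copy, always safe to read */
};

static void pppoe_connect_toy(struct pppoe_sock_toy *po,
			      struct net_device_toy *dev)
{
	po->dev = dev;
	po->ifindex = dev->ifindex;   /* cache it once, at bind time */
}

static int match_toy(const struct pppoe_sock_toy *po, int ifindex)
{
	/* compare the cached value instead of dereferencing po->dev */
	return po->ifindex == ifindex;
}

int main(void)
{
	struct net_device_toy eth0 = { .ifindex = 2 };
	struct pppoe_sock_toy po;

	strcpy(eth0.name, "eth0");
	pppoe_connect_toy(&po, &eth0);
	po.dev = NULL;                             /* device unregistered */
	printf("match: %d\n", match_toy(&po, 2));  /* still works */
	return 0;
}
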
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index fb2b530..b3750f2 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -968,10 +968,10 @@
static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr)
{
- int i = 0;
+ int i;
u16 status;
- while (i++ < 2)
+ for (i = 0; i < 2; i++)
status = mdio_read(net_dev, phy_addr, MII_STATUS);
mdio_write( net_dev, phy_addr, MII_CONTROL, MII_CNTL_RESET );
@@ -1430,7 +1430,7 @@
int i = 0;
u32 status;
- while (i++ < 2)
+ for (i = 0; i < 2; i++)
status = mdio_read(net_dev, phy_addr, MII_STATUS);
if (!(status & MII_STAT_LINK)){
@@ -1466,9 +1466,9 @@
int phy_addr = sis_priv->cur_phy;
u32 status;
u16 autoadv, autorec;
- int i = 0;
+ int i;
- while (i++ < 2)
+ for (i = 0; i < 2; i++)
status = mdio_read(net_dev, phy_addr, MII_STATUS);
if (!(status & MII_STAT_LINK))
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index dacea4f..c82befa 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -1685,7 +1685,7 @@
.get_regs = de_get_regs,
};
-static void __init de21040_get_mac_address (struct de_private *de)
+static void __devinit de21040_get_mac_address (struct de_private *de)
{
unsigned i;
@@ -1703,7 +1703,7 @@
}
}
-static void __init de21040_get_media_info(struct de_private *de)
+static void __devinit de21040_get_media_info(struct de_private *de)
{
unsigned int i;
@@ -1765,7 +1765,7 @@
return retval;
}
-static void __init de21041_get_srom_info (struct de_private *de)
+static void __devinit de21041_get_srom_info (struct de_private *de)
{
unsigned i, sa_offset = 0, ofs;
u8 ee_data[DE_EEPROM_SIZE + 6] = {};
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 7f59a3d..24a29c9 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -143,9 +143,16 @@
#define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s" */
#define DMFE_TX_KICK (HZ/2) /* tx packet Kick-out time 0.5 s" */
-#define DMFE_DBUG(dbug_now, msg, value) if (dmfe_debug || (dbug_now)) printk(KERN_ERR DRV_NAME ": %s %lx\n", (msg), (long) (value))
+#define DMFE_DBUG(dbug_now, msg, value) \
+ do { \
+ if (dmfe_debug || (dbug_now)) \
+ printk(KERN_ERR DRV_NAME ": %s %lx\n",\
+ (msg), (long) (value)); \
+ } while (0)
-#define SHOW_MEDIA_TYPE(mode) printk(KERN_ERR DRV_NAME ": Change Speed to %sMhz %s duplex\n",mode & 1 ?"100":"10", mode & 4 ? "full":"half");
+#define SHOW_MEDIA_TYPE(mode) \
+	printk (KERN_INFO DRV_NAME ": Change Speed to %sMbps %s duplex\n", \
+ (mode & 1) ? "100":"10", (mode & 4) ? "full":"half");
/* CR9 definition: SROM/MII */
@@ -163,10 +170,20 @@
#define SROM_V41_CODE 0x14
-#define SROM_CLK_WRITE(data, ioaddr) outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);udelay(5);outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr);udelay(5);outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);udelay(5);
+#define SROM_CLK_WRITE(data, ioaddr) \
+ outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
+ udelay(5); \
+ outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \
+ udelay(5); \
+ outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
+ udelay(5);
-#define __CHK_IO_SIZE(pci_id, dev_rev) ( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x02000030) ) ? DM9102A_IO_SIZE: DM9102_IO_SIZE
-#define CHK_IO_SIZE(pci_dev, dev_rev) __CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, dev_rev)
+#define __CHK_IO_SIZE(pci_id, dev_rev) \
+ (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x02000030) ) ? \
+ DM9102A_IO_SIZE: DM9102_IO_SIZE)
+
+#define CHK_IO_SIZE(pci_dev, dev_rev) \
+ (__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, dev_rev))
/* Sten Check */
#define DEVICE net_device
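
Wrapping DMFE_DBUG in do { ... } while (0) and fully parenthesizing __CHK_IO_SIZE are standard macro hygiene: the former makes a multi-statement macro behave as a single statement under if/else, the latter keeps the ternary from being re-associated by surrounding operators. A standalone illustration (generic macros, not the driver's own):

#include <stdio.h>

/* Unsafe: two statements, breaks under a braceless if/else. */
#define LOG_BAD(msg)  printf("%s\n", msg); fflush(stdout)

/* Safe: behaves like one statement. */
#define LOG_GOOD(msg) do { printf("%s\n", msg); fflush(stdout); } while (0)

/* Unparenthesized ternary changes meaning when composed. */
#define PICK_BAD(x)   (x) ? 100 : 10
#define PICK_GOOD(x)  ((x) ? 100 : 10)

int main(void)
{
	if (0)
		LOG_GOOD("never printed");   /* LOG_BAD here would not even
						compile once an else follows */
	else
		LOG_GOOD("else branch");

	/* 1 + PICK_BAD(0) parses as (1 + 0) ? 100 : 10, i.e. 100 */
	printf("bad:  %d\n", 1 + PICK_BAD(0));
	/* with parentheses we get the intended 1 + 10, i.e. 11 */
	printf("good: %d\n", 1 + PICK_GOOD(0));
	return 0;
}
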
@@ -187,7 +204,7 @@
struct dmfe_board_info {
u32 chip_id; /* Chip vendor/Device ID */
u32 chip_revision; /* Chip revision */
- struct DEVICE *dev; /* net device */
+ struct DEVICE *next_dev; /* next device */
struct pci_dev *pdev; /* PCI device */
spinlock_t lock;
@@ -231,7 +248,6 @@
u8 media_mode; /* user specify media mode */
u8 op_mode; /* real work media mode */
u8 phy_addr;
- u8 link_failed; /* Ever link failed */
u8 wait_reset; /* Hardware failed, need to reset */
u8 dm910x_chk_mode; /* Operating mode check */
u8 first_in_callback; /* Flag to record state */
@@ -329,7 +345,7 @@
static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * );
static void dmfe_set_phyxcer(struct dmfe_board_info *);
-/* DM910X network baord routine ---------------------------- */
+/* DM910X network board routine ---------------------------- */
/*
* Search DM910X board ,allocate space and register it
@@ -356,7 +372,8 @@
SET_NETDEV_DEV(dev, &pdev->dev);
if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
- printk(KERN_WARNING DRV_NAME ": 32-bit PCI DMA not available.\n");
+ printk(KERN_WARNING DRV_NAME
+ ": 32-bit PCI DMA not available.\n");
err = -ENODEV;
goto err_out_free;
}
@@ -399,11 +416,12 @@
/* Init system & device */
db = netdev_priv(dev);
- db->dev = dev;
-
/* Allocate Tx/Rx descriptor memory */
- db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
- db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
+ db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
+ DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
+
+ db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
+ TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
db->first_tx_desc_dma = db->desc_pool_dma_ptr;
@@ -428,7 +446,7 @@
dev->poll_controller = &poll_dmfe;
#endif
dev->ethtool_ops = &netdev_ethtool_ops;
- netif_carrier_off(db->dev);
+ netif_carrier_off(dev);
spin_lock_init(&db->lock);
pci_read_config_dword(pdev, 0x50, &pci_pmr);
@@ -440,7 +458,8 @@
/* read 64 word srom data */
for (i = 0; i < 64; i++)
- ((u16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i));
+ ((u16 *) db->srom)[i] =
+ cpu_to_le16(read_srom_word(db->ioaddr, i));
/* Set Node address */
for (i = 0; i < 6; i++)
@@ -482,14 +501,17 @@
DMFE_DBUG(0, "dmfe_remove_one()", 0);
if (dev) {
+
+ unregister_netdev(dev);
+
pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
db->desc_pool_dma_ptr);
pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
db->buf_pool_ptr, db->buf_pool_dma_ptr);
- unregister_netdev(dev);
pci_release_regions(pdev);
free_netdev(dev); /* free board information */
+
pci_set_drvdata(pdev, NULL);
}
@@ -509,7 +531,8 @@
DMFE_DBUG(0, "dmfe_open", 0);
- ret = request_irq(dev->irq, &dmfe_interrupt, IRQF_SHARED, dev->name, dev);
+ ret = request_irq(dev->irq, &dmfe_interrupt,
+ IRQF_SHARED, dev->name, dev);
if (ret)
return ret;
@@ -518,7 +541,6 @@
db->tx_packet_cnt = 0;
db->tx_queue_cnt = 0;
db->rx_avail_cnt = 0;
- db->link_failed = 1;
db->wait_reset = 0;
db->first_in_callback = 0;
@@ -650,7 +672,8 @@
/* No Tx resource check, it never happens normally */
if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
spin_unlock_irqrestore(&db->lock, flags);
- printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n", db->tx_queue_cnt);
+ printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n",
+ db->tx_queue_cnt);
return 1;
}
@@ -722,7 +745,8 @@
#if 0
/* show statistic counter */
- printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
+ printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx"
+ " LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
db->tx_fifo_underrun, db->tx_excessive_collision,
db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
@@ -905,7 +929,7 @@
static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
{
struct rx_desc *rxptr;
- struct sk_buff *skb;
+ struct sk_buff *skb, *newskb;
int rxlen;
u32 rdes0;
@@ -919,7 +943,9 @@
db->rx_avail_cnt--;
db->interval_rx_cnt++;
- pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2), RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+ pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2),
+ RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+
if ( (rdes0 & 0x300) != 0x300) {
/* A packet without First/Last flag */
/* reuse this SKB */
@@ -956,9 +982,11 @@
} else {
/* Good packet, send to upper layer */
/* Short packets use a new SKB */
- if ( (rxlen < RX_COPY_SIZE) &&
- ( (skb = dev_alloc_skb(rxlen + 2) )
- != NULL) ) {
+ if ((rxlen < RX_COPY_SIZE) &&
+ ((newskb = dev_alloc_skb(rxlen + 2))
+ != NULL)) {
+
+ skb = newskb;
/* size less than COPY_SIZE, allocate a rxlen SKB */
skb->dev = dev;
skb_reserve(skb, 2); /* 16byte align */
@@ -1069,6 +1097,8 @@
struct dmfe_board_info *db = netdev_priv(dev);
unsigned long flags;
+ int link_ok, link_ok_phy;
+
DMFE_DBUG(0, "dmfe_timer()", 0);
spin_lock_irqsave(&db->lock, flags);
@@ -1078,7 +1108,8 @@
if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
db->cr6_data &= ~0x40000;
update_cr6(db->cr6_data, db->ioaddr);
- phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
+ phy_write(db->ioaddr,
+ db->phy_addr, 0, 0x1000, db->chip_id);
db->cr6_data |= 0x40000;
update_cr6(db->cr6_data, db->ioaddr);
db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
@@ -1139,21 +1170,41 @@
(db->chip_revision == 0x02000010)) ) {
/* DM9102A Chip */
if (tmp_cr12 & 2)
- tmp_cr12 = 0x0; /* Link failed */
+ link_ok = 0;
else
- tmp_cr12 = 0x3; /* Link OK */
+ link_ok = 1;
}
+ else
+ /*0x43 is used instead of 0x3 because bit 6 should represent
+ link status of external PHY */
+ link_ok = (tmp_cr12 & 0x43) ? 1 : 0;
- if ( !(tmp_cr12 & 0x3) && !db->link_failed ) {
+
+	/* If the chip reports link failure, it could be because the external
+	   PHY link status pin is not connected correctly to the chip.
+	   To be sure, ask the PHY too.
+ */
+
+	/* need a dummy read because of the PHY's register latch */
+ phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id);
+ link_ok_phy = (phy_read (db->ioaddr,
+ db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0;
+
+ if (link_ok_phy != link_ok) {
+ DMFE_DBUG (0, "PHY and chip report different link status", 0);
+ link_ok = link_ok | link_ok_phy;
+ }
+
+ if ( !link_ok && netif_carrier_ok(dev)) {
/* Link Failed */
DMFE_DBUG(0, "Link Failed", tmp_cr12);
- db->link_failed = 1;
- netif_carrier_off(db->dev);
+ netif_carrier_off(dev);
/* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
/* AUTO or force 1M Homerun/Longrun don't need */
if ( !(db->media_mode & 0x38) )
- phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
+ phy_write(db->ioaddr, db->phy_addr,
+ 0, 0x1000, db->chip_id);
/* AUTO mode, if INT phyxcer link failed, select EXT device */
if (db->media_mode & DMFE_AUTO) {
@@ -1162,21 +1213,19 @@
db->cr6_data&=~0x00000200; /* bit9=0, HD mode */
update_cr6(db->cr6_data, db->ioaddr);
}
- } else
- if ((tmp_cr12 & 0x3) && db->link_failed) {
- DMFE_DBUG(0, "Link link OK", tmp_cr12);
- db->link_failed = 0;
+ } else if (!netif_carrier_ok(dev)) {
- /* Auto Sense Speed */
- if ( (db->media_mode & DMFE_AUTO) &&
- dmfe_sense_speed(db) )
- db->link_failed = 1;
- else
- netif_carrier_on(db->dev);
- dmfe_process_mode(db);
- /* SHOW_MEDIA_TYPE(db->op_mode); */
+ DMFE_DBUG(0, "Link link OK", tmp_cr12);
+
+ /* Auto Sense Speed */
+ if ( !(db->media_mode & DMFE_AUTO) || !dmfe_sense_speed(db)) {
+ netif_carrier_on(dev);
+ SHOW_MEDIA_TYPE(db->op_mode);
}
+ dmfe_process_mode(db);
+ }
+
/* HPNA remote command check */
if (db->HPNA_command & 0xf00) {
db->HPNA_timer--;
@@ -1221,7 +1270,7 @@
db->tx_packet_cnt = 0;
db->tx_queue_cnt = 0;
db->rx_avail_cnt = 0;
- db->link_failed = 1;
+ netif_carrier_off(dev);
db->wait_reset = 0;
/* Re-initilize DM910X board */
@@ -1259,7 +1308,8 @@
if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
rxptr->rx_skb_ptr = skb;
- rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
+ rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev,
+ skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
wmb();
rxptr->rdes0 = cpu_to_le32(0x80000000);
db->rx_avail_cnt++;
@@ -1291,8 +1341,11 @@
outl(db->first_tx_desc_dma, ioaddr + DCR4); /* TX DESC address */
/* rx descriptor start pointer */
- db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT;
- db->first_rx_desc_dma = db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT;
+ db->first_rx_desc = (void *)db->first_tx_desc +
+ sizeof(struct tx_desc) * TX_DESC_CNT;
+
+ db->first_rx_desc_dma = db->first_tx_desc_dma +
+ sizeof(struct tx_desc) * TX_DESC_CNT;
db->rx_insert_ptr = db->first_rx_desc;
db->rx_ready_ptr = db->first_rx_desc;
outl(db->first_rx_desc_dma, ioaddr + DCR3); /* RX DESC address */
@@ -1470,7 +1523,8 @@
if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
break;
rxptr->rx_skb_ptr = skb; /* FIXME (?) */
- rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
+ rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data,
+ RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
wmb();
rxptr->rdes0 = cpu_to_le32(0x80000000);
rxptr = rxptr->next_rx_desc;
@@ -1510,7 +1564,8 @@
for (i = 16; i > 0; i--) {
outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
udelay(5);
- srom_data = (srom_data << 1) | ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
+ srom_data = (srom_data << 1) |
+ ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
udelay(5);
}
@@ -1537,9 +1592,11 @@
if ( (phy_mode & 0x24) == 0x24 ) {
if (db->chip_id == PCI_DM9132_ID) /* DM9132 */
- phy_mode = phy_read(db->ioaddr, db->phy_addr, 7, db->chip_id) & 0xf000;
+ phy_mode = phy_read(db->ioaddr,
+ db->phy_addr, 7, db->chip_id) & 0xf000;
else /* DM9102/DM9102A */
- phy_mode = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0xf000;
+ phy_mode = phy_read(db->ioaddr,
+ db->phy_addr, 17, db->chip_id) & 0xf000;
/* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */
switch (phy_mode) {
case 0x1000: db->op_mode = DMFE_10MHF; break;
@@ -1576,8 +1633,11 @@
/* DM9009 Chip: Phyxcer reg18 bit12=0 */
if (db->chip_id == PCI_DM9009_ID) {
- phy_reg = phy_read(db->ioaddr, db->phy_addr, 18, db->chip_id) & ~0x1000;
- phy_write(db->ioaddr, db->phy_addr, 18, phy_reg, db->chip_id);
+ phy_reg = phy_read(db->ioaddr,
+ db->phy_addr, 18, db->chip_id) & ~0x1000;
+
+ phy_write(db->ioaddr,
+ db->phy_addr, 18, phy_reg, db->chip_id);
}
/* Phyxcer capability setting */
@@ -1650,10 +1710,12 @@
case DMFE_100MHF: phy_reg = 0x2000; break;
case DMFE_100MFD: phy_reg = 0x2100; break;
}
- phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id);
+ phy_write(db->ioaddr,
+ db->phy_addr, 0, phy_reg, db->chip_id);
if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
mdelay(20);
- phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id);
+ phy_write(db->ioaddr,
+ db->phy_addr, 0, phy_reg, db->chip_id);
}
}
}
@@ -1663,7 +1725,8 @@
* Write a word to Phy register
*/
-static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data, u32 chip_id)
+static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset,
+ u16 phy_data, u32 chip_id)
{
u16 i;
unsigned long ioaddr;
@@ -1689,11 +1752,13 @@
/* Send Phy address */
for (i = 0x10; i > 0; i = i >> 1)
- phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
+ phy_write_1bit(ioaddr,
+ phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
/* Send register address */
for (i = 0x10; i > 0; i = i >> 1)
- phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0);
+ phy_write_1bit(ioaddr,
+ offset & i ? PHY_DATA_1 : PHY_DATA_0);
/* written trasnition */
phy_write_1bit(ioaddr, PHY_DATA_1);
@@ -1701,7 +1766,8 @@
/* Write a word data to PHY controller */
for ( i = 0x8000; i > 0; i >>= 1)
- phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
+ phy_write_1bit(ioaddr,
+ phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
}
}
@@ -1738,11 +1804,13 @@
/* Send Phy address */
for (i = 0x10; i > 0; i = i >> 1)
- phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
+ phy_write_1bit(ioaddr,
+ phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
/* Send register address */
for (i = 0x10; i > 0; i = i >> 1)
- phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0);
+ phy_write_1bit(ioaddr,
+ offset & i ? PHY_DATA_1 : PHY_DATA_0);
/* Skip transition state */
phy_read_1bit(ioaddr);
@@ -1963,7 +2031,8 @@
/* Check remote device status match our setting ot not */
if ( phy_reg != (db->HPNA_command & 0x0f00) ) {
- phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
+ phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command,
+ db->chip_id);
db->HPNA_timer=8;
} else
db->HPNA_timer=600; /* Match, every 10 minutes, check */
@@ -2003,8 +2072,11 @@
module_param(HPNA_NoiseFloor, byte, 0);
module_param(SF_mode, byte, 0);
MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
-MODULE_PARM_DESC(mode, "Davicom DM9xxx: Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
-MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function (bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)");
+MODULE_PARM_DESC(mode, "Davicom DM9xxx: "
+ "Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
+
+MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function "
+ "(bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)");
/* Description:
* when user used insmod to add module, system invoked init_module()
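
The link check above reads MII register 1 twice because the link-status bit in the PHY's basic status register is latching-low: the first read returns (and clears) the historical value, the second read reflects the current link state. A toy model of why the dummy read is needed (userspace sketch, register behaviour simplified; bit 0x4 stands in for the MII link bit):

#include <stdio.h>

/* Toy PHY: bit 0x4 of register 1 is "link up", latched low. */
static int phy_link_up = 1;      /* link is up right now */
static int latched_low = 1;      /* link dropped at some point in the past */

static unsigned int phy_read_reg1(void)
{
	unsigned int val = (phy_link_up && !latched_low) ? 0x4 : 0x0;
	latched_low = 0;             /* reading clears the latch */
	return val;
}

int main(void)
{
	/* First read still shows the stale, latched "link down". */
	printf("first read:  link %s\n",
	       (phy_read_reg1() & 0x4) ? "up" : "down");
	/* Second read reports the current state. */
	printf("second read: link %s\n",
	       (phy_read_reg1() & 0x4) ? "up" : "down");
	return 0;
}
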
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 885e73d..dab88b9 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -3598,17 +3598,20 @@
/* Move to next BD in the ring */
if (!(bd_status & T_W))
- ugeth->txBd[txQ] = bd + sizeof(struct qe_bd);
+ bd += sizeof(struct qe_bd);
else
- ugeth->txBd[txQ] = ugeth->p_tx_bd_ring[txQ];
+ bd = ugeth->p_tx_bd_ring[txQ];
/* If the next BD still needs to be cleaned up, then the bds
are full. We need to tell the kernel to stop sending us stuff. */
if (bd == ugeth->confBd[txQ]) {
if (!netif_queue_stopped(dev))
netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
}
+ ugeth->txBd[txQ] = bd;
+
if (ugeth->p_scheduler) {
ugeth->cpucount[txQ]++;
/* Indicate to QE that there are more Tx bds ready for
@@ -3620,7 +3623,7 @@
spin_unlock_irq(&ugeth->lock);
- return 0;
+ return NETDEV_TX_OK;
}
static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit)
@@ -3722,7 +3725,7 @@
/* Handle the transmitted buffer and release */
/* the BD to be used with the current frame */
- if ((bd = ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
+ if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
break;
ugeth->stats.tx_packets++;
@@ -3741,10 +3744,12 @@
/* Advance the confirmation BD pointer */
if (!(bd_status & T_W))
- ugeth->confBd[txQ] += sizeof(struct qe_bd);
+ bd += sizeof(struct qe_bd);
else
- ugeth->confBd[txQ] = ugeth->p_tx_bd_ring[txQ];
+ bd = ugeth->p_tx_bd_ring[txQ];
+ bd_status = in_be32((u32 *)bd);
}
+ ugeth->confBd[txQ] = bd;
return 0;
}
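
The ucc_geth transmit-confirmation hunk also fixes a classic assignment-for-comparison slip: `if ((bd = ugeth->txBd[txQ]) && ...)` assigned instead of compared, so the loop-exit test was wrong. A tiny standalone example of the difference (hypothetical variables):

#include <stdio.h>

int main(void)
{
	int current_bd = 5, tx_bd = 7;

	/* Bug: assigns 7 to current_bd and tests the (nonzero) result. */
	if ((current_bd = tx_bd))
		printf("assignment: always taken, current_bd=%d\n", current_bd);

	current_bd = 5;
	/* Fix: compares the two values. */
	if (current_bd == tx_bd)
		printf("comparison: taken\n");
	else
		printf("comparison: not taken (5 != 7)\n");
	return 0;
}
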
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 68555c1..01869b1 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -38,6 +38,36 @@
return 0;
}
+static void msi_set_enable(struct pci_dev *dev, int enable)
+{
+ int pos;
+ u16 control;
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+ if (pos) {
+ pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
+ control &= ~PCI_MSI_FLAGS_ENABLE;
+ if (enable)
+ control |= PCI_MSI_FLAGS_ENABLE;
+ pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
+ }
+}
+
+static void msix_set_enable(struct pci_dev *dev, int enable)
+{
+ int pos;
+ u16 control;
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+ if (pos) {
+ pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
+ control &= ~PCI_MSIX_FLAGS_ENABLE;
+ if (enable)
+ control |= PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+ }
+}
+
static void msi_set_mask_bit(unsigned int irq, int flag)
{
struct msi_desc *entry;
@@ -55,6 +85,8 @@
mask_bits &= ~(1);
mask_bits |= flag;
pci_write_config_dword(entry->dev, pos, mask_bits);
+ } else {
+ msi_set_enable(entry->dev, !flag);
}
break;
case PCI_CAP_ID_MSIX:
@@ -192,44 +224,6 @@
return entry;
}
-static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
-{
- u16 control;
-
- pci_read_config_word(dev, msi_control_reg(pos), &control);
- if (type == PCI_CAP_ID_MSI) {
- /* Set enabled bits to single MSI & enable MSI_enable bit */
- msi_enable(control, 1);
- pci_write_config_word(dev, msi_control_reg(pos), control);
- dev->msi_enabled = 1;
- } else {
- msix_enable(control);
- pci_write_config_word(dev, msi_control_reg(pos), control);
- dev->msix_enabled = 1;
- }
-
- pci_intx(dev, 0); /* disable intx */
-}
-
-void disable_msi_mode(struct pci_dev *dev, int pos, int type)
-{
- u16 control;
-
- pci_read_config_word(dev, msi_control_reg(pos), &control);
- if (type == PCI_CAP_ID_MSI) {
- /* Set enabled bits to single MSI & enable MSI_enable bit */
- msi_disable(control);
- pci_write_config_word(dev, msi_control_reg(pos), control);
- dev->msi_enabled = 0;
- } else {
- msix_disable(control);
- pci_write_config_word(dev, msi_control_reg(pos), control);
- dev->msix_enabled = 0;
- }
-
- pci_intx(dev, 1); /* enable intx */
-}
-
#ifdef CONFIG_PM
static int __pci_save_msi_state(struct pci_dev *dev)
{
@@ -238,12 +232,11 @@
struct pci_cap_saved_state *save_state;
u32 *cap;
- pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
- if (pos <= 0 || dev->no_msi)
+ if (!dev->msi_enabled)
return 0;
- pci_read_config_word(dev, msi_control_reg(pos), &control);
- if (!(control & PCI_MSI_FLAGS_ENABLE))
+ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+ if (pos <= 0)
return 0;
save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5,
@@ -276,13 +269,18 @@
struct pci_cap_saved_state *save_state;
u32 *cap;
+ if (!dev->msi_enabled)
+ return;
+
save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSI);
pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
if (!save_state || pos <= 0)
return;
cap = &save_state->data[0];
+ pci_intx(dev, 0); /* disable intx */
control = cap[i++] >> 16;
+ msi_set_enable(dev, 0);
pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, cap[i++]);
if (control & PCI_MSI_FLAGS_64BIT) {
pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, cap[i++]);
@@ -292,7 +290,6 @@
if (control & PCI_MSI_FLAGS_MASKBIT)
pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT, cap[i++]);
pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
- enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
pci_remove_saved_cap(save_state);
kfree(save_state);
}
@@ -308,13 +305,11 @@
return 0;
pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
- if (pos <= 0 || dev->no_msi)
+ if (pos <= 0)
return 0;
/* save the capability */
pci_read_config_word(dev, msi_control_reg(pos), &control);
- if (!(control & PCI_MSIX_FLAGS_ENABLE))
- return 0;
save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u16),
GFP_KERNEL);
if (!save_state) {
@@ -376,6 +371,8 @@
return;
/* route the table */
+ pci_intx(dev, 0); /* disable intx */
+ msix_set_enable(dev, 0);
irq = head = dev->first_msi_irq;
while (head != tail) {
entry = get_irq_msi(irq);
@@ -386,7 +383,6 @@
}
pci_write_config_word(dev, msi_control_reg(pos), save);
- enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
}
void pci_restore_msi_state(struct pci_dev *dev)
@@ -411,6 +407,8 @@
int pos, irq;
u16 control;
+ msi_set_enable(dev, 0); /* Ensure msi is disabled as I set it up */
+
pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
pci_read_config_word(dev, msi_control_reg(pos), &control);
/* MSI Entry Initialization */
@@ -454,7 +452,9 @@
set_irq_msi(irq, entry);
/* Set MSI enabled bits */
- enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
+ pci_intx(dev, 0); /* disable intx */
+ msi_set_enable(dev, 1);
+ dev->msi_enabled = 1;
dev->irq = irq;
return 0;
@@ -481,6 +481,8 @@
u8 bir;
void __iomem *base;
+ msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */
+
pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
/* Request & Map MSI-X table region */
pci_read_config_word(dev, msi_control_reg(pos), &control);
@@ -549,7 +551,9 @@
}
dev->first_msi_irq = entries[0].vector;
/* Set MSI-X enabled bits */
- enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
+ pci_intx(dev, 0); /* disable intx */
+ msix_set_enable(dev, 1);
+ dev->msix_enabled = 1;
return 0;
}
@@ -611,12 +615,11 @@
WARN_ON(!!dev->msi_enabled);
/* Check whether driver already requested for MSI-X irqs */
- pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
- if (pos > 0 && dev->msix_enabled) {
- printk(KERN_INFO "PCI: %s: Can't enable MSI. "
- "Device already has MSI-X enabled\n",
- pci_name(dev));
- return -EINVAL;
+ if (dev->msix_enabled) {
+ printk(KERN_INFO "PCI: %s: Can't enable MSI. "
+ "Device already has MSI-X enabled\n",
+ pci_name(dev));
+ return -EINVAL;
}
status = msi_capability_init(dev);
return status;
@@ -625,8 +628,7 @@
void pci_disable_msi(struct pci_dev* dev)
{
struct msi_desc *entry;
- int pos, default_irq;
- u16 control;
+ int default_irq;
if (!pci_msi_enable)
return;
@@ -636,16 +638,9 @@
if (!dev->msi_enabled)
return;
- pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
- if (!pos)
- return;
-
- pci_read_config_word(dev, msi_control_reg(pos), &control);
- if (!(control & PCI_MSI_FLAGS_ENABLE))
- return;
-
-
- disable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
+ msi_set_enable(dev, 0);
+ pci_intx(dev, 1); /* enable intx */
+ dev->msi_enabled = 0;
entry = get_irq_msi(dev->first_msi_irq);
if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
@@ -746,8 +741,7 @@
WARN_ON(!!dev->msix_enabled);
/* Check whether driver already requested for MSI irq */
- if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 &&
- dev->msi_enabled) {
+ if (dev->msi_enabled) {
printk(KERN_INFO "PCI: %s: Can't enable MSI-X. "
"Device already has an MSI irq assigned\n",
pci_name(dev));
@@ -760,8 +754,6 @@
void pci_disable_msix(struct pci_dev* dev)
{
int irq, head, tail = 0, warning = 0;
- int pos;
- u16 control;
if (!pci_msi_enable)
return;
@@ -771,15 +763,9 @@
if (!dev->msix_enabled)
return;
- pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
- if (!pos)
- return;
-
- pci_read_config_word(dev, msi_control_reg(pos), &control);
- if (!(control & PCI_MSIX_FLAGS_ENABLE))
- return;
-
- disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
+ msix_set_enable(dev, 0);
+ pci_intx(dev, 1); /* enable intx */
+ dev->msix_enabled = 0;
irq = head = dev->first_msi_irq;
while (head != tail) {
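
The new msi_set_enable()/msix_set_enable() helpers replace the old enable_msi_mode()/disable_msi_mode() pair with a plain read-modify-write of the capability's control word: read the 16-bit flags, clear the enable bit, optionally set it, write it back, leaving every other bit untouched. A minimal userspace sketch of that pattern on a fake control word (the config accessors here are stand-ins, not the kernel API):

#include <stdio.h>
#include <stdint.h>

#define MSI_FLAGS_ENABLE 0x0001   /* same bit position as PCI_MSI_FLAGS_ENABLE */

static uint16_t fake_config_word = 0x0086;    /* enable clear, other bits set */

static uint16_t config_read(void)    { return fake_config_word; }
static void config_write(uint16_t v) { fake_config_word = v; }

static void msi_set_enable_toy(int enable)
{
	uint16_t control = config_read();

	control &= ~MSI_FLAGS_ENABLE;     /* always start from "disabled" */
	if (enable)
		control |= MSI_FLAGS_ENABLE;
	config_write(control);            /* other flag bits are preserved */
}

int main(void)
{
	msi_set_enable_toy(1);
	printf("enabled:  0x%04x\n", fake_config_word);   /* 0x0087 */
	msi_set_enable_toy(0);
	printf("disabled: 0x%04x\n", fake_config_word);   /* 0x0086 */
	return 0;
}
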
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 1e74e1e..df49530 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -881,13 +881,6 @@
if (atomic_sub_return(1, &dev->enable_cnt) != 0)
return;
- if (dev->msi_enabled)
- disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
- PCI_CAP_ID_MSI);
- if (dev->msix_enabled)
- disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
- PCI_CAP_ID_MSIX);
-
pci_read_config_word(dev, PCI_COMMAND, &pci_command);
if (pci_command & PCI_COMMAND_MASTER) {
pci_command &= ~PCI_COMMAND_MASTER;
@@ -1277,6 +1270,33 @@
}
}
+/**
+ * pci_msi_off - disables any msi or msix capabilities
+ * @dev: the PCI device to operate on
+ *
+ * If you want to use msi see pci_enable_msi and friends.
+ * This is a lower level primitive that allows us to disable
+ * msi operation at the device level.
+ */
+void pci_msi_off(struct pci_dev *dev)
+{
+ int pos;
+ u16 control;
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+ if (pos) {
+ pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
+ control &= ~PCI_MSI_FLAGS_ENABLE;
+ pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
+ }
+ pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+ if (pos) {
+ pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
+ control &= ~PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+ }
+}
+
#ifndef HAVE_ARCH_PCI_SET_DMA_MASK
/*
* These can be overridden by arch-specific implementations
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index a4f2d58..ae7a975 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -46,10 +46,8 @@
extern unsigned int pci_pm_d3_delay;
#ifdef CONFIG_PCI_MSI
-void disable_msi_mode(struct pci_dev *dev, int pos, int type);
void pci_no_msi(void);
#else
-static inline void disable_msi_mode(struct pci_dev *dev, int pos, int type) { }
static inline void pci_no_msi(void) { }
#endif
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 1bf5482..7f94fc0 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1438,8 +1438,8 @@
*/
static void __devinit quirk_pcie_pxh(struct pci_dev *dev)
{
- disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
- PCI_CAP_ID_MSI);
+ pci_msi_off(dev);
+
dev->no_msi = 1;
printk(KERN_WARNING "PCI: PXH quirk detected, "
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 4b8a95f..a1dc8c4 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -461,6 +461,7 @@
cqr->device = device;
cqr->retries = 255;
cqr->expires = 10 * HZ;
+ clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
cqr->cpaddr->cmd_code = DASD_ECKD_CCW_SNSS;
cqr->cpaddr->count = SNSS_DATA_SIZE;
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c
index 7a76ec4..2a1af4e 100644
--- a/drivers/s390/char/tape_std.c
+++ b/drivers/s390/char/tape_std.c
@@ -647,7 +647,10 @@
return PTR_ERR(request);
request->op = TO_NOP;
/* setup ccws */
- *device->modeset_byte = (mt_count == 0) ? 0x00 : 0x08;
+ if (mt_count == 0)
+ *device->modeset_byte &= ~0x08;
+ else
+ *device->modeset_byte |= 0x08;
tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
/* execute it */
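
The tape_std change is the same read-modify-write idea at byte granularity: instead of overwriting *device->modeset_byte with 0x00 or 0x08 (and losing whatever else is encoded there), only the 0x08 bit is cleared or set. In plain C (illustrative values only):

#include <stdio.h>

int main(void)
{
	unsigned char modeset = 0x23;     /* pretend other mode bits are set */

	/* old style: modeset = 0x08; would have wiped the 0x23 bits */
	modeset |= 0x08;                  /* set bit 0x08 only */
	printf("set:   0x%02x\n", modeset);   /* 0x2b */
	modeset &= ~0x08;                 /* clear it, keep the rest */
	printf("clear: 0x%02x\n", modeset);   /* 0x23 */
	return 0;
}
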
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 51238e7..089a3dd 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -144,8 +144,8 @@
ret = stsch(sch->schid, &sch->schib);
if (ret || !sch->schib.pmcw.dnv)
return -ENODEV;
- if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0)
- /* Not operational or no activity -> done. */
+ if (!sch->schib.pmcw.ena)
+ /* Not operational -> done. */
return 0;
/* Stage 1: cancel io. */
if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) &&
@@ -334,20 +334,29 @@
struct ccw_device *cdev;
struct subchannel *sch;
int ret;
+ unsigned long flags;
priv = container_of(work, struct ccw_device_private, kick_work);
cdev = priv->cdev;
+ spin_lock_irqsave(cdev->ccwlock, flags);
sch = to_subchannel(cdev->dev.parent);
- ret = (sch->driver && sch->driver->notify) ?
- sch->driver->notify(&sch->dev, CIO_OPER) : 0;
+ if (sch->driver && sch->driver->notify) {
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+ ret = sch->driver->notify(&sch->dev, CIO_OPER);
+ spin_lock_irqsave(cdev->ccwlock, flags);
+ } else
+ ret = 0;
+ if (ret) {
+ /* Reenable channel measurements, if needed. */
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+ cmf_reenable(cdev);
+ spin_lock_irqsave(cdev->ccwlock, flags);
+ wake_up(&cdev->private->wait_q);
+ }
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
if (!ret)
/* Driver doesn't want device back. */
ccw_device_do_unreg_rereg(work);
- else {
- /* Reenable channel measurements, if needed. */
- cmf_reenable(cdev);
- wake_up(&cdev->private->wait_q);
- }
}
/*
@@ -534,15 +543,21 @@
struct ccw_device *cdev;
struct subchannel *sch;
int ret;
+ unsigned long flags;
priv = container_of(work, struct ccw_device_private, kick_work);
cdev = priv->cdev;
+ spin_lock_irqsave(cdev->ccwlock, flags);
sch = to_subchannel(cdev->dev.parent);
/* Extra sanity. */
if (sch->lpm)
- return;
- ret = (sch->driver && sch->driver->notify) ?
- sch->driver->notify(&sch->dev, CIO_NO_PATH) : 0;
+ goto out_unlock;
+ if (sch->driver && sch->driver->notify) {
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+ ret = sch->driver->notify(&sch->dev, CIO_NO_PATH);
+ spin_lock_irqsave(cdev->ccwlock, flags);
+ } else
+ ret = 0;
if (!ret) {
if (get_device(&sch->dev)) {
/* Driver doesn't want to keep device. */
@@ -562,6 +577,8 @@
cdev->private->state = DEV_STATE_DISCONNECTED;
wake_up(&cdev->private->wait_q);
}
+out_unlock:
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
}
void
@@ -607,10 +624,13 @@
default:
/* Reset oper notify indication after verify error. */
cdev->private->flags.donotify = 0;
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_nopath_notify);
- queue_work(ccw_device_notify_work, &cdev->private->kick_work);
- ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+ if (cdev->online) {
+ PREPARE_WORK(&cdev->private->kick_work,
+ ccw_device_nopath_notify);
+ queue_work(ccw_device_notify_work,
+ &cdev->private->kick_work);
+ } else
+ ccw_device_done(cdev, DEV_STATE_NOT_OPER);
break;
}
}
@@ -756,15 +776,22 @@
ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
struct subchannel *sch;
+ int ret;
sch = to_subchannel(cdev->dev.parent);
- if (sch->driver->notify &&
- sch->driver->notify(&sch->dev, sch->lpm ? CIO_GONE : CIO_NO_PATH)) {
- ccw_device_set_timeout(cdev, 0);
- cdev->private->flags.fake_irb = 0;
- cdev->private->state = DEV_STATE_DISCONNECTED;
- wake_up(&cdev->private->wait_q);
- return;
+ if (sch->driver->notify) {
+ spin_unlock_irq(cdev->ccwlock);
+ ret = sch->driver->notify(&sch->dev,
+ sch->lpm ? CIO_GONE : CIO_NO_PATH);
+ spin_lock_irq(cdev->ccwlock);
+ } else
+ ret = 0;
+ if (ret) {
+ ccw_device_set_timeout(cdev, 0);
+ cdev->private->flags.fake_irb = 0;
+ cdev->private->state = DEV_STATE_DISCONNECTED;
+ wake_up(&cdev->private->wait_q);
+ return;
}
cdev->private->state = DEV_STATE_NOT_OPER;
cio_disable_subchannel(sch);
@@ -969,18 +996,12 @@
sch = to_subchannel(cdev->dev.parent);
ccw_device_set_timeout(cdev, 0);
+ /* Start delayed path verification. */
+ ccw_device_online_verify(cdev, 0);
/* OK, i/o is dead now. Call interrupt handler. */
- cdev->private->state = DEV_STATE_ONLINE;
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
ERR_PTR(-EIO));
- if (!sch->lpm) {
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_nopath_notify);
- queue_work(ccw_device_notify_work, &cdev->private->kick_work);
- } else if (cdev->private->flags.doverify)
- /* Start delayed path verification. */
- ccw_device_online_verify(cdev, 0);
}
static void
@@ -993,21 +1014,8 @@
ccw_device_set_timeout(cdev, 3*HZ);
return;
}
- if (ret == -ENODEV) {
- struct subchannel *sch;
-
- sch = to_subchannel(cdev->dev.parent);
- if (!sch->lpm) {
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_nopath_notify);
- queue_work(ccw_device_notify_work,
- &cdev->private->kick_work);
- } else
- dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
- return;
- }
- //FIXME: Can we get here?
- cdev->private->state = DEV_STATE_ONLINE;
+ /* Start delayed path verification. */
+ ccw_device_online_verify(cdev, 0);
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
ERR_PTR(-EIO));
@@ -1025,26 +1033,11 @@
cdev->private->state = DEV_STATE_TIMEOUT_KILL;
return;
}
- if (ret == -ENODEV) {
- if (!sch->lpm) {
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_nopath_notify);
- queue_work(ccw_device_notify_work,
- &cdev->private->kick_work);
- } else
- dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
- return;
- }
+ /* Start delayed path verification. */
+ ccw_device_online_verify(cdev, 0);
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
ERR_PTR(-EIO));
- if (!sch->lpm) {
- PREPARE_WORK(&cdev->private->kick_work,
- ccw_device_nopath_notify);
- queue_work(ccw_device_notify_work, &cdev->private->kick_work);
- } else
- /* Start delayed path verification. */
- ccw_device_online_verify(cdev, 0);
}
static void
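
The cio state-machine changes above all follow one pattern: take cdev->ccwlock, but release it around sch->driver->notify(), presumably because the callback may take other locks or sleep, and re-take it before touching shared state again. A compact pthreads analogue of that drop-the-lock-around-a-callback pattern (userspace sketch, not the s390 code; build with cc -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int state;

/* Callback provided by someone else; must not be called with 'lock' held. */
static int notify(void)
{
	printf("notify: called without the lock held\n");
	return 1;
}

static void handle_event(void)
{
	int ret;

	pthread_mutex_lock(&lock);
	state = 1;                       /* work that needs the lock */

	pthread_mutex_unlock(&lock);     /* drop it around the callback */
	ret = notify();
	pthread_mutex_lock(&lock);       /* re-take before touching state */

	if (ret)
		state = 2;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	handle_event();
	printf("final state: %d\n", state);
	return 0;
}
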
diff --git a/drivers/serial/dz.c b/drivers/serial/dz.c
index 587d87b..d31721f 100644
--- a/drivers/serial/dz.c
+++ b/drivers/serial/dz.c
@@ -170,8 +170,7 @@
* This routine deals with inputs from any lines.
* ------------------------------------------------------------
*/
-static inline void dz_receive_chars(struct dz_port *dport_in,
- struct pt_regs *regs)
+static inline void dz_receive_chars(struct dz_port *dport_in)
{
struct dz_port *dport;
struct tty_struct *tty = NULL;
@@ -226,7 +225,7 @@
break;
}
- if (uart_handle_sysrq_char(&dport->port, ch, regs))
+ if (uart_handle_sysrq_char(&dport->port, ch))
continue;
if ((status & dport->port.ignore_status_mask) == 0) {
@@ -332,7 +331,7 @@
status = dz_in(dport, DZ_CSR);
if ((status & (DZ_RDONE | DZ_RIE)) == (DZ_RDONE | DZ_RIE))
- dz_receive_chars(dport, regs);
+ dz_receive_chars(dport);
if ((status & (DZ_TRDY | DZ_TIE)) == (DZ_TRDY | DZ_TIE))
dz_transmit_chars(dport);
diff --git a/drivers/serial/mcfserial.c b/drivers/serial/mcfserial.c
index 0843096..99af084 100644
--- a/drivers/serial/mcfserial.c
+++ b/drivers/serial/mcfserial.c
@@ -425,15 +425,13 @@
* -------------------------------------------------------------------
*/
-static void mcfrs_offintr(void *private)
+static void mcfrs_offintr(struct work_struct *work)
{
- struct mcf_serial *info = (struct mcf_serial *) private;
- struct tty_struct *tty;
+ struct mcf_serial *info = container_of(work, struct mcf_serial, tqueue);
+ struct tty_struct *tty = info->tty;
- tty = info->tty;
- if (!tty)
- return;
- tty_wakeup(tty);
+ if (tty)
+ tty_wakeup(tty);
}
@@ -497,16 +495,13 @@
* do_serial_hangup() -> tty->hangup() -> mcfrs_hangup()
*
*/
-static void do_serial_hangup(void *private)
+static void do_serial_hangup(struct work_struct *work)
{
- struct mcf_serial *info = (struct mcf_serial *) private;
- struct tty_struct *tty;
+ struct mcf_serial *info = container_of(work, struct mcf_serial, tqueue_hangup);
+ struct tty_struct *tty = info->tty;
- tty = info->tty;
- if (!tty)
- return;
-
- tty_hangup(tty);
+ if (tty)
+ tty_hangup(tty);
}
static int startup(struct mcf_serial * info)
@@ -857,7 +852,7 @@
#ifdef SERIAL_DEBUG_THROTTLE
char buf[64];
- printk("throttle %s: %d....\n", _tty_name(tty, buf),
+ printk("throttle %s: %d....\n", tty_name(tty, buf),
tty->ldisc.chars_in_buffer(tty));
#endif
@@ -876,7 +871,7 @@
#ifdef SERIAL_DEBUG_THROTTLE
char buf[64];
- printk("unthrottle %s: %d....\n", _tty_name(tty, buf),
+ printk("unthrottle %s: %d....\n", tty_name(tty, buf),
tty->ldisc.chars_in_buffer(tty));
#endif
@@ -1541,8 +1536,8 @@
* External Pin Mask Setting & Enable External Pin for Interface
* mrcbis@aliceposta.it
*/
- unsigned short *serpin_enable_mask;
- serpin_enable_mask = (MCF_IPSBAR + MCF_GPIO_PAR_UART);
+ u16 *serpin_enable_mask;
+ serpin_enable_mask = (u16 *) (MCF_IPSBAR + MCF_GPIO_PAR_UART);
if (info->line == 0)
*serpin_enable_mask |= UART0_ENABLE_MASK;
else if (info->line == 1)
@@ -1551,6 +1546,13 @@
*serpin_enable_mask |= UART2_ENABLE_MASK;
}
#endif
+#if defined(CONFIG_M528x)
+ /* make sure PUAPAR is set for UART0 and UART1 */
+ if (info->line < 2) {
+ volatile unsigned char *portp = (volatile unsigned char *) (MCF_MBAR + MCF5282_GPIO_PUAPAR);
+ *portp |= (0x03 << (info->line * 2));
+ }
+#endif
#elif defined(CONFIG_M520x)
volatile unsigned char *icrp, *uartp;
volatile unsigned long *imrp;
@@ -1783,8 +1785,8 @@
info->event = 0;
info->count = 0;
info->blocked_open = 0;
- INIT_WORK(&info->tqueue, mcfrs_offintr, info);
- INIT_WORK(&info->tqueue_hangup, do_serial_hangup, info);
+ INIT_WORK(&info->tqueue, mcfrs_offintr);
+ INIT_WORK(&info->tqueue_hangup, do_serial_hangup);
init_waitqueue_head(&info->open_wait);
init_waitqueue_head(&info->close_wait);
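
The mcfserial conversion follows the reworked workqueue API: work functions now receive a struct work_struct * and recover their private data with container_of(), instead of being handed an opaque void * at INIT_WORK() time. The essence of container_of() is just pointer arithmetic on offsetof(); a standalone sketch (toy structs, not the driver's):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct_toy { int pending; };

struct serial_info_toy {
	int line;
	struct work_struct_toy tqueue;   /* embedded work item */
};

/* New-style handler: gets the work item, derives the owning struct. */
static void offintr_toy(struct work_struct_toy *work)
{
	struct serial_info_toy *info =
		container_of(work, struct serial_info_toy, tqueue);

	printf("woke up line %d\n", info->line);
}

int main(void)
{
	struct serial_info_toy info = { .line = 3 };

	offintr_toy(&info.tqueue);   /* only the embedded member is passed */
	return 0;
}
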
diff --git a/drivers/usb/input/hid-core.c b/drivers/usb/input/hid-core.c
index 12ec8b4..827a75a 100644
--- a/drivers/usb/input/hid-core.c
+++ b/drivers/usb/input/hid-core.c
@@ -686,10 +686,8 @@
#define USB_DEVICE_ID_SMARTJOY_DUAL_PLUS 0x8802
#define USB_VENDOR_ID_CODEMERCS 0x07c0
-#define USB_DEVICE_ID_CODEMERCS_IOW40 0x1500
-#define USB_DEVICE_ID_CODEMERCS_IOW24 0x1501
-#define USB_DEVICE_ID_CODEMERCS_IOW48 0x1502
-#define USB_DEVICE_ID_CODEMERCS_IOW28 0x1503
+#define USB_DEVICE_ID_CODEMERCS_IOW_FIRST 0x1500
+#define USB_DEVICE_ID_CODEMERCS_IOW_LAST 0x15ff
#define USB_VENDOR_ID_DELORME 0x1163
#define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100
@@ -789,10 +787,6 @@
{ USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1, HID_QUIRK_IGNORE },
{ USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232, HID_QUIRK_IGNORE },
{ USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW40, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW24, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW48, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW28, HID_QUIRK_IGNORE },
{ USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_HIDCOM, HID_QUIRK_IGNORE },
{ USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_ULTRAMOUSE, HID_QUIRK_IGNORE },
{ USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE, HID_QUIRK_IGNORE },
@@ -1070,9 +1064,14 @@
int n, len, insize = 0;
struct usbhid_device *usbhid;
- /* Ignore all Wacom devices */
- if (le16_to_cpu(dev->descriptor.idVendor) == USB_VENDOR_ID_WACOM)
- return NULL;
+ /* Ignore all Wacom devices */
+ if (le16_to_cpu(dev->descriptor.idVendor) == USB_VENDOR_ID_WACOM)
+ return NULL;
+ /* ignore all Code Mercenaries IOWarrior devices */
+ if (le16_to_cpu(dev->descriptor.idVendor) == USB_VENDOR_ID_CODEMERCS)
+ if (le16_to_cpu(dev->descriptor.idProduct) >= USB_DEVICE_ID_CODEMERCS_IOW_FIRST &&
+ le16_to_cpu(dev->descriptor.idProduct) <= USB_DEVICE_ID_CODEMERCS_IOW_LAST)
+ return NULL;
for (n = 0; hid_blacklist[n].idVendor; n++)
if ((hid_blacklist[n].idVendor == le16_to_cpu(dev->descriptor.idVendor)) &&
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 7e7ec29..8e898e3 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -55,7 +55,7 @@
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
-#include <linux/utsrelease.h>
+#include <linux/utsname.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -547,7 +547,7 @@
idesc->bInterfaceSubClass,
idesc->bInterfaceProtocol,
msgs[msg],
- UTS_RELEASE);
+ utsname()->release);
}
return 0;
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index b8f0a11..7f5a598 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -677,8 +677,6 @@
config FB_NVIDIA
tristate "nVidia Framebuffer Support"
depends on FB && PCI
- select I2C_ALGOBIT if FB_NVIDIA_I2C
- select I2C if FB_NVIDIA_I2C
select FB_BACKLIGHT if FB_NVIDIA_BACKLIGHT
select FB_MODE_HELPERS
select FB_CFB_FILLRECT
@@ -697,6 +695,7 @@
config FB_NVIDIA_I2C
bool "Enable DDC Support"
depends on FB_NVIDIA
+ select FB_DDC
help
This enables I2C support for nVidia Chipsets. This is used
only for getting EDID information from the attached display
@@ -716,7 +715,6 @@
config FB_RIVA
tristate "nVidia Riva support"
depends on FB && PCI
- select FB_DDC if FB_RIVA_I2C
select FB_BACKLIGHT if FB_RIVA_BACKLIGHT
select FB_MODE_HELPERS
select FB_CFB_FILLRECT
@@ -734,6 +732,7 @@
config FB_RIVA_I2C
bool "Enable DDC Support"
depends on FB_RIVA
+ select FB_DDC
help
This enables I2C support for nVidia Chipsets. This is used
only for getting EDID information from the attached display
@@ -812,8 +811,6 @@
depends on FB && EXPERIMENTAL && PCI && X86
select AGP
select AGP_INTEL
- select I2C_ALGOBIT if FB_INTEL_I2C
- select I2C if FB_INTEL_I2C
select FB_MODE_HELPERS
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
@@ -846,6 +843,7 @@
config FB_INTEL_I2C
bool "DDC/I2C for Intel framebuffer support"
depends on FB_INTEL
+ select FB_DDC
default y
help
Say Y here if you want DDC/I2C support for your on-board Intel graphics.
@@ -924,8 +922,8 @@
config FB_MATROX_I2C
tristate "Matrox I2C support"
- depends on FB_MATROX && I2C
- select I2C_ALGOBIT
+ depends on FB_MATROX
+ select FB_DDC
---help---
This drivers creates I2C buses which are needed for accessing the
DDC (I2C) bus present on all Matroxes, an I2C bus which
@@ -993,7 +991,6 @@
config FB_RADEON
tristate "ATI Radeon display support"
depends on FB && PCI
- select FB_DDC if FB_RADEON_I2C
select FB_BACKLIGHT if FB_RADEON_BACKLIGHT
select FB_MODE_HELPERS
select FB_CFB_FILLRECT
@@ -1018,6 +1015,7 @@
config FB_RADEON_I2C
bool "DDC/I2C for ATI Radeon support"
depends on FB_RADEON
+ select FB_DDC
default y
help
Say Y here if you want DDC/I2C support for your Radeon board.
@@ -1125,7 +1123,6 @@
config FB_SAVAGE
tristate "S3 Savage support"
depends on FB && PCI && EXPERIMENTAL
- select FB_DDC if FB_SAVAGE_I2C
select FB_MODE_HELPERS
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
@@ -1142,6 +1139,7 @@
config FB_SAVAGE_I2C
bool "Enable DDC2 Support"
depends on FB_SAVAGE
+ select FB_DDC
help
This enables I2C support for S3 Savage Chipsets. This is used
only for getting EDID information from the attached display
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index 8726c36..e86d7e0 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -357,6 +357,12 @@
static int mtrr = 1;
#endif
+#ifdef CONFIG_PMAC_BACKLIGHT
+static int backlight __devinitdata = 1;
+#else
+static int backlight __devinitdata = 0;
+#endif
+
/* PLL constants */
struct aty128_constants {
u32 ref_clk;
@@ -1652,6 +1658,9 @@
} else if (!strncmp(this_opt, "crt:", 4)) {
default_crt_on = simple_strtoul(this_opt+4, NULL, 0);
continue;
+ } else if (!strncmp(this_opt, "backlight:", 10)) {
+ backlight = simple_strtoul(this_opt+10, NULL, 0);
+ continue;
}
#ifdef CONFIG_MTRR
if(!strncmp(this_opt, "nomtrr", 6)) {
@@ -1985,7 +1994,8 @@
par->lock_blank = 0;
#ifdef CONFIG_FB_ATY128_BACKLIGHT
- aty128_bl_init(par);
+ if (backlight)
+ aty128_bl_init(par);
#endif
if (register_framebuffer(info) < 0)
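
Each framebuffer driver here grows the same video= sub-option: a "backlight:<n>" token parsed with strncmp()/simple_strtoul(), defaulting to on only under CONFIG_PMAC_BACKLIGHT. A small userspace sketch of that option-string parsing style (strtoul stands in for simple_strtoul; names are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int backlight = 0;     /* default off, as on non-PowerMac builds */
static int crt_on = 0;

static void parse_opt(const char *this_opt)
{
	if (!strncmp(this_opt, "backlight:", 10))
		backlight = strtoul(this_opt + 10, NULL, 0);
	else if (!strncmp(this_opt, "crt:", 4))
		crt_on = strtoul(this_opt + 4, NULL, 0);
}

int main(void)
{
	/* e.g. video=aty128fb:backlight:1,crt:0 split on commas */
	parse_opt("backlight:1");
	parse_opt("crt:0");
	printf("backlight=%d crt=%d\n", backlight, crt_on);
	return 0;
}
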
diff --git a/drivers/video/aty/atyfb.h b/drivers/video/aty/atyfb.h
index f72faff..dc62f8e 100644
--- a/drivers/video/aty/atyfb.h
+++ b/drivers/video/aty/atyfb.h
@@ -284,7 +284,8 @@
#endif
}
-#if defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || defined (CONFIG_FB_ATY_GENERIC_LCD)
+#if defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || \
+defined (CONFIG_FB_ATY_GENERIC_LCD) || defined (CONFIG_FB_ATY_BACKLIGHT)
extern void aty_st_lcd(int index, u32 val, const struct atyfb_par *par);
extern u32 aty_ld_lcd(int index, const struct atyfb_par *par);
#endif
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index a7e0062..d7627fc 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -131,7 +131,8 @@
#define PRINTKI(fmt, args...) printk(KERN_INFO "atyfb: " fmt, ## args)
#define PRINTKE(fmt, args...) printk(KERN_ERR "atyfb: " fmt, ## args)
-#if defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || defined (CONFIG_FB_ATY_GENERIC_LCD)
+#if defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || \
+defined (CONFIG_FB_ATY_GENERIC_LCD) || defined(CONFIG_FB_ATY_BACKLIGHT)
static const u32 lt_lcd_regs[] = {
CONFIG_PANEL_LG,
LCD_GEN_CNTL_LG,
@@ -308,6 +309,12 @@
static int comp_sync __devinitdata = -1;
static char *mode;
+#ifdef CONFIG_PMAC_BACKLIGHT
+static int backlight __devinitdata = 1;
+#else
+static int backlight __devinitdata = 0;
+#endif
+
#ifdef CONFIG_PPC
static int default_vmode __devinitdata = VMODE_CHOOSE;
static int default_cmode __devinitdata = CMODE_CHOOSE;
@@ -2575,7 +2582,7 @@
| (USE_F32KHZ | TRISTATE_MEM_EN), par);
} else
#endif
- if (M64_HAS(MOBIL_BUS)) {
+ if (M64_HAS(MOBIL_BUS) && backlight) {
#ifdef CONFIG_FB_ATY_BACKLIGHT
aty_bl_init (par);
#endif
@@ -3757,6 +3764,8 @@
xclk = simple_strtoul(this_opt+5, NULL, 0);
else if (!strncmp(this_opt, "comp_sync:", 10))
comp_sync = simple_strtoul(this_opt+10, NULL, 0);
+ else if (!strncmp(this_opt, "backlight:", 10))
+ backlight = simple_strtoul(this_opt+10, NULL, 0);
#ifdef CONFIG_PPC
else if (!strncmp(this_opt, "vmode:", 6)) {
unsigned int vmode =
diff --git a/drivers/video/aty/mach64_ct.c b/drivers/video/aty/mach64_ct.c
index f3b487b..1fdcfdb 100644
--- a/drivers/video/aty/mach64_ct.c
+++ b/drivers/video/aty/mach64_ct.c
@@ -598,7 +598,6 @@
struct atyfb_par *par = info->par;
if (par->mclk_per != par->xclk_per) {
- int i;
/*
* This disables the sclk, crashes the computer as reported:
* aty_st_pll_ct(SPLL_CNTL2, 3, info);
@@ -614,7 +613,7 @@
* helps for Rage Mobilities that sometimes crash when
* we switch to sclk. (Daniel Mantione, 13-05-2003)
*/
- for (i=0;i<=0x1ffff;i++);
+ udelay(500);
}
aty_st_pll_ct(PLL_REF_DIV, pll->ct.pll_ref_div, par);
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index 46ba123..1bf6f42 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -268,6 +268,11 @@
#endif
static int force_sleep;
static int ignore_devlist;
+#ifdef CONFIG_PMAC_BACKLIGHT
+static int backlight = 1;
+#else
+static int backlight = 0;
+#endif
/*
* prototypes
@@ -2348,7 +2353,8 @@
MTRR_TYPE_WRCOMB, 1);
#endif
- radeonfb_bl_init(rinfo);
+ if (backlight)
+ radeonfb_bl_init(rinfo);
printk ("radeonfb (%s): %s\n", pci_name(rinfo->pdev), rinfo->name);
@@ -2469,6 +2475,8 @@
force_dfp = 1;
} else if (!strncmp(this_opt, "panel_yres:", 11)) {
panel_yres = simple_strtoul((this_opt+11), NULL, 0);
+ } else if (!strncmp(this_opt, "backlight:", 10)) {
+ backlight = simple_strtoul(this_opt+10, NULL, 0);
#ifdef CONFIG_MTRR
} else if (!strncmp(this_opt, "nomtrr", 6)) {
nomtrr = 1;
diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
index b7016e9..43f62d8 100644
--- a/drivers/video/nvidia/nv_backlight.c
+++ b/drivers/video/nvidia/nv_backlight.c
@@ -12,6 +12,11 @@
#include <linux/backlight.h>
#include <linux/fb.h>
#include <linux/pci.h>
+
+#ifdef CONFIG_PMAC_BACKLIGHT
+#include <asm/backlight.h>
+#endif
+
#include "nv_local.h"
#include "nv_type.h"
#include "nv_proto.h"
@@ -23,8 +28,6 @@
#define MAX_LEVEL 0x534
#define LEVEL_STEP ((MAX_LEVEL - MIN_LEVEL) / FB_BACKLIGHT_MAX)
-static struct backlight_properties nvidia_bl_data;
-
static int nvidia_bl_get_level_brightness(struct nvidia_par *par,
int level)
{
@@ -119,7 +122,7 @@
0x534 * FB_BACKLIGHT_MAX / MAX_LEVEL);
bd->props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
- bd->props.brightness = nvidia_bl_data.max_brightness;
+ bd->props.brightness = bd->props.max_brightness;
bd->props.power = FB_BLANK_UNBLANK;
backlight_update_status(bd);
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index c18e955..b97ec69 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -83,6 +83,11 @@
#ifdef CONFIG_MTRR
static int nomtrr __devinitdata = 0;
#endif
+#ifdef CONFIG_PMAC_BACKLIGHT
+static int backlight __devinitdata = 1;
+#else
+static int backlight __devinitdata = 0;
+#endif
static char *mode_option __devinitdata = NULL;
@@ -1311,7 +1316,10 @@
nvidia_save_vga(par, &par->SavedReg);
pci_set_drvdata(pd, info);
- nvidia_bl_init(par);
+
+ if (backlight)
+ nvidia_bl_init(par);
+
if (register_framebuffer(info) < 0) {
printk(KERN_ERR PFX "error registering nVidia framebuffer\n");
goto err_out_iounmap_fb;
@@ -1408,6 +1416,8 @@
paneltweak = simple_strtoul(this_opt+11, NULL, 0);
} else if (!strncmp(this_opt, "vram:", 5)) {
vram = simple_strtoul(this_opt+5, NULL, 0);
+ } else if (!strncmp(this_opt, "backlight:", 10)) {
+ backlight = simple_strtoul(this_opt+10, NULL, 0);
#ifdef CONFIG_MTRR
} else if (!strncmp(this_opt, "nomtrr", 6)) {
nomtrr = 1;
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index f8a3d60..1d1c7c6 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -215,6 +215,11 @@
#ifdef CONFIG_MTRR
static int nomtrr __devinitdata = 0;
#endif
+#ifdef CONFIG_PMAC_BACKLIGHT
+static int backlight __devinitdata = 1;
+#else
+static int backlight __devinitdata = 0;
+#endif
static char *mode_option __devinitdata = NULL;
static int strictmode = 0;
@@ -2059,7 +2064,10 @@
info->monspecs.modedb = NULL;
pci_set_drvdata(pd, info);
- riva_bl_init(info->par);
+
+ if (backlight)
+ riva_bl_init(info->par);
+
ret = register_framebuffer(info);
if (ret < 0) {
printk(KERN_ERR PFX
@@ -2157,6 +2165,8 @@
forceCRTC = -1;
} else if (!strncmp(this_opt, "flatpanel", 9)) {
flatpanel = 1;
+ } else if (!strncmp(this_opt, "backlight:", 10)) {
+ backlight = simple_strtoul(this_opt+10, NULL, 0);
#ifdef CONFIG_MTRR
} else if (!strncmp(this_opt, "nomtrr", 6)) {
nomtrr = 1;
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c
index 58c0ac7..0a44c44 100644
--- a/drivers/video/sm501fb.c
+++ b/drivers/video/sm501fb.c
@@ -1074,9 +1074,9 @@
if (len < 1)
return -EINVAL;
- if (strnicmp(buf, "crt", sizeof("crt")) == 0)
+ if (strnicmp(buf, "crt", 3) == 0)
head = HEAD_CRT;
- else if (strnicmp(buf, "panel", sizeof("panel")) == 0)
+ else if (strnicmp(buf, "panel", 5) == 0)
head = HEAD_PANEL;
else
return -EINVAL;
@@ -1098,7 +1098,7 @@
writel(ctrl, info->regs + SM501_DC_CRT_CONTROL);
sm501fb_sync_regs(info);
- return (head == HEAD_CRT) ? 3 : 5;
+ return len;
}
/* Prepare the device_attr for registration with sysfs later */
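
The sm501fb fix is worth spelling out: sizeof("crt") is 4 because it counts the terminating NUL, so strnicmp(buf, "crt", sizeof("crt")) only matched when buf contained the NUL in that position, which a sysfs write such as "crt\n" never does; comparing just the 3 (or 5) visible characters is what was intended. The store method is also changed to return len, the number of bytes consumed, rather than a constant. A quick demonstration of the sizeof pitfall (strncasecmp used in place of the kernel's strnicmp):

#include <stdio.h>
#include <string.h>
#include <strings.h>

int main(void)
{
	const char *buf = "crt\n";    /* typical sysfs write */

	printf("sizeof(\"crt\") = %zu, strlen(\"crt\") = %zu\n",
	       sizeof("crt"), strlen("crt"));

	/* old test: compares 4 bytes including the NUL -> no match */
	printf("with sizeof: %s\n",
	       strncasecmp(buf, "crt", sizeof("crt")) == 0 ? "match" : "no match");
	/* fixed test: compare only the 3 visible characters -> match */
	printf("with 3:      %s\n",
	       strncasecmp(buf, "crt", 3) == 0 ? "match" : "no match");
	return 0;
}
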
diff --git a/fs/buffer.c b/fs/buffer.c
index e8504b6..1d0852f 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2365,6 +2365,10 @@
}
EXPORT_SYMBOL(nobh_prepare_write);
+/*
+ * Make sure any changes to nobh_commit_write() are reflected in
+ * nobh_truncate_page(), since it doesn't call commit_write().
+ */
int nobh_commit_write(struct file *file, struct page *page,
unsigned from, unsigned to)
{
@@ -2466,6 +2470,11 @@
memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
flush_dcache_page(page);
kunmap_atomic(kaddr, KM_USER0);
+ /*
+ * It would be more correct to call aops->commit_write()
+ * here, but this is more efficient.
+ */
+ SetPageUptodate(page);
set_page_dirty(page);
}
unlock_page(page);
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index 5fe1359..6247628 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,3 +1,10 @@
+Version 1.48
+------------
+Fix mtime bouncing around from local idea of last write times to remote time.
+Fix hang (in i_size_read) when a simultaneous size update of the same remote file
+on an smp system corrupts the sequence number. Do not unnecessarily reread a partial page
+(which we are about to overwrite anyway) when writing out a file opened rw.
+
Version 1.47
------------
Fix oops in list_del during mount caused by unaligned string.
diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile
index a26f26e..6ecd9d6 100644
--- a/fs/cifs/Makefile
+++ b/fs/cifs/Makefile
@@ -3,4 +3,4 @@
#
obj-$(CONFIG_CIFS) += cifs.o
-cifs-objs := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o fcntl.o readdir.o ioctl.o sess.o
+cifs-objs := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o fcntl.o readdir.o ioctl.o sess.o export.o
diff --git a/fs/cifs/TODO b/fs/cifs/TODO
index 6837294..d7b9c27 100644
--- a/fs/cifs/TODO
+++ b/fs/cifs/TODO
@@ -18,7 +18,9 @@
d) Kerberos/SPNEGO session setup support - (started)
-e) NTLMv2 authentication (mostly implemented)
+e) NTLMv2 authentication (mostly implemented - double check
+that NTLMv2 signing works; also need to clean up the now-unneeded SessSetup code in
+fs/cifs/connect.c)
f) MD5-HMAC signing SMB PDUs when SPNEGO style SessionSetup
used (Kerberos or NTLMSSP). Signing already implemented for NTLM
@@ -88,11 +90,12 @@
time to the client (default time, of now or time 0 is used now for these
very old servers)
-x) Add support for OS/2 (LANMAN 1.2 and LANMAN2.1 based SMB servers)
+x) For the OS/2 support (LANMAN 1.2 and LANMAN2.1 based SMB servers) we still
+need to add the ability to set the time on the server (utimes command)
y) Finish testing of Windows 9x/Windows ME server support (started).
-KNOWN BUGS (updated April 29, 2005)
+KNOWN BUGS (updated February 26, 2007)
====================================
See http://bugzilla.samba.org - search on product "CifsVFS" for
current bug list.
@@ -107,11 +110,6 @@
succeed but still return access denied (appears to be Windows
server not cifs client problem) and has not been reproduced recently.
NTFS partitions do not have this problem.
-4) debug connectathon lock test case 10 which fails against
-Samba (may be unmappable due to POSIX to Windows lock model
-differences but worth investigating). Also debug Samba to
-see why lock test case 7 takes longer to complete to Samba
-than to Windows.
Misc testing to do
==================
@@ -119,7 +117,7 @@
types. Try nested symlinks (8 deep). Return max path name in stat -f information
2) Modify file portion of ltp so it can run against a mounted network
-share and run it against cifs vfs.
+share and run it against cifs vfs in an automated fashion.
3) Additional performance testing and optimization using iozone and similar -
there are some easy changes that can be done to parallelize sequential writes,
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index bc2c0ac..faba4d6 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -1,7 +1,7 @@
/*
* fs/cifs/cifsfs.c
*
- * Copyright (C) International Business Machines Corp., 2002,2004
+ * Copyright (C) International Business Machines Corp., 2002,2007
* Author(s): Steve French (sfrench@us.ibm.com)
*
* Common Internet FileSystem (CIFS) client
@@ -47,7 +47,11 @@
#ifdef CONFIG_CIFS_QUOTA
static struct quotactl_ops cifs_quotactl_ops;
-#endif
+#endif /* QUOTA */
+
+#ifdef CONFIG_CIFS_EXPERIMENTAL
+extern struct export_operations cifs_export_ops;
+#endif /* EXPERIMENTAL */
int cifsFYI = 0;
int cifsERROR = 1;
@@ -62,8 +66,8 @@
unsigned int sign_CIFS_PDUs = 1;
extern struct task_struct * oplockThread; /* remove sparse warning */
struct task_struct * oplockThread = NULL;
-extern struct task_struct * dnotifyThread; /* remove sparse warning */
-struct task_struct * dnotifyThread = NULL;
+/* extern struct task_struct * dnotifyThread; remove sparse warning */
+static struct task_struct * dnotifyThread = NULL;
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, int, 0);
@@ -110,6 +114,10 @@
sb->s_magic = CIFS_MAGIC_NUMBER;
sb->s_op = &cifs_super_ops;
+#ifdef CONFIG_CIFS_EXPERIMENTAL
+ if (experimEnabled != 0)
+ sb->s_export_op = &cifs_export_ops;
+#endif /* EXPERIMENTAL */
/* if(cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
sb->s_blocksize = cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
#ifdef CONFIG_CIFS_QUOTA
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index c97c08e..2c2c384 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -38,8 +38,8 @@
/* Functions related to super block operations */
/* extern const struct super_operations cifs_super_ops;*/
extern void cifs_read_inode(struct inode *);
-extern void cifs_delete_inode(struct inode *);
-/* extern void cifs_write_inode(struct inode *); *//* BB not needed yet */
+/*extern void cifs_delete_inode(struct inode *);*/ /* BB not needed yet */
+/* extern void cifs_write_inode(struct inode *); */ /* BB not needed yet */
/* Functions related to inodes */
extern const struct inode_operations cifs_dir_inode_ops;
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 74d3ccb..e4de8eb 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -525,15 +525,17 @@
*/
GLOBAL_EXTERN struct smbUidInfo *GlobalUidList[UID_HASH];
-GLOBAL_EXTERN struct list_head GlobalServerList; /* BB not implemented yet */
+/* GLOBAL_EXTERN struct list_head GlobalServerList; BB not implemented yet */
GLOBAL_EXTERN struct list_head GlobalSMBSessionList;
GLOBAL_EXTERN struct list_head GlobalTreeConnectionList;
GLOBAL_EXTERN rwlock_t GlobalSMBSeslock; /* protects list inserts on 3 above */
GLOBAL_EXTERN struct list_head GlobalOplock_Q;
-GLOBAL_EXTERN struct list_head GlobalDnotifyReqList; /* Outstanding dir notify requests */
-GLOBAL_EXTERN struct list_head GlobalDnotifyRsp_Q;/* DirNotify response queue */
+/* Outstanding dir notify requests */
+GLOBAL_EXTERN struct list_head GlobalDnotifyReqList;
+/* DirNotify response queue */
+GLOBAL_EXTERN struct list_head GlobalDnotifyRsp_Q;
/*
* Global transaction id (XID) information
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index 2498d64..0efdf35 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -220,6 +220,9 @@
*/
#define CIFS_NO_HANDLE 0xFFFF
+#define NO_CHANGE_64 0xFFFFFFFFFFFFFFFFULL
+#define NO_CHANGE_32 0xFFFFFFFFUL
+
/* IPC$ in ASCII */
#define CIFS_IPC_RESOURCE "\x49\x50\x43\x24"
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 6148b82..32eb1ac 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -43,7 +43,7 @@
#define FreeXid(curr_xid) {_FreeXid(curr_xid); cFYI(1,("CIFS VFS: leaving %s (xid = %d) rc = %d",__FUNCTION__,curr_xid,(int)rc));}
extern char *build_path_from_dentry(struct dentry *);
extern char *build_wildcard_path_from_dentry(struct dentry *direntry);
-extern void renew_parental_timestamps(struct dentry *direntry);
+/* extern void renew_parental_timestamps(struct dentry *direntry);*/
extern int SendReceive(const unsigned int /* xid */ , struct cifsSesInfo *,
struct smb_hdr * /* input */ ,
struct smb_hdr * /* out */ ,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 2436410..48fc0c2 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -4803,6 +4803,16 @@
pSMB->InformationLevel = cpu_to_le16(SMB_SET_FILE_UNIX_BASIC);
pSMB->Reserved4 = 0;
pSMB->hdr.smb_buf_length += byte_count;
+ /* Samba server ignores setting the file size to zero due to bugs in some
+ older clients, but we should be precise - we use SetFileSize to
+ set the file size and do not want to truncate it to zero
+ accidentally, as happened on one Samba server beta, by putting
+ zero instead of -1 here */
+ data_offset->EndOfFile = NO_CHANGE_64;
+ data_offset->NumOfBytes = NO_CHANGE_64;
+ data_offset->LastStatusChange = NO_CHANGE_64;
+ data_offset->LastAccessTime = NO_CHANGE_64;
+ data_offset->LastModificationTime = NO_CHANGE_64;
data_offset->Uid = cpu_to_le64(uid);
data_offset->Gid = cpu_to_le64(gid);
/* better to leave device as zero when it is */
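
NO_CHANGE_64 and NO_CHANGE_32 encode "leave this field alone" as all ones, matching the SMB UNIX_BASIC convention that -1 means no change; the hunk fills every size and timestamp field with the sentinel so that only the Uid/Gid actually change, instead of accidentally truncating the file to zero. A small illustration of the convention (the struct and field names here are invented for the example):

#include <stdint.h>
#include <stdio.h>

#define NO_CHANGE_64 0xFFFFFFFFFFFFFFFFULL

/* Hypothetical on-the-wire record: -1 in a field means "do not change it". */
struct set_attr_req {
    uint64_t end_of_file;
    uint64_t last_modification_time;
    uint64_t uid;
    uint64_t gid;
};

/* What a well-behaved server side does with such a request. */
static void apply(const struct set_attr_req *req,
                  uint64_t *size, uint64_t *mtime, uint64_t *uid, uint64_t *gid)
{
    if (req->end_of_file != NO_CHANGE_64)
        *size = req->end_of_file;
    if (req->last_modification_time != NO_CHANGE_64)
        *mtime = req->last_modification_time;
    if (req->uid != NO_CHANGE_64)
        *uid = req->uid;
    if (req->gid != NO_CHANGE_64)
        *gid = req->gid;
}

int main(void)
{
    uint64_t size = 4096, mtime = 1000, uid = 0, gid = 0;
    struct set_attr_req req = {
        /* chown to 500:500, leave size and mtime untouched */
        .end_of_file            = NO_CHANGE_64,
        .last_modification_time = NO_CHANGE_64,
        .uid = 500,
        .gid = 500,
    };

    apply(&req, &size, &mtime, &uid, &gid);
    printf("size=%llu mtime=%llu uid=%llu gid=%llu\n",
           (unsigned long long)size, (unsigned long long)mtime,
           (unsigned long long)uid, (unsigned long long)gid);
    return 0;
}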
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 66b825a..3fad638 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -31,7 +31,7 @@
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
-void
+static void
renew_parental_timestamps(struct dentry *direntry)
{
/* BB check if there is a way to get the kernel to do this or if we really need this */
diff --git a/fs/cifs/export.c b/fs/cifs/export.c
new file mode 100644
index 0000000..1d71639
--- /dev/null
+++ b/fs/cifs/export.c
@@ -0,0 +1,52 @@
+/*
+ * fs/cifs/export.c
+ *
+ * Copyright (C) International Business Machines Corp., 2007
+ * Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ * Common Internet FileSystem (CIFS) client
+ *
+ * Operations related to support for exporting files via NFSD
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+ /*
+ * See Documentation/filesystems/Exporting
+ * and examples in fs/exportfs
+ */
+
+#include <linux/fs.h>
+
+#ifdef CONFIG_CIFS_EXPERIMENTAL
+
+static struct dentry *cifs_get_parent(struct dentry *dentry)
+{
+ /* BB need to add code here eventually to enable export via NFSD */
+ return ERR_PTR(-EACCES);
+}
+
+struct export_operations cifs_export_ops = {
+ .get_parent = cifs_get_parent,
+/* Following five export operations are unneeded so far and can default */
+/* .get_dentry =
+ .get_name =
+ .find_exported_dentry =
+ .decode_fh =
+ .encode_fh = */
+ };
+
+#endif /* EXPERIMENTAL */
+
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index a1265c9..2d3275b 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -879,18 +879,19 @@
cifs_stats_bytes_written(pTcon, total_written);
/* since the write may have blocked check these pointers again */
- if (file->f_path.dentry) {
- if (file->f_path.dentry->d_inode) {
- struct inode *inode = file->f_path.dentry->d_inode;
- inode->i_ctime = inode->i_mtime =
- current_fs_time(inode->i_sb);
- if (total_written > 0) {
- if (*poffset > file->f_path.dentry->d_inode->i_size)
- i_size_write(file->f_path.dentry->d_inode,
+ if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
+ struct inode *inode = file->f_path.dentry->d_inode;
+/* Do not update local mtime - server will set its actual value on write
+ * inode->i_ctime = inode->i_mtime =
+ * current_fs_time(inode->i_sb);*/
+ if (total_written > 0) {
+ spin_lock(&inode->i_lock);
+ if (*poffset > file->f_path.dentry->d_inode->i_size)
+ i_size_write(file->f_path.dentry->d_inode,
*poffset);
- }
- mark_inode_dirty_sync(file->f_path.dentry->d_inode);
+ spin_unlock(&inode->i_lock);
}
+ mark_inode_dirty_sync(file->f_path.dentry->d_inode);
}
FreeXid(xid);
return total_written;
@@ -1012,18 +1013,18 @@
cifs_stats_bytes_written(pTcon, total_written);
/* since the write may have blocked check these pointers again */
- if (file->f_path.dentry) {
- if (file->f_path.dentry->d_inode) {
+ if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
/*BB We could make this contingent on superblock ATIME flag too */
-/* file->f_path.dentry->d_inode->i_ctime =
- file->f_path.dentry->d_inode->i_mtime = CURRENT_TIME;*/
- if (total_written > 0) {
- if (*poffset > file->f_path.dentry->d_inode->i_size)
- i_size_write(file->f_path.dentry->d_inode,
- *poffset);
- }
- mark_inode_dirty_sync(file->f_path.dentry->d_inode);
+/* file->f_path.dentry->d_inode->i_ctime =
+ file->f_path.dentry->d_inode->i_mtime = CURRENT_TIME;*/
+ if (total_written > 0) {
+ spin_lock(&file->f_path.dentry->d_inode->i_lock);
+ if (*poffset > file->f_path.dentry->d_inode->i_size)
+ i_size_write(file->f_path.dentry->d_inode,
+ *poffset);
+ spin_unlock(&file->f_path.dentry->d_inode->i_lock);
}
+ mark_inode_dirty_sync(file->f_path.dentry->d_inode);
}
FreeXid(xid);
return total_written;
@@ -1400,6 +1401,7 @@
xid = GetXid();
cFYI(1, ("commit write for page %p up to position %lld for %d",
page, position, to));
+ spin_lock(&inode->i_lock);
if (position > inode->i_size) {
i_size_write(inode, position);
/* if (file->private_data == NULL) {
@@ -1429,6 +1431,7 @@
cFYI(1, (" SetEOF (commit write) rc = %d", rc));
} */
}
+ spin_unlock(&inode->i_lock);
if (!PageUptodate(page)) {
position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset;
/* can not rely on (or let) writepage write this data */
@@ -1989,34 +1992,52 @@
unsigned from, unsigned to)
{
int rc = 0;
- loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
- cFYI(1, ("prepare write for page %p from %d to %d",page,from,to));
- if (!PageUptodate(page)) {
- /* if (to - from != PAGE_CACHE_SIZE) {
- void *kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr, 0, from);
- memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
- } */
- /* If we are writing a full page it will be up to date,
- no need to read from the server */
- if ((to == PAGE_CACHE_SIZE) && (from == 0))
- SetPageUptodate(page);
+ loff_t i_size;
+ loff_t offset;
- /* might as well read a page, it is fast enough */
- if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
- rc = cifs_readpage_worker(file, page, &offset);
- } else {
- /* should we try using another file handle if there is one -
- how would we lock it to prevent close of that handle
- racing with this read?
- In any case this will be written out by commit_write */
- }
+ cFYI(1, ("prepare write for page %p from %d to %d",page,from,to));
+ if (PageUptodate(page))
+ return 0;
+
+ /* If we are writing a full page it will be up to date,
+ no need to read from the server */
+ if ((to == PAGE_CACHE_SIZE) && (from == 0)) {
+ SetPageUptodate(page);
+ return 0;
}
- /* BB should we pass any errors back?
- e.g. if we do not have read access to the file */
+ offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
+ i_size = i_size_read(page->mapping->host);
+
+ if ((offset >= i_size) ||
+ ((from == 0) && (offset + to) >= i_size)) {
+ /*
+ * We don't need to read data beyond the end of the file.
+ * zero it, and set the page uptodate
+ */
+ void *kaddr = kmap_atomic(page, KM_USER0);
+
+ if (from)
+ memset(kaddr, 0, from);
+ if (to < PAGE_CACHE_SIZE)
+ memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
+ flush_dcache_page(page);
+ kunmap_atomic(kaddr, KM_USER0);
+ SetPageUptodate(page);
+ } else if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
+ /* might as well read a page, it is fast enough */
+ rc = cifs_readpage_worker(file, page, &offset);
+ } else {
+ /* we could try using another file handle if there is one -
+ but how would we lock it to prevent close of that handle
+ racing with this read? In any case
+ this will be written out by commit_write so is fine */
+ }
+
+ /* we do not need to pass errors back
+ e.g. if we do not have read access to the file
+ because cifs_commit_write will do the right thing. -- shaggy */
+
return 0;
}
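
The rewritten prepare_write above boils down to one question: does this partial write require fetching the existing page from the server first? It does not if the page is already uptodate, if the whole page is being overwritten, or if the write starts at offset zero and reaches end of file (the tail is simply zeroed). A compact restatement of that decision in plain C (the helper name and test values are invented; the real function also zeroes the page and special-cases O_WRONLY files):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096

/* Decide whether a partial write of bytes [from, to) within one page
 * requires fetching the existing page contents first. */
static bool need_read_before_write(bool page_uptodate, unsigned from,
                                   unsigned to, long long page_offset,
                                   long long file_size)
{
    if (page_uptodate)
        return false;                 /* cached copy is already valid */
    if (from == 0 && to == PAGE_SIZE)
        return false;                 /* the write replaces the whole page */
    if (page_offset >= file_size)
        return false;                 /* page lies entirely past EOF */
    if (from == 0 && page_offset + to >= file_size)
        return false;                 /* write reaches EOF; the tail is just zeroed */
    return true;                      /* genuine read-modify-write */
}

int main(void)
{
    /* Appending 100 bytes at the start of the page just past EOF: no read. */
    printf("%d\n", need_read_before_write(false, 0, 100, 8192, 8192));
    /* Rewriting bytes 100..200 in the middle of an existing file: read first. */
    printf("%d\n", need_read_before_write(false, 100, 200, 0, 8192));
    return 0;
}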
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 37c6ce8..86b9dbb 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -143,10 +143,10 @@
inode->i_gid = le64_to_cpu(findData.Gid);
inode->i_nlink = le64_to_cpu(findData.Nlinks);
+ spin_lock(&inode->i_lock);
if (is_size_safe_to_change(cifsInfo, end_of_file)) {
/* can not safely change the file size here if the
client is writing to it due to potential races */
-
i_size_write(inode, end_of_file);
/* blksize needs to be multiple of two. So safer to default to
@@ -162,6 +162,7 @@
/* for this calculation */
inode->i_blocks = (512 - 1 + num_of_bytes) >> 9;
}
+ spin_unlock(&inode->i_lock);
if (num_of_bytes < end_of_file)
cFYI(1, ("allocation size less than end of file"));
@@ -496,6 +497,8 @@
/* BB add code here -
validate if device or weird share or device type? */
}
+
+ spin_lock(&inode->i_lock);
if (is_size_safe_to_change(cifsInfo, le64_to_cpu(pfindData->EndOfFile))) {
/* can not safely shrink the file size here if the
client is writing to it due to potential races */
@@ -506,6 +509,7 @@
inode->i_blocks = (512 - 1 + le64_to_cpu(
pfindData->AllocationSize)) >> 9;
}
+ spin_unlock(&inode->i_lock);
inode->i_nlink = le32_to_cpu(pfindData->NumberOfLinks);
@@ -834,8 +838,10 @@
if (!rc) {
drop_nlink(inode);
+ spin_lock(&direntry->d_inode->i_lock);
i_size_write(direntry->d_inode,0);
clear_nlink(direntry->d_inode);
+ spin_unlock(&direntry->d_inode->i_lock);
}
cifsInode = CIFS_I(direntry->d_inode);
@@ -1128,6 +1134,52 @@
return rc;
}
+static int cifs_vmtruncate(struct inode * inode, loff_t offset)
+{
+ struct address_space *mapping = inode->i_mapping;
+ unsigned long limit;
+
+ spin_lock(&inode->i_lock);
+ if (inode->i_size < offset)
+ goto do_expand;
+ /*
+ * truncation of in-use swapfiles is disallowed - it would cause
+ * subsequent swapout to scribble on the now-freed blocks.
+ */
+ if (IS_SWAPFILE(inode)) {
+ spin_unlock(&inode->i_lock);
+ goto out_busy;
+ }
+ i_size_write(inode, offset);
+ spin_unlock(&inode->i_lock);
+ unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
+ truncate_inode_pages(mapping, offset);
+ goto out_truncate;
+
+do_expand:
+ limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
+ if (limit != RLIM_INFINITY && offset > limit) {
+ spin_unlock(&inode->i_lock);
+ goto out_sig;
+ }
+ if (offset > inode->i_sb->s_maxbytes) {
+ spin_unlock(&inode->i_lock);
+ goto out_big;
+ }
+ i_size_write(inode, offset);
+ spin_unlock(&inode->i_lock);
+out_truncate:
+ if (inode->i_op && inode->i_op->truncate)
+ inode->i_op->truncate(inode);
+ return 0;
+out_sig:
+ send_sig(SIGXFSZ, current, 0);
+out_big:
+ return -EFBIG;
+out_busy:
+ return -ETXTBSY;
+}
+
int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
{
int xid;
@@ -1244,7 +1296,7 @@
*/
if (rc == 0) {
- rc = vmtruncate(direntry->d_inode, attrs->ia_size);
+ rc = cifs_vmtruncate(direntry->d_inode, attrs->ia_size);
cifs_truncate_page(direntry->d_inode->i_mapping,
direntry->d_inode->i_size);
} else
@@ -1379,9 +1431,11 @@
return rc;
}
+#if 0
void cifs_delete_inode(struct inode *inode)
{
cFYI(1, ("In cifs_delete_inode, inode = 0x%p", inode));
/* may have to add back in if and when safe distributed caching of
directories added e.g. via FindNotify */
}
+#endif
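
cifs_vmtruncate() above is a private copy of vmtruncate() that takes i_lock around the i_size checks and updates; the expand path still enforces RLIMIT_FSIZE and s_maxbytes before growing the file. The same limit check can be done from userspace before an ftruncate(), as a rough sketch of the idea (the kernel additionally sends SIGXFSZ when the limit is exceeded):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <unistd.h>

/* Grow (or shrink) a file, refusing an expansion beyond RLIMIT_FSIZE -
 * the same check cifs_vmtruncate() performs on its do_expand path. */
static int truncate_checked(int fd, off_t new_size, off_t old_size)
{
    if (new_size > old_size) {
        struct rlimit rl;

        if (getrlimit(RLIMIT_FSIZE, &rl) == 0 &&
            rl.rlim_cur != RLIM_INFINITY &&
            (rlim_t)new_size > rl.rlim_cur) {
            errno = EFBIG;            /* the kernel would also raise SIGXFSZ */
            return -1;
        }
    }
    return ftruncate(fd, new_size);
}

int main(void)
{
    int fd = open("/tmp/trunc-demo", O_RDWR | O_CREAT | O_TRUNC, 0600);

    if (fd < 0) {
        perror("open");
        return 1;
    }
    if (truncate_checked(fd, 1 << 20, 0) < 0)
        perror("expand");
    else
        puts("expanded to 1 MiB");
    close(fd);
    unlink("/tmp/trunc-demo");
    return 0;
}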
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index c444798..44cfb52 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -3,7 +3,7 @@
*
* Directory search handling
*
- * Copyright (C) International Business Machines Corp., 2004, 2005
+ * Copyright (C) International Business Machines Corp., 2004, 2007
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
@@ -226,6 +226,7 @@
atomic_set(&cifsInfo->inUse, 1);
}
+ spin_lock(&tmp_inode->i_lock);
if (is_size_safe_to_change(cifsInfo, end_of_file)) {
/* can not safely change the file size here if the
client is writing to it due to potential races */
@@ -235,6 +236,7 @@
/* for this calculation, even though the reported blocksize is larger */
tmp_inode->i_blocks = (512 - 1 + allocation_size) >> 9;
}
+ spin_unlock(&tmp_inode->i_lock);
if (allocation_size < end_of_file)
cFYI(1, ("May be sparse file, allocation less than file size"));
@@ -355,6 +357,7 @@
tmp_inode->i_gid = le64_to_cpu(pfindData->Gid);
tmp_inode->i_nlink = le64_to_cpu(pfindData->Nlinks);
+ spin_lock(&tmp_inode->i_lock);
if (is_size_safe_to_change(cifsInfo, end_of_file)) {
/* can not safely change the file size here if the
client is writing to it due to potential races */
@@ -364,6 +367,7 @@
/* for this calculation, not the real blocksize */
tmp_inode->i_blocks = (512 - 1 + num_of_bytes) >> 9;
}
+ spin_unlock(&tmp_inode->i_lock);
if (S_ISREG(tmp_inode->i_mode)) {
cFYI(1, ("File inode"));
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index f80007e..5f46845 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -499,7 +499,7 @@
due to last connection to this server being unmounted */
if (signal_pending(current)) {
/* if signal pending do not hold up user for full smb timeout
- but we still give response a change to complete */
+ but we still give response a chance to complete */
timeout = 2 * HZ;
}
@@ -587,7 +587,6 @@
}
out:
-
DeleteMidQEntry(midQ);
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
@@ -681,7 +680,7 @@
due to last connection to this server being unmounted */
if (signal_pending(current)) {
/* if signal pending do not hold up user for full smb timeout
- but we still give response a change to complete */
+ but we still give response a chance to complete */
timeout = 2 * HZ;
}
@@ -765,7 +764,6 @@
}
out:
-
DeleteMidQEntry(midQ);
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 0cfff4f..e62f3fc 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -168,9 +168,9 @@
goto out;
}
i_size_write(inode, 0);
- ecryptfs_write_inode_size_to_metadata(lower_file, lower_inode, inode,
- ecryptfs_dentry,
- ECRYPTFS_LOWER_I_MUTEX_NOT_HELD);
+ rc = ecryptfs_write_inode_size_to_metadata(lower_file, lower_inode,
+ inode, ecryptfs_dentry,
+ ECRYPTFS_LOWER_I_MUTEX_NOT_HELD);
ecryptfs_inode_to_private(inode)->crypt_stat.flags |= ECRYPTFS_NEW_FILE;
out:
return rc;
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 812427e..fc4a3a2 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -484,18 +484,12 @@
struct vfsmount *lower_mnt;
memset(&nd, 0, sizeof(struct nameidata));
- rc = path_lookup(dev_name, LOOKUP_FOLLOW, &nd);
+ rc = path_lookup(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &nd);
if (rc) {
ecryptfs_printk(KERN_WARNING, "path_lookup() failed\n");
goto out;
}
lower_root = nd.dentry;
- if (!lower_root->d_inode) {
- ecryptfs_printk(KERN_WARNING,
- "No directory to interpose on\n");
- rc = -ENOENT;
- goto out_free;
- }
lower_mnt = nd.mnt;
ecryptfs_set_superblock_lower(sb, lower_root->d_sb);
sb->s_maxbytes = lower_root->d_sb->s_maxbytes;
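
Adding LOOKUP_DIRECTORY makes path_lookup() itself reject a dev_name that is not a directory, which is why the hand-rolled d_inode check below it can be removed. The userspace equivalent of the same idea is to let open() enforce the constraint with O_DIRECTORY instead of opening and then checking st_mode afterwards:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
    const char *path = argc > 1 ? argv[1] : "/etc/hostname";
    /* Let the lookup enforce "must be a directory" in one step. */
    int fd = open(path, O_RDONLY | O_DIRECTORY);

    if (fd < 0) {
        perror(path);   /* ENOTDIR for a regular file, ENOENT if missing */
        return 1;
    }
    printf("%s is a directory\n", path);
    close(fd);
    return 0;
}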
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index 7be8e91..b731b09 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -446,6 +446,7 @@
const struct address_space_operations *lower_a_ops;
u64 file_size;
+retry:
header_page = grab_cache_page(lower_inode->i_mapping, 0);
if (!header_page) {
ecryptfs_printk(KERN_ERR, "grab_cache_page for "
@@ -456,9 +457,10 @@
lower_a_ops = lower_inode->i_mapping->a_ops;
rc = lower_a_ops->prepare_write(lower_file, header_page, 0, 8);
if (rc) {
- if (rc == AOP_TRUNCATED_PAGE)
+ if (rc == AOP_TRUNCATED_PAGE) {
ecryptfs_release_lower_page(header_page, 0);
- else
+ goto retry;
+ } else
ecryptfs_release_lower_page(header_page, 1);
goto out;
}
@@ -473,9 +475,10 @@
if (rc < 0)
ecryptfs_printk(KERN_ERR, "Error commiting header page "
"write\n");
- if (rc == AOP_TRUNCATED_PAGE)
+ if (rc == AOP_TRUNCATED_PAGE) {
ecryptfs_release_lower_page(header_page, 0);
- else
+ goto retry;
+ } else
ecryptfs_release_lower_page(header_page, 1);
lower_inode->i_mtime = lower_inode->i_ctime = CURRENT_TIME;
mark_inode_dirty_sync(inode);
@@ -502,7 +505,8 @@
goto out;
}
lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry);
- if (!lower_dentry->d_inode->i_op->getxattr) {
+ if (!lower_dentry->d_inode->i_op->getxattr ||
+ !lower_dentry->d_inode->i_op->setxattr) {
printk(KERN_WARNING
"No support for setting xattr in lower filesystem\n");
rc = -ENOSYS;
@@ -564,6 +568,7 @@
{
int rc = 0;
+retry:
*lower_page = grab_cache_page(lower_inode->i_mapping, lower_page_index);
if (!(*lower_page)) {
rc = -EINVAL;
@@ -577,18 +582,18 @@
byte_offset,
region_bytes);
if (rc) {
- ecryptfs_printk(KERN_ERR, "prepare_write for "
+ if (rc == AOP_TRUNCATED_PAGE) {
+ ecryptfs_release_lower_page(*lower_page, 0);
+ goto retry;
+ } else {
+ ecryptfs_printk(KERN_ERR, "prepare_write for "
"lower_page_index = [0x%.16x] failed; rc = "
"[%d]\n", lower_page_index, rc);
+ ecryptfs_release_lower_page(*lower_page, 1);
+ (*lower_page) = NULL;
+ }
}
out:
- if (rc && (*lower_page)) {
- if (rc == AOP_TRUNCATED_PAGE)
- ecryptfs_release_lower_page(*lower_page, 0);
- else
- ecryptfs_release_lower_page(*lower_page, 1);
- (*lower_page) = NULL;
- }
return rc;
}
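
AOP_TRUNCATED_PAGE means the lower page was truncated out from under us and must be released and re-grabbed, so the hunks above turn the one-shot error paths into retry loops. The shape of that pattern, in a self-contained sketch with an invented transient error code standing in for AOP_TRUNCATED_PAGE:

#include <stdio.h>

#define ERR_TRUNCATED (-100)   /* stand-in for AOP_TRUNCATED_PAGE */

static int attempts;

/* A resource acquisition that can transiently fail: here it succeeds on the
 * third try, standing in for grab_cache_page() + prepare_write(). */
static int acquire(void)
{
    return ++attempts < 3 ? ERR_TRUNCATED : 0;
}

static void release(void)
{
    /* stand-in for ecryptfs_release_lower_page(page, 0) */
}

int main(void)
{
    int rc;
retry:
    rc = acquire();
    if (rc == ERR_TRUNCATED) {
        release();               /* drop the stale resource ... */
        goto retry;              /* ... and take it from the top */
    }
    if (rc) {
        fprintf(stderr, "hard failure: %d\n", rc);
        return 1;
    }
    printf("succeeded after %d attempts\n", attempts);
    return 0;
}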
diff --git a/fs/libfs.c b/fs/libfs.c
index cf79196..d93842d 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -190,6 +190,10 @@
.lookup = simple_lookup,
};
+static const struct super_operations simple_super_operations = {
+ .statfs = simple_statfs,
+};
+
/*
* Common helper for pseudo-filesystems (sockfs, pipefs, bdev - stuff that
* will never be mountable)
@@ -199,7 +203,6 @@
struct vfsmount *mnt)
{
struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL);
- static const struct super_operations default_ops = {.statfs = simple_statfs};
struct dentry *dentry;
struct inode *root;
struct qstr d_name = {.name = name, .len = strlen(name)};
@@ -212,7 +215,7 @@
s->s_blocksize = 1024;
s->s_blocksize_bits = 10;
s->s_magic = magic;
- s->s_op = ops ? ops : &default_ops;
+ s->s_op = ops ? ops : &simple_super_operations;
s->s_time_gran = 1;
root = new_inode(s);
if (!root)
@@ -359,7 +362,6 @@
int simple_fill_super(struct super_block *s, int magic, struct tree_descr *files)
{
- static struct super_operations s_ops = {.statfs = simple_statfs};
struct inode *inode;
struct dentry *root;
struct dentry *dentry;
@@ -368,7 +370,7 @@
s->s_blocksize = PAGE_CACHE_SIZE;
s->s_blocksize_bits = PAGE_CACHE_SHIFT;
s->s_magic = magic;
- s->s_op = &s_ops;
+ s->s_op = &simple_super_operations;
s->s_time_gran = 1;
inode = new_inode(s);
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 14939dd..7285c94 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -576,6 +576,12 @@
server->packet = vmalloc(NCP_PACKET_SIZE);
if (server->packet == NULL)
goto out_nls;
+ server->txbuf = vmalloc(NCP_PACKET_SIZE);
+ if (server->txbuf == NULL)
+ goto out_packet;
+ server->rxbuf = vmalloc(NCP_PACKET_SIZE);
+ if (server->rxbuf == NULL)
+ goto out_txbuf;
sock->sk->sk_data_ready = ncp_tcp_data_ready;
sock->sk->sk_error_report = ncp_tcp_error_report;
@@ -597,7 +603,7 @@
error = ncp_connect(server);
ncp_unlock_server(server);
if (error < 0)
- goto out_packet;
+ goto out_rxbuf;
DPRINTK("ncp_fill_super: NCP_SBP(sb) = %x\n", (int) NCP_SBP(sb));
error = -EMSGSIZE; /* -EREMOTESIDEINCOMPATIBLE */
@@ -666,8 +672,12 @@
ncp_lock_server(server);
ncp_disconnect(server);
ncp_unlock_server(server);
-out_packet:
+out_rxbuf:
ncp_stop_tasks(server);
+ vfree(server->rxbuf);
+out_txbuf:
+ vfree(server->txbuf);
+out_packet:
vfree(server->packet);
out_nls:
#ifdef CONFIG_NCPFS_NLS
@@ -723,6 +733,8 @@
kfree(server->priv.data);
kfree(server->auth.object_name);
+ vfree(server->rxbuf);
+ vfree(server->txbuf);
vfree(server->packet);
sb->s_fs_info = NULL;
kfree(server);
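
The new txbuf/rxbuf allocations slot into the existing goto-style unwinding: each failure label frees exactly what was acquired before it, in reverse order, so adding a buffer means adding one allocation, one label, and one vfree(). A minimal standalone version of that pattern:

#include <stdio.h>
#include <stdlib.h>

/* Acquire three buffers; on any failure, free only what was already
 * acquired, in reverse order - the same shape as ncp_fill_super()'s
 * out_rxbuf/out_txbuf/out_packet labels. */
static int setup(char **packet, char **txbuf, char **rxbuf, size_t size)
{
    *packet = malloc(size);
    if (!*packet)
        goto out;
    *txbuf = malloc(size);
    if (!*txbuf)
        goto out_packet;
    *rxbuf = malloc(size);
    if (!*rxbuf)
        goto out_txbuf;
    return 0;

out_txbuf:
    free(*txbuf);
out_packet:
    free(*packet);
out:
    return -1;
}

int main(void)
{
    char *packet, *txbuf, *rxbuf;

    if (setup(&packet, &txbuf, &rxbuf, 4096) == 0) {
        puts("all buffers allocated");
        free(rxbuf);
        free(txbuf);
        free(packet);
    }
    return 0;
}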
diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c
index e496d8b..e37df8d 100644
--- a/fs/ncpfs/sock.c
+++ b/fs/ncpfs/sock.c
@@ -14,6 +14,7 @@
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
+#include <linux/string.h>
#include <asm/uaccess.h>
#include <linux/in.h>
#include <linux/net.h>
@@ -55,10 +56,11 @@
struct ncp_request_reply {
struct list_head req;
wait_queue_head_t wq;
- struct ncp_reply_header* reply_buf;
+ atomic_t refs;
+ unsigned char* reply_buf;
size_t datalen;
int result;
- enum { RQ_DONE, RQ_INPROGRESS, RQ_QUEUED, RQ_IDLE } status;
+ enum { RQ_DONE, RQ_INPROGRESS, RQ_QUEUED, RQ_IDLE, RQ_ABANDONED } status;
struct kvec* tx_ciov;
size_t tx_totallen;
size_t tx_iovlen;
@@ -67,6 +69,32 @@
u_int32_t sign[6];
};
+static inline struct ncp_request_reply* ncp_alloc_req(void)
+{
+ struct ncp_request_reply *req;
+
+ req = kmalloc(sizeof(struct ncp_request_reply), GFP_KERNEL);
+ if (!req)
+ return NULL;
+
+ init_waitqueue_head(&req->wq);
+ atomic_set(&req->refs, 1);
+ req->status = RQ_IDLE;
+
+ return req;
+}
+
+static void ncp_req_get(struct ncp_request_reply *req)
+{
+ atomic_inc(&req->refs);
+}
+
+static void ncp_req_put(struct ncp_request_reply *req)
+{
+ if (atomic_dec_and_test(&req->refs))
+ kfree(req);
+}
+
void ncp_tcp_data_ready(struct sock *sk, int len)
{
struct ncp_server *server = sk->sk_user_data;
@@ -101,14 +129,17 @@
schedule_work(&server->timeout_tq);
}
-static inline void ncp_finish_request(struct ncp_request_reply *req, int result)
+static inline void ncp_finish_request(struct ncp_server *server, struct ncp_request_reply *req, int result)
{
req->result = result;
+ if (req->status != RQ_ABANDONED)
+ memcpy(req->reply_buf, server->rxbuf, req->datalen);
req->status = RQ_DONE;
wake_up_all(&req->wq);
+ ncp_req_put(req);
}
-static void __abort_ncp_connection(struct ncp_server *server, struct ncp_request_reply *aborted, int err)
+static void __abort_ncp_connection(struct ncp_server *server)
{
struct ncp_request_reply *req;
@@ -118,31 +149,19 @@
req = list_entry(server->tx.requests.next, struct ncp_request_reply, req);
list_del_init(&req->req);
- if (req == aborted) {
- ncp_finish_request(req, err);
- } else {
- ncp_finish_request(req, -EIO);
- }
+ ncp_finish_request(server, req, -EIO);
}
req = server->rcv.creq;
if (req) {
server->rcv.creq = NULL;
- if (req == aborted) {
- ncp_finish_request(req, err);
- } else {
- ncp_finish_request(req, -EIO);
- }
+ ncp_finish_request(server, req, -EIO);
server->rcv.ptr = NULL;
server->rcv.state = 0;
}
req = server->tx.creq;
if (req) {
server->tx.creq = NULL;
- if (req == aborted) {
- ncp_finish_request(req, err);
- } else {
- ncp_finish_request(req, -EIO);
- }
+ ncp_finish_request(server, req, -EIO);
}
}
@@ -160,10 +179,12 @@
break;
case RQ_QUEUED:
list_del_init(&req->req);
- ncp_finish_request(req, err);
+ ncp_finish_request(server, req, err);
break;
case RQ_INPROGRESS:
- __abort_ncp_connection(server, req, err);
+ req->status = RQ_ABANDONED;
+ break;
+ case RQ_ABANDONED:
break;
}
}
@@ -177,7 +198,7 @@
static inline void __ncptcp_abort(struct ncp_server *server)
{
- __abort_ncp_connection(server, NULL, 0);
+ __abort_ncp_connection(server);
}
static int ncpdgram_send(struct socket *sock, struct ncp_request_reply *req)
@@ -294,6 +315,11 @@
static inline void __ncp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
{
+ /* we copy the data so that we do not depend on the caller
+ staying alive */
+ memcpy(server->txbuf, req->tx_iov[1].iov_base, req->tx_iov[1].iov_len);
+ req->tx_iov[1].iov_base = server->txbuf;
+
if (server->ncp_sock->type == SOCK_STREAM)
ncptcp_start_request(server, req);
else
@@ -308,6 +334,7 @@
printk(KERN_ERR "ncpfs: tcp: Server died\n");
return -EIO;
}
+ ncp_req_get(req);
if (server->tx.creq || server->rcv.creq) {
req->status = RQ_QUEUED;
list_add_tail(&req->req, &server->tx.requests);
@@ -409,7 +436,7 @@
server->timeout_last = NCP_MAX_RPC_TIMEOUT;
mod_timer(&server->timeout_tm, jiffies + NCP_MAX_RPC_TIMEOUT);
} else if (reply.type == NCP_REPLY) {
- result = _recv(sock, (void*)req->reply_buf, req->datalen, MSG_DONTWAIT);
+ result = _recv(sock, server->rxbuf, req->datalen, MSG_DONTWAIT);
#ifdef CONFIG_NCPFS_PACKET_SIGNING
if (result >= 0 && server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) {
if (result < 8 + 8) {
@@ -419,7 +446,7 @@
result -= 8;
hdrl = sock->sk->sk_family == AF_INET ? 8 : 6;
- if (sign_verify_reply(server, ((char*)req->reply_buf) + hdrl, result - hdrl, cpu_to_le32(result), ((char*)req->reply_buf) + result)) {
+ if (sign_verify_reply(server, server->rxbuf + hdrl, result - hdrl, cpu_to_le32(result), server->rxbuf + result)) {
printk(KERN_INFO "ncpfs: Signature violation\n");
result = -EIO;
}
@@ -428,7 +455,7 @@
#endif
del_timer(&server->timeout_tm);
server->rcv.creq = NULL;
- ncp_finish_request(req, result);
+ ncp_finish_request(server, req, result);
__ncp_next_request(server);
mutex_unlock(&server->rcv.creq_mutex);
continue;
@@ -478,12 +505,6 @@
mutex_unlock(&server->rcv.creq_mutex);
}
-static inline void ncp_init_req(struct ncp_request_reply* req)
-{
- init_waitqueue_head(&req->wq);
- req->status = RQ_IDLE;
-}
-
static int do_tcp_rcv(struct ncp_server *server, void *buffer, size_t len)
{
int result;
@@ -601,8 +622,8 @@
goto skipdata;
}
req->datalen = datalen - 8;
- req->reply_buf->type = NCP_REPLY;
- server->rcv.ptr = (unsigned char*)(req->reply_buf) + 2;
+ ((struct ncp_reply_header*)server->rxbuf)->type = NCP_REPLY;
+ server->rcv.ptr = server->rxbuf + 2;
server->rcv.len = datalen - 10;
server->rcv.state = 1;
break;
@@ -615,12 +636,12 @@
case 1:
req = server->rcv.creq;
if (req->tx_type != NCP_ALLOC_SLOT_REQUEST) {
- if (req->reply_buf->sequence != server->sequence) {
+ if (((struct ncp_reply_header*)server->rxbuf)->sequence != server->sequence) {
printk(KERN_ERR "ncpfs: tcp: Bad sequence number\n");
__ncp_abort_request(server, req, -EIO);
return -EIO;
}
- if ((req->reply_buf->conn_low | (req->reply_buf->conn_high << 8)) != server->connection) {
+ if ((((struct ncp_reply_header*)server->rxbuf)->conn_low | (((struct ncp_reply_header*)server->rxbuf)->conn_high << 8)) != server->connection) {
printk(KERN_ERR "ncpfs: tcp: Connection number mismatch\n");
__ncp_abort_request(server, req, -EIO);
return -EIO;
@@ -628,14 +649,14 @@
}
#ifdef CONFIG_NCPFS_PACKET_SIGNING
if (server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) {
- if (sign_verify_reply(server, (unsigned char*)(req->reply_buf) + 6, req->datalen - 6, cpu_to_be32(req->datalen + 16), &server->rcv.buf.type)) {
+ if (sign_verify_reply(server, server->rxbuf + 6, req->datalen - 6, cpu_to_be32(req->datalen + 16), &server->rcv.buf.type)) {
printk(KERN_ERR "ncpfs: tcp: Signature violation\n");
__ncp_abort_request(server, req, -EIO);
return -EIO;
}
}
#endif
- ncp_finish_request(req, req->datalen);
+ ncp_finish_request(server, req, req->datalen);
nextreq:;
__ncp_next_request(server);
case 2:
@@ -645,7 +666,7 @@
server->rcv.state = 0;
break;
case 3:
- ncp_finish_request(server->rcv.creq, -EIO);
+ ncp_finish_request(server, server->rcv.creq, -EIO);
goto nextreq;
case 5:
info_server(server, 0, server->unexpected_packet.data, server->unexpected_packet.len);
@@ -675,28 +696,39 @@
}
static int do_ncp_rpc_call(struct ncp_server *server, int size,
- struct ncp_reply_header* reply_buf, int max_reply_size)
+ unsigned char* reply_buf, int max_reply_size)
{
int result;
- struct ncp_request_reply req;
+ struct ncp_request_reply *req;
- ncp_init_req(&req);
- req.reply_buf = reply_buf;
- req.datalen = max_reply_size;
- req.tx_iov[1].iov_base = server->packet;
- req.tx_iov[1].iov_len = size;
- req.tx_iovlen = 1;
- req.tx_totallen = size;
- req.tx_type = *(u_int16_t*)server->packet;
+ req = ncp_alloc_req();
+ if (!req)
+ return -ENOMEM;
- result = ncp_add_request(server, &req);
- if (result < 0) {
- return result;
+ req->reply_buf = reply_buf;
+ req->datalen = max_reply_size;
+ req->tx_iov[1].iov_base = server->packet;
+ req->tx_iov[1].iov_len = size;
+ req->tx_iovlen = 1;
+ req->tx_totallen = size;
+ req->tx_type = *(u_int16_t*)server->packet;
+
+ result = ncp_add_request(server, req);
+ if (result < 0)
+ goto out;
+
+ if (wait_event_interruptible(req->wq, req->status == RQ_DONE)) {
+ ncp_abort_request(server, req, -EINTR);
+ result = -EINTR;
+ goto out;
}
- if (wait_event_interruptible(req.wq, req.status == RQ_DONE)) {
- ncp_abort_request(server, &req, -EIO);
- }
- return req.result;
+
+ result = req->result;
+
+out:
+ ncp_req_put(req);
+
+ return result;
}
/*
@@ -751,11 +783,6 @@
DDPRINTK("do_ncp_rpc_call returned %d\n", result);
- if (result < 0) {
- /* There was a problem with I/O, so the connections is
- * no longer usable. */
- ncp_invalidate_conn(server);
- }
return result;
}
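
The heart of the ncpfs change is that requests are now heap-allocated and reference counted, and replies are staged in server->rxbuf, so a caller interrupted by a signal can abandon its request (RQ_ABANDONED) without the receive path later writing into freed stack memory. A userspace sketch of the get/put discipline, using C11 atomics where the kernel code uses atomic_t (all names here are invented for the example):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct request {
    atomic_int refs;
    int result;
};

static struct request *request_alloc(void)
{
    struct request *req = malloc(sizeof(*req));

    if (!req)
        return NULL;
    atomic_init(&req->refs, 1);   /* the submitter's reference */
    req->result = 0;
    return req;
}

static void request_get(struct request *req)
{
    atomic_fetch_add(&req->refs, 1);
}

static void request_put(struct request *req)
{
    /* The last reference down frees the object, whoever drops it -
     * the submitter or the receive path, in either order. */
    if (atomic_fetch_sub(&req->refs, 1) == 1)
        free(req);
}

int main(void)
{
    struct request *req = request_alloc();

    if (!req)
        return 1;
    request_get(req);        /* the "receive path" takes its own reference */

    request_put(req);        /* submitter gives up (e.g. interrupted) ...  */
    req->result = 42;        /* ... yet the other holder can still use it  */
    printf("result = %d\n", req->result);
    request_put(req);        /* final put frees the request */
    return 0;
}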
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 8813990..85a6686 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -431,6 +431,8 @@
new_parent_dentry = new_parent ?
new_parent->dentry : sysfs_mount->mnt_sb->s_root;
+ if (old_parent_dentry->d_inode == new_parent_dentry->d_inode)
+ return 0; /* nothing to move */
again:
mutex_lock(&old_parent_dentry->d_inode->i_mutex);
if (!mutex_trylock(&new_parent_dentry->d_inode->i_mutex)) {
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index dd1344b..ccb7d72 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -227,11 +227,8 @@
mutex_lock_nested(&node->i_mutex, I_MUTEX_CHILD);
if (node->i_private) {
- list_for_each_entry(buf, &set->associates, associates) {
- down(&buf->sem);
+ list_for_each_entry(buf, &set->associates, associates)
buf->orphaned = 1;
- up(&buf->sem);
- }
}
mutex_unlock(&node->i_mutex);
}
diff --git a/include/asm-generic/page.h b/include/asm-generic/page.h
index b55052c..a96b5d9 100644
--- a/include/asm-generic/page.h
+++ b/include/asm-generic/page.h
@@ -4,51 +4,21 @@
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
-#include <linux/log2.h>
+#include <linux/compiler.h>
-/*
- * non-const pure 2^n version of get_order
- * - the arch may override these in asm/bitops.h if they can be implemented
- * more efficiently than using the arch log2 routines
- * - we use the non-const log2() instead if the arch has defined one suitable
- */
-#ifndef ARCH_HAS_GET_ORDER
-static inline __attribute__((const))
-int __get_order(unsigned long size, int page_shift)
+/* Pure 2^n version of get_order */
+static __inline__ __attribute_const__ int get_order(unsigned long size)
{
-#if BITS_PER_LONG == 32 && defined(ARCH_HAS_ILOG2_U32)
- int order = __ilog2_u32(size) - page_shift;
- return order >= 0 ? order : 0;
-#elif BITS_PER_LONG == 64 && defined(ARCH_HAS_ILOG2_U64)
- int order = __ilog2_u64(size) - page_shift;
- return order >= 0 ? order : 0;
-#else
int order;
- size = (size - 1) >> (page_shift - 1);
+ size = (size - 1) >> (PAGE_SHIFT - 1);
order = -1;
do {
size >>= 1;
order++;
} while (size);
return order;
-#endif
}
-#endif
-
-/**
- * get_order - calculate log2(pages) to hold a block of the specified size
- * @n - size
- *
- * calculate allocation order based on the current page size
- * - this can be used to initialise global variables from constant data
- */
-#define get_order(n) \
-( \
- __builtin_constant_p(n) ? \
- ((n < (1UL << PAGE_SHIFT)) ? 0 : ilog2(n) - PAGE_SHIFT) : \
- __get_order(n, PAGE_SHIFT) \
- )
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
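
The simplified get_order() computes the smallest order such that 2^order pages cover the requested size. The same arithmetic compiled and exercised in userspace, with PAGE_SHIFT hard-wired to 12 (4 KiB pages) for the demonstration:

#include <stdio.h>

#define PAGE_SHIFT 12   /* assume 4 KiB pages for this demo */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Same algorithm as the asm-generic get_order() above:
 * shift (size - 1) down and count how many halvings remain. */
static int get_order(unsigned long size)
{
    int order;

    size = (size - 1) >> (PAGE_SHIFT - 1);
    order = -1;
    do {
        size >>= 1;
        order++;
    } while (size);
    return order;
}

int main(void)
{
    unsigned long sizes[] = { 1, PAGE_SIZE, PAGE_SIZE + 1,
                              4 * PAGE_SIZE, 4 * PAGE_SIZE + 1 };

    for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
        printf("get_order(%lu) = %d\n", sizes[i], get_order(sizes[i]));
    return 0;
}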
diff --git a/include/asm-i386/delay.h b/include/asm-i386/delay.h
index 32d6678..9ae5e37 100644
--- a/include/asm-i386/delay.h
+++ b/include/asm-i386/delay.h
@@ -16,13 +16,6 @@
extern void __const_udelay(unsigned long usecs);
extern void __delay(unsigned long loops);
-#if defined(CONFIG_PARAVIRT) && !defined(USE_REAL_TIME_DELAY)
-#define udelay(n) paravirt_ops.const_udelay((n) * 0x10c7ul)
-
-#define ndelay(n) paravirt_ops.const_udelay((n) * 5ul)
-
-#else /* !PARAVIRT || USE_REAL_TIME_DELAY */
-
/* 0x10c7 is 2**32 / 1000000 (rounded up) */
#define udelay(n) (__builtin_constant_p(n) ? \
((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \
@@ -32,7 +25,6 @@
#define ndelay(n) (__builtin_constant_p(n) ? \
((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
__ndelay(n))
-#endif
void use_tsc_delay(void);
diff --git a/include/asm-i386/io_apic.h b/include/asm-i386/io_apic.h
index 059a9ff..3407640 100644
--- a/include/asm-i386/io_apic.h
+++ b/include/asm-i386/io_apic.h
@@ -3,6 +3,7 @@
#include <asm/types.h>
#include <asm/mpspec.h>
+#include <asm/apicdef.h>
/*
* Intel IO-APIC support for SMP and UP systems.
diff --git a/include/asm-i386/nmi.h b/include/asm-i386/nmi.h
index b04333e..64544cb 100644
--- a/include/asm-i386/nmi.h
+++ b/include/asm-i386/nmi.h
@@ -33,7 +33,7 @@
extern atomic_t nmi_active;
extern unsigned int nmi_watchdog;
-#define NMI_DEFAULT -1
+#define NMI_DEFAULT 0
#define NMI_NONE 0
#define NMI_IO_APIC 1
#define NMI_LOCAL_APIC 2
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
index 6317e0a..f8319ca 100644
--- a/include/asm-i386/paravirt.h
+++ b/include/asm-i386/paravirt.h
@@ -94,6 +94,8 @@
u64 (*read_tsc)(void);
u64 (*read_pmc)(void);
+ u64 (*get_scheduled_cycles)(void);
+ unsigned long (*get_cpu_khz)(void);
void (*load_tr_desc)(void);
void (*load_gdt)(const struct Xgt_desc_struct *);
@@ -115,7 +117,6 @@
void (*set_iopl_mask)(unsigned mask);
void (*io_delay)(void);
- void (*const_udelay)(unsigned long loops);
#ifdef CONFIG_X86_LOCAL_APIC
void (*apic_write)(unsigned long reg, unsigned long v);
@@ -129,6 +130,8 @@
void (*flush_tlb_kernel)(void);
void (*flush_tlb_single)(u32 addr);
+ void (fastcall *map_pt_hook)(int type, pte_t *va, u32 pfn);
+
void (*alloc_pt)(u32 pfn);
void (*alloc_pd)(u32 pfn);
void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
@@ -183,9 +186,9 @@
return paravirt_ops.set_wallclock(nowtime);
}
-static inline void do_time_init(void)
+static inline void (*choose_time_init(void))(void)
{
- return paravirt_ops.time_init();
+ return paravirt_ops.time_init;
}
/* The paravirtualized CPUID instruction. */
@@ -273,6 +276,9 @@
#define rdtscll(val) (val = paravirt_ops.read_tsc())
+#define get_scheduled_cycles(val) (val = paravirt_ops.get_scheduled_cycles())
+#define calculate_cpu_khz() (paravirt_ops.get_cpu_khz())
+
#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
#define rdpmc(counter,low,high) do { \
@@ -349,6 +355,8 @@
#define __flush_tlb_global() paravirt_ops.flush_tlb_kernel()
#define __flush_tlb_single(addr) paravirt_ops.flush_tlb_single(addr)
+#define paravirt_map_pt_hook(type, va, pfn) paravirt_ops.map_pt_hook(type, va, pfn)
+
#define paravirt_alloc_pt(pfn) paravirt_ops.alloc_pt(pfn)
#define paravirt_release_pt(pfn) paravirt_ops.release_pt(pfn)
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index e6a4723..c3b58d4 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -263,6 +263,7 @@
*/
#define pte_update(mm, addr, ptep) do { } while (0)
#define pte_update_defer(mm, addr, ptep) do { } while (0)
+#define paravirt_map_pt_hook(slot, va, pfn) do { } while (0)
#endif
/*
@@ -469,10 +470,24 @@
#endif
#if defined(CONFIG_HIGHPTE)
-#define pte_offset_map(dir, address) \
- ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
-#define pte_offset_map_nested(dir, address) \
- ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
+#define pte_offset_map(dir, address) \
+({ \
+ pte_t *__ptep; \
+ unsigned pfn = pmd_val(*(dir)) >> PAGE_SHIFT; \
+ __ptep = (pte_t *)kmap_atomic(pfn_to_page(pfn),KM_PTE0);\
+ paravirt_map_pt_hook(KM_PTE0,__ptep, pfn); \
+ __ptep = __ptep + pte_index(address); \
+ __ptep; \
+})
+#define pte_offset_map_nested(dir, address) \
+({ \
+ pte_t *__ptep; \
+ unsigned pfn = pmd_val(*(dir)) >> PAGE_SHIFT; \
+ __ptep = (pte_t *)kmap_atomic(pfn_to_page(pfn),KM_PTE1);\
+ paravirt_map_pt_hook(KM_PTE1,__ptep, pfn); \
+ __ptep = __ptep + pte_index(address); \
+ __ptep; \
+})
#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
#else
diff --git a/include/asm-i386/time.h b/include/asm-i386/time.h
index 571b429..eac0113 100644
--- a/include/asm-i386/time.h
+++ b/include/asm-i386/time.h
@@ -28,14 +28,16 @@
return retval;
}
+extern void (*late_time_init)(void);
+extern void hpet_time_init(void);
+
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
-extern unsigned long long native_sched_clock(void);
#else /* !CONFIG_PARAVIRT */
#define get_wallclock() native_get_wallclock()
#define set_wallclock(x) native_set_wallclock(x)
-#define do_time_init() time_init_hook()
+#define choose_time_init() hpet_time_init
#endif /* CONFIG_PARAVIRT */
diff --git a/include/asm-i386/timer.h b/include/asm-i386/timer.h
index 4752c3a..12dd67b 100644
--- a/include/asm-i386/timer.h
+++ b/include/asm-i386/timer.h
@@ -4,13 +4,21 @@
#include <linux/pm.h>
#define TICK_SIZE (tick_nsec / 1000)
+
void setup_pit_timer(void);
+unsigned long long native_sched_clock(void);
+unsigned long native_calculate_cpu_khz(void);
+
/* Modifiers for buggy PIT handling */
extern int pit_latch_buggy;
extern int timer_ack;
extern int no_timer_check;
-extern unsigned long long (*custom_sched_clock)(void);
extern int no_sync_cmos_clock;
extern int recalibrate_cpu_khz(void);
+#ifndef CONFIG_PARAVIRT
+#define get_scheduled_cycles(val) rdtscll(val)
+#define calculate_cpu_khz() native_calculate_cpu_khz()
+#endif
+
#endif
diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h
index ac58580..7fc512d 100644
--- a/include/asm-i386/topology.h
+++ b/include/asm-i386/topology.h
@@ -85,7 +85,6 @@
.idle_idx = 1, \
.newidle_idx = 2, \
.wake_idx = 1, \
- .per_cpu_gain = 100, \
.flags = SD_LOAD_BALANCE \
| SD_BALANCE_EXEC \
| SD_BALANCE_FORK \
diff --git a/include/asm-i386/tsc.h b/include/asm-i386/tsc.h
index e997891..84016ff 100644
--- a/include/asm-i386/tsc.h
+++ b/include/asm-i386/tsc.h
@@ -1 +1,67 @@
-#include <asm-x86_64/tsc.h>
+/*
+ * linux/include/asm-i386/tsc.h
+ *
+ * i386 TSC related functions
+ */
+#ifndef _ASM_i386_TSC_H
+#define _ASM_i386_TSC_H
+
+#include <asm/processor.h>
+
+/*
+ * Standard way to access the cycle counter.
+ */
+typedef unsigned long long cycles_t;
+
+extern unsigned int cpu_khz;
+extern unsigned int tsc_khz;
+
+static inline cycles_t get_cycles(void)
+{
+ unsigned long long ret = 0;
+
+#ifndef CONFIG_X86_TSC
+ if (!cpu_has_tsc)
+ return 0;
+#endif
+
+#if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC)
+ rdtscll(ret);
+#endif
+ return ret;
+}
+
+/* Like get_cycles, but make sure the CPU is synchronized. */
+static __always_inline cycles_t get_cycles_sync(void)
+{
+ unsigned long long ret;
+#ifdef X86_FEATURE_SYNC_RDTSC
+ unsigned eax;
+
+ /*
+ * Don't do an additional sync on CPUs where we know
+ * RDTSC is already synchronous:
+ */
+ alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC,
+ "=a" (eax), "0" (1) : "ebx","ecx","edx","memory");
+#else
+ sync_core();
+#endif
+ rdtscll(ret);
+
+ return ret;
+}
+
+extern void tsc_init(void);
+extern void mark_tsc_unstable(void);
+extern int unsynchronized_tsc(void);
+extern void init_tsc_clocksource(void);
+
+/*
+ * Boot-time check whether the TSCs are synchronized across
+ * all CPUs/cores:
+ */
+extern void check_tsc_sync_source(int cpu);
+extern void check_tsc_sync_target(void);
+
+#endif
diff --git a/include/asm-i386/vmi.h b/include/asm-i386/vmi.h
index 43c8933..eb8bd89 100644
--- a/include/asm-i386/vmi.h
+++ b/include/asm-i386/vmi.h
@@ -97,6 +97,7 @@
#define VMI_CALL_SetInitialAPState 62
#define VMI_CALL_APICWrite 63
#define VMI_CALL_APICRead 64
+#define VMI_CALL_IODelay 65
#define VMI_CALL_SetLazyMode 73
/*
diff --git a/include/asm-i386/vmi_time.h b/include/asm-i386/vmi_time.h
index c129312..94d0a12 100644
--- a/include/asm-i386/vmi_time.h
+++ b/include/asm-i386/vmi_time.h
@@ -49,7 +49,8 @@
extern void __init vmi_time_init(void);
extern unsigned long vmi_get_wallclock(void);
extern int vmi_set_wallclock(unsigned long now);
-extern unsigned long long vmi_sched_clock(void);
+extern unsigned long long vmi_get_sched_cycles(void);
+extern unsigned long vmi_cpu_khz(void);
#ifdef CONFIG_X86_LOCAL_APIC
extern void __init vmi_timer_setup_boot_alarm(void);
@@ -60,6 +61,14 @@
#ifdef CONFIG_NO_IDLE_HZ
extern int vmi_stop_hz_timer(void);
extern void vmi_account_time_restart_hz_timer(void);
+#else
+static inline int vmi_stop_hz_timer(void)
+{
+ return 0;
+}
+static inline void vmi_account_time_restart_hz_timer(void)
+{
+}
#endif
/*
diff --git a/include/asm-ia64/meminit.h b/include/asm-ia64/meminit.h
index 6dd476b..21ec5f3 100644
--- a/include/asm-ia64/meminit.h
+++ b/include/asm-ia64/meminit.h
@@ -17,10 +17,11 @@
* - kernel code & data
* - crash dumping code reserved region
* - Kernel memory map built from EFI memory map
+ * - ELF core header
*
* More could be added if necessary
*/
-#define IA64_MAX_RSVD_REGIONS 7
+#define IA64_MAX_RSVD_REGIONS 8
struct rsvd_region {
unsigned long start; /* virtual address of beginning of element */
@@ -36,6 +37,9 @@
extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg);
extern void efi_memmap_init(unsigned long *, unsigned long *);
+extern unsigned long vmcore_find_descriptor_size(unsigned long address);
+extern int reserve_elfcorehdr(unsigned long *start, unsigned long *end);
+
/*
* For rounding an address to the next IA64_GRANULE_SIZE or order
*/
diff --git a/include/asm-ia64/resource.h b/include/asm-ia64/resource.h
index 77b1eee..ba2272a 100644
--- a/include/asm-ia64/resource.h
+++ b/include/asm-ia64/resource.h
@@ -2,7 +2,6 @@
#define _ASM_IA64_RESOURCE_H
#include <asm/ustack.h>
-#define _STK_LIM_MAX DEFAULT_USER_STACK_SIZE
#include <asm-generic/resource.h>
#endif /* _ASM_IA64_RESOURCE_H */
diff --git a/include/asm-ia64/swiotlb.h b/include/asm-ia64/swiotlb.h
deleted file mode 100644
index 452c162..0000000
--- a/include/asm-ia64/swiotlb.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _ASM_SWIOTLB_H
-#define _ASM_SWIOTLB_H 1
-
-#include <asm/machvec.h>
-
-#define SWIOTLB_ARCH_NEED_LATE_INIT
-#define SWIOTLB_ARCH_NEED_ALLOC
-
-#endif /* _ASM_SWIOTLB_H */
diff --git a/include/asm-ia64/topology.h b/include/asm-ia64/topology.h
index 22ed674..233f1ca 100644
--- a/include/asm-ia64/topology.h
+++ b/include/asm-ia64/topology.h
@@ -65,7 +65,6 @@
.max_interval = 4, \
.busy_factor = 64, \
.imbalance_pct = 125, \
- .per_cpu_gain = 100, \
.cache_nice_tries = 2, \
.busy_idx = 2, \
.idle_idx = 1, \
@@ -97,7 +96,6 @@
.newidle_idx = 0, /* unused */ \
.wake_idx = 1, \
.forkexec_idx = 1, \
- .per_cpu_gain = 100, \
.flags = SD_LOAD_BALANCE \
| SD_BALANCE_EXEC \
| SD_BALANCE_FORK \
diff --git a/include/asm-m68knommu/m528xsim.h b/include/asm-m68knommu/m528xsim.h
index 1a3b1ae..28bf783 100644
--- a/include/asm-m68knommu/m528xsim.h
+++ b/include/asm-m68knommu/m528xsim.h
@@ -47,6 +47,9 @@
/* set Port AS pin for I2C or UART */
#define MCF5282_GPIO_PASPAR (volatile u16 *) (MCF_IPSBAR + 0x00100056)
+/* Port UA Pin Assignment Register (8 Bit) */
+#define MCF5282_GPIO_PUAPAR 0x10005C
+
/* Interrupt Mask Register Register Low */
#define MCF5282_INTC0_IMRL (volatile u32 *) (MCF_IPSBAR + 0x0C0C)
/* Interrupt Control Register 7 */
diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h
index 89436b9..8959da2 100644
--- a/include/asm-mips/bitops.h
+++ b/include/asm-mips/bitops.h
@@ -54,6 +54,7 @@
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+ unsigned short bit = nr & SZLONG_MASK;
unsigned long temp;
if (cpu_has_llsc && R10000_LLSC_WAR) {
@@ -65,9 +66,9 @@
" beqzl %0, 1b \n"
" .set mips0 \n"
: "=&r" (temp), "=m" (*m)
- : "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
+ : "ir" (1UL << bit), "m" (*m));
#ifdef CONFIG_CPU_MIPSR2
- } else if (__builtin_constant_p(nr)) {
+ } else if (__builtin_constant_p(bit)) {
__asm__ __volatile__(
"1: " __LL "%0, %1 # set_bit \n"
" " __INS "%0, %4, %2, 1 \n"
@@ -77,7 +78,7 @@
"2: b 1b \n"
" .previous \n"
: "=&r" (temp), "=m" (*m)
- : "ir" (nr & SZLONG_MASK), "m" (*m), "r" (~0));
+ : "ir" (bit), "m" (*m), "r" (~0));
#endif /* CONFIG_CPU_MIPSR2 */
} else if (cpu_has_llsc) {
__asm__ __volatile__(
@@ -91,14 +92,14 @@
" .previous \n"
" .set mips0 \n"
: "=&r" (temp), "=m" (*m)
- : "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
+ : "ir" (1UL << bit), "m" (*m));
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;
a += nr >> SZLONG_LOG;
- mask = 1UL << (nr & SZLONG_MASK);
+ mask = 1UL << bit;
local_irq_save(flags);
*a |= mask;
local_irq_restore(flags);
@@ -118,6 +119,7 @@
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+ unsigned short bit = nr & SZLONG_MASK;
unsigned long temp;
if (cpu_has_llsc && R10000_LLSC_WAR) {
@@ -129,9 +131,9 @@
" beqzl %0, 1b \n"
" .set mips0 \n"
: "=&r" (temp), "=m" (*m)
- : "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
+ : "ir" (~(1UL << bit)), "m" (*m));
#ifdef CONFIG_CPU_MIPSR2
- } else if (__builtin_constant_p(nr)) {
+ } else if (__builtin_constant_p(bit)) {
__asm__ __volatile__(
"1: " __LL "%0, %1 # clear_bit \n"
" " __INS "%0, $0, %2, 1 \n"
@@ -141,7 +143,7 @@
"2: b 1b \n"
" .previous \n"
: "=&r" (temp), "=m" (*m)
- : "ir" (nr & SZLONG_MASK), "m" (*m));
+ : "ir" (bit), "m" (*m));
#endif /* CONFIG_CPU_MIPSR2 */
} else if (cpu_has_llsc) {
__asm__ __volatile__(
@@ -155,14 +157,14 @@
" .previous \n"
" .set mips0 \n"
: "=&r" (temp), "=m" (*m)
- : "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
+ : "ir" (~(1UL << bit)), "m" (*m));
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;
a += nr >> SZLONG_LOG;
- mask = 1UL << (nr & SZLONG_MASK);
+ mask = 1UL << bit;
local_irq_save(flags);
*a &= ~mask;
local_irq_restore(flags);
@@ -180,6 +182,8 @@
*/
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
+ unsigned short bit = nr & SZLONG_MASK;
+
if (cpu_has_llsc && R10000_LLSC_WAR) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
@@ -192,7 +196,7 @@
" beqzl %0, 1b \n"
" .set mips0 \n"
: "=&r" (temp), "=m" (*m)
- : "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
+ : "ir" (1UL << bit), "m" (*m));
} else if (cpu_has_llsc) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
@@ -208,14 +212,14 @@
" .previous \n"
" .set mips0 \n"
: "=&r" (temp), "=m" (*m)
- : "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
+ : "ir" (1UL << bit), "m" (*m));
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;
a += nr >> SZLONG_LOG;
- mask = 1UL << (nr & SZLONG_MASK);
+ mask = 1UL << bit;
local_irq_save(flags);
*a ^= mask;
local_irq_restore(flags);
@@ -233,6 +237,8 @@
static inline int test_and_set_bit(unsigned long nr,
volatile unsigned long *addr)
{
+ unsigned short bit = nr & SZLONG_MASK;
+
if (cpu_has_llsc && R10000_LLSC_WAR) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp, res;
@@ -246,7 +252,7 @@
" and %2, %0, %3 \n"
" .set mips0 \n"
: "=&r" (temp), "=m" (*m), "=&r" (res)
- : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
+ : "r" (1UL << bit), "m" (*m)
: "memory");
return res != 0;
@@ -269,7 +275,7 @@
" .previous \n"
" .set pop \n"
: "=&r" (temp), "=m" (*m), "=&r" (res)
- : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
+ : "r" (1UL << bit), "m" (*m)
: "memory");
return res != 0;
@@ -280,7 +286,7 @@
unsigned long flags;
a += nr >> SZLONG_LOG;
- mask = 1UL << (nr & SZLONG_MASK);
+ mask = 1UL << bit;
local_irq_save(flags);
retval = (mask & *a) != 0;
*a |= mask;
@@ -303,6 +309,8 @@
static inline int test_and_clear_bit(unsigned long nr,
volatile unsigned long *addr)
{
+ unsigned short bit = nr & SZLONG_MASK;
+
if (cpu_has_llsc && R10000_LLSC_WAR) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp, res;
@@ -317,7 +325,7 @@
" and %2, %0, %3 \n"
" .set mips0 \n"
: "=&r" (temp), "=m" (*m), "=&r" (res)
- : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
+ : "r" (1UL << bit), "m" (*m)
: "memory");
return res != 0;
@@ -336,7 +344,7 @@
"2: b 1b \n"
" .previous \n"
: "=&r" (temp), "=m" (*m), "=&r" (res)
- : "ri" (nr & SZLONG_MASK), "m" (*m)
+ : "ri" (bit), "m" (*m)
: "memory");
return res;
@@ -361,7 +369,7 @@
" .previous \n"
" .set pop \n"
: "=&r" (temp), "=m" (*m), "=&r" (res)
- : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
+ : "r" (1UL << bit), "m" (*m)
: "memory");
return res != 0;
@@ -372,7 +380,7 @@
unsigned long flags;
a += nr >> SZLONG_LOG;
- mask = 1UL << (nr & SZLONG_MASK);
+ mask = 1UL << bit;
local_irq_save(flags);
retval = (mask & *a) != 0;
*a &= ~mask;
@@ -395,6 +403,8 @@
static inline int test_and_change_bit(unsigned long nr,
volatile unsigned long *addr)
{
+ unsigned short bit = nr & SZLONG_MASK;
+
if (cpu_has_llsc && R10000_LLSC_WAR) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp, res;
@@ -408,7 +418,7 @@
" and %2, %0, %3 \n"
" .set mips0 \n"
: "=&r" (temp), "=m" (*m), "=&r" (res)
- : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
+ : "r" (1UL << bit), "m" (*m)
: "memory");
return res != 0;
@@ -431,7 +441,7 @@
" .previous \n"
" .set pop \n"
: "=&r" (temp), "=m" (*m), "=&r" (res)
- : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
+ : "r" (1UL << bit), "m" (*m)
: "memory");
return res != 0;
@@ -441,7 +451,7 @@
unsigned long flags;
a += nr >> SZLONG_LOG;
- mask = 1UL << (nr & SZLONG_MASK);
+ mask = 1UL << bit;
local_irq_save(flags);
retval = (mask & *a) != 0;
*a ^= mask;
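
These hunks only hoist nr & SZLONG_MASK into a local so the MIPSR2 ins/ext path sees a plain register operand; the word/mask arithmetic itself is unchanged. The generic, non-atomic form of that arithmetic for a 64-bit long looks like this (the kernel paths additionally need ll/sc or interrupt disabling for atomicity):

#include <stdio.h>

#define SZLONG_LOG  6                        /* log2(bits per long) on 64-bit */
#define SZLONG_MASK ((1 << SZLONG_LOG) - 1)  /* 63 */

static void set_bit(unsigned long nr, unsigned long *addr)
{
    unsigned long *m = addr + (nr >> SZLONG_LOG);  /* which word */
    unsigned short bit = nr & SZLONG_MASK;         /* which bit in it */

    *m |= 1UL << bit;
}

static int test_bit(unsigned long nr, const unsigned long *addr)
{
    return (addr[nr >> SZLONG_LOG] >> (nr & SZLONG_MASK)) & 1;
}

int main(void)
{
    unsigned long bitmap[4] = { 0 };    /* 256 bits */

    set_bit(0, bitmap);
    set_bit(65, bitmap);                /* lands in bitmap[1], bit 1 */
    printf("bit 65: %d, word[1]: %#lx\n", test_bit(65, bitmap), bitmap[1]);
    return 0;
}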
diff --git a/include/asm-mips/mach-ip27/topology.h b/include/asm-mips/mach-ip27/topology.h
index 44790fd..61d9be3 100644
--- a/include/asm-mips/mach-ip27/topology.h
+++ b/include/asm-mips/mach-ip27/topology.h
@@ -28,7 +28,6 @@
.busy_factor = 32, \
.imbalance_pct = 125, \
.cache_nice_tries = 1, \
- .per_cpu_gain = 100, \
.flags = SD_LOAD_BALANCE \
| SD_BALANCE_EXEC \
| SD_WAKE_BALANCE, \
diff --git a/include/asm-mips/mips_mt.h b/include/asm-mips/mips_mt.h
index fdfff0b..8045abc 100644
--- a/include/asm-mips/mips_mt.h
+++ b/include/asm-mips/mips_mt.h
@@ -6,6 +6,8 @@
#ifndef __ASM_MIPS_MT_H
#define __ASM_MIPS_MT_H
+#include <linux/cpumask.h>
+
extern cpumask_t mt_fpu_cpumask;
extern unsigned long mt_fpemul_threshold;
diff --git a/include/asm-mips/smtc.h b/include/asm-mips/smtc.h
index e1941d1..44dfa4a 100644
--- a/include/asm-mips/smtc.h
+++ b/include/asm-mips/smtc.h
@@ -34,6 +34,9 @@
extern asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
+struct mm_struct;
+struct task_struct;
+
void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu);
void smtc_flush_tlb_asid(unsigned long asid);
diff --git a/include/asm-mips/smtc_ipi.h b/include/asm-mips/smtc_ipi.h
index 55f3419..360ea6d 100644
--- a/include/asm-mips/smtc_ipi.h
+++ b/include/asm-mips/smtc_ipi.h
@@ -4,6 +4,8 @@
#ifndef __ASM_SMTC_IPI_H
#define __ASM_SMTC_IPI_H
+#include <linux/spinlock.h>
+
//#define SMTC_IPI_DEBUG
#ifdef SMTC_IPI_DEBUG
diff --git a/include/asm-mips/spinlock.h b/include/asm-mips/spinlock.h
index f1755d2..35e431c 100644
--- a/include/asm-mips/spinlock.h
+++ b/include/asm-mips/spinlock.h
@@ -287,7 +287,7 @@
" .set noreorder # __raw_read_trylock \n"
" li %2, 0 \n"
"1: ll %1, %3 \n"
- " bnez %1, 2f \n"
+ " bltz %1, 2f \n"
" addu %1, 1 \n"
" sc %1, %0 \n"
" .set reorder \n"
@@ -304,7 +304,7 @@
" .set noreorder # __raw_read_trylock \n"
" li %2, 0 \n"
"1: ll %1, %3 \n"
- " bnez %1, 2f \n"
+ " bltz %1, 2f \n"
" addu %1, 1 \n"
" sc %1, %0 \n"
" beqz %1, 1b \n"
diff --git a/include/asm-mips/uaccess.h b/include/asm-mips/uaccess.h
index c62c20e..b255117 100644
--- a/include/asm-mips/uaccess.h
+++ b/include/asm-mips/uaccess.h
@@ -435,6 +435,8 @@
__cu_len; \
})
+extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
+
#define __copy_to_user_inatomic(to,from,n) \
({ \
void __user *__cu_to; \
diff --git a/include/asm-mips/unistd.h b/include/asm-mips/unistd.h
index 696cff3..2f1087b 100644
--- a/include/asm-mips/unistd.h
+++ b/include/asm-mips/unistd.h
@@ -334,16 +334,18 @@
#define __NR_kexec_load (__NR_Linux + 311)
#define __NR_getcpu (__NR_Linux + 312)
#define __NR_epoll_pwait (__NR_Linux + 313)
+#define __NR_ioprio_set (__NR_Linux + 314)
+#define __NR_ioprio_get (__NR_Linux + 315)
/*
* Offset of the last Linux o32 flavoured syscall
*/
-#define __NR_Linux_syscalls 313
+#define __NR_Linux_syscalls 315
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#define __NR_O32_Linux 4000
-#define __NR_O32_Linux_syscalls 313
+#define __NR_O32_Linux_syscalls 315
#if _MIPS_SIM == _MIPS_SIM_ABI64
@@ -624,16 +626,18 @@
#define __NR_kexec_load (__NR_Linux + 270)
#define __NR_getcpu (__NR_Linux + 271)
#define __NR_epoll_pwait (__NR_Linux + 272)
+#define __NR_ioprio_set (__NR_Linux + 273)
+#define __NR_ioprio_get (__NR_Linux + 274)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
-#define __NR_Linux_syscalls 272
+#define __NR_Linux_syscalls 274
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
#define __NR_64_Linux 5000
-#define __NR_64_Linux_syscalls 272
+#define __NR_64_Linux_syscalls 274
#if _MIPS_SIM == _MIPS_SIM_NABI32
@@ -918,16 +922,18 @@
#define __NR_kexec_load (__NR_Linux + 274)
#define __NR_getcpu (__NR_Linux + 275)
#define __NR_epoll_pwait (__NR_Linux + 276)
+#define __NR_ioprio_set (__NR_Linux + 277)
+#define __NR_ioprio_get (__NR_Linux + 278)
/*
* Offset of the last N32 flavoured syscall
*/
-#define __NR_Linux_syscalls 276
+#define __NR_Linux_syscalls 278
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
#define __NR_N32_Linux 6000
-#define __NR_N32_Linux_syscalls 276
+#define __NR_N32_Linux_syscalls 278
#ifdef __KERNEL__
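
The new __NR_ioprio_set/__NR_ioprio_get entries wire the existing I/O-priority syscalls into the o32, 64-bit and N32 tables, with each table's syscall count growing by two. A hedged userspace example of calling them through syscall(2); the IOPRIO_* constants are spelled out by hand from linux/ioprio.h, and SYS_ioprio_set/SYS_ioprio_get are assumed to be exposed by the libc headers (otherwise substitute the __NR_ values above):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define IOPRIO_WHO_PROCESS	1
#define IOPRIO_CLASS_BE		2
#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_PRIO_VALUE(class, data)	(((class) << IOPRIO_CLASS_SHIFT) | (data))

int main(void)
{
	/* Put the calling process in the best-effort class at priority 4. */
	if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
		    IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4)) < 0)
		perror("ioprio_set");

	printf("ioprio value: %ld\n",
	       (long)syscall(SYS_ioprio_get, IOPRIO_WHO_PROCESS, 0));
	return 0;
}
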
diff --git a/include/asm-powerpc/topology.h b/include/asm-powerpc/topology.h
index 6610495..0ad21a8 100644
--- a/include/asm-powerpc/topology.h
+++ b/include/asm-powerpc/topology.h
@@ -57,7 +57,6 @@
.busy_factor = 32, \
.imbalance_pct = 125, \
.cache_nice_tries = 1, \
- .per_cpu_gain = 100, \
.busy_idx = 3, \
.idle_idx = 1, \
.newidle_idx = 2, \
diff --git a/include/asm-s390/bugs.h b/include/asm-s390/bugs.h
index 2c36596..011f1e6 100644
--- a/include/asm-s390/bugs.h
+++ b/include/asm-s390/bugs.h
@@ -16,7 +16,7 @@
* void check_bugs(void);
*/
-static void __init check_bugs(void)
+static inline void check_bugs(void)
{
/* s390 has no bugs ... */
}
diff --git a/include/asm-s390/ipl.h b/include/asm-s390/ipl.h
index 5650d3d..660f782 100644
--- a/include/asm-s390/ipl.h
+++ b/include/asm-s390/ipl.h
@@ -74,6 +74,7 @@
extern u32 ipl_flags;
extern u16 ipl_devno;
+extern u32 dump_prefix_page;
extern void do_reipl(void);
extern void ipl_save_parameters(void);
diff --git a/include/asm-sparc64/dma.h b/include/asm-sparc64/dma.h
index 1bf4f7a8..a9fd0618 100644
--- a/include/asm-sparc64/dma.h
+++ b/include/asm-sparc64/dma.h
@@ -15,17 +15,6 @@
#include <asm/delay.h>
#include <asm/oplib.h>
-extern spinlock_t dma_spin_lock;
-
-#define claim_dma_lock() \
-({ unsigned long flags; \
- spin_lock_irqsave(&dma_spin_lock, flags); \
- flags; \
-})
-
-#define release_dma_lock(__flags) \
- spin_unlock_irqrestore(&dma_spin_lock, __flags);
-
/* These are irrelevant for Sparc DMA, but we leave it in so that
* things can compile.
*/
diff --git a/include/asm-sparc64/floppy.h b/include/asm-sparc64/floppy.h
index dbe033e..331013a 100644
--- a/include/asm-sparc64/floppy.h
+++ b/include/asm-sparc64/floppy.h
@@ -854,4 +854,15 @@
#define EXTRA_FLOPPY_PARAMS
+static DEFINE_SPINLOCK(dma_spin_lock);
+
+#define claim_dma_lock() \
+({ unsigned long flags; \
+ spin_lock_irqsave(&dma_spin_lock, flags); \
+ flags; \
+})
+
+#define release_dma_lock(__flags) \
+ spin_unlock_irqrestore(&dma_spin_lock, __flags);
+
#endif /* !(__ASM_SPARC64_FLOPPY_H) */
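
Removing claim_dma_lock()/release_dma_lock() from dma.h and redefining them, together with the lock itself, in floppy.h scopes the lock to its only user while keeping the generic floppy driver compiling unchanged. The calling convention stays the same; a short sketch of the pattern the driver relies on:

static void program_fdc_dma_sketch(void)
{
	unsigned long flags;

	flags = claim_dma_lock();	/* spin_lock_irqsave(&dma_spin_lock, flags) */
	/* ... set up the pseudo-DMA transfer ... */
	release_dma_lock(flags);	/* spin_unlock_irqrestore(&dma_spin_lock, flags) */
}
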
diff --git a/include/asm-x86_64/io_apic.h b/include/asm-x86_64/io_apic.h
index f4fb238..969d225 100644
--- a/include/asm-x86_64/io_apic.h
+++ b/include/asm-x86_64/io_apic.h
@@ -3,6 +3,7 @@
#include <asm/types.h>
#include <asm/mpspec.h>
+#include <asm/apicdef.h>
/*
* Intel IO-APIC support for SMP and UP systems.
diff --git a/include/asm-x86_64/nmi.h b/include/asm-x86_64/nmi.h
index 72375e7..ceb3d8d 100644
--- a/include/asm-x86_64/nmi.h
+++ b/include/asm-x86_64/nmi.h
@@ -64,7 +64,7 @@
extern atomic_t nmi_active;
extern unsigned int nmi_watchdog;
-#define NMI_DEFAULT -1
+#define NMI_DEFAULT 0
#define NMI_NONE 0
#define NMI_IO_APIC 1
#define NMI_LOCAL_APIC 2
diff --git a/include/asm-x86_64/swiotlb.h b/include/asm-x86_64/swiotlb.h
index ab913ff..f9c5895 100644
--- a/include/asm-x86_64/swiotlb.h
+++ b/include/asm-x86_64/swiotlb.h
@@ -44,7 +44,6 @@
extern int swiotlb_force;
#ifdef CONFIG_SWIOTLB
-#define SWIOTLB_ARCH_NEED_ALLOC
extern int swiotlb;
#else
#define swiotlb 0
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h
index 2facec5..4fd6fb2 100644
--- a/include/asm-x86_64/topology.h
+++ b/include/asm-x86_64/topology.h
@@ -43,7 +43,6 @@
.newidle_idx = 0, \
.wake_idx = 1, \
.forkexec_idx = 1, \
- .per_cpu_gain = 100, \
.flags = SD_LOAD_BALANCE \
| SD_BALANCE_FORK \
| SD_BALANCE_EXEC \
diff --git a/include/asm-x86_64/tsc.h b/include/asm-x86_64/tsc.h
index 9a0a368..d66ba6e 100644
--- a/include/asm-x86_64/tsc.h
+++ b/include/asm-x86_64/tsc.h
@@ -1,66 +1 @@
-/*
- * linux/include/asm-x86_64/tsc.h
- *
- * x86_64 TSC related functions
- */
-#ifndef _ASM_x86_64_TSC_H
-#define _ASM_x86_64_TSC_H
-
-#include <asm/processor.h>
-
-/*
- * Standard way to access the cycle counter.
- */
-typedef unsigned long long cycles_t;
-
-extern unsigned int cpu_khz;
-extern unsigned int tsc_khz;
-
-static inline cycles_t get_cycles(void)
-{
- unsigned long long ret = 0;
-
-#ifndef CONFIG_X86_TSC
- if (!cpu_has_tsc)
- return 0;
-#endif
-
-#if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC)
- rdtscll(ret);
-#endif
- return ret;
-}
-
-/* Like get_cycles, but make sure the CPU is synchronized. */
-static __always_inline cycles_t get_cycles_sync(void)
-{
- unsigned long long ret;
-#ifdef X86_FEATURE_SYNC_RDTSC
- unsigned eax;
-
- /*
- * Don't do an additional sync on CPUs where we know
- * RDTSC is already synchronous:
- */
- alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC,
- "=a" (eax), "0" (1) : "ebx","ecx","edx","memory");
-#else
- sync_core();
-#endif
- rdtscll(ret);
-
- return ret;
-}
-
-extern void tsc_init(void);
-extern void mark_tsc_unstable(void);
-extern int unsynchronized_tsc(void);
-
-/*
- * Boot-time check whether the TSCs are synchronized across
- * all CPUs/cores:
- */
-extern void check_tsc_sync_source(int cpu);
-extern void check_tsc_sync_target(void);
-
-#endif
+#include <asm-i386/tsc.h>
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 229fa01..773e30d 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -24,6 +24,7 @@
#ifndef _LINUX_AUDIT_H_
#define _LINUX_AUDIT_H_
+#include <linux/types.h>
#include <linux/elf-em.h>
/* The netlink messages for the audit system is divided into blocks:
diff --git a/include/asm-arm/hardware/gpio_keys.h b/include/linux/gpio_keys.h
similarity index 100%
rename from include/asm-arm/hardware/gpio_keys.h
rename to include/linux/gpio_keys.h
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 3bef961..5bdbc74 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -47,7 +47,7 @@
* HRTIMER_CB_IRQSAFE: Callback may run in hardirq context
* HRTIMER_CB_IRQSAFE_NO_RESTART: Callback may run in hardirq context and
* does not restart the timer
- * HRTIMER_CB_IRQSAFE_NO_SOFTIRQ: Callback must run in softirq context
+ * HRTIMER_CB_IRQSAFE_NO_SOFTIRQ: Callback must run in hardirq context
* Special mode for tick emultation
* Special mode for tick emulation
*/
enum hrtimer_cb_mode {
@@ -139,7 +139,7 @@
};
/**
- * struct hrtimer_base - the timer base for a specific clock
+ * struct hrtimer_clock_base - the timer base for a specific clock
* @cpu_base: per cpu clock base
* @index: clock type index for per_cpu support when moving a
* timer to a base on another cpu.
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index 4fab3d0..e33ee76 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -114,6 +114,7 @@
#ifdef __KERNEL__
struct pppoe_opt {
struct net_device *dev; /* device associated with socket*/
+ int ifindex; /* ifindex of device associated with socket */
struct pppoe_addr pa; /* what this socket is bound to*/
struct sockaddr_pppox relay; /* what socket data will be
relayed to (PPPoE relaying) */
@@ -132,6 +133,7 @@
unsigned short num;
};
#define pppoe_dev proto.pppoe.dev
+#define pppoe_ifindex proto.pppoe.ifindex
#define pppoe_pa proto.pppoe.pa
#define pppoe_relay proto.pppoe.relay
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 9dbb525..a113fe6 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -218,5 +218,7 @@
extern void ip_mc_down(struct in_device *);
extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr);
extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr);
+extern void ip_mc_rejoin_group(struct ip_mc_list *im);
+
#endif
#endif
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 48148e0..75e55dc 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -5,6 +5,14 @@
typedef struct page *new_page_t(struct page *, unsigned long private, int **);
+/* Check if a vma is migratable */
+static inline int vma_migratable(struct vm_area_struct *vma)
+{
+ if (vma->vm_flags & (VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED))
+ return 0;
+ return 1;
+}
+
#ifdef CONFIG_MIGRATION
extern int isolate_lru_page(struct page *p, struct list_head *pagelist);
extern int putback_lru_pages(struct list_head *l);
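
Moving vma_migratable() into migrate.h lets the move_pages() path (see the mm/migrate.c hunk further down) apply the same filter that mempolicy.c previously kept private; note the shared copy no longer excludes VM_LOCKED, only VM_IO, VM_HUGETLB, VM_PFNMAP and VM_RESERVED. A sketch of the kind of check a caller performs, using a hypothetical walk over a task's VMA list:

#include <linux/mm.h>
#include <linux/migrate.h>

/* Sketch: count the VMAs whose pages could be migrated. */
static int count_migratable_vmas_sketch(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	int n = 0;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		if (vma_migratable(vma))	/* skips VM_IO, VM_HUGETLB, ... */
			n++;
	return n;
}
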
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 913e575..bfcef8a 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -62,6 +62,12 @@
#define MMC_BUS_WIDTH_1 0
#define MMC_BUS_WIDTH_4 2
+
+ unsigned char timing; /* timing specification used */
+
+#define MMC_TIMING_LEGACY 0
+#define MMC_TIMING_MMC_HS 1
+#define MMC_TIMING_SD_HS 2
};
struct mmc_host_ops {
@@ -87,6 +93,8 @@
#define MMC_CAP_4_BIT_DATA (1 << 0) /* Can the host do 4 bit transfers */
#define MMC_CAP_MULTIWRITE (1 << 1) /* Can accurately report bytes sent to card on error */
#define MMC_CAP_BYTEBLOCK (1 << 2) /* Can do non-log2 block sizes */
+#define MMC_CAP_MMC_HIGHSPEED (1 << 3) /* Can do MMC high-speed timing */
+#define MMC_CAP_SD_HIGHSPEED (1 << 4) /* Can do SD high-speed timing */
/* host specific block data */
unsigned int max_seg_size; /* see blk_queue_max_segment_size */
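
The new timing member of struct mmc_ios and the MMC_CAP_*_HIGHSPEED flags let the core tell a host controller which bus timing is in effect and let the driver advertise high-speed support at probe time. A hedged sketch of a host driver reacting in its set_ios() callback; the example_* names are hypothetical placeholders, not a real driver API:

#include <linux/mmc/host.h>

static void example_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct example_host *host = mmc_priv(mmc);	/* driver-private data */

	if (ios->timing == MMC_TIMING_MMC_HS ||
	    ios->timing == MMC_TIMING_SD_HS)
		example_enable_hispeed_sampling(host);	/* hypothetical helper */
	else
		example_disable_hispeed_sampling(host);	/* hypothetical helper */

	example_set_clock(host, ios->clock);		/* hypothetical helper */
}

/* At probe time such a driver would also do something like:
 *	mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED;
 */
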
diff --git a/include/linux/mv643xx.h b/include/linux/mv643xx.h
index e7d4da1..c6d4ab8 100644
--- a/include/linux/mv643xx.h
+++ b/include/linux/mv643xx.h
@@ -1288,6 +1288,7 @@
#define MV643XX_ETH_NAME "mv643xx_eth"
struct mv643xx_eth_platform_data {
+ int port_number;
u16 force_phy_addr; /* force override if phy_addr == 0 */
u16 phy_addr;
diff --git a/include/linux/ncp_fs_sb.h b/include/linux/ncp_fs_sb.h
index a503052..6330fc7 100644
--- a/include/linux/ncp_fs_sb.h
+++ b/include/linux/ncp_fs_sb.h
@@ -50,6 +50,8 @@
int packet_size;
unsigned char *packet; /* Here we prepare requests and
receive replies */
+ unsigned char *txbuf; /* Storage for current request */
+ unsigned char *rxbuf; /* Storage for reply to current request */
int lock; /* To prevent mismatch in protocols. */
struct mutex mutex;
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_core.h b/include/linux/netfilter_ipv4/ip_conntrack_core.h
index 907d4f5..e3a6df0 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack_core.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack_core.h
@@ -45,7 +45,7 @@
int ret = NF_ACCEPT;
if (ct) {
- if (!is_confirmed(ct))
+ if (!is_confirmed(ct) && !is_dying(ct))
ret = __ip_conntrack_confirm(pskb);
ip_ct_deliver_cached_events(ct);
}
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 2c4b684..78417e4 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -543,6 +543,7 @@
int __must_check pci_set_mwi(struct pci_dev *dev);
void pci_clear_mwi(struct pci_dev *dev);
void pci_intx(struct pci_dev *dev, int enable);
+void pci_msi_off(struct pci_dev *dev);
int pci_set_dma_mask(struct pci_dev *dev, u64 mask);
int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask);
void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno);
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index 7a6d34e..f09cce2 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -292,9 +292,10 @@
#define PCI_MSI_DATA_64 12 /* 16 bits of data for 64-bit devices */
#define PCI_MSI_MASK_BIT 16 /* Mask bits register */
-/* MSI-X registers (these are at offset PCI_MSI_FLAGS) */
-#define PCI_MSIX_FLAGS_QSIZE 0x7FF
-#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
+/* MSI-X registers (these are at offset PCI_MSIX_FLAGS) */
+#define PCI_MSIX_FLAGS 2
+#define PCI_MSIX_FLAGS_QSIZE 0x7FF
+#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
#define PCI_MSIX_FLAGS_BIRMASK (7 << 0)
#define PCI_MSIX_FLAGS_BITMASK (1 << 0)
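
The old comment claimed the MSI-X registers sit at offset PCI_MSI_FLAGS; in fact the MSI-X message-control word has its own offset inside the MSI-X capability, which the new PCI_MSIX_FLAGS define makes explicit. A short sketch of reading the MSI-X table size through it:

#include <linux/pci.h>

/* Sketch: number of MSI-X table entries, or 0 if the device has no MSI-X cap. */
static int msix_table_size_sketch(struct pci_dev *dev)
{
	int pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	u16 control;

	if (!pos)
		return 0;
	pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
	return (control & PCI_MSIX_FLAGS_QSIZE) + 1;	/* field is encoded as N-1 */
}
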
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6f7c9a4..49fe299 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -684,7 +684,6 @@
unsigned int imbalance_pct; /* No balance until over watermark */
unsigned long long cache_hot_time; /* Task considered cache hot (ns) */
unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */
- unsigned int per_cpu_gain; /* CPU % gained by adding domain cpus */
unsigned int busy_idx;
unsigned int idle_idx;
unsigned int newidle_idx;
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 61fef37..a946176 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -283,6 +283,43 @@
})
/*
+ * Locks two spinlocks l1 and l2.
+ * l1_first indicates if spinlock l1 should be taken first.
+ */
+static inline void double_spin_lock(spinlock_t *l1, spinlock_t *l2,
+ bool l1_first)
+ __acquires(l1)
+ __acquires(l2)
+{
+ if (l1_first) {
+ spin_lock(l1);
+ spin_lock(l2);
+ } else {
+ spin_lock(l2);
+ spin_lock(l1);
+ }
+}
+
+/*
+ * Unlocks two spinlocks l1 and l2.
+ * l1_taken_first indicates if spinlock l1 was taken first and therefore
+ * should be released after spinlock l2.
+ */
+static inline void double_spin_unlock(spinlock_t *l1, spinlock_t *l2,
+ bool l1_taken_first)
+ __releases(l1)
+ __releases(l2)
+{
+ if (l1_taken_first) {
+ spin_unlock(l2);
+ spin_unlock(l1);
+ } else {
+ spin_unlock(l1);
+ spin_unlock(l2);
+ }
+}
+
+/*
* Pull the atomic_t declaration:
* (asm-mips/atomic.h needs above definitions)
*/
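
double_spin_lock()/double_spin_unlock() package the usual take-two-locks-in-a-fixed-order idiom so every caller agrees on the ordering predicate and AB-BA deadlocks are ruled out. The hrtimer and timer CPU-hotplug migration paths later in this merge use smp_processor_id() < cpu as that predicate; condensed from those hunks:

	/* Both per-CPU timer bases must be held while timers migrate off a
	 * dead CPU; ordering the acquisition by CPU number keeps concurrent
	 * migrations from deadlocking against each other.
	 */
	local_irq_disable();
	double_spin_lock(&new_base->lock, &old_base->lock,
			 smp_processor_id() < cpu);

	/* ... move timers from old_base to new_base ... */

	double_spin_unlock(&new_base->lock, &old_base->lock,
			   smp_processor_id() < cpu);
	local_irq_enable();
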
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 83b3c7b..35fa4d5 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -194,9 +194,7 @@
union svc_addr_u {
struct in_addr addr;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
struct in6_addr addr6;
-#endif
};
/*
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index cccea0a..7909687 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -66,7 +66,7 @@
* Function prototypes.
*/
int svc_makesock(struct svc_serv *, int, unsigned short, int flags);
-void svc_close_socket(struct svc_sock *);
+void svc_force_close_socket(struct svc_sock *);
int svc_recv(struct svc_rqst *, long);
int svc_send(struct svc_rqst *);
void svc_drop(struct svc_rqst *);
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 6c5a6e6..a9d1f04 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -96,7 +96,6 @@
.busy_factor = 64, \
.imbalance_pct = 110, \
.cache_nice_tries = 0, \
- .per_cpu_gain = 25, \
.busy_idx = 0, \
.idle_idx = 0, \
.newidle_idx = 1, \
@@ -128,7 +127,6 @@
.busy_factor = 64, \
.imbalance_pct = 125, \
.cache_nice_tries = 1, \
- .per_cpu_gain = 100, \
.busy_idx = 2, \
.idle_idx = 1, \
.newidle_idx = 2, \
@@ -159,7 +157,6 @@
.busy_factor = 64, \
.imbalance_pct = 125, \
.cache_nice_tries = 1, \
- .per_cpu_gain = 100, \
.busy_idx = 2, \
.idle_idx = 1, \
.newidle_idx = 2, \
@@ -193,7 +190,6 @@
.newidle_idx = 0, /* unused */ \
.wake_idx = 0, /* unused */ \
.forkexec_idx = 0, /* unused */ \
- .per_cpu_gain = 100, \
.flags = SD_LOAD_BALANCE \
| SD_SERIALIZE, \
.last_balance = jiffies, \
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index f7be1ac..09a2532 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -66,7 +66,7 @@
struct inet_timewait_death_row {
/* Short-time timewait calendar */
int twcal_hand;
- int twcal_jiffie;
+ unsigned long twcal_jiffie;
struct timer_list twcal_timer;
struct hlist_head twcal_row[INET_TWDR_RECYCLE_SLOTS];
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index 7fdc72c..85634e1 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -64,7 +64,7 @@
int ret = NF_ACCEPT;
if (ct) {
- if (!nf_ct_is_confirmed(ct))
+ if (!nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct))
ret = __nf_conntrack_confirm(pskb);
nf_ct_deliver_cached_events(ct);
}
diff --git a/include/net/sock.h b/include/net/sock.h
index 849c7df..2c7d60c 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -426,7 +426,7 @@
static inline int sk_acceptq_is_full(struct sock *sk)
{
- return sk->sk_ack_backlog >= sk->sk_max_ack_backlog;
+ return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}
/*
diff --git a/include/sound/version.h b/include/sound/version.h
index a9ba7ee..5f72750 100644
--- a/include/sound/version.h
+++ b/include/sound/version.h
@@ -1,3 +1,3 @@
/* include/version.h. Generated by alsa/ksync script. */
-#define CONFIG_SND_VERSION "1.0.14rc2"
-#define CONFIG_SND_DATE " (Wed Feb 14 07:42:13 2007 UTC)"
+#define CONFIG_SND_VERSION "1.0.14rc3"
+#define CONFIG_SND_DATE " (Tue Mar 06 13:10:00 2007 UTC)"
diff --git a/init/Kconfig b/init/Kconfig
index f977086..b170aa1 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -304,6 +304,22 @@
If unsure, say N.
+config BLK_DEV_INITRD
+ bool "Initial RAM filesystem and RAM disk (initramfs/initrd) support"
+ depends on BROKEN || !FRV
+ help
+ The initial RAM filesystem is a ramfs which is loaded by the
+ boot loader (loadlin or lilo) and that is mounted as root
+ before the normal boot procedure. It is typically used to
+ load modules needed to mount the "real" root file system,
+ etc. See <file:Documentation/initrd.txt> for details.
+
+ If RAM disk support (BLK_DEV_RAM) is also included, this
+ also enables initial RAM disk (initrd) support and adds
+ 15 Kbytes (more on some other architectures) to the kernel size.
+
+ If unsure say Y.
+
if BLK_DEV_INITRD
source "usr/Kconfig"
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 0b5ecbe..554ac36 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -731,7 +731,8 @@
if (IS_ERR(name))
return PTR_ERR(name);
- mutex_lock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
+ mutex_lock_nested(&mqueue_mnt->mnt_root->d_inode->i_mutex,
+ I_MUTEX_PARENT);
dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name));
if (IS_ERR(dentry)) {
err = PTR_ERR(dentry);
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 476cb0c..ec4cb9f 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -540,19 +540,19 @@
/*
* Switch to high resolution mode
*/
-static void hrtimer_switch_to_hres(void)
+static int hrtimer_switch_to_hres(void)
{
struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
unsigned long flags;
if (base->hres_active)
- return;
+ return 1;
local_irq_save(flags);
if (tick_init_highres()) {
local_irq_restore(flags);
- return;
+ return 0;
}
base->hres_active = 1;
base->clock_base[CLOCK_REALTIME].resolution = KTIME_HIGH_RES;
@@ -565,13 +565,14 @@
local_irq_restore(flags);
printk(KERN_INFO "Switched to high resolution mode on CPU %d\n",
smp_processor_id());
+ return 1;
}
#else
static inline int hrtimer_hres_active(void) { return 0; }
static inline int hrtimer_is_hres_enabled(void) { return 0; }
-static inline void hrtimer_switch_to_hres(void) { }
+static inline int hrtimer_switch_to_hres(void) { return 0; }
static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { }
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
struct hrtimer_clock_base *base)
@@ -1130,6 +1131,9 @@
if (base->softirq_time.tv64 <= timer->expires.tv64)
break;
+#ifdef CONFIG_HIGH_RES_TIMERS
+ WARN_ON_ONCE(timer->cb_mode == HRTIMER_CB_IRQSAFE_NO_SOFTIRQ);
+#endif
timer_stats_account_hrtimer(timer);
fn = timer->function;
@@ -1173,7 +1177,8 @@
* deadlock vs. xtime_lock.
*/
if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
- hrtimer_switch_to_hres();
+ if (hrtimer_switch_to_hres())
+ return;
hrtimer_get_softirq_time(cpu_base);
@@ -1355,17 +1360,16 @@
tick_cancel_sched_timer(cpu);
local_irq_disable();
-
- spin_lock(&new_base->lock);
- spin_lock(&old_base->lock);
+ double_spin_lock(&new_base->lock, &old_base->lock,
+ smp_processor_id() < cpu);
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
migrate_hrtimer_list(&old_base->clock_base[i],
&new_base->clock_base[i]);
}
- spin_unlock(&old_base->lock);
- spin_unlock(&new_base->lock);
+ double_spin_unlock(&new_base->lock, &old_base->lock,
+ smp_processor_id() < cpu);
local_irq_enable();
put_cpu_var(hrtimer_bases);
}
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 95f6657..51a4dd0 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -81,30 +81,35 @@
bool "Software Suspend"
depends on PM && SWAP && ((X86 && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP))
---help---
- Enable the possibility of suspending the machine.
- It doesn't need ACPI or APM.
- You may suspend your machine by 'swsusp' or 'shutdown -z <time>'
- (patch for sysvinit needed).
+ Enable the suspend to disk (STD) functionality.
- It creates an image which is saved in your active swap. Upon next
+ You can suspend your machine with 'echo disk > /sys/power/state'.
+ Alternatively, you can use the additional userland tools available
+ from <http://suspend.sf.net>.
+
+ In principle it does not require ACPI or APM, although for example
+ ACPI will be used if available.
+
+ It creates an image which is saved in your active swap. Upon the next
boot, pass the 'resume=/dev/swappartition' argument to the kernel to
have it detect the saved image, restore memory state from it, and
continue to run as before. If you do not want the previous state to
- be reloaded, then use the 'noresume' kernel argument. However, note
- that your partitions will be fsck'd and you must re-mkswap your swap
- partitions. It does not work with swap files.
+ be reloaded, then use the 'noresume' kernel command line argument.
+ Note, however, that fsck will be run on your filesystems and you will
+ need to run mkswap against the swap partition used for the suspend.
- Right now you may boot without resuming and then later resume but
- in meantime you cannot use those swap partitions/files which were
- involved in suspending. Also in this case there is a risk that buffers
- on disk won't match with saved ones.
+ It also works with swap files to a limited extent (for details see
+ <file:Documentation/power/swsusp-and-swap-files.txt>).
+
+ Right now you may boot without resuming and resume later but in the
+ meantime you cannot use the swap partition(s)/file(s) involved in
+ suspending. Also in this case you must not use the filesystems
+ that were mounted before the suspend. In particular, you MUST NOT
+ MOUNT any journaled filesystems mounted before the suspend or they
+ will get corrupted in a nasty way.
For more information take a look at <file:Documentation/power/swsusp.txt>.
- (For now, swsusp is incompatible with PAE aka HIGHMEM_64G on i386.
- we need identity mapping for resume to work, and that is trivial
- to get with 4MB pages, but less than trivial on PAE).
-
config PM_STD_PARTITION
string "Default resume partition"
depends on SOFTWARE_SUSPEND
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 482b11f..bcd14e8 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -60,19 +60,19 @@
static int shuffle_interval = 5; /* Interval between shuffles (in sec)*/
static char *torture_type = "rcu"; /* What RCU implementation to torture. */
-module_param(nreaders, int, 0);
+module_param(nreaders, int, 0444);
MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
-module_param(nfakewriters, int, 0);
+module_param(nfakewriters, int, 0444);
MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
-module_param(stat_interval, int, 0);
+module_param(stat_interval, int, 0444);
MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
-module_param(verbose, bool, 0);
+module_param(verbose, bool, 0444);
MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
-module_param(test_no_idle_hz, bool, 0);
+module_param(test_no_idle_hz, bool, 0444);
MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
-module_param(shuffle_interval, int, 0);
+module_param(shuffle_interval, int, 0444);
MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
-module_param(torture_type, charp, 0);
+module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");
#define TORTURE_FLAG "-torture:"
diff --git a/kernel/sched.c b/kernel/sched.c
index 5f102e6..a4ca632 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3006,23 +3006,6 @@
}
#endif
-static inline void wake_priority_sleeper(struct rq *rq)
-{
-#ifdef CONFIG_SCHED_SMT
- if (!rq->nr_running)
- return;
-
- spin_lock(&rq->lock);
- /*
- * If an SMT sibling task has been put to sleep for priority
- * reasons reschedule the idle task to see if it can now run.
- */
- if (rq->nr_running)
- resched_task(rq->idle);
- spin_unlock(&rq->lock);
-#endif
-}
-
DEFINE_PER_CPU(struct kernel_stat, kstat);
EXPORT_PER_CPU_SYMBOL(kstat);
@@ -3239,10 +3222,7 @@
update_cpu_clock(p, rq, now);
- if (p == rq->idle)
- /* Task on the idle queue */
- wake_priority_sleeper(rq);
- else
+ if (p != rq->idle)
task_running_tick(rq, p);
#ifdef CONFIG_SMP
update_load(rq);
@@ -3251,136 +3231,6 @@
#endif
}
-#ifdef CONFIG_SCHED_SMT
-static inline void wakeup_busy_runqueue(struct rq *rq)
-{
- /* If an SMT runqueue is sleeping due to priority reasons wake it up */
- if (rq->curr == rq->idle && rq->nr_running)
- resched_task(rq->idle);
-}
-
-/*
- * Called with interrupt disabled and this_rq's runqueue locked.
- */
-static void wake_sleeping_dependent(int this_cpu)
-{
- struct sched_domain *tmp, *sd = NULL;
- int i;
-
- for_each_domain(this_cpu, tmp) {
- if (tmp->flags & SD_SHARE_CPUPOWER) {
- sd = tmp;
- break;
- }
- }
-
- if (!sd)
- return;
-
- for_each_cpu_mask(i, sd->span) {
- struct rq *smt_rq = cpu_rq(i);
-
- if (i == this_cpu)
- continue;
- if (unlikely(!spin_trylock(&smt_rq->lock)))
- continue;
-
- wakeup_busy_runqueue(smt_rq);
- spin_unlock(&smt_rq->lock);
- }
-}
-
-/*
- * number of 'lost' timeslices this task wont be able to fully
- * utilize, if another task runs on a sibling. This models the
- * slowdown effect of other tasks running on siblings:
- */
-static inline unsigned long
-smt_slice(struct task_struct *p, struct sched_domain *sd)
-{
- return p->time_slice * (100 - sd->per_cpu_gain) / 100;
-}
-
-/*
- * To minimise lock contention and not have to drop this_rq's runlock we only
- * trylock the sibling runqueues and bypass those runqueues if we fail to
- * acquire their lock. As we only trylock the normal locking order does not
- * need to be obeyed.
- */
-static int
-dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p)
-{
- struct sched_domain *tmp, *sd = NULL;
- int ret = 0, i;
-
- /* kernel/rt threads do not participate in dependent sleeping */
- if (!p->mm || rt_task(p))
- return 0;
-
- for_each_domain(this_cpu, tmp) {
- if (tmp->flags & SD_SHARE_CPUPOWER) {
- sd = tmp;
- break;
- }
- }
-
- if (!sd)
- return 0;
-
- for_each_cpu_mask(i, sd->span) {
- struct task_struct *smt_curr;
- struct rq *smt_rq;
-
- if (i == this_cpu)
- continue;
-
- smt_rq = cpu_rq(i);
- if (unlikely(!spin_trylock(&smt_rq->lock)))
- continue;
-
- smt_curr = smt_rq->curr;
-
- if (!smt_curr->mm)
- goto unlock;
-
- /*
- * If a user task with lower static priority than the
- * running task on the SMT sibling is trying to schedule,
- * delay it till there is proportionately less timeslice
- * left of the sibling task to prevent a lower priority
- * task from using an unfair proportion of the
- * physical cpu's resources. -ck
- */
- if (rt_task(smt_curr)) {
- /*
- * With real time tasks we run non-rt tasks only
- * per_cpu_gain% of the time.
- */
- if ((jiffies % DEF_TIMESLICE) >
- (sd->per_cpu_gain * DEF_TIMESLICE / 100))
- ret = 1;
- } else {
- if (smt_curr->static_prio < p->static_prio &&
- !TASK_PREEMPTS_CURR(p, smt_rq) &&
- smt_slice(smt_curr, sd) > task_timeslice(p))
- ret = 1;
- }
-unlock:
- spin_unlock(&smt_rq->lock);
- }
- return ret;
-}
-#else
-static inline void wake_sleeping_dependent(int this_cpu)
-{
-}
-static inline int
-dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p)
-{
- return 0;
-}
-#endif
-
#if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
void fastcall add_preempt_count(int val)
@@ -3507,7 +3357,6 @@
if (!rq->nr_running) {
next = rq->idle;
rq->expired_timestamp = 0;
- wake_sleeping_dependent(cpu);
goto switch_tasks;
}
}
@@ -3547,8 +3396,6 @@
}
}
next->sleep_type = SLEEP_NORMAL;
- if (rq->nr_running == 1 && dependent_sleeper(cpu, rq, next))
- next = rq->idle;
switch_tasks:
if (next == rq->idle)
schedstat_inc(rq, sched_goidle);
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 193a079..5b0e46b 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -55,16 +55,18 @@
static char override_name[32];
static int finished_booting;
-/* clocksource_done_booting - Called near the end of bootup
+/* clocksource_done_booting - Called near the end of core bootup
*
- * Hack to avoid lots of clocksource churn at boot time
+ * Hack to avoid lots of clocksource churn at boot time.
+ * We use fs_initcall because we want this to start before
+ * device_initcall but after subsys_initcall.
*/
static int __init clocksource_done_booting(void)
{
finished_booting = 1;
return 0;
}
-late_initcall(clocksource_done_booting);
+fs_initcall(clocksource_done_booting);
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static LIST_HEAD(watchdog_list);
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 12b3efe..5567745 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -284,6 +284,42 @@
spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
+void tick_suspend_broadcast(void)
+{
+ struct clock_event_device *bc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tick_broadcast_lock, flags);
+
+ bc = tick_broadcast_device.evtdev;
+ if (bc && tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
+ clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
+
+ spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+}
+
+int tick_resume_broadcast(void)
+{
+ struct clock_event_device *bc;
+ unsigned long flags;
+ int broadcast = 0;
+
+ spin_lock_irqsave(&tick_broadcast_lock, flags);
+
+ bc = tick_broadcast_device.evtdev;
+ if (bc) {
+ if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC &&
+ !cpus_empty(tick_broadcast_mask))
+ tick_broadcast_start_periodic(bc);
+
+ broadcast = cpu_isset(smp_processor_id(), tick_broadcast_mask);
+ }
+ spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+
+ return broadcast;
+}
+
+
#ifdef CONFIG_TICK_ONESHOT
static cpumask_t tick_broadcast_oneshot_mask;
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 0986a2b..43ba1bd 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -298,6 +298,28 @@
spin_unlock_irqrestore(&tick_device_lock, flags);
}
+static void tick_suspend_periodic(void)
+{
+ struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tick_device_lock, flags);
+ if (td->mode == TICKDEV_MODE_PERIODIC)
+ clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
+ spin_unlock_irqrestore(&tick_device_lock, flags);
+}
+
+static void tick_resume_periodic(void)
+{
+ struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tick_device_lock, flags);
+ if (td->mode == TICKDEV_MODE_PERIODIC)
+ tick_setup_periodic(td->evtdev, 0);
+ spin_unlock_irqrestore(&tick_device_lock, flags);
+}
+
/*
* Notification about clock event devices
*/
@@ -325,6 +347,16 @@
tick_shutdown(dev);
break;
+ case CLOCK_EVT_NOTIFY_SUSPEND:
+ tick_suspend_periodic();
+ tick_suspend_broadcast();
+ break;
+
+ case CLOCK_EVT_NOTIFY_RESUME:
+ if (!tick_resume_broadcast())
+ tick_resume_periodic();
+ break;
+
default:
break;
}
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 54861a0..75890ef 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -67,6 +67,8 @@
extern int tick_is_broadcast_device(struct clock_event_device *dev);
extern void tick_broadcast_on_off(unsigned long reason, int *oncpu);
extern void tick_shutdown_broadcast(unsigned int *cpup);
+extern void tick_suspend_broadcast(void);
+extern int tick_resume_broadcast(void);
extern void
tick_set_periodic_handler(struct clock_event_device *dev, int broadcast);
@@ -90,6 +92,8 @@
static inline void tick_do_periodic_broadcast(struct clock_event_device *d) { }
static inline void tick_broadcast_on_off(unsigned long reason, int *oncpu) { }
static inline void tick_shutdown_broadcast(unsigned int *cpup) { }
+static inline void tick_suspend_broadcast(void) { }
+static inline int tick_resume_broadcast(void) { return 0; }
/*
* Set the periodic handler in non broadcast mode
diff --git a/kernel/timer.c b/kernel/timer.c
index 6663a87..797cccb 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -862,6 +862,8 @@
clock->error = 0;
ntp_clear();
+ update_vsyscall(&xtime, clock);
+
write_sequnlock_irqrestore(&xtime_lock, flags);
/* signal hrtimers about time change */
@@ -997,6 +999,9 @@
write_sequnlock_irqrestore(&xtime_lock, flags);
touch_softlockup_watchdog();
+
+ clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);
+
/* Resume hrtimers */
clock_was_set();
@@ -1011,6 +1016,9 @@
timekeeping_suspended = 1;
timekeeping_suspend_time = read_persistent_clock();
write_sequnlock_irqrestore(&xtime_lock, flags);
+
+ clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
+
return 0;
}
@@ -1651,8 +1659,8 @@
new_base = get_cpu_var(tvec_bases);
local_irq_disable();
- spin_lock(&new_base->lock);
- spin_lock(&old_base->lock);
+ double_spin_lock(&new_base->lock, &old_base->lock,
+ smp_processor_id() < cpu);
BUG_ON(old_base->running_timer);
@@ -1665,8 +1673,8 @@
migrate_timer_list(new_base, old_base->tv5.vec + i);
}
- spin_unlock(&old_base->lock);
- spin_unlock(&new_base->lock);
+ double_spin_unlock(&new_base->lock, &old_base->lock,
+ smp_processor_id() < cpu);
local_irq_enable();
put_cpu_var(tvec_bases);
}
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 623a68a..9970e55 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -28,7 +28,6 @@
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/scatterlist.h>
-#include <asm/swiotlb.h>
#include <linux/init.h>
#include <linux/bootmem.h>
@@ -36,10 +35,8 @@
#define OFFSET(val,align) ((unsigned long) \
( (val) & ( (align) - 1)))
-#ifndef SG_ENT_VIRT_ADDRESS
#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset)
#define SG_ENT_PHYS_ADDRESS(sg) virt_to_bus(SG_ENT_VIRT_ADDRESS(sg))
-#endif
/*
* Maximum allowable number of contiguous slabs to map,
@@ -104,25 +101,13 @@
* We need to save away the original address corresponding to a mapped entry
* for the sync operations.
*/
-#ifndef SWIOTLB_ARCH_HAS_IO_TLB_ADDR_T
-typedef char *io_tlb_addr_t;
-#define swiotlb_orig_addr_null(buffer) (!(buffer))
-#define ptr_to_io_tlb_addr(ptr) (ptr)
-#define page_to_io_tlb_addr(pg, off) (page_address(pg) + (off))
-#define sg_to_io_tlb_addr(sg) SG_ENT_VIRT_ADDRESS(sg)
-#endif
-static io_tlb_addr_t *io_tlb_orig_addr;
+static unsigned char **io_tlb_orig_addr;
/*
* Protect the above data structures in the map and unmap calls
*/
static DEFINE_SPINLOCK(io_tlb_lock);
-#ifdef SWIOTLB_EXTRA_VARIABLES
-SWIOTLB_EXTRA_VARIABLES;
-#endif
-
-#ifndef SWIOTLB_ARCH_HAS_SETUP_IO_TLB_NPAGES
static int __init
setup_io_tlb_npages(char *str)
{
@@ -137,25 +122,9 @@
swiotlb_force = 1;
return 1;
}
-#endif
__setup("swiotlb=", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */
-#ifndef swiotlb_adjust_size
-#define swiotlb_adjust_size(size) ((void)0)
-#endif
-
-#ifndef swiotlb_adjust_seg
-#define swiotlb_adjust_seg(start, size) ((void)0)
-#endif
-
-#ifndef swiotlb_print_info
-#define swiotlb_print_info(bytes) \
- printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - " \
- "0x%lx\n", bytes >> 20, \
- virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end))
-#endif
-
/*
* Statically reserve bounce buffer space and initialize bounce buffer data
* structures for the software IO TLB used to implement the DMA API.
@@ -169,8 +138,6 @@
io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
}
- swiotlb_adjust_size(io_tlb_nslabs);
- swiotlb_adjust_size(io_tlb_overflow);
bytes = io_tlb_nslabs << IO_TLB_SHIFT;
@@ -188,14 +155,10 @@
* between io_tlb_start and io_tlb_end.
*/
io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
- for (i = 0; i < io_tlb_nslabs; i++) {
- if ( !(i % IO_TLB_SEGSIZE) )
- swiotlb_adjust_seg(io_tlb_start + (i << IO_TLB_SHIFT),
- IO_TLB_SEGSIZE << IO_TLB_SHIFT);
+ for (i = 0; i < io_tlb_nslabs; i++)
io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
- }
io_tlb_index = 0;
- io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(io_tlb_addr_t));
+ io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));
/*
* Get the overflow emergency buffer
@@ -203,21 +166,17 @@
io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
if (!io_tlb_overflow_buffer)
panic("Cannot allocate SWIOTLB overflow buffer!\n");
- swiotlb_adjust_seg(io_tlb_overflow_buffer, io_tlb_overflow);
- swiotlb_print_info(bytes);
+ printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
+ virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
}
-#ifndef __swiotlb_init_with_default_size
-#define __swiotlb_init_with_default_size swiotlb_init_with_default_size
-#endif
void __init
swiotlb_init(void)
{
- __swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */
+ swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */
}
-#ifdef SWIOTLB_ARCH_NEED_LATE_INIT
/*
* Systems with larger DMA zones (those that don't support ISA) can
* initialize the swiotlb later using the slab allocator if needed.
@@ -275,12 +234,12 @@
io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
io_tlb_index = 0;
- io_tlb_orig_addr = (io_tlb_addr_t *)__get_free_pages(GFP_KERNEL,
- get_order(io_tlb_nslabs * sizeof(io_tlb_addr_t)));
+ io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL,
+ get_order(io_tlb_nslabs * sizeof(char *)));
if (!io_tlb_orig_addr)
goto cleanup3;
- memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(io_tlb_addr_t));
+ memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *));
/*
* Get the overflow emergency buffer
@@ -290,17 +249,19 @@
if (!io_tlb_overflow_buffer)
goto cleanup4;
- swiotlb_print_info(bytes);
+ printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - "
+ "0x%lx\n", bytes >> 20,
+ virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
return 0;
cleanup4:
- free_pages((unsigned long)io_tlb_orig_addr,
- get_order(io_tlb_nslabs * sizeof(io_tlb_addr_t)));
+ free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs *
+ sizeof(char *)));
io_tlb_orig_addr = NULL;
cleanup3:
- free_pages((unsigned long)io_tlb_list,
- get_order(io_tlb_nslabs * sizeof(int)));
+ free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
+ sizeof(int)));
io_tlb_list = NULL;
cleanup2:
io_tlb_end = NULL;
@@ -310,9 +271,7 @@
io_tlb_nslabs = req_nslabs;
return -ENOMEM;
}
-#endif
-#ifndef SWIOTLB_ARCH_HAS_NEEDS_MAPPING
static int
address_needs_mapping(struct device *hwdev, dma_addr_t addr)
{
@@ -323,35 +282,11 @@
return (addr & ~mask) != 0;
}
-static inline int range_needs_mapping(const void *ptr, size_t size)
-{
- return swiotlb_force;
-}
-
-static inline int order_needs_mapping(unsigned int order)
-{
- return 0;
-}
-#endif
-
-static void
-__sync_single(io_tlb_addr_t buffer, char *dma_addr, size_t size, int dir)
-{
-#ifndef SWIOTLB_ARCH_HAS_SYNC_SINGLE
- if (dir == DMA_TO_DEVICE)
- memcpy(dma_addr, buffer, size);
- else
- memcpy(buffer, dma_addr, size);
-#else
- __swiotlb_arch_sync_single(buffer, dma_addr, size, dir);
-#endif
-}
-
/*
* Allocates bounce buffer and returns its kernel virtual address.
*/
static void *
-map_single(struct device *hwdev, io_tlb_addr_t buffer, size_t size, int dir)
+map_single(struct device *hwdev, char *buffer, size_t size, int dir)
{
unsigned long flags;
char *dma_addr;
@@ -424,7 +359,7 @@
*/
io_tlb_orig_addr[index] = buffer;
if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
- __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
+ memcpy(dma_addr, buffer, size);
return dma_addr;
}
@@ -438,18 +373,17 @@
unsigned long flags;
int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
- io_tlb_addr_t buffer = io_tlb_orig_addr[index];
+ char *buffer = io_tlb_orig_addr[index];
/*
* First, sync the memory before unmapping the entry
*/
- if (!swiotlb_orig_addr_null(buffer)
- && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+ if (buffer && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
/*
* bounce... copy the data back into the original buffer * and
* delete the bounce buffer.
*/
- __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
+ memcpy(buffer, dma_addr, size);
/*
* Return the buffer to the free list by setting the corresponding
@@ -482,18 +416,18 @@
int dir, int target)
{
int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
- io_tlb_addr_t buffer = io_tlb_orig_addr[index];
+ char *buffer = io_tlb_orig_addr[index];
switch (target) {
case SYNC_FOR_CPU:
if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
- __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
+ memcpy(buffer, dma_addr, size);
else
BUG_ON(dir != DMA_TO_DEVICE);
break;
case SYNC_FOR_DEVICE:
if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
- __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
+ memcpy(dma_addr, buffer, size);
else
BUG_ON(dir != DMA_FROM_DEVICE);
break;
@@ -502,8 +436,6 @@
}
}
-#ifdef SWIOTLB_ARCH_NEED_ALLOC
-
void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags)
@@ -519,10 +451,7 @@
*/
flags |= GFP_DMA;
- if (!order_needs_mapping(order))
- ret = (void *)__get_free_pages(flags, order);
- else
- ret = NULL;
+ ret = (void *)__get_free_pages(flags, order);
if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
/*
* The allocated memory isn't reachable by the device.
@@ -560,7 +489,6 @@
*dma_handle = dev_addr;
return ret;
}
-EXPORT_SYMBOL(swiotlb_alloc_coherent);
void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
@@ -573,9 +501,6 @@
/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
}
-EXPORT_SYMBOL(swiotlb_free_coherent);
-
-#endif
static void
swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
@@ -617,14 +542,13 @@
* we can safely return the device addr and not worry about bounce
* buffering it.
*/
- if (!range_needs_mapping(ptr, size)
- && !address_needs_mapping(hwdev, dev_addr))
+ if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
return dev_addr;
/*
* Oh well, have to allocate and map a bounce buffer.
*/
- map = map_single(hwdev, ptr_to_io_tlb_addr(ptr), size, dir);
+ map = map_single(hwdev, ptr, size, dir);
if (!map) {
swiotlb_full(hwdev, size, dir, 1);
map = io_tlb_overflow_buffer;
@@ -752,16 +676,17 @@
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
int dir)
{
+ void *addr;
dma_addr_t dev_addr;
int i;
BUG_ON(dir == DMA_NONE);
for (i = 0; i < nelems; i++, sg++) {
- dev_addr = SG_ENT_PHYS_ADDRESS(sg);
- if (range_needs_mapping(SG_ENT_VIRT_ADDRESS(sg), sg->length)
- || address_needs_mapping(hwdev, dev_addr)) {
- void *map = map_single(hwdev, sg_to_io_tlb_addr(sg), sg->length, dir);
+ addr = SG_ENT_VIRT_ADDRESS(sg);
+ dev_addr = virt_to_bus(addr);
+ if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
+ void *map = map_single(hwdev, addr, sg->length, dir);
if (!map) {
/* Don't panic here, we expect map_sg users
to do proper error handling. */
@@ -835,44 +760,6 @@
swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
-#ifdef SWIOTLB_ARCH_NEED_MAP_PAGE
-
-dma_addr_t
-swiotlb_map_page(struct device *hwdev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- dma_addr_t dev_addr;
- char *map;
-
- dev_addr = page_to_bus(page) + offset;
- if (address_needs_mapping(hwdev, dev_addr)) {
- map = map_single(hwdev, page_to_io_tlb_addr(page, offset), size, direction);
- if (!map) {
- swiotlb_full(hwdev, size, direction, 1);
- map = io_tlb_overflow_buffer;
- }
- dev_addr = virt_to_bus(map);
- }
-
- return dev_addr;
-}
-
-void
-swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction direction)
-{
- char *dma_addr = bus_to_virt(dev_addr);
-
- BUG_ON(direction == DMA_NONE);
- if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
- unmap_single(hwdev, dma_addr, size, direction);
- else if (direction == DMA_FROM_DEVICE)
- dma_mark_clean(dma_addr, size);
-}
-
-#endif
-
int
swiotlb_dma_mapping_error(dma_addr_t dma_addr)
{
@@ -885,13 +772,10 @@
* during bus mastering, then you would pass 0x00ffffff as the mask to
* this function.
*/
-#ifndef __swiotlb_dma_supported
-#define __swiotlb_dma_supported(hwdev, mask) (virt_to_bus(io_tlb_end - 1) <= (mask))
-#endif
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
- return __swiotlb_dma_supported(hwdev, mask);
+ return virt_to_bus(io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL(swiotlb_init);
@@ -906,4 +790,6 @@
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
EXPORT_SYMBOL(swiotlb_dma_mapping_error);
+EXPORT_SYMBOL(swiotlb_alloc_coherent);
+EXPORT_SYMBOL(swiotlb_free_coherent);
EXPORT_SYMBOL(swiotlb_dma_supported);
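
With the arch override hooks removed, the swiotlb bounce copies collapse to plain memcpy() calls, and only the direction rules remain interesting; condensed from the hunks above (buffer is the original kernel address recorded in io_tlb_orig_addr, dma_addr the bounce slot):

	/* map_single() / sync-for-device: the device is about to read,
	 * so the bounce slot must be filled from the caller's buffer.
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		memcpy(dma_addr, buffer, size);

	/* unmap_single() / sync-for-cpu: the device has written,
	 * so copy the bounce slot back to the caller's buffer.
	 */
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		memcpy(buffer, dma_addr, size);
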
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index cf2a538..d76e8eb 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -321,15 +321,6 @@
return 0;
}
-/* Check if a vma is migratable */
-static inline int vma_migratable(struct vm_area_struct *vma)
-{
- if (vma->vm_flags & (
- VM_LOCKED|VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED))
- return 0;
- return 1;
-}
-
/*
* Check if all pages in a range are on a set of nodes.
* If pagelist != NULL then isolate pages from the LRU and
diff --git a/mm/migrate.c b/mm/migrate.c
index e9b161b..7a66ca2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -781,7 +781,7 @@
err = -EFAULT;
vma = find_vma(mm, pp->addr);
- if (!vma)
+ if (!vma || !vma_migratable(vma))
goto set_status;
page = follow_page(vma, pp->addr, FOLL_GET);
diff --git a/mm/shmem.c b/mm/shmem.c
index fcb0788..b8c429a 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -175,7 +175,7 @@
vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}
-static struct super_operations shmem_ops;
+static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
@@ -2383,7 +2383,7 @@
#endif
};
-static struct super_operations shmem_ops = {
+static const struct super_operations shmem_ops = {
.alloc_inode = shmem_alloc_inode,
.destroy_inode = shmem_destroy_inode,
#ifdef CONFIG_TMPFS
diff --git a/net/core/sock.c b/net/core/sock.c
index e9986ac..8d65d64 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1597,7 +1597,7 @@
{
struct sock *sk = sock->sk;
- if (sk->sk_prot->compat_setsockopt != NULL)
+ if (sk->sk_prot->compat_getsockopt != NULL)
return sk->sk_prot->compat_getsockopt(sk, level, optname,
optval, optlen);
return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 4dee462..287099f 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -256,10 +256,10 @@
* (only one is active at a time); when moving to bidirectional
* service, this needs to be revised.
*/
- if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER)
- ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
- else
+ if (dccp_sk(sk)->dccps_role == DCCP_ROLE_CLIENT)
ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
+ else /* listening or connected server */
+ ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
return __dccp_rcv_established(sk, skb, dh, len);
discard:
@@ -495,10 +495,10 @@
goto discard;
/* XXX see the comments in dccp_rcv_established about this */
- if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER)
- ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
- else
+ if (dccp_sk(sk)->dccps_role == DCCP_ROLE_CLIENT)
ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
+ else
+ ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
}
/*
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 6656bb4..6d235b3 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -103,7 +103,7 @@
if (newsk != NULL) {
const struct dccp_request_sock *dreq = dccp_rsk(req);
- struct inet_connection_sock *newicsk = inet_csk(sk);
+ struct inet_connection_sock *newicsk = inet_csk(newsk);
struct dccp_sock *newdp = dccp_sk(newsk);
struct dccp_minisock *newdmsk = dccp_msk(newsk);
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 0637213..1c6a084 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1251,6 +1251,28 @@
}
/*
+ * Resend IGMP JOIN report; used for bonding.
+ */
+void ip_mc_rejoin_group(struct ip_mc_list *im)
+{
+ struct in_device *in_dev = im->interface;
+
+#ifdef CONFIG_IP_MULTICAST
+ if (im->multiaddr == IGMP_ALL_HOSTS)
+ return;
+
+ if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
+ igmp_mod_timer(im, IGMP_Initial_Report_Delay);
+ return;
+ }
+ /* else, v3 */
+ im->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
+ IGMP_Unsolicited_Report_Count;
+ igmp_ifc_event(in_dev);
+#endif
+}
+
+/*
* A socket has left a multicast group on device dev
*/
@@ -2596,3 +2618,4 @@
EXPORT_SYMBOL(ip_mc_dec_group);
EXPORT_SYMBOL(ip_mc_inc_group);
EXPORT_SYMBOL(ip_mc_join_group);
+EXPORT_SYMBOL(ip_mc_rejoin_group);
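
ip_mc_rejoin_group() is exported so a driver stacked on top of a device (the comment names bonding) can force IGMP membership reports to be resent, for instance after failing over to a new active slave. A hedged sketch of such a caller, assuming it already holds the usual in_dev locking while walking in_dev->mc_list; this is not the actual bonding code:

#include <linux/igmp.h>
#include <linux/inetdevice.h>

static void resend_igmp_reports_sketch(struct in_device *in_dev)
{
	struct ip_mc_list *im;

	for (im = in_dev->mc_list; im; im = im->next)
		ip_mc_rejoin_group(im);		/* re-announce each joined group */
}
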
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index 07ba1dd..23b99ae 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -1254,7 +1254,7 @@
list_for_each_entry(h, &unconfirmed, list) {
ct = tuplehash_to_ctrack(h);
if (iter(ct, data))
- goto found;
+ set_bit(IPS_DYING_BIT, &ct->status);
}
write_unlock_bh(&ip_conntrack_lock);
return NULL;
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
index 170d625..0a72eab 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
@@ -812,8 +812,10 @@
static const u8 tcp_valid_flags[(TH_FIN|TH_SYN|TH_RST|TH_PUSH|TH_ACK|TH_URG) + 1] =
{
[TH_SYN] = 1,
- [TH_SYN|TH_ACK] = 1,
[TH_SYN|TH_PUSH] = 1,
+ [TH_SYN|TH_URG] = 1,
+ [TH_SYN|TH_PUSH|TH_URG] = 1,
+ [TH_SYN|TH_ACK] = 1,
[TH_SYN|TH_ACK|TH_PUSH] = 1,
[TH_RST] = 1,
[TH_RST|TH_ACK] = 1,
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index b984db7..8f3e92d 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -379,8 +379,7 @@
return -ENOENT;
}
-#if defined(CONFIG_NF_CT_NETLINK) || \
- defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
@@ -435,8 +434,7 @@
.print_conntrack = ipv4_print_conntrack,
.prepare = ipv4_prepare,
.get_features = ipv4_get_features,
-#if defined(CONFIG_NF_CT_NETLINK) || \
- defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.tuple_to_nfattr = ipv4_tuple_to_nfattr,
.nfattr_to_tuple = ipv4_nfattr_to_tuple,
#endif
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 88cfa6a..5fd1e53 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -268,8 +268,7 @@
return icmp_error_message(skb, ctinfo, hooknum);
}
-#if defined(CONFIG_NF_CT_NETLINK) || \
- defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
@@ -368,8 +367,7 @@
.error = icmp_error,
.destroy = NULL,
.me = NULL,
-#if defined(CONFIG_NF_CT_NETLINK) || \
- defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.tuple_to_nfattr = icmp_tuple_to_nfattr,
.nfattr_to_tuple = icmp_nfattr_to_tuple,
#endif
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 2c01378..452e9d3 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -546,8 +546,7 @@
}
EXPORT_SYMBOL(nf_nat_protocol_unregister);
-#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
- defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
int
nf_nat_port_range_to_nfattr(struct sk_buff *skb,
const struct nf_nat_range *range)
diff --git a/net/ipv4/netfilter/nf_nat_proto_gre.c b/net/ipv4/netfilter/nf_nat_proto_gre.c
index d3de579..e5a34c1 100644
--- a/net/ipv4/netfilter/nf_nat_proto_gre.c
+++ b/net/ipv4/netfilter/nf_nat_proto_gre.c
@@ -152,8 +152,7 @@
.manip_pkt = gre_manip_pkt,
.in_range = gre_in_range,
.unique_tuple = gre_unique_tuple,
-#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
- defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.range_to_nfattr = nf_nat_port_range_to_nfattr,
.nfattr_to_range = nf_nat_port_nfattr_to_range,
#endif
diff --git a/net/ipv4/netfilter/nf_nat_proto_icmp.c b/net/ipv4/netfilter/nf_nat_proto_icmp.c
index 6bc2f06..f71ef9b 100644
--- a/net/ipv4/netfilter/nf_nat_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_icmp.c
@@ -78,8 +78,7 @@
.manip_pkt = icmp_manip_pkt,
.in_range = icmp_in_range,
.unique_tuple = icmp_unique_tuple,
-#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
- defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.range_to_nfattr = nf_nat_port_range_to_nfattr,
.nfattr_to_range = nf_nat_port_nfattr_to_range,
#endif
diff --git a/net/ipv4/netfilter/nf_nat_proto_tcp.c b/net/ipv4/netfilter/nf_nat_proto_tcp.c
index 439164c..123c959 100644
--- a/net/ipv4/netfilter/nf_nat_proto_tcp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_tcp.c
@@ -144,8 +144,7 @@
.manip_pkt = tcp_manip_pkt,
.in_range = tcp_in_range,
.unique_tuple = tcp_unique_tuple,
-#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
- defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.range_to_nfattr = nf_nat_port_range_to_nfattr,
.nfattr_to_range = nf_nat_port_nfattr_to_range,
#endif
diff --git a/net/ipv4/netfilter/nf_nat_proto_udp.c b/net/ipv4/netfilter/nf_nat_proto_udp.c
index 8cae6e0..1c4c70e 100644
--- a/net/ipv4/netfilter/nf_nat_proto_udp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_udp.c
@@ -134,8 +134,7 @@
.manip_pkt = udp_manip_pkt,
.in_range = udp_in_range,
.unique_tuple = udp_unique_tuple,
-#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
- defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.range_to_nfattr = nf_nat_port_range_to_nfattr,
.nfattr_to_range = nf_nat_port_nfattr_to_range,
#endif
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 0b2d265..1c405dd 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -15,6 +15,7 @@
struct dst_entry *dst;
struct flowi fl = {
.oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
+ .mark = skb->mark,
.nl_u =
{ .ip6_u =
{ .daddr = iph->daddr,
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 4b7be4b..6f19c4a 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -353,8 +353,7 @@
};
#endif
-#if defined(CONFIG_NF_CT_NETLINK) || \
- defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
@@ -403,8 +402,7 @@
.print_tuple = ipv6_print_tuple,
.print_conntrack = ipv6_print_conntrack,
.prepare = ipv6_prepare,
-#if defined(CONFIG_NF_CT_NETLINK) || \
- defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.tuple_to_nfattr = ipv6_tuple_to_nfattr,
.nfattr_to_tuple = ipv6_nfattr_to_tuple,
#endif
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 21f19cc..075da4f 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -244,8 +244,7 @@
return icmpv6_error_message(skb, dataoff, ctinfo, hooknum);
}
-#if defined(CONFIG_NF_CT_NETLINK) || \
- defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
@@ -327,8 +326,7 @@
.packet = icmpv6_packet,
.new = icmpv6_new,
.error = icmpv6_error,
-#if defined(CONFIG_NF_CT_NETLINK) || \
- defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.tuple_to_nfattr = icmpv6_tuple_to_nfattr,
.nfattr_to_tuple = icmpv6_nfattr_to_tuple,
#endif
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 32891eb..b3a70eb 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -976,8 +976,7 @@
}
EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
-#if defined(CONFIG_NF_CT_NETLINK) || \
- defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
@@ -1070,7 +1069,7 @@
list_for_each_entry(h, &unconfirmed, list) {
ct = nf_ct_tuplehash_to_ctrack(h);
if (iter(ct, data))
- goto found;
+ set_bit(IPS_DYING_BIT, &ct->status);
}
write_unlock_bh(&nf_conntrack_lock);
return NULL;
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index ac193ce..5434472 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -281,8 +281,7 @@
.new = gre_new,
.destroy = gre_destroy,
.me = THIS_MODULE,
-#if defined(CONFIG_NF_CONNTRACK_NETLINK) || \
- defined(CONFIG_NF_CONNTRACK_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.tuple_to_nfattr = nf_ct_port_tuple_to_nfattr,
.nfattr_to_tuple = nf_ct_port_nfattr_to_tuple,
#endif
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 069b85c..153d661 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -769,8 +769,10 @@
static u8 tcp_valid_flags[(TH_FIN|TH_SYN|TH_RST|TH_PUSH|TH_ACK|TH_URG) + 1] =
{
[TH_SYN] = 1,
- [TH_SYN|TH_ACK] = 1,
[TH_SYN|TH_PUSH] = 1,
+ [TH_SYN|TH_URG] = 1,
+ [TH_SYN|TH_PUSH|TH_URG] = 1,
+ [TH_SYN|TH_ACK] = 1,
[TH_SYN|TH_ACK|TH_PUSH] = 1,
[TH_RST] = 1,
[TH_RST|TH_ACK] = 1,
@@ -1099,8 +1101,7 @@
return 1;
}
-#if defined(CONFIG_NF_CT_NETLINK) || \
- defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
@@ -1378,8 +1379,7 @@
.packet = tcp_packet,
.new = tcp_new,
.error = tcp_error,
-#if defined(CONFIG_NF_CT_NETLINK) || \
- defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.to_nfattr = tcp_to_nfattr,
.from_nfattr = nfattr_to_tcp,
.tuple_to_nfattr = nf_ct_port_tuple_to_nfattr,
@@ -1408,8 +1408,7 @@
.packet = tcp_packet,
.new = tcp_new,
.error = tcp_error,
-#if defined(CONFIG_NF_CT_NETLINK) || \
- defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.to_nfattr = tcp_to_nfattr,
.from_nfattr = nfattr_to_tcp,
.tuple_to_nfattr = nf_ct_port_tuple_to_nfattr,
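
The tcp_valid_flags[] change above adds SYN|URG and SYN|PUSH|URG to the flag combinations conntrack accepts; the table is indexed by the TCP flag byte with the ECN bits masked off, leaving the six classic flags. A minimal standalone sketch of that kind of lookup (TH_* values as in netinet/tcp.h; the abbreviated table and the program itself are purely illustrative):

#include <stdio.h>

/* Flag bits as in netinet/tcp.h. */
#define TH_FIN	0x01
#define TH_SYN	0x02
#define TH_RST	0x04
#define TH_PUSH	0x08
#define TH_ACK	0x10
#define TH_URG	0x20

/* Abbreviated validity table; a zero entry means "malformed combination". */
static unsigned char valid[(TH_FIN|TH_SYN|TH_RST|TH_PUSH|TH_ACK|TH_URG) + 1] = {
	[TH_SYN]		= 1,
	[TH_SYN|TH_URG]		= 1,	/* combination accepted by the hunk above */
	[TH_SYN|TH_ACK]		= 1,
	[TH_RST]		= 1,
	[TH_RST|TH_ACK]		= 1,
	/* ... remaining legal combinations omitted for brevity ... */
};

int main(void)
{
	unsigned char flags = TH_SYN | TH_URG;	/* as read from a TCP header */
	unsigned char idx = flags & (TH_FIN|TH_SYN|TH_RST|TH_PUSH|TH_ACK|TH_URG);

	printf("flags 0x%02x: %s\n", idx, valid[idx] ? "valid" : "invalid");
	return 0;
}
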
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index d0a1cee..a5e5726 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -208,8 +208,7 @@
.packet = udp_packet,
.new = udp_new,
.error = udp_error,
-#if defined(CONFIG_NF_CT_NETLINK) || \
- defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.tuple_to_nfattr = nf_ct_port_tuple_to_nfattr,
.nfattr_to_tuple = nf_ct_port_nfattr_to_tuple,
#endif
@@ -236,8 +235,7 @@
.packet = udp_packet,
.new = udp_new,
.error = udp_error,
-#if defined(CONFIG_NF_CT_NETLINK) || \
- defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.tuple_to_nfattr = nf_ct_port_tuple_to_nfattr,
.nfattr_to_tuple = nf_ct_port_nfattr_to_tuple,
#endif
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index b8eab0d..91a0972 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -133,6 +133,7 @@
if (inst && atomic_dec_and_test(&inst->use)) {
UDEBUG("kfree(inst=%p)\n", inst);
kfree(inst);
+ module_put(THIS_MODULE);
}
}
@@ -217,6 +218,9 @@
spin_lock_bh(&inst->lock);
if (inst->skb) {
+ /* timer "holds" one reference (we have one more) */
+ if (del_timer(&inst->timer))
+ instance_put(inst);
if (inst->qlen)
__nfulnl_send(inst);
if (inst->skb) {
@@ -228,8 +232,6 @@
/* and finally put the refcount */
instance_put(inst);
-
- module_put(THIS_MODULE);
}
static inline void
@@ -363,9 +365,6 @@
{
int status;
- if (timer_pending(&inst->timer))
- del_timer(&inst->timer);
-
if (!inst->skb)
return 0;
@@ -393,8 +392,8 @@
spin_lock_bh(&inst->lock);
__nfulnl_send(inst);
- instance_put(inst);
spin_unlock_bh(&inst->lock);
+ instance_put(inst);
}
/* This is an inline function, we don't really care about a long
@@ -560,6 +559,7 @@
}
nlh->nlmsg_len = inst->skb->tail - old_tail;
+ inst->lastnlh = nlh;
return 0;
nlmsg_failure:
@@ -689,6 +689,9 @@
* enough room in the skb left. flush to userspace. */
UDEBUG("flushing old skb\n");
+ /* timer "holds" one reference (we have another one) */
+ if (del_timer(&inst->timer))
+ instance_put(inst);
__nfulnl_send(inst);
if (!(inst->skb = nfulnl_alloc_skb(nlbufsiz, size))) {
@@ -711,15 +714,16 @@
inst->timer.expires = jiffies + (inst->flushtimeout*HZ/100);
add_timer(&inst->timer);
}
- spin_unlock_bh(&inst->lock);
+unlock_and_release:
+ spin_unlock_bh(&inst->lock);
+ instance_put(inst);
return;
alloc_failure:
- spin_unlock_bh(&inst->lock);
- instance_put(inst);
UDEBUG("error allocating skb\n");
/* FIXME: statistics */
+ goto unlock_and_release;
}
static int
@@ -856,6 +860,9 @@
ret = -EINVAL;
break;
}
+
+ if (!inst)
+ goto out;
} else {
if (!inst) {
UDEBUG("no config command, and no instance for "
@@ -909,6 +916,7 @@
out_put:
instance_put(inst);
+out:
return ret;
}
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 8353829..b4db53f 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -27,22 +27,26 @@
#define RPCDBG_FACILITY RPCDBG_SVCDSP
+#define svc_serv_is_pooled(serv) ((serv)->sv_function)
+
/*
* Mode for mapping cpus to pools.
*/
enum {
- SVC_POOL_NONE = -1, /* uninitialised, choose one of the others */
+ SVC_POOL_AUTO = -1, /* choose one of the others */
SVC_POOL_GLOBAL, /* no mapping, just a single global pool
* (legacy & UP mode) */
SVC_POOL_PERCPU, /* one pool per cpu */
SVC_POOL_PERNODE /* one pool per numa node */
};
+#define SVC_POOL_DEFAULT SVC_POOL_GLOBAL
/*
* Structure for mapping cpus to pools and vice versa.
* Setup once during sunrpc initialisation.
*/
static struct svc_pool_map {
+ int count; /* How many svc_servs use us */
int mode; /* Note: int not enum to avoid
* warnings about "enumeration value
* not handled in switch" */
@@ -50,9 +54,63 @@
unsigned int *pool_to; /* maps pool id to cpu or node */
unsigned int *to_pool; /* maps cpu or node to pool id */
} svc_pool_map = {
- .mode = SVC_POOL_NONE
+ .count = 0,
+ .mode = SVC_POOL_DEFAULT
};
+static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */
+static int
+param_set_pool_mode(const char *val, struct kernel_param *kp)
+{
+ int *ip = (int *)kp->arg;
+ struct svc_pool_map *m = &svc_pool_map;
+ int err;
+
+ mutex_lock(&svc_pool_map_mutex);
+
+ err = -EBUSY;
+ if (m->count)
+ goto out;
+
+ err = 0;
+ if (!strncmp(val, "auto", 4))
+ *ip = SVC_POOL_AUTO;
+ else if (!strncmp(val, "global", 6))
+ *ip = SVC_POOL_GLOBAL;
+ else if (!strncmp(val, "percpu", 6))
+ *ip = SVC_POOL_PERCPU;
+ else if (!strncmp(val, "pernode", 7))
+ *ip = SVC_POOL_PERNODE;
+ else
+ err = -EINVAL;
+
+out:
+ mutex_unlock(&svc_pool_map_mutex);
+ return err;
+}
+
+static int
+param_get_pool_mode(char *buf, struct kernel_param *kp)
+{
+ int *ip = (int *)kp->arg;
+
+ switch (*ip)
+ {
+ case SVC_POOL_AUTO:
+ return strlcpy(buf, "auto", 20);
+ case SVC_POOL_GLOBAL:
+ return strlcpy(buf, "global", 20);
+ case SVC_POOL_PERCPU:
+ return strlcpy(buf, "percpu", 20);
+ case SVC_POOL_PERNODE:
+ return strlcpy(buf, "pernode", 20);
+ default:
+ return sprintf(buf, "%d", *ip);
+ }
+}
+
+module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
+ &svc_pool_map.mode, 0644);
/*
* Detect best pool mapping mode heuristically,
@@ -166,18 +224,25 @@
/*
- * Build the global map of cpus to pools and vice versa.
+ * Add a reference to the global map of cpus to pools (and
+ * vice versa). Initialise the map if we're the first user.
+ * Returns the number of pools.
*/
static unsigned int
-svc_pool_map_init(void)
+svc_pool_map_get(void)
{
struct svc_pool_map *m = &svc_pool_map;
int npools = -1;
- if (m->mode != SVC_POOL_NONE)
- return m->npools;
+ mutex_lock(&svc_pool_map_mutex);
- m->mode = svc_pool_map_choose_mode();
+ if (m->count++) {
+ mutex_unlock(&svc_pool_map_mutex);
+ return m->npools;
+ }
+
+ if (m->mode == SVC_POOL_AUTO)
+ m->mode = svc_pool_map_choose_mode();
switch (m->mode) {
case SVC_POOL_PERCPU:
@@ -195,9 +260,36 @@
}
m->npools = npools;
+ mutex_unlock(&svc_pool_map_mutex);
return m->npools;
}
+
+/*
+ * Drop a reference to the global map of cpus to pools.
+ * When the last reference is dropped, the map data is
+ * freed; this allows the sysadmin to change the pool
+ * mode using the pool_mode module option without
+ * rebooting or re-loading sunrpc.ko.
+ */
+static void
+svc_pool_map_put(void)
+{
+ struct svc_pool_map *m = &svc_pool_map;
+
+ mutex_lock(&svc_pool_map_mutex);
+
+ if (!--m->count) {
+ m->mode = SVC_POOL_DEFAULT;
+ kfree(m->to_pool);
+ kfree(m->pool_to);
+ m->npools = 0;
+ }
+
+ mutex_unlock(&svc_pool_map_mutex);
+}
+
+
/*
* Set the current thread's cpus_allowed mask so that it
* will only run on cpus in the given pool.
@@ -212,10 +304,9 @@
/*
* The caller checks for sv_nrpools > 1, which
- * implies that we've been initialized and the
- * map mode is not NONE.
+ * implies that we've been initialized.
*/
- BUG_ON(m->mode == SVC_POOL_NONE);
+ BUG_ON(m->count == 0);
switch (m->mode)
{
@@ -246,18 +337,19 @@
unsigned int pidx = 0;
/*
- * SVC_POOL_NONE happens in a pure client when
+ * An uninitialised map happens in a pure client when
* lockd is brought up, so silently treat it the
* same as SVC_POOL_GLOBAL.
*/
-
- switch (m->mode) {
- case SVC_POOL_PERCPU:
- pidx = m->to_pool[cpu];
- break;
- case SVC_POOL_PERNODE:
- pidx = m->to_pool[cpu_to_node(cpu)];
- break;
+ if (svc_serv_is_pooled(serv)) {
+ switch (m->mode) {
+ case SVC_POOL_PERCPU:
+ pidx = m->to_pool[cpu];
+ break;
+ case SVC_POOL_PERNODE:
+ pidx = m->to_pool[cpu_to_node(cpu)];
+ break;
+ }
}
return &serv->sv_pools[pidx % serv->sv_nrpools];
}
@@ -347,7 +439,7 @@
svc_thread_fn func, int sig, struct module *mod)
{
struct svc_serv *serv;
- unsigned int npools = svc_pool_map_init();
+ unsigned int npools = svc_pool_map_get();
serv = __svc_create(prog, bufsize, npools, shutdown);
@@ -367,6 +459,7 @@
svc_destroy(struct svc_serv *serv)
{
struct svc_sock *svsk;
+ struct svc_sock *tmp;
dprintk("svc: svc_destroy(%s, %d)\n",
serv->sv_program->pg_name,
@@ -382,24 +475,23 @@
del_timer_sync(&serv->sv_temptimer);
- while (!list_empty(&serv->sv_tempsocks)) {
- svsk = list_entry(serv->sv_tempsocks.next,
- struct svc_sock,
- sk_list);
- svc_close_socket(svsk);
- }
+ list_for_each_entry_safe(svsk, tmp, &serv->sv_tempsocks, sk_list)
+ svc_force_close_socket(svsk);
+
if (serv->sv_shutdown)
serv->sv_shutdown(serv);
- while (!list_empty(&serv->sv_permsocks)) {
- svsk = list_entry(serv->sv_permsocks.next,
- struct svc_sock,
- sk_list);
- svc_close_socket(svsk);
- }
+ list_for_each_entry_safe(svsk, tmp, &serv->sv_permsocks, sk_list)
+ svc_force_close_socket(svsk);
+
+ BUG_ON(!list_empty(&serv->sv_permsocks));
+ BUG_ON(!list_empty(&serv->sv_tempsocks));
cache_clean_deferred(serv);
+ if (svc_serv_is_pooled(serv))
+ svc_pool_map_put();
+
/* Unregister service with the portmapper */
svc_register(serv, 0, 0);
kfree(serv->sv_pools);
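
svc_pool_map_get() and svc_pool_map_put() above turn the cpu/pool map into a mutex-protected, reference-counted singleton: the first pooled server initialises it, the last one frees it, and param_set_pool_mode() refuses changes while the count is non-zero. A standalone sketch of that get/put discipline, with a pthread mutex standing in for svc_pool_map_mutex (all names here are illustrative, not sunrpc's):

/* build with:  cc -pthread pool_map_demo.c -o pool_map_demo */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct pool_map {
	pthread_mutex_t lock;	/* stands in for svc_pool_map_mutex */
	int count;		/* how many servers currently use the map */
	unsigned int *to_pool;	/* allocated by the first user, freed by the last */
};

static struct pool_map map = { PTHREAD_MUTEX_INITIALIZER, 0, NULL };

static void pool_map_get(void)
{
	pthread_mutex_lock(&map.lock);
	if (map.count++ == 0)			/* first reference: initialise */
		map.to_pool = calloc(16, sizeof(*map.to_pool));
	pthread_mutex_unlock(&map.lock);
}

static void pool_map_put(void)
{
	pthread_mutex_lock(&map.lock);
	if (--map.count == 0) {			/* last reference: tear down */
		free(map.to_pool);
		map.to_pool = NULL;
	}
	pthread_mutex_unlock(&map.lock);
}

int main(void)
{
	pool_map_get();		/* e.g. nfsd starts */
	pool_map_get();		/* e.g. lockd starts */
	pool_map_put();
	pool_map_put();		/* map freed; mode could now be changed */
	printf("map torn down, count=%d\n", map.count);
	return 0;
}

In the kernel this is what lets an administrator switch pool_mode (auto, global, percpu, pernode) through the module parameter once all pooled services have stopped, without reloading sunrpc.ko.
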
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 63ae947..f6e1eb1 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -82,6 +82,7 @@
static void svc_udp_data_ready(struct sock *, int);
static int svc_udp_recvfrom(struct svc_rqst *);
static int svc_udp_sendto(struct svc_rqst *);
+static void svc_close_socket(struct svc_sock *svsk);
static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk);
static int svc_deferred_recv(struct svc_rqst *rqstp);
@@ -131,13 +132,13 @@
NIPQUAD(((struct sockaddr_in *) addr)->sin_addr),
htons(((struct sockaddr_in *) addr)->sin_port));
break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+
case AF_INET6:
snprintf(buf, len, "%x:%x:%x:%x:%x:%x:%x:%x, port=%u",
NIP6(((struct sockaddr_in6 *) addr)->sin6_addr),
htons(((struct sockaddr_in6 *) addr)->sin6_port));
break;
-#endif
+
default:
snprintf(buf, len, "unknown address type: %d", addr->sa_family);
break;
@@ -449,9 +450,7 @@
union svc_pktinfo_u {
struct in_pktinfo pkti;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
struct in6_pktinfo pkti6;
-#endif
};
static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh)
@@ -467,7 +466,7 @@
cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
}
break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+
case AF_INET6: {
struct in6_pktinfo *pki = CMSG_DATA(cmh);
@@ -479,7 +478,6 @@
cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
}
break;
-#endif
}
return;
}
@@ -721,45 +719,21 @@
}
}
-static void svc_udp_get_sender_address(struct svc_rqst *rqstp,
- struct sk_buff *skb)
+static inline void svc_udp_get_dest_address(struct svc_rqst *rqstp,
+ struct cmsghdr *cmh)
{
switch (rqstp->rq_sock->sk_sk->sk_family) {
case AF_INET: {
- /* this seems to come from net/ipv4/udp.c:udp_recvmsg */
- struct sockaddr_in *sin = svc_addr_in(rqstp);
-
- sin->sin_family = AF_INET;
- sin->sin_port = skb->h.uh->source;
- sin->sin_addr.s_addr = skb->nh.iph->saddr;
- rqstp->rq_addrlen = sizeof(struct sockaddr_in);
- /* Remember which interface received this request */
- rqstp->rq_daddr.addr.s_addr = skb->nh.iph->daddr;
- }
+ struct in_pktinfo *pki = CMSG_DATA(cmh);
+ rqstp->rq_daddr.addr.s_addr = pki->ipi_spec_dst.s_addr;
break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ }
case AF_INET6: {
- /* this is derived from net/ipv6/udp.c:udpv6_recvmesg */
- struct sockaddr_in6 *sin6 = svc_addr_in6(rqstp);
-
- sin6->sin6_family = AF_INET6;
- sin6->sin6_port = skb->h.uh->source;
- sin6->sin6_flowinfo = 0;
- sin6->sin6_scope_id = 0;
- if (ipv6_addr_type(&sin6->sin6_addr) &
- IPV6_ADDR_LINKLOCAL)
- sin6->sin6_scope_id = IP6CB(skb)->iif;
- ipv6_addr_copy(&sin6->sin6_addr,
- &skb->nh.ipv6h->saddr);
- rqstp->rq_addrlen = sizeof(struct sockaddr_in);
- /* Remember which interface received this request */
- ipv6_addr_copy(&rqstp->rq_daddr.addr6,
- &skb->nh.ipv6h->saddr);
- }
+ struct in6_pktinfo *pki = CMSG_DATA(cmh);
+ ipv6_addr_copy(&rqstp->rq_daddr.addr6, &pki->ipi6_addr);
break;
-#endif
+ }
}
- return;
}
/*
@@ -771,7 +745,15 @@
struct svc_sock *svsk = rqstp->rq_sock;
struct svc_serv *serv = svsk->sk_server;
struct sk_buff *skb;
+ char buffer[CMSG_SPACE(sizeof(union svc_pktinfo_u))];
+ struct cmsghdr *cmh = (struct cmsghdr *)buffer;
int err, len;
+ struct msghdr msg = {
+ .msg_name = svc_addr(rqstp),
+ .msg_control = cmh,
+ .msg_controllen = sizeof(buffer),
+ .msg_flags = MSG_DONTWAIT,
+ };
if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
/* udp sockets need large rcvbuf as all pending
@@ -797,7 +779,9 @@
}
clear_bit(SK_DATA, &svsk->sk_flags);
- while ((skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err)) == NULL) {
+ while ((err = kernel_recvmsg(svsk->sk_sock, &msg, NULL,
+ 0, 0, MSG_PEEK | MSG_DONTWAIT)) < 0 ||
+ (skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err)) == NULL) {
if (err == -EAGAIN) {
svc_sock_received(svsk);
return err;
@@ -805,6 +789,7 @@
/* possibly an icmp error */
dprintk("svc: recvfrom returned error %d\n", -err);
}
+ rqstp->rq_addrlen = sizeof(rqstp->rq_addr);
if (skb->tstamp.off_sec == 0) {
struct timeval tv;
@@ -827,7 +812,16 @@
rqstp->rq_prot = IPPROTO_UDP;
- svc_udp_get_sender_address(rqstp, skb);
+ if (cmh->cmsg_level != IPPROTO_IP ||
+ cmh->cmsg_type != IP_PKTINFO) {
+ if (net_ratelimit())
+ printk("rpcsvc: received unknown control message:"
+ "%d/%d\n",
+ cmh->cmsg_level, cmh->cmsg_type);
+ skb_free_datagram(svsk->sk_sk, skb);
+ return 0;
+ }
+ svc_udp_get_dest_address(rqstp, cmh);
if (skb_is_nonlinear(skb)) {
/* we have to copy */
@@ -884,6 +878,9 @@
static void
svc_udp_init(struct svc_sock *svsk)
{
+ int one = 1;
+ mm_segment_t oldfs;
+
svsk->sk_sk->sk_data_ready = svc_udp_data_ready;
svsk->sk_sk->sk_write_space = svc_write_space;
svsk->sk_recvfrom = svc_udp_recvfrom;
@@ -899,6 +896,13 @@
set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */
set_bit(SK_CHNGBUF, &svsk->sk_flags);
+
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ /* make sure we get destination address info */
+ svsk->sk_sock->ops->setsockopt(svsk->sk_sock, IPPROTO_IP, IP_PKTINFO,
+ (char __user *)&one, sizeof(one));
+ set_fs(oldfs);
}
/*
@@ -977,11 +981,9 @@
case AF_INET:
return ntohs(((struct sockaddr_in *)sin)->sin_port)
< PROT_SOCK;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6:
return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
< PROT_SOCK;
-#endif
default:
return 0;
}
@@ -1786,7 +1788,7 @@
spin_unlock_bh(&serv->sv_lock);
}
-void svc_close_socket(struct svc_sock *svsk)
+static void svc_close_socket(struct svc_sock *svsk)
{
set_bit(SK_CLOSE, &svsk->sk_flags);
if (test_and_set_bit(SK_BUSY, &svsk->sk_flags))
@@ -1799,6 +1801,19 @@
svc_sock_put(svsk);
}
+void svc_force_close_socket(struct svc_sock *svsk)
+{
+ set_bit(SK_CLOSE, &svsk->sk_flags);
+ if (test_bit(SK_BUSY, &svsk->sk_flags)) {
+ /* Waiting to be processed, but no threads left,
+ * So just remove it from the waiting list
+ */
+ list_del_init(&svsk->sk_ready);
+ clear_bit(SK_BUSY, &svsk->sk_flags);
+ }
+ svc_close_socket(svsk);
+}
+
/**
* svc_makesock - Make a socket for nfsd and lockd
* @serv: RPC server structure
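
The svc_udp_recvfrom() rework above obtains the datagram's destination address from an IP_PKTINFO control message instead of the raw skb headers, which is why svc_udp_init() now sets the IP_PKTINFO socket option. The same mechanism is visible from userspace; a small Linux-specific sketch (purely illustrative, not kernel code):

#define _GNU_SOURCE		/* for struct in_pktinfo */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int one = 1;
	struct sockaddr_in addr;
	struct sockaddr_in peer;
	char data[1500];
	char cbuf[CMSG_SPACE(sizeof(struct in_pktinfo))];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_name = &peer, .msg_namelen = sizeof(peer),
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmh;

	/* ask the stack to attach destination-address info to each datagram */
	setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &one, sizeof(one));

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(5000);
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	bind(fd, (struct sockaddr *)&addr, sizeof(addr));

	if (recvmsg(fd, &msg, 0) < 0)
		return 1;

	for (cmh = CMSG_FIRSTHDR(&msg); cmh; cmh = CMSG_NXTHDR(&msg, cmh)) {
		if (cmh->cmsg_level == IPPROTO_IP && cmh->cmsg_type == IP_PKTINFO) {
			struct in_pktinfo *pki = (struct in_pktinfo *)CMSG_DATA(cmh);
			/* ipi_addr is the header destination; the kernel hunk
			 * records ipi_spec_dst as the reply source instead */
			printf("datagram addressed to %s\n", inet_ntoa(pki->ipi_addr));
		}
	}
	close(fd);
	return 0;
}
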
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 51ca438..6069716 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -934,7 +934,7 @@
sched = !sock_flag(other, SOCK_DEAD) &&
!(other->sk_shutdown & RCV_SHUTDOWN) &&
- (skb_queue_len(&other->sk_receive_queue) >=
+ (skb_queue_len(&other->sk_receive_queue) >
other->sk_max_ack_backlog);
unix_state_runlock(other);
@@ -1008,7 +1008,7 @@
if (other->sk_state != TCP_LISTEN)
goto out_unlock;
- if (skb_queue_len(&other->sk_receive_queue) >=
+ if (skb_queue_len(&other->sk_receive_queue) >
other->sk_max_ack_backlog) {
err = -EAGAIN;
if (!timeo)
@@ -1381,7 +1381,7 @@
}
if (unix_peer(other) != sk &&
- (skb_queue_len(&other->sk_receive_queue) >=
+ (skb_queue_len(&other->sk_receive_queue) >
other->sk_max_ack_backlog)) {
if (!timeo) {
err = -EAGAIN;
diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c
index bfc2fed..37fabf7 100644
--- a/sound/pci/ac97/ac97_patch.c
+++ b/sound/pci/ac97/ac97_patch.c
@@ -1790,6 +1790,8 @@
* (SS vendor << 16 | device)
*/
static unsigned int ad1981_jacks_blacklist[] = {
+ 0x10140523, /* Thinkpad R40 */
+ 0x10140534, /* Thinkpad X31 */
0x10140537, /* Thinkpad T41p */
0x10140554, /* Thinkpad T42p/R50p */
0 /* end */
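
The two ThinkPad entries added above extend a zero-terminated table of PCI subsystem IDs packed as (subsystem vendor << 16 | subsystem device), per the comment in the hunk. A standalone sketch of how such a table is scanned (the lookup function is illustrative, not the driver's code):

#include <stdio.h>

/* IDs packed as (subsystem vendor << 16) | subsystem device, 0-terminated. */
static unsigned int blacklist[] = {
	0x10140523, /* Thinkpad R40 */
	0x10140534, /* Thinkpad X31 */
	0x10140537, /* Thinkpad T41p */
	0x10140554, /* Thinkpad T42p/R50p */
	0           /* end */
};

static int is_blacklisted(unsigned short subvendor, unsigned short subdevice)
{
	unsigned int id = ((unsigned int)subvendor << 16) | subdevice;
	unsigned int *p;

	for (p = blacklist; *p; p++)
		if (*p == id)
			return 1;
	return 0;
}

int main(void)
{
	printf("%d\n", is_blacklisted(0x1014, 0x0534));	/* 1: Thinkpad X31 */
	printf("%d\n", is_blacklisted(0x1014, 0xffff));	/* 0: not listed */
	return 0;
}
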
diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c
index 9327ab2..ba7fa22 100644
--- a/sound/pci/ali5451/ali5451.c
+++ b/sound/pci/ali5451/ali5451.c
@@ -2312,6 +2312,8 @@
return err;
}
+ snd_card_set_dev(card, &pci->dev);
+
/* initialise synth voices*/
for (i = 0; i < ALI_CHANNELS; i++ ) {
codec->synth.voices[i].number = i;
diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c
index 70face7..7d3c5ee 100644
--- a/sound/pci/cmipci.c
+++ b/sound/pci/cmipci.c
@@ -57,7 +57,7 @@
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable switches */
static long mpu_port[SNDRV_CARDS];
-static long fm_port[SNDRV_CARDS];
+static long fm_port[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)]=1};
static int soft_ac3[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)]=1};
#ifdef SUPPORT_JOYSTICK
static int joystick_port[SNDRV_CARDS];
@@ -2779,6 +2779,9 @@
struct snd_opl3 *opl3;
int err;
+ if (!fm_port)
+ goto disable_fm;
+
/* first try FM regs in PCI port range */
iosynth = cm->iobase + CM_REG_FM_PCI;
err = snd_opl3_create(cm->card, iosynth, iosynth + 2,
@@ -2793,7 +2796,7 @@
case 0x3C8: val |= CM_FMSEL_3C8; break;
case 0x388: val |= CM_FMSEL_388; break;
default:
- return 0;
+ goto disable_fm;
}
snd_cmipci_write(cm, CM_REG_LEGACY_CTRL, val);
/* enable FM */
@@ -2803,11 +2806,7 @@
OPL3_HW_OPL3, 0, &opl3) < 0) {
printk(KERN_ERR "cmipci: no OPL device at %#lx, "
"skipping...\n", iosynth);
- /* disable FM */
- snd_cmipci_write(cm, CM_REG_LEGACY_CTRL,
- val & ~CM_FMSEL_MASK);
- snd_cmipci_clear_bit(cm, CM_REG_MISC_CTRL, CM_FM_EN);
- return 0;
+ goto disable_fm;
}
}
if ((err = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0) {
@@ -2815,6 +2814,11 @@
return err;
}
return 0;
+
+ disable_fm:
+ snd_cmipci_clear_bit(cm, CM_REG_LEGACY_CTRL, CM_FMSEL_MASK);
+ snd_cmipci_clear_bit(cm, CM_REG_MISC_CTRL, CM_FM_EN);
+ return 0;
}
static int __devinit snd_cmipci_create(struct snd_card *card, struct pci_dev *pci,
diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
index 6a428b8..e413da0 100644
--- a/sound/pci/echoaudio/echoaudio.c
+++ b/sound/pci/echoaudio/echoaudio.c
@@ -2033,6 +2033,8 @@
if (card == NULL)
return -ENOMEM;
+ snd_card_set_dev(card, &pci->dev);
+
if ((err = snd_echo_create(card, pci, &chip)) < 0) {
snd_card_free(card);
return err;
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 38977bc..00ace59 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -523,6 +523,7 @@
HDA_CODEC_MUTE("Aux Playback Switch", 0x16, 0x0, HDA_OUTPUT),
HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
+ HDA_CODEC_VOLUME("Mic Boost", 0x0f, 0x0, HDA_OUTPUT),
HDA_CODEC_VOLUME("PC Speaker Playback Volume", 0x18, 0x0, HDA_OUTPUT),
HDA_CODEC_MUTE("PC Speaker Playback Switch", 0x18, 0x0, HDA_OUTPUT),
HDA_CODEC_VOLUME("Mono Playback Volume", 0x1e, 0x0, HDA_OUTPUT),
@@ -570,6 +571,7 @@
HDA_CODEC_MUTE("Aux Playback Switch", 0x16, 0x0, HDA_OUTPUT),
HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
+ HDA_CODEC_VOLUME("Mic Boost", 0x0f, 0x0, HDA_OUTPUT),
/* HDA_CODEC_VOLUME("PC Speaker Playback Volume", 0x18, 0x0, HDA_OUTPUT),
HDA_CODEC_MUTE("PC Speaker Playback Switch", 0x18, 0x0, HDA_OUTPUT),
HDA_CODEC_VOLUME("Mono Playback Volume", 0x1e, 0x0, HDA_OUTPUT),
@@ -658,6 +660,7 @@
HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x17, 0x0, HDA_OUTPUT),
HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
+ HDA_CODEC_VOLUME("Mic Boost", 0x0f, 0x0, HDA_OUTPUT),
HDA_CODEC_VOLUME("Capture Volume", 0x12, 0x0, HDA_OUTPUT),
HDA_CODEC_MUTE("Capture Switch", 0x12, 0x0, HDA_OUTPUT),
{
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 23a1c75..46e93c6 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -629,10 +629,12 @@
static void cxt5045_hp_automute(struct hda_codec *codec)
{
struct conexant_spec *spec = codec->spec;
- unsigned int bits = (spec->hp_present || !spec->cur_eapd) ? 0x80 : 0;
+ unsigned int bits;
spec->hp_present = snd_hda_codec_read(codec, 0x11, 0,
AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
+
+ bits = (spec->hp_present || !spec->cur_eapd) ? 0x80 : 0;
snd_hda_codec_amp_update(codec, 0x10, 0, HDA_OUTPUT, 0, 0x80, bits);
snd_hda_codec_amp_update(codec, 0x10, 1, HDA_OUTPUT, 0, 0x80, bits);
}
@@ -979,10 +981,12 @@
static void cxt5047_hp_automute(struct hda_codec *codec)
{
struct conexant_spec *spec = codec->spec;
- unsigned int bits = spec->hp_present || !spec->cur_eapd ? 0x80 : 0;
+ unsigned int bits;
spec->hp_present = snd_hda_codec_read(codec, 0x13, 0,
AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
+
+ bits = (spec->hp_present || !spec->cur_eapd) ? 0x80 : 0;
snd_hda_codec_amp_update(codec, 0x1d, 0, HDA_OUTPUT, 0, 0x80, bits);
snd_hda_codec_amp_update(codec, 0x1d, 1, HDA_OUTPUT, 0, 0x80, bits);
/* Mute/Unmute PCM 2 for good measure - some systems need this */
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 145682b..84d005e 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4942,9 +4942,16 @@
alc882_cfg_tbl);
if (board_config < 0 || board_config >= ALC882_MODEL_LAST) {
- printk(KERN_INFO "hda_codec: Unknown model for ALC882, "
- "trying auto-probe from BIOS...\n");
- board_config = ALC882_AUTO;
+ /* Pick up systems that don't supply PCI SSID */
+ switch (codec->subsystem_id) {
+ case 0x106b0c00: /* Mac Pro */
+ board_config = ALC885_MACPRO;
+ break;
+ default:
+ printk(KERN_INFO "hda_codec: Unknown model for ALC882, "
+ "trying auto-probe from BIOS...\n");
+ board_config = ALC882_AUTO;
+ }
}
if (board_config == ALC882_AUTO) {
@@ -5917,8 +5924,10 @@
HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
+ HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
+ HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
/* HDA_CODEC_VOLUME("PC Beep Playback Volume", 0x0b, 0x05, HDA_INPUT),
HDA_CODEC_MUTE("PC Beelp Playback Switch", 0x0b, 0x05, HDA_INPUT), */
HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0D, 0x0, HDA_OUTPUT),
@@ -5937,8 +5946,10 @@
HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
+ HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
+ HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
/* HDA_CODEC_VOLUME("PC Beep Playback Volume", 0x0b, 0x05, HDA_INPUT),
HDA_CODEC_MUTE("PC Beelp Playback Switch", 0x0b, 0x05, HDA_INPUT), */
/*HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0D, 0x0, HDA_OUTPUT),*/
@@ -5955,8 +5966,10 @@
HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
+ HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
+ HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
@@ -5977,6 +5990,7 @@
HDA_CODEC_MUTE_MONO("Mono Playback Switch", 0x16, 2, 0x0, HDA_OUTPUT),
HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x02, HDA_INPUT),
HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x02, HDA_INPUT),
+ HDA_CODEC_VOLUME("Front Mic Boost", 0x1a, 0, HDA_INPUT),
HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x01, HDA_INPUT),
HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x01, HDA_INPUT),
HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
@@ -5989,6 +6003,7 @@
static struct snd_kcontrol_new alc262_HP_BPC_WildWest_option_mixer[] = {
HDA_CODEC_VOLUME("Rear Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
HDA_CODEC_MUTE("Rear Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
+ HDA_CODEC_VOLUME("Rear Mic Boost", 0x18, 0, HDA_INPUT),
{ } /* end */
};
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index f7ef9c5..4c7b039 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -59,6 +59,8 @@
STAC_D945GTP3,
STAC_D945GTP5,
STAC_MACMINI,
+ STAC_MACBOOK,
+ STAC_MACBOOK_PRO,
STAC_922X_MODELS
};
@@ -461,6 +463,8 @@
"Dell Inspiron E1705/9400", STAC_REF),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01ce,
"Dell XPS M1710", STAC_REF),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01cf,
+ "Dell Precision M90", STAC_REF),
{} /* terminator */
};
@@ -519,11 +523,25 @@
0x02a19320, 0x40000100,
};
+static unsigned int macbook_pin_configs[10] = {
+ 0x0321e230, 0x03a1e020, 0x400000fd, 0x9017e110,
+ 0x400000fe, 0x0381e021, 0x1345e240, 0x13c5e22e,
+ 0x400000fc, 0x400000fb,
+};
+
+static unsigned int macbook_pro_pin_configs[10] = {
+ 0x0221401f, 0x90a70120, 0x01813024, 0x01014010,
+ 0x400000fd, 0x01016011, 0x1345e240, 0x13c5e22e,
+ 0x400000fc, 0x400000fb,
+};
+
static unsigned int *stac922x_brd_tbl[STAC_922X_MODELS] = {
[STAC_D945_REF] = ref922x_pin_configs,
[STAC_D945GTP3] = d945gtp3_pin_configs,
[STAC_D945GTP5] = d945gtp5_pin_configs,
[STAC_MACMINI] = d945gtp5_pin_configs,
+ [STAC_MACBOOK] = macbook_pin_configs,
+ [STAC_MACBOOK_PRO] = macbook_pro_pin_configs,
};
static const char *stac922x_models[STAC_922X_MODELS] = {
@@ -531,6 +549,8 @@
[STAC_D945GTP5] = "5stack",
[STAC_D945GTP3] = "3stack",
[STAC_MACMINI] = "macmini",
+ [STAC_MACBOOK] = "macbook",
+ [STAC_MACBOOK_PRO] = "macbook-pro",
};
static struct snd_pci_quirk stac922x_cfg_tbl[] = {
@@ -1864,6 +1884,18 @@
spec->board_config = snd_hda_check_board_config(codec, STAC_922X_MODELS,
stac922x_models,
stac922x_cfg_tbl);
+ if (spec->board_config == STAC_MACMINI) {
+ spec->gpio_mute = 1;
+ /* Intel Macs have all same PCI SSID, so we need to check
+ * codec SSID to distinguish the exact models
+ */
+ switch (codec->subsystem_id) {
+ case 0x106b1e00:
+ spec->board_config = STAC_MACBOOK_PRO;
+ break;
+ }
+ }
+
again:
if (spec->board_config < 0) {
snd_printdd(KERN_INFO "hda_codec: Unknown model for STAC922x, "
@@ -1904,9 +1936,6 @@
return err;
}
- if (spec->board_config == STAC_MACMINI)
- spec->gpio_mute = 1;
-
codec->patch_ops = stac92xx_patch_ops;
return 0;
diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c
index 5e1d5d2..952625d 100644
--- a/sound/pci/riptide/riptide.c
+++ b/sound/pci/riptide/riptide.c
@@ -1919,6 +1919,8 @@
return err;
}
+ snd_card_set_dev(card, &pci->dev);
+
*rchip = chip;
return 0;
}
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index e0215ac..6e95857 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -4468,6 +4468,8 @@
hdspm->dev = dev;
hdspm->pci = pci;
+ snd_card_set_dev(card, &pci->dev);
+
if ((err =
snd_hdspm_create(card, hdspm, precise_ptr[dev],
enable_monitor[dev])) < 0) {
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index 92a6487..ee7a691 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -39,7 +39,7 @@
*/
static const u16 wm9712_reg[] = {
0x6174, 0x8000, 0x8000, 0x8000, // 6
- 0xf0f0, 0xaaa0, 0xc008, 0x6808, // e
+ 0x0f0f, 0xaaa0, 0xc008, 0x6808, // e
0xe808, 0xaaa0, 0xad00, 0x8000, // 16
0xe808, 0x3000, 0x8000, 0x0000, // 1e
0x0000, 0x0000, 0x0000, 0x000f, // 26
@@ -96,6 +96,7 @@
SOC_SINGLE("Speaker Playback Switch", AC97_MASTER, 15, 1, 1),
SOC_DOUBLE("Headphone Playback Volume", AC97_HEADPHONE, 8, 0, 31, 1),
SOC_SINGLE("Headphone Playback Switch", AC97_HEADPHONE,15, 1, 1),
+SOC_DOUBLE("PCM Playback Volume", AC97_PCM, 8, 0, 31, 1),
SOC_SINGLE("Speaker Playback ZC Switch", AC97_MASTER, 7, 1, 0),
SOC_SINGLE("Speaker Playback Invert Switch", AC97_MASTER, 6, 1, 0),