Merge branch 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
x86, cpu: Fix X86_FEATURE_NOPL
x86, cpu: Re-run get_cpu_cap() after adjusting the CPUID level
diff --git a/CREDITS b/CREDITS
index 72b4878..41d8e63 100644
--- a/CREDITS
+++ b/CREDITS
@@ -3554,12 +3554,12 @@
D: portions of the Linux Security Module (LSM) framework and security modules
N: Petr Vandrovec
-E: vandrove@vc.cvut.cz
+E: petr@vandrovec.name
D: Small contributions to ncpfs
D: Matrox framebuffer driver
-S: Chudenicka 8
-S: 10200 Prague 10, Hostivar
-S: Czech Republic
+S: 21513 Conradia Ct
+S: Cupertino, CA 95014
+S: USA
N: Thibaut Varene
E: T-Bone@parisc-linux.org
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index ecd35e9..feca075 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -46,7 +46,6 @@
<sect1><title>Atomic and pointer manipulation</title>
!Iarch/x86/include/asm/atomic.h
-!Iarch/x86/include/asm/unaligned.h
</sect1>
<sect1><title>Delaying, scheduling, and timer routines</title>
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index a20c6f6..6899f47 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -57,7 +57,6 @@
</para>
<sect1><title>String Conversions</title>
-!Ilib/vsprintf.c
!Elib/vsprintf.c
</sect1>
<sect1><title>String Manipulation</title>
diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl
index 0b1a3f9..f66f4df 100644
--- a/Documentation/DocBook/kernel-locking.tmpl
+++ b/Documentation/DocBook/kernel-locking.tmpl
@@ -1645,7 +1645,9 @@
all the readers who were traversing the list when we deleted the
element are finished. We use <function>call_rcu()</function> to
register a callback which will actually destroy the object once
- the readers are finished.
+ all pre-existing readers are finished. Alternatively,
+ <function>synchronize_rcu()</function> may be used to block until
+ all pre-existing readers are finished.
</para>
<para>
But how does Read Copy Update know when the readers are
@@ -1714,7 +1716,7 @@
- object_put(obj);
+ list_del_rcu(&obj->list);
cache_num--;
-+ call_rcu(&obj->rcu, cache_delete_rcu, obj);
++ call_rcu(&obj->rcu, cache_delete_rcu);
}
/* Must be holding cache_lock */
@@ -1725,14 +1727,6 @@
if (++cache_num > MAX_CACHE_SIZE) {
struct object *i, *outcast = NULL;
list_for_each_entry(i, &cache, list) {
-@@ -85,6 +94,7 @@
- obj->popularity = 0;
- atomic_set(&obj->refcnt, 1); /* The cache holds a reference */
- spin_lock_init(&obj->lock);
-+ INIT_RCU_HEAD(&obj->rcu);
-
- spin_lock_irqsave(&cache_lock, flags);
- __cache_add(obj);
@@ -104,12 +114,11 @@
struct object *cache_find(int id)
{
@@ -1961,6 +1955,12 @@
</sect1>
</chapter>
+ <chapter id="apiref">
+ <title>Mutex API reference</title>
+!Iinclude/linux/mutex.h
+!Ekernel/mutex.c
+ </chapter>
+
<chapter id="references">
<title>Further reading</title>
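As a minimal sketch of the deferred-free pattern the chapter describes, using the two-argument call_rcu() shown in the hunk above (the struct layout and names here are illustrative, not the chapter's full example):

    #include <linux/list.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct object {
            struct list_head list;
            struct rcu_head rcu;
    };

    static void cache_delete_rcu(struct rcu_head *head)
    {
            struct object *obj = container_of(head, struct object, rcu);

            kfree(obj);                     /* all pre-existing readers are done */
    }

    /* Caller holds the update-side lock. */
    static void cache_delete(struct object *obj)
    {
            list_del_rcu(&obj->list);       /* readers may still hold references */
            call_rcu(&obj->rcu, cache_delete_rcu);
    }

The blocking alternative is list_del_rcu(&obj->list); synchronize_rcu(); kfree(obj); -- simpler, but the updater then sleeps for a full grace period instead of returning immediately.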
diff --git a/Documentation/DocBook/tracepoint.tmpl b/Documentation/DocBook/tracepoint.tmpl
index e8473ea..b57a9ed 100644
--- a/Documentation/DocBook/tracepoint.tmpl
+++ b/Documentation/DocBook/tracepoint.tmpl
@@ -104,4 +104,9 @@
<title>Block IO</title>
!Iinclude/trace/events/block.h
</chapter>
+
+ <chapter id="workqueue">
+ <title>Workqueue</title>
+!Iinclude/trace/events/workqueue.h
+ </chapter>
</book>
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt
index 790d1a8..0c134f8 100644
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
@@ -218,13 +218,22 @@
include:
a. Keeping a count of the number of data-structure elements
- used by the RCU-protected data structure, including those
- waiting for a grace period to elapse. Enforce a limit
- on this number, stalling updates as needed to allow
- previously deferred frees to complete.
+ used by the RCU-protected data structure, including
+ those waiting for a grace period to elapse. Enforce a
+ limit on this number, stalling updates as needed to allow
+ previously deferred frees to complete. Alternatively,
+ limit only the number awaiting deferred free rather than
+ the total number of elements.
- Alternatively, limit only the number awaiting deferred
- free rather than the total number of elements.
+ One way to stall the updates is to acquire the update-side
+ mutex. (Don't try this with a spinlock -- other CPUs
+ spinning on the lock could prevent the grace period
+ from ever ending.) Another way to stall the updates
+ is for the updates to use a wrapper function around
+ the memory allocator, so that this wrapper function
+ simulates OOM when there is too much memory awaiting an
+ RCU grace period. There are of course many other
+ variations on this theme.
b. Limiting update rate. For example, if updates occur only
once per hour, then no explicit rate limiting is required,
@@ -365,3 +374,26 @@
and the compiler to freely reorder code into and out of RCU
read-side critical sections. It is the responsibility of the
RCU update-side primitives to deal with this.
+
+17. Use CONFIG_PROVE_RCU, CONFIG_DEBUG_OBJECTS_RCU_HEAD, and
+ the __rcu sparse checks to validate your RCU code. These
+ can help find problems as follows:
+
+ CONFIG_PROVE_RCU: check that accesses to RCU-protected data
+ structures are carried out under the proper RCU
+ read-side critical section, while holding the right
+ combination of locks, or whatever other conditions
+ are appropriate.
+
+ CONFIG_DEBUG_OBJECTS_RCU_HEAD: check that you don't pass the
+ same object to call_rcu() (or friends) before an RCU
+ grace period has elapsed since the last time that you
+ passed that same object to call_rcu() (or friends).
+
+ __rcu sparse checks: tag the pointer to the RCU-protected data
+ structure with __rcu, and sparse will warn you if you
+ access that pointer without the services of one of the
+ variants of rcu_dereference().
+
+ These debugging aids can help you find problems that are
+ otherwise extremely difficult to spot.
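As a minimal sketch of the __rcu annotation from item 17 (gp and struct foo are hypothetical):

    #include <linux/rcupdate.h>

    struct foo {
            int a;
    };

    static struct foo __rcu *gp;            /* sparse now tracks this pointer */

    static int reader(void)
    {
            struct foo *p;
            int a = -1;

            rcu_read_lock();
            p = rcu_dereference(gp);        /* a plain load here would warn */
            if (p)
                    a = p->a;
            rcu_read_unlock();
            return a;
    }

    static void publish(struct foo *newp)
    {
            rcu_assign_pointer(gp, newp);   /* orders initialization before publication */
    }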
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
index 44c6dcc..862c08e 100644
--- a/Documentation/RCU/stallwarn.txt
+++ b/Documentation/RCU/stallwarn.txt
@@ -80,6 +80,24 @@
o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the kernel
without invoking schedule().
+o A CPU-bound real-time task in a CONFIG_PREEMPT kernel, which might
+ happen to preempt a low-priority task in the middle of an RCU
+ read-side critical section. This is especially damaging if
+ that low-priority task is not permitted to run on any other CPU,
+ in which case the next RCU grace period can never complete, which
+ will eventually cause the system to run out of memory and hang.
+ While the system is in the process of running itself out of
+ memory, you might see stall-warning messages.
+
+o A CPU-bound real-time task in a CONFIG_PREEMPT_RT kernel that
+ is running at a higher priority than the RCU softirq threads.
+ This will prevent RCU callbacks from ever being invoked,
+ and in a CONFIG_TREE_PREEMPT_RCU kernel will further prevent
+ RCU grace periods from ever completing. Either way, the
+ system will eventually run out of memory and hang. In the
+ CONFIG_TREE_PREEMPT_RCU case, you might see stall-warning
+ messages.
+
o A bug in the RCU implementation.
o A hardware failure. This is quite unlikely, but has occurred
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt
index efd8cc9..a851118 100644
--- a/Documentation/RCU/trace.txt
+++ b/Documentation/RCU/trace.txt
@@ -125,6 +125,17 @@
of RCU callbacks is ready to invoke, then the remainder will
be deferred.
+o "ci" is the number of RCU callbacks that have been invoked for
+ this CPU. Note that ci+ql is the number of callbacks that have
+ been registered in the absence of CPU-hotplug activity.
+
+o "co" is the number of RCU callbacks that have been orphaned due to
+ this CPU going offline.
+
+o "ca" is the number of RCU callbacks that have been adopted due to
+ other CPUs going offline. Note that ci+co-ca+ql is the number of
+ RCU callbacks registered on this CPU.
+
There is also an rcu/rcudata.csv file with the same information in
comma-separated-variable spreadsheet format.
@@ -180,7 +191,7 @@
o "jfq" is the number of jiffies remaining for this grace period
before force_quiescent_state() is invoked to help push things
- along. Note that CPUs in dyntick-idle mode thoughout the grace
+ along. Note that CPUs in dyntick-idle mode throughout the grace
period will not report on their own, but rather must be checked by
some other CPU via force_quiescent_state().
diff --git a/Documentation/block/cfq-iosched.txt b/Documentation/block/cfq-iosched.txt
new file mode 100644
index 0000000..e578fee
--- /dev/null
+++ b/Documentation/block/cfq-iosched.txt
@@ -0,0 +1,45 @@
+CFQ ioscheduler tunables
+========================
+
+slice_idle
+----------
+This specifies how long CFQ should idle for the next request on certain cfq
+queues (for sequential workloads) and service trees (for random workloads)
+before the queue is expired and CFQ selects the next queue to dispatch from.
+
+By default slice_idle is a non-zero value. That means by default we idle on
+queues/service trees. This can be very helpful on highly seeky media like
+single spindle SATA/SAS disks where we can cut down on overall number of
+seeks and see improved throughput.
+
+Setting slice_idle to 0 will remove all idling at the queue/service tree
+level and one should see an overall improved throughput on faster storage
+devices like multiple SATA/SAS disks in hardware RAID configuration. The
+downside is that the isolation provided from WRITES also goes down and the
+notion of IO priority becomes weaker.
+
+So depending on storage and workload, it might be useful to set slice_idle=0.
+In general, I think keeping slice_idle enabled should be useful for SATA/SAS
+disks and software RAID of SATA/SAS disks. For any configurations where
+there are multiple spindles behind single LUN (Host based hardware RAID
+controller or for storage arrays), setting slice_idle=0 might end up in better
+throughput and acceptable latencies.
+
+CFQ IOPS Mode for group scheduling
+===================================
+Basic CFQ design is to provide priority based time slices. Higher priority
+process gets bigger time slice and lower priority process gets smaller time
+slice. Measuring time becomes harder if storage is fast and supports NCQ, and
+it would be better to dispatch multiple requests from multiple cfq queues in
+the request queue at a time. In such a scenario, it is not possible to measure
+the time consumed by a single queue accurately.
+
+What is possible though is to measure number of requests dispatched from a
+single queue and also allow dispatch from multiple cfq queues at the same time.
+This effectively becomes the fairness in terms of IOPS (IO operations per
+second).
+
+If one sets slice_idle=0 and if storage supports NCQ, CFQ internally switches
+to IOPS mode and starts providing fairness in terms of number of requests
+dispatched. Note that this mode switching takes effect only for group
+scheduling. For non-cgroup users nothing should change.
diff --git a/Documentation/cgroups/blkio-controller.txt b/Documentation/cgroups/blkio-controller.txt
index 48e0b21..6919d62 100644
--- a/Documentation/cgroups/blkio-controller.txt
+++ b/Documentation/cgroups/blkio-controller.txt
@@ -217,6 +217,7 @@
CFQ sysfs tunable
=================
/sys/block/<disk>/queue/iosched/group_isolation
+-----------------------------------------------
If group_isolation=1, it provides stronger isolation between groups at the
expense of throughput. By default group_isolation is 0. In general that
@@ -243,6 +244,33 @@
and one wants stronger isolation between groups, then set group_isolation=1
but this will come at cost of reduced throughput.
+/sys/block/<disk>/queue/iosched/slice_idle
+------------------------------------------
+On faster hardware CFQ can be slow, especially with sequential workloads.
+This happens because CFQ idles on a single queue and a single queue might not
+drive deeper request queue depths to keep the storage busy. In such scenarios
+one can try setting slice_idle=0 and that would switch CFQ to IOPS
+(IO operations per second) mode on NCQ supporting hardware.
+
+That means CFQ will not idle between cfq queues of a cfq group and hence be
+able to drive higher queue depths and achieve better throughput. That also
+means that cfq provides fairness among groups in terms of IOPS and not in
+terms of disk time.
+
+/sys/block/<disk>/queue/iosched/group_idle
+------------------------------------------
+If one disables idling on individual cfq queues and cfq service trees by
+setting slice_idle=0, group_idle kicks in. That means CFQ will still idle
+on the group in an attempt to provide fairness among groups.
+
+By default group_idle is the same as slice_idle and does not do anything if
+slice_idle is enabled.
+
+You can experience an overall throughput drop if you have created multiple
+groups and put applications in those groups which are not driving enough
+IO to keep the disk busy. In that case set group_idle=0, and CFQ will not idle
+on individual groups and throughput should improve.
+
What works
==========
- Currently only sync IO queues are supported. All the buffered writes are
diff --git a/Documentation/cputopology.txt b/Documentation/cputopology.txt
index f1c5c4b..902d315 100644
--- a/Documentation/cputopology.txt
+++ b/Documentation/cputopology.txt
@@ -14,25 +14,39 @@
identifier (rather than the kernel's). The actual value is
architecture and platform dependent.
-3) /sys/devices/system/cpu/cpuX/topology/thread_siblings:
+3) /sys/devices/system/cpu/cpuX/topology/book_id:
+
+ the book ID of cpuX. Typically it is the hardware platform's
+ identifier (rather than the kernel's). The actual value is
+ architecture and platform dependent.
+
+4) /sys/devices/system/cpu/cpuX/topology/thread_siblings:
internal kernel map of cpuX's hardware threads within the same
core as cpuX
-4) /sys/devices/system/cpu/cpuX/topology/core_siblings:
+5) /sys/devices/system/cpu/cpuX/topology/core_siblings:
internal kernel map of cpuX's hardware threads within the same
physical_package_id.
+6) /sys/devices/system/cpu/cpuX/topology/book_siblings:
+
+ internal kernel map of cpuX's hardware threads within the same
+ book_id.
+
To implement it in an architecture-neutral way, a new source file,
-drivers/base/topology.c, is to export the 4 attributes.
+drivers/base/topology.c, is to export the 4 or 6 attributes. The two
+book-related sysfs files will only be created if CONFIG_SCHED_BOOK is selected.
For an architecture to support this feature, it must define some of
these macros in include/asm-XXX/topology.h:
#define topology_physical_package_id(cpu)
#define topology_core_id(cpu)
+#define topology_book_id(cpu)
#define topology_thread_cpumask(cpu)
#define topology_core_cpumask(cpu)
+#define topology_book_cpumask(cpu)
The type of **_id is int.
The type of siblings is (const) struct cpumask *.
@@ -45,6 +59,9 @@
3) thread_siblings: just the given CPU
4) core_siblings: just the given CPU
+For architectures that don't support books (CONFIG_SCHED_BOOK) there are no
+default definitions for topology_book_id() and topology_book_cpumask().
+
Additionally, CPU topology information is provided under
/sys/devices/system/cpu and includes these files. The internal
source for the output is in brackets ("[]").
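As a hypothetical <asm/topology.h> fragment for an architecture that supports books (the cpu_topology array and its field names are invented for illustration):

    /* include/asm-xxx/topology.h -- illustrative only */
    #define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
    #define topology_core_id(cpu)             (cpu_topology[cpu].core_id)
    #define topology_book_id(cpu)             (cpu_topology[cpu].book_id)
    #define topology_thread_cpumask(cpu)      (&cpu_topology[cpu].thread_mask)
    #define topology_core_cpumask(cpu)        (&cpu_topology[cpu].core_mask)
    #define topology_book_cpumask(cpu)        (&cpu_topology[cpu].book_mask)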
diff --git a/Documentation/gpio.txt b/Documentation/gpio.txt
index d96a6db..9633da0 100644
--- a/Documentation/gpio.txt
+++ b/Documentation/gpio.txt
@@ -109,17 +109,19 @@
If you want to initialize a structure with an invalid GPIO number, use
some negative number (perhaps "-EINVAL"); that will never be valid. To
-test if a number could reference a GPIO, you may use this predicate:
+test if such a number from such a structure could reference a GPIO, you
+may use this predicate:
int gpio_is_valid(int number);
A number that's not valid will be rejected by calls which may request
or free GPIOs (see below). Other numbers may also be rejected; for
-example, a number might be valid but unused on a given board.
+example, a number might be valid but temporarily unused on a given board.
-Whether a platform supports multiple GPIO controllers is currently a
-platform-specific implementation issue.
-
+Whether a platform supports multiple GPIO controllers is a platform-specific
+implementation issue, as are whether that support can leave "holes" in the space
+of GPIO numbers, and whether new controllers can be added at runtime. Such issues
+can affect things including whether adjacent GPIO numbers are both valid.
Using GPIOs
-----------
@@ -480,12 +482,16 @@
ARCH_REQUIRE_GPIOLIB or ARCH_WANT_OPTIONAL_GPIOLIB
and arrange that its <asm/gpio.h> includes <asm-generic/gpio.h> and defines
three functions: gpio_get_value(), gpio_set_value(), and gpio_cansleep().
-They may also want to provide a custom value for ARCH_NR_GPIOS.
-ARCH_REQUIRE_GPIOLIB means that the gpio-lib code will always get compiled
+It may also provide a custom value for ARCH_NR_GPIOS, so that it better
+reflects the number of GPIOs in actual use on that platform, without
+wasting static table space. (It should count both built-in/SoC GPIOs and
+also ones on GPIO expanders.)
+
+ARCH_REQUIRE_GPIOLIB means that the gpiolib code will always get compiled
into the kernel on that architecture.
-ARCH_WANT_OPTIONAL_GPIOLIB means the gpio-lib code defaults to off and the user
+ARCH_WANT_OPTIONAL_GPIOLIB means the gpiolib code defaults to off and the user
can enable it and build it into the kernel optionally.
If neither of these options are selected, the platform does not support
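As a minimal sketch of the gpio_is_valid() check described above (claim_led() and its board wiring are hypothetical):

    #include <linux/gpio.h>

    /* gpio is a board-supplied number, or a negative value such as
     * -EINVAL when the line is absent on this board. */
    static int claim_led(int gpio)
    {
            int err;

            if (!gpio_is_valid(gpio))
                    return 0;               /* nothing wired up; not an error */

            err = gpio_request(gpio, "led");
            if (err)
                    return err;             /* valid number, but unavailable */

            return gpio_direction_output(gpio, 0);
    }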
diff --git a/Documentation/hwmon/sysfs-interface b/Documentation/hwmon/sysfs-interface
index ff45d1f..48ceabe 100644
--- a/Documentation/hwmon/sysfs-interface
+++ b/Documentation/hwmon/sysfs-interface
@@ -91,12 +91,11 @@
I2C devices get this attribute created automatically.
RO
-update_rate The rate at which the chip will update readings.
+update_interval The interval at which the chip will update readings.
Unit: millisecond
RW
- Some devices have a variable update rate. This attribute
- can be used to change the update rate to the desired
- frequency.
+ Some devices have a variable update rate or interval.
+ This attribute can be used to change it to the desired value.
************
diff --git a/Documentation/kernel-doc-nano-HOWTO.txt b/Documentation/kernel-doc-nano-HOWTO.txt
index 27a52b3..3d8a977 100644
--- a/Documentation/kernel-doc-nano-HOWTO.txt
+++ b/Documentation/kernel-doc-nano-HOWTO.txt
@@ -345,5 +345,10 @@
section titled <section title> from <filename>.
Spaces are allowed in <section title>; do not quote the <section title>.
+!C<filename> is replaced by nothing, but makes the tools check that
+all DOC: sections and documented functions, symbols, etc. are used.
+This is useful when you use only !F/!P and want to verify
+that all documentation is included.
+
Tim.
*/ <twaugh@redhat.com>
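A hypothetical template fragment pairing !F with the new !C directive, so the tools report any kernel-doc in the file that the template forgot to pull in:

    !Finclude/linux/rcupdate.h call_rcu
    !Cinclude/linux/rcupdate.h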
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 2c85c06..a2ffd6b 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1974,15 +1974,18 @@
force Enable ASPM even on devices that claim not to support it.
WARNING: Forcing ASPM on may cause system lockups.
+ pcie_ports= [PCIE] PCIe ports handling:
+ auto Ask the BIOS whether or not to use native PCIe services
+ associated with PCIe ports (PME, hot-plug, AER). Use
+ them only if that is allowed by the BIOS.
+ native Use native PCIe services associated with PCIe ports
+ unconditionally.
+ compat Treat PCIe ports as PCI-to-PCI bridges, disable the PCIe
+ ports driver.
+
pcie_pme= [PCIE,PM] Native PCIe PME signaling options:
- Format: {auto|force}[,nomsi]
- auto Use native PCIe PME signaling if the BIOS allows the
- kernel to control PCIe config registers of root ports.
- force Use native PCIe PME signaling even if the BIOS refuses
- to allow the kernel to control the relevant PCIe config
- registers.
nomsi Do not use MSI for native PCIe PME signaling (this makes
- all PCIe root ports use INTx for everything).
+ all PCIe root ports use INTx for all services).
pcmv= [HW,PCMCIA] BadgePAD 4
@@ -2150,6 +2153,11 @@
Reserves a hole at the top of the kernel virtual
address space.
+ reservelow= [X86]
+ Format: nn[K]
+ Set the amount of memory to reserve for BIOS at
+ the bottom of the address space.
+
reset_devices [KNL] Force drivers to reset the underlying device
during initialization.
@@ -2432,6 +2440,10 @@
disables clocksource verification at runtime.
Used to enable high-resolution timer mode on older
hardware, and in virtualized environment.
+ [x86] noirqtime: Do not use TSC to do irq accounting.
+ Used to disable IRQ_TIME_ACCOUNTING at run time on
+ platforms where RDTSC is slow and this accounting
+ can add overhead.
turbografx.map[2|3]= [HW,JOY]
TurboGraFX parallel port interface
@@ -2629,8 +2641,10 @@
aux-ide-disks -- unplug non-primary-master IDE devices
nics -- unplug network devices
all -- unplug all emulated devices (NICs and IDE disks)
- ignore -- continue loading the Xen platform PCI driver even
- if the version check failed
+ unnecessary -- unplugging emulated devices is
+ unnecessary even if the host did not respond to
+ the unplug protocol
+ never -- do not unplug even if version check succeeds
xirc2ps_cs= [NET,PCMCIA]
Format:
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index 1762b81..741fe66 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -542,9 +542,11 @@
registration and unregistration.
Probe handlers are run with preemption disabled. Depending on the
-architecture, handlers may also run with interrupts disabled. In any
-case, your handler should not yield the CPU (e.g., by attempting to
-acquire a semaphore).
+architecture and optimization state, handlers may also run with
+interrupts disabled (e.g., kretprobe handlers and optimized kprobe
+handlers run without interrupts disabled on x86/x86-64). In any case,
+your handler should not yield the CPU (e.g., by attempting to acquire
+a semaphore).
Since a return probe is implemented by replacing the return
address with the trampoline's address, stack backtraces and calls
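As a minimal sketch of a handler that respects the no-sleeping rule (the probed symbol and message are arbitrary):

    #include <linux/kernel.h>
    #include <linux/kprobes.h>

    static int handler_pre(struct kprobe *p, struct pt_regs *regs)
    {
            /* Safe: printk() does not sleep. Do not take a mutex, call
             * kmalloc(GFP_KERNEL), or otherwise yield the CPU in here. */
            printk(KERN_INFO "kprobe hit at %p\n", p->addr);
            return 0;
    }

    static struct kprobe kp = {
            .symbol_name = "do_fork",
            .pre_handler = handler_pre,
    };

    /* register_kprobe(&kp) from module init; unregister_kprobe(&kp) on exit. */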
diff --git a/Documentation/lguest/Makefile b/Documentation/lguest/Makefile
index 28c8cdf..bebac6b 100644
--- a/Documentation/lguest/Makefile
+++ b/Documentation/lguest/Makefile
@@ -1,5 +1,6 @@
# This creates the demonstration utility "lguest" which runs a Linux guest.
-CFLAGS:=-m32 -Wall -Wmissing-declarations -Wmissing-prototypes -O3 -I../../include -I../../arch/x86/include -U_FORTIFY_SOURCE
+# Missing headers? Add "-I../../include -I../../arch/x86/include"
+CFLAGS:=-m32 -Wall -Wmissing-declarations -Wmissing-prototypes -O3 -U_FORTIFY_SOURCE
all: lguest
diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c
index e9ce3c5..8a6a8c6 100644
--- a/Documentation/lguest/lguest.c
+++ b/Documentation/lguest/lguest.c
@@ -39,14 +39,14 @@
#include <limits.h>
#include <stddef.h>
#include <signal.h>
-#include "linux/lguest_launcher.h"
-#include "linux/virtio_config.h"
-#include "linux/virtio_net.h"
-#include "linux/virtio_blk.h"
-#include "linux/virtio_console.h"
-#include "linux/virtio_rng.h"
-#include "linux/virtio_ring.h"
-#include "asm/bootparam.h"
+#include <linux/virtio_config.h>
+#include <linux/virtio_net.h>
+#include <linux/virtio_blk.h>
+#include <linux/virtio_console.h>
+#include <linux/virtio_rng.h>
+#include <linux/virtio_ring.h>
+#include <asm/bootparam.h>
+#include "../../include/linux/lguest_launcher.h"
/*L:110
* We can ignore the 42 include files we need for this program, but I do want
* to draw attention to the use of kernel-style types.
@@ -1447,14 +1447,15 @@
static void configure_device(int fd, const char *tapif, u32 ipaddr)
{
struct ifreq ifr;
- struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;
+ struct sockaddr_in sin;
memset(&ifr, 0, sizeof(ifr));
strcpy(ifr.ifr_name, tapif);
/* Don't read these incantations. Just cut & paste them like I did! */
- sin->sin_family = AF_INET;
- sin->sin_addr.s_addr = htonl(ipaddr);
+ sin.sin_family = AF_INET;
+ sin.sin_addr.s_addr = htonl(ipaddr);
+ memcpy(&ifr.ifr_addr, &sin, sizeof(sin));
if (ioctl(fd, SIOCSIFADDR, &ifr) != 0)
err(1, "Setting %s interface address", tapif);
ifr.ifr_flags = IFF_UP;
diff --git a/Documentation/mutex-design.txt b/Documentation/mutex-design.txt
index c91ccc0..38c10fd 100644
--- a/Documentation/mutex-design.txt
+++ b/Documentation/mutex-design.txt
@@ -9,7 +9,7 @@
mutex semantics are sufficient for your code, then there are a couple
of advantages of mutexes:
- - 'struct mutex' is smaller on most architectures: .e.g on x86,
+ - 'struct mutex' is smaller on most architectures: E.g. on x86,
'struct semaphore' is 20 bytes, 'struct mutex' is 16 bytes.
A smaller structure size means less RAM footprint, and better
CPU-cache utilization.
@@ -136,3 +136,4 @@
void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
int mutex_lock_interruptible_nested(struct mutex *lock,
unsigned int subclass);
+ int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
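A minimal sketch of the newly listed atomic_dec_and_mutex_lock(), which returns holding the mutex only when the count reaches zero (struct object and free_object() are hypothetical):

    #include <asm/atomic.h>
    #include <linux/mutex.h>

    struct object {
            atomic_t refcnt;
            /* ... */
    };

    static DEFINE_MUTEX(obj_lock);

    /* free_object() is a hypothetical teardown helper. */
    static void put_object(struct object *obj)
    {
            if (atomic_dec_and_mutex_lock(&obj->refcnt, &obj_lock)) {
                    /* refcnt hit zero and obj_lock is now held */
                    free_object(obj);
                    mutex_unlock(&obj_lock);
            }
            /* otherwise the count was only decremented; no lock taken */
    }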
diff --git a/Documentation/networking/e1000.txt b/Documentation/networking/e1000.txt
index 2df7186..d9271e7 100644
--- a/Documentation/networking/e1000.txt
+++ b/Documentation/networking/e1000.txt
@@ -1,82 +1,35 @@
Linux* Base Driver for the Intel(R) PRO/1000 Family of Adapters
===============================================================
-September 26, 2006
-
+Intel Gigabit Linux driver.
+Copyright(c) 1999 - 2010 Intel Corporation.
Contents
========
-- In This Release
- Identifying Your Adapter
-- Building and Installation
- Command Line Parameters
- Speed and Duplex Configuration
- Additional Configurations
-- Known Issues
- Support
-
-In This Release
-===============
-
-This file describes the Linux* Base Driver for the Intel(R) PRO/1000 Family
-of Adapters. This driver includes support for Itanium(R)2-based systems.
-
-For questions related to hardware requirements, refer to the documentation
-supplied with your Intel PRO/1000 adapter. All hardware requirements listed
-apply to use with Linux.
-
-The following features are now available in supported kernels:
- - Native VLANs
- - Channel Bonding (teaming)
- - SNMP
-
-Channel Bonding documentation can be found in the Linux kernel source:
-/Documentation/networking/bonding.txt
-
-The driver information previously displayed in the /proc filesystem is not
-supported in this release. Alternatively, you can use ethtool (version 1.6
-or later), lspci, and ifconfig to obtain the same information.
-
-Instructions on updating ethtool can be found in the section "Additional
-Configurations" later in this document.
-
-NOTE: The Intel(R) 82562v 10/100 Network Connection only provides 10/100
-support.
-
-
Identifying Your Adapter
========================
For more information on how to identify your adapter, go to the Adapter &
Driver ID Guide at:
- http://support.intel.com/support/network/adapter/pro100/21397.htm
+ http://support.intel.com/support/go/network/adapter/idguide.htm
For the latest Intel network drivers for Linux, refer to the following
website. In the search field, enter your adapter name or type, or use the
networking link on the left to search for your adapter:
- http://downloadfinder.intel.com/scripts-df/support_intel.asp
-
+ http://support.intel.com/support/go/network/adapter/home.htm
Command Line Parameters
=======================
-If the driver is built as a module, the following optional parameters
-are used by entering them on the command line with the modprobe command
-using this syntax:
-
- modprobe e1000 [<option>=<VAL1>,<VAL2>,...]
-
-For example, with two PRO/1000 PCI adapters, entering:
-
- modprobe e1000 TxDescriptors=80,128
-
-loads the e1000 driver with 80 TX descriptors for the first adapter and
-128 TX descriptors for the second adapter.
-
The default value for each parameter is generally the recommended setting,
unless otherwise noted.
@@ -89,10 +42,6 @@
parameters, see the application note at:
http://www.intel.com/design/network/applnots/ap450.htm
- A descriptor describes a data buffer and attributes related to
- the data buffer. This information is accessed by the hardware.
-
-
AutoNeg
-------
(Supported only on adapters with copper connections)
@@ -106,7 +55,6 @@
NOTE: Refer to the Speed and Duplex section of this readme for more
information on the AutoNeg parameter.
-
Duplex
------
(Supported only on adapters with copper connections)
@@ -119,7 +67,6 @@
link partner is forced (either full or half), Duplex defaults to half-
duplex.
-
FlowControl
-----------
Valid Range: 0-3 (0=none, 1=Rx only, 2=Tx only, 3=Rx&Tx)
@@ -128,16 +75,16 @@
This parameter controls the automatic generation (Tx) and response (Rx)
to Ethernet PAUSE frames.
-
InterruptThrottleRate
---------------------
(not supported on Intel(R) 82542, 82543 or 82544-based adapters)
-Valid Range: 0,1,3,100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
+Valid Range: 0,1,3,4,100-100000 (0=off, 1=dynamic, 3=dynamic conservative,
+ 4=simplified balancing)
Default Value: 3
The driver can limit the amount of interrupts per second that the adapter
-will generate for incoming packets. It does this by writing a value to the
-adapter that is based on the maximum amount of interrupts that the adapter
+will generate for incoming packets. It does this by writing a value to the
+adapter that is based on the maximum amount of interrupts that the adapter
will generate per second.
Setting InterruptThrottleRate to a value greater or equal to 100
@@ -146,37 +93,43 @@
load on the system and can lower CPU utilization under heavy load,
but will increase latency as packets are not processed as quickly.
-The default behaviour of the driver previously assumed a static
-InterruptThrottleRate value of 8000, providing a good fallback value for
-all traffic types,but lacking in small packet performance and latency.
-The hardware can handle many more small packets per second however, and
+The default behaviour of the driver previously assumed a static
+InterruptThrottleRate value of 8000, providing a good fallback value for
+all traffic types, but lacking in small packet performance and latency.
+The hardware can handle many more small packets per second however, and
for this reason an adaptive interrupt moderation algorithm was implemented.
Since 7.3.x, the driver has two adaptive modes (setting 1 or 3) in which
-it dynamically adjusts the InterruptThrottleRate value based on the traffic
+it dynamically adjusts the InterruptThrottleRate value based on the traffic
that it receives. After determining the type of incoming traffic in the last
-timeframe, it will adjust the InterruptThrottleRate to an appropriate value
+timeframe, it will adjust the InterruptThrottleRate to an appropriate value
for that traffic.
The algorithm classifies the incoming traffic every interval into
-classes. Once the class is determined, the InterruptThrottleRate value is
-adjusted to suit that traffic type the best. There are three classes defined:
+classes. Once the class is determined, the InterruptThrottleRate value is
+adjusted to suit that traffic type the best. There are three classes defined:
"Bulk traffic", for large amounts of packets of normal size; "Low latency",
for small amounts of traffic and/or a significant percentage of small
-packets; and "Lowest latency", for almost completely small packets or
+packets; and "Lowest latency", for almost completely small packets or
minimal traffic.
-In dynamic conservative mode, the InterruptThrottleRate value is set to 4000
-for traffic that falls in class "Bulk traffic". If traffic falls in the "Low
-latency" or "Lowest latency" class, the InterruptThrottleRate is increased
+In dynamic conservative mode, the InterruptThrottleRate value is set to 4000
+for traffic that falls in class "Bulk traffic". If traffic falls in the "Low
+latency" or "Lowest latency" class, the InterruptThrottleRate is increased
stepwise to 20000. This default mode is suitable for most applications.
For situations where low latency is vital such as cluster or
grid computing, the algorithm can reduce latency even more when
InterruptThrottleRate is set to mode 1. In this mode, which operates
-the same as mode 3, the InterruptThrottleRate will be increased stepwise to
+the same as mode 3, the InterruptThrottleRate will be increased stepwise to
70000 for traffic in class "Lowest latency".
+In simplified mode the interrupt rate is based on the ratio of Tx and
+Rx traffic. If the bytes per second rate is approximately equal, the
+interrupt rate will drop as low as 2000 interrupts per second. If the
+traffic is mostly transmit or mostly receive, the interrupt rate could
+be as high as 8000.
+
Setting InterruptThrottleRate to 0 turns off any interrupt moderation
and may improve small packet latency, but is generally not suitable
for bulk throughput traffic.
@@ -212,8 +165,6 @@
be platform-specific. If CPU utilization is not a concern, use
RX_POLLING (NAPI) and default driver settings.
-
-
RxDescriptors
-------------
Valid Range: 80-256 for 82542 and 82543-based adapters
@@ -225,15 +176,14 @@
incoming packets, at the expense of increased system memory utilization.
Each descriptor is 16 bytes. A receive buffer is also allocated for each
-descriptor and can be either 2048, 4096, 8192, or 16384 bytes, depending
+descriptor and can be either 2048, 4096, 8192, or 16384 bytes, depending
on the MTU setting. The maximum MTU size is 16110.
-NOTE: MTU designates the frame size. It only needs to be set for Jumbo
- Frames. Depending on the available system resources, the request
- for a higher number of receive descriptors may be denied. In this
+NOTE: MTU designates the frame size. It only needs to be set for Jumbo
+ Frames. Depending on the available system resources, the request
+ for a higher number of receive descriptors may be denied. In this
case, use a lower number.
-
RxIntDelay
----------
Valid Range: 0-65535 (0=off)
@@ -254,7 +204,6 @@
restoring the network connection. To eliminate the potential
for the hang ensure that RxIntDelay is set to 0.
-
RxAbsIntDelay
-------------
(This parameter is supported only on 82540, 82545 and later adapters.)
@@ -268,7 +217,6 @@
along with RxIntDelay, may improve traffic throughput in specific network
conditions.
-
Speed
-----
(This parameter is supported only on adapters with copper connections.)
@@ -280,7 +228,6 @@
partner is set to auto-negotiate, the board will auto-detect the correct
speed. Duplex should also be set when Speed is set to either 10 or 100.
-
TxDescriptors
-------------
Valid Range: 80-256 for 82542 and 82543-based adapters
@@ -295,6 +242,36 @@
higher number of transmit descriptors may be denied. In this case,
use a lower number.
+TxDescriptorStep
+----------------
+Valid Range: 1 (use every Tx Descriptor)
+ 4 (use every 4th Tx Descriptor)
+
+Default Value: 1 (use every Tx Descriptor)
+
+On certain non-Intel architectures, it has been observed that intense TX
+traffic bursts of short packets may result in an improper descriptor
+writeback. If this occurs, the driver will report a "TX Timeout" and reset
+the adapter, after which the transmit flow will restart, though data may
+have stalled for as much as 10 seconds before it resumes.
+
+The improper writeback does not occur on the first descriptor in a system
+memory cache-line, which is typically 32 bytes, or 4 descriptors long.
+
+Setting TxDescriptorStep to a value of 4 will ensure that all TX descriptors
+are aligned to the start of a system memory cache line, and so this problem
+will not occur.
+
+NOTES: Setting TxDescriptorStep to 4 effectively reduces the number of
+ TxDescriptors available for transmits to 1/4 of the normal allocation.
+ This has a possible negative performance impact, which may be
+ compensated for by allocating more descriptors using the TxDescriptors
+ module parameter.
+
+ There are other conditions which may result in "TX Timeout", which will
+ not be resolved by the use of the TxDescriptorStep parameter. As the
+ issue addressed by this parameter has never been observed on Intel
+ Architecture platforms, it should not be used on Intel platforms.
TxIntDelay
----------
@@ -307,7 +284,6 @@
system is reporting dropped transmits, this value may be set too high
causing the driver to run out of available transmit descriptors.
-
TxAbsIntDelay
-------------
(This parameter is supported only on 82540, 82545 and later adapters.)
@@ -330,6 +306,35 @@
A value of '1' indicates that the driver should enable IP checksum
offload for received packets (both UDP and TCP) to the adapter hardware.
+Copybreak
+---------
+Valid Range: 0-xxxxxxx (0=off)
+Default Value: 256
+Usage: insmod e1000.ko copybreak=128
+
+The driver copies all packets at or below this size to a fresh Rx
+buffer before handing them up the stack.
+
+This parameter is different from other parameters, in that it is a
+single (not 1,1,1 etc.) parameter applied to all driver instances and
+it is also available during runtime at
+/sys/module/e1000/parameters/copybreak
+
+SmartPowerDownEnable
+--------------------
+Valid Range: 0-1
+Default Value: 0 (disabled)
+
+Allows the PHY to turn off in lower power states. The user can set
+this parameter in supported chipsets.
+
+KumeranLockLoss
+---------------
+Valid Range: 0-1
+Default Value: 1 (enabled)
+
+This workaround skips resetting the PHY at shutdown for the initial
+silicon releases of ICH8 systems.
Speed and Duplex Configuration
==============================
@@ -385,40 +390,9 @@
parameter should not be used. Instead, use the Speed and Duplex parameters
previously mentioned to force the adapter to the same speed and duplex.
-
Additional Configurations
=========================
- Configuring the Driver on Different Distributions
- -------------------------------------------------
- Configuring a network driver to load properly when the system is started
- is distribution dependent. Typically, the configuration process involves
- adding an alias line to /etc/modules.conf or /etc/modprobe.conf as well
- as editing other system startup scripts and/or configuration files. Many
- popular Linux distributions ship with tools to make these changes for you.
- To learn the proper way to configure a network device for your system,
- refer to your distribution documentation. If during this process you are
- asked for the driver or module name, the name for the Linux Base Driver
- for the Intel(R) PRO/1000 Family of Adapters is e1000.
-
- As an example, if you install the e1000 driver for two PRO/1000 adapters
- (eth0 and eth1) and set the speed and duplex to 10full and 100half, add
- the following to modules.conf or or modprobe.conf:
-
- alias eth0 e1000
- alias eth1 e1000
- options e1000 Speed=10,100 Duplex=2,1
-
- Viewing Link Messages
- ---------------------
- Link messages will not be displayed to the console if the distribution is
- restricting system messages. In order to see network driver link messages
- on your console, set dmesg to eight by entering the following:
-
- dmesg -n 8
-
- NOTE: This setting is not saved across reboots.
-
Jumbo Frames
------------
Jumbo Frames support is enabled by changing the MTU to a value larger than
@@ -437,9 +411,11 @@
setting in a different location.
Notes:
-
- - To enable Jumbo Frames, increase the MTU size on the interface beyond
- 1500.
+ Degradation in throughput performance may be observed in some Jumbo frames
+ environments. If this is observed, increasing the application's socket buffer
+ size and/or increasing the /proc/sys/net/ipv4/tcp_*mem entry values may help.
+ See the specific application manual and /usr/src/linux*/Documentation/
+ networking/ip-sysctl.txt for more details.
- The maximum MTU setting for Jumbo Frames is 16110. This value coincides
with the maximum Jumbo Frames size of 16128.
@@ -447,40 +423,11 @@
- Using Jumbo Frames at 10 or 100 Mbps may result in poor performance or
loss of link.
- - Some Intel gigabit adapters that support Jumbo Frames have a frame size
- limit of 9238 bytes, with a corresponding MTU size limit of 9216 bytes.
- The adapters with this limitation are based on the Intel(R) 82571EB,
- 82572EI, 82573L and 80003ES2LAN controller. These correspond to the
- following product names:
- Intel(R) PRO/1000 PT Server Adapter
- Intel(R) PRO/1000 PT Desktop Adapter
- Intel(R) PRO/1000 PT Network Connection
- Intel(R) PRO/1000 PT Dual Port Server Adapter
- Intel(R) PRO/1000 PT Dual Port Network Connection
- Intel(R) PRO/1000 PF Server Adapter
- Intel(R) PRO/1000 PF Network Connection
- Intel(R) PRO/1000 PF Dual Port Server Adapter
- Intel(R) PRO/1000 PB Server Connection
- Intel(R) PRO/1000 PL Network Connection
- Intel(R) PRO/1000 EB Network Connection with I/O Acceleration
- Intel(R) PRO/1000 EB Backplane Connection with I/O Acceleration
- Intel(R) PRO/1000 PT Quad Port Server Adapter
-
- Adapters based on the Intel(R) 82542 and 82573V/E controller do not
support Jumbo Frames. These correspond to the following product names:
Intel(R) PRO/1000 Gigabit Server Adapter
Intel(R) PRO/1000 PM Network Connection
- - The following adapters do not support Jumbo Frames:
- Intel(R) 82562V 10/100 Network Connection
- Intel(R) 82566DM Gigabit Network Connection
- Intel(R) 82566DC Gigabit Network Connection
- Intel(R) 82566MM Gigabit Network Connection
- Intel(R) 82566MC Gigabit Network Connection
- Intel(R) 82562GT 10/100 Network Connection
- Intel(R) 82562G 10/100 Network Connection
-
-
Ethtool
-------
The driver utilizes the ethtool interface for driver configuration and
@@ -490,142 +437,14 @@
The latest release of ethtool can be found from
http://sourceforge.net/projects/gkernel.
- NOTE: Ethtool 1.6 only supports a limited set of ethtool options. Support
- for a more complete ethtool feature set can be enabled by upgrading
- ethtool to ethtool-1.8.1.
-
Enabling Wake on LAN* (WoL)
---------------------------
- WoL is configured through the Ethtool* utility. Ethtool is included with
- all versions of Red Hat after Red Hat 7.2. For other Linux distributions,
- download and install Ethtool from the following website:
- http://sourceforge.net/projects/gkernel.
-
- For instructions on enabling WoL with Ethtool, refer to the website listed
- above.
+ WoL is configured through the Ethtool* utility.
WoL will be enabled on the system during the next shut down or reboot.
For this driver version, in order to enable WoL, the e1000 driver must be
loaded when shutting down or rebooting the system.
- Wake On LAN is only supported on port A for the following devices:
- Intel(R) PRO/1000 PT Dual Port Network Connection
- Intel(R) PRO/1000 PT Dual Port Server Connection
- Intel(R) PRO/1000 PT Dual Port Server Adapter
- Intel(R) PRO/1000 PF Dual Port Server Adapter
- Intel(R) PRO/1000 PT Quad Port Server Adapter
-
- NAPI
- ----
- NAPI (Rx polling mode) is enabled in the e1000 driver.
-
- See www.cyberus.ca/~hadi/usenix-paper.tgz for more information on NAPI.
-
-
-Known Issues
-============
-
-Dropped Receive Packets on Half-duplex 10/100 Networks
-------------------------------------------------------
-If you have an Intel PCI Express adapter running at 10mbps or 100mbps, half-
-duplex, you may observe occasional dropped receive packets. There are no
-workarounds for this problem in this network configuration. The network must
-be updated to operate in full-duplex, and/or 1000mbps only.
-
-Jumbo Frames System Requirement
--------------------------------
-Memory allocation failures have been observed on Linux systems with 64 MB
-of RAM or less that are running Jumbo Frames. If you are using Jumbo
-Frames, your system may require more than the advertised minimum
-requirement of 64 MB of system memory.
-
-Performance Degradation with Jumbo Frames
------------------------------------------
-Degradation in throughput performance may be observed in some Jumbo frames
-environments. If this is observed, increasing the application's socket
-buffer size and/or increasing the /proc/sys/net/ipv4/tcp_*mem entry values
-may help. See the specific application manual and
-/usr/src/linux*/Documentation/
-networking/ip-sysctl.txt for more details.
-
-Jumbo Frames on Foundry BigIron 8000 switch
--------------------------------------------
-There is a known issue using Jumbo frames when connected to a Foundry
-BigIron 8000 switch. This is a 3rd party limitation. If you experience
-loss of packets, lower the MTU size.
-
-Allocating Rx Buffers when Using Jumbo Frames
----------------------------------------------
-Allocating Rx buffers when using Jumbo Frames on 2.6.x kernels may fail if
-the available memory is heavily fragmented. This issue may be seen with PCI-X
-adapters or with packet split disabled. This can be reduced or eliminated
-by changing the amount of available memory for receive buffer allocation, by
-increasing /proc/sys/vm/min_free_kbytes.
-
-Multiple Interfaces on Same Ethernet Broadcast Network
-------------------------------------------------------
-Due to the default ARP behavior on Linux, it is not possible to have
-one system on two IP networks in the same Ethernet broadcast domain
-(non-partitioned switch) behave as expected. All Ethernet interfaces
-will respond to IP traffic for any IP address assigned to the system.
-This results in unbalanced receive traffic.
-
-If you have multiple interfaces in a server, either turn on ARP
-filtering by entering:
-
- echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter
-(this only works if your kernel's version is higher than 2.4.5),
-
-NOTE: This setting is not saved across reboots. The configuration
-change can be made permanent by adding the line:
- net.ipv4.conf.all.arp_filter = 1
-to the file /etc/sysctl.conf
-
- or,
-
-install the interfaces in separate broadcast domains (either in
-different switches or in a switch partitioned to VLANs).
-
-82541/82547 can't link or are slow to link with some link partners
------------------------------------------------------------------
-There is a known compatibility issue with 82541/82547 and some
-low-end switches where the link will not be established, or will
-be slow to establish. In particular, these switches are known to
-be incompatible with 82541/82547:
-
- Planex FXG-08TE
- I-O Data ETG-SH8
-
-To workaround this issue, the driver can be compiled with an override
-of the PHY's master/slave setting. Forcing master or forcing slave
-mode will improve time-to-link.
-
- # make CFLAGS_EXTRA=-DE1000_MASTER_SLAVE=<n>
-
-Where <n> is:
-
- 0 = Hardware default
- 1 = Master mode
- 2 = Slave mode
- 3 = Auto master/slave
-
-Disable rx flow control with ethtool
-------------------------------------
-In order to disable receive flow control using ethtool, you must turn
-off auto-negotiation on the same command line.
-
-For example:
-
- ethtool -A eth? autoneg off rx off
-
-Unplugging network cable while ethtool -p is running
-----------------------------------------------------
-In kernel versions 2.5.50 and later (including 2.6 kernel), unplugging
-the network cable while ethtool -p is running will cause the system to
-become unresponsive to keyboard commands, except for control-alt-delete.
-Restarting the system appears to be the only remedy.
-
-
Support
=======
diff --git a/Documentation/networking/e1000e.txt b/Documentation/networking/e1000e.txt
new file mode 100644
index 0000000..6aa048b
--- /dev/null
+++ b/Documentation/networking/e1000e.txt
@@ -0,0 +1,302 @@
+Linux* Driver for Intel(R) Network Connection
+=============================================
+
+Intel Gigabit Linux driver.
+Copyright(c) 1999 - 2010 Intel Corporation.
+
+Contents
+========
+
+- Identifying Your Adapter
+- Command Line Parameters
+- Additional Configurations
+- Support
+
+Identifying Your Adapter
+========================
+
+The e1000e driver supports all PCI Express Intel(R) Gigabit Network
+Connections, except those that are 82575, 82576 and 82580-based*.
+
+* NOTE: The Intel(R) PRO/1000 P Dual Port Server Adapter is supported by
+ the e1000 driver, not the e1000e driver due to the 82546 part being used
+ behind a PCI Express bridge.
+
+For more information on how to identify your adapter, go to the Adapter &
+Driver ID Guide at:
+
+ http://support.intel.com/support/go/network/adapter/idguide.htm
+
+For the latest Intel network drivers for Linux, refer to the following
+website. In the search field, enter your adapter name or type, or use the
+networking link on the left to search for your adapter:
+
+ http://support.intel.com/support/go/network/adapter/home.htm
+
+Command Line Parameters
+=======================
+
+The default value for each parameter is generally the recommended setting,
+unless otherwise noted.
+
+NOTES: For more information about the InterruptThrottleRate,
+ RxIntDelay, TxIntDelay, RxAbsIntDelay, and TxAbsIntDelay
+ parameters, see the application note at:
+ http://www.intel.com/design/network/applnots/ap450.htm
+
+InterruptThrottleRate
+---------------------
+Valid Range: 0,1,3,4,100-100000 (0=off, 1=dynamic, 3=dynamic conservative,
+ 4=simplified balancing)
+Default Value: 3
+
+The driver can limit the amount of interrupts per second that the adapter
+will generate for incoming packets. It does this by writing a value to the
+adapter that is based on the maximum amount of interrupts that the adapter
+will generate per second.
+
+Setting InterruptThrottleRate to a value greater or equal to 100
+will program the adapter to send out a maximum of that many interrupts
+per second, even if more packets have come in. This reduces interrupt
+load on the system and can lower CPU utilization under heavy load,
+but will increase latency as packets are not processed as quickly.
+
+The driver has two adaptive modes (setting 1 or 3) in which
+it dynamically adjusts the InterruptThrottleRate value based on the traffic
+that it receives. After determining the type of incoming traffic in the last
+timeframe, it will adjust the InterruptThrottleRate to an appropriate value
+for that traffic.
+
+The algorithm classifies the incoming traffic every interval into
+classes. Once the class is determined, the InterruptThrottleRate value is
+adjusted to suit that traffic type the best. There are three classes defined:
+"Bulk traffic", for large amounts of packets of normal size; "Low latency",
+for small amounts of traffic and/or a significant percentage of small
+packets; and "Lowest latency", for almost completely small packets or
+minimal traffic.
+
+In dynamic conservative mode, the InterruptThrottleRate value is set to 4000
+for traffic that falls in class "Bulk traffic". If traffic falls in the "Low
+latency" or "Lowest latency" class, the InterruptThrottleRate is increased
+stepwise to 20000. This default mode is suitable for most applications.
+
+For situations where low latency is vital such as cluster or
+grid computing, the algorithm can reduce latency even more when
+InterruptThrottleRate is set to mode 1. In this mode, which operates
+the same as mode 3, the InterruptThrottleRate will be increased stepwise to
+70000 for traffic in class "Lowest latency".
+
+In simplified mode the interrupt rate is based on the ratio of Tx and
+Rx traffic. If the bytes per second rate is approximately equal, the
+interrupt rate will drop as low as 2000 interrupts per second. If the
+traffic is mostly transmit or mostly receive, the interrupt rate could
+be as high as 8000.
+
+Setting InterruptThrottleRate to 0 turns off any interrupt moderation
+and may improve small packet latency, but is generally not suitable
+for bulk throughput traffic.
+
+NOTE: InterruptThrottleRate takes precedence over the TxAbsIntDelay and
+ RxAbsIntDelay parameters. In other words, minimizing the receive
+ and/or transmit absolute delays does not force the controller to
+ generate more interrupts than what the Interrupt Throttle Rate
+ allows.
+
+NOTE: When e1000e is loaded with default settings and multiple adapters
+ are in use simultaneously, the CPU utilization may increase non-
+ linearly. In order to limit the CPU utilization without impacting
+ the overall throughput, we recommend that you load the driver as
+ follows:
+
+ modprobe e1000e InterruptThrottleRate=3000,3000,3000
+
+ This sets the InterruptThrottleRate to 3000 interrupts/sec for
+ the first, second, and third instances of the driver. The range
+ of 2000 to 3000 interrupts per second works on a majority of
+ systems and is a good starting point, but the optimal value will
+ be platform-specific. If CPU utilization is not a concern, use
+ RX_POLLING (NAPI) and default driver settings.
+
+RxIntDelay
+----------
+Valid Range: 0-65535 (0=off)
+Default Value: 0
+
+This value delays the generation of receive interrupts in units of 1.024
+microseconds. Receive interrupt reduction can improve CPU efficiency if
+properly tuned for specific network traffic. Increasing this value adds
+extra latency to frame reception and can end up decreasing the throughput
+of TCP traffic. If the system is reporting dropped receives, this value
+may be set too high, causing the driver to run out of available receive
+descriptors.
+
+CAUTION: When setting RxIntDelay to a value other than 0, adapters may
+ hang (stop transmitting) under certain network conditions. If
+ this occurs a NETDEV WATCHDOG message is logged in the system
+ event log. In addition, the controller is automatically reset,
+ restoring the network connection. To eliminate the potential
+ for the hang ensure that RxIntDelay is set to 0.
+
+RxAbsIntDelay
+-------------
+Valid Range: 0-65535 (0=off)
+Default Value: 8
+
+This value, in units of 1.024 microseconds, limits the delay in which a
+receive interrupt is generated. Useful only if RxIntDelay is non-zero,
+this value ensures that an interrupt is generated after the initial
+packet is received within the set amount of time. Proper tuning,
+along with RxIntDelay, may improve traffic throughput in specific network
+conditions.
+
+TxIntDelay
+----------
+Valid Range: 0-65535 (0=off)
+Default Value: 8
+
+This value delays the generation of transmit interrupts in units of
+1.024 microseconds. Transmit interrupt reduction can improve CPU
+efficiency if properly tuned for specific network traffic. If the
+system is reporting dropped transmits, this value may be set too high
+causing the driver to run out of available transmit descriptors.
+
+TxAbsIntDelay
+-------------
+Valid Range: 0-65535 (0=off)
+Default Value: 32
+
+This value, in units of 1.024 microseconds, limits the delay in which a
+transmit interrupt is generated. Useful only if TxIntDelay is non-zero,
+this value ensures that an interrupt is generated after the initial
+packet is sent on the wire within the set amount of time. Proper tuning,
+along with TxIntDelay, may improve traffic throughput in specific
+network conditions.
+
+Copybreak
+---------
+Valid Range: 0-xxxxxxx (0=off)
+Default Value: 256
+
+The driver copies all packets at or below this size to a fresh Rx
+buffer before handing them up the stack.
+
+This parameter is different from other parameters, in that it is a
+single (not 1,1,1 etc.) parameter applied to all driver instances and
+it is also available during runtime at
+/sys/module/e1000e/parameters/copybreak
+
+SmartPowerDownEnable
+--------------------
+Valid Range: 0-1
+Default Value: 0 (disabled)
+
+Allows the PHY to turn off in lower power states. The user can set this
+parameter in supported chipsets.
+
+KumeranLockLoss
+---------------
+Valid Range: 0-1
+Default Value: 1 (enabled)
+
+This workaround skips resetting the PHY at shutdown for the initial
+silicon releases of ICH8 systems.
+
+IntMode
+-------
+Valid Range: 0-2 (0=legacy, 1=MSI, 2=MSI-X)
+Default Value: 2
+
+Allows changing the interrupt mode at module load time, without requiring a
+recompile. If the driver load fails to enable a specific interrupt mode, the
+driver will try other interrupt modes, from least to most compatible. The
+interrupt order is MSI-X, MSI, Legacy. If specifying MSI (IntMode=1)
+interrupts, only MSI and Legacy will be attempted.
+
+CrcStripping
+------------
+Valid Range: 0-1
+Default Value: 1 (enabled)
+
+Strip the CRC from received packets before sending up the network stack. If
+you have a machine with a BMC enabled but cannot receive IPMI traffic after
+loading or enabling the driver, try disabling this feature.
+
+WriteProtectNVM
+---------------
+Valid Range: 0-1
+Default Value: 1 (enabled)
+
+Set the hardware to ignore all write/erase cycles to the GbE region in the
+ICHx NVM (non-volatile memory). This feature can be disabled by the
+WriteProtectNVM module parameter (enabled by default) only after a hardware
+reset, but the machine must be power cycled before trying to enable writes.
+
+Note: if the kernel config option CONFIG_STRICT_DEVMEM=y is set, the
+kernel boot option iomem=relaxed may be needed for the root user to write
+the NVM from user space via ethtool.
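+
+As a hypothetical sketch, disabling write protection after a hardware
+reset might look like:
+
+	modprobe e1000e WriteProtectNVM=0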
+
+Additional Configurations
+=========================
+
+ Jumbo Frames
+ ------------
+ Jumbo Frames support is enabled by changing the MTU to a value larger than
+ the default of 1500. Use the ifconfig command to increase the MTU size.
+ For example:
+
+ ifconfig eth<x> mtu 9000 up
+
+ This setting is not saved across reboots.
+
+ Notes:
+
+ - The maximum MTU setting for Jumbo Frames is 9216. This value coincides
+ with the maximum Jumbo Frames size of 9234 bytes.
+
+ - Using Jumbo Frames at 10 or 100 Mbps is not supported and may result in
+ poor performance or loss of link.
+
+ - Some adapters limit Jumbo Frames sized packets to a maximum of
+ 4096 bytes and some adapters do not support Jumbo Frames.
+
+
+ Ethtool
+ -------
+ The driver utilizes the ethtool interface for driver configuration and
+ diagnostics, as well as for displaying statistical information. We
+ strongly recommend downloading the latest version of ethtool from:
+
+ http://sourceforge.net/projects/gkernel
+
+ Speed and Duplex
+ ----------------
+ Speed and Duplex are configured through the Ethtool* utility. For
+ instructions, refer to the Ethtool man page.
+
+ Enabling Wake on LAN* (WoL)
+ ---------------------------
+ WoL is configured through the Ethtool* utility. For instructions on
+ enabling WoL with Ethtool, refer to the Ethtool man page.
+
+ WoL will be enabled on the system during the next shut down or reboot.
+ For this driver version, in order to enable WoL, the e1000e driver must be
+ loaded when shutting down or rebooting the system.
+
+ In most cases, Wake on LAN is only supported on port A of multi-port
+ adapters. To verify whether a port supports Wake on LAN, run ethtool eth<X>.
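+
+ For example, to check WoL support and then enable magic-packet wake-up
+ (standard ethtool usage; substitute the actual interface name):
+
+ ethtool eth<x>
+ ethtool -s eth<x> wol g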
+
+
+Support
+=======
+
+For general information, go to the Intel support website at:
+
+ www.intel.com/support/
+
+or the Intel Wired Networking project hosted by Sourceforge at:
+
+ http://sourceforge.net/projects/e1000
+
+If an issue is identified with the released source code on the supported
+kernel with a supported adapter, email the specific information related
+to the issue to e1000-devel@lists.sf.net
diff --git a/Documentation/networking/ixgbevf.txt b/Documentation/networking/ixgbevf.txt
old mode 100755
new mode 100644
index 19015de..21dd5d1
--- a/Documentation/networking/ixgbevf.txt
+++ b/Documentation/networking/ixgbevf.txt
@@ -1,19 +1,16 @@
Linux* Base Driver for Intel(R) Network Connection
==================================================
-November 24, 2009
+Intel 10 Gigabit Linux driver.
+Copyright(c) 1999 - 2010 Intel Corporation.
Contents
========
-- In This Release
- Identifying Your Adapter
- Known Issues/Troubleshooting
- Support
-In This Release
-===============
-
This file describes the ixgbevf Linux* Base Driver for Intel Network
Connection.
@@ -33,7 +30,7 @@
For more information on how to identify your adapter, go to the Adapter &
Driver ID Guide at:
- http://support.intel.com/support/network/sb/CS-008441.htm
+ http://support.intel.com/support/go/network/adapter/idguide.htm
Known Issues/Troubleshooting
============================
@@ -57,34 +54,3 @@
If an issue is identified with the released source code on the supported
kernel with a supported adapter, email the specific information related
to the issue to e1000-devel@lists.sf.net
-
-License
-=======
-
-Intel 10 Gigabit Linux driver.
-Copyright(c) 1999 - 2009 Intel Corporation.
-
-This program is free software; you can redistribute it and/or modify it
-under the terms and conditions of the GNU General Public License,
-version 2, as published by the Free Software Foundation.
-
-This program is distributed in the hope it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-more details.
-
-You should have received a copy of the GNU General Public License along with
-this program; if not, write to the Free Software Foundation, Inc.,
-51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-The full GNU General Public License is included in this distribution in
-the file called "COPYING".
-
-Trademarks
-==========
-
-Intel, Itanium, and Pentium are trademarks or registered trademarks of
-Intel Corporation or its subsidiaries in the United States and other
-countries.
-
-* Other names and brands may be claimed as the property of others.
diff --git a/Documentation/power/regulator/overview.txt b/Documentation/power/regulator/overview.txt
index 9363e05..8ed1758 100644
--- a/Documentation/power/regulator/overview.txt
+++ b/Documentation/power/regulator/overview.txt
@@ -13,7 +13,7 @@
current limit is controllable).
(C) 2008 Wolfson Microelectronics PLC.
-Author: Liam Girdwood <lg@opensource.wolfsonmicro.com>
+Author: Liam Girdwood <lrg@slimlogic.co.uk>
Nomenclature
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index ce46fa1..37c6aad 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -296,6 +296,7 @@
Conexant 5066
=============
laptop Basic Laptop config (default)
+ hp-laptop HP laptops, e.g. G60
dell-laptop Dell laptops
dell-vostro Dell Vostro
olpc-xo-1_5 OLPC XO 1.5
diff --git a/Documentation/vm/page-types.c b/Documentation/vm/page-types.c
index ccd951f..cc96ee2 100644
--- a/Documentation/vm/page-types.c
+++ b/Documentation/vm/page-types.c
@@ -478,7 +478,7 @@
}
if (opt_unpoison && !hwpoison_forget_fd) {
- sprintf(buf, "%s/renew-pfn", hwpoison_debug_fs);
+ sprintf(buf, "%s/unpoison-pfn", hwpoison_debug_fs);
hwpoison_forget_fd = checked_open(buf, O_WRONLY);
}
}
diff --git a/Documentation/workqueue.txt b/Documentation/workqueue.txt
new file mode 100644
index 0000000..e4498a2
--- /dev/null
+++ b/Documentation/workqueue.txt
@@ -0,0 +1,380 @@
+
+Concurrency Managed Workqueue (cmwq)
+
+September, 2010 Tejun Heo <tj@kernel.org>
+ Florian Mickler <florian@mickler.org>
+
+CONTENTS
+
+1. Introduction
+2. Why cmwq?
+3. The Design
+4. Application Programming Interface (API)
+5. Example Execution Scenarios
+6. Guidelines
+
+
+1. Introduction
+
+There are many cases where an asynchronous process execution context
+is needed and the workqueue (wq) API is the most commonly used
+mechanism for such cases.
+
+When such an asynchronous execution context is needed, a work item
+describing which function to execute is put on a queue. An
+independent thread serves as the asynchronous execution context. The
+queue is called workqueue and the thread is called worker.
+
+While there are work items on the workqueue, the worker executes the
+functions associated with the work items one after the other. When
+there is no work item left on the workqueue, the worker becomes idle.
+When a new work item gets queued, the worker begins executing again.
+
+
+2. Why cmwq?
+
+In the original wq implementation, a multi-threaded (MT) wq had one
+worker thread per CPU and a single-threaded (ST) wq had one worker
+thread system-wide. A single MT wq needed to keep around the same
+number of workers as the number of CPUs. The kernel grew a lot of MT
+wq users over the years and with the number of CPU cores continuously
+rising, some systems saturated the default 32k PID space just booting
+up.
+
+Although MT wq wasted a lot of resources, the level of concurrency
+provided was unsatisfactory. The limitation was common to both ST and
+MT wq, albeit less severe on MT. Each wq maintained its own separate
+worker pool. An MT wq could provide only one execution context per CPU
+while an ST wq provided one for the whole system. Work items had to
+compete for those very limited execution contexts, leading to various
+problems including proneness to deadlocks around the single execution
+context.
+
+The tension between the provided level of concurrency and resource
+usage also forced its users to make unnecessary tradeoffs like libata
+choosing to use ST wq for polling PIOs and accepting an unnecessary
+limitation that no two polling PIOs can progress at the same time. As
+MT wq don't provide much better concurrency, users that require a
+higher level of concurrency, like async or fscache, had to implement
+their own thread pool.
+
+Concurrency Managed Workqueue (cmwq) is a reimplementation of wq with
+focus on the following goals.
+
+* Maintain compatibility with the original workqueue API.
+
+* Use per-CPU unified worker pools shared by all wq to provide
+ flexible level of concurrency on demand without wasting a lot of
+ resource.
+
+* Automatically regulate worker pool and level of concurrency so that
+ the API users don't need to worry about such details.
+
+
+3. The Design
+
+In order to ease the asynchronous execution of functions, a new
+abstraction, the work item, is introduced.
+
+A work item is a simple struct that holds a pointer to the function
+that is to be executed asynchronously. Whenever a driver or subsystem
+wants a function to be executed asynchronously it has to set up a work
+item pointing to that function and queue that work item on a
+workqueue.
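+
+As a minimal sketch (all names below are hypothetical), setting up and
+queueing a work item looks like:
+
+	#include <linux/workqueue.h>
+
+	/* runs asynchronously in worker (process) context */
+	static void my_work_fn(struct work_struct *work)
+	{
+	}
+
+	static DECLARE_WORK(my_work, my_work_fn);
+
+	/* queue the work item on the default system workqueue */
+	schedule_work(&my_work);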
+
+Special purpose threads, called worker threads, execute the functions
+off of the queue, one after the other. If no work is queued, the
+worker threads become idle. These worker threads are managed in
+so-called thread-pools.
+
+The cmwq design differentiates between the user-facing workqueues that
+subsystems and drivers queue work items on and the backend mechanism
+which manages thread-pool and processes the queued work items.
+
+The backend is called gcwq. There is one gcwq for each possible CPU
+and one gcwq to serve work items queued on unbound workqueues.
+
+Subsystems and drivers can create and queue work items through special
+workqueue API functions as they see fit. They can influence some
+aspects of the way the work items are executed by setting flags on the
+workqueue they are putting the work item on. These flags include
+things like CPU locality, reentrancy, concurrency limits and more. To
+get a detailed overview refer to the API description of
+alloc_workqueue() below.
+
+When a work item is queued to a workqueue, the target gcwq is
+determined according to the queue parameters and workqueue attributes
+and appended to the shared worklist of the gcwq. For example, unless
+specifically overridden, a work item of a bound workqueue will be
+queued on the worklist of exactly the gcwq that is associated with the
+CPU the issuer is running on.
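+
+Continuing the hypothetical sketch above, queueing the same work item on
+a specific wq (allocated with alloc_workqueue(), described in the API
+section below) rather than the system default is simply:
+
+	queue_work(wq, &my_work);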
+
+For any worker pool implementation, managing the concurrency level
+(how many execution contexts are active) is an important issue. cmwq
+tries to keep the concurrency at a minimal but sufficient level.
+Minimal to save resources and sufficient in that the system is used at
+its full capacity.
+
+Each gcwq bound to an actual CPU implements concurrency management by
+hooking into the scheduler. The gcwq is notified whenever an active
+worker wakes up or sleeps and keeps track of the number of currently
+runnable workers. Generally, work items are not expected to
+hog a CPU and consume many cycles. That means maintaining just enough
+concurrency to prevent work processing from stalling should be
+optimal. As long as there are one or more runnable workers on the
+CPU, the gcwq doesn't start execution of a new work item, but, when the
+last running worker goes to sleep, it immediately schedules a new
+worker so that the CPU doesn't sit idle while there are pending work
+items. This allows using a minimal number of workers without losing
+execution bandwidth.
+
+Keeping idle workers around costs nothing but the memory space for the
+kthreads, so cmwq holds onto idle workers for a while before killing
+them.
+
+For an unbound wq, the above concurrency management doesn't apply and
+the gcwq for the pseudo unbound CPU tries to start executing all work
+items as soon as possible. Responsibility for regulating the
+concurrency level rests with the users. There is also a flag to mark a
+bound wq to ignore the concurrency management. Please refer to the
+API section for details.
+
+The forward progress guarantee relies on workers being created when
+more execution contexts are necessary, which in turn is guaranteed
+through the use of rescue workers. All work items which might be used
+on code paths that handle memory reclaim are required to be queued on
+wq's that have a rescue-worker reserved for execution under memory
+pressure. Otherwise, the thread-pool may deadlock waiting for
+execution contexts to free up.
+
+
+4. Application Programming Interface (API)
+
+alloc_workqueue() allocates a wq. The original create_*workqueue()
+functions are deprecated and scheduled for removal. alloc_workqueue()
+takes three arguments - @name, @flags and @max_active. @name is the
+name of the wq and also used as the name of the rescuer thread if
+there is one.
+
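+For example (the wq name is hypothetical, and whether WQ_RESCUER is
+needed depends on whether the wq is used in memory-reclaim paths), a
+driver might allocate and later destroy its own wq like this:
+
+	struct workqueue_struct *wq;
+
+	wq = alloc_workqueue("my_wq", WQ_RESCUER, 0);
+	if (!wq)
+		return -ENOMEM;
+	/* ... queue work items on wq ... */
+	destroy_workqueue(wq);
+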
+A wq no longer manages execution resources but serves as a domain for
+forward progress guarantee, flush and work item attributes. @flags
+and @max_active control how work items are assigned execution
+resources, scheduled and executed.
+
+@flags:
+
+ WQ_NON_REENTRANT
+
+ By default, a wq guarantees non-reentrance only on the same
+ CPU. A work item may not be executed concurrently on the same
+ CPU by multiple workers but is allowed to be executed
+ concurrently on multiple CPUs. This flag makes sure
+ non-reentrance is enforced across all CPUs. Work items queued
+ to a non-reentrant wq are guaranteed to be executed by at most
+ one worker system-wide at any given time.
+
+ WQ_UNBOUND
+
+ Work items queued to an unbound wq are served by a special
+ gcwq which hosts workers which are not bound to any specific
+ CPU. This makes the wq behave as a simple execution context
+ provider without concurrency management. The unbound gcwq
+ tries to start execution of work items as soon as possible.
+ Unbound wq sacrifices locality but is useful for the following
+ cases.
+
+ * A wide fluctuation in the concurrency level requirement is
+ expected, and using a bound wq may end up creating a large number
+ of mostly unused workers across different CPUs as the issuer
+ hops through different CPUs.
+
+ * Long running CPU intensive workloads which can be better
+ managed by the system scheduler.
+
+ WQ_FREEZEABLE
+
+ A freezeable wq participates in the freeze phase of the system
+ suspend operations. Work items on the wq are drained and no
+ new work item starts execution until thawed.
+
+ WQ_RESCUER
+
+ All wq which might be used in the memory reclaim paths _MUST_
+ have this flag set. This reserves one worker exclusively for
+ the execution of this wq under memory pressure.
+
+ WQ_HIGHPRI
+
+ Work items of a highpri wq are queued at the head of the
+ worklist of the target gcwq and start execution regardless of
+ the current concurrency level. In other words, highpri work
+ items will always start execution as soon as execution
+ resource is available.
+
+ Ordering among highpri work items is preserved - a highpri
+ work item queued after another highpri work item will start
+ execution after the earlier highpri work item starts.
+
+ Although highpri work items are not held back by other
+ runnable work items, they still contribute to the concurrency
+ level. Highpri work items in runnable state will prevent
+ non-highpri work items from starting execution.
+
+ This flag is meaningless for unbound wq.
+
+ WQ_CPU_INTENSIVE
+
+ Work items of a CPU intensive wq do not contribute to the
+ concurrency level. In other words, runnable CPU intensive
+ work items will not prevent other work items from starting
+ execution. This is useful for bound work items which are
+ expected to hog CPU cycles so that their execution is
+ regulated by the system scheduler.
+
+ Although CPU intensive work items don't contribute to the
+ concurrency level, the start of their execution is still
+ regulated by the concurrency management, and runnable
+ non-CPU-intensive work items can delay execution of CPU
+ intensive work items.
+
+ This flag is meaningless for unbound wq.
+
+ WQ_HIGHPRI | WQ_CPU_INTENSIVE
+
+ This combination makes the wq avoid interaction with
+ concurrency management completely and behave as a simple
+ per-CPU execution context provider. Work items queued on a
+ highpri CPU-intensive wq start execution as soon as resources
+ are available and don't affect execution of other work items.
+
+@max_active:
+
+@max_active determines the maximum number of execution contexts per
+CPU which can be assigned to the work items of a wq. For example,
+with @max_active of 16, at most 16 work items of the wq can be
+executing at the same time per CPU.
+
+Currently, for a bound wq, the maximum limit for @max_active is 512
+and the default value used when 0 is specified is 256. For an unbound
+wq, the limit is the higher of 512 and 4 * num_possible_cpus(). These
+values are chosen sufficiently high such that they are not the
+limiting factor while providing protection in runaway cases.
+
+The number of active work items of a wq is usually regulated by the
+users of the wq, more specifically, by how many work items the users
+may queue at the same time. Unless there is a specific need for
+throttling the number of active work items, specifying '0' is
+recommended.
+
+Some users depend on the strict execution ordering of ST wq. The
+combination of @max_active of 1 and WQ_UNBOUND is used to achieve this
+behavior. Work items on such wq are always queued to the unbound gcwq
+and only one work item can be active at any given time thus achieving
+the same ordering property as ST wq.
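+
+For example, such a strictly ordered wq might be created with (name
+hypothetical):
+
+	wq = alloc_workqueue("my_ordered_wq", WQ_UNBOUND, 1);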
+
+
+5. Example Execution Scenarios
+
+The following example execution scenarios try to illustrate how cmwq
+behaves under different configurations.
+
+ Work items w0, w1, w2 are queued to a bound wq q0 on the same CPU.
+ w0 burns CPU for 5ms then sleeps for 10ms then burns CPU for 5ms
+ again before finishing. w1 and w2 burn CPU for 5ms then sleep for
+ 10ms.
+
+Ignoring all other tasks, work items and processing overhead, and assuming
+simple FIFO scheduling, the following is one highly simplified version
+of possible sequences of events with the original wq.
+
+ TIME IN MSECS EVENT
+ 0 w0 starts and burns CPU
+ 5 w0 sleeps
+ 15 w0 wakes up and burns CPU
+ 20 w0 finishes
+ 20 w1 starts and burns CPU
+ 25 w1 sleeps
+ 35 w1 wakes up and finishes
+ 35 w2 starts and burns CPU
+ 40 w2 sleeps
+ 50 w2 wakes up and finishes
+
+And with cmwq with @max_active >= 3,
+
+ TIME IN MSECS EVENT
+ 0 w0 starts and burns CPU
+ 5 w0 sleeps
+ 5 w1 starts and burns CPU
+ 10 w1 sleeps
+ 10 w2 starts and burns CPU
+ 15 w2 sleeps
+ 15 w0 wakes up and burns CPU
+ 20 w0 finishes
+ 20 w1 wakes up and finishes
+ 25 w2 wakes up and finishes
+
+If @max_active == 2,
+
+ TIME IN MSECS EVENT
+ 0 w0 starts and burns CPU
+ 5 w0 sleeps
+ 5 w1 starts and burns CPU
+ 10 w1 sleeps
+ 15 w0 wakes up and burns CPU
+ 20 w0 finishes
+ 20 w1 wakes up and finishes
+ 20 w2 starts and burns CPU
+ 25 w2 sleeps
+ 35 w2 wakes up and finishes
+
+Now, let's assume w1 and w2 are queued to a different wq q1 which has
+WQ_HIGHPRI set,
+
+ TIME IN MSECS EVENT
+ 0 w1 and w2 start and burn CPU
+ 5 w1 sleeps
+ 10 w2 sleeps
+ 10 w0 starts and burns CPU
+ 15 w0 sleeps
+ 15 w1 wakes up and finishes
+ 20 w2 wakes up and finishes
+ 25 w0 wakes up and burns CPU
+ 30 w0 finishes
+
+If q1 has WQ_CPU_INTENSIVE set,
+
+ TIME IN MSECS EVENT
+ 0 w0 starts and burns CPU
+ 5 w0 sleeps
+ 5 w1 and w2 start and burn CPU
+ 10 w1 sleeps
+ 15 w2 sleeps
+ 15 w0 wakes up and burns CPU
+ 20 w0 finishes
+ 20 w1 wakes up and finishes
+ 25 w2 wakes up and finishes
+
+
+6. Guidelines
+
+* Do not forget to use WQ_RESCUER if a wq may process work items which
+ are used during memory reclaim. Each wq with WQ_RESCUER set has one
+ rescuer thread reserved for it. If there is dependency among
+ multiple work items used during memory reclaim, they should be
+ queued to separate wq each with WQ_RESCUER.
+
+* Unless strict ordering is required, there is no need to use ST wq.
+
+* Unless there is a specific need, using 0 for @max_active is
+ recommended. In most use cases, concurrency level usually stays
+ well under the default limit.
+
+* A wq serves as a domain for forward progress guarantee (WQ_RESCUER),
+ flush and work item attributes. Work items which are not involved
+ in memory reclaim, don't need to be flushed as a part of a group
+ of work items, and don't require any special attribute can use one
+ of the system wqs. There is no difference in execution
+ characteristics between using a dedicated wq and a system wq.
+
+* Unless work items are expected to consume a huge amount of CPU
+ cycles, using a bound wq is usually beneficial due to the increased
+ level of locality in wq operations and work item execution.
diff --git a/MAINTAINERS b/MAINTAINERS
index 433f353..3d4179f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -454,6 +454,17 @@
S: Maintained
F: drivers/infiniband/hw/amso1100/
+ANALOG DEVICES INC ASOC DRIVERS
+L: uclinux-dist-devel@blackfin.uclinux.org
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+W: http://blackfin.uclinux.org/
+S: Supported
+F: sound/soc/blackfin/*
+F: sound/soc/codecs/ad1*
+F: sound/soc/codecs/adau*
+F: sound/soc/codecs/adav*
+F: sound/soc/codecs/ssm*
+
AOA (Apple Onboard Audio) ALSA DRIVER
M: Johannes Berg <johannes@sipsolutions.net>
L: linuxppc-dev@lists.ozlabs.org
@@ -951,6 +962,23 @@
S: Maintained
F: arch/arm/mach-s3c6410/
+ARM/S5P ARM ARCHITECTURES
+M: Kukjin Kim <kgene.kim@samsung.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
+S: Maintained
+F: arch/arm/mach-s5p*/
+
+ARM/SAMSUNG S5P SERIES FIMC SUPPORT
+M: Kyungmin Park <kyungmin.park@samsung.com>
+M: Sylwester Nawrocki <s.nawrocki@samsung.com>
+L: linux-arm-kernel@lists.infradead.org
+L: linux-media@vger.kernel.org
+S: Maintained
+F: arch/arm/plat-s5p/dev-fimc*
+F: arch/arm/plat-samsung/include/plat/*fimc*
+F: drivers/media/video/s5p-fimc/
+
ARM/SHMOBILE ARM ARCHITECTURE
M: Paul Mundt <lethal@linux-sh.org>
M: Magnus Damm <magnus.damm@gmail.com>
@@ -1124,7 +1152,7 @@
M: Jay Cliburn <jcliburn@gmail.com>
M: Chris Snook <chris.snook@gmail.com>
M: Jie Yang <jie.yang@atheros.com>
-L: atl1-devel@lists.sourceforge.net
+L: netdev@vger.kernel.org
W: http://sourceforge.net/projects/atl1
W: http://atl1.sourceforge.net
S: Maintained
@@ -1209,7 +1237,7 @@
F: include/linux/cfag12864b.h
AVR32 ARCHITECTURE
-M: Haavard Skinnemoen <hskinnemoen@atmel.com>
+M: Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>
W: http://www.atmel.com/products/AVR32/
W: http://avr32linux.org/
W: http://avrfreaks.net/
@@ -1217,7 +1245,7 @@
F: arch/avr32/
AVR32/AT32AP MACHINE SUPPORT
-M: Haavard Skinnemoen <hskinnemoen@atmel.com>
+M: Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>
S: Supported
F: arch/avr32/mach-at32ap/
@@ -1434,6 +1462,16 @@
F: Documentation/video4linux/cafe_ccic
F: drivers/media/video/cafe_ccic*
+CAIF NETWORK LAYER
+M: Sjur Braendeland <sjur.brandeland@stericsson.com>
+L: netdev@vger.kernel.org
+S: Supported
+F: Documentation/networking/caif/
+F: drivers/net/caif/
+F: include/linux/caif/
+F: include/net/caif/
+F: net/caif/
+
CALGARY x86-64 IOMMU
M: Muli Ben-Yehuda <muli@il.ibm.com>
M: "Jon D. Mason" <jdmason@kudzu.us>
@@ -1489,6 +1527,8 @@
S: Supported
F: Documentation/filesystems/ceph.txt
F: fs/ceph
+F: net/ceph
+F: include/linux/ceph
CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM:
M: David Vrabel <david.vrabel@csr.com>
@@ -1665,8 +1705,7 @@
F: mm/*cgroup*
CORETEMP HARDWARE MONITORING DRIVER
-M: Rudolf Marek <r.marek@assembler.cz>
-M: Huaxu Wan <huaxu.wan@intel.com>
+M: Fenghua Yu <fenghua.yu@intel.com>
L: lm-sensors@lm-sensors.org
S: Maintained
F: Documentation/hwmon/coretemp
@@ -2179,6 +2218,12 @@
S: Maintained
F: drivers/platform/x86/eeepc-laptop.c
+EFIFB FRAMEBUFFER DRIVER
+L: linux-fbdev@vger.kernel.org
+M: Peter Jones <pjones@redhat.com>
+S: Maintained
+F: drivers/video/efifb.c
+
EFS FILESYSTEM
W: http://aeschi.ch.eu.org/efs/
S: Orphan
@@ -2191,6 +2236,12 @@
S: Supported
F: drivers/infiniband/hw/ehca/
+EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
+M: Breno Leitao <leitao@linux.vnet.ibm.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/ehea/
+
EMBEDDED LINUX
M: Paul Gortmaker <paul.gortmaker@windriver.com>
M: Matt Mackall <mpm@selenic.com>
@@ -2286,6 +2337,12 @@
F: Documentation/hwmon/f71805f
F: drivers/hwmon/f71805f.c
+FANOTIFY
+M: Eric Paris <eparis@redhat.com>
+S: Maintained
+F: fs/notify/fanotify/
+F: include/linux/fanotify.h
+
FARSYNC SYNCHRONOUS DRIVER
M: Kevin Curtis <kevin.curtis@farsite.co.uk>
W: http://www.farsite.co.uk/
@@ -2490,7 +2547,7 @@
F: drivers/scsi/gdt*
GENERIC GPIO I2C DRIVER
-M: Haavard Skinnemoen <hskinnemoen@atmel.com>
+M: Haavard Skinnemoen <hskinnemoen@gmail.com>
S: Supported
F: drivers/i2c/busses/i2c-gpio.c
F: include/linux/i2c-gpio.h
@@ -2625,9 +2682,14 @@
F: drivers/media/video/gspca/
HARDWARE MONITORING
+M: Jean Delvare <khali@linux-fr.org>
+M: Guenter Roeck <guenter.roeck@ericsson.com>
L: lm-sensors@lm-sensors.org
W: http://www.lm-sensors.org/
-S: Orphan
+T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
+T: quilt kernel.org/pub/linux/kernel/people/groeck/linux-staging/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
+S: Maintained
F: Documentation/hwmon/
F: drivers/hwmon/
F: include/linux/hwmon*.h
@@ -2765,11 +2827,6 @@
F: arch/x86/kernel/hpet.c
F: arch/x86/include/asm/hpet.h
-HPET: ACPI
-M: Bob Picco <bob.picco@hp.com>
-S: Maintained
-F: drivers/char/hpet.c
-
HPFS FILESYSTEM
M: Mikulas Patocka <mikulas@artax.karlin.mff.cuni.cz>
W: http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi
@@ -3018,16 +3075,27 @@
S: Maintained
F: drivers/net/ixp2000/
-INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe)
+INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf)
M: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
M: Jesse Brandeburg <jesse.brandeburg@intel.com>
M: Bruce Allan <bruce.w.allan@intel.com>
-M: Alex Duyck <alexander.h.duyck@intel.com>
+M: Carolyn Wyborny <carolyn.wyborny@intel.com>
+M: Don Skidmore <donald.c.skidmore@intel.com>
+M: Greg Rose <gregory.v.rose@intel.com>
M: PJ Waskiewicz <peter.p.waskiewicz.jr@intel.com>
+M: Alex Duyck <alexander.h.duyck@intel.com>
M: John Ronciak <john.ronciak@intel.com>
L: e1000-devel@lists.sourceforge.net
W: http://e1000.sourceforge.net/
S: Supported
+F: Documentation/networking/e100.txt
+F: Documentation/networking/e1000.txt
+F: Documentation/networking/e1000e.txt
+F: Documentation/networking/igb.txt
+F: Documentation/networking/igbvf.txt
+F: Documentation/networking/ixgb.txt
+F: Documentation/networking/ixgbe.txt
+F: Documentation/networking/ixgbevf.txt
F: drivers/net/e100.c
F: drivers/net/e1000/
F: drivers/net/e1000e/
@@ -3035,6 +3103,7 @@
F: drivers/net/igbvf/
F: drivers/net/ixgb/
F: drivers/net/ixgbe/
+F: drivers/net/ixgbevf/
INTEL PRO/WIRELESS 2100 NETWORK CONNECTION SUPPORT
L: linux-wireless@vger.kernel.org
@@ -3095,7 +3164,7 @@
IOC3 SERIAL DRIVER
M: Pat Gefre <pfg@sgi.com>
-L: linux-mips@linux-mips.org
+L: linux-serial@vger.kernel.org
S: Maintained
F: drivers/serial/ioc3_serial.c
@@ -3382,7 +3451,7 @@
KEXEC
M: Eric Biederman <ebiederm@xmission.com>
-W: http://ftp.kernel.org/pub/linux/kernel/people/horms/kexec-tools/
+W: http://kernel.org/pub/linux/utils/kernel/kexec/
L: kexec@lists.infradead.org
S: Maintained
F: include/linux/kexec.h
@@ -3478,7 +3547,7 @@
M: Rusty Russell <rusty@rustcorp.com.au>
L: lguest@lists.ozlabs.org
W: http://lguest.ozlabs.org/
-S: Maintained
+S: Odd Fixes
F: Documentation/lguest/
F: arch/x86/lguest/
F: drivers/lguest/
@@ -3743,9 +3812,8 @@
S: Supported
MATROX FRAMEBUFFER DRIVER
-M: Petr Vandrovec <vandrove@vc.cvut.cz>
L: linux-fbdev@vger.kernel.org
-S: Maintained
+S: Orphan
F: drivers/video/matrox/matroxfb_*
F: include/linux/matroxfb.h
@@ -3869,10 +3937,8 @@
F: drivers/char/mxser.*
MSI LAPTOP SUPPORT
-M: Lennart Poettering <mzxreary@0pointer.de>
+M: Lee, Chun-Yi <jlee@novell.com>
L: platform-driver-x86@vger.kernel.org
-W: https://tango.0pointer.de/mailman/listinfo/s270-linux
-W: http://0pointer.de/lennart/tchibo.html
S: Maintained
F: drivers/platform/x86/msi-laptop.c
@@ -3889,8 +3955,10 @@
F: drivers/mfd/
MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM
-S: Orphan
+M: Chris Ball <cjb@laptop.org>
L: linux-mmc@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git
+S: Maintained
F: drivers/mmc/
F: include/linux/mmc/
@@ -3907,13 +3975,12 @@
F: sound/oss/msnd*
MULTITECH MULTIPORT CARD (ISICOM)
-M: Jiri Slaby <jirislaby@gmail.com>
-S: Maintained
+S: Orphan
F: drivers/char/isicom.c
F: include/linux/isicom.h
MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
-M: Felipe Balbi <felipe.balbi@nokia.com>
+M: Felipe Balbi <balbi@ti.com>
L: linux-usb@vger.kernel.org
T: git git://gitorious.org/usb/usb.git
S: Maintained
@@ -3933,8 +4000,8 @@
F: drivers/net/natsemi.c
NCP FILESYSTEM
-M: Petr Vandrovec <vandrove@vc.cvut.cz>
-S: Maintained
+M: Petr Vandrovec <petr@vandrovec.name>
+S: Odd Fixes
F: fs/ncpfs/
NCR DUAL 700 SCSI DRIVER (MICROCHANNEL)
@@ -4211,7 +4278,7 @@
F: drivers/char/hw_random/omap-rng.c
OMAP USB SUPPORT
-M: Felipe Balbi <felipe.balbi@nokia.com>
+M: Felipe Balbi <balbi@ti.com>
M: David Brownell <dbrownell@users.sourceforge.net>
L: linux-usb@vger.kernel.org
L: linux-omap@vger.kernel.org
@@ -4588,7 +4655,7 @@
PRISM54 WIRELESS DRIVER
M: "Luis R. Rodriguez" <mcgrof@gmail.com>
L: linux-wireless@vger.kernel.org
-W: http://prism54.org
+W: http://wireless.kernel.org/en/users/Drivers/p54
S: Obsolete
F: drivers/net/wireless/prism54/
@@ -4740,6 +4807,15 @@
F: include/linux/qnx4_fs.h
F: include/linux/qnxtypes.h
+RADOS BLOCK DEVICE (RBD)
+M: Yehuda Sadeh <yehuda@hq.newdream.net>
+M: Sage Weil <sage@newdream.net>
+L: ceph-devel@vger.kernel.org
+S: Supported
+F: drivers/block/rbd.c
+F: drivers/block/rbd_types.h
+
RADEON FRAMEBUFFER DISPLAY DRIVER
M: Benjamin Herrenschmidt <benh@kernel.crashing.org>
L: linux-fbdev@vger.kernel.org
@@ -4789,6 +4865,7 @@
M: Josh Triplett <josh@freedesktop.org>
M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
S: Supported
+T: git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
F: Documentation/RCU/torture.txt
F: kernel/rcutorture.c
@@ -4813,6 +4890,7 @@
M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
W: http://www.rdrop.com/users/paulmck/rclock/
S: Supported
+T: git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
F: Documentation/RCU/
F: include/linux/rcu*
F: include/linux/srcu*
@@ -4820,12 +4898,10 @@
F: kernel/srcu*
X: kernel/rcutorture.c
-REAL TIME CLOCK DRIVER
+REAL TIME CLOCK DRIVER (LEGACY)
M: Paul Gortmaker <p_gortmaker@yahoo.com>
S: Maintained
-F: Documentation/rtc.txt
-F: drivers/rtc/
-F: include/linux/rtc.h
+F: drivers/char/rtc.c
REAL TIME CLOCK (RTC) SUBSYSTEM
M: Alessandro Zummo <a.zummo@towertech.it>
@@ -4965,6 +5041,12 @@
F: drivers/media/video/*7146*
F: include/media/*7146*
+SAMSUNG AUDIO (ASoC) DRIVERS
+M: Jassi Brar <jassi.brar@samsung.com>
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+S: Supported
+F: sound/soc/s3c24xx
+
TLG2300 VIDEO4LINUX-2 DRIVER
M: Huang Shijie <shijie8@gmail.com>
M: Kang Yong <kangyong@telegent.com>
@@ -5062,8 +5144,10 @@
F: drivers/mmc/host/sdricoh_cs.c
SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) DRIVER
-S: Orphan
+M: Chris Ball <cjb@laptop.org>
L: linux-mmc@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git
+S: Maintained
F: drivers/mmc/host/sdhci.*
SECURE DIGITAL HOST CONTROLLER INTERFACE, OPEN FIRMWARE BINDINGS (SDHCI-OF)
@@ -6405,8 +6489,10 @@
WOLFSON MICROELECTRONICS DRIVERS
M: Mark Brown <broonie@opensource.wolfsonmicro.com>
M: Ian Lartey <ian@opensource.wolfsonmicro.com>
+M: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+T: git git://opensource.wolfsonmicro.com/linux-2.6-asoc
T: git git://opensource.wolfsonmicro.com/linux-2.6-audioplus
-W: http://opensource.wolfsonmicro.com/node/8
+W: http://opensource.wolfsonmicro.com/content/linux-drivers-wolfson-devices
S: Supported
F: Documentation/hwmon/wm83??
F: drivers/leds/leds-wm83*.c
diff --git a/Makefile b/Makefile
index 2c0299f..d3c1071 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 36
-EXTRAVERSION = -rc2
-NAME = Sheep on Meth
+EXTRAVERSION =
+NAME = Flesh-Eating Bats with Fangs
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
@@ -568,6 +568,12 @@
ifdef CONFIG_FUNCTION_TRACER
KBUILD_CFLAGS += -pg
+ifdef CONFIG_DYNAMIC_FTRACE
+ ifdef CONFIG_HAVE_C_RECORDMCOUNT
+ BUILD_C_RECORDMCOUNT := y
+ export BUILD_C_RECORDMCOUNT
+ endif
+endif
endif
# We trigger additional mismatches with less inlining
@@ -591,6 +597,11 @@
# conserve stack if available
KBUILD_CFLAGS += $(call cc-option,-fconserve-stack)
+# check for 'asm goto'
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
+ KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
+endif
+
# Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments
# But warn user when we do so
warn-assign = \
@@ -1408,8 +1419,8 @@
$(OBJDUMP) -d vmlinux $$(find . -name '*.ko') | \
$(PERL) $(src)/scripts/checkstack.pl $(CHECKSTACK_ARCH)
-kernelrelease: include/config/kernel.release
- @echo $(KERNELRELEASE)
+kernelrelease:
+ @echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion $(srctree))"
kernelversion:
@echo $(KERNELVERSION)
diff --git a/arch/Kconfig b/arch/Kconfig
index 4877a8c..53d7f61 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -32,8 +32,9 @@
config KPROBES
bool "Kprobes"
- depends on KALLSYMS && MODULES
+ depends on MODULES
depends on HAVE_KPROBES
+ select KALLSYMS
help
Kprobes allows you to trap at almost any kernel address and
execute a callback function. register_kprobe() establishes
@@ -45,7 +46,6 @@
def_bool y
depends on KPROBES && HAVE_OPTPROBES
depends on !PREEMPT
- select KALLSYMS_ALL
config HAVE_EFFICIENT_UNALIGNED_ACCESS
bool
@@ -158,4 +158,7 @@
subsystem. Also has support for calculating CPU cycle events
to determine how many clock cycles in a given period.
+config HAVE_ARCH_JUMP_LABEL
+ bool
+
source "kernel/gcov/Kconfig"
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index b9647bb..d04ccd7 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -9,6 +9,7 @@
select HAVE_IDE
select HAVE_OPROFILE
select HAVE_SYSCALL_WRAPPERS
+ select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select HAVE_DMA_ATTRS
help
diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
index f199e69..ad368a9 100644
--- a/arch/alpha/include/asm/cache.h
+++ b/arch/alpha/include/asm/cache.h
@@ -17,7 +17,6 @@
# define L1_CACHE_SHIFT 5
#endif
-#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#endif
diff --git a/arch/alpha/include/asm/cacheflush.h b/arch/alpha/include/asm/cacheflush.h
index 01d71e1..012f124 100644
--- a/arch/alpha/include/asm/cacheflush.h
+++ b/arch/alpha/include/asm/cacheflush.h
@@ -43,6 +43,8 @@
/* ??? Ought to use this in arch/alpha/kernel/signal.c too. */
#ifndef CONFIG_SMP
+#include <linux/sched.h>
+
extern void __load_new_mm_context(struct mm_struct *);
static inline void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
diff --git a/arch/alpha/include/asm/perf_event.h b/arch/alpha/include/asm/perf_event.h
index 4157cd3..fe792ca 100644
--- a/arch/alpha/include/asm/perf_event.h
+++ b/arch/alpha/include/asm/perf_event.h
@@ -1,11 +1,6 @@
#ifndef __ASM_ALPHA_PERF_EVENT_H
#define __ASM_ALPHA_PERF_EVENT_H
-/* Alpha only supports software events through this interface. */
-extern void set_perf_event_pending(void);
-
-#define PERF_EVENT_INDEX_OFFSET 0
-
#ifdef CONFIG_PERF_EVENTS
extern void init_hw_perf_events(void);
#else
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
index 804e531..058937b 100644
--- a/arch/alpha/include/asm/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
@@ -449,10 +449,13 @@
#define __NR_pwritev 491
#define __NR_rt_tgsigqueueinfo 492
#define __NR_perf_event_open 493
+#define __NR_fanotify_init 494
+#define __NR_fanotify_mark 495
+#define __NR_prlimit64 496
#ifdef __KERNEL__
-#define NR_SYSCALLS 494
+#define NR_SYSCALLS 497
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
@@ -463,6 +466,7 @@
#define __ARCH_WANT_SYS_OLD_GETRLIMIT
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
/* "Conditional" syscalls. What we want is
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
index b45d913..6d159ce 100644
--- a/arch/alpha/kernel/entry.S
+++ b/arch/alpha/kernel/entry.S
@@ -73,8 +73,6 @@
ldq $20, HAE_REG($19); \
stq $21, HAE_CACHE($19); \
stq $21, 0($20); \
- ldq $0, 0($sp); \
- ldq $1, 8($sp); \
99:; \
ldq $19, 72($sp); \
ldq $20, 80($sp); \
@@ -316,19 +314,24 @@
cmovne $26, 0, $19 /* $19 = 0 => non-restartable */
ldq $0, SP_OFF($sp)
and $0, 8, $0
- beq $0, restore_all
-ret_from_reschedule:
+ beq $0, ret_to_kernel
+ret_to_user:
/* Make sure need_resched and sigpending don't change between
sampling and the rti. */
lda $16, 7
call_pal PAL_swpipl
ldl $5, TI_FLAGS($8)
and $5, _TIF_WORK_MASK, $2
- bne $5, work_pending
+ bne $2, work_pending
restore_all:
RESTORE_ALL
call_pal PAL_rti
+ret_to_kernel:
+ lda $16, 7
+ call_pal PAL_swpipl
+ br restore_all
+
.align 3
$syscall_error:
/*
@@ -363,7 +366,7 @@
* $8: current.
* $19: The old syscall number, or zero if this is not a return
* from a syscall that errored and is possibly restartable.
- * $20: Error indication.
+ * $20: The old a3 value
*/
.align 4
@@ -392,12 +395,18 @@
$work_notifysig:
mov $sp, $16
- br $1, do_switch_stack
+ bsr $1, do_switch_stack
mov $sp, $17
mov $5, $18
+ mov $19, $9 /* save old syscall number */
+ mov $20, $10 /* save old a3 */
+ and $5, _TIF_SIGPENDING, $2
+ cmovne $2, 0, $9 /* we don't want double syscall restarts */
jsr $26, do_notify_resume
+ mov $9, $19
+ mov $10, $20
bsr $1, undo_switch_stack
- br restore_all
+ br ret_to_user
.end work_pending
/*
@@ -430,6 +439,7 @@
beq $1, 1f
ldq $27, 0($2)
1: jsr $26, ($27), sys_gettimeofday
+ret_from_straced:
ldgp $gp, 0($26)
/* check return.. */
@@ -650,7 +660,7 @@
/* We don't actually care for a3 success widgetry in the kernel.
Not for positive errno values. */
stq $0, 0($sp) /* $0 */
- br restore_all
+ br ret_to_kernel
.end kernel_thread
/*
@@ -757,11 +767,15 @@
.ent sys_sigreturn
sys_sigreturn:
.prologue 0
+ lda $9, ret_from_straced
+ cmpult $26, $9, $9
mov $sp, $17
lda $18, -SWITCH_STACK_SIZE($sp)
lda $sp, -SWITCH_STACK_SIZE($sp)
jsr $26, do_sigreturn
- br $1, undo_switch_stack
+ bne $9, 1f
+ jsr $26, syscall_trace
+1: br $1, undo_switch_stack
br ret_from_sys_call
.end sys_sigreturn
@@ -770,47 +784,19 @@
.ent sys_rt_sigreturn
sys_rt_sigreturn:
.prologue 0
+ lda $9, ret_from_straced
+ cmpult $26, $9, $9
mov $sp, $17
lda $18, -SWITCH_STACK_SIZE($sp)
lda $sp, -SWITCH_STACK_SIZE($sp)
jsr $26, do_rt_sigreturn
- br $1, undo_switch_stack
+ bne $9, 1f
+ jsr $26, syscall_trace
+1: br $1, undo_switch_stack
br ret_from_sys_call
.end sys_rt_sigreturn
.align 4
- .globl sys_sigsuspend
- .ent sys_sigsuspend
-sys_sigsuspend:
- .prologue 0
- mov $sp, $17
- br $1, do_switch_stack
- mov $sp, $18
- subq $sp, 16, $sp
- stq $26, 0($sp)
- jsr $26, do_sigsuspend
- ldq $26, 0($sp)
- lda $sp, SWITCH_STACK_SIZE+16($sp)
- ret
-.end sys_sigsuspend
-
- .align 4
- .globl sys_rt_sigsuspend
- .ent sys_rt_sigsuspend
-sys_rt_sigsuspend:
- .prologue 0
- mov $sp, $18
- br $1, do_switch_stack
- mov $sp, $19
- subq $sp, 16, $sp
- stq $26, 0($sp)
- jsr $26, do_rt_sigsuspend
- ldq $26, 0($sp)
- lda $sp, SWITCH_STACK_SIZE+16($sp)
- ret
-.end sys_rt_sigsuspend
-
- .align 4
.globl sys_sethae
.ent sys_sethae
sys_sethae:
@@ -929,15 +915,6 @@
.end sys_execve
.align 4
- .globl osf_sigprocmask
- .ent osf_sigprocmask
-osf_sigprocmask:
- .prologue 0
- mov $sp, $18
- jmp $31, sys_osf_sigprocmask
-.end osf_sigprocmask
-
- .align 4
.globl alpha_ni_syscall
.ent alpha_ni_syscall
alpha_ni_syscall:
diff --git a/arch/alpha/kernel/err_ev6.c b/arch/alpha/kernel/err_ev6.c
index 8ca6345..253cf1a 100644
--- a/arch/alpha/kernel/err_ev6.c
+++ b/arch/alpha/kernel/err_ev6.c
@@ -90,11 +90,13 @@
ev6_parse_cbox(u64 c_addr, u64 c1_syn, u64 c2_syn,
u64 c_stat, u64 c_sts, int print)
{
- char *sourcename[] = { "UNKNOWN", "UNKNOWN", "UNKNOWN",
- "MEMORY", "BCACHE", "DCACHE",
- "BCACHE PROBE", "BCACHE PROBE" };
- char *streamname[] = { "D", "I" };
- char *bitsname[] = { "SINGLE", "DOUBLE" };
+ static const char * const sourcename[] = {
+ "UNKNOWN", "UNKNOWN", "UNKNOWN",
+ "MEMORY", "BCACHE", "DCACHE",
+ "BCACHE PROBE", "BCACHE PROBE"
+ };
+ static const char * const streamname[] = { "D", "I" };
+ static const char * const bitsname[] = { "SINGLE", "DOUBLE" };
int status = MCHK_DISPOSITION_REPORT;
int source = -1, stream = -1, bits = -1;
diff --git a/arch/alpha/kernel/err_marvel.c b/arch/alpha/kernel/err_marvel.c
index 52a79df..648ae88 100644
--- a/arch/alpha/kernel/err_marvel.c
+++ b/arch/alpha/kernel/err_marvel.c
@@ -109,7 +109,7 @@
#define IO7__ERR_CYC__CYCLE__M (0x7)
printk("%s Packet In Error: %s\n"
- "%s Error in %s, cycle %ld%s%s\n",
+ "%s Error in %s, cycle %lld%s%s\n",
err_print_prefix,
packet_desc[EXTRACT(err_cyc, IO7__ERR_CYC__PACKET)],
err_print_prefix,
@@ -313,7 +313,7 @@
}
printk("%s Up Hose Garbage Symptom:\n"
- "%s Source Port: %ld - Dest PID: %ld - OpCode: %s\n",
+ "%s Source Port: %lld - Dest PID: %lld - OpCode: %s\n",
err_print_prefix,
err_print_prefix,
EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_SRC_PORT),
@@ -552,7 +552,7 @@
#define IO7__POX_SPLCMPLT__REM_BYTE_COUNT__M (0xfff)
printk("%s Split Completion Error:\n"
- "%s Source (Bus:Dev:Func): %ld:%ld:%ld\n",
+ "%s Source (Bus:Dev:Func): %lld:%lld:%lld\n",
err_print_prefix,
err_print_prefix,
EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_BUS),
@@ -589,22 +589,23 @@
static void
marvel_print_pox_trans_sum(u64 trans_sum)
{
- char *pcix_cmd[] = { "Interrupt Acknowledge",
- "Special Cycle",
- "I/O Read",
- "I/O Write",
- "Reserved",
- "Reserved / Device ID Message",
- "Memory Read",
- "Memory Write",
- "Reserved / Alias to Memory Read Block",
- "Reserved / Alias to Memory Write Block",
- "Configuration Read",
- "Configuration Write",
- "Memory Read Multiple / Split Completion",
- "Dual Address Cycle",
- "Memory Read Line / Memory Read Block",
- "Memory Write and Invalidate / Memory Write Block"
+ static const char * const pcix_cmd[] = {
+ "Interrupt Acknowledge",
+ "Special Cycle",
+ "I/O Read",
+ "I/O Write",
+ "Reserved",
+ "Reserved / Device ID Message",
+ "Memory Read",
+ "Memory Write",
+ "Reserved / Alias to Memory Read Block",
+ "Reserved / Alias to Memory Write Block",
+ "Configuration Read",
+ "Configuration Write",
+ "Memory Read Multiple / Split Completion",
+ "Dual Address Cycle",
+ "Memory Read Line / Memory Read Block",
+ "Memory Write and Invalidate / Memory Write Block"
};
#define IO7__POX_TRANSUM__PCI_ADDR__S (0)
diff --git a/arch/alpha/kernel/err_titan.c b/arch/alpha/kernel/err_titan.c
index f7ed97c..c3b3781 100644
--- a/arch/alpha/kernel/err_titan.c
+++ b/arch/alpha/kernel/err_titan.c
@@ -75,8 +75,12 @@
int status = MCHK_DISPOSITION_REPORT;
#ifdef CONFIG_VERBOSE_MCHECK
- char *serror_src[] = {"GPCI", "APCI", "AGP HP", "AGP LP"};
- char *serror_cmd[] = {"DMA Read", "DMA RMW", "SGTE Read", "Reserved"};
+ static const char * const serror_src[] = {
+ "GPCI", "APCI", "AGP HP", "AGP LP"
+ };
+ static const char * const serror_cmd[] = {
+ "DMA Read", "DMA RMW", "SGTE Read", "Reserved"
+ };
#endif /* CONFIG_VERBOSE_MCHECK */
#define TITAN__PCHIP_SERROR__LOST_UECC (1UL << 0)
@@ -140,14 +144,15 @@
int status = MCHK_DISPOSITION_REPORT;
#ifdef CONFIG_VERBOSE_MCHECK
- char *perror_cmd[] = { "Interrupt Acknowledge", "Special Cycle",
- "I/O Read", "I/O Write",
- "Reserved", "Reserved",
- "Memory Read", "Memory Write",
- "Reserved", "Reserved",
- "Configuration Read", "Configuration Write",
- "Memory Read Multiple", "Dual Address Cycle",
- "Memory Read Line","Memory Write and Invalidate"
+ static const char * const perror_cmd[] = {
+ "Interrupt Acknowledge", "Special Cycle",
+ "I/O Read", "I/O Write",
+ "Reserved", "Reserved",
+ "Memory Read", "Memory Write",
+ "Reserved", "Reserved",
+ "Configuration Read", "Configuration Write",
+ "Memory Read Multiple", "Dual Address Cycle",
+ "Memory Read Line", "Memory Write and Invalidate"
};
#endif /* CONFIG_VERBOSE_MCHECK */
@@ -273,11 +278,11 @@
int cmd, len;
unsigned long addr;
- char *agperror_cmd[] = { "Read (low-priority)", "Read (high-priority)",
- "Write (low-priority)",
- "Write (high-priority)",
- "Reserved", "Reserved",
- "Flush", "Fence"
+ static const char * const agperror_cmd[] = {
+ "Read (low-priority)", "Read (high-priority)",
+ "Write (low-priority)", "Write (high-priority)",
+ "Reserved", "Reserved",
+ "Flush", "Fence"
};
#endif /* CONFIG_VERBOSE_MCHECK */
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index fb58150..547e8b8 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
@@ -69,7 +68,6 @@
{
struct mm_struct *mm;
- lock_kernel();
mm = current->mm;
mm->end_code = bss_start + bss_len;
mm->start_brk = bss_start + bss_len;
@@ -78,7 +76,6 @@
printk("set_program_attributes(%lx %lx %lx %lx)\n",
text_start, text_len, bss_start, bss_len);
#endif
- unlock_kernel();
return 0;
}
@@ -252,7 +249,7 @@
retval = user_path(pathname, &path);
if (!retval) {
- retval = do_osf_statfs(&path buffer, bufsiz);
+ retval = do_osf_statfs(&path, buffer, bufsiz);
path_put(&path);
}
return retval;
@@ -517,7 +514,6 @@
long error;
int __user *min_buf_size_ptr;
- lock_kernel();
switch (code) {
case PL_SET:
if (get_user(error, &args->set.nbytes))
@@ -547,7 +543,6 @@
error = -EOPNOTSUPP;
break;
};
- unlock_kernel();
return error;
}
@@ -594,7 +589,7 @@
SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
{
- char *sysinfo_table[] = {
+ const char *sysinfo_table[] = {
utsname()->sysname,
utsname()->nodename,
utsname()->release,
@@ -606,7 +601,7 @@
"dummy", /* secure RPC domain */
};
unsigned long offset;
- char *res;
+ const char *res;
long len, err = -EINVAL;
offset = command-1;
diff --git a/arch/alpha/kernel/pci-sysfs.c b/arch/alpha/kernel/pci-sysfs.c
index 738fc82..b899e95 100644
--- a/arch/alpha/kernel/pci-sysfs.c
+++ b/arch/alpha/kernel/pci-sysfs.c
@@ -66,7 +66,7 @@
{
struct pci_dev *pdev = to_pci_dev(container_of(kobj,
struct device, kobj));
- struct resource *res = (struct resource *)attr->private;
+ struct resource *res = attr->private;
enum pci_mmap_state mmap_type;
struct pci_bus_region bar;
int i;
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index 51c39fa..1cc4968 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -241,20 +241,20 @@
static int alpha_perf_event_set_period(struct perf_event *event,
struct hw_perf_event *hwc, int idx)
{
- long left = atomic64_read(&hwc->period_left);
+ long left = local64_read(&hwc->period_left);
long period = hwc->sample_period;
int ret = 0;
if (unlikely(left <= -period)) {
left = period;
- atomic64_set(&hwc->period_left, left);
+ local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
if (unlikely(left <= 0)) {
left += period;
- atomic64_set(&hwc->period_left, left);
+ local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
@@ -269,7 +269,7 @@
if (left > (long)alpha_pmu->pmc_max_period[idx])
left = alpha_pmu->pmc_max_period[idx];
- atomic64_set(&hwc->prev_count, (unsigned long)(-left));
+ local64_set(&hwc->prev_count, (unsigned long)(-left));
alpha_write_pmc(idx, (unsigned long)(-left));
@@ -300,14 +300,14 @@
long delta;
again:
- prev_raw_count = atomic64_read(&hwc->prev_count);
+ prev_raw_count = local64_read(&hwc->prev_count);
new_raw_count = alpha_read_pmc(idx);
- if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)
goto again;
- delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;
+ delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;
/* It is possible on very rare occasions that the PMC has overflowed
* but the interrupt is yet to come. Detect and fix this situation.
@@ -316,8 +316,8 @@
delta += alpha_pmu->pmc_max_period[idx] + 1;
}
- atomic64_add(delta, &event->count);
- atomic64_sub(delta, &hwc->period_left);
+ local64_add(delta, &event->count);
+ local64_sub(delta, &hwc->period_left);
return new_raw_count;
}
@@ -402,14 +402,13 @@
struct hw_perf_event *hwc = &pe->hw;
int idx = hwc->idx;
- if (cpuc->current_idx[j] != PMC_NO_INDEX) {
- cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
- continue;
+ if (cpuc->current_idx[j] == PMC_NO_INDEX) {
+ alpha_perf_event_set_period(pe, hwc, idx);
+ cpuc->current_idx[j] = idx;
}
- alpha_perf_event_set_period(pe, hwc, idx);
- cpuc->current_idx[j] = idx;
- cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
+ if (!(hwc->state & PERF_HES_STOPPED))
+ cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
}
cpuc->config = cpuc->event[0]->hw.config_base;
}
@@ -420,12 +419,13 @@
* - this function is called from outside this module via the pmu struct
* returned from perf event initialisation.
*/
-static int alpha_pmu_enable(struct perf_event *event)
+static int alpha_pmu_add(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
int n0;
int ret;
- unsigned long flags;
+ unsigned long irq_flags;
/*
* The Sparc code has the IRQ disable first followed by the perf
@@ -435,8 +435,8 @@
* nevertheless we disable the PMCs first to enable a potential
* final PMI to occur before we disable interrupts.
*/
- perf_disable();
- local_irq_save(flags);
+ perf_pmu_disable(event->pmu);
+ local_irq_save(irq_flags);
/* Default to error to be returned */
ret = -EAGAIN;
@@ -455,8 +455,12 @@
}
}
- local_irq_restore(flags);
- perf_enable();
+ hwc->state = PERF_HES_UPTODATE;
+ if (!(flags & PERF_EF_START))
+ hwc->state |= PERF_HES_STOPPED;
+
+ local_irq_restore(irq_flags);
+ perf_pmu_enable(event->pmu);
return ret;
}
@@ -467,15 +471,15 @@
* - this function is called from outside this module via the pmu struct
* returned from perf event initialisation.
*/
-static void alpha_pmu_disable(struct perf_event *event)
+static void alpha_pmu_del(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
- unsigned long flags;
+ unsigned long irq_flags;
int j;
- perf_disable();
- local_irq_save(flags);
+ perf_pmu_disable(event->pmu);
+ local_irq_save(irq_flags);
for (j = 0; j < cpuc->n_events; j++) {
if (event == cpuc->event[j]) {
@@ -501,8 +505,8 @@
}
}
- local_irq_restore(flags);
- perf_enable();
+ local_irq_restore(irq_flags);
+ perf_pmu_enable(event->pmu);
}
@@ -514,13 +518,44 @@
}
-static void alpha_pmu_unthrottle(struct perf_event *event)
+static void alpha_pmu_stop(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ if (!(hwc->state & PERF_HES_STOPPED)) {
+ cpuc->idx_mask &= ~(1UL<<hwc->idx);
+ hwc->state |= PERF_HES_STOPPED;
+ }
+
+ if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+ alpha_perf_event_update(event, hwc, hwc->idx, 0);
+ hwc->state |= PERF_HES_UPTODATE;
+ }
+
+ if (cpuc->enabled)
+ wrperfmon(PERFMON_CMD_DISABLE, (1UL<<hwc->idx));
+}
+
+
+static void alpha_pmu_start(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+ if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
+ return;
+
+ if (flags & PERF_EF_RELOAD) {
+ WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+ alpha_perf_event_set_period(event, hwc, hwc->idx);
+ }
+
+ hwc->state = 0;
+
cpuc->idx_mask |= 1UL<<hwc->idx;
- wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
+ if (cpuc->enabled)
+ wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
}
@@ -636,45 +671,42 @@
if (!hwc->sample_period) {
hwc->sample_period = alpha_pmu->pmc_max_period[0];
hwc->last_period = hwc->sample_period;
- atomic64_set(&hwc->period_left, hwc->sample_period);
+ local64_set(&hwc->period_left, hwc->sample_period);
}
return 0;
}
-static const struct pmu pmu = {
- .enable = alpha_pmu_enable,
- .disable = alpha_pmu_disable,
- .read = alpha_pmu_read,
- .unthrottle = alpha_pmu_unthrottle,
-};
-
-
/*
* Main entry point to initialise a HW performance event.
*/
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+static int alpha_pmu_event_init(struct perf_event *event)
{
int err;
+ switch (event->attr.type) {
+ case PERF_TYPE_RAW:
+ case PERF_TYPE_HARDWARE:
+ case PERF_TYPE_HW_CACHE:
+ break;
+
+ default:
+ return -ENOENT;
+ }
+
if (!alpha_pmu)
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
/* Do the real initialisation work. */
err = __hw_perf_event_init(event);
- if (err)
- return ERR_PTR(err);
-
- return &pmu;
+ return err;
}
-
-
/*
* Main entry point - enable HW performance counters.
*/
-void hw_perf_enable(void)
+static void alpha_pmu_enable(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -700,7 +732,7 @@
* Main entry point - disable HW performance counters.
*/
-void hw_perf_disable(void)
+static void alpha_pmu_disable(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -713,6 +745,17 @@
wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
}
+static struct pmu pmu = {
+ .pmu_enable = alpha_pmu_enable,
+ .pmu_disable = alpha_pmu_disable,
+ .event_init = alpha_pmu_event_init,
+ .add = alpha_pmu_add,
+ .del = alpha_pmu_del,
+ .start = alpha_pmu_start,
+ .stop = alpha_pmu_stop,
+ .read = alpha_pmu_read,
+};
+
/*
* Main entry point - don't know when this is called but it
@@ -766,7 +809,7 @@
wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
/* la_ptr is the counter that overflowed. */
- if (unlikely(la_ptr >= perf_max_events)) {
+ if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) {
/* This should never occur! */
irq_err_count++;
pr_warning("PMI: silly index %ld\n", la_ptr);
@@ -807,7 +850,7 @@
/* Interrupts coming too quickly; "throttle" the
* counter, i.e., disable it for a little while.
*/
- cpuc->idx_mask &= ~(1UL<<idx);
+ alpha_pmu_stop(event, 0);
}
}
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
@@ -837,6 +880,7 @@
/* And set up PMU specification */
alpha_pmu = &ev67_pmu;
- perf_max_events = alpha_pmu->num_pmcs;
+
+ perf_pmu_register(&pmu);
}
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 842dba3..3ec3506 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -356,7 +356,7 @@
dest[27] = pt->r27;
dest[28] = pt->r28;
dest[29] = pt->gp;
- dest[30] = rdusp();
+ dest[30] = ti == current_thread_info() ? rdusp() : ti->pcb.usp;
dest[31] = pt->pc;
/* Once upon a time this was the PS value. Which is stupid
diff --git a/arch/alpha/kernel/proto.h b/arch/alpha/kernel/proto.h
index 3d2627e..d3e52d3 100644
--- a/arch/alpha/kernel/proto.h
+++ b/arch/alpha/kernel/proto.h
@@ -156,9 +156,6 @@
/* es1888.c */
extern void es1888_init(void);
-/* ns87312.c */
-extern void ns87312_enable_ide(long ide_base);
-
/* ../lib/fpreg.c */
extern void alpha_write_fp_reg (unsigned long reg, unsigned long val);
extern unsigned long alpha_read_fp_reg (unsigned long reg);
diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c
index 0932dbb..6f7feb5 100644
--- a/arch/alpha/kernel/signal.c
+++ b/arch/alpha/kernel/signal.c
@@ -41,46 +41,20 @@
/*
* The OSF/1 sigprocmask calling sequence is different from the
* C sigprocmask() sequence..
- *
- * how:
- * 1 - SIG_BLOCK
- * 2 - SIG_UNBLOCK
- * 3 - SIG_SETMASK
- *
- * We change the range to -1 .. 1 in order to let gcc easily
- * use the conditional move instructions.
- *
- * Note that we don't need to acquire the kernel lock for SMP
- * operation, as all of this is local to this thread.
*/
-SYSCALL_DEFINE3(osf_sigprocmask, int, how, unsigned long, newmask,
- struct pt_regs *, regs)
+SYSCALL_DEFINE2(osf_sigprocmask, int, how, unsigned long, newmask)
{
- unsigned long oldmask = -EINVAL;
+ sigset_t oldmask;
+ sigset_t mask;
+ unsigned long res;
- if ((unsigned long)how-1 <= 2) {
- long sign = how-2; /* -1 .. 1 */
- unsigned long block, unblock;
-
- newmask &= _BLOCKABLE;
- spin_lock_irq(&current->sighand->siglock);
- oldmask = current->blocked.sig[0];
-
- unblock = oldmask & ~newmask;
- block = oldmask | newmask;
- if (!sign)
- block = unblock;
- if (sign <= 0)
- newmask = block;
- if (_NSIG_WORDS > 1 && sign > 0)
- sigemptyset(&current->blocked);
- current->blocked.sig[0] = newmask;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- regs->r0 = 0; /* special no error return */
+ siginitset(&mask, newmask & _BLOCKABLE);
+ res = sigprocmask(how, &mask, &oldmask);
+ if (!res) {
+ force_successful_syscall_return();
+ res = oldmask.sig[0];
}
- return oldmask;
+ return res;
}
SYSCALL_DEFINE3(osf_sigaction, int, sig,
@@ -94,9 +68,9 @@
old_sigset_t mask;
if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
__get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
- __get_user(new_ka.sa.sa_flags, &act->sa_flags))
+ __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+ __get_user(mask, &act->sa_mask))
return -EFAULT;
- __get_user(mask, &act->sa_mask);
siginitset(&new_ka.sa.sa_mask, mask);
new_ka.ka_restorer = NULL;
}
@@ -106,9 +80,9 @@
if (!ret && oact) {
if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
__put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
- __put_user(old_ka.sa.sa_flags, &oact->sa_flags))
+ __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+ __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
return -EFAULT;
- __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
}
return ret;
@@ -144,8 +118,7 @@
/*
* Atomically swap in the new signal mask, and wait for a signal.
*/
-asmlinkage int
-do_sigsuspend(old_sigset_t mask, struct pt_regs *regs, struct switch_stack *sw)
+SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
mask &= _BLOCKABLE;
spin_lock_irq(&current->sighand->siglock);
@@ -154,41 +127,6 @@
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
- /* Indicate EINTR on return from any possible signal handler,
- which will not come back through here, but via sigreturn. */
- regs->r0 = EINTR;
- regs->r19 = 1;
-
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- set_thread_flag(TIF_RESTORE_SIGMASK);
- return -ERESTARTNOHAND;
-}
-
-asmlinkage int
-do_rt_sigsuspend(sigset_t __user *uset, size_t sigsetsize,
- struct pt_regs *regs, struct switch_stack *sw)
-{
- sigset_t set;
-
- /* XXX: Don't preclude handling different sized sigset_t's. */
- if (sigsetsize != sizeof(sigset_t))
- return -EINVAL;
- if (copy_from_user(&set, uset, sizeof(set)))
- return -EFAULT;
-
- sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->saved_sigmask = current->blocked;
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- /* Indicate EINTR on return from any possible signal handler,
- which will not come back through here, but via sigreturn. */
- regs->r0 = EINTR;
- regs->r19 = 1;
-
current->state = TASK_INTERRUPTIBLE;
schedule();
set_thread_flag(TIF_RESTORE_SIGMASK);
@@ -239,6 +177,8 @@
unsigned long usp;
long i, err = __get_user(regs->pc, &sc->sc_pc);
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
sw->r26 = (unsigned long) ret_from_sys_call;
err |= __get_user(regs->r0, sc->sc_regs+0);
@@ -591,7 +531,6 @@
regs->pc -= 4;
break;
case ERESTART_RESTARTBLOCK:
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
regs->r0 = EINTR;
break;
}
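
The last two hunks relocate the do_no_restart_syscall reset from the
ERESTART_RESTARTBLOCK case into sigcontext restore, so the callback is
cleared on every signal return rather than only on that error path. For
context, here is a hedged sketch of the restart_block convention being
manipulated; my_restart and sys_example_wait are illustrative names,
not functions from this patch.

	static long my_restart(struct restart_block *restart)
	{
		/* resume the interrupted operation from saved state */
		return 0;
	}

	static long sys_example_wait(void)
	{
		if (signal_pending(current)) {
			struct restart_block *restart =
				&current_thread_info()->restart_block;

			restart->fn = my_restart;
			return -ERESTART_RESTARTBLOCK;
		}
		return 0;
	}
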
diff --git a/arch/alpha/kernel/srm_env.c b/arch/alpha/kernel/srm_env.c
index 4afc1a1..f0df3fb 100644
--- a/arch/alpha/kernel/srm_env.c
+++ b/arch/alpha/kernel/srm_env.c
@@ -87,7 +87,7 @@
srm_env_t *entry;
char *page;
- entry = (srm_env_t *)m->private;
+ entry = m->private;
page = (char *)__get_free_page(GFP_USER);
if (!page)
return -ENOMEM;
diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c
index affd0f3..14c8898 100644
--- a/arch/alpha/kernel/sys_cabriolet.c
+++ b/arch/alpha/kernel/sys_cabriolet.c
@@ -33,7 +33,7 @@
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
-
+#include "pc873xx.h"
/* Note mask bit is true for DISABLED irqs. */
static unsigned long cached_irq_mask = ~0UL;
@@ -236,17 +236,30 @@
}
static inline void __init
+cabriolet_enable_ide(void)
+{
+ if (pc873xx_probe() == -1) {
+ printk(KERN_ERR "Probing for PC873xx Super IO chip failed.\n");
+ } else {
+ printk(KERN_INFO "Found %s Super IO chip at 0x%x\n",
+ pc873xx_get_model(), pc873xx_get_base());
+
+ pc873xx_enable_ide();
+ }
+}
+
+static inline void __init
cabriolet_init_pci(void)
{
common_init_pci();
- ns87312_enable_ide(0x398);
+ cabriolet_enable_ide();
}
static inline void __init
cia_cab_init_pci(void)
{
cia_init_pci();
- ns87312_enable_ide(0x398);
+ cabriolet_enable_ide();
}
/*
diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c
index 2304648..4da596b 100644
--- a/arch/alpha/kernel/sys_takara.c
+++ b/arch/alpha/kernel/sys_takara.c
@@ -29,7 +29,7 @@
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
-
+#include "pc873xx.h"
/* Note mask bit is true for DISABLED irqs. */
static unsigned long cached_irq_mask[2] = { -1, -1 };
@@ -264,7 +264,14 @@
alpha_mv.pci_map_irq = takara_map_irq_srm;
cia_init_pci();
- ns87312_enable_ide(0x26e);
+
+ if (pc873xx_probe() == -1) {
+ printk(KERN_ERR "Probing for PC873xx Super IO chip failed.\n");
+ } else {
+ printk(KERN_INFO "Found %s Super IO chip at 0x%x\n",
+ pc873xx_get_model(), pc873xx_get_base());
+ pc873xx_enable_ide();
+ }
}
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index 09acb78..a6a1de9 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -58,7 +58,7 @@
.quad sys_open /* 45 */
.quad alpha_ni_syscall
.quad sys_getxgid
- .quad osf_sigprocmask
+ .quad sys_osf_sigprocmask
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 50 */
.quad sys_acct
@@ -512,6 +512,9 @@
.quad sys_pwritev
.quad sys_rt_tgsigqueueinfo
.quad sys_perf_event_open
+ .quad sys_fanotify_init
+ .quad sys_fanotify_mark /* 495 */
+ .quad sys_prlimit64
.size sys_call_table, . - sys_call_table
.type sys_call_table, @object
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index eacceb2..0f1d849 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -41,7 +41,7 @@
#include <linux/init.h>
#include <linux/bcd.h>
#include <linux/profile.h>
-#include <linux/perf_event.h>
+#include <linux/irq_work.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -83,25 +83,25 @@
unsigned long est_cycle_freq;
-#ifdef CONFIG_PERF_EVENTS
+#ifdef CONFIG_IRQ_WORK
-DEFINE_PER_CPU(u8, perf_event_pending);
+DEFINE_PER_CPU(u8, irq_work_pending);
-#define set_perf_event_pending_flag() __get_cpu_var(perf_event_pending) = 1
-#define test_perf_event_pending() __get_cpu_var(perf_event_pending)
-#define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0
+#define set_irq_work_pending_flag() __get_cpu_var(irq_work_pending) = 1
+#define test_irq_work_pending() __get_cpu_var(irq_work_pending)
+#define clear_irq_work_pending() __get_cpu_var(irq_work_pending) = 0
-void set_perf_event_pending(void)
+void set_irq_work_pending(void)
{
- set_perf_event_pending_flag();
+ set_irq_work_pending_flag();
}
-#else /* CONFIG_PERF_EVENTS */
+#else /* CONFIG_IRQ_WORK */
-#define test_perf_event_pending() 0
-#define clear_perf_event_pending()
+#define test_irq_work_pending() 0
+#define clear_irq_work_pending()
-#endif /* CONFIG_PERF_EVENTS */
+#endif /* CONFIG_IRQ_WORK */
static inline __u32 rpcc(void)
@@ -191,16 +191,16 @@
write_sequnlock(&xtime_lock);
+ if (test_irq_work_pending()) {
+ clear_irq_work_pending();
+ irq_work_run();
+ }
+
#ifndef CONFIG_SMP
while (nticks--)
update_process_times(user_mode(get_irq_regs()));
#endif
- if (test_perf_event_pending()) {
- clear_perf_event_pending();
- perf_event_do_pending();
- }
-
return IRQ_HANDLED;
}
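
The timer hunks above are the arch half of the new generic irq_work
mechanism: a per-CPU pending flag plus an irq_work_run() call from the
timer interrupt. The client half looks roughly like this (a hedged
sketch; my_callback and my_setup are illustrative names):

	#include <linux/irq_work.h>

	static void my_callback(struct irq_work *work)
	{
		/* runs in hard-irq context shortly after being queued */
	}

	static struct irq_work my_work;

	static void my_setup(void)
	{
		init_irq_work(&my_work, my_callback);
		irq_work_queue(&my_work);	/* safe even from NMI context */
	}
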
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index b14f015..0414e02 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -13,7 +13,6 @@
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/delay.h>
-#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
@@ -623,7 +622,6 @@
return;
}
- lock_kernel();
printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
pc, va, opcode, reg);
do_exit(SIGSEGV);
@@ -646,7 +644,6 @@
* Yikes! No one to forward the exception to.
* Since the registers are in a weird format, dump them ourselves.
*/
- lock_kernel();
printk("%s(%d): unhandled unaligned exception\n",
current->comm, task_pid_nr(current));
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 9295110..9103904 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -23,6 +23,7 @@
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZO
select HAVE_KERNEL_LZMA
+ select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select PERF_USE_VMALLOC
select HAVE_REGS_AND_STACK_ACCESS_API
@@ -271,7 +272,6 @@
bool "Atmel AT91"
select ARCH_REQUIRE_GPIOLIB
select HAVE_CLK
- select ARCH_USES_GETTIMEOFFSET
help
This enables support for systems based on the Atmel AT91RM9200,
AT91SAM9 and AT91CAP9 processors.
@@ -1051,6 +1051,32 @@
ACTLR register. Note that setting specific bits in the ACTLR register
may not be available in non-secure mode.
+config ARM_ERRATA_742230
+ bool "ARM errata: DMB operation may be faulty"
+ depends on CPU_V7 && SMP
+ help
+ This option enables the workaround for the 742230 Cortex-A9
+ (r1p0..r2p2) erratum. Under rare circumstances, a DMB instruction
+ between two write operations may not ensure the correct visibility
+ ordering of the two writes. This workaround sets a specific bit in
+ the diagnostic register of the Cortex-A9 which causes the DMB
+ instruction to behave as a DSB, ensuring the correct behaviour of
+ the two writes.
+
+config ARM_ERRATA_742231
+ bool "ARM errata: Incorrect hazard handling in the SCU may lead to data corruption"
+ depends on CPU_V7 && SMP
+ help
+ This option enables the workaround for the 742231 Cortex-A9
+ (r2p0..r2p2) erratum. Under certain conditions, specific to the
+ Cortex-A9 MPCore micro-architecture, two CPUs working in SMP mode,
+ accessing some data located in the same cache line, may get corrupted
+ data due to bad handling of the address hazard when the line gets
+ replaced from one of the CPUs at the same time as another CPU is
+ accessing it. This workaround sets specific bits in the diagnostic
+ register of the Cortex-A9 which reduces the linefill issuing
+ capabilities of the processor.
+
config PL310_ERRATA_588369
bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
depends on CACHE_L2X0 && ARCH_OMAP4
@@ -1076,6 +1102,20 @@
invalidated are not, resulting in an incoherency in the system page
tables. The workaround changes the TLB flushing routines to invalidate
entries regardless of the ASID.
+
+config ARM_ERRATA_743622
+ bool "ARM errata: Faulty hazard checking in the Store Buffer may lead to data corruption"
+ depends on CPU_V7
+ help
+ This option enables the workaround for the 743622 Cortex-A9
+ (r2p0..r2p2) erratum. Under very rare conditions, a faulty
+ optimisation in the Cortex-A9 Store Buffer may lead to data
+ corruption. This workaround sets a specific bit in the diagnostic
+ register of the Cortex-A9 which disables the Store Buffer
+ optimisation, preventing the defect from occurring. This has no
+ visible impact on the overall performance or power consumption of the
+ processor.
+
endmenu
source "arch/arm/common/Kconfig"
@@ -1576,95 +1616,6 @@
0xf8000000. This assumes the zImage being placed in the first 128MB
from start of memory.
-config ZRELADDR
- hex "Physical address of the decompressed kernel image"
- depends on !AUTO_ZRELADDR
- default 0x00008000 if ARCH_BCMRING ||\
- ARCH_CNS3XXX ||\
- ARCH_DOVE ||\
- ARCH_EBSA110 ||\
- ARCH_FOOTBRIDGE ||\
- ARCH_INTEGRATOR ||\
- ARCH_IOP13XX ||\
- ARCH_IOP33X ||\
- ARCH_IXP2000 ||\
- ARCH_IXP23XX ||\
- ARCH_IXP4XX ||\
- ARCH_KIRKWOOD ||\
- ARCH_KS8695 ||\
- ARCH_LOKI ||\
- ARCH_MMP ||\
- ARCH_MV78XX0 ||\
- ARCH_NOMADIK ||\
- ARCH_NUC93X ||\
- ARCH_NS9XXX ||\
- ARCH_ORION5X ||\
- ARCH_SPEAR3XX ||\
- ARCH_SPEAR6XX ||\
- ARCH_U8500 ||\
- ARCH_VERSATILE ||\
- ARCH_W90X900
- default 0x08008000 if ARCH_MX1 ||\
- ARCH_SHARK
- default 0x10008000 if ARCH_MSM ||\
- ARCH_OMAP1 ||\
- ARCH_RPC
- default 0x20008000 if ARCH_S5P6440 ||\
- ARCH_S5P6442 ||\
- ARCH_S5PC100 ||\
- ARCH_S5PV210
- default 0x30008000 if ARCH_S3C2410 ||\
- ARCH_S3C2400 ||\
- ARCH_S3C2412 ||\
- ARCH_S3C2416 ||\
- ARCH_S3C2440 ||\
- ARCH_S3C2443
- default 0x40008000 if ARCH_STMP378X ||\
- ARCH_STMP37XX ||\
- ARCH_SH7372 ||\
- ARCH_SH7377
- default 0x50008000 if ARCH_S3C64XX ||\
- ARCH_SH7367
- default 0x60008000 if ARCH_VEXPRESS
- default 0x80008000 if ARCH_MX25 ||\
- ARCH_MX3 ||\
- ARCH_NETX ||\
- ARCH_OMAP2PLUS ||\
- ARCH_PNX4008
- default 0x90008000 if ARCH_MX5 ||\
- ARCH_MX91231
- default 0xa0008000 if ARCH_IOP32X ||\
- ARCH_PXA ||\
- MACH_MX27
- default 0xc0008000 if ARCH_LH7A40X ||\
- MACH_MX21
- default 0xf0008000 if ARCH_AAEC2000 ||\
- ARCH_L7200
- default 0xc0028000 if ARCH_CLPS711X
- default 0x70008000 if ARCH_AT91 && (ARCH_AT91CAP9 || ARCH_AT91SAM9G45)
- default 0x20008000 if ARCH_AT91 && !(ARCH_AT91CAP9 || ARCH_AT91SAM9G45)
- default 0xc0008000 if ARCH_DAVINCI && ARCH_DAVINCI_DA8XX
- default 0x80008000 if ARCH_DAVINCI && !ARCH_DAVINCI_DA8XX
- default 0x00008000 if ARCH_EP93XX && EP93XX_SDCE3_SYNC_PHYS_OFFSET
- default 0xc0008000 if ARCH_EP93XX && EP93XX_SDCE0_PHYS_OFFSET
- default 0xd0008000 if ARCH_EP93XX && EP93XX_SDCE1_PHYS_OFFSET
- default 0xe0008000 if ARCH_EP93XX && EP93XX_SDCE2_PHYS_OFFSET
- default 0xf0008000 if ARCH_EP93XX && EP93XX_SDCE3_ASYNC_PHYS_OFFSET
- default 0x00008000 if ARCH_GEMINI && GEMINI_MEM_SWAP
- default 0x10008000 if ARCH_GEMINI && !GEMINI_MEM_SWAP
- default 0x70008000 if ARCH_REALVIEW && REALVIEW_HIGH_PHYS_OFFSET
- default 0x00008000 if ARCH_REALVIEW && !REALVIEW_HIGH_PHYS_OFFSET
- default 0xc0208000 if ARCH_SA1100 && SA1111
- default 0xc0008000 if ARCH_SA1100 && !SA1111
- default 0x30108000 if ARCH_S3C2410 && PM_H1940
- default 0x28E08000 if ARCH_U300 && MACH_U300_SINGLE_RAM
- default 0x48008000 if ARCH_U300 && !MACH_U300_SINGLE_RAM
- help
- ZRELADDR is the physical address where the decompressed kernel
- image will be placed. ZRELADDR has to be specified when the
- assumption of AUTO_ZRELADDR is not valid, or when ZBOOT_ROM is
- selected.
-
endmenu
menu "CPU Power Management"
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index f705213..4a590f4 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -14,16 +14,18 @@
MKIMAGE := $(srctree)/scripts/mkuboot.sh
ifneq ($(MACHINE),)
--include $(srctree)/$(MACHINE)/Makefile.boot
+include $(srctree)/$(MACHINE)/Makefile.boot
endif
# Note: the following conditions must always be true:
+# ZRELADDR == virt_to_phys(PAGE_OFFSET + TEXT_OFFSET)
# PARAMS_PHYS must be within 4MB of ZRELADDR
# INITRD_PHYS must be in RAM
+ZRELADDR := $(zreladdr-y)
PARAMS_PHYS := $(params_phys-y)
INITRD_PHYS := $(initrd_phys-y)
-export INITRD_PHYS PARAMS_PHYS
+export ZRELADDR INITRD_PHYS PARAMS_PHYS
targets := Image zImage xipImage bootpImage uImage
@@ -65,7 +67,7 @@
ifeq ($(CONFIG_ZBOOT_ROM),y)
$(obj)/uImage: LOADADDR=$(CONFIG_ZBOOT_ROM_TEXT)
else
-$(obj)/uImage: LOADADDR=$(CONFIG_ZRELADDR)
+$(obj)/uImage: LOADADDR=$(ZRELADDR)
endif
ifeq ($(CONFIG_THUMB2_KERNEL),y)
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 68775e3..65a7c1c 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -79,6 +79,10 @@
EXTRA_CFLAGS := -fpic -fno-builtin
EXTRA_AFLAGS := -Wa,-march=all
+# Supply ZRELADDR to the decompressor via a linker symbol.
+ifneq ($(CONFIG_AUTO_ZRELADDR),y)
+LDFLAGS_vmlinux := --defsym zreladdr=$(ZRELADDR)
+endif
ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
LDFLAGS_vmlinux += --be8
endif
@@ -112,5 +116,5 @@
$(obj)/font.c: $(FONTC)
$(call cmd,shipped)
-$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile .config
+$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile $(KCONFIG_CONFIG)
@sed "$(SEDFLAGS)" < $< > $@
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 6af9907..6825c34 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -177,7 +177,7 @@
and r4, pc, #0xf8000000
add r4, r4, #TEXT_OFFSET
#else
- ldr r4, =CONFIG_ZRELADDR
+ ldr r4, =zreladdr
#endif
subs r0, r0, r1 @ calculate the delta offset
diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c
index 6c09135..1bec96e 100644
--- a/arch/arm/common/it8152.c
+++ b/arch/arm/common/it8152.c
@@ -263,6 +263,22 @@
return 0;
}
+int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
+{
+ dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
+ __func__, dma_addr, size);
+ return (dev->bus == &pci_bus_type) &&
+ ((dma_addr + size - PHYS_OFFSET) >= SZ_64M);
+}
+
+int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+ if (mask >= PHYS_OFFSET + SZ_64M - 1)
+ return 0;
+
+ return -EIO;
+}
+
int __init it8152_pci_setup(int nr, struct pci_sys_data *sys)
{
it8152_io.start = IT8152_IO_BASE + 0x12000;
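
The dma_set_coherent_mask() hook above refuses any coherent mask that
does not reach the bridge's 64MB DMA window. Seen from a driver, the
negotiation is just the standard call (a hedged sketch; the 26-bit mask
is an arbitrary example value):

	#include <linux/dma-mapping.h>

	static int my_probe(struct device *dev)
	{
		/* fails with -EIO when the platform hook rejects the mask */
		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(26)))
			return -EIO;

		/* coherent allocations now fit the bus window */
		return 0;
	}
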
diff --git a/arch/arm/configs/omap_4430sdp_defconfig b/arch/arm/configs/omap_4430sdp_defconfig
index 63e0c2d..14c1e18 100644
--- a/arch/arm/configs/omap_4430sdp_defconfig
+++ b/arch/arm/configs/omap_4430sdp_defconfig
@@ -13,6 +13,9 @@
# CONFIG_BLK_DEV_BSG is not set
CONFIG_ARCH_OMAP=y
CONFIG_ARCH_OMAP4=y
+# CONFIG_ARCH_OMAP2PLUS_TYPICAL is not set
+# CONFIG_ARCH_OMAP2 is not set
+# CONFIG_ARCH_OMAP3 is not set
# CONFIG_OMAP_MUX is not set
CONFIG_OMAP_32K_TIMER=y
CONFIG_OMAP_DM_TIMER=y
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index c226fe1..c568da7 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -288,15 +288,7 @@
* DMA access and 1 if the buffer needs to be bounced.
*
*/
-#ifdef CONFIG_SA1111
extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
-#else
-static inline int dma_needs_bounce(struct device *dev, dma_addr_t addr,
- size_t size)
-{
- return 0;
-}
-#endif
/*
* The DMA API, implemented by dmabounce.c. See below for descriptions.
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index 48837e6..c4aa4e8 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -12,18 +12,6 @@
#ifndef __ARM_PERF_EVENT_H__
#define __ARM_PERF_EVENT_H__
-/*
- * NOP: on *most* (read: all supported) ARM platforms, the performance
- * counter interrupts are regular interrupts and not an NMI. This
- * means that when we receive the interrupt we can call
- * perf_event_do_pending() that handles all of the work with
- * interrupts enabled.
- */
-static inline void
-set_perf_event_pending(void)
-{
-}
-
/* ARM performance counters start from 1 (in the cp15 accesses) so use the
* same indexes here for consistency. */
#define PERF_EVENT_INDEX_OFFSET 1
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index ab68cf1..e90b167 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -317,6 +317,10 @@
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE)
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index d02cfb6..c891eb7 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -393,6 +393,9 @@
#define __NR_perf_event_open (__NR_SYSCALL_BASE+364)
#define __NR_recvmmsg (__NR_SYSCALL_BASE+365)
#define __NR_accept4 (__NR_SYSCALL_BASE+366)
+#define __NR_fanotify_init (__NR_SYSCALL_BASE+367)
+#define __NR_fanotify_mark (__NR_SYSCALL_BASE+368)
+#define __NR_prlimit64 (__NR_SYSCALL_BASE+369)
/*
* The following SWIs are ARM private.
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index afeb71f..5c26ecc 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -376,6 +376,9 @@
CALL(sys_perf_event_open)
/* 365 */ CALL(sys_recvmmsg)
CALL(sys_accept4)
+ CALL(sys_fanotify_init)
+ CALL(sys_fanotify_mark)
+ CALL(sys_prlimit64)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index f05a35a..7885722 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -48,6 +48,8 @@
beq no_work_pending
mov r0, sp @ 'regs'
mov r2, why @ 'syscall'
+ tst r1, #_TIF_SIGPENDING @ delivering a signal?
+ movne why, #0 @ prevent further restarts
bl do_notify_resume
b ret_slow_syscall @ Check work again
@@ -418,11 +420,13 @@
sys_sigreturn_wrapper:
add r0, sp, #S_OFF
+ mov why, #0 @ prevent syscall restart handling
b sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)
sys_rt_sigreturn_wrapper:
add r0, sp, #S_OFF
+ mov why, #0 @ prevent syscall restart handling
b sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c
index 56418f9..33c7077 100644
--- a/arch/arm/kernel/etm.c
+++ b/arch/arm/kernel/etm.c
@@ -230,7 +230,7 @@
etb_lock(t);
}
-static void sysrq_etm_dump(int key, struct tty_struct *tty)
+static void sysrq_etm_dump(int key)
{
dev_dbg(tracer.dev, "Dumping ETB buffer\n");
etm_dump();
diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c
index 8bccbfa..2c1f005 100644
--- a/arch/arm/kernel/kprobes-decode.c
+++ b/arch/arm/kernel/kprobes-decode.c
@@ -1162,11 +1162,12 @@
{
/*
* MSR : cccc 0011 0x10 xxxx xxxx xxxx xxxx xxxx
- * Undef : cccc 0011 0x00 xxxx xxxx xxxx xxxx xxxx
+ * Undef : cccc 0011 0100 xxxx xxxx xxxx xxxx xxxx
* ALU op with S bit and Rd == 15 :
* cccc 001x xxx1 xxxx 1111 xxxx xxxx xxxx
*/
- if ((insn & 0x0f900000) == 0x03200000 || /* MSR & Undef */
+ if ((insn & 0x0fb00000) == 0x03200000 || /* MSR */
+ (insn & 0x0ff00000) == 0x03400000 || /* Undef */
(insn & 0x0e10f000) == 0x0210f000) /* ALU s-bit, R15 */
return INSN_REJECTED;
@@ -1177,7 +1178,7 @@
* *S (bit 20) updates condition codes
* ADC/SBC/RSC reads the C flag
*/
- insn &= 0xfff00fff; /* Rn = r0, Rd = r0 */
+ insn &= 0xffff0fff; /* Rd = r0 */
asi->insn[0] = insn;
asi->insn_handler = (insn & (1 << 20)) ? /* S-bit */
emulate_alu_imm_rwflags : emulate_alu_imm_rflags;
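
The widened decode masks key on bits 27..20 of the instruction word:
0x0fb00000/0x03200000 matches both immediate MSR forms (cccc 0011 0x10),
while 0x0ff00000/0x03400000 isolates the undefined 0011 0100 pattern.
One worked check, using an opcode picked purely for illustration:

	/* 0xe321f0d3 is "msr cpsr_c, #0xd3" (assumed sample encoding) */
	unsigned int insn = 0xe321f0d3;

	int is_msr   = (insn & 0x0fb00000) == 0x03200000;	/* true  */
	int is_undef = (insn & 0x0ff00000) == 0x03400000;	/* false */
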
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 417c392..49643b1 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -123,6 +123,12 @@
}
EXPORT_SYMBOL_GPL(armpmu_get_max_events);
+int perf_num_counters(void)
+{
+ return armpmu_get_max_events();
+}
+EXPORT_SYMBOL_GPL(perf_num_counters);
+
#define HW_OP_UNSUPPORTED 0xFFFF
#define C(_x) \
@@ -221,27 +227,6 @@
}
static void
-armpmu_disable(struct perf_event *event)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- struct hw_perf_event *hwc = &event->hw;
- int idx = hwc->idx;
-
- WARN_ON(idx < 0);
-
- clear_bit(idx, cpuc->active_mask);
- armpmu->disable(hwc, idx);
-
- barrier();
-
- armpmu_event_update(event, hwc, idx);
- cpuc->events[idx] = NULL;
- clear_bit(idx, cpuc->used_mask);
-
- perf_event_update_userpage(event);
-}
-
-static void
armpmu_read(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
@@ -254,13 +239,44 @@
}
static void
-armpmu_unthrottle(struct perf_event *event)
+armpmu_stop(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
+ if (!armpmu)
+ return;
+
+ /*
+ * ARM pmu always has to update the counter, so ignore
+ * PERF_EF_UPDATE, see comments in armpmu_start().
+ */
+ if (!(hwc->state & PERF_HES_STOPPED)) {
+ armpmu->disable(hwc, hwc->idx);
+ barrier(); /* why? */
+ armpmu_event_update(event, hwc, hwc->idx);
+ hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ }
+}
+
+static void
+armpmu_start(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (!armpmu)
+ return;
+
+ /*
+ * ARM pmu always has to reprogram the period, so ignore
+ * PERF_EF_RELOAD, see the comment below.
+ */
+ if (flags & PERF_EF_RELOAD)
+ WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+ hwc->state = 0;
/*
* Set the period again. Some counters can't be stopped, so when we
- * were throttled we simply disabled the IRQ source and the counter
+ * were stopped we simply disabled the IRQ source and the counter
* may have been left counting. If we don't do this step then we may
* get an interrupt too soon or *way* too late if the overflow has
* happened since disabling.
@@ -269,14 +285,33 @@
armpmu->enable(hwc, hwc->idx);
}
+static void
+armpmu_del(struct perf_event *event, int flags)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ WARN_ON(idx < 0);
+
+ clear_bit(idx, cpuc->active_mask);
+ armpmu_stop(event, PERF_EF_UPDATE);
+ cpuc->events[idx] = NULL;
+ clear_bit(idx, cpuc->used_mask);
+
+ perf_event_update_userpage(event);
+}
+
static int
-armpmu_enable(struct perf_event *event)
+armpmu_add(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx;
int err = 0;
+ perf_pmu_disable(event->pmu);
+
/* If we don't have a space for the counter then finish early. */
idx = armpmu->get_event_idx(cpuc, hwc);
if (idx < 0) {
@@ -293,25 +328,19 @@
cpuc->events[idx] = event;
set_bit(idx, cpuc->active_mask);
- /* Set the period for the event. */
- armpmu_event_set_period(event, hwc, idx);
-
- /* Enable the event. */
- armpmu->enable(hwc, idx);
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ if (flags & PERF_EF_START)
+ armpmu_start(event, PERF_EF_RELOAD);
/* Propagate our changes to the userspace mapping. */
perf_event_update_userpage(event);
out:
+ perf_pmu_enable(event->pmu);
return err;
}
-static struct pmu pmu = {
- .enable = armpmu_enable,
- .disable = armpmu_disable,
- .unthrottle = armpmu_unthrottle,
- .read = armpmu_read,
-};
+static struct pmu pmu;
static int
validate_event(struct cpu_hw_events *cpuc,
@@ -319,8 +348,8 @@
{
struct hw_perf_event fake_event = event->hw;
- if (event->pmu && event->pmu != &pmu)
- return 0;
+ if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
+ return 1;
return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
}
@@ -491,20 +520,29 @@
return err;
}
-const struct pmu *
-hw_perf_event_init(struct perf_event *event)
+static int armpmu_event_init(struct perf_event *event)
{
int err = 0;
+ switch (event->attr.type) {
+ case PERF_TYPE_RAW:
+ case PERF_TYPE_HARDWARE:
+ case PERF_TYPE_HW_CACHE:
+ break;
+
+ default:
+ return -ENOENT;
+ }
+
if (!armpmu)
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
event->destroy = hw_perf_event_destroy;
if (!atomic_inc_not_zero(&active_events)) {
- if (atomic_read(&active_events) > perf_max_events) {
+ if (atomic_read(&active_events) > armpmu->num_events) {
atomic_dec(&active_events);
- return ERR_PTR(-ENOSPC);
+ return -ENOSPC;
}
mutex_lock(&pmu_reserve_mutex);
@@ -518,17 +556,16 @@
}
if (err)
- return ERR_PTR(err);
+ return err;
err = __hw_perf_event_init(event);
if (err)
hw_perf_event_destroy(event);
- return err ? ERR_PTR(err) : &pmu;
+ return err;
}
-void
-hw_perf_enable(void)
+static void armpmu_enable(struct pmu *pmu)
{
/* Enable all of the perf events on hardware. */
int idx;
@@ -549,13 +586,23 @@
armpmu->start();
}
-void
-hw_perf_disable(void)
+static void armpmu_disable(struct pmu *pmu)
{
if (armpmu)
armpmu->stop();
}
+static struct pmu pmu = {
+ .pmu_enable = armpmu_enable,
+ .pmu_disable = armpmu_disable,
+ .event_init = armpmu_event_init,
+ .add = armpmu_add,
+ .del = armpmu_del,
+ .start = armpmu_start,
+ .stop = armpmu_stop,
+ .read = armpmu_read,
+};
+
/*
* ARMv6 Performance counter handling code.
*
@@ -1041,11 +1088,11 @@
/*
* Handle the pending perf events.
*
- * Note: this call *must* be run with interrupts enabled. For
- * platforms that can have the PMU interrupts raised as a PMI, this
+ * Note: this call *must* be run with interrupts disabled. For
+ * platforms that can have the PMU interrupts raised as an NMI, this
* will not work.
*/
- perf_event_do_pending();
+ irq_work_run();
return IRQ_HANDLED;
}
@@ -2017,11 +2064,11 @@
/*
* Handle the pending perf events.
*
- * Note: this call *must* be run with interrupts enabled. For
- * platforms that can have the PMU interrupts raised as a PMI, this
+ * Note: this call *must* be run with interrupts disabled. For
+ * platforms that can have the PMU interrupts raised as an NMI, this
* will not work.
*/
- perf_event_do_pending();
+ irq_work_run();
return IRQ_HANDLED;
}
@@ -2389,7 +2436,7 @@
armpmu->disable(hwc, idx);
}
- perf_event_do_pending();
+ irq_work_run();
/*
* Re-enable the PMU.
@@ -2716,7 +2763,7 @@
armpmu->disable(hwc, idx);
}
- perf_event_do_pending();
+ irq_work_run();
/*
* Re-enable the PMU.
@@ -2933,14 +2980,12 @@
armpmu = &armv6pmu;
memcpy(armpmu_perf_cache_map, armv6_perf_cache_map,
sizeof(armv6_perf_cache_map));
- perf_max_events = armv6pmu.num_events;
break;
case 0xB020: /* ARM11mpcore */
armpmu = &armv6mpcore_pmu;
memcpy(armpmu_perf_cache_map,
armv6mpcore_perf_cache_map,
sizeof(armv6mpcore_perf_cache_map));
- perf_max_events = armv6mpcore_pmu.num_events;
break;
case 0xC080: /* Cortex-A8 */
armv7pmu.id = ARM_PERF_PMU_ID_CA8;
@@ -2952,7 +2997,6 @@
/* Reset PMNC and read the nb of CNTx counters
supported */
armv7pmu.num_events = armv7_reset_read_pmnc();
- perf_max_events = armv7pmu.num_events;
break;
case 0xC090: /* Cortex-A9 */
armv7pmu.id = ARM_PERF_PMU_ID_CA9;
@@ -2964,7 +3008,6 @@
/* Reset PMNC and read the nb of CNTx counters
supported */
armv7pmu.num_events = armv7_reset_read_pmnc();
- perf_max_events = armv7pmu.num_events;
break;
}
/* Intel CPUs [xscale]. */
@@ -2975,13 +3018,11 @@
armpmu = &xscale1pmu;
memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
sizeof(xscale_perf_cache_map));
- perf_max_events = xscale1pmu.num_events;
break;
case 2:
armpmu = &xscale2pmu;
memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
sizeof(xscale_perf_cache_map));
- perf_max_events = xscale2pmu.num_events;
break;
}
}
@@ -2991,9 +3032,10 @@
arm_pmu_names[armpmu->id], armpmu->num_events);
} else {
pr_info("no hardware support available\n");
- perf_max_events = -1;
}
+ perf_pmu_register(&pmu);
+
return 0;
}
arch_initcall(init_hw_perf_events);
@@ -3001,13 +3043,6 @@
/*
* Callchain handling code.
*/
-static inline void
-callchain_store(struct perf_callchain_entry *entry,
- u64 ip)
-{
- if (entry->nr < PERF_MAX_STACK_DEPTH)
- entry->ip[entry->nr++] = ip;
-}
/*
* The registers we're interested in are at the end of the variable
@@ -3039,7 +3074,7 @@
if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
return NULL;
- callchain_store(entry, buftail.lr);
+ perf_callchain_store(entry, buftail.lr);
/*
* Frame pointers should strictly progress back up the stack
@@ -3051,16 +3086,11 @@
return buftail.fp - 1;
}
-static void
-perf_callchain_user(struct pt_regs *regs,
- struct perf_callchain_entry *entry)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
struct frame_tail *tail;
- callchain_store(entry, PERF_CONTEXT_USER);
-
- if (!user_mode(regs))
- regs = task_pt_regs(current);
tail = (struct frame_tail *)regs->ARM_fp - 1;
@@ -3078,56 +3108,18 @@
void *data)
{
struct perf_callchain_entry *entry = data;
- callchain_store(entry, fr->pc);
+ perf_callchain_store(entry, fr->pc);
return 0;
}
-static void
-perf_callchain_kernel(struct pt_regs *regs,
- struct perf_callchain_entry *entry)
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
struct stackframe fr;
- callchain_store(entry, PERF_CONTEXT_KERNEL);
fr.fp = regs->ARM_fp;
fr.sp = regs->ARM_sp;
fr.lr = regs->ARM_lr;
fr.pc = regs->ARM_pc;
walk_stackframe(&fr, callchain_trace, entry);
}
-
-static void
-perf_do_callchain(struct pt_regs *regs,
- struct perf_callchain_entry *entry)
-{
- int is_user;
-
- if (!regs)
- return;
-
- is_user = user_mode(regs);
-
- if (!current || !current->pid)
- return;
-
- if (is_user && current->state != TASK_RUNNING)
- return;
-
- if (!is_user)
- perf_callchain_kernel(regs, entry);
-
- if (current->mm)
- perf_callchain_user(regs, entry);
-}
-
-static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
-
-struct perf_callchain_entry *
-perf_callchain(struct pt_regs *regs)
-{
- struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
-
- entry->nr = 0;
- perf_do_callchain(regs, entry);
- return entry;
-}
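
The armpmu_add()/armpmu_start()/armpmu_stop() conversion above tracks
each counter through the new PERF_HES_* flags: an added event starts
out STOPPED and UPTODATE, start() clears the flags and reprograms the
period, and stop() disables the counter and folds the final count back
exactly once. A condensed, hedged sketch of that state machine, with
hw_disable()/hw_read_and_fold() standing in for the real arch helpers:

	static void sketch_stop(struct hw_perf_event *hwc)
	{
		if (!(hwc->state & PERF_HES_STOPPED)) {
			hw_disable(hwc);
			hw_read_and_fold(hwc);	/* count is now up to date */
			hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
		}
	}

	static void sketch_start(struct hw_perf_event *hwc, int flags)
	{
		if (flags & PERF_EF_RELOAD)
			WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

		hwc->state = 0;
		/* set the period again and re-enable the counter */
	}
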
diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c
index 753c0d3..c67b47f 100644
--- a/arch/arm/mach-at91/at91sam9g45.c
+++ b/arch/arm/mach-at91/at91sam9g45.c
@@ -121,8 +121,8 @@
.pmc_mask = 1 << AT91SAM9G45_ID_SSC1,
.type = CLK_TYPE_PERIPHERAL,
};
-static struct clk tcb_clk = {
- .name = "tcb_clk",
+static struct clk tcb0_clk = {
+ .name = "tcb0_clk",
.pmc_mask = 1 << AT91SAM9G45_ID_TCB,
.type = CLK_TYPE_PERIPHERAL,
};
@@ -192,6 +192,14 @@
.parent = &uhphs_clk,
};
+/* One additional fake clock for second TC block */
+static struct clk tcb1_clk = {
+ .name = "tcb1_clk",
+ .pmc_mask = 0,
+ .type = CLK_TYPE_PERIPHERAL,
+ .parent = &tcb0_clk,
+};
+
static struct clk *periph_clocks[] __initdata = {
&pioA_clk,
&pioB_clk,
@@ -208,7 +216,7 @@
&spi1_clk,
&ssc0_clk,
&ssc1_clk,
- &tcb_clk,
+ &tcb0_clk,
&pwm_clk,
&tsc_clk,
&dma_clk,
@@ -221,6 +229,7 @@
&mmc1_clk,
// irq0
&ohci_clk,
+ &tcb1_clk,
};
/*
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index 809114d..1276bab 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -46,7 +46,7 @@
.end = AT91_BASE_SYS + AT91_DMA + SZ_512 - 1,
.flags = IORESOURCE_MEM,
},
- [2] = {
+ [1] = {
.start = AT91SAM9G45_ID_DMA,
.end = AT91SAM9G45_ID_DMA,
.flags = IORESOURCE_IRQ,
@@ -426,7 +426,7 @@
.sda_is_open_drain = 1,
.scl_pin = AT91_PIN_PA21,
.scl_is_open_drain = 1,
- .udelay = 2, /* ~100 kHz */
+ .udelay = 5, /* ~100 kHz */
};
static struct platform_device at91sam9g45_twi0_device = {
@@ -440,7 +440,7 @@
.sda_is_open_drain = 1,
.scl_pin = AT91_PIN_PB11,
.scl_is_open_drain = 1,
- .udelay = 2, /* ~100 kHz */
+ .udelay = 5, /* ~100 kHz */
};
static struct platform_device at91sam9g45_twi1_device = {
@@ -835,9 +835,9 @@
static void __init at91_add_device_tc(void)
{
/* this chip has one clock and irq for all six TC channels */
- at91_clock_associate("tcb_clk", &at91sam9g45_tcb0_device.dev, "t0_clk");
+ at91_clock_associate("tcb0_clk", &at91sam9g45_tcb0_device.dev, "t0_clk");
platform_device_register(&at91sam9g45_tcb0_device);
- at91_clock_associate("tcb_clk", &at91sam9g45_tcb1_device.dev, "t0_clk");
+ at91_clock_associate("tcb1_clk", &at91sam9g45_tcb1_device.dev, "t0_clk");
platform_device_register(&at91sam9g45_tcb1_device);
}
#else
diff --git a/arch/arm/mach-at91/board-sam9261ek.c b/arch/arm/mach-at91/board-sam9261ek.c
index c4c8865..65eb094 100644
--- a/arch/arm/mach-at91/board-sam9261ek.c
+++ b/arch/arm/mach-at91/board-sam9261ek.c
@@ -93,11 +93,12 @@
.start = AT91_PIN_PC11,
.end = AT91_PIN_PC11,
.flags = IORESOURCE_IRQ
+ | IORESOURCE_IRQ_LOWEDGE | IORESOURCE_IRQ_HIGHEDGE,
}
};
static struct dm9000_plat_data dm9000_platdata = {
- .flags = DM9000_PLATF_16BITONLY,
+ .flags = DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM,
};
static struct platform_device dm9000_device = {
@@ -168,17 +169,6 @@
/*
- * MCI (SD/MMC)
- */
-static struct at91_mmc_data __initdata ek_mmc_data = {
- .wire4 = 1,
-// .det_pin = ... not connected
-// .wp_pin = ... not connected
-// .vcc_pin = ... not connected
-};
-
-
-/*
* NAND flash
*/
static struct mtd_partition __initdata ek_nand_partition[] = {
@@ -246,6 +236,10 @@
at91_add_device_nand(&ek_nand_data);
}
+/*
+ * SPI related devices
+ */
+#if defined(CONFIG_SPI_ATMEL) || defined(CONFIG_SPI_ATMEL_MODULE)
/*
* ADS7846 Touchscreen
@@ -356,6 +350,19 @@
#endif
};
+#else /* CONFIG_SPI_ATMEL_* */
+/* spi0 and mmc/sd share the same PIO pins: cannot be used at the same time */
+
+/*
+ * MCI (SD/MMC)
+ * det_pin, wp_pin and vcc_pin are not connected
+ */
+static struct at91_mmc_data __initdata ek_mmc_data = {
+ .wire4 = 1,
+};
+
+#endif /* CONFIG_SPI_ATMEL_* */
+
/*
* LCD Controller
diff --git a/arch/arm/mach-at91/clock.c b/arch/arm/mach-at91/clock.c
index 7f7da43..7525cee 100644
--- a/arch/arm/mach-at91/clock.c
+++ b/arch/arm/mach-at91/clock.c
@@ -501,7 +501,8 @@
int __init clk_register(struct clk *clk)
{
if (clk_is_peripheral(clk)) {
- clk->parent = &mck;
+ if (!clk->parent)
+ clk->parent = &mck;
clk->mode = pmc_periph_mode;
list_add_tail(&clk->node, &clocks);
}
diff --git a/arch/arm/mach-at91/include/mach/system.h b/arch/arm/mach-at91/include/mach/system.h
index c80e090..ee8db15 100644
--- a/arch/arm/mach-at91/include/mach/system.h
+++ b/arch/arm/mach-at91/include/mach/system.h
@@ -28,17 +28,16 @@
static inline void arch_idle(void)
{
-#ifndef CONFIG_DEBUG_KERNEL
/*
* Disable the processor clock. The processor will be automatically
* re-enabled by an interrupt or by a reset.
*/
at91_sys_write(AT91_PMC_SCDR, AT91_PMC_PCK);
-#else
+#ifndef CONFIG_CPU_ARM920T
/*
* Set the processor (CP15) into 'Wait for Interrupt' mode.
- * Unlike disabling the processor clock via the PMC (above)
- * this allows the processor to be woken via JTAG.
+ * Post-RM9200 processors need this in conjunction with the above
+ * to save power when idle.
*/
cpu_do_idle();
#endif
diff --git a/arch/arm/mach-bcmring/dma.c b/arch/arm/mach-bcmring/dma.c
index 29c0a91..77eb35c 100644
--- a/arch/arm/mach-bcmring/dma.c
+++ b/arch/arm/mach-bcmring/dma.c
@@ -691,7 +691,7 @@
memset(&gDMA, 0, sizeof(gDMA));
- init_MUTEX_LOCKED(&gDMA.lock);
+ sema_init(&gDMA.lock, 0);
init_waitqueue_head(&gDMA.freeChannelQ);
/* Initialize the Hardware */
@@ -1574,7 +1574,7 @@
{
memset(memMap, 0, sizeof(*memMap));
- init_MUTEX(&memMap->lock);
+ sema_init(&memMap->lock, 1);
return 0;
}
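
init_MUTEX() and init_MUTEX_LOCKED() were on their way out of the tree;
the replacement spells out the initial semaphore count explicitly. The
mapping, as a hedged sketch:

	#include <linux/semaphore.h>

	static struct semaphore sem;

	static void sketch_init(void)
	{
		sema_init(&sem, 1);	/* was init_MUTEX(): available */
		sema_init(&sem, 0);	/* was init_MUTEX_LOCKED(): held */
	}
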
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 3d996b6..9be261b 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -769,8 +769,7 @@
.virtual = SRAM_VIRT,
.pfn = __phys_to_pfn(0x00010000),
.length = SZ_32K,
- /* MT_MEMORY_NONCACHED requires supersection alignment */
- .type = MT_DEVICE,
+ .type = MT_MEMORY_NONCACHED,
},
};
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 6b6f4c6..7781e35 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -969,8 +969,7 @@
.virtual = SRAM_VIRT,
.pfn = __phys_to_pfn(0x00010000),
.length = SZ_32K,
- /* MT_MEMORY_NONCACHED requires supersection alignment */
- .type = MT_DEVICE,
+ .type = MT_MEMORY_NONCACHED,
},
};
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index 40fec31..5e5b0a7 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -653,8 +653,7 @@
.virtual = SRAM_VIRT,
.pfn = __phys_to_pfn(0x00008000),
.length = SZ_16K,
- /* MT_MEMORY_NONCACHED requires supersection alignment */
- .type = MT_DEVICE,
+ .type = MT_MEMORY_NONCACHED,
},
};
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index e4a3df1..26e8a9c 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -737,8 +737,7 @@
.virtual = SRAM_VIRT,
.pfn = __phys_to_pfn(0x00010000),
.length = SZ_32K,
- /* MT_MEMORY_NONCACHED requires supersection alignment */
- .type = MT_DEVICE,
+ .type = MT_MEMORY_NONCACHED,
},
};
diff --git a/arch/arm/mach-dove/include/mach/io.h b/arch/arm/mach-dove/include/mach/io.h
index 3b3e472..eb4936f 100644
--- a/arch/arm/mach-dove/include/mach/io.h
+++ b/arch/arm/mach-dove/include/mach/io.h
@@ -13,8 +13,8 @@
#define IO_SPACE_LIMIT 0xffffffff
-#define __io(a) ((void __iomem *)(((a) - DOVE_PCIE0_IO_PHYS_BASE) +\
- DOVE_PCIE0_IO_VIRT_BASE))
-#define __mem_pci(a) (a)
+#define __io(a) ((void __iomem *)(((a) - DOVE_PCIE0_IO_BUS_BASE) + \
+ DOVE_PCIE0_IO_VIRT_BASE))
+#define __mem_pci(a) (a)
#endif
diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c
index 8bf3cec..4566bd1 100644
--- a/arch/arm/mach-ep93xx/clock.c
+++ b/arch/arm/mach-ep93xx/clock.c
@@ -560,4 +560,4 @@
clkdev_add_table(clocks, ARRAY_SIZE(clocks));
return 0;
}
-arch_initcall(ep93xx_clock_init);
+postcore_initcall(ep93xx_clock_init);
diff --git a/arch/arm/mach-ep93xx/dma-m2p.c b/arch/arm/mach-ep93xx/dma-m2p.c
index 8904ca4..a696d35 100644
--- a/arch/arm/mach-ep93xx/dma-m2p.c
+++ b/arch/arm/mach-ep93xx/dma-m2p.c
@@ -276,7 +276,7 @@
v &= ~(M2P_CONTROL_STALL_IRQ_EN | M2P_CONTROL_NFB_IRQ_EN);
m2p_set_control(ch, v);
- while (m2p_channel_state(ch) == STATE_ON)
+ while (m2p_channel_state(ch) >= STATE_ON)
cpu_relax();
m2p_set_control(ch, 0x0);
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index c5c0369..2f7e272 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -122,6 +122,7 @@
select IMX_HAVE_PLATFORM_IMX_I2C
select IMX_HAVE_PLATFORM_IMX_UART
select IMX_HAVE_PLATFORM_MXC_NAND
+ select MXC_ULPI if USB_ULPI
help
Include support for Eukrea CPUIMX27 platform. This includes
specific configurations for the module and its peripherals.
diff --git a/arch/arm/mach-imx/mach-cpuimx27.c b/arch/arm/mach-imx/mach-cpuimx27.c
index 575ff1a..6830afd 100644
--- a/arch/arm/mach-imx/mach-cpuimx27.c
+++ b/arch/arm/mach-imx/mach-cpuimx27.c
@@ -259,7 +259,7 @@
i2c_register_board_info(0, eukrea_cpuimx27_i2c_devices,
ARRAY_SIZE(eukrea_cpuimx27_i2c_devices));
- imx27_add_i2c_imx1(&cpuimx27_i2c1_data);
+ imx27_add_i2c_imx0(&cpuimx27_i2c1_data);
platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
@@ -279,13 +279,13 @@
#if defined(CONFIG_USB_ULPI)
if (otg_mode_host) {
otg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
- USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
+ ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT);
mxc_register_device(&mxc_otg_host, &otg_pdata);
}
usbh2_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
- USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
+ ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT);
mxc_register_device(&mxc_usbh2, &usbh2_pdata);
#endif
diff --git a/arch/arm/mach-imx/mach-pca100.c b/arch/arm/mach-imx/mach-pca100.c
index a389d11..23c9e1f 100644
--- a/arch/arm/mach-imx/mach-pca100.c
+++ b/arch/arm/mach-imx/mach-pca100.c
@@ -419,13 +419,13 @@
#if defined(CONFIG_USB_ULPI)
if (otg_mode_host) {
otg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
- USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
+ ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT);
mxc_register_device(&mxc_otg_host, &otg_pdata);
}
usbh2_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
- USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
+ ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT);
mxc_register_device(&mxc_usbh2, &usbh2_pdata);
#endif
diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c
index 61cd4d6..24498a9 100644
--- a/arch/arm/mach-ixp4xx/common-pci.c
+++ b/arch/arm/mach-ixp4xx/common-pci.c
@@ -503,6 +503,14 @@
return pci_scan_bus(sys->busnr, &ixp4xx_ops, sys);
}
+int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+ if (mask >= SZ_64M - 1)
+ return 0;
+
+ return -EIO;
+}
+
EXPORT_SYMBOL(ixp4xx_pci_read);
EXPORT_SYMBOL(ixp4xx_pci_write);
diff --git a/arch/arm/mach-ixp4xx/include/mach/hardware.h b/arch/arm/mach-ixp4xx/include/mach/hardware.h
index f91ca6d..8138371 100644
--- a/arch/arm/mach-ixp4xx/include/mach/hardware.h
+++ b/arch/arm/mach-ixp4xx/include/mach/hardware.h
@@ -26,6 +26,8 @@
#define PCIBIOS_MAX_MEM 0x4BFFFFFF
#endif
+#define ARCH_HAS_DMA_SET_COHERENT_MASK
+
#define pcibios_assign_all_busses() 1
/* Register locations and bits */
diff --git a/arch/arm/mach-kirkwood/include/mach/kirkwood.h b/arch/arm/mach-kirkwood/include/mach/kirkwood.h
index 93fc2ec..6e924b3 100644
--- a/arch/arm/mach-kirkwood/include/mach/kirkwood.h
+++ b/arch/arm/mach-kirkwood/include/mach/kirkwood.h
@@ -38,7 +38,7 @@
#define KIRKWOOD_PCIE1_IO_PHYS_BASE 0xf3000000
#define KIRKWOOD_PCIE1_IO_VIRT_BASE 0xfef00000
-#define KIRKWOOD_PCIE1_IO_BUS_BASE 0x00000000
+#define KIRKWOOD_PCIE1_IO_BUS_BASE 0x00100000
#define KIRKWOOD_PCIE1_IO_SIZE SZ_1M
#define KIRKWOOD_PCIE_IO_PHYS_BASE 0xf2000000
diff --git a/arch/arm/mach-kirkwood/pcie.c b/arch/arm/mach-kirkwood/pcie.c
index 55e7f00..513ad31 100644
--- a/arch/arm/mach-kirkwood/pcie.c
+++ b/arch/arm/mach-kirkwood/pcie.c
@@ -117,7 +117,7 @@
* IORESOURCE_IO
*/
pp->res[0].name = "PCIe 0 I/O Space";
- pp->res[0].start = KIRKWOOD_PCIE_IO_PHYS_BASE;
+ pp->res[0].start = KIRKWOOD_PCIE_IO_BUS_BASE;
pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE_IO_SIZE - 1;
pp->res[0].flags = IORESOURCE_IO;
@@ -139,7 +139,7 @@
* IORESOURCE_IO
*/
pp->res[0].name = "PCIe 1 I/O Space";
- pp->res[0].start = KIRKWOOD_PCIE1_IO_PHYS_BASE;
+ pp->res[0].start = KIRKWOOD_PCIE1_IO_BUS_BASE;
pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE1_IO_SIZE - 1;
pp->res[0].flags = IORESOURCE_IO;
diff --git a/arch/arm/mach-mmp/include/mach/system.h b/arch/arm/mach-mmp/include/mach/system.h
index 4f5b0e0..1a8a25e 100644
--- a/arch/arm/mach-mmp/include/mach/system.h
+++ b/arch/arm/mach-mmp/include/mach/system.h
@@ -9,6 +9,8 @@
#ifndef __ASM_MACH_SYSTEM_H
#define __ASM_MACH_SYSTEM_H
+#include <mach/cputype.h>
+
static inline void arch_idle(void)
{
cpu_do_idle();
@@ -16,6 +18,9 @@
static inline void arch_reset(char mode, const char *cmd)
{
- cpu_reset(0);
+ if (cpu_is_pxa168())
+ cpu_reset(0xffff0000);
+ else
+ cpu_reset(0);
}
#endif /* __ASM_MACH_SYSTEM_H */
diff --git a/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c b/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c
index 91931dc..4aaadc7 100644
--- a/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c
+++ b/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c
@@ -215,7 +215,7 @@
* Add platform devices present on this baseboard and init
* them from CPU side as far as required to use them later on
*/
-void __init eukrea_mbimxsd_baseboard_init(void)
+void __init eukrea_mbimxsd25_baseboard_init(void)
{
if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads,
ARRAY_SIZE(eukrea_mbimxsd_pads)))
diff --git a/arch/arm/mach-mx25/mach-cpuimx25.c b/arch/arm/mach-mx25/mach-cpuimx25.c
index 56b2e26..e064bb3 100644
--- a/arch/arm/mach-mx25/mach-cpuimx25.c
+++ b/arch/arm/mach-mx25/mach-cpuimx25.c
@@ -138,7 +138,7 @@
#if defined(CONFIG_USB_ULPI)
if (otg_mode_host) {
otg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
- USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
+ ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT);
mxc_register_device(&mxc_otg, &otg_pdata);
}
@@ -147,8 +147,8 @@
if (!otg_mode_host)
mxc_register_device(&otg_udc_device, &otg_device_pdata);
-#ifdef CONFIG_MACH_EUKREA_MBIMXSD_BASEBOARD
- eukrea_mbimxsd_baseboard_init();
+#ifdef CONFIG_MACH_EUKREA_MBIMXSD25_BASEBOARD
+ eukrea_mbimxsd25_baseboard_init();
#endif
}
diff --git a/arch/arm/mach-mx3/clock-imx35.c b/arch/arm/mach-mx3/clock-imx35.c
index d3af0fd..7a62e74 100644
--- a/arch/arm/mach-mx3/clock-imx35.c
+++ b/arch/arm/mach-mx3/clock-imx35.c
@@ -155,7 +155,7 @@
aad = &clk_consumer[(pdr0 >> 16) & 0xf];
if (aad->sel)
- fref = fref * 2 / 3;
+ fref = fref * 3 / 4;
return fref / aad->arm;
}
@@ -164,7 +164,7 @@
{
unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0);
struct arm_ahb_div *aad;
- unsigned long fref = get_rate_mpll();
+ unsigned long fref = get_rate_arm();
aad = &clk_consumer[(pdr0 >> 16) & 0xf];
@@ -176,16 +176,11 @@
return get_rate_ahb(NULL) >> 1;
}
-static unsigned long get_3_3_div(unsigned long in)
-{
- return (((in >> 3) & 0x7) + 1) * ((in & 0x7) + 1);
-}
-
static unsigned long get_rate_uart(struct clk *clk)
{
unsigned long pdr3 = __raw_readl(CCM_BASE + CCM_PDR3);
unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4);
- unsigned long div = get_3_3_div(pdr4 >> 10);
+ unsigned long div = ((pdr4 >> 10) & 0x3f) + 1;
if (pdr3 & (1 << 14))
return get_rate_arm() / div;
@@ -216,7 +211,7 @@
break;
}
- return rate / get_3_3_div(div);
+ return rate / (div + 1);
}
static unsigned long get_rate_mshc(struct clk *clk)
@@ -270,7 +265,7 @@
else
rate = get_rate_ppll();
- return rate / get_3_3_div((pdr2 >> 16) & 0x3f);
+ return rate / (((pdr2 >> 16) & 0x3f) + 1);
}
static unsigned long get_rate_otg(struct clk *clk)
@@ -283,25 +278,51 @@
else
rate = get_rate_ppll();
- return rate / get_3_3_div((pdr4 >> 22) & 0x3f);
+ return rate / (((pdr4 >> 22) & 0x3f) + 1);
}
static unsigned long get_rate_ipg_per(struct clk *clk)
{
unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0);
unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4);
- unsigned long div1, div2;
+ unsigned long div;
if (pdr0 & (1 << 26)) {
- div1 = (pdr4 >> 19) & 0x7;
- div2 = (pdr4 >> 16) & 0x7;
- return get_rate_arm() / ((div1 + 1) * (div2 + 1));
+ div = (pdr4 >> 16) & 0x3f;
+ return get_rate_arm() / (div + 1);
} else {
- div1 = (pdr0 >> 12) & 0x7;
- return get_rate_ahb(NULL) / div1;
+ div = (pdr0 >> 12) & 0x7;
+ return get_rate_ahb(NULL) / (div + 1);
}
}
+static unsigned long get_rate_hsp(struct clk *clk)
+{
+ unsigned long hsp_podf = (__raw_readl(CCM_BASE + CCM_PDR0) >> 20) & 0x03;
+ unsigned long fref = get_rate_mpll();
+
+ if (fref > 400 * 1000 * 1000) {
+ switch (hsp_podf) {
+ case 0:
+ return fref >> 2;
+ case 1:
+ return fref >> 3;
+ case 2:
+ return fref / 3;
+ }
+ } else {
+ switch (hsp_podf) {
+ case 0:
+ case 2:
+ return fref / 3;
+ case 1:
+ return fref / 6;
+ }
+ }
+
+ return 0;
+}
+
static int clk_cgr_enable(struct clk *clk)
{
u32 reg;
@@ -359,7 +380,7 @@
DEFINE_CLOCK(i2c2_clk, 1, CCM_CGR1, 12, get_rate_ipg_per, NULL);
DEFINE_CLOCK(i2c3_clk, 2, CCM_CGR1, 14, get_rate_ipg_per, NULL);
DEFINE_CLOCK(iomuxc_clk, 0, CCM_CGR1, 16, NULL, NULL);
-DEFINE_CLOCK(ipu_clk, 0, CCM_CGR1, 18, get_rate_ahb, NULL);
+DEFINE_CLOCK(ipu_clk, 0, CCM_CGR1, 18, get_rate_hsp, NULL);
DEFINE_CLOCK(kpp_clk, 0, CCM_CGR1, 20, get_rate_ipg, NULL);
DEFINE_CLOCK(mlb_clk, 0, CCM_CGR1, 22, get_rate_ahb, NULL);
DEFINE_CLOCK(mshc_clk, 0, CCM_CGR1, 24, get_rate_mshc, NULL);
@@ -485,10 +506,10 @@
int __init mx35_clocks_init()
{
- unsigned int ll = 0;
+ unsigned int cgr2 = 3 << 26, cgr3 = 0;
#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
- ll = (3 << 16);
+ cgr2 |= 3 << 16;
#endif
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
@@ -499,8 +520,20 @@
__raw_writel((3 << 18), CCM_BASE + CCM_CGR0);
__raw_writel((3 << 2) | (3 << 4) | (3 << 6) | (3 << 8) | (3 << 16),
CCM_BASE + CCM_CGR1);
- __raw_writel((3 << 26) | ll, CCM_BASE + CCM_CGR2);
- __raw_writel(0, CCM_BASE + CCM_CGR3);
+
+ /*
+ * Check if we came up in internal boot mode. If yes, we need some
+ * extra clocks turned on, otherwise the MX35 boot ROM code will
+ * hang after a watchdog reset.
+ */
+ if (!(__raw_readl(CCM_BASE + CCM_RCSR) & (3 << 10))) {
+ /* Additionally turn on UART1, SCC, and IIM clocks */
+ cgr2 |= 3 << 16 | 3 << 4;
+ cgr3 |= 3 << 2;
+ }
+
+ __raw_writel(cgr2, CCM_BASE + CCM_CGR2);
+ __raw_writel(cgr3, CCM_BASE + CCM_CGR3);
mxc_timer_init(&gpt_clk,
MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), MX35_INT_GPT);
diff --git a/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c b/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c
index 1dc5004..f8f15e3 100644
--- a/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c
+++ b/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c
@@ -216,7 +216,7 @@
* Add platform devices present on this baseboard and init
* them from CPU side as far as required to use them later on
*/
-void __init eukrea_mbimxsd_baseboard_init(void)
+void __init eukrea_mbimxsd35_baseboard_init(void)
{
if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads,
ARRAY_SIZE(eukrea_mbimxsd_pads)))
diff --git a/arch/arm/mach-mx3/mach-cpuimx35.c b/arch/arm/mach-mx3/mach-cpuimx35.c
index 63f970f..2a4f8b7 100644
--- a/arch/arm/mach-mx3/mach-cpuimx35.c
+++ b/arch/arm/mach-mx3/mach-cpuimx35.c
@@ -192,7 +192,7 @@
#if defined(CONFIG_USB_ULPI)
if (otg_mode_host) {
otg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
- USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
+ ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT);
mxc_register_device(&mxc_otg_host, &otg_pdata);
}
@@ -201,8 +201,8 @@
if (!otg_mode_host)
mxc_register_device(&mxc_otg_udc_device, &otg_device_pdata);
-#ifdef CONFIG_MACH_EUKREA_MBIMXSD_BASEBOARD
- eukrea_mbimxsd_baseboard_init();
+#ifdef CONFIG_MACH_EUKREA_MBIMXSD35_BASEBOARD
+ eukrea_mbimxsd35_baseboard_init();
#endif
}
diff --git a/arch/arm/mach-mx5/clock-mx51.c b/arch/arm/mach-mx5/clock-mx51.c
index 6af69de..57c10a9 100644
--- a/arch/arm/mach-mx5/clock-mx51.c
+++ b/arch/arm/mach-mx5/clock-mx51.c
@@ -56,7 +56,7 @@
{
u32 reg;
reg = __raw_readl(clk->enable_reg);
- reg &= ~(MXC_CCM_CCGRx_MOD_OFF << clk->enable_shift);
+ reg &= ~(MXC_CCM_CCGRx_CG_MASK << clk->enable_shift);
__raw_writel(reg, clk->enable_reg);
}
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 63b2d88..88d3a1e 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -25,6 +25,7 @@
obj-$(CONFIG_HOTPLUG_CPU) += omap-hotplug.o
obj-$(CONFIG_ARCH_OMAP4) += omap44xx-smc.o omap4-common.o
+AFLAGS_omap-headsmp.o :=-Wa,-march=armv7-a
AFLAGS_omap44xx-smc.o :=-Wa,-march=armv7-a
# Functions loaded to SRAM
diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c
index 138646d..dfdce2d 100644
--- a/arch/arm/mach-omap2/clock3xxx_data.c
+++ b/arch/arm/mach-omap2/clock3xxx_data.c
@@ -3417,7 +3417,13 @@
struct omap_clk *c;
u32 cpu_clkflg = CK_3XXX;
- if (cpu_is_omap34xx()) {
+ if (cpu_is_omap3517()) {
+ cpu_mask = RATE_IN_3XXX | RATE_IN_3430ES2PLUS;
+ cpu_clkflg |= CK_3517;
+ } else if (cpu_is_omap3505()) {
+ cpu_mask = RATE_IN_3XXX | RATE_IN_3430ES2PLUS;
+ cpu_clkflg |= CK_3505;
+ } else if (cpu_is_omap34xx()) {
cpu_mask = RATE_IN_3XXX;
cpu_clkflg |= CK_343X;
@@ -3432,12 +3438,6 @@
cpu_mask |= RATE_IN_3430ES2PLUS;
cpu_clkflg |= CK_3430ES2;
}
- } else if (cpu_is_omap3517()) {
- cpu_mask = RATE_IN_3XXX | RATE_IN_3430ES2PLUS;
- cpu_clkflg |= CK_3517;
- } else if (cpu_is_omap3505()) {
- cpu_mask = RATE_IN_3XXX | RATE_IN_3430ES2PLUS;
- cpu_clkflg |= CK_3505;
}
if (omap3_has_192mhz_clk())
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index e8256a2..9a879f9 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -284,8 +284,8 @@
default:
omap_revision = OMAP3630_REV_ES1_2;
omap_chip.oc |= CHIP_IS_OMAP3630ES1_2;
- break;
}
+ break;
default:
/* Unknown default to latest silicon rev as default*/
omap_revision = OMAP3630_REV_ES1_2;
diff --git a/arch/arm/mach-omap2/include/mach/entry-macro.S b/arch/arm/mach-omap2/include/mach/entry-macro.S
index 50fd749..06e64e1 100644
--- a/arch/arm/mach-omap2/include/mach/entry-macro.S
+++ b/arch/arm/mach-omap2/include/mach/entry-macro.S
@@ -177,7 +177,10 @@
cmpne \irqnr, \tmp
cmpcs \irqnr, \irqnr
.endm
+#endif
+#endif /* MULTI_OMAP2 */
+#ifdef CONFIG_SMP
/* We assume that irqstat (the raw value of the IRQ acknowledge
* register) is preserved from the macro above.
* If there is an IPI, we immediately signal end of interrupt
@@ -205,8 +208,7 @@
streq \irqstat, [\base, #GIC_CPU_EOI]
cmp \tmp, #0
.endm
-#endif
-#endif /* MULTI_OMAP2 */
+#endif /* CONFIG_SMP */
.macro irq_prio_table
.endm
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index af3c20c..9e9f70e 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -102,8 +102,7 @@
* Send a 'sev' to wake the secondary core from WFE.
* Drain the outstanding writes to memory
*/
- dsb();
- set_event();
+ dsb_sev();
mb();
}
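
dsb_sev() fuses the old dsb(); set_event() pair into one helper so the
barrier and the wakeup cannot drift apart. On ARMv7 it expands to
roughly the following (a hedged sketch, not the exact kernel source):

	static inline void sketch_dsb_sev(void)
	{
		__asm__ __volatile__(
			"dsb\n"		/* drain outstanding writes  */
			"sev"		/* wake cores waiting in WFE */
		);
	}
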
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index fb4994a..7b03426 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -480,7 +480,9 @@
}
/* Disable IO-PAD and IO-CHAIN wakeup */
- if (omap3_has_io_wakeup() && core_next_state < PWRDM_POWER_ON) {
+ if (omap3_has_io_wakeup() &&
+ (per_next_state < PWRDM_POWER_ON ||
+ core_next_state < PWRDM_POWER_ON)) {
prm_clear_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN);
omap3_disable_io_chain();
}
diff --git a/arch/arm/mach-pxa/cpufreq-pxa2xx.c b/arch/arm/mach-pxa/cpufreq-pxa2xx.c
index 268a9bc..58093d9 100644
--- a/arch/arm/mach-pxa/cpufreq-pxa2xx.c
+++ b/arch/arm/mach-pxa/cpufreq-pxa2xx.c
@@ -312,8 +312,7 @@
freqs.cpu = policy->cpu;
if (freq_debug)
- pr_debug(KERN_INFO "Changing CPU frequency to %d Mhz, "
- "(SDRAM %d Mhz)\n",
+ pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n",
freqs.new / 1000, (pxa_freq_settings[idx].div2) ?
(new_freq_mem / 2000) : (new_freq_mem / 1000));
@@ -398,7 +397,7 @@
return 0;
}
-static __init int pxa_cpufreq_init(struct cpufreq_policy *policy)
+static int pxa_cpufreq_init(struct cpufreq_policy *policy)
{
int i;
unsigned int freq;
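
The pr_debug() fix above removes a stray KERN_INFO: the pr_* helpers
already embed their own log level, so passing a second one would print
the level string literally inside the message. A hedged before/after
sketch:

	static void sketch_logging(void)
	{
		/* wrong: the "<6>" level prefix would appear inside the text
		pr_debug(KERN_INFO "Changing CPU frequency\n");
		*/

		/* right: pr_debug() supplies KERN_DEBUG by itself */
		pr_debug("Changing CPU frequency to %d MHz\n", 416);
	}
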
diff --git a/arch/arm/mach-pxa/cpufreq-pxa3xx.c b/arch/arm/mach-pxa/cpufreq-pxa3xx.c
index 27fa329..0a0d0fe 100644
--- a/arch/arm/mach-pxa/cpufreq-pxa3xx.c
+++ b/arch/arm/mach-pxa/cpufreq-pxa3xx.c
@@ -204,7 +204,7 @@
return 0;
}
-static __init int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
+static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
{
int ret = -EINVAL;
diff --git a/arch/arm/mach-pxa/include/mach/hardware.h b/arch/arm/mach-pxa/include/mach/hardware.h
index 7f64d24..814f145 100644
--- a/arch/arm/mach-pxa/include/mach/hardware.h
+++ b/arch/arm/mach-pxa/include/mach/hardware.h
@@ -264,23 +264,35 @@
* <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x
* == 0x3 for pxa300/pxa310/pxa320
*/
+#if defined(CONFIG_PXA25x) || defined(CONFIG_PXA27x)
#define __cpu_is_pxa2xx(id) \
({ \
unsigned int _id = (id) >> 13 & 0x7; \
_id <= 0x2; \
})
+#else
+#define __cpu_is_pxa2xx(id) (0)
+#endif
+#ifdef CONFIG_PXA3xx
#define __cpu_is_pxa3xx(id) \
({ \
unsigned int _id = (id) >> 13 & 0x7; \
_id == 0x3; \
})
+#else
+#define __cpu_is_pxa3xx(id) (0)
+#endif
+#if defined(CONFIG_CPU_PXA930) || defined(CONFIG_CPU_PXA935)
#define __cpu_is_pxa93x(id) \
({ \
unsigned int _id = (id) >> 4 & 0xfff; \
_id == 0x683 || _id == 0x693; \
})
+#else
+#define __cpu_is_pxa93x(id) (0)
+#endif
#define cpu_is_pxa2xx() \
({ \
@@ -309,7 +321,7 @@
#define PCIBIOS_MIN_IO 0
#define PCIBIOS_MIN_MEM 0
#define pcibios_assign_all_busses() 1
+#define ARCH_HAS_DMA_SET_COHERENT_MASK
#endif
-
#endif /* _ASM_ARCH_HARDWARE_H */
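
Wrapping each __cpu_is_pxa*() test in the matching Kconfig guard turns the predicate into a compile-time constant 0 whenever that CPU family is not built in, so every branch guarded by it is eliminated outright. A user-space sketch of the same pattern, with CONFIG_FOO and the ID layout as stand-ins:

#include <stdio.h>

/* Illustrative stand-in for the kernel's guarded CPU predicates. */
#ifdef CONFIG_FOO
#define cpu_is_foo(id)	((((id) >> 13) & 0x7) <= 0x2)
#else
#define cpu_is_foo(id)	(0)	/* folds guarded branches away entirely */
#endif

int main(void)
{
	unsigned int id = 0x4000;	/* hypothetical CPU ID register value */

	if (cpu_is_foo(id))		/* becomes "if (0)" without CONFIG_FOO */
		printf("applying foo quirks\n");
	return 0;
}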
diff --git a/arch/arm/mach-pxa/include/mach/io.h b/arch/arm/mach-pxa/include/mach/io.h
index 262691f..fdca3be 100644
--- a/arch/arm/mach-pxa/include/mach/io.h
+++ b/arch/arm/mach-pxa/include/mach/io.h
@@ -6,6 +6,8 @@
#ifndef __ASM_ARM_ARCH_IO_H
#define __ASM_ARM_ARCH_IO_H
+#include <mach/hardware.h>
+
#define IO_SPACE_LIMIT 0xffffffff
/*
diff --git a/arch/arm/mach-pxa/include/mach/mfp-pxa300.h b/arch/arm/mach-pxa/include/mach/mfp-pxa300.h
index 7139e0d..4e12870 100644
--- a/arch/arm/mach-pxa/include/mach/mfp-pxa300.h
+++ b/arch/arm/mach-pxa/include/mach/mfp-pxa300.h
@@ -71,10 +71,10 @@
#define GPIO46_CI_DD_7 MFP_CFG_DRV(GPIO46, AF0, DS04X)
#define GPIO47_CI_DD_8 MFP_CFG_DRV(GPIO47, AF1, DS04X)
#define GPIO48_CI_DD_9 MFP_CFG_DRV(GPIO48, AF1, DS04X)
-#define GPIO52_CI_HSYNC MFP_CFG_DRV(GPIO52, AF0, DS04X)
-#define GPIO51_CI_VSYNC MFP_CFG_DRV(GPIO51, AF0, DS04X)
#define GPIO49_CI_MCLK MFP_CFG_DRV(GPIO49, AF0, DS04X)
#define GPIO50_CI_PCLK MFP_CFG_DRV(GPIO50, AF0, DS04X)
+#define GPIO51_CI_HSYNC MFP_CFG_DRV(GPIO51, AF0, DS04X)
+#define GPIO52_CI_VSYNC MFP_CFG_DRV(GPIO52, AF0, DS04X)
/* KEYPAD */
#define GPIO3_KP_DKIN_6 MFP_CFG_LPM(GPIO3, AF2, FLOAT)
diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c
index 77ad6d3..405b92a 100644
--- a/arch/arm/mach-pxa/palm27x.c
+++ b/arch/arm/mach-pxa/palm27x.c
@@ -469,9 +469,13 @@
},
};
+static struct i2c_pxa_platform_data palm27x_i2c_power_info = {
+ .use_pio = 1,
+};
+
void __init palm27x_pmic_init(void)
{
i2c_register_board_info(1, ARRAY_AND_SIZE(palm27x_pi2c_board_info));
- pxa27x_set_i2c_power_info(NULL);
+ pxa27x_set_i2c_power_info(&palm27x_i2c_power_info);
}
#endif
diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c
index c9b747c..37d6173 100644
--- a/arch/arm/mach-pxa/vpac270.c
+++ b/arch/arm/mach-pxa/vpac270.c
@@ -240,6 +240,7 @@
#if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE)
static struct pxamci_platform_data vpac270_mci_platform_data = {
.ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
+ .gpio_power = -1,
.gpio_card_detect = GPIO53_VPAC270_SD_DETECT_N,
.gpio_card_ro = GPIO52_VPAC270_SD_READONLY,
.detect_delay_ms = 200,
diff --git a/arch/arm/mach-s3c2410/include/mach/vmalloc.h b/arch/arm/mach-s3c2410/include/mach/vmalloc.h
index 315b007..54297eb 100644
--- a/arch/arm/mach-s3c2410/include/mach/vmalloc.h
+++ b/arch/arm/mach-s3c2410/include/mach/vmalloc.h
@@ -15,6 +15,6 @@
#ifndef __ASM_ARCH_VMALLOC_H
#define __ASM_ARCH_VMALLOC_H
-#define VMALLOC_END (0xE0000000)
+#define VMALLOC_END 0xE0000000UL
#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s3c64xx/dev-spi.c b/arch/arm/mach-s3c64xx/dev-spi.c
index a492b98..405e621 100644
--- a/arch/arm/mach-s3c64xx/dev-spi.c
+++ b/arch/arm/mach-s3c64xx/dev-spi.c
@@ -18,10 +18,11 @@
#include <mach/map.h>
#include <mach/gpio-bank-c.h>
#include <mach/spi-clocks.h>
+#include <mach/irqs.h>
#include <plat/s3c64xx-spi.h>
#include <plat/gpio-cfg.h>
-#include <plat/irqs.h>
+#include <plat/devs.h>
static char *spi_src_clks[] = {
[S3C64XX_SPI_SRCCLK_PCLK] = "pclk",
diff --git a/arch/arm/mach-s3c64xx/include/mach/vmalloc.h b/arch/arm/mach-s3c64xx/include/mach/vmalloc.h
index 7411ef3..bc0e913 100644
--- a/arch/arm/mach-s3c64xx/include/mach/vmalloc.h
+++ b/arch/arm/mach-s3c64xx/include/mach/vmalloc.h
@@ -15,6 +15,6 @@
#ifndef __ASM_ARCH_VMALLOC_H
#define __ASM_ARCH_VMALLOC_H
-#define VMALLOC_END (0xE0000000)
+#define VMALLOC_END 0xE0000000UL
#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s3c64xx/mach-real6410.c b/arch/arm/mach-s3c64xx/mach-real6410.c
index 5c07d01..e130379 100644
--- a/arch/arm/mach-s3c64xx/mach-real6410.c
+++ b/arch/arm/mach-s3c64xx/mach-real6410.c
@@ -30,73 +30,73 @@
#include <plat/devs.h>
#include <plat/regs-serial.h>
-#define UCON S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK
-#define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB
-#define UFCON S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE
+#define UCON (S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK)
+#define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB)
+#define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)
static struct s3c2410_uartcfg real6410_uartcfgs[] __initdata = {
[0] = {
- .hwport = 0,
- .flags = 0,
- .ucon = UCON,
- .ulcon = ULCON,
- .ufcon = UFCON,
+ .hwport = 0,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
},
[1] = {
- .hwport = 1,
- .flags = 0,
- .ucon = UCON,
- .ulcon = ULCON,
- .ufcon = UFCON,
+ .hwport = 1,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
},
[2] = {
- .hwport = 2,
- .flags = 0,
- .ucon = UCON,
- .ulcon = ULCON,
- .ufcon = UFCON,
+ .hwport = 2,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
},
[3] = {
- .hwport = 3,
- .flags = 0,
- .ucon = UCON,
- .ulcon = ULCON,
- .ufcon = UFCON,
+ .hwport = 3,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
},
};
/* DM9000AEP 10/100 ethernet controller */
static struct resource real6410_dm9k_resource[] = {
- [0] = {
- .start = S3C64XX_PA_XM0CSN1,
- .end = S3C64XX_PA_XM0CSN1 + 1,
- .flags = IORESOURCE_MEM
- },
- [1] = {
- .start = S3C64XX_PA_XM0CSN1 + 4,
- .end = S3C64XX_PA_XM0CSN1 + 5,
- .flags = IORESOURCE_MEM
- },
- [2] = {
- .start = S3C_EINT(7),
- .end = S3C_EINT(7),
- .flags = IORESOURCE_IRQ,
- }
+ [0] = {
+ .start = S3C64XX_PA_XM0CSN1,
+ .end = S3C64XX_PA_XM0CSN1 + 1,
+ .flags = IORESOURCE_MEM
+ },
+ [1] = {
+ .start = S3C64XX_PA_XM0CSN1 + 4,
+ .end = S3C64XX_PA_XM0CSN1 + 5,
+ .flags = IORESOURCE_MEM
+ },
+ [2] = {
+ .start = S3C_EINT(7),
+ .end = S3C_EINT(7),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL
+ }
};
static struct dm9000_plat_data real6410_dm9k_pdata = {
- .flags = (DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM),
+ .flags = (DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM),
};
static struct platform_device real6410_device_eth = {
- .name = "dm9000",
- .id = -1,
- .num_resources = ARRAY_SIZE(real6410_dm9k_resource),
- .resource = real6410_dm9k_resource,
- .dev = {
- .platform_data = &real6410_dm9k_pdata,
- },
+ .name = "dm9000",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(real6410_dm9k_resource),
+ .resource = real6410_dm9k_resource,
+ .dev = {
+ .platform_data = &real6410_dm9k_pdata,
+ },
};
static struct platform_device *real6410_devices[] __initdata = {
@@ -129,12 +129,12 @@
/* set timing for nCS1 suitable for ethernet chip */
__raw_writel((0 << S3C64XX_SROM_BCX__PMC__SHIFT) |
- (6 << S3C64XX_SROM_BCX__TACP__SHIFT) |
- (4 << S3C64XX_SROM_BCX__TCAH__SHIFT) |
- (1 << S3C64XX_SROM_BCX__TCOH__SHIFT) |
- (13 << S3C64XX_SROM_BCX__TACC__SHIFT) |
- (4 << S3C64XX_SROM_BCX__TCOS__SHIFT) |
- (0 << S3C64XX_SROM_BCX__TACS__SHIFT), S3C64XX_SROM_BC1);
+ (6 << S3C64XX_SROM_BCX__TACP__SHIFT) |
+ (4 << S3C64XX_SROM_BCX__TCAH__SHIFT) |
+ (1 << S3C64XX_SROM_BCX__TCOH__SHIFT) |
+ (13 << S3C64XX_SROM_BCX__TACC__SHIFT) |
+ (4 << S3C64XX_SROM_BCX__TCOS__SHIFT) |
+ (0 << S3C64XX_SROM_BCX__TACS__SHIFT), S3C64XX_SROM_BC1);
platform_add_devices(real6410_devices, ARRAY_SIZE(real6410_devices));
}
diff --git a/arch/arm/mach-s5p6440/cpu.c b/arch/arm/mach-s5p6440/cpu.c
index 526f33a..ec592e8 100644
--- a/arch/arm/mach-s5p6440/cpu.c
+++ b/arch/arm/mach-s5p6440/cpu.c
@@ -19,6 +19,7 @@
#include <linux/sysdev.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
+#include <linux/sched.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
diff --git a/arch/arm/mach-s5p6440/include/mach/vmalloc.h b/arch/arm/mach-s5p6440/include/mach/vmalloc.h
index 16df257..e3f0eeb 100644
--- a/arch/arm/mach-s5p6440/include/mach/vmalloc.h
+++ b/arch/arm/mach-s5p6440/include/mach/vmalloc.h
@@ -12,6 +12,6 @@
#ifndef __ASM_ARCH_VMALLOC_H
#define __ASM_ARCH_VMALLOC_H
-#define VMALLOC_END (0xE0000000)
+#define VMALLOC_END 0xE0000000UL
#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s5p6442/cpu.c b/arch/arm/mach-s5p6442/cpu.c
index a48fb55..70ac681 100644
--- a/arch/arm/mach-s5p6442/cpu.c
+++ b/arch/arm/mach-s5p6442/cpu.c
@@ -19,6 +19,7 @@
#include <linux/sysdev.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
+#include <linux/sched.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
diff --git a/arch/arm/mach-s5p6442/include/mach/vmalloc.h b/arch/arm/mach-s5p6442/include/mach/vmalloc.h
index be33336..f5c83f0 100644
--- a/arch/arm/mach-s5p6442/include/mach/vmalloc.h
+++ b/arch/arm/mach-s5p6442/include/mach/vmalloc.h
@@ -12,6 +12,6 @@
#ifndef __ASM_ARCH_VMALLOC_H
#define __ASM_ARCH_VMALLOC_H
-#define VMALLOC_END (0xE0000000)
+#define VMALLOC_END 0xE0000000UL
#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s5pc100/cpu.c b/arch/arm/mach-s5pc100/cpu.c
index 251c92a..cd1afbc 100644
--- a/arch/arm/mach-s5pc100/cpu.c
+++ b/arch/arm/mach-s5pc100/cpu.c
@@ -21,6 +21,7 @@
#include <linux/sysdev.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
+#include <linux/sched.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c
index af91fef..d562670 100644
--- a/arch/arm/mach-s5pv210/clock.c
+++ b/arch/arm/mach-s5pv210/clock.c
@@ -173,11 +173,6 @@
return s5p_gatectrl(S5P_CLKGATE_IP3, clk, enable);
}
-static int s5pv210_clk_ip4_ctrl(struct clk *clk, int enable)
-{
- return s5p_gatectrl(S5P_CLKGATE_IP4, clk, enable);
-}
-
static int s5pv210_clk_mask0_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(S5P_CLK_SRC_MASK0, clk, enable);
@@ -281,6 +276,24 @@
.enable = s5pv210_clk_ip0_ctrl,
.ctrlbit = (1<<29),
}, {
+ .name = "fimc",
+ .id = 0,
+ .parent = &clk_hclk_dsys.clk,
+ .enable = s5pv210_clk_ip0_ctrl,
+ .ctrlbit = (1 << 24),
+ }, {
+ .name = "fimc",
+ .id = 1,
+ .parent = &clk_hclk_dsys.clk,
+ .enable = s5pv210_clk_ip0_ctrl,
+ .ctrlbit = (1 << 25),
+ }, {
+ .name = "fimc",
+ .id = 2,
+ .parent = &clk_hclk_dsys.clk,
+ .enable = s5pv210_clk_ip0_ctrl,
+ .ctrlbit = (1 << 26),
+ }, {
.name = "otg",
.id = -1,
.parent = &clk_hclk_psys.clk,
@@ -357,7 +370,7 @@
.id = 1,
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
- .ctrlbit = (1<<8),
+ .ctrlbit = (1 << 10),
}, {
.name = "i2c",
.id = 2,
diff --git a/arch/arm/mach-s5pv210/cpu.c b/arch/arm/mach-s5pv210/cpu.c
index b9f4d67..245b82b 100644
--- a/arch/arm/mach-s5pv210/cpu.c
+++ b/arch/arm/mach-s5pv210/cpu.c
@@ -19,6 +19,7 @@
#include <linux/io.h>
#include <linux/sysdev.h>
#include <linux/platform_device.h>
+#include <linux/sched.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -47,7 +48,7 @@
{
.virtual = (unsigned long)S5P_VA_SYSTIMER,
.pfn = __phys_to_pfn(S5PV210_PA_SYSTIMER),
- .length = SZ_1M,
+ .length = SZ_4K,
.type = MT_DEVICE,
}, {
.virtual = (unsigned long)VA_VIC2,
diff --git a/arch/arm/mach-s5pv210/include/mach/vmalloc.h b/arch/arm/mach-s5pv210/include/mach/vmalloc.h
index 58f515e..df9a288 100644
--- a/arch/arm/mach-s5pv210/include/mach/vmalloc.h
+++ b/arch/arm/mach-s5pv210/include/mach/vmalloc.h
@@ -17,6 +17,6 @@
#ifndef __ASM_ARCH_VMALLOC_H
#define __ASM_ARCH_VMALLOC_H __FILE__
-#define VMALLOC_END (0xE0000000)
+#define VMALLOC_END (0xE0000000UL)
#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s5pv310/clock.c b/arch/arm/mach-s5pv310/clock.c
index 77f2b4d..26a0f03 100644
--- a/arch/arm/mach-s5pv310/clock.c
+++ b/arch/arm/mach-s5pv310/clock.c
@@ -30,6 +30,16 @@
.rate = 27000000,
};
+static int s5pv310_clksrc_mask_peril0_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(S5P_CLKSRC_MASK_PERIL0, clk, enable);
+}
+
+static int s5pv310_clk_ip_peril_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(S5P_CLKGATE_IP_PERIL, clk, enable);
+}
+
/* Core list of CMU_CPU side */
static struct clksrc_clk clk_mout_apll = {
@@ -39,6 +49,14 @@
},
.sources = &clk_src_apll,
.reg_src = { .reg = S5P_CLKSRC_CPU, .shift = 0, .size = 1 },
+};
+
+static struct clksrc_clk clk_sclk_apll = {
+ .clk = {
+ .name = "sclk_apll",
+ .id = -1,
+ .parent = &clk_mout_apll.clk,
+ },
.reg_div = { .reg = S5P_CLKDIV_CPU, .shift = 24, .size = 3 },
};
@@ -61,7 +79,7 @@
};
static struct clk *clkset_moutcore_list[] = {
- [0] = &clk_mout_apll.clk,
+ [0] = &clk_sclk_apll.clk,
[1] = &clk_mout_mpll.clk,
};
@@ -154,7 +172,7 @@
static struct clk *clkset_corebus_list[] = {
[0] = &clk_mout_mpll.clk,
- [1] = &clk_mout_apll.clk,
+ [1] = &clk_sclk_apll.clk,
};
static struct clksrc_sources clkset_mout_corebus = {
@@ -220,7 +238,7 @@
static struct clk *clkset_aclk_top_list[] = {
[0] = &clk_mout_mpll.clk,
- [1] = &clk_mout_apll.clk,
+ [1] = &clk_sclk_apll.clk,
};
static struct clksrc_sources clkset_aclk_200 = {
@@ -321,11 +339,6 @@
.reg_src = { .reg = S5P_CLKSRC_TOP0, .shift = 8, .size = 1 },
};
-static int s5pv310_clk_ip_peril_ctrl(struct clk *clk, int enable)
-{
- return s5p_gatectrl(S5P_CLKGATE_IP_PERIL, clk, enable);
-}
-
static struct clk init_clocks_disable[] = {
{
.name = "timers",
@@ -337,7 +350,37 @@
};
static struct clk init_clocks[] = {
- /* Nothing here yet */
+ {
+ .name = "uart",
+ .id = 0,
+ .enable = s5pv310_clk_ip_peril_ctrl,
+ .ctrlbit = (1 << 0),
+ }, {
+ .name = "uart",
+ .id = 1,
+ .enable = s5pv310_clk_ip_peril_ctrl,
+ .ctrlbit = (1 << 1),
+ }, {
+ .name = "uart",
+ .id = 2,
+ .enable = s5pv310_clk_ip_peril_ctrl,
+ .ctrlbit = (1 << 2),
+ }, {
+ .name = "uart",
+ .id = 3,
+ .enable = s5pv310_clk_ip_peril_ctrl,
+ .ctrlbit = (1 << 3),
+ }, {
+ .name = "uart",
+ .id = 4,
+ .enable = s5pv310_clk_ip_peril_ctrl,
+ .ctrlbit = (1 << 4),
+ }, {
+ .name = "uart",
+ .id = 5,
+ .enable = s5pv310_clk_ip_peril_ctrl,
+ .ctrlbit = (1 << 5),
+ }
};
static struct clk *clkset_group_list[] = {
@@ -359,8 +402,8 @@
.clk = {
.name = "uclk1",
.id = 0,
+ .enable = s5pv310_clksrc_mask_peril0_ctrl,
.ctrlbit = (1 << 0),
- .enable = s5pv310_clk_ip_peril_ctrl,
},
.sources = &clkset_group,
.reg_src = { .reg = S5P_CLKSRC_PERIL0, .shift = 0, .size = 4 },
@@ -369,8 +412,8 @@
.clk = {
.name = "uclk1",
.id = 1,
- .enable = s5pv310_clk_ip_peril_ctrl,
- .ctrlbit = (1 << 1),
+ .enable = s5pv310_clksrc_mask_peril0_ctrl,
+ .ctrlbit = (1 << 4),
},
.sources = &clkset_group,
.reg_src = { .reg = S5P_CLKSRC_PERIL0, .shift = 4, .size = 4 },
@@ -379,8 +422,8 @@
.clk = {
.name = "uclk1",
.id = 2,
- .enable = s5pv310_clk_ip_peril_ctrl,
- .ctrlbit = (1 << 2),
+ .enable = s5pv310_clksrc_mask_peril0_ctrl,
+ .ctrlbit = (1 << 8),
},
.sources = &clkset_group,
.reg_src = { .reg = S5P_CLKSRC_PERIL0, .shift = 8, .size = 4 },
@@ -389,8 +432,8 @@
.clk = {
.name = "uclk1",
.id = 3,
- .enable = s5pv310_clk_ip_peril_ctrl,
- .ctrlbit = (1 << 3),
+ .enable = s5pv310_clksrc_mask_peril0_ctrl,
+ .ctrlbit = (1 << 12),
},
.sources = &clkset_group,
.reg_src = { .reg = S5P_CLKSRC_PERIL0, .shift = 12, .size = 4 },
@@ -399,7 +442,7 @@
.clk = {
.name = "sclk_pwm",
.id = -1,
- .enable = s5pv310_clk_ip_peril_ctrl,
+ .enable = s5pv310_clksrc_mask_peril0_ctrl,
.ctrlbit = (1 << 24),
},
.sources = &clkset_group,
@@ -411,6 +454,7 @@
/* Clock initialization code */
static struct clksrc_clk *sysclks[] = {
&clk_mout_apll,
+ &clk_sclk_apll,
&clk_mout_epll,
&clk_mout_mpll,
&clk_moutcore,
@@ -470,11 +514,11 @@
apll = s5p_get_pll45xx(xtal, __raw_readl(S5P_APLL_CON0), pll_4508);
mpll = s5p_get_pll45xx(xtal, __raw_readl(S5P_MPLL_CON0), pll_4508);
epll = s5p_get_pll46xx(xtal, __raw_readl(S5P_EPLL_CON0),
- __raw_readl(S5P_EPLL_CON1), pll_4500);
+ __raw_readl(S5P_EPLL_CON1), pll_4600);
vpllsrc = clk_get_rate(&clk_vpllsrc.clk);
vpll = s5p_get_pll46xx(vpllsrc, __raw_readl(S5P_VPLL_CON0),
- __raw_readl(S5P_VPLL_CON1), pll_4502);
+ __raw_readl(S5P_VPLL_CON1), pll_4650);
clk_fout_apll.rate = apll;
clk_fout_mpll.rate = mpll;
diff --git a/arch/arm/mach-s5pv310/cpu.c b/arch/arm/mach-s5pv310/cpu.c
index 196c9f1..e5b261a 100644
--- a/arch/arm/mach-s5pv310/cpu.c
+++ b/arch/arm/mach-s5pv310/cpu.c
@@ -45,6 +45,16 @@
.pfn = __phys_to_pfn(S5PV310_PA_L2CC),
.length = SZ_4K,
.type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S5P_VA_SYSRAM,
+ .pfn = __phys_to_pfn(S5PV310_PA_SYSRAM),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S5P_VA_CMU,
+ .pfn = __phys_to_pfn(S5PV310_PA_CMU),
+ .length = SZ_128K,
+ .type = MT_DEVICE,
},
};
diff --git a/arch/arm/mach-s5pv310/include/mach/irqs.h b/arch/arm/mach-s5pv310/include/mach/irqs.h
index 56885ca..4cdedda 100644
--- a/arch/arm/mach-s5pv310/include/mach/irqs.h
+++ b/arch/arm/mach-s5pv310/include/mach/irqs.h
@@ -15,12 +15,14 @@
#include <plat/irqs.h>
-/* Private Peripheral Interrupt */
+/* PPI: Private Peripheral Interrupt */
+
#define IRQ_PPI(x) S5P_IRQ(x+16)
#define IRQ_LOCALTIMER IRQ_PPI(13)
-/* Shared Peripheral Interrupt */
+/* SPI: Shared Peripheral Interrupt */
+
#define IRQ_SPI(x) S5P_IRQ(x+32)
#define IRQ_EINT0 IRQ_SPI(40)
@@ -36,7 +38,7 @@
#define IRQ_PCIE IRQ_SPI(50)
#define IRQ_SYSTEM_TIMER IRQ_SPI(51)
#define IRQ_MFC IRQ_SPI(52)
-#define IRQ_WTD IRQ_SPI(53)
+#define IRQ_WDT IRQ_SPI(53)
#define IRQ_AUDIO_SS IRQ_SPI(54)
#define IRQ_AC97 IRQ_SPI(55)
#define IRQ_SPDIF IRQ_SPI(56)
@@ -67,8 +69,9 @@
#define IRQ_IIC COMBINER_IRQ(27, 0)
/* Set the default NR_IRQS */
+
#define NR_IRQS COMBINER_IRQ(MAX_COMBINER_NR, 0)
#define MAX_COMBINER_NR 39
-#endif /* ASM_ARCH_IRQS_H */
+#endif /* __ASM_ARCH_IRQS_H */
diff --git a/arch/arm/mach-s5pv310/include/mach/map.h b/arch/arm/mach-s5pv310/include/mach/map.h
index 87697c9..213e110 100644
--- a/arch/arm/mach-s5pv310/include/mach/map.h
+++ b/arch/arm/mach-s5pv310/include/mach/map.h
@@ -23,12 +23,16 @@
#include <plat/map-s5p.h>
+#define S5PV310_PA_SYSRAM (0x02025000)
+
#define S5PV310_PA_CHIPID (0x10000000)
#define S5P_PA_CHIPID S5PV310_PA_CHIPID
#define S5PV310_PA_SYSCON (0x10020000)
#define S5P_PA_SYSCON S5PV310_PA_SYSCON
+#define S5PV310_PA_CMU (0x10030000)
+
#define S5PV310_PA_WATCHDOG (0x10060000)
#define S5PV310_PA_COMBINER (0x10448000)
@@ -39,8 +43,12 @@
#define S5PV310_PA_GIC_DIST (0x10501000)
#define S5PV310_PA_L2CC (0x10502000)
-#define S5PV310_PA_GPIO (0x11000000)
-#define S5P_PA_GPIO S5PV310_PA_GPIO
+#define S5PV310_PA_GPIO1 (0x11400000)
+#define S5PV310_PA_GPIO2 (0x11000000)
+#define S5PV310_PA_GPIO3 (0x03860000)
+#define S5P_PA_GPIO S5PV310_PA_GPIO1
+
+#define S5PV310_PA_HSMMC(x) (0x12510000 + ((x) * 0x10000))
#define S5PV310_PA_UART (0x13800000)
@@ -63,6 +71,10 @@
/* compatibility defines. */
#define S3C_PA_UART S5PV310_PA_UART
+#define S3C_PA_HSMMC0 S5PV310_PA_HSMMC(0)
+#define S3C_PA_HSMMC1 S5PV310_PA_HSMMC(1)
+#define S3C_PA_HSMMC2 S5PV310_PA_HSMMC(2)
+#define S3C_PA_HSMMC3 S5PV310_PA_HSMMC(3)
#define S3C_PA_IIC S5PV310_PA_IIC0
#define S3C_PA_WDT S5PV310_PA_WATCHDOG
diff --git a/arch/arm/mach-s5pv310/include/mach/regs-clock.h b/arch/arm/mach-s5pv310/include/mach/regs-clock.h
index 59e3a7e..4013553 100644
--- a/arch/arm/mach-s5pv310/include/mach/regs-clock.h
+++ b/arch/arm/mach-s5pv310/include/mach/regs-clock.h
@@ -15,48 +15,49 @@
#include <mach/map.h>
-#define S5P_CLKREG(x) (S3C_VA_SYS + (x))
+#define S5P_CLKREG(x) (S5P_VA_CMU + (x))
#define S5P_INFORM0 S5P_CLKREG(0x800)
-#define S5P_EPLL_CON0 S5P_CLKREG(0x1C110)
-#define S5P_EPLL_CON1 S5P_CLKREG(0x1C114)
-#define S5P_VPLL_CON0 S5P_CLKREG(0x1C120)
-#define S5P_VPLL_CON1 S5P_CLKREG(0x1C124)
+#define S5P_EPLL_CON0 S5P_CLKREG(0x0C110)
+#define S5P_EPLL_CON1 S5P_CLKREG(0x0C114)
+#define S5P_VPLL_CON0 S5P_CLKREG(0x0C120)
+#define S5P_VPLL_CON1 S5P_CLKREG(0x0C124)
-#define S5P_CLKSRC_TOP0 S5P_CLKREG(0x1C210)
-#define S5P_CLKSRC_TOP1 S5P_CLKREG(0x1C214)
+#define S5P_CLKSRC_TOP0 S5P_CLKREG(0x0C210)
+#define S5P_CLKSRC_TOP1 S5P_CLKREG(0x0C214)
-#define S5P_CLKSRC_PERIL0 S5P_CLKREG(0x1C250)
+#define S5P_CLKSRC_PERIL0 S5P_CLKREG(0x0C250)
-#define S5P_CLKDIV_TOP S5P_CLKREG(0x1C510)
+#define S5P_CLKDIV_TOP S5P_CLKREG(0x0C510)
-#define S5P_CLKDIV_PERIL0 S5P_CLKREG(0x1C550)
-#define S5P_CLKDIV_PERIL1 S5P_CLKREG(0x1C554)
-#define S5P_CLKDIV_PERIL2 S5P_CLKREG(0x1C558)
-#define S5P_CLKDIV_PERIL3 S5P_CLKREG(0x1C55C)
-#define S5P_CLKDIV_PERIL4 S5P_CLKREG(0x1C560)
-#define S5P_CLKDIV_PERIL5 S5P_CLKREG(0x1C564)
+#define S5P_CLKDIV_PERIL0 S5P_CLKREG(0x0C550)
+#define S5P_CLKDIV_PERIL1 S5P_CLKREG(0x0C554)
+#define S5P_CLKDIV_PERIL2 S5P_CLKREG(0x0C558)
+#define S5P_CLKDIV_PERIL3 S5P_CLKREG(0x0C55C)
+#define S5P_CLKDIV_PERIL4 S5P_CLKREG(0x0C560)
+#define S5P_CLKDIV_PERIL5 S5P_CLKREG(0x0C564)
-#define S5P_CLKGATE_IP_PERIL S5P_CLKREG(0x1C950)
+#define S5P_CLKSRC_MASK_PERIL0 S5P_CLKREG(0x0C350)
-#define S5P_CLKSRC_CORE S5P_CLKREG(0x20200)
+#define S5P_CLKGATE_IP_PERIL S5P_CLKREG(0x0C950)
-#define S5P_CLKDIV_CORE0 S5P_CLKREG(0x20500)
+#define S5P_CLKSRC_CORE S5P_CLKREG(0x10200)
+#define S5P_CLKDIV_CORE0 S5P_CLKREG(0x10500)
-#define S5P_APLL_LOCK S5P_CLKREG(0x24000)
-#define S5P_MPLL_LOCK S5P_CLKREG(0x24004)
-#define S5P_APLL_CON0 S5P_CLKREG(0x24100)
-#define S5P_APLL_CON1 S5P_CLKREG(0x24104)
-#define S5P_MPLL_CON0 S5P_CLKREG(0x24108)
-#define S5P_MPLL_CON1 S5P_CLKREG(0x2410C)
+#define S5P_APLL_LOCK S5P_CLKREG(0x14000)
+#define S5P_MPLL_LOCK S5P_CLKREG(0x14004)
+#define S5P_APLL_CON0 S5P_CLKREG(0x14100)
+#define S5P_APLL_CON1 S5P_CLKREG(0x14104)
+#define S5P_MPLL_CON0 S5P_CLKREG(0x14108)
+#define S5P_MPLL_CON1 S5P_CLKREG(0x1410C)
-#define S5P_CLKSRC_CPU S5P_CLKREG(0x24200)
-#define S5P_CLKMUX_STATCPU S5P_CLKREG(0x24400)
+#define S5P_CLKSRC_CPU S5P_CLKREG(0x14200)
+#define S5P_CLKMUX_STATCPU S5P_CLKREG(0x14400)
-#define S5P_CLKDIV_CPU S5P_CLKREG(0x24500)
-#define S5P_CLKDIV_STATCPU S5P_CLKREG(0x24600)
+#define S5P_CLKDIV_CPU S5P_CLKREG(0x14500)
+#define S5P_CLKDIV_STATCPU S5P_CLKREG(0x14600)
-#define S5P_CLKGATE_SCLKCPU S5P_CLKREG(0x24800)
+#define S5P_CLKGATE_SCLKCPU S5P_CLKREG(0x14800)
#endif /* __ASM_ARCH_REGS_CLOCK_H */
diff --git a/arch/arm/mach-s5pv310/include/mach/vmalloc.h b/arch/arm/mach-s5pv310/include/mach/vmalloc.h
index 3f565eb..256f221 100644
--- a/arch/arm/mach-s5pv310/include/mach/vmalloc.h
+++ b/arch/arm/mach-s5pv310/include/mach/vmalloc.h
@@ -17,6 +17,6 @@
#ifndef __ASM_ARCH_VMALLOC_H
#define __ASM_ARCH_VMALLOC_H __FILE__
-#define VMALLOC_END (0xF0000000)
+#define VMALLOC_END (0xF0000000UL)
#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s5pv310/platsmp.c b/arch/arm/mach-s5pv310/platsmp.c
index fe9469ab..d357c19 100644
--- a/arch/arm/mach-s5pv310/platsmp.c
+++ b/arch/arm/mach-s5pv310/platsmp.c
@@ -187,6 +187,6 @@
* until it receives a soft interrupt, and then the
* secondary CPU branches to this address.
*/
- __raw_writel(BSYM(virt_to_phys(s5pv310_secondary_startup)), S5P_INFORM0);
+ __raw_writel(BSYM(virt_to_phys(s5pv310_secondary_startup)), S5P_VA_SYSRAM);
}
}
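
Pointing the boot hand-off at S5P_VA_SYSRAM instead of the INFORM0 register matches where this silicon's boot ROM actually polls for the secondary core's jump address. The protocol, condensed into a hedged sketch (mappings and the wake-up kick are assumed to be handled elsewhere, as in the code above):

/* Release protocol for a parked secondary core: publish the physical
 * startup address where the ROM will look for it, then order the
 * write before the soft interrupt that wakes the core is sent.
 * Names are illustrative.
 */
static void release_secondary(void __iomem *rom_mailbox,
			      unsigned long startup_pa)
{
	__raw_writel(startup_pa, rom_mailbox);	/* ROM branches here on wake */
	dsb();					/* write visible before the kick */
}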
diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile
index 5e16b4c..ae416fe 100644
--- a/arch/arm/mach-shmobile/Makefile
+++ b/arch/arm/mach-shmobile/Makefile
@@ -3,7 +3,7 @@
#
# Common objects
-obj-y := timer.o console.o clock.o
+obj-y := timer.o console.o clock.o pm_runtime.o
# CPU objects
obj-$(CONFIG_ARCH_SH7367) += setup-sh7367.o clock-sh7367.o intc-sh7367.o
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index 23d472f..95935c8 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -25,6 +25,7 @@
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/mfd/sh_mobile_sdhi.h>
+#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
@@ -39,6 +40,7 @@
#include <linux/sh_clk.h>
#include <linux/gpio.h>
#include <linux/input.h>
+#include <linux/leds.h>
#include <linux/input/sh_keysc.h>
#include <linux/usb/r8a66597.h>
@@ -307,6 +309,7 @@
.dma_slave_tx = SHDMA_SLAVE_SDHI1_TX,
.dma_slave_rx = SHDMA_SLAVE_SDHI1_RX,
.tmio_ocr_mask = MMC_VDD_165_195,
+ .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE,
};
static struct resource sdhi1_resources[] = {
@@ -558,7 +561,7 @@
static struct platform_device fsi_device = {
.name = "sh_fsi2",
- .id = 0,
+ .id = -1,
.num_resources = ARRAY_SIZE(fsi_resources),
.resource = fsi_resources,
.dev = {
@@ -650,7 +653,44 @@
},
};
+static struct gpio_led ap4evb_leds[] = {
+ {
+ .name = "led4",
+ .gpio = GPIO_PORT185,
+ .default_state = LEDS_GPIO_DEFSTATE_ON,
+ },
+ {
+ .name = "led2",
+ .gpio = GPIO_PORT186,
+ .default_state = LEDS_GPIO_DEFSTATE_ON,
+ },
+ {
+ .name = "led3",
+ .gpio = GPIO_PORT187,
+ .default_state = LEDS_GPIO_DEFSTATE_ON,
+ },
+ {
+ .name = "led1",
+ .gpio = GPIO_PORT188,
+ .default_state = LEDS_GPIO_DEFSTATE_ON,
+ }
+};
+
+static struct gpio_led_platform_data ap4evb_leds_pdata = {
+ .num_leds = ARRAY_SIZE(ap4evb_leds),
+ .leds = ap4evb_leds,
+};
+
+static struct platform_device leds_device = {
+ .name = "leds-gpio",
+ .id = 0,
+ .dev = {
+ .platform_data = &ap4evb_leds_pdata,
+ },
+};
+
static struct platform_device *ap4evb_devices[] __initdata = {
+ &leds_device,
&nor_flash_device,
&smc911x_device,
&sdhi0_device,
@@ -840,20 +880,6 @@
gpio_request(GPIO_FN_CS5A, NULL);
gpio_request(GPIO_FN_IRQ6_39, NULL);
- /* enable LED 1 - 4 */
- gpio_request(GPIO_PORT185, NULL);
- gpio_request(GPIO_PORT186, NULL);
- gpio_request(GPIO_PORT187, NULL);
- gpio_request(GPIO_PORT188, NULL);
- gpio_direction_output(GPIO_PORT185, 1);
- gpio_direction_output(GPIO_PORT186, 1);
- gpio_direction_output(GPIO_PORT187, 1);
- gpio_direction_output(GPIO_PORT188, 1);
- gpio_export(GPIO_PORT185, 0);
- gpio_export(GPIO_PORT186, 0);
- gpio_export(GPIO_PORT187, 0);
- gpio_export(GPIO_PORT188, 0);
-
/* enable Debug switch (S6) */
gpio_request(GPIO_PORT32, NULL);
gpio_request(GPIO_PORT33, NULL);
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index fb4e9b1..7594689 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -286,7 +286,6 @@
struct clk pllc2_clk = {
.ops = &pllc2_clk_ops,
- .flags = CLK_ENABLE_ON_INIT,
.parent = &extal1_div2_clk,
.freq_table = pllc2_freq_table,
.parent_table = pllc2_parent,
@@ -395,7 +394,7 @@
enum { MSTP001,
MSTP131, MSTP130,
- MSTP129, MSTP128,
+ MSTP129, MSTP128, MSTP127, MSTP126,
MSTP118, MSTP117, MSTP116,
MSTP106, MSTP101, MSTP100,
MSTP223,
@@ -413,6 +412,8 @@
[MSTP130] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 30, 0), /* VEU2 */
[MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* VEU1 */
[MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* VEU0 */
+ [MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU */
+ [MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2 */
[MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX */
[MSTP117] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */
[MSTP116] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */
@@ -428,7 +429,7 @@
[MSTP201] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */
[MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */
[MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */
- [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, CLK_ENABLE_ON_INIT), /* FSIA */
+ [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, 0), /* FSIA */
[MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */
[MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */
[MSTP314] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 14, 0), /* SDHI0 */
@@ -498,6 +499,8 @@
CLKDEV_DEV_ID("uio_pdrv_genirq.3", &mstp_clks[MSTP130]), /* VEU2 */
CLKDEV_DEV_ID("uio_pdrv_genirq.2", &mstp_clks[MSTP129]), /* VEU1 */
CLKDEV_DEV_ID("uio_pdrv_genirq.1", &mstp_clks[MSTP128]), /* VEU0 */
+ CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU */
+ CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2 */
CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */
CLKDEV_DEV_ID("sh_mobile_lcdc_fb.1", &mstp_clks[MSTP117]), /* LCDC1 */
CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* IIC0 */
diff --git a/arch/arm/mach-shmobile/clock.c b/arch/arm/mach-shmobile/clock.c
index b7c705a..6b7c7c4 100644
--- a/arch/arm/mach-shmobile/clock.c
+++ b/arch/arm/mach-shmobile/clock.c
@@ -1,8 +1,10 @@
/*
- * SH-Mobile Timer
+ * SH-Mobile Clock Framework
*
* Copyright (C) 2010 Magnus Damm
*
+ * Used together with arch/arm/common/clkdev.c and drivers/sh/clk.c.
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c
new file mode 100644
index 0000000..94912d3
--- /dev/null
+++ b/arch/arm/mach-shmobile/pm_runtime.c
@@ -0,0 +1,169 @@
+/*
+ * arch/arm/mach-shmobile/pm_runtime.c
+ *
+ * Runtime PM support code for SuperH Mobile ARM
+ *
+ * Copyright (C) 2009-2010 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/sh_clk.h>
+#include <linux/bitmap.h>
+
+#ifdef CONFIG_PM_RUNTIME
+#define BIT_ONCE 0
+#define BIT_ACTIVE 1
+#define BIT_CLK_ENABLED 2
+
+struct pm_runtime_data {
+ unsigned long flags;
+ struct clk *clk;
+};
+
+static void __devres_release(struct device *dev, void *res)
+{
+ struct pm_runtime_data *prd = res;
+
+ dev_dbg(dev, "__devres_release()\n");
+
+ if (test_bit(BIT_CLK_ENABLED, &prd->flags))
+ clk_disable(prd->clk);
+
+ if (test_bit(BIT_ACTIVE, &prd->flags))
+ clk_put(prd->clk);
+}
+
+static struct pm_runtime_data *__to_prd(struct device *dev)
+{
+ return devres_find(dev, __devres_release, NULL, NULL);
+}
+
+static void platform_pm_runtime_init(struct device *dev,
+ struct pm_runtime_data *prd)
+{
+ if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags)) {
+ prd->clk = clk_get(dev, NULL);
+ if (!IS_ERR(prd->clk)) {
+ set_bit(BIT_ACTIVE, &prd->flags);
+ dev_info(dev, "clocks managed by runtime pm\n");
+ }
+ }
+}
+
+static void platform_pm_runtime_bug(struct device *dev,
+ struct pm_runtime_data *prd)
+{
+ if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags))
+ dev_err(dev, "runtime pm suspend before resume\n");
+}
+
+int platform_pm_runtime_suspend(struct device *dev)
+{
+ struct pm_runtime_data *prd = __to_prd(dev);
+
+ dev_dbg(dev, "platform_pm_runtime_suspend()\n");
+
+ platform_pm_runtime_bug(dev, prd);
+
+ if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
+ clk_disable(prd->clk);
+ clear_bit(BIT_CLK_ENABLED, &prd->flags);
+ }
+
+ return 0;
+}
+
+int platform_pm_runtime_resume(struct device *dev)
+{
+ struct pm_runtime_data *prd = __to_prd(dev);
+
+ dev_dbg(dev, "platform_pm_runtime_resume()\n");
+
+ platform_pm_runtime_init(dev, prd);
+
+ if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
+ clk_enable(prd->clk);
+ set_bit(BIT_CLK_ENABLED, &prd->flags);
+ }
+
+ return 0;
+}
+
+int platform_pm_runtime_idle(struct device *dev)
+{
+ /* suspend synchronously to disable clocks immediately */
+ return pm_runtime_suspend(dev);
+}
+
+static int platform_bus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+ struct pm_runtime_data *prd;
+
+ dev_dbg(dev, "platform_bus_notify() %ld !\n", action);
+
+ if (action == BUS_NOTIFY_BIND_DRIVER) {
+ prd = devres_alloc(__devres_release, sizeof(*prd), GFP_KERNEL);
+ if (prd)
+ devres_add(dev, prd);
+ else
+ dev_err(dev, "unable to alloc memory for runtime pm\n");
+ }
+
+ return 0;
+}
+
+#else /* CONFIG_PM_RUNTIME */
+
+static int platform_bus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+ struct clk *clk;
+
+ dev_dbg(dev, "platform_bus_notify() %ld !\n", action);
+
+ switch (action) {
+ case BUS_NOTIFY_BIND_DRIVER:
+ clk = clk_get(dev, NULL);
+ if (!IS_ERR(clk)) {
+ clk_enable(clk);
+ clk_put(clk);
+ dev_info(dev, "runtime pm disabled, clock forced on\n");
+ }
+ break;
+ case BUS_NOTIFY_UNBOUND_DRIVER:
+ clk = clk_get(dev, NULL);
+ if (!IS_ERR(clk)) {
+ clk_disable(clk);
+ clk_put(clk);
+ dev_info(dev, "runtime pm disabled, clock forced off\n");
+ }
+ break;
+ }
+
+ return 0;
+}
+
+#endif /* CONFIG_PM_RUNTIME */
+
+static struct notifier_block platform_bus_notifier = {
+ .notifier_call = platform_bus_notify
+};
+
+static int __init sh_pm_runtime_init(void)
+{
+ bus_register_notifier(&platform_bus_type, &platform_bus_notifier);
+ return 0;
+}
+core_initcall(sh_pm_runtime_init);
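
The new file gates device clocks from the driver-model bind notifier and keeps per-device state in a devres record, so teardown on unbind is automatic. Seen from a driver, the contract is just the standard runtime-PM calls; a hedged usage sketch (driver name and probe body are illustrative):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* With the bus notifier above in place, the clock bound to this
 * device via clkdev is enabled on runtime resume and disabled on
 * runtime suspend; the driver merely brackets its hardware access.
 */
static int demo_probe(struct platform_device *pdev)
{
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);  /* clock on via platform_pm_runtime_resume() */

	/* ... program the hardware ... */

	pm_runtime_put_sync(&pdev->dev);  /* idle -> synchronous suspend, clock off */
	return 0;
}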
diff --git a/arch/arm/mach-tegra/board-harmony.c b/arch/arm/mach-tegra/board-harmony.c
index 05e78dd..9e305de 100644
--- a/arch/arm/mach-tegra/board-harmony.c
+++ b/arch/arm/mach-tegra/board-harmony.c
@@ -91,10 +91,8 @@
{
mi->nr_banks = 2;
mi->bank[0].start = PHYS_OFFSET;
- mi->bank[0].node = PHYS_TO_NID(PHYS_OFFSET);
mi->bank[0].size = 448 * SZ_1M;
mi->bank[1].start = SZ_512M;
- mi->bank[1].node = PHYS_TO_NID(SZ_512M);
mi->bank[1].size = SZ_512M;
}
diff --git a/arch/arm/mach-tegra/include/mach/vmalloc.h b/arch/arm/mach-tegra/include/mach/vmalloc.h
index 267a141..fd6aa65 100644
--- a/arch/arm/mach-tegra/include/mach/vmalloc.h
+++ b/arch/arm/mach-tegra/include/mach/vmalloc.h
@@ -23,6 +23,6 @@
#include <asm/sizes.h>
-#define VMALLOC_END 0xFE000000
+#define VMALLOC_END 0xFE000000UL
#endif
diff --git a/arch/arm/mach-u300/include/mach/gpio.h b/arch/arm/mach-u300/include/mach/gpio.h
index 7b1fc98..d5a71ab 100644
--- a/arch/arm/mach-u300/include/mach/gpio.h
+++ b/arch/arm/mach-u300/include/mach/gpio.h
@@ -273,6 +273,9 @@
extern int gpio_get_value(unsigned gpio);
extern void gpio_set_value(unsigned gpio, int value);
+#define gpio_get_value_cansleep gpio_get_value
+#define gpio_set_value_cansleep gpio_set_value
+
/* wrappers to sleep-enable the previous two functions */
static inline unsigned gpio_to_irq(unsigned gpio)
{
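
Aliasing the _cansleep accessors to the atomic ones is correct here because this controller is plain MMIO and never sleeps; consumers written against the sleepable GPIO API therefore work unchanged. A one-line consumer view (the GPIO number is made up):

/* On this platform the call below is exactly gpio_get_value(42);
 * code targeting sleepable GPIO expanders needs no changes.
 */
int demo_read_pin(void)
{
	return gpio_get_value_cansleep(42);	/* hypothetical GPIO number */
}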
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c
index 577df6c..71fb173 100644
--- a/arch/arm/mach-vexpress/ct-ca9x4.c
+++ b/arch/arm/mach-vexpress/ct-ca9x4.c
@@ -68,7 +68,7 @@
}
#if 0
-static void ct_ca9x4_timer_init(void)
+static void __init ct_ca9x4_timer_init(void)
{
writel(0, MMIO_P2V(CT_CA9X4_TIMER0) + TIMER_CTRL);
writel(0, MMIO_P2V(CT_CA9X4_TIMER1) + TIMER_CTRL);
@@ -222,12 +222,18 @@
.resource = pmu_resources,
};
-static void ct_ca9x4_init(void)
+static void __init ct_ca9x4_init(void)
{
int i;
#ifdef CONFIG_CACHE_L2X0
- l2x0_init(MMIO_P2V(CT_CA9X4_L2CC), 0x00000000, 0xfe0fffff);
+ void __iomem *l2x0_base = MMIO_P2V(CT_CA9X4_L2CC);
+
+ /* set RAM latencies to 1 cycle for this core tile. */
+ writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL);
+ writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL);
+
+ l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff);
#endif
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
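
l2x0_init() folds the requested bits into the controller's current auxiliary control value before enabling it, which is why the call now passes 0x00400000 rather than 0: the mask keeps the hardware defaults and the value ORs in the desired setting, and the latency registers must be written while the L2 is still disabled. The combining step, simplified (a restatement of cache-l2x0.c of this era, not the full init path):

/* aux_mask selects which hardware-default bits survive; aux_val is
 * then OR'd on top.
 */
static unsigned int l2x0_aux(unsigned int hw_default,
			     unsigned int aux_val, unsigned int aux_mask)
{
	return (hw_default & aux_mask) | aux_val;
}
/* e.g. l2x0_aux(readl(base + L2X0_AUX_CTRL), 0x00400000, 0xfe0fffff) */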
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index 817f0ad..7eaa232 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -48,7 +48,7 @@
}
-static void v2m_timer_init(void)
+static void __init v2m_timer_init(void)
{
writel(0, MMIO_P2V(V2M_TIMER0) + TIMER_CTRL);
writel(0, MMIO_P2V(V2M_TIMER1) + TIMER_CTRL);
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 33c3f57..a0a2928 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -398,7 +398,7 @@
# ARMv6k
config CPU_32v6K
bool "Support ARM V6K processor extensions" if !SMP
- depends on CPU_V6
+ depends on CPU_V6 || CPU_V7
default y if SMP && !(ARCH_MX3 || ARCH_OMAP2)
help
Say Y here if your ARMv6 processor supports the 'K' extension.
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index d073b64..724ba3b 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -885,8 +885,23 @@
if (ai_usermode & UM_SIGNAL)
force_sig(SIGBUS, current);
- else
- set_cr(cr_no_alignment);
+ else {
+ /*
+ * We're about to disable the alignment trap and return to
+ * user space. But if an interrupt occurs before actually
+ * reaching user space, then the IRQ vector entry code will
+ * notice that we were still in kernel space and therefore
+ * the alignment trap won't be re-enabled in that case as it
+ * is presumed to be always on from kernel space.
+ * Let's prevent that race by disabling interrupts here (they
+ * are disabled on the way back to user space anyway in
+ * entry-common.S) and disable the alignment trap only if
+ * there is no work pending for this thread.
+ */
+ raw_local_irq_disable();
+ if (!(current_thread_info()->flags & _TIF_WORK_MASK))
+ set_cr(cr_no_alignment);
+ }
return 0;
}
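
The window being closed: after set_cr(cr_no_alignment) but before the actual exception return, an IRQ taken in kernel mode would exit without re-arming the alignment trap, since the vector code treats the trap as always-on in kernel space. Disabling IRQs first and dropping the trap only when no thread work is pending makes the remaining path race-free. The ordering, restated as a sketch of the fixed tail (not a drop-in function):

static void finish_alignment_fixup(void)
{
	raw_local_irq_disable();	/* no IRQ between here and user return */
	if (!(current_thread_info()->flags & _TIF_WORK_MASK))
		set_cr(cr_no_alignment); /* safe: next event is the user return */
	/* entry-common.S re-enables IRQs on the way back to user space */
}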
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c704eed..4bc43e5 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -229,6 +229,8 @@
}
} while (size -= PAGE_SIZE);
+ dsb();
+
return (void *)c->vm_start;
}
return NULL;
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index ab50627..17e7b0b 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -204,8 +204,12 @@
/*
* Don't allow RAM to be mapped - this causes problems with ARMv6+
*/
- if (WARN_ON(pfn_valid(pfn)))
- return NULL;
+ if (pfn_valid(pfn)) {
+ printk(KERN_WARNING "BUG: Your driver calls ioremap() on system memory. This leads\n"
+ KERN_WARNING "to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n"
+ KERN_WARNING "will fail in the next kernel release. Please fix your driver.\n");
+ WARN_ON(1);
+ }
type = get_mem_type(mtype);
if (!type)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 6e1c4f6..e8ed9dc 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -15,6 +15,7 @@
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/sort.h>
+#include <linux/fs.h>
#include <asm/cputype.h>
#include <asm/sections.h>
@@ -246,6 +247,9 @@
.domain = DOMAIN_USER,
},
[MT_MEMORY] = {
+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+ L_PTE_WRITE | L_PTE_EXEC,
+ .prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
.domain = DOMAIN_KERNEL,
},
@@ -254,6 +258,9 @@
.domain = DOMAIN_KERNEL,
},
[MT_MEMORY_NONCACHED] = {
+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+ L_PTE_WRITE | L_PTE_EXEC | L_PTE_MT_BUFFERABLE,
+ .prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
.domain = DOMAIN_KERNEL,
},
@@ -411,9 +418,12 @@
* Enable CPU-specific coherency if supported.
* (Only available on XSC3 at the moment.)
*/
- if (arch_is_coherent() && cpu_is_xsc3())
+ if (arch_is_coherent() && cpu_is_xsc3()) {
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-
+ mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+ mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+ }
/*
* ARMv6 and above have extended page tables.
*/
@@ -438,7 +448,9 @@
mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+ mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+ mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
#endif
}
@@ -475,6 +487,8 @@
mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
+ mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+ mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
mem_types[MT_ROM].prot_sect |= cp->pmd;
switch (cp->pmd) {
@@ -498,6 +512,19 @@
}
}
+#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot)
+{
+ if (!pfn_valid(pfn))
+ return pgprot_noncached(vma_prot);
+ else if (file->f_flags & O_SYNC)
+ return pgprot_writecombine(vma_prot);
+ return vma_prot;
+}
+EXPORT_SYMBOL(phys_mem_access_prot);
+#endif
+
#define vectors_base() (vectors_high() ? 0xffff0000 : 0)
static void __init *early_alloc(unsigned long sz)
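
With CONFIG_ARM_DMA_MEM_BUFFERABLE a page must never carry two conflicting memory types, so mmap()s of /dev/mem are steered by this hook: invalid PFNs (true device memory) become uncached, O_SYNC mappings of RAM become write-combining, and everything else stays fully cacheable. The user-space side of that policy, as a hedged sketch (the offset is a made-up RAM-backed address):

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

/* Opening /dev/mem with O_SYNC and mapping a valid RAM page yields a
 * write-combining mapping under the hook above; without O_SYNC the
 * mapping would stay cacheable. Offset is illustrative only.
 */
int map_demo(void)
{
	int fd = open("/dev/mem", O_RDWR | O_SYNC);
	void *p;

	if (fd < 0)
		return -1;
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
		 fd, 0x80000000UL);	/* hypothetical RAM-backed offset */
	close(fd);
	return p == MAP_FAILED ? -1 : 0;
}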
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 6a8506d..197f21b 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -186,13 +186,14 @@
* It is assumed that:
* - cache type register is implemented
*/
-__v7_setup:
+__v7_ca9mp_setup:
#ifdef CONFIG_SMP
mrc p15, 0, r0, c1, c0, 1
tst r0, #(1 << 6) @ SMP/nAMP mode enabled?
orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and
mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting
#endif
+__v7_setup:
adr r12, __v7_setup_stack @ the local stack
stmia r12, {r0-r5, r7, r9, r11, lr}
bl v7_flush_dcache_all
@@ -201,11 +202,16 @@
mrc p15, 0, r0, c0, c0, 0 @ read main ID register
and r10, r0, #0xff000000 @ ARM?
teq r10, #0x41000000
- bne 2f
+ bne 3f
and r5, r0, #0x00f00000 @ variant
and r6, r0, #0x0000000f @ revision
- orr r0, r6, r5, lsr #20-4 @ combine variant and revision
+ orr r6, r6, r5, lsr #20-4 @ combine variant and revision
+ ubfx r0, r0, #4, #12 @ primary part number
+ /* Cortex-A8 Errata */
+ ldr r10, =0x00000c08 @ Cortex-A8 primary part number
+ teq r0, r10
+ bne 2f
#ifdef CONFIG_ARM_ERRATA_430973
teq r5, #0x00100000 @ only present in r1p*
mrceq p15, 0, r10, c1, c0, 1 @ read aux control register
@@ -213,21 +219,50 @@
mcreq p15, 0, r10, c1, c0, 1 @ write aux control register
#endif
#ifdef CONFIG_ARM_ERRATA_458693
- teq r0, #0x20 @ only present in r2p0
+ teq r6, #0x20 @ only present in r2p0
mrceq p15, 0, r10, c1, c0, 1 @ read aux control register
orreq r10, r10, #(1 << 5) @ set L1NEON to 1
orreq r10, r10, #(1 << 9) @ set PLDNOP to 1
mcreq p15, 0, r10, c1, c0, 1 @ write aux control register
#endif
#ifdef CONFIG_ARM_ERRATA_460075
- teq r0, #0x20 @ only present in r2p0
+ teq r6, #0x20 @ only present in r2p0
mrceq p15, 1, r10, c9, c0, 2 @ read L2 cache aux ctrl register
tsteq r10, #1 << 22
orreq r10, r10, #(1 << 22) @ set the Write Allocate disable bit
mcreq p15, 1, r10, c9, c0, 2 @ write the L2 cache aux ctrl register
#endif
+ b 3f
-2: mov r10, #0
+ /* Cortex-A9 Errata */
+2: ldr r10, =0x00000c09 @ Cortex-A9 primary part number
+ teq r0, r10
+ bne 3f
+#ifdef CONFIG_ARM_ERRATA_742230
+ cmp r6, #0x22 @ only present up to r2p2
+ mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register
+ orrle r10, r10, #1 << 4 @ set bit #4
+ mcrle p15, 0, r10, c15, c0, 1 @ write diagnostic register
+#endif
+#ifdef CONFIG_ARM_ERRATA_742231
+ teq r6, #0x20 @ present in r2p0
+ teqne r6, #0x21 @ present in r2p1
+ teqne r6, #0x22 @ present in r2p2
+ mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register
+ orreq r10, r10, #1 << 12 @ set bit #12
+ orreq r10, r10, #1 << 22 @ set bit #22
+ mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
+#endif
+#ifdef CONFIG_ARM_ERRATA_743622
+ teq r6, #0x20 @ present in r2p0
+ teqne r6, #0x21 @ present in r2p1
+ teqne r6, #0x22 @ present in r2p2
+ mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register
+ orreq r10, r10, #1 << 6 @ set bit #6
+ mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
+#endif
+
+3: mov r10, #0
#ifdef HARVARD_CACHE
mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate
#endif
@@ -323,6 +358,29 @@
.section ".proc.info.init", #alloc, #execinstr
+ .type __v7_ca9mp_proc_info, #object
+__v7_ca9mp_proc_info:
+ .long 0x410fc090 @ Required ID value
+ .long 0xff0ffff0 @ Mask for ID
+ .long PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ | \
+ PMD_FLAGS
+ .long PMD_TYPE_SECT | \
+ PMD_SECT_XN | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
+ b __v7_ca9mp_setup
+ .long cpu_arch_name
+ .long cpu_elf_name
+ .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS
+ .long cpu_v7_name
+ .long v7_processor_functions
+ .long v7wbi_tlb_fns
+ .long v6_user_fns
+ .long v7_cache_fns
+ .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
+
/*
* Match any ARMv7 processor core.
*/
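
The setup path now decodes the main ID register once — primary part number into r0, combined variant/revision into r6 — and dispatches to per-core errata blocks keyed on the part number (0xc08 for Cortex-A8, 0xc09 for Cortex-A9). The same decode in C, for reference (field layout per the ARMv7 ARM; the helper is illustrative):

/* MIDR fields: bits [15:4] primary part number, [23:20] variant
 * (major revision), [3:0] revision (minor). So 0x20 == r2p0.
 */
struct midr_info {
	unsigned int part;	/* 0xc08 = Cortex-A8, 0xc09 = Cortex-A9 */
	unsigned int rev;	/* (variant << 4) | revision */
};

static struct midr_info decode_midr(unsigned int midr)
{
	struct midr_info i;

	i.part = (midr >> 4) & 0xfff;
	i.rev = ((midr >> 16) & 0xf0) | (midr & 0xf);
	return i;
}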
diff --git a/arch/arm/oprofile/Makefile b/arch/arm/oprofile/Makefile
index e666eaf..b2215c6 100644
--- a/arch/arm/oprofile/Makefile
+++ b/arch/arm/oprofile/Makefile
@@ -6,4 +6,8 @@
oprofilefs.o oprofile_stats.o \
timer_int.o )
+ifeq ($(CONFIG_HW_PERF_EVENTS),y)
+DRIVER_OBJS += $(addprefix ../../../drivers/oprofile/, oprofile_perf.o)
+endif
+
oprofile-y := $(DRIVER_OBJS) common.o
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
index 0691176..8aa9744 100644
--- a/arch/arm/oprofile/common.c
+++ b/arch/arm/oprofile/common.c
@@ -25,138 +25,10 @@
#include <asm/ptrace.h>
#ifdef CONFIG_HW_PERF_EVENTS
-/*
- * Per performance monitor configuration as set via oprofilefs.
- */
-struct op_counter_config {
- unsigned long count;
- unsigned long enabled;
- unsigned long event;
- unsigned long unit_mask;
- unsigned long kernel;
- unsigned long user;
- struct perf_event_attr attr;
-};
-
-static int op_arm_enabled;
-static DEFINE_MUTEX(op_arm_mutex);
-
-static struct op_counter_config *counter_config;
-static struct perf_event **perf_events[nr_cpumask_bits];
-static int perf_num_counters;
-
-/*
- * Overflow callback for oprofile.
- */
-static void op_overflow_handler(struct perf_event *event, int unused,
- struct perf_sample_data *data, struct pt_regs *regs)
+char *op_name_from_perf_id(void)
{
- int id;
- u32 cpu = smp_processor_id();
+ enum arm_perf_pmu_ids id = armpmu_get_pmu_id();
- for (id = 0; id < perf_num_counters; ++id)
- if (perf_events[cpu][id] == event)
- break;
-
- if (id != perf_num_counters)
- oprofile_add_sample(regs, id);
- else
- pr_warning("oprofile: ignoring spurious overflow "
- "on cpu %u\n", cpu);
-}
-
-/*
- * Called by op_arm_setup to create perf attributes to mirror the oprofile
- * settings in counter_config. Attributes are created as `pinned' events and
- * so are permanently scheduled on the PMU.
- */
-static void op_perf_setup(void)
-{
- int i;
- u32 size = sizeof(struct perf_event_attr);
- struct perf_event_attr *attr;
-
- for (i = 0; i < perf_num_counters; ++i) {
- attr = &counter_config[i].attr;
- memset(attr, 0, size);
- attr->type = PERF_TYPE_RAW;
- attr->size = size;
- attr->config = counter_config[i].event;
- attr->sample_period = counter_config[i].count;
- attr->pinned = 1;
- }
-}
-
-static int op_create_counter(int cpu, int event)
-{
- int ret = 0;
- struct perf_event *pevent;
-
- if (!counter_config[event].enabled || (perf_events[cpu][event] != NULL))
- return ret;
-
- pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
- cpu, -1,
- op_overflow_handler);
-
- if (IS_ERR(pevent)) {
- ret = PTR_ERR(pevent);
- } else if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
- pr_warning("oprofile: failed to enable event %d "
- "on CPU %d\n", event, cpu);
- ret = -EBUSY;
- } else {
- perf_events[cpu][event] = pevent;
- }
-
- return ret;
-}
-
-static void op_destroy_counter(int cpu, int event)
-{
- struct perf_event *pevent = perf_events[cpu][event];
-
- if (pevent) {
- perf_event_release_kernel(pevent);
- perf_events[cpu][event] = NULL;
- }
-}
-
-/*
- * Called by op_arm_start to create active perf events based on the
- * perviously configured attributes.
- */
-static int op_perf_start(void)
-{
- int cpu, event, ret = 0;
-
- for_each_online_cpu(cpu) {
- for (event = 0; event < perf_num_counters; ++event) {
- ret = op_create_counter(cpu, event);
- if (ret)
- goto out;
- }
- }
-
-out:
- return ret;
-}
-
-/*
- * Called by op_arm_stop at the end of a profiling run.
- */
-static void op_perf_stop(void)
-{
- int cpu, event;
-
- for_each_online_cpu(cpu)
- for (event = 0; event < perf_num_counters; ++event)
- op_destroy_counter(cpu, event);
-}
-
-
-static char *op_name_from_perf_id(enum arm_perf_pmu_ids id)
-{
switch (id) {
case ARM_PERF_PMU_ID_XSCALE1:
return "arm/xscale1";
@@ -175,116 +47,6 @@
}
}
-static int op_arm_create_files(struct super_block *sb, struct dentry *root)
-{
- unsigned int i;
-
- for (i = 0; i < perf_num_counters; i++) {
- struct dentry *dir;
- char buf[4];
-
- snprintf(buf, sizeof buf, "%d", i);
- dir = oprofilefs_mkdir(sb, root, buf);
- oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
- oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
- oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
- oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
- oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
- oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
- }
-
- return 0;
-}
-
-static int op_arm_setup(void)
-{
- spin_lock(&oprofilefs_lock);
- op_perf_setup();
- spin_unlock(&oprofilefs_lock);
- return 0;
-}
-
-static int op_arm_start(void)
-{
- int ret = -EBUSY;
-
- mutex_lock(&op_arm_mutex);
- if (!op_arm_enabled) {
- ret = 0;
- op_perf_start();
- op_arm_enabled = 1;
- }
- mutex_unlock(&op_arm_mutex);
- return ret;
-}
-
-static void op_arm_stop(void)
-{
- mutex_lock(&op_arm_mutex);
- if (op_arm_enabled)
- op_perf_stop();
- op_arm_enabled = 0;
- mutex_unlock(&op_arm_mutex);
-}
-
-#ifdef CONFIG_PM
-static int op_arm_suspend(struct platform_device *dev, pm_message_t state)
-{
- mutex_lock(&op_arm_mutex);
- if (op_arm_enabled)
- op_perf_stop();
- mutex_unlock(&op_arm_mutex);
- return 0;
-}
-
-static int op_arm_resume(struct platform_device *dev)
-{
- mutex_lock(&op_arm_mutex);
- if (op_arm_enabled && op_perf_start())
- op_arm_enabled = 0;
- mutex_unlock(&op_arm_mutex);
- return 0;
-}
-
-static struct platform_driver oprofile_driver = {
- .driver = {
- .name = "arm-oprofile",
- },
- .resume = op_arm_resume,
- .suspend = op_arm_suspend,
-};
-
-static struct platform_device *oprofile_pdev;
-
-static int __init init_driverfs(void)
-{
- int ret;
-
- ret = platform_driver_register(&oprofile_driver);
- if (ret)
- goto out;
-
- oprofile_pdev = platform_device_register_simple(
- oprofile_driver.driver.name, 0, NULL, 0);
- if (IS_ERR(oprofile_pdev)) {
- ret = PTR_ERR(oprofile_pdev);
- platform_driver_unregister(&oprofile_driver);
- }
-
-out:
- return ret;
-}
-
-static void exit_driverfs(void)
-{
- platform_device_unregister(oprofile_pdev);
- platform_driver_unregister(&oprofile_driver);
-}
-#else
-static int __init init_driverfs(void) { return 0; }
-#define exit_driverfs() do { } while (0)
-#endif /* CONFIG_PM */
-
static int report_trace(struct stackframe *frame, void *d)
{
unsigned int *depth = d;
@@ -349,72 +111,14 @@
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
- int cpu, ret = 0;
-
- perf_num_counters = armpmu_get_max_events();
-
- counter_config = kcalloc(perf_num_counters,
- sizeof(struct op_counter_config), GFP_KERNEL);
-
- if (!counter_config) {
- pr_info("oprofile: failed to allocate %d "
- "counters\n", perf_num_counters);
- return -ENOMEM;
- }
-
- ret = init_driverfs();
- if (ret) {
- kfree(counter_config);
- return ret;
- }
-
- for_each_possible_cpu(cpu) {
- perf_events[cpu] = kcalloc(perf_num_counters,
- sizeof(struct perf_event *), GFP_KERNEL);
- if (!perf_events[cpu]) {
- pr_info("oprofile: failed to allocate %d perf events "
- "for cpu %d\n", perf_num_counters, cpu);
- while (--cpu >= 0)
- kfree(perf_events[cpu]);
- return -ENOMEM;
- }
- }
-
ops->backtrace = arm_backtrace;
- ops->create_files = op_arm_create_files;
- ops->setup = op_arm_setup;
- ops->start = op_arm_start;
- ops->stop = op_arm_stop;
- ops->shutdown = op_arm_stop;
- ops->cpu_type = op_name_from_perf_id(armpmu_get_pmu_id());
- if (!ops->cpu_type)
- ret = -ENODEV;
- else
- pr_info("oprofile: using %s\n", ops->cpu_type);
-
- return ret;
+ return oprofile_perf_init(ops);
}
-void oprofile_arch_exit(void)
+void __exit oprofile_arch_exit(void)
{
- int cpu, id;
- struct perf_event *event;
-
- if (*perf_events) {
- exit_driverfs();
- for_each_possible_cpu(cpu) {
- for (id = 0; id < perf_num_counters; ++id) {
- event = perf_events[cpu][id];
- if (event != NULL)
- perf_event_release_kernel(event);
- }
- kfree(perf_events[cpu]);
- }
- }
-
- if (counter_config)
- kfree(counter_config);
+ oprofile_perf_exit();
}
#else
int __init oprofile_arch_init(struct oprofile_operations *ops)
@@ -422,5 +126,5 @@
pr_info("oprofile: hardware counters not available\n");
return -ENODEV;
}
-void oprofile_arch_exit(void) {}
+void __exit oprofile_arch_exit(void) {}
#endif /* CONFIG_HW_PERF_EVENTS */
diff --git a/arch/arm/plat-mxc/Kconfig b/arch/arm/plat-mxc/Kconfig
index 0527e65..6785db4 100644
--- a/arch/arm/plat-mxc/Kconfig
+++ b/arch/arm/plat-mxc/Kconfig
@@ -43,6 +43,7 @@
config ARCH_MX5
bool "MX5-based"
select CPU_V7
+ select ARM_L1_CACHE_SHIFT_6
help
This enables support for systems based on the Freescale i.MX51 family
diff --git a/arch/arm/plat-mxc/include/mach/eukrea-baseboards.h b/arch/arm/plat-mxc/include/mach/eukrea-baseboards.h
index 634e3f4..656acb4 100644
--- a/arch/arm/plat-mxc/include/mach/eukrea-baseboards.h
+++ b/arch/arm/plat-mxc/include/mach/eukrea-baseboards.h
@@ -37,9 +37,9 @@
* mach-mx5/eukrea_mbimx51-baseboard.c for cpuimx51
*/
-extern void eukrea_mbimx25_baseboard_init(void);
+extern void eukrea_mbimxsd25_baseboard_init(void);
extern void eukrea_mbimx27_baseboard_init(void);
-extern void eukrea_mbimx35_baseboard_init(void);
+extern void eukrea_mbimxsd35_baseboard_init(void);
extern void eukrea_mbimx51_baseboard_init(void);
#endif
diff --git a/arch/arm/plat-mxc/tzic.c b/arch/arm/plat-mxc/tzic.c
index b3da9aa..3703ab2 100644
--- a/arch/arm/plat-mxc/tzic.c
+++ b/arch/arm/plat-mxc/tzic.c
@@ -164,8 +164,9 @@
return -EAGAIN;
for (i = 0; i < 4; i++) {
- v = is_idle ? __raw_readl(TZIC_ENSET0(i)) : wakeup_intr[i];
- __raw_writel(v, TZIC_WAKEUP0(i));
+ v = is_idle ? __raw_readl(tzic_base + TZIC_ENSET0(i)) :
+ wakeup_intr[i];
+ __raw_writel(v, tzic_base + TZIC_WAKEUP0(i));
}
return 0;
diff --git a/arch/arm/plat-nomadik/timer.c b/arch/arm/plat-nomadik/timer.c
index ea3ca86..aedf9c1 100644
--- a/arch/arm/plat-nomadik/timer.c
+++ b/arch/arm/plat-nomadik/timer.c
@@ -1,5 +1,5 @@
/*
- * linux/arch/arm/mach-nomadik/timer.c
+ * linux/arch/arm/plat-nomadik/timer.c
*
* Copyright (C) 2008 STMicroelectronics
* Copyright (C) 2010 Alessandro Rubini
@@ -75,7 +75,7 @@
cr = readl(mtu_base + MTU_CR(1));
writel(0, mtu_base + MTU_LR(1));
writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(1));
- writel(0x2, mtu_base + MTU_IMSC);
+ writel(1 << 1, mtu_base + MTU_IMSC);
break;
case CLOCK_EVT_MODE_SHUTDOWN:
case CLOCK_EVT_MODE_UNUSED:
@@ -131,25 +131,23 @@
{
unsigned long rate;
struct clk *clk0;
- struct clk *clk1;
- u32 cr;
+ u32 cr = MTU_CRn_32BITS;
clk0 = clk_get_sys("mtu0", NULL);
BUG_ON(IS_ERR(clk0));
- clk1 = clk_get_sys("mtu1", NULL);
- BUG_ON(IS_ERR(clk1));
-
clk_enable(clk0);
- clk_enable(clk1);
/*
- * Tick rate is 2.4MHz for Nomadik and 110MHz for ux500:
- * use a divide-by-16 counter if it's more than 16MHz
+	 * Tick rate is 2.4 MHz for Nomadik and 2.4 MHz, 100 MHz or 133 MHz
+	 * for ux500.
+	 * Use a divide-by-16 counter if the tick rate is more than 32 MHz.
+	 * At 32 MHz, the timer (with a 32-bit counter) can be programmed
+	 * to wake up at most 127 s ahead in time. Dividing a 2.4 MHz timer
+	 * by 16 gives too low a timer resolution.
*/
- cr = MTU_CRn_32BITS;;
rate = clk_get_rate(clk0);
- if (rate > 16 << 20) {
+ if (rate > 32000000) {
rate /= 16;
cr |= MTU_CRn_PRESCALE_16;
} else {
@@ -170,15 +168,8 @@
pr_err("timer: failed to initialize clock source %s\n",
nmdk_clksrc.name);
- /* Timer 1 is used for events, fix according to rate */
- cr = MTU_CRn_32BITS;
- rate = clk_get_rate(clk1);
- if (rate > 16 << 20) {
- rate /= 16;
- cr |= MTU_CRn_PRESCALE_16;
- } else {
- cr |= MTU_CRn_PRESCALE_1;
- }
+ /* Timer 1 is used for events */
+
clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE);
writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */
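
The 32 MHz threshold falls out of the counter width: a 32-bit counter at 32 MHz wraps after 2^32 / 32 000 000 ≈ 134 s (the clockevents safety margin trims that to the ~127 s quoted in the comment above), while prescaling a 2.4 MHz clock by 16 would only coarsen resolution to about 6.7 µs per tick. The arithmetic, runnable:

#include <stdio.h>

/* Worked numbers behind the prescaler threshold in the comment above. */
int main(void)
{
	double rate = 32000000.0;		  /* 32 MHz tick */
	double max_s = 4294967296.0 / rate;	  /* 2^32 ticks ~= 134 s */
	double res_us = (16.0 / 2400000.0) * 1e6; /* 2.4 MHz / 16 ~= 6.67 us */

	printf("max wake-up %.1f s, divided resolution %.2f us\n",
	       max_s, res_us);
	return 0;
}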
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index e39a417..a92cb49 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -33,7 +33,7 @@
config OMAP_DEBUG_LEDS
bool
depends on OMAP_DEBUG_DEVICES
- default y if LEDS
+ default y if LEDS_CLASS
config OMAP_RESET_CLOCKS
bool "Reset unused clocks during boot"
diff --git a/arch/arm/plat-omap/include/plat/smp.h b/arch/arm/plat-omap/include/plat/smp.h
index 6a3ff65..5177a9c 100644
--- a/arch/arm/plat-omap/include/plat/smp.h
+++ b/arch/arm/plat-omap/include/plat/smp.h
@@ -19,13 +19,6 @@
#include <asm/hardware/gic.h>
-/*
- * set_event() is used to wake up secondary core from wfe using sev. ROM
- * code puts the second core into wfe(standby).
- *
- */
-#define set_event() __asm__ __volatile__ ("sev" : : : "memory")
-
/* Needed for secondary core boot */
extern void omap_secondary_startup(void);
extern u32 omap_modify_auxcoreboot0(u32 set_mask, u32 clear_mask);
diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c
index a202a2c..6cd151b 100644
--- a/arch/arm/plat-omap/iommu.c
+++ b/arch/arm/plat-omap/iommu.c
@@ -320,6 +320,7 @@
if ((start <= da) && (da < start + bytes)) {
dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
__func__, start, da, bytes);
+ iotlb_load_cr(obj, &cr);
iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
}
}
diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c
index e31496e..0c8612f 100644
--- a/arch/arm/plat-omap/mcbsp.c
+++ b/arch/arm/plat-omap/mcbsp.c
@@ -156,7 +156,7 @@
/* Writing zero to RSYNC_ERR clears the IRQ */
MCBSP_WRITE(mcbsp_rx, SPCR1, MCBSP_READ_CACHE(mcbsp_rx, SPCR1));
} else {
- complete(&mcbsp_rx->tx_irq_completion);
+ complete(&mcbsp_rx->rx_irq_completion);
}
return IRQ_HANDLED;
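
The one-character mcbsp fix above untangles a copy-and-paste bug: the RX interrupt handler was completing the TX completion. A sketch of the pairing it restores, with hypothetical names:

	#include <linux/completion.h>
	#include <linux/interrupt.h>

	static DECLARE_COMPLETION(rx_done);     /* hypothetical per-direction completions */
	static DECLARE_COMPLETION(tx_done);

	static irqreturn_t rx_irq_handler(int irq, void *dev_id)
	{
	        complete(&rx_done);             /* wake RX waiters only, not &tx_done */
	        return IRQ_HANDLED;
	}
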
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index 226b2e8..10b3b4c 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -220,20 +220,7 @@
if (omap_sram_size == 0)
return;
- if (cpu_is_omap24xx()) {
- omap_sram_io_desc[0].virtual = OMAP2_SRAM_VA;
-
- base = OMAP2_SRAM_PA;
- base = ROUND_DOWN(base, PAGE_SIZE);
- omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
- }
-
if (cpu_is_omap34xx()) {
- omap_sram_io_desc[0].virtual = OMAP3_SRAM_VA;
- base = OMAP3_SRAM_PA;
- base = ROUND_DOWN(base, PAGE_SIZE);
- omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
-
/*
* SRAM must be marked as non-cached on OMAP3 since the
* CORE DPLL M2 divider change code (in SRAM) runs with the
@@ -244,13 +231,11 @@
omap_sram_io_desc[0].type = MT_MEMORY_NONCACHED;
}
- if (cpu_is_omap44xx()) {
- omap_sram_io_desc[0].virtual = OMAP4_SRAM_VA;
- base = OMAP4_SRAM_PA;
- base = ROUND_DOWN(base, PAGE_SIZE);
- omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
- }
- omap_sram_io_desc[0].length = 1024 * 1024; /* Use section desc */
+ omap_sram_io_desc[0].virtual = omap_sram_base;
+ base = omap_sram_start;
+ base = ROUND_DOWN(base, PAGE_SIZE);
+ omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
+ omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size, PAGE_SIZE);
iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc));
printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n",
diff --git a/arch/arm/plat-pxa/pwm.c b/arch/arm/plat-pxa/pwm.c
index 0732c6c..ef32686 100644
--- a/arch/arm/plat-pxa/pwm.c
+++ b/arch/arm/plat-pxa/pwm.c
@@ -176,7 +176,7 @@
static int __devinit pwm_probe(struct platform_device *pdev)
{
- struct platform_device_id *id = platform_get_device_id(pdev);
+ const struct platform_device_id *id = platform_get_device_id(pdev);
struct pwm_device *pwm, *secondary = NULL;
struct resource *r;
int ret = 0;
diff --git a/arch/arm/plat-s5p/dev-fimc0.c b/arch/arm/plat-s5p/dev-fimc0.c
index d3f1a9b..608770f 100644
--- a/arch/arm/plat-s5p/dev-fimc0.c
+++ b/arch/arm/plat-s5p/dev-fimc0.c
@@ -10,6 +10,7 @@
*/
#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
@@ -18,7 +19,7 @@
static struct resource s5p_fimc0_resource[] = {
[0] = {
.start = S5P_PA_FIMC0,
- .end = S5P_PA_FIMC0 + SZ_1M - 1,
+ .end = S5P_PA_FIMC0 + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -28,9 +29,15 @@
},
};
+static u64 s5p_fimc0_dma_mask = DMA_BIT_MASK(32);
+
struct platform_device s5p_device_fimc0 = {
.name = "s5p-fimc",
.id = 0,
.num_resources = ARRAY_SIZE(s5p_fimc0_resource),
.resource = s5p_fimc0_resource,
+ .dev = {
+ .dma_mask = &s5p_fimc0_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
};
diff --git a/arch/arm/plat-s5p/dev-fimc1.c b/arch/arm/plat-s5p/dev-fimc1.c
index 41bd698..76e3a97 100644
--- a/arch/arm/plat-s5p/dev-fimc1.c
+++ b/arch/arm/plat-s5p/dev-fimc1.c
@@ -10,6 +10,7 @@
*/
#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
@@ -18,7 +19,7 @@
static struct resource s5p_fimc1_resource[] = {
[0] = {
.start = S5P_PA_FIMC1,
- .end = S5P_PA_FIMC1 + SZ_1M - 1,
+ .end = S5P_PA_FIMC1 + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -28,9 +29,15 @@
},
};
+static u64 s5p_fimc1_dma_mask = DMA_BIT_MASK(32);
+
struct platform_device s5p_device_fimc1 = {
.name = "s5p-fimc",
.id = 1,
.num_resources = ARRAY_SIZE(s5p_fimc1_resource),
.resource = s5p_fimc1_resource,
+ .dev = {
+ .dma_mask = &s5p_fimc1_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
};
diff --git a/arch/arm/plat-s5p/dev-fimc2.c b/arch/arm/plat-s5p/dev-fimc2.c
index dfddeda..24d2981 100644
--- a/arch/arm/plat-s5p/dev-fimc2.c
+++ b/arch/arm/plat-s5p/dev-fimc2.c
@@ -10,6 +10,7 @@
*/
#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
@@ -18,7 +19,7 @@
static struct resource s5p_fimc2_resource[] = {
[0] = {
.start = S5P_PA_FIMC2,
- .end = S5P_PA_FIMC2 + SZ_1M - 1,
+ .end = S5P_PA_FIMC2 + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -28,9 +29,15 @@
},
};
+static u64 s5p_fimc2_dma_mask = DMA_BIT_MASK(32);
+
struct platform_device s5p_device_fimc2 = {
.name = "s5p-fimc",
.id = 2,
.num_resources = ARRAY_SIZE(s5p_fimc2_resource),
.resource = s5p_fimc2_resource,
+ .dev = {
+ .dma_mask = &s5p_fimc2_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
};
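
The three FIMC hunks above repeat one pattern: a statically declared platform device has no bus code to fill in a DMA mask, so the board file must supply both the dma_mask storage and the coherent_dma_mask itself, or the driver's dma_map_*() calls will fail. A minimal sketch with a hypothetical device:

	#include <linux/dma-mapping.h>
	#include <linux/platform_device.h>

	static u64 example_dma_mask = DMA_BIT_MASK(32); /* storage the device points at */

	static struct platform_device example_device = {
	        .name = "example-dev",          /* hypothetical device name */
	        .id   = -1,
	        .dev  = {
	                .dma_mask          = &example_dma_mask,
	                .coherent_dma_mask = DMA_BIT_MASK(32),  /* 32-bit addressable */
	        },
	};
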
diff --git a/arch/arm/plat-s5p/include/plat/map-s5p.h b/arch/arm/plat-s5p/include/plat/map-s5p.h
index 54e9fb9..c4ff88b 100644
--- a/arch/arm/plat-s5p/include/plat/map-s5p.h
+++ b/arch/arm/plat-s5p/include/plat/map-s5p.h
@@ -17,6 +17,7 @@
#define S5P_VA_GPIO S3C_ADDR(0x00500000)
#define S5P_VA_SYSTIMER S3C_ADDR(0x01200000)
#define S5P_VA_SROMC S3C_ADDR(0x01100000)
+#define S5P_VA_SYSRAM S3C_ADDR(0x01180000)
#define S5P_VA_COMBINER_BASE S3C_ADDR(0x00600000)
#define S5P_VA_COMBINER(x) (S5P_VA_COMBINER_BASE + ((x) >> 2) * 0x10)
@@ -29,6 +30,7 @@
#define S5P_VA_GIC_DIST S5P_VA_COREPERI(0x1000)
#define S5P_VA_L2CC S3C_ADDR(0x00900000)
+#define S5P_VA_CMU S3C_ADDR(0x00920000)
#define S5P_VA_UART(x) (S3C_VA_UART + ((x) * S3C_UART_OFFSET))
#define S5P_VA_UART0 S5P_VA_UART(0)
diff --git a/arch/arm/plat-samsung/adc.c b/arch/arm/plat-samsung/adc.c
index 04d9521..e8f2be2 100644
--- a/arch/arm/plat-samsung/adc.c
+++ b/arch/arm/plat-samsung/adc.c
@@ -435,7 +435,6 @@
static int s3c_adc_resume(struct platform_device *pdev)
{
struct adc_device *adc = platform_get_drvdata(pdev);
- unsigned long flags;
clk_enable(adc->clk);
enable_irq(adc->irq);
diff --git a/arch/arm/plat-samsung/clock.c b/arch/arm/plat-samsung/clock.c
index 90a2051..e8d20b0 100644
--- a/arch/arm/plat-samsung/clock.c
+++ b/arch/arm/plat-samsung/clock.c
@@ -48,6 +48,9 @@
#include <plat/clock.h>
#include <plat/cpu.h>
+#include <linux/serial_core.h>
+#include <plat/regs-serial.h> /* for s3c24xx_uart_devs */
+
/* clock information */
static LIST_HEAD(clocks);
@@ -65,6 +68,28 @@
return 0;
}
+static int dev_is_s3c_uart(struct device *dev)
+{
+ struct platform_device **pdev = s3c24xx_uart_devs;
+ int i;
+ for (i = 0; i < ARRAY_SIZE(s3c24xx_uart_devs); i++, pdev++)
+ if (*pdev && dev == &(*pdev)->dev)
+ return 1;
+ return 0;
+}
+
+/*
+ * Serial drivers call get_clock() very early, before the platform bus
+ * has been set up; this requires a special check to let them get
+ * a proper clock.
+ */
+
+static int dev_is_platform_device(struct device *dev)
+{
+ return dev->bus == &platform_bus_type ||
+ (dev->bus == NULL && dev_is_s3c_uart(dev));
+}
+
/* Clock API calls */
struct clk *clk_get(struct device *dev, const char *id)
@@ -73,7 +98,7 @@
struct clk *clk = ERR_PTR(-ENOENT);
int idno;
- if (dev == NULL || dev->bus != &platform_bus_type)
+ if (dev == NULL || !dev_is_platform_device(dev))
idno = -1;
else
idno = to_platform_device(dev)->id;
diff --git a/arch/arm/plat-samsung/gpio-config.c b/arch/arm/plat-samsung/gpio-config.c
index 57b68a5..e3d41ea 100644
--- a/arch/arm/plat-samsung/gpio-config.c
+++ b/arch/arm/plat-samsung/gpio-config.c
@@ -273,13 +273,13 @@
if (!chip)
return -EINVAL;
- off = chip->chip.base - pin;
+ off = pin - chip->chip.base;
shift = off * 2;
reg = chip->base + 0x0C;
drvstr = __raw_readl(reg);
- drvstr = 0xffff & (0x3 << shift);
drvstr = drvstr >> shift;
+ drvstr &= 0x3;
return (__force s5p_gpio_drvstr_t)drvstr;
}
@@ -296,11 +296,12 @@
if (!chip)
return -EINVAL;
- off = chip->chip.base - pin;
+ off = pin - chip->chip.base;
shift = off * 2;
reg = chip->base + 0x0C;
tmp = __raw_readl(reg);
+ tmp &= ~(0x3 << shift);
tmp |= drvstr << shift;
__raw_writel(tmp, reg);
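
The two gpio-config fixes above correct an inverted offset (base - pin instead of pin - base) and a read-modify-write that never cleared the old field before ORing in the new one. A sketch of the corrected 2-bit field update:

	/* Write a 2-bit value at pin offset 'off' (pin - chip base) within 'reg'. */
	static void set_2bit_field(void __iomem *reg, unsigned int off, u32 val)
	{
	        unsigned int shift = off * 2;
	        u32 tmp = __raw_readl(reg);

	        tmp &= ~(0x3 << shift);         /* clear the old field first */
	        tmp |= (val & 0x3) << shift;    /* then OR in the new value */
	        __raw_writel(tmp, reg);
	}
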
diff --git a/arch/arm/plat-samsung/include/plat/gpio-cfg.h b/arch/arm/plat-samsung/include/plat/gpio-cfg.h
index db4112c..1c6b929 100644
--- a/arch/arm/plat-samsung/include/plat/gpio-cfg.h
+++ b/arch/arm/plat-samsung/include/plat/gpio-cfg.h
@@ -143,12 +143,12 @@
/* Define values for the drvstr available for each gpio pin.
*
 * These values control the output signal driver strength,
- * configurable on most pins on the S5C series.
+ * configurable on most pins on the S5P series.
*/
-#define S5P_GPIO_DRVSTR_LV1 ((__force s5p_gpio_drvstr_t)0x00)
-#define S5P_GPIO_DRVSTR_LV2 ((__force s5p_gpio_drvstr_t)0x01)
-#define S5P_GPIO_DRVSTR_LV3 ((__force s5p_gpio_drvstr_t)0x10)
-#define S5P_GPIO_DRVSTR_LV4 ((__force s5p_gpio_drvstr_t)0x11)
+#define S5P_GPIO_DRVSTR_LV1 ((__force s5p_gpio_drvstr_t)0x0)
+#define S5P_GPIO_DRVSTR_LV2 ((__force s5p_gpio_drvstr_t)0x2)
+#define S5P_GPIO_DRVSTR_LV3 ((__force s5p_gpio_drvstr_t)0x1)
+#define S5P_GPIO_DRVSTR_LV4 ((__force s5p_gpio_drvstr_t)0x3)
/**
 * s5c_gpio_get_drvstr() - get the driver strength value of a gpio pin
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 48cbdcb..55590a4 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -12,7 +12,7 @@
#
# http://www.arm.linux.org.uk/developer/machines/?action=new
#
-# Last update: Mon Jul 12 21:10:14 2010
+# Last update: Thu Sep 9 22:43:01 2010
#
# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
#
@@ -2622,7 +2622,7 @@
gw2388 MACH_GW2388 GW2388 2635
jadecpu MACH_JADECPU JADECPU 2636
carlisle MACH_CARLISLE CARLISLE 2637
-lux_sf9 MACH_LUX_SFT9 LUX_SFT9 2638
+lux_sf9 MACH_LUX_SF9 LUX_SF9 2638
nemid_tb MACH_NEMID_TB NEMID_TB 2639
terrier MACH_TERRIER TERRIER 2640
turbot MACH_TURBOT TURBOT 2641
@@ -2950,3 +2950,97 @@
netviz MACH_NETVIZ NETVIZ 2964
flexibity MACH_FLEXIBITY FLEXIBITY 2965
wlan_computer MACH_WLAN_COMPUTER WLAN_COMPUTER 2966
+lpc24xx MACH_LPC24XX LPC24XX 2967
+spica MACH_SPICA SPICA 2968
+gpsdisplay MACH_GPSDISPLAY GPSDISPLAY 2969
+bipnet MACH_BIPNET BIPNET 2970
+overo_ctu_inertial MACH_OVERO_CTU_INERTIAL OVERO_CTU_INERTIAL 2971
+davinci_dm355_mmm MACH_DAVINCI_DM355_MMM DAVINCI_DM355_MMM 2972
+pc9260_v2 MACH_PC9260_V2 PC9260_V2 2973
+ptx7545 MACH_PTX7545 PTX7545 2974
+tm_efdc MACH_TM_EFDC TM_EFDC 2975
+omap3_waldo1 MACH_OMAP3_WALDO1 OMAP3_WALDO1 2977
+flyer MACH_FLYER FLYER 2978
+tornado3240 MACH_TORNADO3240 TORNADO3240 2979
+soli_01 MACH_SOLI_01 SOLI_01 2980
+omapl138_europalc MACH_OMAPL138_EUROPALC OMAPL138_EUROPALC 2981
+helios_v1 MACH_HELIOS_V1 HELIOS_V1 2982
+netspace_lite_v2 MACH_NETSPACE_LITE_V2 NETSPACE_LITE_V2 2983
+ssc MACH_SSC SSC 2984
+premierwave_en MACH_PREMIERWAVE_EN PREMIERWAVE_EN 2985
+wasabi MACH_WASABI WASABI 2986
+vivow MACH_VIVOW VIVOW 2987
+mx50_rdp MACH_MX50_RDP MX50_RDP 2988
+universal MACH_UNIVERSAL UNIVERSAL 2989
+real6410 MACH_REAL6410 REAL6410 2990
+spx_sakura MACH_SPX_SAKURA SPX_SAKURA 2991
+ij3k_2440 MACH_IJ3K_2440 IJ3K_2440 2992
+omap3_bc10 MACH_OMAP3_BC10 OMAP3_BC10 2993
+thebe MACH_THEBE THEBE 2994
+rv082 MACH_RV082 RV082 2995
+armlguest MACH_ARMLGUEST ARMLGUEST 2996
+tjinc1000 MACH_TJINC1000 TJINC1000 2997
+dockstar MACH_DOCKSTAR DOCKSTAR 2998
+ax8008 MACH_AX8008 AX8008 2999
+gnet_sgce MACH_GNET_SGCE GNET_SGCE 3000
+pxwnas_500_1000 MACH_PXWNAS_500_1000 PXWNAS_500_1000 3001
+ea20 MACH_EA20 EA20 3002
+awm2 MACH_AWM2 AWM2 3003
+ti8148evm MACH_TI8148EVM TI8148EVM 3004
+tegra_seaboard MACH_TEGRA_SEABOARD TEGRA_SEABOARD 3005
+linkstation_chlv2 MACH_LINKSTATION_CHLV2 LINKSTATION_CHLV2 3006
+tera_pro2_rack MACH_TERA_PRO2_RACK TERA_PRO2_RACK 3007
+rubys MACH_RUBYS RUBYS 3008
+aquarius MACH_AQUARIUS AQUARIUS 3009
+mx53_ard MACH_MX53_ARD MX53_ARD 3010
+mx53_smd MACH_MX53_SMD MX53_SMD 3011
+lswxl MACH_LSWXL LSWXL 3012
+dove_avng_v3 MACH_DOVE_AVNG_V3 DOVE_AVNG_V3 3013
+sdi_ess_9263 MACH_SDI_ESS_9263 SDI_ESS_9263 3014
+jocpu550 MACH_JOCPU550 JOCPU550 3015
+msm8x60_rumi3 MACH_MSM8X60_RUMI3 MSM8X60_RUMI3 3016
+msm8x60_ffa MACH_MSM8X60_FFA MSM8X60_FFA 3017
+yanomami MACH_YANOMAMI YANOMAMI 3018
+gta04 MACH_GTA04 GTA04 3019
+cm_a510 MACH_CM_A510 CM_A510 3020
+omap3_rfs200 MACH_OMAP3_RFS200 OMAP3_RFS200 3021
+kx33xx MACH_KX33XX KX33XX 3022
+ptx7510 MACH_PTX7510 PTX7510 3023
+top9000 MACH_TOP9000 TOP9000 3024
+teenote MACH_TEENOTE TEENOTE 3025
+ts3 MACH_TS3 TS3 3026
+a0 MACH_A0 A0 3027
+fsm9xxx_surf MACH_FSM9XXX_SURF FSM9XXX_SURF 3028
+fsm9xxx_ffa MACH_FSM9XXX_FFA FSM9XXX_FFA 3029
+frrhwcdma60w MACH_FRRHWCDMA60W FRRHWCDMA60W 3030
+remus MACH_REMUS REMUS 3031
+at91cap7xdk MACH_AT91CAP7XDK AT91CAP7XDK 3032
+at91cap7stk MACH_AT91CAP7STK AT91CAP7STK 3033
+kt_sbc_sam9_1 MACH_KT_SBC_SAM9_1 KT_SBC_SAM9_1 3034
+oratisrouter MACH_ORATISROUTER ORATISROUTER 3035
+armada_xp_db MACH_ARMADA_XP_DB ARMADA_XP_DB 3036
+spdm MACH_SPDM SPDM 3037
+gtib MACH_GTIB GTIB 3038
+dgm3240 MACH_DGM3240 DGM3240 3039
+atlas_i_lpe MACH_ATLAS_I_LPE ATLAS_I_LPE 3040
+htcmega MACH_HTCMEGA HTCMEGA 3041
+tricorder MACH_TRICORDER TRICORDER 3042
+tx28 MACH_TX28 TX28 3043
+bstbrd MACH_BSTBRD BSTBRD 3044
+pwb3090 MACH_PWB3090 PWB3090 3045
+idea6410 MACH_IDEA6410 IDEA6410 3046
+qbc9263 MACH_QBC9263 QBC9263 3047
+borabora MACH_BORABORA BORABORA 3048
+valdez MACH_VALDEZ VALDEZ 3049
+ls9g20 MACH_LS9G20 LS9G20 3050
+mios_v1 MACH_MIOS_V1 MIOS_V1 3051
+s5pc110_crespo MACH_S5PC110_CRESPO S5PC110_CRESPO 3052
+controltek9g20 MACH_CONTROLTEK9G20 CONTROLTEK9G20 3053
+tin307 MACH_TIN307 TIN307 3054
+tin510 MACH_TIN510 TIN510 3055
+bluecheese MACH_BLUECHEESE BLUECHEESE 3057
+tem3x30 MACH_TEM3X30 TEM3X30 3058
+harvest_desoto MACH_HARVEST_DESOTO HARVEST_DESOTO 3059
+msm8x60_qrdc MACH_MSM8X60_QRDC MSM8X60_QRDC 3060
+spear900 MACH_SPEAR900 SPEAR900 3061
+pcontrol_g20 MACH_PCONTROL_G20 PCONTROL_G20 3062
diff --git a/arch/avr32/kernel/module.c b/arch/avr32/kernel/module.c
index 98f94d0..a727f54 100644
--- a/arch/avr32/kernel/module.c
+++ b/arch/avr32/kernel/module.c
@@ -314,10 +314,9 @@
vfree(module->arch.syminfo);
module->arch.syminfo = NULL;
- return module_bug_finalize(hdr, sechdrs, module);
+ return 0;
}
void module_arch_cleanup(struct module *module)
{
- module_bug_cleanup(module);
}
diff --git a/arch/blackfin/include/asm/bfin_sport.h b/arch/blackfin/include/asm/bfin_sport.h
index 9626cf7..d27600c2 100644
--- a/arch/blackfin/include/asm/bfin_sport.h
+++ b/arch/blackfin/include/asm/bfin_sport.h
@@ -115,12 +115,6 @@
#endif
-/* Workaround defBF*.h SPORT MMRs till they get cleansed */
-#undef DTYPE_NORM
-#undef SLEN
-#undef SP_WOFF
-#undef SP_WSIZE
-
/* SPORT_TCR1 Masks */
#define TSPEN 0x0001 /* TX enable */
#define ITCLK 0x0002 /* Internal TX Clock Select */
diff --git a/arch/blackfin/include/asm/bitops.h b/arch/blackfin/include/asm/bitops.h
index d5872cd..3f7ef4d 100644
--- a/arch/blackfin/include/asm/bitops.h
+++ b/arch/blackfin/include/asm/bitops.h
@@ -22,7 +22,9 @@
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/const_hweight.h>
#include <asm-generic/bitops/lock.h>
+
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix.h>
@@ -115,7 +117,7 @@
* of bits set) of a N-bit word
*/
-static inline unsigned int hweight32(unsigned int w)
+static inline unsigned int __arch_hweight32(unsigned int w)
{
unsigned int res;
@@ -125,19 +127,20 @@
return res;
}
-static inline unsigned int hweight64(__u64 w)
+static inline unsigned int __arch_hweight64(__u64 w)
{
- return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
+ return __arch_hweight32((unsigned int)(w >> 32)) +
+ __arch_hweight32((unsigned int)w);
}
-static inline unsigned int hweight16(unsigned int w)
+static inline unsigned int __arch_hweight16(unsigned int w)
{
- return hweight32(w & 0xffff);
+ return __arch_hweight32(w & 0xffff);
}
-static inline unsigned int hweight8(unsigned int w)
+static inline unsigned int __arch_hweight8(unsigned int w)
{
- return hweight32(w & 0xff);
+ return __arch_hweight32(w & 0xff);
}
#endif /* _BLACKFIN_BITOPS_H */
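
Renaming the Blackfin hweight*() helpers to __arch_hweight*() lets the newly included asm-generic/bitops/const_hweight.h supply the public names: constant arguments are folded at compile time and only variable ones reach the hardware instruction sequence. A simplified sketch of that dispatch (the real header uses open-coded constant expressions rather than a builtin):

	#define hweight32(w)                                            \
	        (__builtin_constant_p(w) ? __builtin_popcount(w)        \
	                                 : __arch_hweight32(w))
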
diff --git a/arch/blackfin/include/asm/unistd.h b/arch/blackfin/include/asm/unistd.h
index 22886cb..14fcd25 100644
--- a/arch/blackfin/include/asm/unistd.h
+++ b/arch/blackfin/include/asm/unistd.h
@@ -389,8 +389,11 @@
#define __NR_rt_tgsigqueueinfo 368
#define __NR_perf_event_open 369
#define __NR_recvmmsg 370
+#define __NR_fanotify_init 371
+#define __NR_fanotify_mark 372
+#define __NR_prlimit64 373
-#define __NR_syscall 371
+#define __NR_syscall 374
#define NR_syscalls __NR_syscall
/* Old optional stuff no one actually uses */
diff --git a/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h b/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h
index 2bc8f4f..037a51f 100644
--- a/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h
+++ b/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h
@@ -913,88 +913,6 @@
#define PH6 0x0040
#define PH7 0x0080
-
-/* ******************* SERIAL PORT MASKS **************************************/
-/* SPORTx_TCR1 Masks */
-#define TSPEN 0x0001 /* Transmit Enable */
-#define ITCLK 0x0002 /* Internal Transmit Clock Select */
-#define DTYPE_NORM 0x0004 /* Data Format Normal */
-#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */
-#define DTYPE_ALAW 0x000C /* Compand Using A-Law */
-#define TLSBIT 0x0010 /* Transmit Bit Order */
-#define ITFS 0x0200 /* Internal Transmit Frame Sync Select */
-#define TFSR 0x0400 /* Transmit Frame Sync Required Select */
-#define DITFS 0x0800 /* Data-Independent Transmit Frame Sync Select */
-#define LTFS 0x1000 /* Low Transmit Frame Sync Select */
-#define LATFS 0x2000 /* Late Transmit Frame Sync Select */
-#define TCKFE 0x4000 /* Clock Falling Edge Select */
-
-/* SPORTx_TCR2 Masks and Macro */
-#define SLEN(x) ((x)&0x1F) /* SPORT TX Word Length (2 - 31) */
-#define TXSE 0x0100 /* TX Secondary Enable */
-#define TSFSE 0x0200 /* Transmit Stereo Frame Sync Enable */
-#define TRFST 0x0400 /* Left/Right Order (1 = Right Channel 1st) */
-
-/* SPORTx_RCR1 Masks */
-#define RSPEN 0x0001 /* Receive Enable */
-#define IRCLK 0x0002 /* Internal Receive Clock Select */
-#define DTYPE_NORM 0x0004 /* Data Format Normal */
-#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */
-#define DTYPE_ALAW 0x000C /* Compand Using A-Law */
-#define RLSBIT 0x0010 /* Receive Bit Order */
-#define IRFS 0x0200 /* Internal Receive Frame Sync Select */
-#define RFSR 0x0400 /* Receive Frame Sync Required Select */
-#define LRFS 0x1000 /* Low Receive Frame Sync Select */
-#define LARFS 0x2000 /* Late Receive Frame Sync Select */
-#define RCKFE 0x4000 /* Clock Falling Edge Select */
-
-/* SPORTx_RCR2 Masks */
-#define SLEN(x) ((x)&0x1F) /* SPORT RX Word Length (2 - 31) */
-#define RXSE 0x0100 /* RX Secondary Enable */
-#define RSFSE 0x0200 /* RX Stereo Frame Sync Enable */
-#define RRFST 0x0400 /* Right-First Data Order */
-
-/* SPORTx_STAT Masks */
-#define RXNE 0x0001 /* Receive FIFO Not Empty Status */
-#define RUVF 0x0002 /* Sticky Receive Underflow Status */
-#define ROVF 0x0004 /* Sticky Receive Overflow Status */
-#define TXF 0x0008 /* Transmit FIFO Full Status */
-#define TUVF 0x0010 /* Sticky Transmit Underflow Status */
-#define TOVF 0x0020 /* Sticky Transmit Overflow Status */
-#define TXHRE 0x0040 /* Transmit Hold Register Empty */
-
-/* SPORTx_MCMC1 Macros */
-#define SP_WOFF(x) ((x) & 0x3FF) /* Multichannel Window Offset Field */
-
-/* Only use WSIZE Macro With Logic OR While Setting Lower Order Bits */
-#define SP_WSIZE(x) (((((x)>>0x3)-1)&0xF) << 0xC) /* Multichannel Window Size = (x/8)-1 */
-
-/* SPORTx_MCMC2 Masks */
-#define REC_BYPASS 0x0000 /* Bypass Mode (No Clock Recovery) */
-#define REC_2FROM4 0x0002 /* Recover 2 MHz Clock from 4 MHz Clock */
-#define REC_8FROM16 0x0003 /* Recover 8 MHz Clock from 16 MHz Clock */
-#define MCDTXPE 0x0004 /* Multichannel DMA Transmit Packing */
-#define MCDRXPE 0x0008 /* Multichannel DMA Receive Packing */
-#define MCMEN 0x0010 /* Multichannel Frame Mode Enable */
-#define FSDR 0x0080 /* Multichannel Frame Sync to Data Relationship */
-#define MFD_0 0x0000 /* Multichannel Frame Delay = 0 */
-#define MFD_1 0x1000 /* Multichannel Frame Delay = 1 */
-#define MFD_2 0x2000 /* Multichannel Frame Delay = 2 */
-#define MFD_3 0x3000 /* Multichannel Frame Delay = 3 */
-#define MFD_4 0x4000 /* Multichannel Frame Delay = 4 */
-#define MFD_5 0x5000 /* Multichannel Frame Delay = 5 */
-#define MFD_6 0x6000 /* Multichannel Frame Delay = 6 */
-#define MFD_7 0x7000 /* Multichannel Frame Delay = 7 */
-#define MFD_8 0x8000 /* Multichannel Frame Delay = 8 */
-#define MFD_9 0x9000 /* Multichannel Frame Delay = 9 */
-#define MFD_10 0xA000 /* Multichannel Frame Delay = 10 */
-#define MFD_11 0xB000 /* Multichannel Frame Delay = 11 */
-#define MFD_12 0xC000 /* Multichannel Frame Delay = 12 */
-#define MFD_13 0xD000 /* Multichannel Frame Delay = 13 */
-#define MFD_14 0xE000 /* Multichannel Frame Delay = 14 */
-#define MFD_15 0xF000 /* Multichannel Frame Delay = 15 */
-
-
/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS *************************/
/* EBIU_AMGCTL Masks */
#define AMCKEN 0x0001 /* Enable CLKOUT */
diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c
index f392af6..645ba5c 100644
--- a/arch/blackfin/mach-bf527/boards/cm_bf527.c
+++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c
@@ -145,7 +145,6 @@
};
static struct bf5xx_nand_platform bf5xx_nand_platform = {
- .page_size = NFC_PG_SIZE_256,
.data_width = NFC_NWIDTH_8,
.partitions = partition_info,
.nr_partitions = ARRAY_SIZE(partition_info),
diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c
index 606eb36..c975fe8 100644
--- a/arch/blackfin/mach-bf527/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf527/boards/ezbrd.c
@@ -149,7 +149,6 @@
};
static struct bf5xx_nand_platform bf5xx_nand_platform = {
- .page_size = NFC_PG_SIZE_256,
.data_width = NFC_NWIDTH_8,
.partitions = partition_info,
.nr_partitions = ARRAY_SIZE(partition_info),
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index a05c967..87b41e9 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -234,7 +234,6 @@
};
static struct bf5xx_nand_platform bf5xx_nand_platform = {
- .page_size = NFC_PG_SIZE_256,
.data_width = NFC_NWIDTH_8,
.partitions = partition_info,
.nr_partitions = ARRAY_SIZE(partition_info),
diff --git a/arch/blackfin/mach-bf527/include/mach/defBF52x_base.h b/arch/blackfin/mach-bf527/include/mach/defBF52x_base.h
index 5f97f01..3e00075 100644
--- a/arch/blackfin/mach-bf527/include/mach/defBF52x_base.h
+++ b/arch/blackfin/mach-bf527/include/mach/defBF52x_base.h
@@ -922,88 +922,6 @@
#define PH14 0x4000
#define PH15 0x8000
-
-/* ******************* SERIAL PORT MASKS **************************************/
-/* SPORTx_TCR1 Masks */
-#define TSPEN 0x0001 /* Transmit Enable */
-#define ITCLK 0x0002 /* Internal Transmit Clock Select */
-#define DTYPE_NORM 0x0004 /* Data Format Normal */
-#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */
-#define DTYPE_ALAW 0x000C /* Compand Using A-Law */
-#define TLSBIT 0x0010 /* Transmit Bit Order */
-#define ITFS 0x0200 /* Internal Transmit Frame Sync Select */
-#define TFSR 0x0400 /* Transmit Frame Sync Required Select */
-#define DITFS 0x0800 /* Data-Independent Transmit Frame Sync Select */
-#define LTFS 0x1000 /* Low Transmit Frame Sync Select */
-#define LATFS 0x2000 /* Late Transmit Frame Sync Select */
-#define TCKFE 0x4000 /* Clock Falling Edge Select */
-
-/* SPORTx_TCR2 Masks and Macro */
-#define SLEN(x) ((x)&0x1F) /* SPORT TX Word Length (2 - 31) */
-#define TXSE 0x0100 /* TX Secondary Enable */
-#define TSFSE 0x0200 /* Transmit Stereo Frame Sync Enable */
-#define TRFST 0x0400 /* Left/Right Order (1 = Right Channel 1st) */
-
-/* SPORTx_RCR1 Masks */
-#define RSPEN 0x0001 /* Receive Enable */
-#define IRCLK 0x0002 /* Internal Receive Clock Select */
-#define DTYPE_NORM 0x0004 /* Data Format Normal */
-#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */
-#define DTYPE_ALAW 0x000C /* Compand Using A-Law */
-#define RLSBIT 0x0010 /* Receive Bit Order */
-#define IRFS 0x0200 /* Internal Receive Frame Sync Select */
-#define RFSR 0x0400 /* Receive Frame Sync Required Select */
-#define LRFS 0x1000 /* Low Receive Frame Sync Select */
-#define LARFS 0x2000 /* Late Receive Frame Sync Select */
-#define RCKFE 0x4000 /* Clock Falling Edge Select */
-
-/* SPORTx_RCR2 Masks */
-#define SLEN(x) ((x)&0x1F) /* SPORT RX Word Length (2 - 31) */
-#define RXSE 0x0100 /* RX Secondary Enable */
-#define RSFSE 0x0200 /* RX Stereo Frame Sync Enable */
-#define RRFST 0x0400 /* Right-First Data Order */
-
-/* SPORTx_STAT Masks */
-#define RXNE 0x0001 /* Receive FIFO Not Empty Status */
-#define RUVF 0x0002 /* Sticky Receive Underflow Status */
-#define ROVF 0x0004 /* Sticky Receive Overflow Status */
-#define TXF 0x0008 /* Transmit FIFO Full Status */
-#define TUVF 0x0010 /* Sticky Transmit Underflow Status */
-#define TOVF 0x0020 /* Sticky Transmit Overflow Status */
-#define TXHRE 0x0040 /* Transmit Hold Register Empty */
-
-/* SPORTx_MCMC1 Macros */
-#define SP_WOFF(x) ((x) & 0x3FF) /* Multichannel Window Offset Field */
-
-/* Only use WSIZE Macro With Logic OR While Setting Lower Order Bits */
-#define SP_WSIZE(x) (((((x)>>0x3)-1)&0xF) << 0xC) /* Multichannel Window Size = (x/8)-1 */
-
-/* SPORTx_MCMC2 Masks */
-#define REC_BYPASS 0x0000 /* Bypass Mode (No Clock Recovery) */
-#define REC_2FROM4 0x0002 /* Recover 2 MHz Clock from 4 MHz Clock */
-#define REC_8FROM16 0x0003 /* Recover 8 MHz Clock from 16 MHz Clock */
-#define MCDTXPE 0x0004 /* Multichannel DMA Transmit Packing */
-#define MCDRXPE 0x0008 /* Multichannel DMA Receive Packing */
-#define MCMEN 0x0010 /* Multichannel Frame Mode Enable */
-#define FSDR 0x0080 /* Multichannel Frame Sync to Data Relationship */
-#define MFD_0 0x0000 /* Multichannel Frame Delay = 0 */
-#define MFD_1 0x1000 /* Multichannel Frame Delay = 1 */
-#define MFD_2 0x2000 /* Multichannel Frame Delay = 2 */
-#define MFD_3 0x3000 /* Multichannel Frame Delay = 3 */
-#define MFD_4 0x4000 /* Multichannel Frame Delay = 4 */
-#define MFD_5 0x5000 /* Multichannel Frame Delay = 5 */
-#define MFD_6 0x6000 /* Multichannel Frame Delay = 6 */
-#define MFD_7 0x7000 /* Multichannel Frame Delay = 7 */
-#define MFD_8 0x8000 /* Multichannel Frame Delay = 8 */
-#define MFD_9 0x9000 /* Multichannel Frame Delay = 9 */
-#define MFD_10 0xA000 /* Multichannel Frame Delay = 10 */
-#define MFD_11 0xB000 /* Multichannel Frame Delay = 11 */
-#define MFD_12 0xC000 /* Multichannel Frame Delay = 12 */
-#define MFD_13 0xD000 /* Multichannel Frame Delay = 13 */
-#define MFD_14 0xE000 /* Multichannel Frame Delay = 14 */
-#define MFD_15 0xF000 /* Multichannel Frame Delay = 15 */
-
-
/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS *************************/
/* EBIU_AMGCTL Masks */
#define AMCKEN 0x0001 /* Enable CLKOUT */
diff --git a/arch/blackfin/mach-bf533/include/mach/defBF532.h b/arch/blackfin/mach-bf533/include/mach/defBF532.h
index e9ff491..04acf1e 100644
--- a/arch/blackfin/mach-bf533/include/mach/defBF532.h
+++ b/arch/blackfin/mach-bf533/include/mach/defBF532.h
@@ -509,98 +509,6 @@
#define IREN_P 0x01
#define UCEN_P 0x00
-/* ********** SERIAL PORT MASKS ********************** */
-
-/* SPORTx_TCR1 Masks */
-#define TSPEN 0x0001 /* TX enable */
-#define ITCLK 0x0002 /* Internal TX Clock Select */
-#define TDTYPE 0x000C /* TX Data Formatting Select */
-#define DTYPE_NORM 0x0000 /* Data Format Normal */
-#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */
-#define DTYPE_ALAW 0x000C /* Compand Using A-Law */
-#define TLSBIT 0x0010 /* TX Bit Order */
-#define ITFS 0x0200 /* Internal TX Frame Sync Select */
-#define TFSR 0x0400 /* TX Frame Sync Required Select */
-#define DITFS 0x0800 /* Data Independent TX Frame Sync Select */
-#define LTFS 0x1000 /* Low TX Frame Sync Select */
-#define LATFS 0x2000 /* Late TX Frame Sync Select */
-#define TCKFE 0x4000 /* TX Clock Falling Edge Select */
-
-/* SPORTx_TCR2 Masks */
-#if defined(__ADSPBF531__) || defined(__ADSPBF532__) || \
- defined(__ADSPBF533__)
-# define SLEN 0x001F /*TX Word Length */
-#else
-# define SLEN(x) ((x)&0x1F) /* SPORT TX Word Length (2 - 31) */
-#endif
-#define TXSE 0x0100 /*TX Secondary Enable */
-#define TSFSE 0x0200 /*TX Stereo Frame Sync Enable */
-#define TRFST 0x0400 /*TX Right-First Data Order */
-
-/* SPORTx_RCR1 Masks */
-#define RSPEN 0x0001 /* RX enable */
-#define IRCLK 0x0002 /* Internal RX Clock Select */
-#define RDTYPE 0x000C /* RX Data Formatting Select */
-#define DTYPE_NORM 0x0000 /* no companding */
-#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */
-#define DTYPE_ALAW 0x000C /* Compand Using A-Law */
-#define RLSBIT 0x0010 /* RX Bit Order */
-#define IRFS 0x0200 /* Internal RX Frame Sync Select */
-#define RFSR 0x0400 /* RX Frame Sync Required Select */
-#define LRFS 0x1000 /* Low RX Frame Sync Select */
-#define LARFS 0x2000 /* Late RX Frame Sync Select */
-#define RCKFE 0x4000 /* RX Clock Falling Edge Select */
-
-/* SPORTx_RCR2 Masks */
-/* SLEN defined above */
-#define RXSE 0x0100 /*RX Secondary Enable */
-#define RSFSE 0x0200 /*RX Stereo Frame Sync Enable */
-#define RRFST 0x0400 /*Right-First Data Order */
-
-/*SPORTx_STAT Masks */
-#define RXNE 0x0001 /*RX FIFO Not Empty Status */
-#define RUVF 0x0002 /*RX Underflow Status */
-#define ROVF 0x0004 /*RX Overflow Status */
-#define TXF 0x0008 /*TX FIFO Full Status */
-#define TUVF 0x0010 /*TX Underflow Status */
-#define TOVF 0x0020 /*TX Overflow Status */
-#define TXHRE 0x0040 /*TX Hold Register Empty */
-
-/*SPORTx_MCMC1 Masks */
-#define SP_WSIZE 0x0000F000 /*Multichannel Window Size Field */
-#define SP_WOFF 0x000003FF /*Multichannel Window Offset Field */
-/* SPORTx_MCMC1 Macros */
-#define SET_SP_WOFF(x) ((x) & 0x3FF) /* Multichannel Window Offset Field */
-/* Only use SET_WSIZE Macro With Logic OR While Setting Lower Order Bits */
-#define SET_SP_WSIZE(x) (((((x)>>0x3)-1)&0xF) << 0xC) /* Multichannel Window Size = (x/8)-1 */
-
-/*SPORTx_MCMC2 Masks */
-#define MCCRM 0x00000003 /*Multichannel Clock Recovery Mode */
-#define REC_BYPASS 0x0000 /* Bypass Mode (No Clock Recovery) */
-#define REC_2FROM4 0x0002 /* Recover 2 MHz Clock from 4 MHz Clock */
-#define REC_8FROM16 0x0003 /* Recover 8 MHz Clock from 16 MHz Clock */
-#define MCDTXPE 0x00000004 /*Multichannel DMA Transmit Packing */
-#define MCDRXPE 0x00000008 /*Multichannel DMA Receive Packing */
-#define MCMEN 0x00000010 /*Multichannel Frame Mode Enable */
-#define FSDR 0x00000080 /*Multichannel Frame Sync to Data Relationship */
-#define MFD 0x0000F000 /*Multichannel Frame Delay */
-#define MFD_0 0x0000 /* Multichannel Frame Delay = 0 */
-#define MFD_1 0x1000 /* Multichannel Frame Delay = 1 */
-#define MFD_2 0x2000 /* Multichannel Frame Delay = 2 */
-#define MFD_3 0x3000 /* Multichannel Frame Delay = 3 */
-#define MFD_4 0x4000 /* Multichannel Frame Delay = 4 */
-#define MFD_5 0x5000 /* Multichannel Frame Delay = 5 */
-#define MFD_6 0x6000 /* Multichannel Frame Delay = 6 */
-#define MFD_7 0x7000 /* Multichannel Frame Delay = 7 */
-#define MFD_8 0x8000 /* Multichannel Frame Delay = 8 */
-#define MFD_9 0x9000 /* Multichannel Frame Delay = 9 */
-#define MFD_10 0xA000 /* Multichannel Frame Delay = 10 */
-#define MFD_11 0xB000 /* Multichannel Frame Delay = 11 */
-#define MFD_12 0xC000 /* Multichannel Frame Delay = 12 */
-#define MFD_13 0xD000 /* Multichannel Frame Delay = 13 */
-#define MFD_14 0xE000 /* Multichannel Frame Delay = 14 */
-#define MFD_15 0xF000 /* Multichannel Frame Delay = 15 */
-
/* ********* PARALLEL PERIPHERAL INTERFACE (PPI) MASKS **************** */
/* PPI_CONTROL Masks */
diff --git a/arch/blackfin/mach-bf537/include/mach/defBF534.h b/arch/blackfin/mach-bf537/include/mach/defBF534.h
index aad61b8..6f56907 100644
--- a/arch/blackfin/mach-bf537/include/mach/defBF534.h
+++ b/arch/blackfin/mach-bf537/include/mach/defBF534.h
@@ -1241,86 +1241,6 @@
#define PH14 0x4000
#define PH15 0x8000
-/* ******************* SERIAL PORT MASKS **************************************/
-/* SPORTx_TCR1 Masks */
-#define TSPEN 0x0001 /* Transmit Enable */
-#define ITCLK 0x0002 /* Internal Transmit Clock Select */
-#define DTYPE_NORM 0x0004 /* Data Format Normal */
-#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */
-#define DTYPE_ALAW 0x000C /* Compand Using A-Law */
-#define TLSBIT 0x0010 /* Transmit Bit Order */
-#define ITFS 0x0200 /* Internal Transmit Frame Sync Select */
-#define TFSR 0x0400 /* Transmit Frame Sync Required Select */
-#define DITFS 0x0800 /* Data-Independent Transmit Frame Sync Select */
-#define LTFS 0x1000 /* Low Transmit Frame Sync Select */
-#define LATFS 0x2000 /* Late Transmit Frame Sync Select */
-#define TCKFE 0x4000 /* Clock Falling Edge Select */
-
-/* SPORTx_TCR2 Masks and Macro */
-#define SLEN(x) ((x)&0x1F) /* SPORT TX Word Length (2 - 31) */
-#define TXSE 0x0100 /* TX Secondary Enable */
-#define TSFSE 0x0200 /* Transmit Stereo Frame Sync Enable */
-#define TRFST 0x0400 /* Left/Right Order (1 = Right Channel 1st) */
-
-/* SPORTx_RCR1 Masks */
-#define RSPEN 0x0001 /* Receive Enable */
-#define IRCLK 0x0002 /* Internal Receive Clock Select */
-#define DTYPE_NORM 0x0004 /* Data Format Normal */
-#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */
-#define DTYPE_ALAW 0x000C /* Compand Using A-Law */
-#define RLSBIT 0x0010 /* Receive Bit Order */
-#define IRFS 0x0200 /* Internal Receive Frame Sync Select */
-#define RFSR 0x0400 /* Receive Frame Sync Required Select */
-#define LRFS 0x1000 /* Low Receive Frame Sync Select */
-#define LARFS 0x2000 /* Late Receive Frame Sync Select */
-#define RCKFE 0x4000 /* Clock Falling Edge Select */
-
-/* SPORTx_RCR2 Masks */
-#define SLEN(x) ((x)&0x1F) /* SPORT RX Word Length (2 - 31) */
-#define RXSE 0x0100 /* RX Secondary Enable */
-#define RSFSE 0x0200 /* RX Stereo Frame Sync Enable */
-#define RRFST 0x0400 /* Right-First Data Order */
-
-/* SPORTx_STAT Masks */
-#define RXNE 0x0001 /* Receive FIFO Not Empty Status */
-#define RUVF 0x0002 /* Sticky Receive Underflow Status */
-#define ROVF 0x0004 /* Sticky Receive Overflow Status */
-#define TXF 0x0008 /* Transmit FIFO Full Status */
-#define TUVF 0x0010 /* Sticky Transmit Underflow Status */
-#define TOVF 0x0020 /* Sticky Transmit Overflow Status */
-#define TXHRE 0x0040 /* Transmit Hold Register Empty */
-
-/* SPORTx_MCMC1 Macros */
-#define SP_WOFF(x) ((x) & 0x3FF) /* Multichannel Window Offset Field */
-
-/* Only use WSIZE Macro With Logic OR While Setting Lower Order Bits */
-#define SP_WSIZE(x) (((((x)>>0x3)-1)&0xF) << 0xC) /* Multichannel Window Size = (x/8)-1 */
-
-/* SPORTx_MCMC2 Masks */
-#define REC_BYPASS 0x0000 /* Bypass Mode (No Clock Recovery) */
-#define REC_2FROM4 0x0002 /* Recover 2 MHz Clock from 4 MHz Clock */
-#define REC_8FROM16 0x0003 /* Recover 8 MHz Clock from 16 MHz Clock */
-#define MCDTXPE 0x0004 /* Multichannel DMA Transmit Packing */
-#define MCDRXPE 0x0008 /* Multichannel DMA Receive Packing */
-#define MCMEN 0x0010 /* Multichannel Frame Mode Enable */
-#define FSDR 0x0080 /* Multichannel Frame Sync to Data Relationship */
-#define MFD_0 0x0000 /* Multichannel Frame Delay = 0 */
-#define MFD_1 0x1000 /* Multichannel Frame Delay = 1 */
-#define MFD_2 0x2000 /* Multichannel Frame Delay = 2 */
-#define MFD_3 0x3000 /* Multichannel Frame Delay = 3 */
-#define MFD_4 0x4000 /* Multichannel Frame Delay = 4 */
-#define MFD_5 0x5000 /* Multichannel Frame Delay = 5 */
-#define MFD_6 0x6000 /* Multichannel Frame Delay = 6 */
-#define MFD_7 0x7000 /* Multichannel Frame Delay = 7 */
-#define MFD_8 0x8000 /* Multichannel Frame Delay = 8 */
-#define MFD_9 0x9000 /* Multichannel Frame Delay = 9 */
-#define MFD_10 0xA000 /* Multichannel Frame Delay = 10 */
-#define MFD_11 0xB000 /* Multichannel Frame Delay = 11 */
-#define MFD_12 0xC000 /* Multichannel Frame Delay = 12 */
-#define MFD_13 0xD000 /* Multichannel Frame Delay = 13 */
-#define MFD_14 0xE000 /* Multichannel Frame Delay = 14 */
-#define MFD_15 0xF000 /* Multichannel Frame Delay = 15 */
-
/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS *************************/
/* EBIU_AMGCTL Masks */
#define AMCKEN 0x0001 /* Enable CLKOUT */
diff --git a/arch/blackfin/mach-bf538/include/mach/defBF539.h b/arch/blackfin/mach-bf538/include/mach/defBF539.h
index b674a1c..fe43062 100644
--- a/arch/blackfin/mach-bf538/include/mach/defBF539.h
+++ b/arch/blackfin/mach-bf538/include/mach/defBF539.h
@@ -1610,113 +1610,6 @@
#define UCEN_P 0x00
-/* ********** SERIAL PORT MASKS ********************** */
-/* SPORTx_TCR1 Masks */
-#define TSPEN 0x0001 /* TX enable */
-#define ITCLK 0x0002 /* Internal TX Clock Select */
-#define TDTYPE 0x000C /* TX Data Formatting Select */
-#define DTYPE_NORM 0x0000 /* Data Format Normal */
-#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */
-#define DTYPE_ALAW 0x000C /* Compand Using A-Law */
-#define TLSBIT 0x0010 /* TX Bit Order */
-#define ITFS 0x0200 /* Internal TX Frame Sync Select */
-#define TFSR 0x0400 /* TX Frame Sync Required Select */
-#define DITFS 0x0800 /* Data Independent TX Frame Sync Select */
-#define LTFS 0x1000 /* Low TX Frame Sync Select */
-#define LATFS 0x2000 /* Late TX Frame Sync Select */
-#define TCKFE 0x4000 /* TX Clock Falling Edge Select */
-/* SPORTx_RCR1 Deprecated Masks */
-#define TULAW DTYPE_ULAW /* Compand Using u-Law */
-#define TALAW DTYPE_ALAW /* Compand Using A-Law */
-
-/* SPORTx_TCR2 Masks */
-#ifdef _MISRA_RULES
-#define SLEN(x) ((x)&0x1Fu) /* SPORT TX Word Length (2 - 31) */
-#else
-#define SLEN(x) ((x)&0x1F) /* SPORT TX Word Length (2 - 31) */
-#endif /* _MISRA_RULES */
-#define TXSE 0x0100 /*TX Secondary Enable */
-#define TSFSE 0x0200 /*TX Stereo Frame Sync Enable */
-#define TRFST 0x0400 /*TX Right-First Data Order */
-
-/* SPORTx_RCR1 Masks */
-#define RSPEN 0x0001 /* RX enable */
-#define IRCLK 0x0002 /* Internal RX Clock Select */
-#define RDTYPE 0x000C /* RX Data Formatting Select */
-#define DTYPE_NORM 0x0000 /* no companding */
-#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */
-#define DTYPE_ALAW 0x000C /* Compand Using A-Law */
-#define RLSBIT 0x0010 /* RX Bit Order */
-#define IRFS 0x0200 /* Internal RX Frame Sync Select */
-#define RFSR 0x0400 /* RX Frame Sync Required Select */
-#define LRFS 0x1000 /* Low RX Frame Sync Select */
-#define LARFS 0x2000 /* Late RX Frame Sync Select */
-#define RCKFE 0x4000 /* RX Clock Falling Edge Select */
-/* SPORTx_RCR1 Deprecated Masks */
-#define RULAW DTYPE_ULAW /* Compand Using u-Law */
-#define RALAW DTYPE_ALAW /* Compand Using A-Law */
-
-/* SPORTx_RCR2 Masks */
-#ifdef _MISRA_RULES
-#define SLEN(x) ((x)&0x1Fu) /* SPORT RX Word Length (2 - 31) */
-#else
-#define SLEN(x) ((x)&0x1F) /* SPORT RX Word Length (2 - 31) */
-#endif /* _MISRA_RULES */
-#define RXSE 0x0100 /*RX Secondary Enable */
-#define RSFSE 0x0200 /*RX Stereo Frame Sync Enable */
-#define RRFST 0x0400 /*Right-First Data Order */
-
-/*SPORTx_STAT Masks */
-#define RXNE 0x0001 /*RX FIFO Not Empty Status */
-#define RUVF 0x0002 /*RX Underflow Status */
-#define ROVF 0x0004 /*RX Overflow Status */
-#define TXF 0x0008 /*TX FIFO Full Status */
-#define TUVF 0x0010 /*TX Underflow Status */
-#define TOVF 0x0020 /*TX Overflow Status */
-#define TXHRE 0x0040 /*TX Hold Register Empty */
-
-/*SPORTx_MCMC1 Masks */
-#define WOFF 0x000003FF /*Multichannel Window Offset Field */
-/* SPORTx_MCMC1 Macros */
-#ifdef _MISRA_RULES
-#define SET_WOFF(x) ((x) & 0x3FFu) /* Multichannel Window Offset Field */
-/* Only use SET_WSIZE Macro With Logic OR While Setting Lower Order Bits */
-#define SET_WSIZE(x) (((((x)>>0x3)-1u)&0xFu) << 0xC) /* Multichannel Window Size = (x/8)-1 */
-#else
-#define SET_WOFF(x) ((x) & 0x3FF) /* Multichannel Window Offset Field */
-/* Only use SET_WSIZE Macro With Logic OR While Setting Lower Order Bits */
-#define SET_WSIZE(x) (((((x)>>0x3)-1)&0xF) << 0xC) /* Multichannel Window Size = (x/8)-1 */
-#endif /* _MISRA_RULES */
-
-
-/*SPORTx_MCMC2 Masks */
-#define MCCRM 0x0003 /*Multichannel Clock Recovery Mode */
-#define REC_BYPASS 0x0000 /* Bypass Mode (No Clock Recovery) */
-#define REC_2FROM4 0x0002 /* Recover 2 MHz Clock from 4 MHz Clock */
-#define REC_8FROM16 0x0003 /* Recover 8 MHz Clock from 16 MHz Clock */
-#define MCDTXPE 0x0004 /*Multichannel DMA Transmit Packing */
-#define MCDRXPE 0x0008 /*Multichannel DMA Receive Packing */
-#define MCMEN 0x0010 /*Multichannel Frame Mode Enable */
-#define FSDR 0x0080 /*Multichannel Frame Sync to Data Relationship */
-#define MFD 0xF000 /*Multichannel Frame Delay */
-#define MFD_0 0x0000 /* Multichannel Frame Delay = 0 */
-#define MFD_1 0x1000 /* Multichannel Frame Delay = 1 */
-#define MFD_2 0x2000 /* Multichannel Frame Delay = 2 */
-#define MFD_3 0x3000 /* Multichannel Frame Delay = 3 */
-#define MFD_4 0x4000 /* Multichannel Frame Delay = 4 */
-#define MFD_5 0x5000 /* Multichannel Frame Delay = 5 */
-#define MFD_6 0x6000 /* Multichannel Frame Delay = 6 */
-#define MFD_7 0x7000 /* Multichannel Frame Delay = 7 */
-#define MFD_8 0x8000 /* Multichannel Frame Delay = 8 */
-#define MFD_9 0x9000 /* Multichannel Frame Delay = 9 */
-#define MFD_10 0xA000 /* Multichannel Frame Delay = 10 */
-#define MFD_11 0xB000 /* Multichannel Frame Delay = 11 */
-#define MFD_12 0xC000 /* Multichannel Frame Delay = 12 */
-#define MFD_13 0xD000 /* Multichannel Frame Delay = 13 */
-#define MFD_14 0xE000 /* Multichannel Frame Delay = 14 */
-#define MFD_15 0xF000 /* Multichannel Frame Delay = 15 */
-
-
/* ********* PARALLEL PERIPHERAL INTERFACE (PPI) MASKS **************** */
/* PPI_CONTROL Masks */
#define PORT_EN 0x0001 /* PPI Port Enable */
diff --git a/arch/blackfin/mach-bf548/boards/cm_bf548.c b/arch/blackfin/mach-bf548/boards/cm_bf548.c
index dbb6b1d..0c38eec 100644
--- a/arch/blackfin/mach-bf548/boards/cm_bf548.c
+++ b/arch/blackfin/mach-bf548/boards/cm_bf548.c
@@ -706,7 +706,6 @@
};
static struct bf5xx_nand_platform bf5xx_nand_platform = {
- .page_size = NFC_PG_SIZE_256,
.data_width = NFC_NWIDTH_8,
.partitions = partition_info,
.nr_partitions = ARRAY_SIZE(partition_info),
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index 6fcfb91..56682a3 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -849,7 +849,6 @@
};
static struct bf5xx_nand_platform bf5xx_nand_platform = {
- .page_size = NFC_PG_SIZE_256,
.data_width = NFC_NWIDTH_8,
.partitions = partition_info,
.nr_partitions = ARRAY_SIZE(partition_info),
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h b/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h
index 95ff446..7866197 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h
@@ -2221,73 +2221,6 @@
#define RCVDATA16 0xffff /* Receive FIFO 16-Bit Data */
-/* Bit masks for SPORTx_TCR1 */
-
-#define TCKFE 0x4000 /* Clock Falling Edge Select */
-#define LATFS 0x2000 /* Late Transmit Frame Sync */
-#define LTFS 0x1000 /* Low Transmit Frame Sync Select */
-#define DITFS 0x800 /* Data-Independent Transmit Frame Sync Select */
-#define TFSR 0x400 /* Transmit Frame Sync Required Select */
-#define ITFS 0x200 /* Internal Transmit Frame Sync Select */
-#define TLSBIT 0x10 /* Transmit Bit Order */
-#define TDTYPE 0xc /* Data Formatting Type Select */
-#define ITCLK 0x2 /* Internal Transmit Clock Select */
-#define TSPEN 0x1 /* Transmit Enable */
-
-/* Bit masks for SPORTx_TCR2 */
-
-#define TRFST 0x400 /* Left/Right Order */
-#define TSFSE 0x200 /* Transmit Stereo Frame Sync Enable */
-#define TXSE 0x100 /* TxSEC Enable */
-#define SLEN_T 0x1f /* SPORT Word Length */
-
-/* Bit masks for SPORTx_RCR1 */
-
-#define RCKFE 0x4000 /* Clock Falling Edge Select */
-#define LARFS 0x2000 /* Late Receive Frame Sync */
-#define LRFS 0x1000 /* Low Receive Frame Sync Select */
-#define RFSR 0x400 /* Receive Frame Sync Required Select */
-#define IRFS 0x200 /* Internal Receive Frame Sync Select */
-#define RLSBIT 0x10 /* Receive Bit Order */
-#define RDTYPE 0xc /* Data Formatting Type Select */
-#define IRCLK 0x2 /* Internal Receive Clock Select */
-#define RSPEN 0x1 /* Receive Enable */
-
-/* Bit masks for SPORTx_RCR2 */
-
-#define RRFST 0x400 /* Left/Right Order */
-#define RSFSE 0x200 /* Receive Stereo Frame Sync Enable */
-#define RXSE 0x100 /* RxSEC Enable */
-#define SLEN_R 0x1f /* SPORT Word Length */
-
-/* Bit masks for SPORTx_STAT */
-
-#define TXHRE 0x40 /* Transmit Hold Register Empty */
-#define TOVF 0x20 /* Sticky Transmit Overflow Status */
-#define TUVF 0x10 /* Sticky Transmit Underflow Status */
-#define TXF 0x8 /* Transmit FIFO Full Status */
-#define ROVF 0x4 /* Sticky Receive Overflow Status */
-#define RUVF 0x2 /* Sticky Receive Underflow Status */
-#define RXNE 0x1 /* Receive FIFO Not Empty Status */
-
-/* Bit masks for SPORTx_MCMC1 */
-
-#define SP_WSIZE 0xf000 /* Window Size */
-#define SP_WOFF 0x3ff /* Windows Offset */
-
-/* Bit masks for SPORTx_MCMC2 */
-
-#define MFD 0xf000 /* Multi channel Frame Delay */
-#define FSDR 0x80 /* Frame Sync to Data Relationship */
-#define MCMEN 0x10 /* Multi channel Frame Mode Enable */
-#define MCDRXPE 0x8 /* Multi channel DMA Receive Packing */
-#define MCDTXPE 0x4 /* Multi channel DMA Transmit Packing */
-#define MCCRM 0x3 /* 2X Clock Recovery Mode */
-
-/* Bit masks for SPORTx_CHNL */
-
-#define CUR_CHNL 0x3ff /* Current Channel Indicator */
-
/* Bit masks for UARTx_LCR */
#if 0
diff --git a/arch/blackfin/mach-bf561/include/mach/defBF561.h b/arch/blackfin/mach-bf561/include/mach/defBF561.h
index 4c8e36b..2674f00 100644
--- a/arch/blackfin/mach-bf561/include/mach/defBF561.h
+++ b/arch/blackfin/mach-bf561/include/mach/defBF561.h
@@ -1007,66 +1007,6 @@
#define IREN_P 0x01
#define UCEN_P 0x00
-/* ********** SERIAL PORT MASKS ********************** */
-
-/* SPORTx_TCR1 Masks */
-#define TSPEN 0x0001 /* TX enable */
-#define ITCLK 0x0002 /* Internal TX Clock Select */
-#define TDTYPE 0x000C /* TX Data Formatting Select */
-#define TLSBIT 0x0010 /* TX Bit Order */
-#define ITFS 0x0200 /* Internal TX Frame Sync Select */
-#define TFSR 0x0400 /* TX Frame Sync Required Select */
-#define DITFS 0x0800 /* Data Independent TX Frame Sync Select */
-#define LTFS 0x1000 /* Low TX Frame Sync Select */
-#define LATFS 0x2000 /* Late TX Frame Sync Select */
-#define TCKFE 0x4000 /* TX Clock Falling Edge Select */
-
-/* SPORTx_TCR2 Masks */
-#define SLEN 0x001F /*TX Word Length */
-#define TXSE 0x0100 /*TX Secondary Enable */
-#define TSFSE 0x0200 /*TX Stereo Frame Sync Enable */
-#define TRFST 0x0400 /*TX Right-First Data Order */
-
-/* SPORTx_RCR1 Masks */
-#define RSPEN 0x0001 /* RX enable */
-#define IRCLK 0x0002 /* Internal RX Clock Select */
-#define RDTYPE 0x000C /* RX Data Formatting Select */
-#define RULAW 0x0008 /* u-Law enable */
-#define RALAW 0x000C /* A-Law enable */
-#define RLSBIT 0x0010 /* RX Bit Order */
-#define IRFS 0x0200 /* Internal RX Frame Sync Select */
-#define RFSR 0x0400 /* RX Frame Sync Required Select */
-#define LRFS 0x1000 /* Low RX Frame Sync Select */
-#define LARFS 0x2000 /* Late RX Frame Sync Select */
-#define RCKFE 0x4000 /* RX Clock Falling Edge Select */
-
-/* SPORTx_RCR2 Masks */
-#define SLEN 0x001F /*RX Word Length */
-#define RXSE 0x0100 /*RX Secondary Enable */
-#define RSFSE 0x0200 /*RX Stereo Frame Sync Enable */
-#define RRFST 0x0400 /*Right-First Data Order */
-
-/*SPORTx_STAT Masks */
-#define RXNE 0x0001 /*RX FIFO Not Empty Status */
-#define RUVF 0x0002 /*RX Underflow Status */
-#define ROVF 0x0004 /*RX Overflow Status */
-#define TXF 0x0008 /*TX FIFO Full Status */
-#define TUVF 0x0010 /*TX Underflow Status */
-#define TOVF 0x0020 /*TX Overflow Status */
-#define TXHRE 0x0040 /*TX Hold Register Empty */
-
-/*SPORTx_MCMC1 Masks */
-#define SP_WSIZE 0x0000F000 /*Multichannel Window Size Field */
-#define SP_WOFF 0x000003FF /*Multichannel Window Offset Field */
-
-/*SPORTx_MCMC2 Masks */
-#define MCCRM 0x00000003 /*Multichannel Clock Recovery Mode */
-#define MCDTXPE 0x00000004 /*Multichannel DMA Transmit Packing */
-#define MCDRXPE 0x00000008 /*Multichannel DMA Receive Packing */
-#define MCMEN 0x00000010 /*Multichannel Frame Mode Enable */
-#define FSDR 0x00000080 /*Multichannel Frame Sync to Data Relationship */
-#define MFD 0x0000F000 /*Multichannel Frame Delay */
-
/* ********* PARALLEL PERIPHERAL INTERFACE (PPI) MASKS **************** */
/* PPI_CONTROL Masks */
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index a5847f5..af1bffa 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -1628,6 +1628,9 @@
.long _sys_rt_tgsigqueueinfo
.long _sys_perf_event_open
.long _sys_recvmmsg /* 370 */
+ .long _sys_fanotify_init
+ .long _sys_fanotify_mark
+ .long _sys_prlimit64
.rept NR_syscalls-(.-_sys_call_table)/4
.long _sys_ni_syscall
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index 16399bd..0f2417d 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -7,6 +7,7 @@
default y
select HAVE_IDE
select HAVE_ARCH_TRACEHOOK
+ select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
config ZONE_DMA
diff --git a/arch/frv/kernel/signal.c b/arch/frv/kernel/signal.c
index 0974c0e..bab0129 100644
--- a/arch/frv/kernel/signal.c
+++ b/arch/frv/kernel/signal.c
@@ -121,6 +121,9 @@
struct user_context *user = current->thread.user;
unsigned long tbr, psr;
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
tbr = user->i.tbr;
psr = user->i.psr;
if (copy_from_user(user, &sc->sc_context, sizeof(sc->sc_context)))
@@ -250,6 +253,8 @@
struct sigframe __user *frame;
int rsig;
+ set_fs(USER_DS);
+
frame = get_sigframe(ka, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
@@ -293,22 +298,23 @@
(unsigned long) (frame->retcode + 2));
}
- /* set up registers for signal handler */
- __frame->sp = (unsigned long) frame;
- __frame->lr = (unsigned long) &frame->retcode;
- __frame->gr8 = sig;
-
+ /* Set up registers for the signal handler */
if (current->personality & FDPIC_FUNCPTRS) {
struct fdpic_func_descriptor __user *funcptr =
(struct fdpic_func_descriptor __user *) ka->sa.sa_handler;
- __get_user(__frame->pc, &funcptr->text);
- __get_user(__frame->gr15, &funcptr->GOT);
+ struct fdpic_func_descriptor desc;
+ if (copy_from_user(&desc, funcptr, sizeof(desc)))
+ goto give_sigsegv;
+ __frame->pc = desc.text;
+ __frame->gr15 = desc.GOT;
} else {
__frame->pc = (unsigned long) ka->sa.sa_handler;
__frame->gr15 = 0;
}
- set_fs(USER_DS);
+ __frame->sp = (unsigned long) frame;
+ __frame->lr = (unsigned long) &frame->retcode;
+ __frame->gr8 = sig;
/* the tracer may want to single-step inside the handler */
if (test_thread_flag(TIF_SINGLESTEP))
@@ -323,7 +329,7 @@
return 0;
give_sigsegv:
- force_sig(SIGSEGV, current);
+ force_sigsegv(sig, current);
return -EFAULT;
} /* end setup_frame() */
@@ -338,6 +344,8 @@
struct rt_sigframe __user *frame;
int rsig;
+ set_fs(USER_DS);
+
frame = get_sigframe(ka, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
@@ -392,22 +400,23 @@
}
/* Set up registers for signal handler */
- __frame->sp = (unsigned long) frame;
- __frame->lr = (unsigned long) &frame->retcode;
- __frame->gr8 = sig;
- __frame->gr9 = (unsigned long) &frame->info;
-
if (current->personality & FDPIC_FUNCPTRS) {
struct fdpic_func_descriptor __user *funcptr =
(struct fdpic_func_descriptor __user *) ka->sa.sa_handler;
- __get_user(__frame->pc, &funcptr->text);
- __get_user(__frame->gr15, &funcptr->GOT);
+ struct fdpic_func_descriptor desc;
+ if (copy_from_user(&desc, funcptr, sizeof(desc)))
+ goto give_sigsegv;
+ __frame->pc = desc.text;
+ __frame->gr15 = desc.GOT;
} else {
__frame->pc = (unsigned long) ka->sa.sa_handler;
__frame->gr15 = 0;
}
- set_fs(USER_DS);
+ __frame->sp = (unsigned long) frame;
+ __frame->lr = (unsigned long) &frame->retcode;
+ __frame->gr8 = sig;
+ __frame->gr9 = (unsigned long) &frame->info;
/* the tracer may want to single-step inside the handler */
if (test_thread_flag(TIF_SINGLESTEP))
@@ -422,7 +431,7 @@
return 0;
give_sigsegv:
- force_sig(SIGSEGV, current);
+ force_sigsegv(sig, current);
return -EFAULT;
} /* end setup_rt_frame() */
@@ -437,7 +446,7 @@
int ret;
/* Are we from a system call? */
- if (in_syscall(__frame)) {
+ if (__frame->syscallno != -1) {
/* If so, check system call restarting.. */
switch (__frame->gr8) {
case -ERESTART_RESTARTBLOCK:
@@ -456,6 +465,7 @@
__frame->gr8 = __frame->orig_gr8;
__frame->pc -= 4;
}
+ __frame->syscallno = -1;
}
/* Set up the stack frame */
@@ -538,10 +548,11 @@
break;
case -ERESTART_RESTARTBLOCK:
- __frame->gr8 = __NR_restart_syscall;
+ __frame->gr7 = __NR_restart_syscall;
__frame->pc -= 4;
break;
}
+ __frame->syscallno = -1;
}
/* if there's no signal to deliver, we just put the saved sigmask
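
A recurring theme in the frv signal fixes above is replacing two unchecked __get_user() reads of the FDPIC function descriptor with a single checked copy_from_user(), so a bogus handler pointer aborts frame setup instead of loading garbage into pc/gr15. The checked-fetch idiom, sketched:

	struct fdpic_func_descriptor desc;      /* from the FRV FDPIC ABI headers */

	if (copy_from_user(&desc, funcptr, sizeof(desc)))
	        goto give_sigsegv;              /* faulting descriptor: deliver SIGSEGV */
	__frame->pc   = desc.text;              /* use values only after both reads succeed */
	__frame->gr15 = desc.GOT;
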
diff --git a/arch/frv/lib/Makefile b/arch/frv/lib/Makefile
index f470975..4ff2fb1 100644
--- a/arch/frv/lib/Makefile
+++ b/arch/frv/lib/Makefile
@@ -5,4 +5,4 @@
lib-y := \
__ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \
checksum.o memcpy.o memset.o atomic-ops.o atomic64-ops.o \
- outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o perf_event.o
+ outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o
diff --git a/arch/frv/lib/perf_event.c b/arch/frv/lib/perf_event.c
deleted file mode 100644
index 9ac5acf..0000000
--- a/arch/frv/lib/perf_event.c
+++ /dev/null
@@ -1,19 +0,0 @@
-/* Performance event handling
- *
- * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-#include <linux/perf_event.h>
-
-/*
- * mark the performance event as pending
- */
-void set_perf_event_pending(void)
-{
-}
diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h
index e936804..984221a 100644
--- a/arch/h8300/include/asm/atomic.h
+++ b/arch/h8300/include/asm/atomic.h
@@ -18,7 +18,8 @@
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
- int ret,flags;
+ unsigned long flags;
+ int ret;
local_irq_save(flags);
ret = v->counter += i;
local_irq_restore(flags);
@@ -30,7 +31,8 @@
static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
- int ret,flags;
+ unsigned long flags;
+ int ret;
local_irq_save(flags);
ret = v->counter -= i;
local_irq_restore(flags);
@@ -42,7 +44,8 @@
static __inline__ int atomic_inc_return(atomic_t *v)
{
- int ret,flags;
+ unsigned long flags;
+ int ret;
local_irq_save(flags);
v->counter++;
ret = v->counter;
@@ -64,7 +67,8 @@
static __inline__ int atomic_dec_return(atomic_t *v)
{
- int ret,flags;
+ unsigned long flags;
+ int ret;
local_irq_save(flags);
--v->counter;
ret = v->counter;
@@ -76,7 +80,8 @@
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
- int ret,flags;
+ unsigned long flags;
+ int ret;
local_irq_save(flags);
--v->counter;
ret = v->counter;
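
Each h8300 hunk above makes the same one-line fix: local_irq_save() stores the processor flags word, which is an unsigned long, so declaring it int risks truncation. The corrected idiom, as one self-contained sketch:

	static inline int atomic_add_return_sketch(int i, atomic_t *v)
	{
	        unsigned long flags;            /* never 'int': holds the saved IRQ state */
	        int ret;

	        local_irq_save(flags);
	        ret = v->counter += i;
	        local_irq_restore(flags);
	        return ret;
	}
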
diff --git a/arch/h8300/include/asm/system.h b/arch/h8300/include/asm/system.h
index d98d976..16bf156 100644
--- a/arch/h8300/include/asm/system.h
+++ b/arch/h8300/include/asm/system.h
@@ -3,6 +3,8 @@
#include <linux/linkage.h>
+struct pt_regs;
+
/*
* switch_to(n) should switch tasks to task ptr, first checking that
* ptr isn't the current task, in which case it does nothing. This
@@ -155,6 +157,6 @@
#define arch_align_stack(x) (x)
-void die(char *str, struct pt_regs *fp, unsigned long err);
+extern void die(const char *str, struct pt_regs *fp, unsigned long err);
#endif /* _H8300_SYSTEM_H */
diff --git a/arch/h8300/kernel/module.c b/arch/h8300/kernel/module.c
index 0865e29..db4953d 100644
--- a/arch/h8300/kernel/module.c
+++ b/arch/h8300/kernel/module.c
@@ -112,10 +112,9 @@
const Elf_Shdr *sechdrs,
struct module *me)
{
- return module_bug_finalize(hdr, sechdrs, me);
+ return 0;
}
void module_arch_cleanup(struct module *mod)
{
- module_bug_cleanup(mod);
}
diff --git a/arch/h8300/kernel/sys_h8300.c b/arch/h8300/kernel/sys_h8300.c
index dc1ac02..aaf5e5a 100644
--- a/arch/h8300/kernel/sys_h8300.c
+++ b/arch/h8300/kernel/sys_h8300.c
@@ -56,8 +56,8 @@
const char *const envp[])
{
register long res __asm__("er0");
- register char *const *_c __asm__("er3") = envp;
- register char *const *_b __asm__("er2") = argv;
+ register const char *const *_c __asm__("er3") = envp;
+ register const char *const *_b __asm__("er2") = argv;
register const char * _a __asm__("er1") = filename;
__asm__ __volatile__ ("mov.l %1,er0\n\t"
"trapa #0\n\t"
diff --git a/arch/h8300/kernel/traps.c b/arch/h8300/kernel/traps.c
index 3c0b66b..dfa05bd 100644
--- a/arch/h8300/kernel/traps.c
+++ b/arch/h8300/kernel/traps.c
@@ -96,7 +96,7 @@
printk("\n\n");
}
-void die(char *str, struct pt_regs *fp, unsigned long err)
+void die(const char *str, struct pt_regs *fp, unsigned long err)
{
static int diecount;
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index 2bef526..1e8d71a 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -149,7 +149,7 @@
ch = ia64_ssc(0, 0, 0, 0,
SSC_GETCHAR);
while (!ch);
- handle_sysrq(ch, NULL);
+ handle_sysrq(ch);
}
#endif
seen_esc = 0;
diff --git a/arch/ia64/include/asm/compat.h b/arch/ia64/include/asm/compat.h
index f90edc8..9301a28 100644
--- a/arch/ia64/include/asm/compat.h
+++ b/arch/ia64/include/asm/compat.h
@@ -199,7 +199,7 @@
}
static __inline__ void __user *
-compat_alloc_user_space (long len)
+arch_compat_alloc_user_space (long len)
{
struct pt_regs *regs = task_pt_regs(current);
return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len);
diff --git a/arch/ia64/include/asm/hardirq.h b/arch/ia64/include/asm/hardirq.h
index d514cd9..8fb7d33 100644
--- a/arch/ia64/include/asm/hardirq.h
+++ b/arch/ia64/include/asm/hardirq.h
@@ -6,12 +6,6 @@
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
-
-#include <linux/threads.h>
-#include <linux/irq.h>
-
-#include <asm/processor.h>
-
/*
* No irq_cpustat_t for IA-64. The data is held in the per-CPU data structure.
*/
@@ -20,6 +14,11 @@
#define local_softirq_pending() (local_cpu_data->softirq_pending)
+#include <linux/threads.h>
+#include <linux/irq.h>
+
+#include <asm/processor.h>
+
extern void __iomem *ipi_base_addr;
void ack_bad_irq(unsigned int irq);
diff --git a/arch/ia64/include/asm/system.h b/arch/ia64/include/asm/system.h
index 9f342a5..dd028f2 100644
--- a/arch/ia64/include/asm/system.h
+++ b/arch/ia64/include/asm/system.h
@@ -272,10 +272,6 @@
void default_idle(void);
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
-extern void account_system_vtime(struct task_struct *);
-#endif
-
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index 3567d54..331d42b 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -420,22 +420,31 @@
;;
RSM_PSR_I(p0, r18, r19) // mask interrupt delivery
- mov ar.ccv=0
andcm r14=r14,r17 // filter out SIGKILL & SIGSTOP
+ mov r8=EINVAL // default to EINVAL
#ifdef CONFIG_SMP
- mov r17=1
+ // __ticket_spin_trylock(r31)
+ ld4 r17=[r31]
;;
- cmpxchg4.acq r18=[r31],r17,ar.ccv // try to acquire the lock
- mov r8=EINVAL // default to EINVAL
+ mov.m ar.ccv=r17
+ extr.u r9=r17,17,15
+ adds r19=1,r17
+ extr.u r18=r17,0,15
;;
+ cmp.eq p6,p7=r9,r18
+ ;;
+(p6) cmpxchg4.acq r9=[r31],r19,ar.ccv
+(p6) dep.z r20=r19,1,15 // next serving ticket for unlock
+(p7) br.cond.spnt.many .lock_contention
+ ;;
+ cmp4.eq p0,p7=r9,r17
+ adds r31=2,r31
+(p7) br.cond.spnt.many .lock_contention
ld8 r3=[r2] // re-read current->blocked now that we hold the lock
- cmp4.ne p6,p0=r18,r0
-(p6) br.cond.spnt.many .lock_contention
;;
#else
ld8 r3=[r2] // re-read current->blocked now that we hold the lock
- mov r8=EINVAL // default to EINVAL
#endif
add r18=IA64_TASK_PENDING_OFFSET+IA64_SIGPENDING_SIGNAL_OFFSET,r16
add r19=IA64_TASK_SIGNAL_OFFSET,r16
@@ -490,7 +499,9 @@
(p6) br.cond.spnt.few 1b // yes -> retry
#ifdef CONFIG_SMP
- st4.rel [r31]=r0 // release the lock
+ // __ticket_spin_unlock(r31)
+ st2.rel [r31]=r20
+ mov r20=0 // I must not leak kernel bits...
#endif
SSM_PSR_I(p0, p9, r31)
;;
@@ -512,7 +523,8 @@
.sig_pending:
#ifdef CONFIG_SMP
- st4.rel [r31]=r0 // release the lock
+ // __ticket_spin_unlock(r31)
+ st2.rel [r31]=r20 // release the lock
#endif
SSM_PSR_I(p0, p9, r17)
;;
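
For readers tracing the fsys.S hunk above: the assembly open-codes the ticket-lock fast path so the lightweight syscall path can take and release the siglock itself. Below is a minimal C sketch of that protocol, assuming the layout the hunk implies (ticket in bits 0-14, now-serving in bits 17-31 of a little-endian 32-bit word); the type and function names are illustrative, not kernel API, and ticket wrap-around handling is omitted.

	#include <stdint.h>

	typedef struct { uint32_t lock; } ticket_lock_t;	/* illustrative type */

	#define TICKET_SHIFT	17
	#define TICKET_MASK	((1u << 15) - 1)

	static int ticket_trylock(ticket_lock_t *p)
	{
		uint32_t t      = p->lock;				/* ld4 r17=[r31] */
		uint32_t ticket = t & TICKET_MASK;			/* extr.u r18=r17,0,15 */
		uint32_t serve  = (t >> TICKET_SHIFT) & TICKET_MASK;	/* extr.u r9=r17,17,15 */

		if (ticket != serve)
			return 0;		/* contended: caller takes the slow path */
		/* cmpxchg4.acq: claim the next ticket atomically */
		return __sync_bool_compare_and_swap(&p->lock, t, t + 1);
	}

	static void ticket_unlock(ticket_lock_t *p)
	{
		/* st2.rel on the upper halfword (little-endian ia64): the serve
		 * field starts at bit 1 of that halfword, so advancing now-serving
		 * by one ticket means adding 2 - the value the dep.z in the hunk
		 * precomputes into r20 at lock time. */
		uint16_t *serve = (uint16_t *)&p->lock + 1;
		__atomic_store_n(serve, (uint16_t)(*serve + 2), __ATOMIC_RELEASE);
	}

The asm additionally re-checks the cmpxchg result (cmp4.eq p0,p7=r9,r17) and branches to .lock_contention on failure, the moral equivalent of this trylock returning 0.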
diff --git a/arch/m32r/include/asm/elf.h b/arch/m32r/include/asm/elf.h
index 2f85412..b8da7d0 100644
--- a/arch/m32r/include/asm/elf.h
+++ b/arch/m32r/include/asm/elf.h
@@ -82,9 +82,9 @@
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS32
-#if defined(__LITTLE_ENDIAN)
+#if defined(__LITTLE_ENDIAN__)
#define ELF_DATA ELFDATA2LSB
-#elif defined(__BIG_ENDIAN)
+#elif defined(__BIG_ENDIAN__)
#define ELF_DATA ELFDATA2MSB
#else
#error no endian defined
diff --git a/arch/m32r/include/asm/signal.h b/arch/m32r/include/asm/signal.h
index 9c1acb2..b2eeb0d 100644
--- a/arch/m32r/include/asm/signal.h
+++ b/arch/m32r/include/asm/signal.h
@@ -157,7 +157,6 @@
#undef __HAVE_ARCH_SIG_BITOPS
struct pt_regs;
-extern int do_signal(struct pt_regs *regs, sigset_t *oldset);
#define ptrace_signal_deliver(regs, cookie) do { } while (0)
diff --git a/arch/m32r/include/asm/unistd.h b/arch/m32r/include/asm/unistd.h
index 7612577..c705456 100644
--- a/arch/m32r/include/asm/unistd.h
+++ b/arch/m32r/include/asm/unistd.h
@@ -351,6 +351,7 @@
#define __ARCH_WANT_SYS_OLD_GETRLIMIT /*will be unused*/
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_RT_SIGACTION
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
#define __IGNORE_lchown
#define __IGNORE_setuid
diff --git a/arch/m32r/kernel/.gitignore b/arch/m32r/kernel/.gitignore
new file mode 100644
index 0000000..c5f676c
--- /dev/null
+++ b/arch/m32r/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S
index 4038698..225412b 100644
--- a/arch/m32r/kernel/entry.S
+++ b/arch/m32r/kernel/entry.S
@@ -235,10 +235,9 @@
work_notifysig: ; deal with pending signals and
; notify-resume requests
mv r0, sp ; arg1 : struct pt_regs *regs
- ldi r1, #0 ; arg2 : sigset_t *oldset
- mv r2, r9 ; arg3 : __u32 thread_info_flags
+ mv r1, r9 ; arg2 : __u32 thread_info_flags
bl do_notify_resume
- bra restore_all
+ bra resume_userspace
; perform syscall exit tracing
ALIGN
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index e555091..0021ade 100644
--- a/arch/m32r/kernel/ptrace.c
+++ b/arch/m32r/kernel/ptrace.c
@@ -592,16 +592,17 @@
if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
!= sizeof(insn))
- break;
+ return -EIO;
compute_next_pc(insn, pc, &next_pc, child);
if (next_pc & 0x80000000)
- break;
+ return -EIO;
if (embed_debug_trap(child, next_pc))
- break;
+ return -EIO;
invalidate_cache();
+ return 0;
}
void user_disable_single_step(struct task_struct *child)
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c
index 144b0f1..a08697f 100644
--- a/arch/m32r/kernel/signal.c
+++ b/arch/m32r/kernel/signal.c
@@ -30,35 +30,6 @@
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-int do_signal(struct pt_regs *, sigset_t *);
-
-asmlinkage int
-sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize,
- unsigned long r2, unsigned long r3, unsigned long r4,
- unsigned long r5, unsigned long r6, struct pt_regs *regs)
-{
- sigset_t newset;
-
- /* XXX: Don't preclude handling different sized sigset_t's. */
- if (sigsetsize != sizeof(sigset_t))
- return -EINVAL;
-
- if (copy_from_user(&newset, unewset, sizeof(newset)))
- return -EFAULT;
- sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
-
- spin_lock_irq(&current->sighand->siglock);
- current->saved_sigmask = current->blocked;
- current->blocked = newset;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- set_thread_flag(TIF_RESTORE_SIGMASK);
- return -ERESTARTNOHAND;
-}
-
asmlinkage int
sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
unsigned long r2, unsigned long r3, unsigned long r4,
@@ -218,7 +189,7 @@
return (void __user *)((sp - frame_size) & -8ul);
}
-static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs)
{
struct rt_sigframe __user *frame;
@@ -275,22 +246,34 @@
current->comm, current->pid, frame, regs->pc);
#endif
- return;
+ return 0;
give_sigsegv:
force_sigsegv(sig, current);
+ return -EFAULT;
+}
+
+static int prev_insn(struct pt_regs *regs)
+{
+ u16 inst;
+ if (get_user(inst, (u16 __user *)(regs->bpc - 2)))
+ return -EFAULT;
+ if ((inst & 0xfff0) == 0x10f0) /* trap ? */
+ regs->bpc -= 2;
+ else
+ regs->bpc -= 4;
+ regs->syscall_nr = -1;
+ return 0;
}
/*
* OK, we're invoking a handler
*/
-static void
+static int
handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *oldset, struct pt_regs *regs)
{
- unsigned short inst;
-
/* Are we from a system call? */
if (regs->syscall_nr >= 0) {
/* If so, check system call restarting.. */
@@ -308,16 +291,14 @@
/* fallthrough */
case -ERESTARTNOINTR:
regs->r0 = regs->orig_r0;
- inst = *(unsigned short *)(regs->bpc - 2);
- if ((inst & 0xfff0) == 0x10f0) /* trap ? */
- regs->bpc -= 2;
- else
- regs->bpc -= 4;
+ if (prev_insn(regs) < 0)
+ return -EFAULT;
}
}
/* Set up the stack frame */
- setup_rt_frame(sig, ka, info, oldset, regs);
+ if (setup_rt_frame(sig, ka, info, oldset, regs))
+ return -EFAULT;
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -325,6 +306,7 @@
sigaddset(&current->blocked,sig);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
+ return 0;
}
/*
@@ -332,12 +314,12 @@
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*/
-int do_signal(struct pt_regs *regs, sigset_t *oldset)
+static void do_signal(struct pt_regs *regs)
{
siginfo_t info;
int signr;
struct k_sigaction ka;
- unsigned short inst;
+ sigset_t *oldset;
/*
* We want the common case to go fast, which
@@ -346,12 +328,14 @@
* if so.
*/
if (!user_mode(regs))
- return 1;
+ return;
if (try_to_freeze())
goto no_signal;
- if (!oldset)
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ oldset = &current->saved_sigmask;
+ else
oldset = &current->blocked;
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
@@ -363,8 +347,10 @@
*/
/* Whee! Actually deliver the signal. */
- handle_signal(signr, &ka, &info, oldset, regs);
- return 1;
+ if (handle_signal(signr, &ka, &info, oldset, regs) == 0)
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+
+ return;
}
no_signal:
@@ -375,31 +361,24 @@
regs->r0 == -ERESTARTSYS ||
regs->r0 == -ERESTARTNOINTR) {
regs->r0 = regs->orig_r0;
- inst = *(unsigned short *)(regs->bpc - 2);
- if ((inst & 0xfff0) == 0x10f0) /* trap ? */
- regs->bpc -= 2;
- else
- regs->bpc -= 4;
- }
- if (regs->r0 == -ERESTART_RESTARTBLOCK){
+ prev_insn(regs);
+ } else if (regs->r0 == -ERESTART_RESTARTBLOCK){
regs->r0 = regs->orig_r0;
regs->r7 = __NR_restart_syscall;
- inst = *(unsigned short *)(regs->bpc - 2);
- if ((inst & 0xfff0) == 0x10f0) /* trap ? */
- regs->bpc -= 2;
- else
- regs->bpc -= 4;
+ prev_insn(regs);
}
}
- return 0;
+ if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+ sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+ }
}
/*
* notification of userspace execution resumption
* - triggered by current->work.notify_resume
*/
-void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
- __u32 thread_info_flags)
+void do_notify_resume(struct pt_regs *regs, __u32 thread_info_flags)
{
/* Pending single-step? */
if (thread_info_flags & _TIF_SINGLESTEP)
@@ -407,7 +386,7 @@
/* deal with pending signal delivery */
if (thread_info_flags & _TIF_SIGPENDING)
- do_signal(regs,oldset);
+ do_signal(regs);
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 60b15d0..b43b36b 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -340,10 +340,13 @@
#define __NR_set_thread_area 334
#define __NR_atomic_cmpxchg_32 335
#define __NR_atomic_barrier 336
+#define __NR_fanotify_init 337
+#define __NR_fanotify_mark 338
+#define __NR_prlimit64 339
#ifdef __KERNEL__
-#define NR_syscalls 337
+#define NR_syscalls 340
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
index 2391bdf..6360c43 100644
--- a/arch/m68k/kernel/entry.S
+++ b/arch/m68k/kernel/entry.S
@@ -765,4 +765,7 @@
.long sys_set_thread_area
.long sys_atomic_cmpxchg_32 /* 335 */
.long sys_atomic_barrier
+ .long sys_fanotify_init
+ .long sys_fanotify_mark
+ .long sys_prlimit64
diff --git a/arch/m68k/mac/macboing.c b/arch/m68k/mac/macboing.c
index 8f06408..05285d0 100644
--- a/arch/m68k/mac/macboing.c
+++ b/arch/m68k/mac/macboing.c
@@ -162,7 +162,7 @@
void mac_mksound( unsigned int freq, unsigned int length )
{
__u32 cfreq = ( freq << 5 ) / 468;
- __u32 flags;
+ unsigned long flags;
int i;
if ( mac_special_bell == NULL )
@@ -224,7 +224,7 @@
*/
static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsigned int volume )
{
- __u32 flags;
+ unsigned long flags;
/* if the bell is already ringing, ring longer */
if ( mac_bell_duration > 0 )
@@ -271,7 +271,7 @@
static void mac_quadra_ring_bell( unsigned long ignored )
{
int i, count = mac_asc_samplespersec / HZ;
- __u32 flags;
+ unsigned long flags;
/*
* we neither want a sound buffer overflow nor underflow, so we need to match
diff --git a/arch/m68knommu/kernel/syscalltable.S b/arch/m68knommu/kernel/syscalltable.S
index b30b3eb..79b1ed1 100644
--- a/arch/m68knommu/kernel/syscalltable.S
+++ b/arch/m68knommu/kernel/syscalltable.S
@@ -355,6 +355,9 @@
.long sys_set_thread_area
.long sys_atomic_cmpxchg_32 /* 335 */
.long sys_atomic_barrier
+ .long sys_fanotify_init
+ .long sys_fanotify_mark
+ .long sys_prlimit64
.rept NR_syscalls-(.-sys_call_table)/4
.long sys_ni_syscall
diff --git a/arch/m68knommu/kernel/vmlinux.lds.S b/arch/m68knommu/kernel/vmlinux.lds.S
index a91b271..ef33213 100644
--- a/arch/m68knommu/kernel/vmlinux.lds.S
+++ b/arch/m68knommu/kernel/vmlinux.lds.S
@@ -150,6 +150,8 @@
_sdata = . ;
DATA_DATA
CACHELINE_ALIGNED_DATA(32)
+ PAGE_ALIGNED_DATA(PAGE_SIZE)
+ *(.data..shared_aligned)
INIT_TASK_DATA(THREAD_SIZE)
_edata = . ;
} > DATA
diff --git a/arch/mips/Kbuild b/arch/mips/Kbuild
index e322d65..7dd65cf 100644
--- a/arch/mips/Kbuild
+++ b/arch/mips/Kbuild
@@ -7,6 +7,10 @@
include arch/mips/Kbuild.platforms
obj-y := $(platform-y)
+# make clean traverses $(obj-) without having included .config, so
+# everything ends up here
+obj- := $(platform-)
+
# mips object files
# The object files are linked as core-y files would be linked
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 3ad59dd..4c9f402 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -13,6 +13,7 @@
select HAVE_KPROBES
select HAVE_KRETPROBES
select RTC_LIB if !MACH_LOONGSON
+ select GENERIC_ATOMIC64 if !64BIT
mainmenu "Linux/MIPS Kernel Configuration"
@@ -880,11 +881,15 @@
config GENERIC_ISA_DMA
bool
select ZONE_DMA if GENERIC_ISA_DMA_SUPPORT_BROKEN=n
+ select ISA_DMA_API
config GENERIC_ISA_DMA_SUPPORT_BROKEN
bool
select GENERIC_ISA_DMA
+config ISA_DMA_API
+ bool
+
config GENERIC_GPIO
bool
@@ -1646,8 +1651,16 @@
select SYS_SUPPORTS_SMP
select SMP_UP
help
- This is a kernel model which is also known a VSMP or lately
- has been marketesed into SMVP.
+ This is a kernel model which is known as VSMP but has lately been
+ marketed as SMVP.
+ Virtual SMP uses the processor's VPEs to implement virtual
+ processors. In currently available configurations of the 34K processor
+ this allows for a dual processor. Both processors will share the same
+ primary caches; each will obtain half of the TLB for its own
+ exclusive use. To a layman this model can be described as similar to
+ what Intel calls Hyperthreading.
+
+ For further information see http://www.linux-mips.org/wiki/34K#VSMP
config MIPS_MT_SMTC
bool "SMTC: Use all TCs on all VPEs for SMP"
@@ -1664,6 +1677,14 @@
help
This is a kernel model which is known a SMTC or lately has been
marketesed into SMVP.
+ It presents the available TCs of the core as processors to Linux.
+ On currently available 34K processors this means a Linux system will
+ see up to 5 processors. The implementation of the SMTC kernel differs
+ significantly from VSMP and cannot efficiently coexist in the same
+ kernel binary, so the choice between VSMP and SMTC is a compile-time
+ decision.
+
+ For further information see http://www.linux-mips.org/wiki/34K#SMTC
endchoice
diff --git a/arch/mips/alchemy/common/prom.c b/arch/mips/alchemy/common/prom.c
index c29511b..5340210 100644
--- a/arch/mips/alchemy/common/prom.c
+++ b/arch/mips/alchemy/common/prom.c
@@ -43,7 +43,7 @@
char **prom_argv;
char **prom_envp;
-void prom_init_cmdline(void)
+void __init prom_init_cmdline(void)
{
int i;
@@ -104,7 +104,7 @@
}
}
-int prom_get_ethernet_addr(char *ethernet_addr)
+int __init prom_get_ethernet_addr(char *ethernet_addr)
{
char *ethaddr_str;
@@ -123,7 +123,6 @@
return 0;
}
-EXPORT_SYMBOL(prom_get_ethernet_addr);
void __init prom_free_prom_memory(void)
{
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
index ed9bb70..5042d51 100644
--- a/arch/mips/boot/compressed/Makefile
+++ b/arch/mips/boot/compressed/Makefile
@@ -59,7 +59,7 @@
hostprogs-y := calc_vmlinuz_load_addr
VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \
- $(objtree)/$(KBUILD_IMAGE) $(VMLINUX_LOAD_ADDRESS))
+ $(obj)/vmlinux.bin $(VMLINUX_LOAD_ADDRESS))
vmlinuzobjs-y += $(obj)/piggy.o
@@ -105,4 +105,4 @@
vmlinuz.srec: vmlinuz
$(call cmd,objcopy)
-clean-files := $(objtree)/vmlinuz.*
+clean-files := $(objtree)/vmlinuz $(objtree)/vmlinuz.{32,ecoff,bin,srec}
diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig
index 094c17e..47323ca 100644
--- a/arch/mips/cavium-octeon/Kconfig
+++ b/arch/mips/cavium-octeon/Kconfig
@@ -83,3 +83,7 @@
def_bool y
select SPARSEMEM_STATIC
depends on CPU_CAVIUM_OCTEON
+
+config CAVIUM_OCTEON_HELPER
+ def_bool y
+ depends on OCTEON_ETHERNET || PCI
diff --git a/arch/mips/cavium-octeon/cpu.c b/arch/mips/cavium-octeon/cpu.c
index c664c8c..a5b4279 100644
--- a/arch/mips/cavium-octeon/cpu.c
+++ b/arch/mips/cavium-octeon/cpu.c
@@ -41,7 +41,7 @@
return NOTIFY_OK; /* Let default notifier send signals */
}
-static int cnmips_cu2_setup(void)
+static int __init cnmips_cu2_setup(void)
{
return cu2_notifier(cnmips_cu2_call, 0);
}
diff --git a/arch/mips/cavium-octeon/executive/Makefile b/arch/mips/cavium-octeon/executive/Makefile
index 2fd66db..7f41c5b 100644
--- a/arch/mips/cavium-octeon/executive/Makefile
+++ b/arch/mips/cavium-octeon/executive/Makefile
@@ -11,4 +11,4 @@
obj-y += cvmx-bootmem.o cvmx-l2c.o cvmx-sysinfo.o octeon-model.o
-obj-$(CONFIG_PCI) += cvmx-helper-errata.o cvmx-helper-jtag.o
+obj-$(CONFIG_CAVIUM_OCTEON_HELPER) += cvmx-helper-errata.o cvmx-helper-jtag.o
diff --git a/arch/mips/dec/Platform b/arch/mips/dec/Platform
index 3adbcbd..cf55a6f 100644
--- a/arch/mips/dec/Platform
+++ b/arch/mips/dec/Platform
@@ -1,7 +1,7 @@
#
# DECstation family
#
-platform-$(CONFIG_MACH_DECSTATION) = dec/
+platform-$(CONFIG_MACH_DECSTATION) += dec/
cflags-$(CONFIG_MACH_DECSTATION) += \
-I$(srctree)/arch/mips/include/asm/mach-dec
libs-$(CONFIG_MACH_DECSTATION) += arch/mips/dec/prom/
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index c63c56b..47d87da 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -782,6 +782,10 @@
*/
#define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
+#else /* !CONFIG_64BIT */
+
+#include <asm-generic/atomic64.h>
+
#endif /* CONFIG_64BIT */
/*
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h
index 613f691..dbc5106 100644
--- a/arch/mips/include/asm/compat.h
+++ b/arch/mips/include/asm/compat.h
@@ -145,7 +145,7 @@
return (u32)(unsigned long)uptr;
}
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
{
struct pt_regs *regs = (struct pt_regs *)
((unsigned long) current_thread_info() + THREAD_SIZE - 32) - 1;
diff --git a/arch/mips/include/asm/cop2.h b/arch/mips/include/asm/cop2.h
index 2cb2f0c..3532e2c 100644
--- a/arch/mips/include/asm/cop2.h
+++ b/arch/mips/include/asm/cop2.h
@@ -24,7 +24,7 @@
#define cu2_notifier(fn, pri) \
({ \
- static struct notifier_block fn##_nb __cpuinitdata = { \
+ static struct notifier_block fn##_nb = { \
.notifier_call = fn, \
.priority = pri \
}; \
diff --git a/arch/mips/include/asm/fcntl.h b/arch/mips/include/asm/fcntl.h
index e482fe9..75edded 100644
--- a/arch/mips/include/asm/fcntl.h
+++ b/arch/mips/include/asm/fcntl.h
@@ -56,6 +56,7 @@
*/
#ifdef CONFIG_32BIT
+#include <linux/types.h>
struct flock {
short l_type;
diff --git a/arch/mips/include/asm/gic.h b/arch/mips/include/asm/gic.h
index 9b9436a..86548da 100644
--- a/arch/mips/include/asm/gic.h
+++ b/arch/mips/include/asm/gic.h
@@ -321,6 +321,7 @@
*/
struct gic_intr_map {
unsigned int cpunum; /* Directed to this CPU */
+#define GIC_UNUSED 0xdead /* Dummy data */
unsigned int pin; /* Directed to this Pin */
unsigned int polarity; /* Polarity : +/- */
unsigned int trigtype; /* Trigger : Edge/Levl */
diff --git a/arch/mips/include/asm/mach-tx49xx/kmalloc.h b/arch/mips/include/asm/mach-tx49xx/kmalloc.h
index b74caf6..ff9a8b86 100644
--- a/arch/mips/include/asm/mach-tx49xx/kmalloc.h
+++ b/arch/mips/include/asm/mach-tx49xx/kmalloc.h
@@ -1,6 +1,6 @@
#ifndef __ASM_MACH_TX49XX_KMALLOC_H
#define __ASM_MACH_TX49XX_KMALLOC_H
-#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
#endif /* __ASM_MACH_TX49XX_KMALLOC_H */
diff --git a/arch/mips/include/asm/mips-boards/maltaint.h b/arch/mips/include/asm/mips-boards/maltaint.h
index cea872f..d11aa02 100644
--- a/arch/mips/include/asm/mips-boards/maltaint.h
+++ b/arch/mips/include/asm/mips-boards/maltaint.h
@@ -88,9 +88,6 @@
#define GIC_EXT_INTR(x) x
-/* Dummy data */
-#define X 0xdead
-
/* External Interrupts used for IPI */
#define GIC_IPI_EXT_INTR_RESCHED_VPE0 16
#define GIC_IPI_EXT_INTR_CALLFNC_VPE0 17
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index a16beaf..e59cd1a 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -150,6 +150,20 @@
((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
#endif
#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
+
+/*
+ * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad
+ * (lmo) resp. 8431fd094d625b94d364fe393076ccef88e6ce18 (kernel.org). The
+ * discussion can be found in lkml posting
+ * <a2ebde260608230500o3407b108hc03debb9da6e62c@mail.gmail.com> which is
+ * archived at http://lists.linuxcoding.com/kernel/2006-q3/msg17360.html
+ *
+ * It is unclear if the miscompilations mentioned in
+ * http://lkml.org/lkml/2010/8/8/138 also affect MIPS so we keep this one
+ * until GCC 3.x has been retired before we can apply
+ * https://patchwork.linux-mips.org/patch/1541/
+ */
+
#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
diff --git a/arch/mips/include/asm/siginfo.h b/arch/mips/include/asm/siginfo.h
index 96e28f1..1ca64b4 100644
--- a/arch/mips/include/asm/siginfo.h
+++ b/arch/mips/include/asm/siginfo.h
@@ -88,6 +88,7 @@
#ifdef __ARCH_SI_TRAPNO
int _trapno; /* TRAP # which caused the signal */
#endif
+ short _addr_lsb;
} _sigfault;
/* SIGPOLL, SIGXFSZ (To do ...) */
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 2376f2e..70df9c0 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -146,7 +146,8 @@
#define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
/* work to do on interrupt/exception return */
-#define _TIF_WORK_MASK (0x0000ffef & ~_TIF_SECCOMP)
+#define _TIF_WORK_MASK (0x0000ffef & \
+ ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT))
/* work to do on any return to u-space */
#define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP)
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index baa318a..550725b 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -356,16 +356,19 @@
#define __NR_perf_event_open (__NR_Linux + 333)
#define __NR_accept4 (__NR_Linux + 334)
#define __NR_recvmmsg (__NR_Linux + 335)
+#define __NR_fanotify_init (__NR_Linux + 336)
+#define __NR_fanotify_mark (__NR_Linux + 337)
+#define __NR_prlimit64 (__NR_Linux + 338)
/*
* Offset of the last Linux o32 flavoured syscall
*/
-#define __NR_Linux_syscalls 335
+#define __NR_Linux_syscalls 338
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#define __NR_O32_Linux 4000
-#define __NR_O32_Linux_syscalls 335
+#define __NR_O32_Linux_syscalls 338
#if _MIPS_SIM == _MIPS_SIM_ABI64
@@ -668,16 +671,19 @@
#define __NR_perf_event_open (__NR_Linux + 292)
#define __NR_accept4 (__NR_Linux + 293)
#define __NR_recvmmsg (__NR_Linux + 294)
+#define __NR_fanotify_init (__NR_Linux + 295)
+#define __NR_fanotify_mark (__NR_Linux + 296)
+#define __NR_prlimit64 (__NR_Linux + 297)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
-#define __NR_Linux_syscalls 294
+#define __NR_Linux_syscalls 297
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
#define __NR_64_Linux 5000
-#define __NR_64_Linux_syscalls 294
+#define __NR_64_Linux_syscalls 297
#if _MIPS_SIM == _MIPS_SIM_NABI32
@@ -985,16 +991,19 @@
#define __NR_accept4 (__NR_Linux + 297)
#define __NR_recvmmsg (__NR_Linux + 298)
#define __NR_getdents64 (__NR_Linux + 299)
+#define __NR_fanotify_init (__NR_Linux + 300)
+#define __NR_fanotify_mark (__NR_Linux + 301)
+#define __NR_prlimit64 (__NR_Linux + 302)
/*
* Offset of the last N32 flavoured syscall
*/
-#define __NR_Linux_syscalls 299
+#define __NR_Linux_syscalls 302
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
#define __NR_N32_Linux 6000
-#define __NR_N32_Linux_syscalls 299
+#define __NR_N32_Linux_syscalls 302
#ifdef __KERNEL__
diff --git a/arch/mips/jz4740/Platform b/arch/mips/jz4740/Platform
index 6a97230..ba91be9 100644
--- a/arch/mips/jz4740/Platform
+++ b/arch/mips/jz4740/Platform
@@ -1,3 +1,3 @@
-core-$(CONFIG_MACH_JZ4740) += arch/mips/jz4740/
+platform-$(CONFIG_MACH_JZ4740) += jz4740/
cflags-$(CONFIG_MACH_JZ4740) += -I$(srctree)/arch/mips/include/asm/mach-jz4740
load-$(CONFIG_MACH_JZ4740) += 0xffffffff80010000
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 0176ed0..32103cc 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -40,7 +40,6 @@
return -EFAULT;
}
- regs->regs[0] = 0;
switch (insn.i_format.opcode) {
/*
* jr and jalr are in r_format format.
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
index b181f2f..82ba9f6 100644
--- a/arch/mips/kernel/irq-gic.c
+++ b/arch/mips/kernel/irq-gic.c
@@ -7,7 +7,6 @@
#include <asm/io.h>
#include <asm/gic.h>
#include <asm/gcmpregs.h>
-#include <asm/mips-boards/maltaint.h>
#include <asm/irq.h>
#include <linux/hardirq.h>
#include <asm-generic/bitops/find.h>
@@ -131,7 +130,7 @@
int i;
irq -= _irqbase;
- pr_debug(KERN_DEBUG "%s(%d) called\n", __func__, irq);
+ pr_debug("%s(%d) called\n", __func__, irq);
cpumask_and(&tmp, cpumask, cpu_online_mask);
if (cpus_empty(tmp))
return -1;
@@ -222,7 +221,7 @@
/* Setup specifics */
for (i = 0; i < mapsize; i++) {
cpu = intrmap[i].cpunum;
- if (cpu == X)
+ if (cpu == GIC_UNUSED)
continue;
if (cpu == 0 && i != 0 && intrmap[i].flags == 0)
continue;
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
index 1f4e2fa..f4546e9 100644
--- a/arch/mips/kernel/kgdb.c
+++ b/arch/mips/kernel/kgdb.c
@@ -283,7 +283,7 @@
struct pt_regs *regs = args->regs;
int trap = (regs->cp0_cause & 0x7c) >> 2;
- /* Userpace events, ignore. */
+ /* Userspace events, ignore. */
if (user_mode(regs))
return NOTIFY_DONE;
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
index 80e2ba6..29811f0 100644
--- a/arch/mips/kernel/kspd.c
+++ b/arch/mips/kernel/kspd.c
@@ -251,7 +251,7 @@
memset(&tz, 0, sizeof(tz));
if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv,
(int)&tz, 0, 0)) == 0)
- ret.retval = tv.tv_sec;
+ ret.retval = tv.tv_sec;
break;
case MTSP_SYSCALL_EXIT:
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index c2dab14..6343b4a 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -341,3 +341,10 @@
{
return sys_lookup_dcookie(merge_64(a0, a1), buf, len);
}
+
+SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags,
+ u64, a3, u64, a4, int, dfd, const char __user *, pathname)
+{
+ return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4),
+ dfd, pathname);
+}
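
The sys_32_fanotify_mark wrapper above exists because the o32 ABI passes a 64-bit argument in a pair of 32-bit registers, so the compat entry point receives the mask as two halves (a3, a4) and reassembles them. A sketch of what merge_64() does; the macro itself is defined elsewhere in the MIPS tree, so treat the exact form here as an assumption:

	/* Sketch: rebuild a 64-bit value from two 32-bit register halves.
	 * Which register carries the high half depends on endianness. */
	#ifdef __MIPSEB__	/* big endian: first register is the high half */
	#define merge_64(r1, r2) ((((r1) & 0xffffffffUL) << 32) | ((r2) & 0xffffffffUL))
	#else			/* little endian: second register is the high half */
	#define merge_64(r1, r2) ((((r2) & 0xffffffffUL) << 32) | ((r1) & 0xffffffffUL))
	#endif

With that, a 32-bit caller's fanotify_mark(fd, flags, mask, dfd, path) reaches sys_fanotify_mark() with the full 64-bit mask reconstituted.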
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index 2340f11..9a526ba 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -103,7 +103,7 @@
if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
goto out_unlock;
- retval = security_task_setscheduler(p, 0, NULL);
+ retval = security_task_setscheduler(p);
if (retval)
goto out_unlock;
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index c51b95f..c877733 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -536,7 +536,7 @@
{
/* do the secure computing check first */
if (!entryexit)
- secure_computing(regs->regs[0]);
+ secure_computing(regs->regs[2]);
if (unlikely(current->audit_context) && entryexit)
audit_syscall_exit(AUDITSC_RESULT(regs->regs[2]),
@@ -565,7 +565,7 @@
out:
if (unlikely(current->audit_context) && !entryexit)
- audit_syscall_entry(audit_arch(), regs->regs[0],
+ audit_syscall_entry(audit_arch(), regs->regs[2],
regs->regs[4], regs->regs[5],
regs->regs[6], regs->regs[7]);
}
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 17202bb..fbaabad 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -63,9 +63,9 @@
sw t0, PT_R7(sp) # set error flag
beqz t0, 1f
+ lw t1, PT_R2(sp) # syscall number
negu v0 # error
- sw v0, PT_R0(sp) # set flag for syscall
- # restarting
+ sw t1, PT_R0(sp) # save it for syscall restarting
1: sw v0, PT_R2(sp) # result
o32_syscall_exit:
@@ -104,9 +104,9 @@
sw t0, PT_R7(sp) # set error flag
beqz t0, 1f
+ lw t1, PT_R2(sp) # syscall number
negu v0 # error
- sw v0, PT_R0(sp) # set flag for syscall
- # restarting
+ sw t1, PT_R0(sp) # save it for syscall restarting
1: sw v0, PT_R2(sp) # result
j syscall_exit
@@ -169,8 +169,7 @@
* We probably should handle this case a bit more drastic.
*/
bad_stack:
- negu v0 # error
- sw v0, PT_R0(sp)
+ li v0, EFAULT
sw v0, PT_R2(sp)
li t0, 1 # set error flag
sw t0, PT_R7(sp)
@@ -583,7 +582,10 @@
sys sys_rt_tgsigqueueinfo 4
sys sys_perf_event_open 5
sys sys_accept4 4
- sys sys_recvmmsg 5
+ sys sys_recvmmsg 5 /* 4335 */
+ sys sys_fanotify_init 2
+ sys sys_fanotify_mark 6
+ sys sys_prlimit64 4
.endm
/* We pre-compute the number of _instruction_ bytes needed to
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index a8a6c59..3f41792 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -66,9 +66,9 @@
sd t0, PT_R7(sp) # set error flag
beqz t0, 1f
+ ld t1, PT_R2(sp) # syscall number
dnegu v0 # error
- sd v0, PT_R0(sp) # set flag for syscall
- # restarting
+ sd t1, PT_R0(sp) # save it for syscall restarting
1: sd v0, PT_R2(sp) # result
n64_syscall_exit:
@@ -109,8 +109,9 @@
sd t0, PT_R7(sp) # set error flag
beqz t0, 1f
+ ld t1, PT_R2(sp) # syscall number
dnegu v0 # error
- sd v0, PT_R0(sp) # set flag for syscall restarting
+ sd t1, PT_R0(sp) # save it for syscall restarting
1: sd v0, PT_R2(sp) # result
j syscall_exit
@@ -416,9 +417,12 @@
PTR sys_pipe2
PTR sys_inotify_init1
PTR sys_preadv
- PTR sys_pwritev /* 5390 */
+ PTR sys_pwritev /* 5290 */
PTR sys_rt_tgsigqueueinfo
PTR sys_perf_event_open
PTR sys_accept4
- PTR sys_recvmmsg
+ PTR sys_recvmmsg
+ PTR sys_fanotify_init /* 5295 */
+ PTR sys_fanotify_mark
+ PTR sys_prlimit64
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index a3d6613..f08ece6 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -65,8 +65,9 @@
sd t0, PT_R7(sp) # set error flag
beqz t0, 1f
+ ld t1, PT_R2(sp) # syscall number
dnegu v0 # error
- sd v0, PT_R0(sp) # set flag for syscall restarting
+ sd t1, PT_R0(sp) # save it for syscall restarting
1: sd v0, PT_R2(sp) # result
local_irq_disable # make sure need_resched and
@@ -106,8 +107,9 @@
sd t0, PT_R7(sp) # set error flag
beqz t0, 1f
+ ld t1, PT_R2(sp) # syscall number
dnegu v0 # error
- sd v0, PT_R0(sp) # set flag for syscall restarting
+ sd t1, PT_R0(sp) # save it for syscall restarting
1: sd v0, PT_R2(sp) # result
j syscall_exit
@@ -320,10 +322,10 @@
PTR sys_cacheflush
PTR sys_cachectl
PTR sys_sysmips
- PTR sys_io_setup /* 6200 */
+ PTR compat_sys_io_setup /* 6200 */
PTR sys_io_destroy
- PTR sys_io_getevents
- PTR sys_io_submit
+ PTR compat_sys_io_getevents
+ PTR compat_sys_io_submit
PTR sys_io_cancel
PTR sys_exit_group /* 6205 */
PTR sys_lookup_dcookie
@@ -419,5 +421,8 @@
PTR sys_perf_event_open
PTR sys_accept4
PTR compat_sys_recvmmsg
- PTR sys_getdents
+ PTR sys_getdents64
+ PTR sys_fanotify_init /* 6300 */
+ PTR sys_fanotify_mark
+ PTR sys_prlimit64
.size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 813689e..78d768a 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -93,8 +93,9 @@
sd t0, PT_R7(sp) # set error flag
beqz t0, 1f
+ ld t1, PT_R2(sp) # syscall number
dnegu v0 # error
- sd v0, PT_R0(sp) # flag for syscall restarting
+ sd t1, PT_R0(sp) # save it for syscall restarting
1: sd v0, PT_R2(sp) # result
o32_syscall_exit:
@@ -142,8 +143,9 @@
sd t0, PT_R7(sp) # set error flag
beqz t0, 1f
+ ld t1, PT_R2(sp) # syscall number
dnegu v0 # error
- sd v0, PT_R0(sp) # set flag for syscall restarting
+ sd t1, PT_R0(sp) # save it for syscall restarting
1: sd v0, PT_R2(sp) # result
j syscall_exit
@@ -154,8 +156,7 @@
* The stackpointer for a call with more than 4 arguments is bad.
*/
bad_stack:
- dnegu v0 # error
- sd v0, PT_R0(sp)
+ li v0, EFAULT
sd v0, PT_R2(sp)
li t0, 1 # set error flag
sd t0, PT_R7(sp)
@@ -444,10 +445,10 @@
PTR compat_sys_futex
PTR compat_sys_sched_setaffinity
PTR compat_sys_sched_getaffinity /* 4240 */
- PTR sys_io_setup
+ PTR compat_sys_io_setup
PTR sys_io_destroy
- PTR sys_io_getevents
- PTR sys_io_submit
+ PTR compat_sys_io_getevents
+ PTR compat_sys_io_submit
PTR sys_io_cancel /* 4245 */
PTR sys_exit_group
PTR sys32_lookup_dcookie
@@ -538,5 +539,8 @@
PTR compat_sys_rt_tgsigqueueinfo
PTR sys_perf_event_open
PTR sys_accept4
- PTR compat_sys_recvmmsg
+ PTR compat_sys_recvmmsg /* 4335 */
+ PTR sys_fanotify_init
+ PTR sys_32_fanotify_mark
+ PTR sys_prlimit64
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 2099d5a..5922342 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -390,7 +390,6 @@
{
struct rt_sigframe __user *frame;
sigset_t set;
- stack_t st;
int sig;
frame = (struct rt_sigframe __user *) regs.regs[29];
@@ -411,11 +410,9 @@
else if (sig)
force_sig(sig, current);
- if (__copy_from_user(&st, &frame->rs_uc.uc_stack, sizeof(st)))
- goto badframe;
/* It is more difficult to avoid calling this function than to
call it and ignore errors. */
- do_sigaltstack((stack_t __user *)&st, NULL, regs.regs[29]);
+ do_sigaltstack(&frame->rs_uc.uc_stack, NULL, regs.regs[29]);
/*
* Don't let your children do this ...
@@ -550,23 +547,26 @@
struct mips_abi *abi = current->thread.abi;
void *vdso = current->mm->context.vdso;
- switch(regs->regs[0]) {
- case ERESTART_RESTARTBLOCK:
- case ERESTARTNOHAND:
- regs->regs[2] = EINTR;
- break;
- case ERESTARTSYS:
- if (!(ka->sa.sa_flags & SA_RESTART)) {
+ if (regs->regs[0]) {
+ switch(regs->regs[2]) {
+ case ERESTART_RESTARTBLOCK:
+ case ERESTARTNOHAND:
regs->regs[2] = EINTR;
break;
+ case ERESTARTSYS:
+ if (!(ka->sa.sa_flags & SA_RESTART)) {
+ regs->regs[2] = EINTR;
+ break;
+ }
+ /* fallthrough */
+ case ERESTARTNOINTR:
+ regs->regs[7] = regs->regs[26];
+ regs->regs[2] = regs->regs[0];
+ regs->cp0_epc -= 4;
}
- /* fallthrough */
- case ERESTARTNOINTR: /* Userland will reload $v0. */
- regs->regs[7] = regs->regs[26];
- regs->cp0_epc -= 8;
- }
- regs->regs[0] = 0; /* Don't deal with this again. */
+ regs->regs[0] = 0; /* Don't deal with this again. */
+ }
if (sig_uses_siginfo(ka))
ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset,
@@ -575,6 +575,9 @@
ret = abi->setup_frame(vdso + abi->signal_return_offset,
ka, regs, sig, oldset);
+ if (ret)
+ return ret;
+
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NODEFER))
@@ -622,17 +625,13 @@
return;
}
- /*
- * Who's code doesn't conform to the restartable syscall convention
- * dies here!!! The li instruction, a single machine instruction,
- * must directly be followed by the syscall instruction.
- */
if (regs->regs[0]) {
if (regs->regs[2] == ERESTARTNOHAND ||
regs->regs[2] == ERESTARTSYS ||
regs->regs[2] == ERESTARTNOINTR) {
+ regs->regs[2] = regs->regs[0];
regs->regs[7] = regs->regs[26];
- regs->cp0_epc -= 8;
+ regs->cp0_epc -= 4;
}
if (regs->regs[2] == ERESTART_RESTARTBLOCK) {
regs->regs[2] = current->thread.abi->restart;
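
Taken together, the scall*.S and signal.c hunks replace the MIPS syscall-restart convention. The old scheme stored the negated error in PT_R0 as a flag and rewound cp0_epc by 8, relying on the "li v0, __NR_xxx" immediately preceding the syscall instruction (the convention the deleted comment threatened death over). The new scheme saves the syscall number itself in PT_R0, so the restart path can reload $v0 from there and rewind by only 4, one instruction, with no assumption about the caller's code layout. In outline (restartable() is a hypothetical predicate standing in for the ERESTART* checks above):

	if (regs->regs[0]) {				/* PT_R0 != 0: a syscall was in progress */
		if (restartable(regs->regs[2])) {
			regs->regs[2] = regs->regs[0];	/* reload $v0 with the syscall number */
			regs->regs[7] = regs->regs[26];	/* restore the saved error flag */
			regs->cp0_epc -= 4;		/* back up over just the syscall insn */
		}
		regs->regs[0] = 0;			/* don't deal with this again */
	}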
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index 2c5df81..ee24d81 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -109,6 +109,7 @@
asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
{
struct rt_sigframe_n32 __user *frame;
+ mm_segment_t old_fs;
sigset_t set;
stack_t st;
s32 sp;
@@ -143,7 +144,11 @@
/* It is more difficult to avoid calling this function than to
call it and ignore errors. */
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
do_sigaltstack((stack_t __user *)&st, NULL, regs.regs[29]);
+ set_fs(old_fs);
+
/*
* Don't let your children do this ...
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 69b039c..33d5a5c 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -109,8 +109,6 @@
unsigned long value;
unsigned int res;
- regs->regs[0] = 0;
-
/*
* This load never faults.
*/
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 7ba8908..469d401 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -44,27 +44,39 @@
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
+ gfp_t dma_flag;
+
/* ignore region specifiers */
gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
-#ifdef CONFIG_ZONE_DMA
+#ifdef CONFIG_ISA
if (dev == NULL)
- gfp |= __GFP_DMA;
- else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
- gfp |= __GFP_DMA;
+ dma_flag = __GFP_DMA;
else
#endif
-#ifdef CONFIG_ZONE_DMA32
+#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
- gfp |= __GFP_DMA32;
+ dma_flag = __GFP_DMA;
+ else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+ dma_flag = __GFP_DMA32;
else
#endif
- ;
+#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
+ if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+ dma_flag = __GFP_DMA32;
+ else
+#endif
+#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
+ if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+ dma_flag = __GFP_DMA;
+ else
+#endif
+ dma_flag = 0;
/* Don't invoke OOM killer */
gfp |= __GFP_NORETRY;
- return gfp;
+ return gfp | dma_flag;
}
void *dma_alloc_noncoherent(struct device *dev, size_t size,
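
The #ifdef ladder in massage_gfp_flags() above reads more easily flattened for one configuration. A sketch for a kernel with both ZONE_DMA and ZONE_DMA32 enabled; pick_dma_flag() is an illustrative name, not a kernel symbol, and the CONFIG_ISA dev == NULL special case is omitted:

	#include <linux/device.h>
	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>

	static gfp_t pick_dma_flag(const struct device *dev)
	{
		if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
			return __GFP_DMA;	/* cannot reach all of the DMA32 zone */
		if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
			return __GFP_DMA32;	/* 32-bit capable, but not 64-bit */
		return 0;			/* no zone restriction needed */
	}

The point of the rewrite is that every ZONE_DMA/ZONE_DMA32 combination now yields exactly one zone modifier (or none), OR-ed into gfp at the end, instead of accumulating flags through fall-through branches.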
diff --git a/arch/mips/mm/sc-rm7k.c b/arch/mips/mm/sc-rm7k.c
index 1ef75cd..274af3b 100644
--- a/arch/mips/mm/sc-rm7k.c
+++ b/arch/mips/mm/sc-rm7k.c
@@ -30,7 +30,7 @@
#define tc_lsize 32
extern unsigned long icache_way_size, dcache_way_size;
-unsigned long tcache_size;
+static unsigned long tcache_size;
#include <asm/r4kcache.h>
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index 15949b0..b79b24a 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -385,6 +385,8 @@
*/
#define GIC_CPU_NMI GIC_MAP_TO_NMI_MSK
+#define X GIC_UNUSED
+
static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = {
{ X, X, X, X, 0 },
{ X, X, X, X, 0 },
@@ -404,6 +406,7 @@
{ X, X, X, X, 0 },
/* The remainder of this table is initialised by fill_ipi_map */
};
+#undef X
/*
* GCMP needs to be detected before any SMP initialisation
diff --git a/arch/mips/pci/pci-rc32434.c b/arch/mips/pci/pci-rc32434.c
index 71f7d27..f31218e 100644
--- a/arch/mips/pci/pci-rc32434.c
+++ b/arch/mips/pci/pci-rc32434.c
@@ -118,7 +118,7 @@
if (!((pcicvalue == PCIM_H_EA) ||
(pcicvalue == PCIM_H_IA_FIX) ||
(pcicvalue == PCIM_H_IA_RR))) {
- pr_err(KERN_ERR "PCI init error!!!\n");
+ pr_err("PCI init error!!!\n");
/* Not in Host Mode, return ERROR */
return -1;
}
diff --git a/arch/mips/pnx8550/common/reset.c b/arch/mips/pnx8550/common/reset.c
index fadd8744..e7a12ff 100644
--- a/arch/mips/pnx8550/common/reset.c
+++ b/arch/mips/pnx8550/common/reset.c
@@ -22,29 +22,19 @@
*/
#include <linux/kernel.h>
+#include <asm/processor.h>
#include <asm/reboot.h>
#include <glb.h>
void pnx8550_machine_restart(char *command)
{
- char head[] = "************* Machine restart *************";
- char foot[] = "*******************************************";
-
- printk("\n\n");
- printk("%s\n", head);
- if (command != NULL)
- printk("* %s\n", command);
- printk("%s\n", foot);
-
PNX8550_RST_CTL = PNX8550_RST_DO_SW_RST;
}
void pnx8550_machine_halt(void)
{
- printk("*** Machine halt. (Not implemented) ***\n");
-}
-
-void pnx8550_machine_power_off(void)
-{
- printk("*** Machine power off. (Not implemented) ***\n");
+ while (1) {
+ if (cpu_wait)
+ cpu_wait();
+ }
}
diff --git a/arch/mips/pnx8550/common/setup.c b/arch/mips/pnx8550/common/setup.c
index 64246c9..43cb394 100644
--- a/arch/mips/pnx8550/common/setup.c
+++ b/arch/mips/pnx8550/common/setup.c
@@ -44,7 +44,6 @@
extern void __init board_setup(void);
extern void pnx8550_machine_restart(char *);
extern void pnx8550_machine_halt(void);
-extern void pnx8550_machine_power_off(void);
extern struct resource ioport_resource;
extern struct resource iomem_resource;
extern char *prom_getcmdline(void);
@@ -100,7 +99,7 @@
_machine_restart = pnx8550_machine_restart;
_machine_halt = pnx8550_machine_halt;
- pm_power_off = pnx8550_machine_power_off;
+ pm_power_off = pnx8550_machine_halt;
/* Clear the Global 2 Register, PCI Inta Output Enable Registers
Bit 1:Enable DAC Powerdown
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index 444b9f9..7c2a2f7 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -8,7 +8,6 @@
config MN10300
def_bool y
select HAVE_OPROFILE
- select HAVE_ARCH_TRACEHOOK
config AM33
def_bool y
diff --git a/arch/mn10300/Kconfig.debug b/arch/mn10300/Kconfig.debug
index ff80e86..ce83c74 100644
--- a/arch/mn10300/Kconfig.debug
+++ b/arch/mn10300/Kconfig.debug
@@ -101,7 +101,7 @@
choice
prompt "GDB stub port"
- default GDBSTUB_TTYSM0
+ default GDBSTUB_ON_TTYSM0
depends on GDBSTUB
help
Select the serial port used for GDB-stub.
diff --git a/arch/mn10300/include/asm/bitops.h b/arch/mn10300/include/asm/bitops.h
index f49ac49..3f50e96 100644
--- a/arch/mn10300/include/asm/bitops.h
+++ b/arch/mn10300/include/asm/bitops.h
@@ -229,9 +229,9 @@
#include <asm-generic/bitops/hweight.h>
#define ext2_set_bit_atomic(lock, nr, addr) \
- test_and_set_bit((nr) ^ 0x18, (addr))
+ test_and_set_bit((nr), (addr))
#define ext2_clear_bit_atomic(lock, nr, addr) \
- test_and_clear_bit((nr) ^ 0x18, (addr))
+ test_and_clear_bit((nr), (addr))
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/minix-le.h>
diff --git a/arch/mn10300/include/asm/signal.h b/arch/mn10300/include/asm/signal.h
index 7e891fc..1865d72 100644
--- a/arch/mn10300/include/asm/signal.h
+++ b/arch/mn10300/include/asm/signal.h
@@ -78,7 +78,7 @@
/* These should not be considered constants from userland. */
#define SIGRTMIN 32
-#define SIGRTMAX (_NSIG-1)
+#define SIGRTMAX _NSIG
/*
* SA_FLAGS values:
diff --git a/arch/mn10300/kernel/mn10300-serial.c b/arch/mn10300/kernel/mn10300-serial.c
index 9d49073..db509dd 100644
--- a/arch/mn10300/kernel/mn10300-serial.c
+++ b/arch/mn10300/kernel/mn10300-serial.c
@@ -156,17 +156,17 @@
._intr = &SC0ICR,
._rxb = &SC0RXB,
._txb = &SC0TXB,
- .rx_name = "ttySM0/Rx",
- .tx_name = "ttySM0/Tx",
+ .rx_name = "ttySM0:Rx",
+ .tx_name = "ttySM0:Tx",
#ifdef CONFIG_MN10300_TTYSM0_TIMER8
- .tm_name = "ttySM0/Timer8",
+ .tm_name = "ttySM0:Timer8",
._tmxmd = &TM8MD,
._tmxbr = &TM8BR,
._tmicr = &TM8ICR,
.tm_irq = TM8IRQ,
.div_timer = MNSCx_DIV_TIMER_16BIT,
#else /* CONFIG_MN10300_TTYSM0_TIMER2 */
- .tm_name = "ttySM0/Timer2",
+ .tm_name = "ttySM0:Timer2",
._tmxmd = &TM2MD,
._tmxbr = (volatile u16 *) &TM2BR,
._tmicr = &TM2ICR,
@@ -209,17 +209,17 @@
._intr = &SC1ICR,
._rxb = &SC1RXB,
._txb = &SC1TXB,
- .rx_name = "ttySM1/Rx",
- .tx_name = "ttySM1/Tx",
+ .rx_name = "ttySM1:Rx",
+ .tx_name = "ttySM1:Tx",
#ifdef CONFIG_MN10300_TTYSM1_TIMER9
- .tm_name = "ttySM1/Timer9",
+ .tm_name = "ttySM1:Timer9",
._tmxmd = &TM9MD,
._tmxbr = &TM9BR,
._tmicr = &TM9ICR,
.tm_irq = TM9IRQ,
.div_timer = MNSCx_DIV_TIMER_16BIT,
#else /* CONFIG_MN10300_TTYSM1_TIMER3 */
- .tm_name = "ttySM1/Timer3",
+ .tm_name = "ttySM1:Timer3",
._tmxmd = &TM3MD,
._tmxbr = (volatile u16 *) &TM3BR,
._tmicr = &TM3ICR,
@@ -260,9 +260,9 @@
.uart.lock =
__SPIN_LOCK_UNLOCKED(mn10300_serial_port_sif2.uart.lock),
.name = "ttySM2",
- .rx_name = "ttySM2/Rx",
- .tx_name = "ttySM2/Tx",
- .tm_name = "ttySM2/Timer10",
+ .rx_name = "ttySM2:Rx",
+ .tx_name = "ttySM2:Tx",
+ .tm_name = "ttySM2:Timer10",
._iobase = &SC2CTR,
._control = &SC2CTR,
._status = &SC2STR,
diff --git a/arch/mn10300/kernel/module.c b/arch/mn10300/kernel/module.c
index 6aea7fd..196a111 100644
--- a/arch/mn10300/kernel/module.c
+++ b/arch/mn10300/kernel/module.c
@@ -206,7 +206,7 @@
const Elf_Shdr *sechdrs,
struct module *me)
{
- return module_bug_finalize(hdr, sechdrs, me);
+ return 0;
}
/*
@@ -214,5 +214,4 @@
*/
void module_arch_cleanup(struct module *mod)
{
- module_bug_cleanup(mod);
}
diff --git a/arch/mn10300/kernel/signal.c b/arch/mn10300/kernel/signal.c
index 717db14..d4de05a 100644
--- a/arch/mn10300/kernel/signal.c
+++ b/arch/mn10300/kernel/signal.c
@@ -65,10 +65,10 @@
old_sigset_t mask;
if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
__get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
- __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+ __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
+ __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+ __get_user(mask, &act->sa_mask))
return -EFAULT;
- __get_user(new_ka.sa.sa_flags, &act->sa_flags);
- __get_user(mask, &act->sa_mask);
siginitset(&new_ka.sa.sa_mask, mask);
}
@@ -77,10 +77,10 @@
if (!ret && oact) {
if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
__put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
- __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+ __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
+ __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+ __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
return -EFAULT;
- __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
- __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
}
return ret;
@@ -102,6 +102,9 @@
{
unsigned int err = 0;
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
if (is_using_fpu(current))
fpu_kill_state(current);
@@ -330,8 +333,6 @@
regs->d0 = sig;
regs->d1 = (unsigned long) &frame->sc;
- set_fs(USER_DS);
-
/* the tracer may want to single-step inside the handler */
if (test_thread_flag(TIF_SINGLESTEP))
ptrace_notify(SIGTRAP);
@@ -345,7 +346,7 @@
return 0;
give_sigsegv:
- force_sig(SIGSEGV, current);
+ force_sigsegv(sig, current);
return -EFAULT;
}
@@ -413,8 +414,6 @@
regs->d0 = sig;
regs->d1 = (long) &frame->info;
- set_fs(USER_DS);
-
/* the tracer may want to single-step inside the handler */
if (test_thread_flag(TIF_SINGLESTEP))
ptrace_notify(SIGTRAP);
@@ -428,10 +427,16 @@
return 0;
give_sigsegv:
- force_sig(SIGSEGV, current);
+ force_sigsegv(sig, current);
return -EFAULT;
}
+static inline void stepback(struct pt_regs *regs)
+{
+ regs->pc -= 2;
+ regs->orig_d0 = -1;
+}
+
/*
* handle the actual delivery of a signal to userspace
*/
@@ -459,7 +464,7 @@
/* fallthrough */
case -ERESTARTNOINTR:
regs->d0 = regs->orig_d0;
- regs->pc -= 2;
+ stepback(regs);
}
}
@@ -527,12 +532,12 @@
case -ERESTARTSYS:
case -ERESTARTNOINTR:
regs->d0 = regs->orig_d0;
- regs->pc -= 2;
+ stepback(regs);
break;
case -ERESTART_RESTARTBLOCK:
regs->d0 = __NR_restart_syscall;
- regs->pc -= 2;
+ stepback(regs);
break;
}
}
diff --git a/arch/mn10300/mm/Makefile b/arch/mn10300/mm/Makefile
index 28b9d98..1557277 100644
--- a/arch/mn10300/mm/Makefile
+++ b/arch/mn10300/mm/Makefile
@@ -2,13 +2,11 @@
# Makefile for the MN10300-specific memory management code
#
+cacheflush-y := cache.o cache-mn10300.o
+cacheflush-$(CONFIG_MN10300_CACHE_WBACK) += cache-flush-mn10300.o
+
+cacheflush-$(CONFIG_MN10300_CACHE_DISABLED) := cache-disabled.o
+
obj-y := \
init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \
- misalignment.o dma-alloc.o
-
-ifneq ($(CONFIG_MN10300_CACHE_DISABLED),y)
-obj-y += cache.o cache-mn10300.o
-ifeq ($(CONFIG_MN10300_CACHE_WBACK),y)
-obj-y += cache-flush-mn10300.o
-endif
-endif
+ misalignment.o dma-alloc.o $(cacheflush-y)
diff --git a/arch/mn10300/mm/cache-disabled.c b/arch/mn10300/mm/cache-disabled.c
new file mode 100644
index 0000000..f669ea4
--- /dev/null
+++ b/arch/mn10300/mm/cache-disabled.c
@@ -0,0 +1,21 @@
+/* Handle the cache being disabled
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/mm.h>
+
+/*
+ * allow userspace to flush the instruction cache
+ */
+asmlinkage long sys_cacheflush(unsigned long start, unsigned long end)
+{
+ if (end < start)
+ return -EINVAL;
+ return 0;
+}
diff --git a/arch/mn10300/mm/cache.c b/arch/mn10300/mm/cache.c
index 1b76719..9261217 100644
--- a/arch/mn10300/mm/cache.c
+++ b/arch/mn10300/mm/cache.c
@@ -54,13 +54,30 @@
void flush_icache_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_MN10300_CACHE_WBACK
- unsigned long addr, size, off;
+ unsigned long addr, size, base, off;
struct page *page;
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *ppte, pte;
+ if (end > 0x80000000UL) {
+ /* addresses above 0xa0000000 do not go through the cache */
+ if (end > 0xa0000000UL) {
+ end = 0xa0000000UL;
+ if (start >= end)
+ return;
+ }
+
+ /* kernel addresses between 0x80000000 and 0x9fffffff do not
+ * require page tables, so we just map such addresses directly */
+ base = (start >= 0x80000000UL) ? start : 0x80000000UL;
+ mn10300_dcache_flush_range(base, end);
+ if (base == start)
+ goto invalidate;
+ end = base;
+ }
+
for (; start < end; start += size) {
/* work out how much of the page to flush */
off = start & (PAGE_SIZE - 1);
@@ -104,6 +121,7 @@
}
#endif
+invalidate:
mn10300_icache_inv();
}
EXPORT_SYMBOL(flush_icache_range);
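
The new early-out in flush_icache_range() carves the mn10300 address space into three windows before falling back to the page-table walk. A sketch of the classification the hunk encodes (the enum and function are illustrative only):

	enum flush_how { FLUSH_NONE, FLUSH_DIRECT, FLUSH_VIA_PTES };

	static enum flush_how classify(unsigned long addr)
	{
		if (addr >= 0xa0000000UL)
			return FLUSH_NONE;	/* uncached window: nothing to flush */
		if (addr >= 0x80000000UL)
			return FLUSH_DIRECT;	/* cached, direct-mapped kernel window */
		return FLUSH_VIA_PTES;		/* user range: walk the page tables */
	}

A range straddling the 0x80000000 boundary is split: the upper part is flushed directly with mn10300_dcache_flush_range(), and only the remainder goes through the per-page loop.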
diff --git a/arch/mn10300/mm/dma-alloc.c b/arch/mn10300/mm/dma-alloc.c
index 4e34880..159acb0 100644
--- a/arch/mn10300/mm/dma-alloc.c
+++ b/arch/mn10300/mm/dma-alloc.c
@@ -25,7 +25,8 @@
unsigned long addr;
void *ret;
- printk("dma_alloc_coherent(%s,%zu,,%x)\n", dev_name(dev), size, gfp);
+ pr_debug("dma_alloc_coherent(%s,%zu,%x)\n",
+ dev ? dev_name(dev) : "?", size, gfp);
if (0xbe000000 - pci_sram_allocated >= size) {
size = (size + 255) & ~255;
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 907417d..79a04a9 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -16,6 +16,7 @@
select RTC_DRV_GENERIC
select INIT_ALL_POSSIBLE
select BUG
+ select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select GENERIC_ATOMIC64 if !64BIT
help
diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h
index 02b77ba..efa0b60 100644
--- a/arch/parisc/include/asm/compat.h
+++ b/arch/parisc/include/asm/compat.h
@@ -147,7 +147,7 @@
return (u32)(unsigned long)uptr;
}
-static __inline__ void __user *compat_alloc_user_space(long len)
+static __inline__ void __user *arch_compat_alloc_user_space(long len)
{
struct pt_regs *regs = &current->thread.regs;
return (void __user *)regs->gr[30];
diff --git a/arch/parisc/include/asm/perf_event.h b/arch/parisc/include/asm/perf_event.h
index cc146427..1e0fd8b 100644
--- a/arch/parisc/include/asm/perf_event.h
+++ b/arch/parisc/include/asm/perf_event.h
@@ -1,7 +1,6 @@
#ifndef __ASM_PARISC_PERF_EVENT_H
#define __ASM_PARISC_PERF_EVENT_H
-/* parisc only supports software events through this interface. */
-static inline void set_perf_event_pending(void) { }
+/* Empty, just to avoid compiling error */
#endif /* __ASM_PARISC_PERF_EVENT_H */
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index 159a2b8..6e81bb5 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -941,11 +941,10 @@
nsyms = newptr - (Elf_Sym *)symhdr->sh_addr;
DEBUGP("NEW num_symtab %lu\n", nsyms);
symhdr->sh_size = nsyms * sizeof(Elf_Sym);
- return module_bug_finalize(hdr, sechdrs, me);
+ return 0;
}
void module_arch_cleanup(struct module *mod)
{
deregister_unwind_table(mod);
- module_bug_cleanup(mod);
}
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 631e5a0..4b1e521 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -138,6 +138,7 @@
select HAVE_OPROFILE
select HAVE_SYSCALL_WRAPPERS if PPC64
select GENERIC_ATOMIC64 if PPC32
+ select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index e3ea151..b7212b6 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -164,7 +164,7 @@
all: zImage
# With make 3.82 we cannot mix normal and wildcard targets
-BOOT_TARGETS1 := zImage zImage.initrd uImaged
+BOOT_TARGETS1 := zImage zImage.initrd uImage
BOOT_TARGETS2 := zImage% dtbImage% treeImage.% cuImage.% simpleImage.%
PHONY += $(BOOT_TARGETS1) $(BOOT_TARGETS2)
diff --git a/arch/powerpc/boot/dts/canyonlands.dts b/arch/powerpc/boot/dts/canyonlands.dts
index 5806ef0..a303703 100644
--- a/arch/powerpc/boot/dts/canyonlands.dts
+++ b/arch/powerpc/boot/dts/canyonlands.dts
@@ -163,6 +163,14 @@
interrupts = <0x1e 4>;
};
+ SATA0: sata@bffd1000 {
+ compatible = "amcc,sata-460ex";
+ reg = <4 0xbffd1000 0x800 4 0xbffd0800 0x400>;
+ interrupt-parent = <&UIC3>;
+ interrupts = <0x0 0x4 /* SATA */
+ 0x5 0x4>; /* AHBDMA */
+ };
+
POB0: opb {
compatible = "ibm,opb-460ex", "ibm,opb";
#address-cells = <1>;
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
index 396d21a..a11d4ea 100644
--- a/arch/powerpc/include/asm/compat.h
+++ b/arch/powerpc/include/asm/compat.h
@@ -134,7 +134,7 @@
return (u32)(unsigned long)uptr;
}
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
{
struct pt_regs *regs = current->thread.regs;
unsigned long usp = regs->gpr[1];
diff --git a/arch/powerpc/include/asm/fsldma.h b/arch/powerpc/include/asm/fsldma.h
index a67aeed..debc5ed 100644
--- a/arch/powerpc/include/asm/fsldma.h
+++ b/arch/powerpc/include/asm/fsldma.h
@@ -11,6 +11,7 @@
#ifndef __ARCH_POWERPC_ASM_FSLDMA_H__
#define __ARCH_POWERPC_ASM_FSLDMA_H__
+#include <linux/slab.h>
#include <linux/dmaengine.h>
/*
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 0e398cf..acac35d 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -433,7 +433,7 @@
* with. However gcc is not clever enough to compute the
* modulus (2^n-1) without a second multiply.
*/
-#define vsid_scrample(protovsid, size) \
+#define vsid_scramble(protovsid, size) \
((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))
#else /* 1 */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 1ff6662..9b287fd 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -129,7 +129,7 @@
u8 soft_enabled; /* irq soft-enable flag */
u8 hard_enabled; /* set if irqs are enabled in MSR */
u8 io_sync; /* writel() needs spin_unlock sync */
- u8 perf_event_pending; /* PM interrupt while soft-disabled */
+ u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disabled */
/* Stuff for accurate time accounting */
u64 user_time; /* accumulated usermode TB ticks */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index d8be016..ff0005eec 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -951,7 +951,14 @@
#ifdef CONFIG_PPC64
extern void ppc64_runlatch_on(void);
-extern void ppc64_runlatch_off(void);
+extern void __ppc64_runlatch_off(void);
+
+#define ppc64_runlatch_off() \
+ do { \
+ if (cpu_has_feature(CPU_FTR_CTRL) && \
+ test_thread_flag(TIF_RUNLATCH)) \
+ __ppc64_runlatch_off(); \
+ } while (0)
extern unsigned long scom970_read(unsigned int address);
extern void scom970_write(unsigned int address, unsigned long value);
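
Note on the hunk above: ppc64_runlatch_off() becomes a macro so the cheap CPU-feature and thread-flag tests are inlined at every call site, and only the cold path (the out-of-line __ppc64_runlatch_off(), reworked in the process.c hunk further down) pays for a function call. Below is a minimal user-space sketch of that fast-path-wrapper idiom; every identifier in it is a stand-in, not a kernel symbol.

#include <stdio.h>
#include <stdbool.h>

/* stand-ins for cpu_has_feature() / test_thread_flag() state */
static bool cpu_supports_ctrl = true;
static bool runlatch_set = true;

static void __runlatch_off(void)        /* cold path: does the work */
{
        runlatch_set = false;
        puts("runlatch cleared");
}

/* inlined fast path: skip the call when there is nothing to do */
#define runlatch_off()                                  \
        do {                                            \
                if (cpu_supports_ctrl && runlatch_set)  \
                        __runlatch_off();               \
        } while (0)

int main(void)
{
        runlatch_off();         /* first call clears the latch */
        runlatch_off();         /* now just a cheap inline test */
        return 0;
}
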
diff --git a/arch/powerpc/include/asm/rwsem.h b/arch/powerpc/include/asm/rwsem.h
index 24cd928..8447d89 100644
--- a/arch/powerpc/include/asm/rwsem.h
+++ b/arch/powerpc/include/asm/rwsem.h
@@ -21,15 +21,20 @@
/*
* the semaphore definition
*/
-struct rw_semaphore {
- /* XXX this should be able to be an atomic_t -- paulus */
- signed int count;
-#define RWSEM_UNLOCKED_VALUE 0x00000000
-#define RWSEM_ACTIVE_BIAS 0x00000001
-#define RWSEM_ACTIVE_MASK 0x0000ffff
-#define RWSEM_WAITING_BIAS (-0x00010000)
+#ifdef CONFIG_PPC64
+# define RWSEM_ACTIVE_MASK 0xffffffffL
+#else
+# define RWSEM_ACTIVE_MASK 0x0000ffffL
+#endif
+
+#define RWSEM_UNLOCKED_VALUE 0x00000000L
+#define RWSEM_ACTIVE_BIAS 0x00000001L
+#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+
+struct rw_semaphore {
+ long count;
spinlock_t wait_lock;
struct list_head wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -43,9 +48,13 @@
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif
-#define __RWSEM_INITIALIZER(name) \
- { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
- LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
+#define __RWSEM_INITIALIZER(name) \
+{ \
+ RWSEM_UNLOCKED_VALUE, \
+ __SPIN_LOCK_UNLOCKED((name).wait_lock), \
+ LIST_HEAD_INIT((name).wait_list) \
+ __RWSEM_DEP_MAP_INIT(name) \
+}
#define DECLARE_RWSEM(name) \
struct rw_semaphore name = __RWSEM_INITIALIZER(name)
@@ -70,13 +79,13 @@
*/
static inline void __down_read(struct rw_semaphore *sem)
{
- if (unlikely(atomic_inc_return((atomic_t *)(&sem->count)) <= 0))
+ if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0))
rwsem_down_read_failed(sem);
}
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
- int tmp;
+ long tmp;
while ((tmp = sem->count) >= 0) {
if (tmp == cmpxchg(&sem->count, tmp,
@@ -92,10 +101,10 @@
*/
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
- int tmp;
+ long tmp;
- tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
- (atomic_t *)(&sem->count));
+ tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,
+ (atomic_long_t *)&sem->count);
if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
rwsem_down_write_failed(sem);
}
@@ -107,7 +116,7 @@
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
- int tmp;
+ long tmp;
tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
RWSEM_ACTIVE_WRITE_BIAS);
@@ -119,9 +128,9 @@
*/
static inline void __up_read(struct rw_semaphore *sem)
{
- int tmp;
+ long tmp;
- tmp = atomic_dec_return((atomic_t *)(&sem->count));
+ tmp = atomic_long_dec_return((atomic_long_t *)&sem->count);
if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
rwsem_wake(sem);
}
@@ -131,17 +140,17 @@
*/
static inline void __up_write(struct rw_semaphore *sem)
{
- if (unlikely(atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
- (atomic_t *)(&sem->count)) < 0))
+ if (unlikely(atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
+ (atomic_long_t *)&sem->count) < 0))
rwsem_wake(sem);
}
/*
* implement atomic add functionality
*/
-static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
+static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
- atomic_add(delta, (atomic_t *)(&sem->count));
+ atomic_long_add(delta, (atomic_long_t *)&sem->count);
}
/*
@@ -149,9 +158,10 @@
*/
static inline void __downgrade_write(struct rw_semaphore *sem)
{
- int tmp;
+ long tmp;
- tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
+ tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS,
+ (atomic_long_t *)&sem->count);
if (tmp < 0)
rwsem_downgrade_wake(sem);
}
@@ -159,14 +169,14 @@
/*
* implement exchange and add functionality
*/
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
+static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
- return atomic_add_return(delta, (atomic_t *)(&sem->count));
+ return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
}
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
- return (sem->count != 0);
+ return sem->count != 0;
}
#endif /* __KERNEL__ */
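
Note on the rwsem conversion above: widening count from signed int to long gives ppc64 a 32-bit active mask (0xffffffff) instead of 16 bits, while keeping the classic rwsem encoding: each reader adds 1, a writer adds the waiting bias plus 1, so a negative count signals a write lock or waiters. A small sketch of that arithmetic, assuming a 64-bit long as on ppc64; the constants are copied from the hunk and main() is only a demo driver.

#include <stdio.h>

#define RWSEM_ACTIVE_MASK       0xffffffffL             /* ppc64 value */
#define RWSEM_UNLOCKED_VALUE    0x00000000L
#define RWSEM_ACTIVE_BIAS       0x00000001L
#define RWSEM_WAITING_BIAS      (-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

int main(void)
{
        long count = RWSEM_UNLOCKED_VALUE;

        count += RWSEM_ACTIVE_BIAS;             /* a reader takes it */
        printf("reader held: count=%ld active=%ld\n",
               count, count & RWSEM_ACTIVE_MASK);

        count -= RWSEM_ACTIVE_BIAS;             /* reader drops it */
        count += RWSEM_ACTIVE_WRITE_BIAS;       /* a writer takes it */
        printf("writer held: count=%ld (negative while write-locked)\n",
               count);
        return 0;
}
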
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index a5ee345..3d21266 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -326,3 +326,6 @@
COMPAT_SYS_SPU(preadv)
COMPAT_SYS_SPU(pwritev)
COMPAT_SYS(rt_tgsigqueueinfo)
+SYSCALL(fanotify_init)
+COMPAT_SYS(fanotify_mark)
+SYSCALL_SPU(prlimit64)
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
index 6c294ac..9c3d160 100644
--- a/arch/powerpc/include/asm/system.h
+++ b/arch/powerpc/include/asm/system.h
@@ -542,10 +542,6 @@
#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x)))
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
-extern void account_system_vtime(struct task_struct *);
-#endif
-
extern struct dentry *powerpc_debugfs_root;
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index f0a1026..597e6f9 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -345,10 +345,13 @@
#define __NR_preadv 320
#define __NR_pwritev 321
#define __NR_rt_tgsigqueueinfo 322
+#define __NR_fanotify_init 323
+#define __NR_fanotify_mark 324
+#define __NR_prlimit64 325
#ifdef __KERNEL__
-#define __NR_syscalls 323
+#define __NR_syscalls 326
#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 65e2b4e..1f9123f 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1826,7 +1826,6 @@
.cpu_features = CPU_FTRS_47X,
.cpu_user_features = COMMON_USER_BOOKE |
PPC_FEATURE_HAS_FPU,
- .cpu_user_features = COMMON_USER_BOOKE,
.mmu_features = MMU_FTR_TYPE_47x |
MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL,
.icache_bsize = 32,
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 417f7b0..4457382 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -402,6 +402,18 @@
*/
hard_irq_disable();
+ /*
+ * Make a note of crashing cpu. Will be used in machine_kexec
+ * such that another IPI will not be sent.
+ */
+ crashing_cpu = smp_processor_id();
+ crash_save_cpu(regs, crashing_cpu);
+ crash_kexec_prepare_cpus(crashing_cpu);
+ cpu_set(crashing_cpu, cpus_in_crash);
+#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP)
+ crash_kexec_wait_realmode(crashing_cpu);
+#endif
+
for_each_irq(i) {
struct irq_desc *desc = irq_to_desc(i);
@@ -438,18 +450,8 @@
crash_shutdown_cpu = -1;
__debugger_fault_handler = old_handler;
- /*
- * Make a note of crashing cpu. Will be used in machine_kexec
- * such that another IPI will not be sent.
- */
- crashing_cpu = smp_processor_id();
- crash_save_cpu(regs, crashing_cpu);
- crash_kexec_prepare_cpus(crashing_cpu);
- cpu_set(crashing_cpu, cpus_in_crash);
crash_kexec_stop_spus();
-#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP)
- crash_kexec_wait_realmode(crashing_cpu);
-#endif
+
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(1, 0);
}
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 5ab484e..562305b 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -113,6 +113,10 @@
stw r5, 0(r4) /* Save abatron_pteptrs at a fixed location */
stw r6, 0(r5)
+ /* Clear the Machine Check Syndrome Register */
+ li r0,0
+ mtspr SPRN_MCSR,r0
+
/* Let's move on */
lis r4,start_kernel@h
ori r4,r4,start_kernel@l
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 844a44b..c571cd3 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -572,15 +572,21 @@
/* Set thread priority to MEDIUM */
HMT_MEDIUM
- /* Do early setup for that CPU (stab, slb, hash table pointer) */
- bl .early_setup_secondary
-
/* Initialize the kernel stack. Just a repeat for iSeries. */
LOAD_REG_ADDR(r3, current_set)
sldi r28,r24,3 /* get current_set[cpu#] */
- ldx r1,r3,r28
- addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
- std r1,PACAKSAVE(r13)
+ ldx r14,r3,r28
+ addi r14,r14,THREAD_SIZE-STACK_FRAME_OVERHEAD
+ std r14,PACAKSAVE(r13)
+
+ /* Do early setup for that CPU (stab, slb, hash table pointer) */
+ bl .early_setup_secondary
+
+ /*
+ * setup the new stack pointer, but *don't* use this until
+ * translation is on.
+ */
+ mr r1, r14
/* Clear backchain so we get nice backtraces */
li r7,0
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index 049dda6..39a2baa 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -94,9 +94,9 @@
HMT_medium();
ppc64_runlatch_on();
tick_nohz_restart_sched_tick();
+ preempt_enable_no_resched();
if (cpu_should_die())
cpu_die();
- preempt_enable_no_resched();
schedule();
preempt_disable();
}
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index d3ce67c..4a65386 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -67,6 +67,7 @@
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/dbell.h>
+#include <asm/smp.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
@@ -446,22 +447,23 @@
void exc_lvl_ctx_init(void)
{
struct thread_info *tp;
- int i;
+ int i, hw_cpu;
for_each_possible_cpu(i) {
- memset((void *)critirq_ctx[i], 0, THREAD_SIZE);
- tp = critirq_ctx[i];
+ hw_cpu = get_hard_smp_processor_id(i);
+ memset((void *)critirq_ctx[hw_cpu], 0, THREAD_SIZE);
+ tp = critirq_ctx[hw_cpu];
tp->cpu = i;
tp->preempt_count = 0;
#ifdef CONFIG_BOOKE
- memset((void *)dbgirq_ctx[i], 0, THREAD_SIZE);
- tp = dbgirq_ctx[i];
+ memset((void *)dbgirq_ctx[hw_cpu], 0, THREAD_SIZE);
+ tp = dbgirq_ctx[hw_cpu];
tp->cpu = i;
tp->preempt_count = 0;
- memset((void *)mcheckirq_ctx[i], 0, THREAD_SIZE);
- tp = mcheckirq_ctx[i];
+ memset((void *)mcheckirq_ctx[hw_cpu], 0, THREAD_SIZE);
+ tp = mcheckirq_ctx[hw_cpu];
tp->cpu = i;
tp->preempt_count = HARDIRQ_OFFSET;
#endif
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 6bbd7a6..a7a570d 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -810,6 +810,9 @@
isync
sync
+ mfspr r3, SPRN_PIR /* current core we are running on */
+ mr r4, r5 /* load physical address of chunk called */
+
/* jump to the entry point, usually the setup routine */
mtlr r5
blrl
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index 477c663..49cee9d 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -63,11 +63,6 @@
const Elf_Shdr *sechdrs, struct module *me)
{
const Elf_Shdr *sect;
- int err;
-
- err = module_bug_finalize(hdr, sechdrs, me);
- if (err)
- return err;
/* Apply feature fixups */
sect = find_section(hdr, sechdrs, "__ftr_fixup");
@@ -101,5 +96,4 @@
void module_arch_cleanup(struct module *mod)
{
- module_bug_cleanup(mod);
}
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index 6ddb795f..e751506 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -336,7 +336,7 @@
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
struct device_node *child = pci_device_to_OF_node(dev);
- if (dev)
+ if (child)
of_scan_pci_bridge(child, dev);
}
}
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c
index 95ad9da..d05ae42 100644
--- a/arch/powerpc/kernel/perf_callchain.c
+++ b/arch/powerpc/kernel/perf_callchain.c
@@ -23,18 +23,6 @@
#include "ppc32.h"
#endif
-/*
- * Store another value in a callchain_entry.
- */
-static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
-{
- unsigned int nr = entry->nr;
-
- if (nr < PERF_MAX_STACK_DEPTH) {
- entry->ip[nr] = ip;
- entry->nr = nr + 1;
- }
-}
/*
* Is sp valid as the address of the next kernel stack frame after prev_sp?
@@ -58,8 +46,8 @@
return 0;
}
-static void perf_callchain_kernel(struct pt_regs *regs,
- struct perf_callchain_entry *entry)
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
unsigned long sp, next_sp;
unsigned long next_ip;
@@ -69,8 +57,7 @@
lr = regs->link;
sp = regs->gpr[1];
- callchain_store(entry, PERF_CONTEXT_KERNEL);
- callchain_store(entry, regs->nip);
+ perf_callchain_store(entry, regs->nip);
if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
return;
@@ -89,7 +76,7 @@
next_ip = regs->nip;
lr = regs->link;
level = 0;
- callchain_store(entry, PERF_CONTEXT_KERNEL);
+ perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
} else {
if (level == 0)
@@ -111,7 +98,7 @@
++level;
}
- callchain_store(entry, next_ip);
+ perf_callchain_store(entry, next_ip);
if (!valid_next_sp(next_sp, sp))
return;
sp = next_sp;
@@ -233,8 +220,8 @@
puc == (unsigned long) &sf->uc;
}
-static void perf_callchain_user_64(struct pt_regs *regs,
- struct perf_callchain_entry *entry)
+static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+ struct pt_regs *regs)
{
unsigned long sp, next_sp;
unsigned long next_ip;
@@ -246,8 +233,7 @@
next_ip = regs->nip;
lr = regs->link;
sp = regs->gpr[1];
- callchain_store(entry, PERF_CONTEXT_USER);
- callchain_store(entry, next_ip);
+ perf_callchain_store(entry, next_ip);
for (;;) {
fp = (unsigned long __user *) sp;
@@ -276,14 +262,14 @@
read_user_stack_64(&uregs[PT_R1], &sp))
return;
level = 0;
- callchain_store(entry, PERF_CONTEXT_USER);
- callchain_store(entry, next_ip);
+ perf_callchain_store(entry, PERF_CONTEXT_USER);
+ perf_callchain_store(entry, next_ip);
continue;
}
if (level == 0)
next_ip = lr;
- callchain_store(entry, next_ip);
+ perf_callchain_store(entry, next_ip);
++level;
sp = next_sp;
}
@@ -315,8 +301,8 @@
return __get_user_inatomic(*ret, ptr);
}
-static inline void perf_callchain_user_64(struct pt_regs *regs,
- struct perf_callchain_entry *entry)
+static inline void perf_callchain_user_64(struct perf_callchain_entry *entry,
+ struct pt_regs *regs)
{
}
@@ -435,8 +421,8 @@
return mctx->mc_gregs;
}
-static void perf_callchain_user_32(struct pt_regs *regs,
- struct perf_callchain_entry *entry)
+static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+ struct pt_regs *regs)
{
unsigned int sp, next_sp;
unsigned int next_ip;
@@ -447,8 +433,7 @@
next_ip = regs->nip;
lr = regs->link;
sp = regs->gpr[1];
- callchain_store(entry, PERF_CONTEXT_USER);
- callchain_store(entry, next_ip);
+ perf_callchain_store(entry, next_ip);
while (entry->nr < PERF_MAX_STACK_DEPTH) {
fp = (unsigned int __user *) (unsigned long) sp;
@@ -470,45 +455,24 @@
read_user_stack_32(&uregs[PT_R1], &sp))
return;
level = 0;
- callchain_store(entry, PERF_CONTEXT_USER);
- callchain_store(entry, next_ip);
+ perf_callchain_store(entry, PERF_CONTEXT_USER);
+ perf_callchain_store(entry, next_ip);
continue;
}
if (level == 0)
next_ip = lr;
- callchain_store(entry, next_ip);
+ perf_callchain_store(entry, next_ip);
++level;
sp = next_sp;
}
}
-/*
- * Since we can't get PMU interrupts inside a PMU interrupt handler,
- * we don't need separate irq and nmi entries here.
- */
-static DEFINE_PER_CPU(struct perf_callchain_entry, cpu_perf_callchain);
-
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
- struct perf_callchain_entry *entry = &__get_cpu_var(cpu_perf_callchain);
-
- entry->nr = 0;
-
- if (!user_mode(regs)) {
- perf_callchain_kernel(regs, entry);
- if (current->mm)
- regs = task_pt_regs(current);
- else
- regs = NULL;
- }
-
- if (regs) {
- if (current_is_64bit())
- perf_callchain_user_64(regs, entry);
- else
- perf_callchain_user_32(regs, entry);
- }
-
- return entry;
+ if (current_is_64bit())
+ perf_callchain_user_64(entry, regs);
+ else
+ perf_callchain_user_32(entry, regs);
}
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index d301a30..3129c85 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -402,6 +402,9 @@
{
s64 val, delta, prev;
+ if (event->hw.state & PERF_HES_STOPPED)
+ return;
+
if (!event->hw.idx)
return;
/*
@@ -517,7 +520,7 @@
* Disable all events to prevent PMU interrupts and to allow
* events to be added or removed.
*/
-void hw_perf_disable(void)
+static void power_pmu_disable(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
unsigned long flags;
@@ -565,7 +568,7 @@
* If we were previously disabled and events were added, then
* put the new config on the PMU.
*/
-void hw_perf_enable(void)
+static void power_pmu_enable(struct pmu *pmu)
{
struct perf_event *event;
struct cpu_hw_events *cpuhw;
@@ -672,6 +675,8 @@
}
local64_set(&event->hw.prev_count, val);
event->hw.idx = idx;
+ if (event->hw.state & PERF_HES_STOPPED)
+ val = 0;
write_pmc(idx, val);
perf_event_update_userpage(event);
}
@@ -727,7 +732,7 @@
* re-enable the PMU in order to get hw_perf_enable to do the
* actual work of reconfiguring the PMU.
*/
-static int power_pmu_enable(struct perf_event *event)
+static int power_pmu_add(struct perf_event *event, int ef_flags)
{
struct cpu_hw_events *cpuhw;
unsigned long flags;
@@ -735,7 +740,7 @@
int ret = -EAGAIN;
local_irq_save(flags);
- perf_disable();
+ perf_pmu_disable(event->pmu);
/*
* Add the event to the list (if there is room)
@@ -749,6 +754,9 @@
cpuhw->events[n0] = event->hw.config;
cpuhw->flags[n0] = event->hw.event_base;
+ if (!(ef_flags & PERF_EF_START))
+ event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+
/*
* If group events scheduling transaction was started,
* skip the schedulability test here, it will be performed
@@ -769,7 +777,7 @@
ret = 0;
out:
- perf_enable();
+ perf_pmu_enable(event->pmu);
local_irq_restore(flags);
return ret;
}
@@ -777,14 +785,14 @@
/*
* Remove a event from the PMU.
*/
-static void power_pmu_disable(struct perf_event *event)
+static void power_pmu_del(struct perf_event *event, int ef_flags)
{
struct cpu_hw_events *cpuhw;
long i;
unsigned long flags;
local_irq_save(flags);
- perf_disable();
+ perf_pmu_disable(event->pmu);
power_pmu_read(event);
@@ -821,34 +829,60 @@
cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
}
- perf_enable();
+ perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
/*
- * Re-enable interrupts on a event after they were throttled
- * because they were coming too fast.
+ * POWER-PMU does not support disabling individual counters, hence we
+ * program their cycle counters to their max value and ignore the interrupts.
*/
-static void power_pmu_unthrottle(struct perf_event *event)
+
+static void power_pmu_start(struct perf_event *event, int ef_flags)
{
- s64 val, left;
+ unsigned long flags;
+ s64 left;
+
+ if (!event->hw.idx || !event->hw.sample_period)
+ return;
+
+ if (!(event->hw.state & PERF_HES_STOPPED))
+ return;
+
+ if (ef_flags & PERF_EF_RELOAD)
+ WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+
+ local_irq_save(flags);
+ perf_pmu_disable(event->pmu);
+
+ event->hw.state = 0;
+ left = local64_read(&event->hw.period_left);
+ write_pmc(event->hw.idx, left);
+
+ perf_event_update_userpage(event);
+ perf_pmu_enable(event->pmu);
+ local_irq_restore(flags);
+}
+
+static void power_pmu_stop(struct perf_event *event, int ef_flags)
+{
unsigned long flags;
if (!event->hw.idx || !event->hw.sample_period)
return;
+
+ if (event->hw.state & PERF_HES_STOPPED)
+ return;
+
local_irq_save(flags);
- perf_disable();
+ perf_pmu_disable(event->pmu);
+
power_pmu_read(event);
- left = event->hw.sample_period;
- event->hw.last_period = left;
- val = 0;
- if (left < 0x80000000L)
- val = 0x80000000L - left;
- write_pmc(event->hw.idx, val);
- local64_set(&event->hw.prev_count, val);
- local64_set(&event->hw.period_left, left);
+ event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ write_pmc(event->hw.idx, 0);
+
perf_event_update_userpage(event);
- perf_enable();
+ perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
@@ -857,10 +891,11 @@
* Set the flag to make pmu::enable() not perform the
* schedulability test, it will be performed at commit time
*/
-void power_pmu_start_txn(const struct pmu *pmu)
+void power_pmu_start_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+ perf_pmu_disable(pmu);
cpuhw->group_flag |= PERF_EVENT_TXN;
cpuhw->n_txn_start = cpuhw->n_events;
}
@@ -870,11 +905,12 @@
* Clear the flag and pmu::enable() will perform the
* schedulability test.
*/
-void power_pmu_cancel_txn(const struct pmu *pmu)
+void power_pmu_cancel_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
cpuhw->group_flag &= ~PERF_EVENT_TXN;
+ perf_pmu_enable(pmu);
}
/*
@@ -882,7 +918,7 @@
* Perform the group schedulability test as a whole
* Return 0 if success
*/
-int power_pmu_commit_txn(const struct pmu *pmu)
+int power_pmu_commit_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
long i, n;
@@ -901,19 +937,10 @@
cpuhw->event[i]->hw.config = cpuhw->events[i];
cpuhw->group_flag &= ~PERF_EVENT_TXN;
+ perf_pmu_enable(pmu);
return 0;
}
-struct pmu power_pmu = {
- .enable = power_pmu_enable,
- .disable = power_pmu_disable,
- .read = power_pmu_read,
- .unthrottle = power_pmu_unthrottle,
- .start_txn = power_pmu_start_txn,
- .cancel_txn = power_pmu_cancel_txn,
- .commit_txn = power_pmu_commit_txn,
-};
-
/*
* Return 1 if we might be able to put event on a limited PMC,
* or 0 if not.
@@ -1014,7 +1041,7 @@
return 0;
}
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+static int power_pmu_event_init(struct perf_event *event)
{
u64 ev;
unsigned long flags;
@@ -1026,25 +1053,27 @@
struct cpu_hw_events *cpuhw;
if (!ppmu)
- return ERR_PTR(-ENXIO);
+ return -ENOENT;
+
switch (event->attr.type) {
case PERF_TYPE_HARDWARE:
ev = event->attr.config;
if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
ev = ppmu->generic_events[ev];
break;
case PERF_TYPE_HW_CACHE:
err = hw_perf_cache_event(event->attr.config, &ev);
if (err)
- return ERR_PTR(err);
+ return err;
break;
case PERF_TYPE_RAW:
ev = event->attr.config;
break;
default:
- return ERR_PTR(-EINVAL);
+ return -ENOENT;
}
+
event->hw.config_base = ev;
event->hw.idx = 0;
@@ -1063,7 +1092,7 @@
* XXX we should check if the task is an idle task.
*/
flags = 0;
- if (event->ctx->task)
+ if (event->attach_state & PERF_ATTACH_TASK)
flags |= PPMU_ONLY_COUNT_RUN;
/*
@@ -1081,7 +1110,7 @@
*/
ev = normal_pmc_alternative(ev, flags);
if (!ev)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
}
@@ -1095,19 +1124,19 @@
n = collect_events(event->group_leader, ppmu->n_counter - 1,
ctrs, events, cflags);
if (n < 0)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
events[n] = ev;
ctrs[n] = event;
cflags[n] = flags;
if (check_excludes(ctrs, cflags, n, 1))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
cpuhw = &get_cpu_var(cpu_hw_events);
err = power_check_constraints(cpuhw, events, cflags, n + 1);
put_cpu_var(cpu_hw_events);
if (err)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
event->hw.config = events[n];
event->hw.event_base = cflags[n];
@@ -1132,11 +1161,23 @@
}
event->destroy = hw_perf_event_destroy;
- if (err)
- return ERR_PTR(err);
- return &power_pmu;
+ return err;
}
+struct pmu power_pmu = {
+ .pmu_enable = power_pmu_enable,
+ .pmu_disable = power_pmu_disable,
+ .event_init = power_pmu_event_init,
+ .add = power_pmu_add,
+ .del = power_pmu_del,
+ .start = power_pmu_start,
+ .stop = power_pmu_stop,
+ .read = power_pmu_read,
+ .start_txn = power_pmu_start_txn,
+ .cancel_txn = power_pmu_cancel_txn,
+ .commit_txn = power_pmu_commit_txn,
+};
+
/*
* A counter has overflowed; update its count and record
* things if requested. Note that interrupts are hard-disabled
@@ -1149,6 +1190,11 @@
s64 prev, delta, left;
int record = 0;
+ if (event->hw.state & PERF_HES_STOPPED) {
+ write_pmc(event->hw.idx, 0);
+ return;
+ }
+
/* we don't have to worry about interrupts here */
prev = local64_read(&event->hw.prev_count);
delta = (val - prev) & 0xfffffffful;
@@ -1171,6 +1217,11 @@
val = 0x80000000LL - left;
}
+ write_pmc(event->hw.idx, val);
+ local64_set(&event->hw.prev_count, val);
+ local64_set(&event->hw.period_left, left);
+ perf_event_update_userpage(event);
+
/*
* Finally record data if requested.
*/
@@ -1183,23 +1234,9 @@
if (event->attr.sample_type & PERF_SAMPLE_ADDR)
perf_get_data_addr(regs, &data.addr);
- if (perf_event_overflow(event, nmi, &data, regs)) {
- /*
- * Interrupts are coming too fast - throttle them
- * by setting the event to 0, so it will be
- * at least 2^30 cycles until the next interrupt
- * (assuming each event counts at most 2 counts
- * per cycle).
- */
- val = 0;
- left = ~0ULL >> 1;
- }
+ if (perf_event_overflow(event, nmi, &data, regs))
+ power_pmu_stop(event, 0);
}
-
- write_pmc(event->hw.idx, val);
- local64_set(&event->hw.prev_count, val);
- local64_set(&event->hw.period_left, left);
- perf_event_update_userpage(event);
}
/*
@@ -1342,6 +1379,7 @@
freeze_events_kernel = MMCR0_FCHV;
#endif /* CONFIG_PPC64 */
+ perf_pmu_register(&power_pmu);
perf_cpu_notifier(power_pmu_notifier);
return 0;
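
Note on the perf_event.c hunk: it tracks the core perf rework in this merge window. hw_perf_enable()/hw_perf_disable() become the pmu_enable()/pmu_disable() methods of struct pmu, the old enable/disable/unthrottle event callbacks become add/del/start/stop, and the PMU is registered explicitly via perf_pmu_register(). A toy sketch of how a caller drives such an ops table follows; the struct layout and all names in it are invented for illustration, not the real perf core API.

#include <stdio.h>

/* toy stand-ins for the perf core types; purely illustrative */
struct event {
        int idx;
        int stopped;
};

struct pmu_ops {
        int  (*add)(struct event *e, int flags);
        void (*del)(struct event *e, int flags);
        void (*start)(struct event *e, int flags);
        void (*stop)(struct event *e, int flags);
};

#define EF_START 1      /* plays the role of PERF_EF_START */

static int toy_add(struct event *e, int flags)
{
        e->idx = 1;                     /* claim a hardware counter */
        e->stopped = !(flags & EF_START);
        puts("add");
        return 0;
}

static void toy_del(struct event *e, int flags)
{
        e->idx = 0;                     /* release the counter */
        puts("del");
}

static void toy_start(struct event *e, int flags)
{
        e->stopped = 0;
        puts("start");
}

static void toy_stop(struct event *e, int flags)
{
        e->stopped = 1;
        puts("stop");
}

static const struct pmu_ops toy_pmu = {
        .add   = toy_add,
        .del   = toy_del,
        .start = toy_start,
        .stop  = toy_stop,
};

int main(void)
{
        struct event e = { 0, 1 };

        toy_pmu.add(&e, EF_START);      /* schedule in, counting at once */
        toy_pmu.stop(&e, 0);            /* throttling now uses stop ... */
        toy_pmu.start(&e, 0);           /* ... and start, not unthrottle */
        toy_pmu.del(&e, 0);             /* schedule out */
        return 0;
}
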
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c
index 1ba4547..7ecca59 100644
--- a/arch/powerpc/kernel/perf_event_fsl_emb.c
+++ b/arch/powerpc/kernel/perf_event_fsl_emb.c
@@ -156,6 +156,9 @@
{
s64 val, delta, prev;
+ if (event->hw.state & PERF_HES_STOPPED)
+ return;
+
/*
* Performance monitor interrupts come even when interrupts
* are soft-disabled, as long as interrupts are hard-enabled.
@@ -177,7 +180,7 @@
* Disable all events to prevent PMU interrupts and to allow
* events to be added or removed.
*/
-void hw_perf_disable(void)
+static void fsl_emb_pmu_disable(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
unsigned long flags;
@@ -216,7 +219,7 @@
* If we were previously disabled and events were added, then
* put the new config on the PMU.
*/
-void hw_perf_enable(void)
+static void fsl_emb_pmu_enable(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
unsigned long flags;
@@ -262,8 +265,8 @@
return n;
}
-/* perf must be disabled, context locked on entry */
-static int fsl_emb_pmu_enable(struct perf_event *event)
+/* context locked on entry */
+static int fsl_emb_pmu_add(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuhw;
int ret = -EAGAIN;
@@ -271,6 +274,7 @@
u64 val;
int i;
+ perf_pmu_disable(event->pmu);
cpuhw = &get_cpu_var(cpu_hw_events);
if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
@@ -301,6 +305,12 @@
val = 0x80000000L - left;
}
local64_set(&event->hw.prev_count, val);
+
+ if (!(flags & PERF_EF_START)) {
+ event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ val = 0;
+ }
+
write_pmc(i, val);
perf_event_update_userpage(event);
@@ -310,15 +320,17 @@
ret = 0;
out:
put_cpu_var(cpu_hw_events);
+ perf_pmu_enable(event->pmu);
return ret;
}
-/* perf must be disabled, context locked on entry */
-static void fsl_emb_pmu_disable(struct perf_event *event)
+/* context locked on entry */
+static void fsl_emb_pmu_del(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuhw;
int i = event->hw.idx;
+ perf_pmu_disable(event->pmu);
if (i < 0)
goto out;
@@ -346,45 +358,58 @@
cpuhw->n_events--;
out:
+ perf_pmu_enable(event->pmu);
put_cpu_var(cpu_hw_events);
}
-/*
- * Re-enable interrupts on a event after they were throttled
- * because they were coming too fast.
- *
- * Context is locked on entry, but perf is not disabled.
- */
-static void fsl_emb_pmu_unthrottle(struct perf_event *event)
+static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags)
{
- s64 val, left;
+ unsigned long flags;
+ s64 left;
+
+ if (event->hw.idx < 0 || !event->hw.sample_period)
+ return;
+
+ if (!(event->hw.state & PERF_HES_STOPPED))
+ return;
+
+ if (ef_flags & PERF_EF_RELOAD)
+ WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+
+ local_irq_save(flags);
+ perf_pmu_disable(event->pmu);
+
+ event->hw.state = 0;
+ left = local64_read(&event->hw.period_left);
+ write_pmc(event->hw.idx, left);
+
+ perf_event_update_userpage(event);
+ perf_pmu_enable(event->pmu);
+ local_irq_restore(flags);
+}
+
+static void fsl_emb_pmu_stop(struct perf_event *event, int ef_flags)
+{
unsigned long flags;
if (event->hw.idx < 0 || !event->hw.sample_period)
return;
+
+ if (event->hw.state & PERF_HES_STOPPED)
+ return;
+
local_irq_save(flags);
- perf_disable();
+ perf_pmu_disable(event->pmu);
+
fsl_emb_pmu_read(event);
- left = event->hw.sample_period;
- event->hw.last_period = left;
- val = 0;
- if (left < 0x80000000L)
- val = 0x80000000L - left;
- write_pmc(event->hw.idx, val);
- local64_set(&event->hw.prev_count, val);
- local64_set(&event->hw.period_left, left);
+ event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ write_pmc(event->hw.idx, 0);
+
perf_event_update_userpage(event);
- perf_enable();
+ perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
-static struct pmu fsl_emb_pmu = {
- .enable = fsl_emb_pmu_enable,
- .disable = fsl_emb_pmu_disable,
- .read = fsl_emb_pmu_read,
- .unthrottle = fsl_emb_pmu_unthrottle,
-};
-
/*
* Release the PMU if this is the last perf_event.
*/
@@ -428,7 +453,7 @@
return 0;
}
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+static int fsl_emb_pmu_event_init(struct perf_event *event)
{
u64 ev;
struct perf_event *events[MAX_HWEVENTS];
@@ -441,14 +466,14 @@
case PERF_TYPE_HARDWARE:
ev = event->attr.config;
if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
ev = ppmu->generic_events[ev];
break;
case PERF_TYPE_HW_CACHE:
err = hw_perf_cache_event(event->attr.config, &ev);
if (err)
- return ERR_PTR(err);
+ return err;
break;
case PERF_TYPE_RAW:
@@ -456,12 +481,12 @@
break;
default:
- return ERR_PTR(-EINVAL);
+ return -ENOENT;
}
event->hw.config = ppmu->xlate_event(ev);
if (!(event->hw.config & FSL_EMB_EVENT_VALID))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
/*
* If this is in a group, check if it can go on with all the
@@ -473,7 +498,7 @@
n = collect_events(event->group_leader,
ppmu->n_counter - 1, events);
if (n < 0)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) {
@@ -484,7 +509,7 @@
}
if (num_restricted >= ppmu->n_restricted)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
event->hw.idx = -1;
@@ -497,7 +522,7 @@
if (event->attr.exclude_kernel)
event->hw.config_base |= PMLCA_FCS;
if (event->attr.exclude_idle)
- return ERR_PTR(-ENOTSUPP);
+ return -ENOTSUPP;
event->hw.last_period = event->hw.sample_period;
local64_set(&event->hw.period_left, event->hw.last_period);
@@ -523,11 +548,20 @@
}
event->destroy = hw_perf_event_destroy;
- if (err)
- return ERR_PTR(err);
- return &fsl_emb_pmu;
+ return err;
}
+static struct pmu fsl_emb_pmu = {
+ .pmu_enable = fsl_emb_pmu_enable,
+ .pmu_disable = fsl_emb_pmu_disable,
+ .event_init = fsl_emb_pmu_event_init,
+ .add = fsl_emb_pmu_add,
+ .del = fsl_emb_pmu_del,
+ .start = fsl_emb_pmu_start,
+ .stop = fsl_emb_pmu_stop,
+ .read = fsl_emb_pmu_read,
+};
+
/*
* A counter has overflowed; update its count and record
* things if requested. Note that interrupts are hard-disabled
@@ -540,6 +574,11 @@
s64 prev, delta, left;
int record = 0;
+ if (event->hw.state & PERF_HES_STOPPED) {
+ write_pmc(event->hw.idx, 0);
+ return;
+ }
+
/* we don't have to worry about interrupts here */
prev = local64_read(&event->hw.prev_count);
delta = (val - prev) & 0xfffffffful;
@@ -562,6 +601,11 @@
val = 0x80000000LL - left;
}
+ write_pmc(event->hw.idx, val);
+ local64_set(&event->hw.prev_count, val);
+ local64_set(&event->hw.period_left, left);
+ perf_event_update_userpage(event);
+
/*
* Finally record data if requested.
*/
@@ -571,23 +615,9 @@
perf_sample_data_init(&data, 0);
data.period = event->hw.last_period;
- if (perf_event_overflow(event, nmi, &data, regs)) {
- /*
- * Interrupts are coming too fast - throttle them
- * by setting the event to 0, so it will be
- * at least 2^30 cycles until the next interrupt
- * (assuming each event counts at most 2 counts
- * per cycle).
- */
- val = 0;
- left = ~0ULL >> 1;
- }
+ if (perf_event_overflow(event, nmi, &data, regs))
+ fsl_emb_pmu_stop(event, 0);
}
-
- write_pmc(event->hw.idx, val);
- local64_set(&event->hw.prev_count, val);
- local64_set(&event->hw.period_left, left);
- perf_event_update_userpage(event);
}
static void perf_event_interrupt(struct pt_regs *regs)
@@ -651,5 +681,7 @@
pr_info("%s performance monitor hardware support registered\n",
pmu->name);
+ perf_pmu_register(&fsl_emb_pmu);
+
return 0;
}
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 91356ff..b1c648a 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -728,7 +728,7 @@
p->thread.regs = childregs;
if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
- if (!test_thread_flag(TIF_32BIT))
+ if (!is_32bit_task())
childregs->gpr[13] = childregs->gpr[6];
else
#endif
@@ -823,7 +823,7 @@
regs->nip = start;
regs->msr = MSR_USER;
#else
- if (!test_thread_flag(TIF_32BIT)) {
+ if (!is_32bit_task()) {
unsigned long entry, toc;
/* start is a relocated pointer to the function descriptor for
@@ -995,7 +995,7 @@
if (usp == 0)
usp = regs->gpr[1]; /* stack pointer for child */
#ifdef CONFIG_PPC64
- if (test_thread_flag(TIF_32BIT)) {
+ if (is_32bit_task()) {
parent_tidp = TRUNC_PTR(parent_tidp);
child_tidp = TRUNC_PTR(child_tidp);
}
@@ -1199,19 +1199,17 @@
}
}
-void ppc64_runlatch_off(void)
+void __ppc64_runlatch_off(void)
{
unsigned long ctrl;
- if (cpu_has_feature(CPU_FTR_CTRL) && test_thread_flag(TIF_RUNLATCH)) {
- HMT_medium();
+ HMT_medium();
- clear_thread_flag(TIF_RUNLATCH);
+ clear_thread_flag(TIF_RUNLATCH);
- ctrl = mfspr(SPRN_CTRLF);
- ctrl &= ~CTRL_RUNLATCH;
- mtspr(SPRN_CTRLT, ctrl);
- }
+ ctrl = mfspr(SPRN_CTRLF);
+ ctrl &= ~CTRL_RUNLATCH;
+ mtspr(SPRN_CTRLT, ctrl);
}
#endif
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index a10ffc8..93666f9 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -258,17 +258,18 @@
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
static void __init exc_lvl_early_init(void)
{
- unsigned int i;
+ unsigned int i, hw_cpu;
/* interrupt stacks must be in lowmem, we get that for free on ppc32
* as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
for_each_possible_cpu(i) {
- critirq_ctx[i] = (struct thread_info *)
+ hw_cpu = get_hard_smp_processor_id(i);
+ critirq_ctx[hw_cpu] = (struct thread_info *)
__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
#ifdef CONFIG_BOOKE
- dbgirq_ctx[i] = (struct thread_info *)
+ dbgirq_ctx[hw_cpu] = (struct thread_info *)
__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
- mcheckirq_ctx[i] = (struct thread_info *)
+ mcheckirq_ctx[hw_cpu] = (struct thread_info *)
__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
#endif
}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 1bee4b6..e72690e 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -95,7 +95,7 @@
#ifdef CONFIG_SMP
-static int smt_enabled_cmdline;
+static char *smt_enabled_cmdline;
/* Look for ibm,smt-enabled OF option */
static void check_smt_enabled(void)
@@ -103,37 +103,46 @@
struct device_node *dn;
const char *smt_option;
+ /* Default to enabling all threads */
+ smt_enabled_at_boot = threads_per_core;
+
/* Allow the command line to overrule the OF option */
- if (smt_enabled_cmdline)
- return;
+ if (smt_enabled_cmdline) {
+ if (!strcmp(smt_enabled_cmdline, "on"))
+ smt_enabled_at_boot = threads_per_core;
+ else if (!strcmp(smt_enabled_cmdline, "off"))
+ smt_enabled_at_boot = 0;
+ else {
+ long smt;
+ int rc;
- dn = of_find_node_by_path("/options");
+ rc = strict_strtol(smt_enabled_cmdline, 10, &smt);
+ if (!rc)
+ smt_enabled_at_boot =
+ min(threads_per_core, (int)smt);
+ }
+ } else {
+ dn = of_find_node_by_path("/options");
+ if (dn) {
+ smt_option = of_get_property(dn, "ibm,smt-enabled",
+ NULL);
- if (dn) {
- smt_option = of_get_property(dn, "ibm,smt-enabled", NULL);
+ if (smt_option) {
+ if (!strcmp(smt_option, "on"))
+ smt_enabled_at_boot = threads_per_core;
+ else if (!strcmp(smt_option, "off"))
+ smt_enabled_at_boot = 0;
+ }
- if (smt_option) {
- if (!strcmp(smt_option, "on"))
- smt_enabled_at_boot = 1;
- else if (!strcmp(smt_option, "off"))
- smt_enabled_at_boot = 0;
- }
- }
+ of_node_put(dn);
+ }
+ }
}
/* Look for smt-enabled= cmdline option */
static int __init early_smt_enabled(char *p)
{
- smt_enabled_cmdline = 1;
-
- if (!p)
- return 0;
-
- if (!strcmp(p, "on") || !strcmp(p, "1"))
- smt_enabled_at_boot = 1;
- else if (!strcmp(p, "off") || !strcmp(p, "0"))
- smt_enabled_at_boot = 0;
-
+ smt_enabled_cmdline = p;
return 0;
}
early_param("smt-enabled", early_smt_enabled);
@@ -380,8 +389,8 @@
*/
xmon_setup();
- check_smt_enabled();
smp_setup_cpu_maps();
+ check_smt_enabled();
#ifdef CONFIG_SMP
/* Release secondary cpus out of their spinloops at 0x60 now that
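
Note on the setup_64.c rework: check_smt_enabled() now accepts "on", "off", or a numeric thread count on the smt-enabled= command line, clamps the number to threads_per_core, and runs after smp_setup_cpu_maps() so threads_per_core is already known. A user-space sketch of the same parsing, with strtol() standing in for strict_strtol():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_smt(const char *arg, int threads_per_core)
{
        char *end;
        long smt;

        if (!arg || !strcmp(arg, "on"))
                return threads_per_core;        /* enable all threads */
        if (!strcmp(arg, "off"))
                return 0;
        smt = strtol(arg, &end, 10);
        if (*end == '\0' && smt >= 0)
                return smt < threads_per_core ? (int)smt
                                              : threads_per_core;
        return threads_per_core;                /* unparseable: default */
}

int main(void)
{
        printf("%d %d %d %d\n",
               parse_smt("on", 4), parse_smt("off", 4),
               parse_smt("2", 4), parse_smt("8", 4));   /* 4 0 2 4 */
        return 0;
}
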
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 7109f5b..2300426 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -138,6 +138,7 @@
ti->local_flags &= ~_TLF_RESTORE_SIGMASK;
sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
}
+ regs->trap = 0;
return 0; /* no signals delivered */
}
@@ -164,6 +165,7 @@
ret = handle_rt_signal64(signr, &ka, &info, oldset, regs);
}
+ regs->trap = 0;
if (ret) {
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked, &current->blocked,
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 2666101..b96a3a0 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -511,6 +511,7 @@
if (!sig)
save_r2 = (unsigned int)regs->gpr[2];
err = restore_general_regs(regs, sr);
+ regs->trap = 0;
err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
if (!sig)
regs->gpr[2] = (unsigned long) save_r2;
@@ -884,7 +885,6 @@
regs->nip = (unsigned long) ka->sa.sa_handler;
/* enter the signal handler in big-endian mode */
regs->msr &= ~MSR_LE;
- regs->trap = 0;
return 1;
badframe:
@@ -1228,7 +1228,6 @@
regs->nip = (unsigned long) ka->sa.sa_handler;
/* enter the signal handler in big-endian mode */
regs->msr &= ~MSR_LE;
- regs->trap = 0;
return 1;
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 2fe6fc6..27c4a45 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -178,7 +178,7 @@
err |= __get_user(regs->xer, &sc->gp_regs[PT_XER]);
err |= __get_user(regs->ccr, &sc->gp_regs[PT_CCR]);
/* skip SOFTE */
- err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]);
+ regs->trap = 0;
err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index a61b3dd..0008bc5 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -427,11 +427,11 @@
#endif
if (!cpu_callin_map[cpu]) {
- printk("Processor %u is stuck.\n", cpu);
+ printk(KERN_ERR "Processor %u is stuck.\n", cpu);
return -ENOENT;
}
- printk("Processor %u found.\n", cpu);
+ DBG("Processor %u found.\n", cpu);
if (smp_ops->give_timebase)
smp_ops->give_timebase();
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index 20fd701..b1b6043 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -616,3 +616,11 @@
return sys_sync_file_range(fd, offset, nbytes, flags);
}
+
+asmlinkage long compat_sys_fanotify_mark(int fanotify_fd, unsigned int flags,
+ unsigned mask_hi, unsigned mask_lo,
+ int dfd, const char __user *pathname)
+{
+ u64 mask = ((u64)mask_hi << 32) | mask_lo;
+ return sys_fanotify_mark(fanotify_fd, flags, mask, dfd, pathname);
+}
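
The compat wrapper just added exists because 32-bit userland passes the 64-bit fanotify mask as two 32-bit register halves; the kernel reassembles it with a shift and an OR. A minimal demo of that reassembly (the mask value is arbitrary):

#include <stdio.h>
#include <stdint.h>

static uint64_t join_mask(unsigned int mask_hi, unsigned int mask_lo)
{
        return ((uint64_t)mask_hi << 32) | mask_lo;
}

int main(void)
{
        uint64_t mask = 0x0000000100000002ULL;  /* arbitrary example */
        unsigned int hi = mask >> 32;
        unsigned int lo = (uint32_t)mask;

        printf("hi=%#x lo=%#x -> mask=%#llx\n", hi, lo,
               (unsigned long long)join_mask(hi, lo));
        return 0;
}
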
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index ce53dfa..54888eb 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -53,7 +53,7 @@
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/delay.h>
-#include <linux/perf_event.h>
+#include <linux/irq_work.h>
#include <asm/trace.h>
#include <asm/io.h>
@@ -493,60 +493,60 @@
}
#endif /* CONFIG_PPC_ISERIES */
-#ifdef CONFIG_PERF_EVENTS
+#ifdef CONFIG_IRQ_WORK
/*
* 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
*/
#ifdef CONFIG_PPC64
-static inline unsigned long test_perf_event_pending(void)
+static inline unsigned long test_irq_work_pending(void)
{
unsigned long x;
asm volatile("lbz %0,%1(13)"
: "=r" (x)
- : "i" (offsetof(struct paca_struct, perf_event_pending)));
+ : "i" (offsetof(struct paca_struct, irq_work_pending)));
return x;
}
-static inline void set_perf_event_pending_flag(void)
+static inline void set_irq_work_pending_flag(void)
{
asm volatile("stb %0,%1(13)" : :
"r" (1),
- "i" (offsetof(struct paca_struct, perf_event_pending)));
+ "i" (offsetof(struct paca_struct, irq_work_pending)));
}
-static inline void clear_perf_event_pending(void)
+static inline void clear_irq_work_pending(void)
{
asm volatile("stb %0,%1(13)" : :
"r" (0),
- "i" (offsetof(struct paca_struct, perf_event_pending)));
+ "i" (offsetof(struct paca_struct, irq_work_pending)));
}
#else /* 32-bit */
-DEFINE_PER_CPU(u8, perf_event_pending);
+DEFINE_PER_CPU(u8, irq_work_pending);
-#define set_perf_event_pending_flag() __get_cpu_var(perf_event_pending) = 1
-#define test_perf_event_pending() __get_cpu_var(perf_event_pending)
-#define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0
+#define set_irq_work_pending_flag() __get_cpu_var(irq_work_pending) = 1
+#define test_irq_work_pending() __get_cpu_var(irq_work_pending)
+#define clear_irq_work_pending() __get_cpu_var(irq_work_pending) = 0
#endif /* 32 vs 64 bit */
-void set_perf_event_pending(void)
+void set_irq_work_pending(void)
{
preempt_disable();
- set_perf_event_pending_flag();
+ set_irq_work_pending_flag();
set_dec(1);
preempt_enable();
}
-#else /* CONFIG_PERF_EVENTS */
+#else /* CONFIG_IRQ_WORK */
-#define test_perf_event_pending() 0
-#define clear_perf_event_pending()
+#define test_irq_work_pending() 0
+#define clear_irq_work_pending()
-#endif /* CONFIG_PERF_EVENTS */
+#endif /* CONFIG_IRQ_WORK */
/*
* For iSeries shared processors, we have to let the hypervisor
@@ -577,28 +577,19 @@
* some CPUs will continue to take decrementer exceptions */
set_dec(DECREMENTER_MAX);
-#ifdef CONFIG_PPC32
+#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
if (atomic_read(&ppc_n_lost_interrupts) != 0)
do_IRQ(regs);
#endif
- now = get_tb_or_rtc();
- if (now < decrementer->next_tb) {
- /* not time for this event yet */
- now = decrementer->next_tb - now;
- if (now <= DECREMENTER_MAX)
- set_dec((int)now);
- trace_timer_interrupt_exit(regs);
- return;
- }
old_regs = set_irq_regs(regs);
irq_enter();
calculate_steal_time();
- if (test_perf_event_pending()) {
- clear_perf_event_pending();
- perf_event_do_pending();
+ if (test_irq_work_pending()) {
+ clear_irq_work_pending();
+ irq_work_run();
}
#ifdef CONFIG_PPC_ISERIES
@@ -606,8 +597,16 @@
get_lppaca()->int_dword.fields.decr_int = 0;
#endif
- if (evt->event_handler)
- evt->event_handler(evt);
+ now = get_tb_or_rtc();
+ if (now >= decrementer->next_tb) {
+ decrementer->next_tb = ~(u64)0;
+ if (evt->event_handler)
+ evt->event_handler(evt);
+ } else {
+ now = decrementer->next_tb - now;
+ if (now <= DECREMENTER_MAX)
+ set_dec((int)now);
+ }
#ifdef CONFIG_PPC_ISERIES
if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
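
Two things change in the time.c hunk: the pending-work flag moves from the perf-specific machinery to generic irq_work (a byte in the PACA on 64-bit, a per-cpu variable on 32-bit), and the decrementer handler now only invokes the clockevent handler once next_tb has really passed; otherwise it rearms the decrementer with the remaining ticks. A sketch of that rearm decision, with set_dec() stubbed out and arbitrary timebase values:

#include <stdio.h>
#include <stdint.h>

#define DECREMENTER_MAX 0x7fffffff

static void set_dec(int ticks)
{
        printf("set_dec(%d)\n", ticks);
}

static void decrementer_check(uint64_t now, uint64_t next_tb)
{
        if (now >= next_tb) {
                puts("run event handler");      /* timer really expired */
        } else {
                uint64_t delta = next_tb - now; /* not yet: rearm */

                if (delta <= DECREMENTER_MAX)
                        set_dec((int)delta);
        }
}

int main(void)
{
        decrementer_check(1000, 900);           /* expired: handle it */
        decrementer_check(1000, 5000);          /* early: set_dec(4000) */
        return 0;
}
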
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 00b9436..fa3469d 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -1059,7 +1059,7 @@
if (!dma_window)
return NULL;
- tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
+ tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
if (tbl == NULL)
return NULL;
@@ -1072,6 +1072,7 @@
tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
tbl->it_busno = 0;
tbl->it_type = TCE_VB;
+ tbl->it_blocksize = 16;
return iommu_init_table(tbl, -1);
}
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 71f1415..ace85fa 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -79,7 +79,9 @@
#endif /* CONFIG_PPC_STD_MMU_64 */
phys_addr_t memstart_addr = ~0;
+EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
+EXPORT_SYMBOL_GPL(kernstart_addr);
void free_initmem(void)
{
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
index cfa7682..b9d9fed 100644
--- a/arch/powerpc/mm/tlb_nohash_low.S
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -200,6 +200,7 @@
rlwimi r5,r4,0,16,31
wrteei 0
mtspr SPRN_MMUCR,r5
+ isync
/* tlbivax 0,r3 - use .long to avoid binutils deps */
.long 0x7c000624 | (r3 << 11)
isync
diff --git a/arch/powerpc/platforms/512x/clock.c b/arch/powerpc/platforms/512x/clock.c
index 5b243bd..3dc2a8d 100644
--- a/arch/powerpc/platforms/512x/clock.c
+++ b/arch/powerpc/platforms/512x/clock.c
@@ -57,7 +57,7 @@
int id_match = 0;
if (dev == NULL || id == NULL)
- return NULL;
+ return clk;
mutex_lock(&clocks_mutex);
list_for_each_entry(p, &clocks, node) {
diff --git a/arch/powerpc/platforms/52xx/efika.c b/arch/powerpc/platforms/52xx/efika.c
index 45c0cb9..18c1048 100644
--- a/arch/powerpc/platforms/52xx/efika.c
+++ b/arch/powerpc/platforms/52xx/efika.c
@@ -99,7 +99,7 @@
if (bus_range == NULL || len < 2 * sizeof(int)) {
printk(KERN_WARNING EFIKA_PLATFORM_NAME
": Can't get bus-range for %s\n", pcictrl->full_name);
- return;
+ goto out_put;
}
if (bus_range[1] == bus_range[0])
@@ -111,12 +111,12 @@
printk(" controlled by %s\n", pcictrl->full_name);
printk("\n");
- hose = pcibios_alloc_controller(of_node_get(pcictrl));
+ hose = pcibios_alloc_controller(pcictrl);
if (!hose) {
printk(KERN_WARNING EFIKA_PLATFORM_NAME
": Can't allocate PCI controller structure for %s\n",
pcictrl->full_name);
- return;
+ goto out_put;
}
hose->first_busno = bus_range[0];
@@ -124,6 +124,9 @@
hose->ops = &rtas_pci_ops;
pci_process_bridge_OF_ranges(hose, pcictrl, 0);
+ return;
+out_put:
+ of_node_put(pcictrl);
}
#else
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c
index 6e90531..41f3a7e 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_common.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c
@@ -325,12 +325,16 @@
clrbits32(&simple_gpio->simple_dvo, sync | out);
clrbits8(&wkup_gpio->wkup_dvo, reset);
- /* wait at lease 1 us */
- udelay(2);
+ /* wait for 1 us */
+ udelay(1);
/* Deassert reset */
setbits8(&wkup_gpio->wkup_dvo, reset);
+ /* wait at least 200ns */
+ /* 7 ~= (200ns * timebase) / ns2sec */
+ __delay(7);
+
/* Restore pin-muxing */
out_be32(&simple_gpio->port_config, mux);
diff --git a/arch/powerpc/platforms/83xx/mpc837x_mds.c b/arch/powerpc/platforms/83xx/mpc837x_mds.c
index f9751c8..8306832 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_mds.c
@@ -48,8 +48,10 @@
return -1;
np = of_find_node_by_name(NULL, "usb");
- if (!np)
- return -ENODEV;
+ if (!np) {
+ ret = -ENODEV;
+ goto out;
+ }
phy_type = of_get_property(np, "phy_type", NULL);
if (phy_type && !strcmp(phy_type, "ulpi")) {
clrbits8(bcsr_regs + 12, BCSR12_USB_SER_PIN);
@@ -65,8 +67,9 @@
}
of_node_put(np);
+out:
iounmap(bcsr_regs);
- return 0;
+ return ret;
}
/* ************************************************************************
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index da64be1..aa34cac 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -357,6 +357,7 @@
{
#ifdef CONFIG_PCI
struct pci_controller *hose;
+ struct device_node *np;
#endif
dma_addr_t max = 0xffffffff;
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
index e1467c9..34e0090 100644
--- a/arch/powerpc/platforms/85xx/p1022_ds.c
+++ b/arch/powerpc/platforms/85xx/p1022_ds.c
@@ -19,7 +19,7 @@
#include <linux/pci.h>
#include <linux/of_platform.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
#include <asm/mpic.h>
#include <asm/swiotlb.h>
@@ -97,7 +97,7 @@
#endif
#ifdef CONFIG_SWIOTLB
- if (lmb_end_of_DRAM() > max) {
+ if (memblock_end_of_DRAM() > max) {
ppc_swiotlb_enable = 1;
set_pci_dma_ops(&swiotlb_dma_ops);
ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index d1663db..81c9208 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -106,8 +106,7 @@
config MPIC_U3_HT_IRQS
bool
- depends on PPC_MAPLE
- default y
+ default n
config MPIC_BROKEN_REGREAD
bool
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 58b13ce..26a0671 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -477,7 +477,7 @@
ioid = cell_iommu_get_ioid(np);
- window = kmalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid);
+ window = kzalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid);
BUG_ON(window == NULL);
window->offset = offset;
diff --git a/arch/powerpc/platforms/iseries/iommu.c b/arch/powerpc/platforms/iseries/iommu.c
index ce61cea..d8b7633 100644
--- a/arch/powerpc/platforms/iseries/iommu.c
+++ b/arch/powerpc/platforms/iseries/iommu.c
@@ -184,7 +184,7 @@
BUG_ON(lsn == NULL);
- tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
+ tbl = kzalloc(sizeof(struct iommu_table), GFP_KERNEL);
iommu_table_getparms_iSeries(pdn->busno, *lsn, 0, tbl);
diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
index 39df6ab..df42399 100644
--- a/arch/powerpc/platforms/powermac/feature.c
+++ b/arch/powerpc/platforms/powermac/feature.c
@@ -2873,12 +2873,11 @@
/* Switch airport off */
for_each_node_by_name(np, "radio") {
- if (np && np->parent == macio_chips[0].of_node) {
+ if (np->parent == macio_chips[0].of_node) {
macio_chips[0].flags |= MACIO_FLAG_AIRPORT_ON;
core99_airport_enable(np, 0, 0);
}
}
- of_node_put(np);
}
/* On all machines that support sound PM, switch sound off */
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index ab2027c..3bc075c 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -1155,13 +1155,11 @@
pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, nd, 0, 0);
}
}
- of_node_put(nd);
for_each_node_by_name(nd, "ethernet") {
if (nd->parent && of_device_is_compatible(nd, "gmac")
&& of_device_is_compatible(nd->parent, "uni-north"))
pmac_call_feature(PMAC_FTR_GMAC_ENABLE, nd, 0, 0);
}
- of_node_put(nd);
}
void pmac_pci_fixup_cardbus(struct pci_dev* dev)
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 227c1c3..72d8054 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -129,20 +129,35 @@
struct property *property;
struct property *last_property = NULL;
struct cc_workarea *ccwa;
+ char *data_buf;
int cc_token;
- int rc;
+ int rc = -1;
cc_token = rtas_token("ibm,configure-connector");
if (cc_token == RTAS_UNKNOWN_SERVICE)
return NULL;
- spin_lock(&rtas_data_buf_lock);
- ccwa = (struct cc_workarea *)&rtas_data_buf[0];
+ data_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
+ if (!data_buf)
+ return NULL;
+
+ ccwa = (struct cc_workarea *)&data_buf[0];
ccwa->drc_index = drc_index;
ccwa->zero = 0;
- rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
- while (rc) {
+ do {
+ /* Since we release the rtas_data_buf lock between configure-
+ * connector calls, we want to re-populate rtas_data_buf
+ * with the contents of the previous call.
+ */
+ spin_lock(&rtas_data_buf_lock);
+
+ memcpy(rtas_data_buf, data_buf, RTAS_DATA_BUF_SIZE);
+ rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
+ memcpy(data_buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);
+
+ spin_unlock(&rtas_data_buf_lock);
+
switch (rc) {
case NEXT_SIBLING:
dn = dlpar_parse_cc_node(ccwa);
@@ -197,18 +212,19 @@
"returned from configure-connector\n", rc);
goto cc_error;
}
-
- rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
- }
-
- spin_unlock(&rtas_data_buf_lock);
- return first_dn;
+ } while (rc);
cc_error:
- if (first_dn)
- dlpar_free_cc_nodes(first_dn);
- spin_unlock(&rtas_data_buf_lock);
- return NULL;
+ kfree(data_buf);
+
+ if (rc) {
+ if (first_dn)
+ dlpar_free_cc_nodes(first_dn);
+
+ return NULL;
+ }
+
+ return first_dn;
}
static struct device_node *derive_parent(const char *path)
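
The dlpar change above stops holding rtas_data_buf_lock across the whole configure-connector sequence: the work area lives in a kzalloc'd staging buffer and is copied into and out of rtas_data_buf around each individual RTAS call, so the lock covers only the copy and the call itself. A user-space sketch of that stage-copy-call loop; lock(), unlock(), and firmware_call() are stand-ins:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BUF_SIZE 64

static char shared_buf[BUF_SIZE];       /* stands in for rtas_data_buf */

static void lock(void)   { /* spin_lock(&rtas_data_buf_lock) */ }
static void unlock(void) { /* spin_unlock(&rtas_data_buf_lock) */ }

static int firmware_call(char *buf)     /* stands in for rtas_call() */
{
        return ++buf[0] < 3;            /* pretend: done after 3 calls */
}

int main(void)
{
        char *data_buf = calloc(1, BUF_SIZE);
        int rc;

        if (!data_buf)
                return 1;
        do {
                lock();
                /* re-populate the shared buffer with the saved state */
                memcpy(shared_buf, data_buf, BUF_SIZE);
                rc = firmware_call(shared_buf);
                /* save the state back before dropping the lock */
                memcpy(data_buf, shared_buf, BUF_SIZE);
                unlock();
                printf("call returned %d\n", rc);
        } while (rc);
        free(data_buf);
        return 0;
}
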
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 395848e..a77bcae 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -403,7 +403,7 @@
pci->phb->dma_window_size = 0x8000000ul;
pci->phb->dma_window_base_cur = 0x8000000ul;
- tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
+ tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
pci->phb->node);
iommu_table_setparms(pci->phb, dn, tbl);
@@ -448,7 +448,7 @@
pdn->full_name, ppci->iommu_table);
if (!ppci->iommu_table) {
- tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
+ tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
ppci->phb->node);
iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window,
bus->number);
@@ -478,7 +478,7 @@
struct pci_controller *phb = PCI_DN(dn)->phb;
pr_debug(" --> first child, no bridge. Allocating iommu table.\n");
- tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
+ tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
phb->node);
iommu_table_setparms(phb, dn, tbl);
PCI_DN(dn)->iommu_table = iommu_init_table(tbl, phb->node);
@@ -544,7 +544,7 @@
pci = PCI_DN(pdn);
if (!pci->iommu_table) {
- tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
+ tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
pci->phb->node);
iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window,
pci->phb->bus->number);
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 3b1bf61..0317cce 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -182,10 +182,13 @@
/* Special case - we inhibit secondary thread startup
* during boot if the user requests it.
*/
- if (system_state < SYSTEM_RUNNING &&
- cpu_has_feature(CPU_FTR_SMT) &&
- !smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
- return 0;
+ if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
+ if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
+ return 0;
+ if (smt_enabled_at_boot
+ && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
+ return 0;
+ }
return 1;
}
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 5b22b07..93834b0 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -928,8 +928,10 @@
if (xics_status[0] != hw_cpu)
goto unlock;
- printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n",
- virq, cpu);
+ /* This is expected during cpu offline. */
+ if (cpu_online(cpu))
+ printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n",
+ virq, cpu);
/* Reset affinity to all cpus */
cpumask_setall(irq_to_desc(virq)->affinity);
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 209384b..4ae9332 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -399,6 +399,8 @@
DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013, quirk_fsl_pcie_header);
DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020E, quirk_fsl_pcie_header);
DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020, quirk_fsl_pcie_header);
+DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1021E, quirk_fsl_pcie_header);
+DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1021, quirk_fsl_pcie_header);
DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022E, quirk_fsl_pcie_header);
DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022, quirk_fsl_pcie_header);
DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010E, quirk_fsl_pcie_header);
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 6425abe..3017532 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -240,12 +240,13 @@
static void __iomem *rio_regs_win;
+#ifdef CONFIG_E500
static int (*saved_mcheck_exception)(struct pt_regs *regs);
static int fsl_rio_mcheck_exception(struct pt_regs *regs)
{
const struct exception_table_entry *entry = NULL;
- unsigned long reason = (mfspr(SPRN_MCSR) & MCSR_MASK);
+ unsigned long reason = mfspr(SPRN_MCSR);
if (reason & MCSR_BUS_RBERR) {
reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR));
@@ -269,6 +270,7 @@
else
return cur_cpu_spec->machine_check(regs);
}
+#endif
/**
* fsl_rio_doorbell_send - Send a MPC85xx doorbell message
@@ -1517,8 +1519,10 @@
fsl_rio_doorbell_init(port);
fsl_rio_port_write_init(port);
+#ifdef CONFIG_E500
saved_mcheck_exception = ppc_md.machine_check_exception;
ppc_md.machine_check_exception = fsl_rio_mcheck_exception;
+#endif
/* Ensure that RFXE is set */
mtspr(SPRN_HID1, (mfspr(SPRN_HID1) | 0x20000));
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
index 3da8014..90020de 100644
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ b/arch/powerpc/sysdev/qe_lib/qe.c
@@ -640,6 +640,7 @@
if ((num_of_snums < 28) || (num_of_snums > QE_NUM_OF_SNUM)) {
/* No QE ever has fewer than 28 SNUMs */
pr_err("QE: number of snum is invalid\n");
+ of_node_put(qe);
return -EINVAL;
}
}
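
The of_node_put() added above balances the reference taken when the node was
looked up earlier in the function; without it, the early -EINVAL return
leaked a device_node reference. The usual pairing looks like this (minimal
sketch; the validity check is hypothetical):

    #include <linux/of.h>

    static int example_qe_probe(void)
    {
            struct device_node *np;

            np = of_find_compatible_node(NULL, NULL, "fsl,qe");
            if (!np)
                    return -ENODEV;
            if (!node_is_sane(np)) {        /* hypothetical check */
                    of_node_put(np);        /* error paths must drop the ref */
                    return -EINVAL;
            }
            of_node_put(np);                /* ...and so must success paths */
            return 0;
    }
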
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 0554445..d17d04c 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2880,15 +2880,14 @@
}
#ifdef CONFIG_MAGIC_SYSRQ
-static void sysrq_handle_xmon(int key, struct tty_struct *tty)
+static void sysrq_handle_xmon(int key)
{
/* ensure xmon is enabled */
xmon_init(1);
debugger(get_irq_regs());
}
-static struct sysrq_key_op sysrq_xmon_op =
-{
+static struct sysrq_key_op sysrq_xmon_op = {
.handler = sysrq_handle_xmon,
.help_msg = "Xmon",
.action_msg = "Entering xmon",
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index f0777a4..75976a1 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -95,6 +95,7 @@
select HAVE_KVM if 64BIT
select HAVE_ARCH_TRACEHOOK
select INIT_ALL_POSSIBLE
+ select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_BZIP2
@@ -198,6 +199,13 @@
can be controlled through /sys/devices/system/cpu/cpu#.
Say N if you want to disable CPU hotplug.
+config SCHED_BOOK
+ bool "Book scheduler support"
+ depends on SMP
+ help
+ Book scheduler support improves the CPU scheduler's decision making
+ when dealing with machines that have several books.
+
config MATHEMU
bool "IEEE FPU emulation"
depends on MARCH_G5
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index 104f200..a875c2f 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -181,7 +181,7 @@
#endif
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
{
unsigned long stack;
diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h
index 498bc38..881d945 100644
--- a/arch/s390/include/asm/hardirq.h
+++ b/arch/s390/include/asm/hardirq.h
@@ -12,10 +12,6 @@
#ifndef __ASM_HARDIRQ_H
#define __ASM_HARDIRQ_H
-#include <linux/threads.h>
-#include <linux/sched.h>
-#include <linux/cache.h>
-#include <linux/interrupt.h>
#include <asm/lowcore.h>
#define local_softirq_pending() (S390_lowcore.softirq_pending)
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index 670a1d1..bb8343d 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -97,6 +97,7 @@
{
pte_t pte = huge_ptep_get(ptep);
+ mm->context.flush_mm = 1;
pmd_clear((pmd_t *) ptep);
return pte;
}
@@ -167,7 +168,8 @@
({ \
pte_t __pte = huge_ptep_get(__ptep); \
if (pte_write(__pte)) { \
- if (atomic_read(&(__mm)->mm_users) > 1 || \
+ (__mm)->context.flush_mm = 1; \
+ if (atomic_read(&(__mm)->context.attach_count) > 1 || \
(__mm) != current->active_mm) \
huge_ptep_invalidate(__mm, __addr, __ptep); \
set_huge_pte_at(__mm, __addr, __ptep, \
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 99e3409..78522cde 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -2,6 +2,8 @@
#define __MMU_H
typedef struct {
+ atomic_t attach_count;
+ unsigned int flush_mm;
spinlock_t list_lock;
struct list_head crst_list;
struct list_head pgtable_list;
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 976e273..a6f0e7c 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -11,11 +11,14 @@
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
+#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
+ atomic_set(&mm->context.attach_count, 0);
+ mm->context.flush_mm = 0;
mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
#ifdef CONFIG_64BIT
mm->context.asce_bits |= _ASCE_TYPE_REGION3;
@@ -76,6 +79,12 @@
{
cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
update_mm(next, tsk);
+ atomic_dec(&prev->context.attach_count);
+ WARN_ON(atomic_read(&prev->context.attach_count) < 0);
+ atomic_inc(&next->context.attach_count);
+ /* Check for TLBs not flushed yet */
+ if (next->context.flush_mm)
+ __tlb_flush_mm(next);
}
#define enter_lazy_tlb(mm,tsk) do { } while (0)
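
The attach_count/flush_mm pair added in mmu.h above implements deferred TLB
flushing: primitives such as ptep_get_and_clear() (further down) mark the mm
dirty instead of flushing while only one context is attached, and switch_mm()
replays the pending flush before another CPU runs on the mm. A condensed
restatement of the two halves (sketch, using only names from this patch):

    /* Writer side: record that a flush was skipped. */
    static void defer_flush(struct mm_struct *mm)
    {
            mm->context.flush_mm = 1;   /* seen by __tlb_flush_mm_cond() */
    }

    /* Attach side, as in switch_mm() above: replay before running. */
    static void attach_mm(struct mm_struct *next)
    {
            atomic_inc(&next->context.attach_count);
            if (next->context.flush_mm)
                    __tlb_flush_mm(next);
    }
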
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index 3840cbe..a75f168 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -4,7 +4,6 @@
* Copyright 2009 Martin Schwidefsky, IBM Corporation.
*/
-static inline void set_perf_event_pending(void) {}
-static inline void clear_perf_event_pending(void) {}
+/* Empty, just to avoid a compile error */
#define PERF_EVENT_INDEX_OFFSET 0
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 89a504c..3157441 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -880,7 +880,8 @@
#define ptep_get_and_clear(__mm, __address, __ptep) \
({ \
pte_t __pte = *(__ptep); \
- if (atomic_read(&(__mm)->mm_users) > 1 || \
+ (__mm)->context.flush_mm = 1; \
+ if (atomic_read(&(__mm)->context.attach_count) > 1 || \
(__mm) != current->active_mm) \
ptep_invalidate(__mm, __address, __ptep); \
else \
@@ -923,7 +924,8 @@
({ \
pte_t __pte = *(__ptep); \
if (pte_write(__pte)) { \
- if (atomic_read(&(__mm)->mm_users) > 1 || \
+ (__mm)->context.flush_mm = 1; \
+ if (atomic_read(&(__mm)->context.attach_count) > 1 || \
(__mm) != current->active_mm) \
ptep_invalidate(__mm, __addr, __ptep); \
set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index cef6621..38ddd8a 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -97,7 +97,6 @@
extern void account_vtime(struct task_struct *, struct task_struct *);
extern void account_tick_vtime(struct task_struct *);
-extern void account_system_vtime(struct task_struct *);
#ifdef CONFIG_PFAULT
extern void pfault_irq_init(void);
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 81150b0..fd1c00d 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -50,8 +50,7 @@
struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
tlb->mm = mm;
- tlb->fullmm = full_mm_flush || (num_online_cpus() == 1) ||
- (atomic_read(&mm->mm_users) <= 1 && mm == current->active_mm);
+ tlb->fullmm = full_mm_flush;
tlb->nr_ptes = 0;
tlb->nr_pxds = TLB_NR_PTRS;
if (tlb->fullmm)
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 304cffa..29d5d6d 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -94,8 +94,12 @@
static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
{
- if (atomic_read(&mm->mm_users) <= 1 && mm == current->active_mm)
+ spin_lock(&mm->page_table_lock);
+ if (mm->context.flush_mm) {
__tlb_flush_mm(mm);
+ mm->context.flush_mm = 0;
+ }
+ spin_unlock(&mm->page_table_lock);
}
/*
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 831bd03..051107a 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -3,15 +3,32 @@
#include <linux/cpumask.h>
-#define mc_capable() (1)
-
-const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
-
extern unsigned char cpu_core_id[NR_CPUS];
extern cpumask_t cpu_core_map[NR_CPUS];
+static inline const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
+{
+ return &cpu_core_map[cpu];
+}
+
#define topology_core_id(cpu) (cpu_core_id[cpu])
#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
+#define mc_capable() (1)
+
+#ifdef CONFIG_SCHED_BOOK
+
+extern unsigned char cpu_book_id[NR_CPUS];
+extern cpumask_t cpu_book_map[NR_CPUS];
+
+static inline const struct cpumask *cpu_book_mask(unsigned int cpu)
+{
+ return &cpu_book_map[cpu];
+}
+
+#define topology_book_id(cpu) (cpu_book_id[cpu])
+#define topology_book_cpumask(cpu) (&cpu_book_map[cpu])
+
+#endif /* CONFIG_SCHED_BOOK */
int topology_set_cpu_management(int fc);
void topology_schedule_update(void);
@@ -30,6 +47,8 @@
};
#endif
+#define SD_BOOK_INIT SD_CPU_INIT
+
#include <asm-generic/topology.h>
#endif /* _ASM_S390_TOPOLOGY_H */
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 403fb43..ff579b6 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -42,8 +42,8 @@
int __user *parent_tidptr, int __user *child_tidptr);
long sys_vfork(void);
void execve_tail(void);
-long sys_execve(const char __user *name, char __user * __user *argv,
- char __user * __user *envp);
+long sys_execve(const char __user *name, const char __user *const __user *argv,
+ const char __user *const __user *envp);
long sys_sigsuspend(int history0, int history1, old_sigset_t mask);
long sys_sigaction(int sig, const struct old_sigaction __user *act,
struct old_sigaction __user *oact);
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 22cfd63..f7167ee 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -407,10 +407,9 @@
{
vfree(me->arch.syminfo);
me->arch.syminfo = NULL;
- return module_bug_finalize(hdr, sechdrs, me);
+ return 0;
}
void module_arch_cleanup(struct module *mod)
{
- module_bug_cleanup(mod);
}
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 541053e..8127ebd 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -583,6 +583,7 @@
sf->gprs[9] = (unsigned long) sf;
cpu_lowcore->save_area[15] = (unsigned long) sf;
__ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
+ atomic_inc(&init_mm.context.attach_count);
asm volatile(
" stam 0,15,0(%0)"
: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
@@ -659,6 +660,7 @@
while (sigp_p(0, cpu, sigp_set_prefix) == sigp_busy)
udelay(10);
smp_free_lowcore(cpu);
+ atomic_dec(&init_mm.context.attach_count);
pr_info("Processor %d stopped\n", cpu);
}
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index bcef007..13559c9 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -57,8 +57,8 @@
union tl_entry tle[0];
};
-struct core_info {
- struct core_info *next;
+struct mask_info {
+ struct mask_info *next;
unsigned char id;
cpumask_t mask;
};
@@ -66,7 +66,6 @@
static int topology_enabled;
static void topology_work_fn(struct work_struct *work);
static struct tl_info *tl_info;
-static struct core_info core_info;
static int machine_has_topology;
static struct timer_list topology_timer;
static void set_topology_timer(void);
@@ -74,38 +73,37 @@
/* topology_lock protects the core linked list */
static DEFINE_SPINLOCK(topology_lock);
+static struct mask_info core_info;
cpumask_t cpu_core_map[NR_CPUS];
unsigned char cpu_core_id[NR_CPUS];
-static cpumask_t cpu_coregroup_map(unsigned int cpu)
+#ifdef CONFIG_SCHED_BOOK
+static struct mask_info book_info;
+cpumask_t cpu_book_map[NR_CPUS];
+unsigned char cpu_book_id[NR_CPUS];
+#endif
+
+static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
- struct core_info *core = &core_info;
- unsigned long flags;
cpumask_t mask;
cpus_clear(mask);
if (!topology_enabled || !machine_has_topology)
return cpu_possible_map;
- spin_lock_irqsave(&topology_lock, flags);
- while (core) {
- if (cpu_isset(cpu, core->mask)) {
- mask = core->mask;
+ while (info) {
+ if (cpu_isset(cpu, info->mask)) {
+ mask = info->mask;
break;
}
- core = core->next;
+ info = info->next;
}
- spin_unlock_irqrestore(&topology_lock, flags);
if (cpus_empty(mask))
mask = cpumask_of_cpu(cpu);
return mask;
}
-const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
-{
- return &cpu_core_map[cpu];
-}
-
-static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
+static void add_cpus_to_mask(struct tl_cpu *tl_cpu, struct mask_info *book,
+ struct mask_info *core)
{
unsigned int cpu;
@@ -117,23 +115,35 @@
rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin;
for_each_present_cpu(lcpu) {
- if (cpu_logical_map(lcpu) == rcpu) {
- cpu_set(lcpu, core->mask);
- cpu_core_id[lcpu] = core->id;
- smp_cpu_polarization[lcpu] = tl_cpu->pp;
- }
+ if (cpu_logical_map(lcpu) != rcpu)
+ continue;
+#ifdef CONFIG_SCHED_BOOK
+ cpu_set(lcpu, book->mask);
+ cpu_book_id[lcpu] = book->id;
+#endif
+ cpu_set(lcpu, core->mask);
+ cpu_core_id[lcpu] = core->id;
+ smp_cpu_polarization[lcpu] = tl_cpu->pp;
}
}
}
-static void clear_cores(void)
+static void clear_masks(void)
{
- struct core_info *core = &core_info;
+ struct mask_info *info;
- while (core) {
- cpus_clear(core->mask);
- core = core->next;
+ info = &core_info;
+ while (info) {
+ cpus_clear(info->mask);
+ info = info->next;
}
+#ifdef CONFIG_SCHED_BOOK
+ info = &book_info;
+ while (info) {
+ cpus_clear(info->mask);
+ info = info->next;
+ }
+#endif
}
static union tl_entry *next_tle(union tl_entry *tle)
@@ -146,29 +156,36 @@
static void tl_to_cores(struct tl_info *info)
{
+#ifdef CONFIG_SCHED_BOOK
+ struct mask_info *book = &book_info;
+#else
+ struct mask_info *book = NULL;
+#endif
+ struct mask_info *core = &core_info;
union tl_entry *tle, *end;
- struct core_info *core = &core_info;
+
spin_lock_irq(&topology_lock);
- clear_cores();
+ clear_masks();
tle = info->tle;
end = (union tl_entry *)((unsigned long)info + info->length);
while (tle < end) {
switch (tle->nl) {
- case 5:
- case 4:
- case 3:
+#ifdef CONFIG_SCHED_BOOK
case 2:
+ book = book->next;
+ book->id = tle->container.id;
break;
+#endif
case 1:
core = core->next;
core->id = tle->container.id;
break;
case 0:
- add_cpus_to_core(&tle->cpu, core);
+ add_cpus_to_mask(&tle->cpu, book, core);
break;
default:
- clear_cores();
+ clear_masks();
machine_has_topology = 0;
goto out;
}
@@ -221,10 +238,29 @@
static void update_cpu_core_map(void)
{
+ unsigned long flags;
int cpu;
- for_each_possible_cpu(cpu)
- cpu_core_map[cpu] = cpu_coregroup_map(cpu);
+ spin_lock_irqsave(&topology_lock, flags);
+ for_each_possible_cpu(cpu) {
+ cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
+#ifdef CONFIG_SCHED_BOOK
+ cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
+#endif
+ }
+ spin_unlock_irqrestore(&topology_lock, flags);
+}
+
+static void store_topology(struct tl_info *info)
+{
+#ifdef CONFIG_SCHED_BOOK
+ int rc;
+
+ rc = stsi(info, 15, 1, 3);
+ if (rc != -ENOSYS)
+ return;
+#endif
+ stsi(info, 15, 1, 2);
}
int arch_update_cpu_topology(void)
@@ -238,7 +274,7 @@
topology_update_polarization_simple();
return 0;
}
- stsi(info, 15, 1, 2);
+ store_topology(info);
tl_to_cores(info);
update_cpu_core_map();
for_each_online_cpu(cpu) {
@@ -299,12 +335,24 @@
}
__initcall(init_topology_update);
+static void alloc_masks(struct tl_info *info, struct mask_info *mask, int offset)
+{
+ int i, nr_masks;
+
+ nr_masks = info->mag[NR_MAG - offset];
+ for (i = 0; i < info->mnest - offset; i++)
+ nr_masks *= info->mag[NR_MAG - offset - 1 - i];
+ nr_masks = max(nr_masks, 1);
+ for (i = 0; i < nr_masks; i++) {
+ mask->next = alloc_bootmem(sizeof(struct mask_info));
+ mask = mask->next;
+ }
+}
+
void __init s390_init_cpu_topology(void)
{
unsigned long long facility_bits;
struct tl_info *info;
- struct core_info *core;
- int nr_cores;
int i;
if (stfle(&facility_bits, 1) <= 0)
@@ -315,25 +363,13 @@
tl_info = alloc_bootmem_pages(PAGE_SIZE);
info = tl_info;
- stsi(info, 15, 1, 2);
-
- nr_cores = info->mag[NR_MAG - 2];
- for (i = 0; i < info->mnest - 2; i++)
- nr_cores *= info->mag[NR_MAG - 3 - i];
-
+ store_topology(info);
pr_info("The CPU configuration topology of the machine is:");
for (i = 0; i < NR_MAG; i++)
printk(" %d", info->mag[i]);
printk(" / %d\n", info->mnest);
-
- core = &core_info;
- for (i = 0; i < nr_cores; i++) {
- core->next = alloc_bootmem(sizeof(struct core_info));
- core = core->next;
- if (!core)
- goto error;
- }
- return;
-error:
- machine_has_topology = 0;
+ alloc_masks(info, &core_info, 2);
+#ifdef CONFIG_SCHED_BOOK
+ alloc_masks(info, &book_info, 3);
+#endif
}
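
alloc_masks() sizes each per-level list from the magnitude vector returned by
STSI: mag[NR_MAG - offset] counts containers at the requested nesting level,
and the loop multiplies in every level above it. A worked example on a
hypothetical machine (assuming NR_MAG == 6 as elsewhere in this file):

    /* mag = { 0, 0, 0, 2, 4, 8 }, mnest = 3
     *   -> 2 books, each with 4 cores, each with 8 CPUs.
     * alloc_masks(info, &core_info, 2): 4 * 2 = 8 mask_info nodes
     * alloc_masks(info, &book_info, 3): 2 mask_info nodes
     * i.e. one preallocated list node per container at that level, so
     * tl_to_cores() can always advance to the next entry. */
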
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index acc91c7..30eb6d02 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -74,6 +74,8 @@
__ctl_load(S390_lowcore.kernel_asce, 13, 13);
__raw_local_irq_ssm(ssm_mask);
+ atomic_set(&init_mm.context.attach_count, 1);
+
sparse_memory_present_with_active_regions(MAX_NUMNODES);
sparse_init();
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 33990fa..35b6879 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -16,6 +16,7 @@
select HAVE_ARCH_TRACEHOOK
select HAVE_DMA_API_DEBUG
select HAVE_DMA_ATTRS
+ select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select PERF_USE_VMALLOC
select HAVE_KERNEL_GZIP
@@ -249,6 +250,11 @@
select PM
select PM_RUNTIME
+config CPU_HAS_PMU
+ depends on CPU_SH4 || CPU_SH4A
+ default y
+ bool
+
if SUPERH32
choice
@@ -738,6 +744,14 @@
LLSC, this should be more efficient than the other alternative of
disabling interrupts around the atomic sequence.
+config HW_PERF_EVENTS
+ bool "Enable hardware performance counter support for perf events"
+ depends on PERF_EVENTS && CPU_HAS_PMU
+ default y
+ help
+ Enable hardware performance counter support for perf events. If
+ disabled, perf events will use software events only.
+
source "drivers/sh/Kconfig"
endmenu
diff --git a/arch/sh/include/asm/perf_event.h b/arch/sh/include/asm/perf_event.h
index 3d0c9f3..14308be 100644
--- a/arch/sh/include/asm/perf_event.h
+++ b/arch/sh/include/asm/perf_event.h
@@ -26,11 +26,4 @@
extern int reserve_pmc_hardware(void);
extern void release_pmc_hardware(void);
-static inline void set_perf_event_pending(void)
-{
- /* Nothing to see here, move along. */
-}
-
-#define PERF_EVENT_INDEX_OFFSET 0
-
#endif /* __ASM_SH_PERF_EVENT_H */
diff --git a/arch/sh/kernel/module.c b/arch/sh/kernel/module.c
index 43adddf..ae0be69 100644
--- a/arch/sh/kernel/module.c
+++ b/arch/sh/kernel/module.c
@@ -149,13 +149,11 @@
int ret = 0;
ret |= module_dwarf_finalize(hdr, sechdrs, me);
- ret |= module_bug_finalize(hdr, sechdrs, me);
return ret;
}
void module_arch_cleanup(struct module *mod)
{
- module_bug_cleanup(mod);
module_dwarf_cleanup(mod);
}
diff --git a/arch/sh/kernel/perf_callchain.c b/arch/sh/kernel/perf_callchain.c
index a9dd3ab..d5ca1ef 100644
--- a/arch/sh/kernel/perf_callchain.c
+++ b/arch/sh/kernel/perf_callchain.c
@@ -14,11 +14,6 @@
#include <asm/unwinder.h>
#include <asm/ptrace.h>
-static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
-{
- if (entry->nr < PERF_MAX_STACK_DEPTH)
- entry->ip[entry->nr++] = ip;
-}
static void callchain_warning(void *data, char *msg)
{
@@ -39,7 +34,7 @@
struct perf_callchain_entry *entry = data;
if (reliable)
- callchain_store(entry, addr);
+ perf_callchain_store(entry, addr);
}
static const struct stacktrace_ops callchain_ops = {
@@ -49,47 +44,10 @@
.address = callchain_address,
};
-static void
-perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
- callchain_store(entry, PERF_CONTEXT_KERNEL);
- callchain_store(entry, regs->pc);
+ perf_callchain_store(entry, regs->pc);
unwind_stack(NULL, regs, NULL, &callchain_ops, entry);
}
-
-static void
-perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
-{
- int is_user;
-
- if (!regs)
- return;
-
- is_user = user_mode(regs);
-
- if (is_user && current->state != TASK_RUNNING)
- return;
-
- /*
- * Only the kernel side is implemented for now.
- */
- if (!is_user)
- perf_callchain_kernel(regs, entry);
-}
-
-/*
- * No need for separate IRQ and NMI entries.
- */
-static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
-
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
-{
- struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
-
- entry->nr = 0;
-
- perf_do_callchain(regs, entry);
-
- return entry;
-}
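
The open-coded callchain plumbing deleted above now lives in the perf core;
the generic perf_callchain_store() this file switches to behaves like the
removed helper (sketch matching the deleted callchain_store()):

    static inline void perf_callchain_store(struct perf_callchain_entry *entry,
                                            u64 ip)
    {
            if (entry->nr < PERF_MAX_STACK_DEPTH)
                    entry->ip[entry->nr++] = ip;
    }

The PERF_CONTEXT_KERNEL/PERF_CONTEXT_USER markers are likewise emitted by the
core now, which is why the arch code no longer stores them itself.
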
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
index 7a3dc35..5a4b334 100644
--- a/arch/sh/kernel/perf_event.c
+++ b/arch/sh/kernel/perf_event.c
@@ -59,6 +59,24 @@
return !!sh_pmu;
}
+const char *perf_pmu_name(void)
+{
+ if (!sh_pmu)
+ return NULL;
+
+ return sh_pmu->name;
+}
+EXPORT_SYMBOL_GPL(perf_pmu_name);
+
+int perf_num_counters(void)
+{
+ if (!sh_pmu)
+ return 0;
+
+ return sh_pmu->num_events;
+}
+EXPORT_SYMBOL_GPL(perf_num_counters);
+
/*
* Release the PMU if this is the last perf_event.
*/
@@ -206,50 +224,80 @@
local64_add(delta, &event->count);
}
-static void sh_pmu_disable(struct perf_event *event)
+static void sh_pmu_stop(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
- clear_bit(idx, cpuc->active_mask);
- sh_pmu->disable(hwc, idx);
+ if (!(event->hw.state & PERF_HES_STOPPED)) {
+ sh_pmu->disable(hwc, idx);
+ cpuc->events[idx] = NULL;
+ event->hw.state |= PERF_HES_STOPPED;
+ }
- barrier();
+ if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
+ sh_perf_event_update(event, &event->hw, idx);
+ event->hw.state |= PERF_HES_UPTODATE;
+ }
+}
- sh_perf_event_update(event, &event->hw, idx);
+static void sh_pmu_start(struct perf_event *event, int flags)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
- cpuc->events[idx] = NULL;
- clear_bit(idx, cpuc->used_mask);
+ if (WARN_ON_ONCE(idx == -1))
+ return;
+
+ if (flags & PERF_EF_RELOAD)
+ WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+
+ cpuc->events[idx] = event;
+ event->hw.state = 0;
+ sh_pmu->enable(hwc, idx);
+}
+
+static void sh_pmu_del(struct perf_event *event, int flags)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+ sh_pmu_stop(event, PERF_EF_UPDATE);
+ __clear_bit(event->hw.idx, cpuc->used_mask);
perf_event_update_userpage(event);
}
-static int sh_pmu_enable(struct perf_event *event)
+static int sh_pmu_add(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
+ int ret = -EAGAIN;
- if (test_and_set_bit(idx, cpuc->used_mask)) {
+ perf_pmu_disable(event->pmu);
+
+ if (__test_and_set_bit(idx, cpuc->used_mask)) {
idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);
if (idx == sh_pmu->num_events)
- return -EAGAIN;
+ goto out;
- set_bit(idx, cpuc->used_mask);
+ __set_bit(idx, cpuc->used_mask);
hwc->idx = idx;
}
sh_pmu->disable(hwc, idx);
- cpuc->events[idx] = event;
- set_bit(idx, cpuc->active_mask);
-
- sh_pmu->enable(hwc, idx);
+ event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+ if (flags & PERF_EF_START)
+ sh_pmu_start(event, PERF_EF_RELOAD);
perf_event_update_userpage(event);
-
- return 0;
+ ret = 0;
+out:
+ perf_pmu_enable(event->pmu);
+ return ret;
}
static void sh_pmu_read(struct perf_event *event)
@@ -257,24 +305,56 @@
sh_perf_event_update(event, &event->hw, event->hw.idx);
}
-static const struct pmu pmu = {
- .enable = sh_pmu_enable,
- .disable = sh_pmu_disable,
- .read = sh_pmu_read,
-};
-
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+static int sh_pmu_event_init(struct perf_event *event)
{
- int err = __hw_perf_event_init(event);
+ int err;
+
+ switch (event->attr.type) {
+ case PERF_TYPE_RAW:
+ case PERF_TYPE_HW_CACHE:
+ case PERF_TYPE_HARDWARE:
+ err = __hw_perf_event_init(event);
+ break;
+
+ default:
+ return -ENOENT;
+ }
+
if (unlikely(err)) {
if (event->destroy)
event->destroy(event);
- return ERR_PTR(err);
}
- return &pmu;
+ return err;
}
+static void sh_pmu_enable(struct pmu *pmu)
+{
+ if (!sh_pmu_initialized())
+ return;
+
+ sh_pmu->enable_all();
+}
+
+static void sh_pmu_disable(struct pmu *pmu)
+{
+ if (!sh_pmu_initialized())
+ return;
+
+ sh_pmu->disable_all();
+}
+
+static struct pmu pmu = {
+ .pmu_enable = sh_pmu_enable,
+ .pmu_disable = sh_pmu_disable,
+ .event_init = sh_pmu_event_init,
+ .add = sh_pmu_add,
+ .del = sh_pmu_del,
+ .start = sh_pmu_start,
+ .stop = sh_pmu_stop,
+ .read = sh_pmu_read,
+};
+
static void sh_pmu_setup(int cpu)
{
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
@@ -299,32 +379,17 @@
return NOTIFY_OK;
}
-void hw_perf_enable(void)
-{
- if (!sh_pmu_initialized())
- return;
-
- sh_pmu->enable_all();
-}
-
-void hw_perf_disable(void)
-{
- if (!sh_pmu_initialized())
- return;
-
- sh_pmu->disable_all();
-}
-
-int __cpuinit register_sh_pmu(struct sh_pmu *pmu)
+int __cpuinit register_sh_pmu(struct sh_pmu *_pmu)
{
if (sh_pmu)
return -EBUSY;
- sh_pmu = pmu;
+ sh_pmu = _pmu;
- pr_info("Performance Events: %s support registered\n", pmu->name);
+ pr_info("Performance Events: %s support registered\n", _pmu->name);
- WARN_ON(pmu->num_events > MAX_HWEVENTS);
+ WARN_ON(_pmu->num_events > MAX_HWEVENTS);
+ perf_pmu_register(&pmu);
perf_cpu_notifier(sh_pmu_notifier);
return 0;
}
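
For reference, the event-state flags threaded through sh_pmu_add(), start()
and stop() above encode the following lifecycle (as implied by this
conversion):

    /*
     * add(event, !PERF_EF_START): state = PERF_HES_STOPPED | PERF_HES_UPTODATE
     * start(event, PERF_EF_RELOAD): state = 0, counter programmed
     * stop(event, PERF_EF_UPDATE):  counter folded into event->count,
     *                               both flags set again
     * del(event):                   stop(PERF_EF_UPDATE), then the counter
     *                               slot is released from used_mask
     */
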
diff --git a/arch/sh/oprofile/Makefile b/arch/sh/oprofile/Makefile
index 4886c5c..e85aae7 100644
--- a/arch/sh/oprofile/Makefile
+++ b/arch/sh/oprofile/Makefile
@@ -6,4 +6,8 @@
oprofilefs.o oprofile_stats.o \
timer_int.o )
+ifeq ($(CONFIG_HW_PERF_EVENTS),y)
+DRIVER_OBJS += $(addprefix ../../../drivers/oprofile/, oprofile_perf.o)
+endif
+
oprofile-y := $(DRIVER_OBJS) common.o backtrace.o
diff --git a/arch/sh/oprofile/common.c b/arch/sh/oprofile/common.c
index ac60493..e10d893 100644
--- a/arch/sh/oprofile/common.c
+++ b/arch/sh/oprofile/common.c
@@ -17,114 +17,45 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/smp.h>
+#include <linux/perf_event.h>
#include <asm/processor.h>
-#include "op_impl.h"
-static struct op_sh_model *model;
-
-static struct op_counter_config ctr[20];
-
+#ifdef CONFIG_HW_PERF_EVENTS
extern void sh_backtrace(struct pt_regs * const regs, unsigned int depth);
-static int op_sh_setup(void)
+char *op_name_from_perf_id(void)
{
- /* Pre-compute the values to stuff in the hardware registers. */
- model->reg_setup(ctr);
+ const char *pmu;
+ char buf[20];
+ int size;
- /* Configure the registers on all cpus. */
- on_each_cpu(model->cpu_setup, NULL, 1);
+ pmu = perf_pmu_name();
+ if (!pmu)
+ return NULL;
- return 0;
-}
+ size = snprintf(buf, sizeof(buf), "sh/%s", pmu);
+ if (size > -1 && size < sizeof(buf))
+ return buf;
-static int op_sh_create_files(struct super_block *sb, struct dentry *root)
-{
- int i, ret = 0;
-
- for (i = 0; i < model->num_counters; i++) {
- struct dentry *dir;
- char buf[4];
-
- snprintf(buf, sizeof(buf), "%d", i);
- dir = oprofilefs_mkdir(sb, root, buf);
-
- ret |= oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
- ret |= oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
- ret |= oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
- ret |= oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
-
- if (model->create_files)
- ret |= model->create_files(sb, dir);
- else
- ret |= oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
-
- /* Dummy entries */
- ret |= oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
- }
-
- return ret;
-}
-
-static int op_sh_start(void)
-{
- /* Enable performance monitoring for all counters. */
- on_each_cpu(model->cpu_start, NULL, 1);
-
- return 0;
-}
-
-static void op_sh_stop(void)
-{
- /* Disable performance monitoring for all counters. */
- on_each_cpu(model->cpu_stop, NULL, 1);
+ return NULL;
}
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
- struct op_sh_model *lmodel = NULL;
- int ret;
-
- /*
- * Always assign the backtrace op. If the counter initialization
- * fails, we fall back to the timer which will still make use of
- * this.
- */
ops->backtrace = sh_backtrace;
- /*
- * XXX
- *
- * All of the SH7750/SH-4A counters have been converted to perf,
- * this infrastructure hook is left for other users until they've
- * had a chance to convert over, at which point all of this
- * will be deleted.
- */
-
- if (!lmodel)
- return -ENODEV;
- if (!(current_cpu_data.flags & CPU_HAS_PERF_COUNTER))
- return -ENODEV;
-
- ret = lmodel->init();
- if (unlikely(ret != 0))
- return ret;
-
- model = lmodel;
-
- ops->setup = op_sh_setup;
- ops->create_files = op_sh_create_files;
- ops->start = op_sh_start;
- ops->stop = op_sh_stop;
- ops->cpu_type = lmodel->cpu_type;
-
- printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
- lmodel->cpu_type);
-
- return 0;
+ return oprofile_perf_init(ops);
}
-void oprofile_arch_exit(void)
+void __exit oprofile_arch_exit(void)
{
- if (model && model->exit)
- model->exit();
+ oprofile_perf_exit();
}
+#else
+int __init oprofile_arch_init(struct oprofile_operations *ops)
+{
+ pr_info("oprofile: hardware counters not available\n");
+ return -ENODEV;
+}
+void __exit oprofile_arch_exit(void) {}
+#endif /* CONFIG_HW_PERF_EVENTS */
diff --git a/arch/sh/oprofile/op_impl.h b/arch/sh/oprofile/op_impl.h
deleted file mode 100644
index 1244479..0000000
--- a/arch/sh/oprofile/op_impl.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef __OP_IMPL_H
-#define __OP_IMPL_H
-
-/* Per-counter configuration as set via oprofilefs. */
-struct op_counter_config {
- unsigned long enabled;
- unsigned long event;
-
- unsigned long count;
-
- /* Dummy values for userspace tool compliance */
- unsigned long kernel;
- unsigned long user;
- unsigned long unit_mask;
-};
-
-/* Per-architecture configury and hooks. */
-struct op_sh_model {
- void (*reg_setup)(struct op_counter_config *);
- int (*create_files)(struct super_block *sb, struct dentry *dir);
- void (*cpu_setup)(void *dummy);
- int (*init)(void);
- void (*exit)(void);
- void (*cpu_start)(void *args);
- void (*cpu_stop)(void *args);
- char *cpu_type;
- unsigned char num_counters;
-};
-
-/* arch/sh/oprofile/common.c */
-extern void sh_backtrace(struct pt_regs * const regs, unsigned int depth);
-
-#endif /* __OP_IMPL_H */
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 491e9d6..3e9d314 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -26,10 +26,12 @@
select ARCH_WANT_OPTIONAL_GPIOLIB
select RTC_CLASS
select RTC_DRV_M48T59
+ select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select PERF_USE_VMALLOC
select HAVE_DMA_ATTRS
select HAVE_DMA_API_DEBUG
+ select HAVE_ARCH_JUMP_LABEL
config SPARC32
def_bool !64BIT
@@ -53,6 +55,7 @@
select RTC_DRV_BQ4802
select RTC_DRV_SUN4V
select RTC_DRV_STARFIRE
+ select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select PERF_USE_VMALLOC
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index f0c7422..bdb2ff8 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -20,14 +20,14 @@
#define atomic64_set(v, i) (((v)->counter) = i)
extern void atomic_add(int, atomic_t *);
-extern void atomic64_add(int, atomic64_t *);
+extern void atomic64_add(long, atomic64_t *);
extern void atomic_sub(int, atomic_t *);
-extern void atomic64_sub(int, atomic64_t *);
+extern void atomic64_sub(long, atomic64_t *);
extern int atomic_add_ret(int, atomic_t *);
-extern long atomic64_add_ret(int, atomic64_t *);
+extern long atomic64_add_ret(long, atomic64_t *);
extern int atomic_sub_ret(int, atomic_t *);
-extern long atomic64_sub_ret(int, atomic64_t *);
+extern long atomic64_sub_ret(long, atomic64_t *);
#define atomic_dec_return(v) atomic_sub_ret(1, v)
#define atomic64_dec_return(v) atomic64_sub_ret(1, v)
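
The int -> long widening above is needed by the sparc64 rwsem rewrite later
in this series, which feeds 64-bit biases through atomic64_add_return(); with
the old prototypes the bias would be silently truncated at the call boundary.
A standalone userspace illustration (values mirror the RWSEM_WAITING_BIAS
constant introduced below):

    #include <stdio.h>

    int main(void)
    {
            long waiting_bias = -0xffffffffL - 1;   /* 0xffffffff00000000 */
            int as_int = (int)waiting_bias;         /* old parameter type */

            /* Prints "-4294967296 -> 0": the entire bias is lost. */
            printf("%ld -> %d\n", waiting_bias, as_int);
            return 0;
    }
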
diff --git a/arch/sparc/include/asm/backoff.h b/arch/sparc/include/asm/backoff.h
index fa1fdf6..db3af0d 100644
--- a/arch/sparc/include/asm/backoff.h
+++ b/arch/sparc/include/asm/backoff.h
@@ -8,6 +8,9 @@
#define BACKOFF_SETUP(reg) \
mov 1, reg
+#define BACKOFF_LABEL(spin_label, continue_label) \
+ spin_label
+
#define BACKOFF_SPIN(reg, tmp, label) \
mov reg, tmp; \
88: brnz,pt tmp, 88b; \
@@ -22,9 +25,11 @@
#else
#define BACKOFF_SETUP(reg)
-#define BACKOFF_SPIN(reg, tmp, label) \
- ba,pt %xcc, label; \
- nop;
+
+#define BACKOFF_LABEL(spin_label, continue_label) \
+ continue_label
+
+#define BACKOFF_SPIN(reg, tmp, label)
#endif
diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h
index 5016f76..6f57325 100644
--- a/arch/sparc/include/asm/compat.h
+++ b/arch/sparc/include/asm/compat.h
@@ -167,7 +167,7 @@
return (u32)(unsigned long)uptr;
}
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
{
struct pt_regs *regs = current_thread_info()->kregs;
unsigned long usp = regs->u_regs[UREG_I6];
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
new file mode 100644
index 0000000..62e66d7
--- /dev/null
+++ b/arch/sparc/include/asm/jump_label.h
@@ -0,0 +1,32 @@
+#ifndef _ASM_SPARC_JUMP_LABEL_H
+#define _ASM_SPARC_JUMP_LABEL_H
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <asm/system.h>
+
+#define JUMP_LABEL_NOP_SIZE 4
+
+#define JUMP_LABEL(key, label) \
+ do { \
+ asm goto("1:\n\t" \
+ "nop\n\t" \
+ "nop\n\t" \
+ ".pushsection __jump_table, \"a\"\n\t"\
+ ".word 1b, %l[" #label "], %c0\n\t" \
+ ".popsection \n\t" \
+ : : "i" (key) : : label);\
+ } while (0)
+
+#endif /* __KERNEL__ */
+
+typedef u32 jump_label_t;
+
+struct jump_entry {
+ jump_label_t code;
+ jump_label_t target;
+ jump_label_t key;
+};
+
+#endif
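
A JUMP_LABEL() site therefore costs two nops in the hot path plus one
__jump_table entry; enabling the key makes arch_jump_label_transform()
(added below) patch the first nop into a ba/ba,pt to the out-of-line label.
A hypothetical call site (the key and slow-path names are invented for
illustration):

    static int tracing_enabled;                  /* hypothetical key */

    static void handle_trace_event(void) { }     /* hypothetical slow path */

    static void hot_path(void)
    {
            JUMP_LABEL(&tracing_enabled, do_trace);
            return;                 /* key off: falls through the nops */
    do_trace:
            handle_trace_event();
    }
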
diff --git a/arch/sparc/include/asm/oplib_64.h b/arch/sparc/include/asm/oplib_64.h
index a5db031..3e0b2d6 100644
--- a/arch/sparc/include/asm/oplib_64.h
+++ b/arch/sparc/include/asm/oplib_64.h
@@ -185,9 +185,8 @@
char *buf, int buflen);
/* Retain physical memory to the caller across soft resets. */
-extern unsigned long prom_retain(const char *name,
- unsigned long pa_low, unsigned long pa_high,
- long size, long align);
+extern int prom_retain(const char *name, unsigned long size,
+ unsigned long align, unsigned long *paddr);
/* Load explicit I/D TLB entries into the calling processor. */
extern long prom_itlb_load(unsigned long index,
@@ -287,26 +286,6 @@
extern int prom_ihandle2path(int handle, char *buffer, int bufsize);
/* Client interface level routines. */
-extern long p1275_cmd(const char *, long, ...);
-
-#if 0
-#define P1275_SIZE(x) ((((long)((x) / 32)) << 32) | (x))
-#else
-#define P1275_SIZE(x) x
-#endif
-
-/* We support at most 16 input and 1 output argument */
-#define P1275_ARG_NUMBER 0
-#define P1275_ARG_IN_STRING 1
-#define P1275_ARG_OUT_BUF 2
-#define P1275_ARG_OUT_32B 3
-#define P1275_ARG_IN_FUNCTION 4
-#define P1275_ARG_IN_BUF 5
-#define P1275_ARG_IN_64B 6
-
-#define P1275_IN(x) ((x) & 0xf)
-#define P1275_OUT(x) (((x) << 4) & 0xf0)
-#define P1275_INOUT(i,o) (P1275_IN(i)|P1275_OUT(o))
-#define P1275_ARG(n,x) ((x) << ((n)*3 + 8))
+extern void p1275_cmd_direct(unsigned long *);
#endif /* !(__SPARC64_OPLIB_H) */
diff --git a/arch/sparc/include/asm/perf_event.h b/arch/sparc/include/asm/perf_event.h
index 727af70..6e8bfa1 100644
--- a/arch/sparc/include/asm/perf_event.h
+++ b/arch/sparc/include/asm/perf_event.h
@@ -1,10 +1,6 @@
#ifndef __ASM_SPARC_PERF_EVENT_H
#define __ASM_SPARC_PERF_EVENT_H
-extern void set_perf_event_pending(void);
-
-#define PERF_EVENT_INDEX_OFFSET 0
-
#ifdef CONFIG_PERF_EVENTS
#include <asm/ptrace.h>
diff --git a/arch/sparc/include/asm/rwsem-const.h b/arch/sparc/include/asm/rwsem-const.h
deleted file mode 100644
index e4c61a1..0000000
--- a/arch/sparc/include/asm/rwsem-const.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* rwsem-const.h: RW semaphore counter constants. */
-#ifndef _SPARC64_RWSEM_CONST_H
-#define _SPARC64_RWSEM_CONST_H
-
-#define RWSEM_UNLOCKED_VALUE 0x00000000
-#define RWSEM_ACTIVE_BIAS 0x00000001
-#define RWSEM_ACTIVE_MASK 0x0000ffff
-#define RWSEM_WAITING_BIAS (-0x00010000)
-#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-#endif /* _SPARC64_RWSEM_CONST_H */
diff --git a/arch/sparc/include/asm/rwsem.h b/arch/sparc/include/asm/rwsem.h
index 6e56210..a2b4302 100644
--- a/arch/sparc/include/asm/rwsem.h
+++ b/arch/sparc/include/asm/rwsem.h
@@ -15,16 +15,21 @@
#include <linux/list.h>
#include <linux/spinlock.h>
-#include <asm/rwsem-const.h>
struct rwsem_waiter;
struct rw_semaphore {
- signed int count;
- spinlock_t wait_lock;
- struct list_head wait_list;
+ signed long count;
+#define RWSEM_UNLOCKED_VALUE 0x00000000L
+#define RWSEM_ACTIVE_BIAS 0x00000001L
+#define RWSEM_ACTIVE_MASK 0xffffffffL
+#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1)
+#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
+#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+ spinlock_t wait_lock;
+ struct list_head wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
+ struct lockdep_map dep_map;
#endif
};
@@ -41,6 +46,11 @@
#define DECLARE_RWSEM(name) \
struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
+
extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
struct lock_class_key *key);
@@ -51,27 +61,103 @@
__init_rwsem((sem), #sem, &__key); \
} while (0)
-extern void __down_read(struct rw_semaphore *sem);
-extern int __down_read_trylock(struct rw_semaphore *sem);
-extern void __down_write(struct rw_semaphore *sem);
-extern int __down_write_trylock(struct rw_semaphore *sem);
-extern void __up_read(struct rw_semaphore *sem);
-extern void __up_write(struct rw_semaphore *sem);
-extern void __downgrade_write(struct rw_semaphore *sem);
+/*
+ * lock for reading
+ */
+static inline void __down_read(struct rw_semaphore *sem)
+{
+ if (unlikely(atomic64_inc_return((atomic64_t *)(&sem->count)) <= 0L))
+ rwsem_down_read_failed(sem);
+}
+static inline int __down_read_trylock(struct rw_semaphore *sem)
+{
+ long tmp;
+
+ while ((tmp = sem->count) >= 0L) {
+ if (tmp == cmpxchg(&sem->count, tmp,
+ tmp + RWSEM_ACTIVE_READ_BIAS)) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * lock for writing
+ */
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
- __down_write(sem);
+ long tmp;
+
+ tmp = atomic64_add_return(RWSEM_ACTIVE_WRITE_BIAS,
+ (atomic64_t *)(&sem->count));
+ if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
+ rwsem_down_write_failed(sem);
}
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
+static inline void __down_write(struct rw_semaphore *sem)
{
- return atomic_add_return(delta, (atomic_t *)(&sem->count));
+ __down_write_nested(sem, 0);
}
-static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
+static inline int __down_write_trylock(struct rw_semaphore *sem)
{
- atomic_add(delta, (atomic_t *)(&sem->count));
+ long tmp;
+
+ tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
+ RWSEM_ACTIVE_WRITE_BIAS);
+ return tmp == RWSEM_UNLOCKED_VALUE;
+}
+
+/*
+ * unlock after reading
+ */
+static inline void __up_read(struct rw_semaphore *sem)
+{
+ long tmp;
+
+ tmp = atomic64_dec_return((atomic64_t *)(&sem->count));
+ if (unlikely(tmp < -1L && (tmp & RWSEM_ACTIVE_MASK) == 0L))
+ rwsem_wake(sem);
+}
+
+/*
+ * unlock after writing
+ */
+static inline void __up_write(struct rw_semaphore *sem)
+{
+ if (unlikely(atomic64_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
+ (atomic64_t *)(&sem->count)) < 0L))
+ rwsem_wake(sem);
+}
+
+/*
+ * implement atomic add functionality
+ */
+static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
+{
+ atomic64_add(delta, (atomic64_t *)(&sem->count));
+}
+
+/*
+ * downgrade write lock to read lock
+ */
+static inline void __downgrade_write(struct rw_semaphore *sem)
+{
+ long tmp;
+
+ tmp = atomic64_add_return(-RWSEM_WAITING_BIAS, (atomic64_t *)(&sem->count));
+ if (tmp < 0L)
+ rwsem_downgrade_wake(sem);
+}
+
+/*
+ * implement exchange and add functionality
+ */
+static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
+{
+ return atomic64_add_return(delta, (atomic64_t *)(&sem->count));
}
static inline int rwsem_is_locked(struct rw_semaphore *sem)
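
The constants moved inline above define a split 64-bit count: the low 32 bits
hold the active reader/writer count, and the waiting bias in the high bits
makes the whole value negative whenever a writer holds the lock or waiters
are queued. Some illustrative values:

    /*
     * 0x0000000000000001   one reader, uncontended
     * 0x0000000000000003   three readers, uncontended
     * 0xffffffff00000001   one writer (RWSEM_ACTIVE_WRITE_BIAS): negative,
     *                      so the sign tests in __down_read(), __up_read()
     *                      and __up_write() detect contention cheaply
     */
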
diff --git a/arch/sparc/include/asm/system_64.h b/arch/sparc/include/asm/system_64.h
index d24cfe1..e3b65d8 100644
--- a/arch/sparc/include/asm/system_64.h
+++ b/arch/sparc/include/asm/system_64.h
@@ -106,6 +106,7 @@
*/
#define write_pic(__p) \
__asm__ __volatile__("ba,pt %%xcc, 99f\n\t" \
+ " nop\n\t" \
".align 64\n" \
"99:wr %0, 0x0, %%pic\n\t" \
"rd %%pic, %%g0" : : "r" (__p))
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 0c2dc1f..599398f 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -119,3 +119,5 @@
pc--$(CONFIG_PERF_EVENTS) := perf_event.o
obj-$(CONFIG_SPARC64) += $(pc--y)
+
+obj-$(CONFIG_SPARC64) += jump_label.o
diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
new file mode 100644
index 0000000..ea2dafc
--- /dev/null
+++ b/arch/sparc/kernel/jump_label.c
@@ -0,0 +1,47 @@
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/cpu.h>
+
+#include <linux/jump_label.h>
+#include <linux/memory.h>
+
+#ifdef HAVE_JUMP_LABEL
+
+void arch_jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ u32 val;
+ u32 *insn = (u32 *) (unsigned long) entry->code;
+
+ if (type == JUMP_LABEL_ENABLE) {
+ s32 off = (s32)entry->target - (s32)entry->code;
+
+#ifdef CONFIG_SPARC64
+ /* ba,pt %xcc, . + (off << 2) */
+ val = 0x10680000 | ((u32) off >> 2);
+#else
+ /* ba . + (off << 2) */
+ val = 0x10800000 | ((u32) off >> 2);
+#endif
+ } else {
+ val = 0x01000000;
+ }
+
+ get_online_cpus();
+ mutex_lock(&text_mutex);
+ *insn = val;
+ flushi(insn);
+ mutex_unlock(&text_mutex);
+ put_online_cpus();
+}
+
+void arch_jump_label_text_poke_early(jump_label_t addr)
+{
+ u32 *insn_p = (u32 *) (unsigned long) addr;
+
+ *insn_p = 0x01000000;
+ flushi(insn_p);
+}
+
+#endif
diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
index f848aad..ee3c7dd 100644
--- a/arch/sparc/kernel/module.c
+++ b/arch/sparc/kernel/module.c
@@ -18,6 +18,9 @@
#include <asm/spitfire.h>
#ifdef CONFIG_SPARC64
+
+#include <linux/jump_label.h>
+
static void *module_map(unsigned long size)
{
struct vm_struct *area;
@@ -227,6 +230,9 @@
const Elf_Shdr *sechdrs,
struct module *me)
{
+ /* make jump label nops */
+ jump_label_apply_nops(me);
+
/* Cheetah's I-cache is fully coherent. */
if (tlb_type == spitfire) {
unsigned long va;
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index c4a6a50..b87873c 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -7,7 +7,7 @@
#include <linux/init.h>
#include <linux/irq.h>
-#include <linux/perf_event.h>
+#include <linux/irq_work.h>
#include <linux/ftrace.h>
#include <asm/pil.h>
@@ -43,14 +43,14 @@
old_regs = set_irq_regs(regs);
irq_enter();
-#ifdef CONFIG_PERF_EVENTS
- perf_event_do_pending();
+#ifdef CONFIG_IRQ_WORK
+ irq_work_run();
#endif
irq_exit();
set_irq_regs(old_regs);
}
-void set_perf_event_pending(void)
+void arch_irq_work_raise(void)
{
set_softint(1 << PIL_DEFERRED_PCR_WORK);
}
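
With this change the arch no longer knows about perf specifically: any
subsystem may queue an irq_work from NMI-ish context and have it run from the
PIL_DEFERRED_PCR_WORK softint. A minimal client sketch, assuming the irq_work
API introduced alongside this series (init_irq_work()/irq_work_queue()):

    #include <linux/irq_work.h>

    static void my_deferred_fn(struct irq_work *work)
    {
            /* runs in hardirq context via irq_work_run() above */
    }

    static struct irq_work my_work;

    static void example(void)
    {
            init_irq_work(&my_work, my_deferred_fn);
            irq_work_queue(&my_work);   /* ends up in arch_irq_work_raise() */
    }
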
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 357ced3..0d6deb5 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -658,13 +658,16 @@
enc = perf_event_get_enc(cpuc->events[i]);
pcr &= ~mask_for_index(idx);
- pcr |= event_encoding(enc, idx);
+ if (hwc->state & PERF_HES_STOPPED)
+ pcr |= nop_for_index(idx);
+ else
+ pcr |= event_encoding(enc, idx);
}
out:
return pcr;
}
-void hw_perf_enable(void)
+static void sparc_pmu_enable(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
u64 pcr;
@@ -691,7 +694,7 @@
pcr_ops->write(cpuc->pcr);
}
-void hw_perf_disable(void)
+static void sparc_pmu_disable(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
u64 val;
@@ -710,48 +713,6 @@
pcr_ops->write(cpuc->pcr);
}
-static void sparc_pmu_disable(struct perf_event *event)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- struct hw_perf_event *hwc = &event->hw;
- unsigned long flags;
- int i;
-
- local_irq_save(flags);
- perf_disable();
-
- for (i = 0; i < cpuc->n_events; i++) {
- if (event == cpuc->event[i]) {
- int idx = cpuc->current_idx[i];
-
- /* Shift remaining entries down into
- * the existing slot.
- */
- while (++i < cpuc->n_events) {
- cpuc->event[i - 1] = cpuc->event[i];
- cpuc->events[i - 1] = cpuc->events[i];
- cpuc->current_idx[i - 1] =
- cpuc->current_idx[i];
- }
-
- /* Absorb the final count and turn off the
- * event.
- */
- sparc_pmu_disable_event(cpuc, hwc, idx);
- barrier();
- sparc_perf_event_update(event, hwc, idx);
-
- perf_event_update_userpage(event);
-
- cpuc->n_events--;
- break;
- }
- }
-
- perf_enable();
- local_irq_restore(flags);
-}
-
static int active_event_index(struct cpu_hw_events *cpuc,
struct perf_event *event)
{
@@ -765,6 +726,74 @@
return cpuc->current_idx[i];
}
+static void sparc_pmu_start(struct perf_event *event, int flags)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ int idx = active_event_index(cpuc, event);
+
+ if (flags & PERF_EF_RELOAD) {
+ WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+ sparc_perf_event_set_period(event, &event->hw, idx);
+ }
+
+ event->hw.state = 0;
+
+ sparc_pmu_enable_event(cpuc, &event->hw, idx);
+}
+
+static void sparc_pmu_stop(struct perf_event *event, int flags)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ int idx = active_event_index(cpuc, event);
+
+ if (!(event->hw.state & PERF_HES_STOPPED)) {
+ sparc_pmu_disable_event(cpuc, &event->hw, idx);
+ event->hw.state |= PERF_HES_STOPPED;
+ }
+
+ if (!(event->hw.state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) {
+ sparc_perf_event_update(event, &event->hw, idx);
+ event->hw.state |= PERF_HES_UPTODATE;
+ }
+}
+
+static void sparc_pmu_del(struct perf_event *event, int _flags)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ unsigned long flags;
+ int i;
+
+ local_irq_save(flags);
+ perf_pmu_disable(event->pmu);
+
+ for (i = 0; i < cpuc->n_events; i++) {
+ if (event == cpuc->event[i]) {
+ /* Absorb the final count and turn off the
+ * event.
+ */
+ sparc_pmu_stop(event, PERF_EF_UPDATE);
+
+ /* Shift remaining entries down into
+ * the existing slot.
+ */
+ while (++i < cpuc->n_events) {
+ cpuc->event[i - 1] = cpuc->event[i];
+ cpuc->events[i - 1] = cpuc->events[i];
+ cpuc->current_idx[i - 1] =
+ cpuc->current_idx[i];
+ }
+
+ perf_event_update_userpage(event);
+
+ cpuc->n_events--;
+ break;
+ }
+ }
+
+ perf_pmu_enable(event->pmu);
+ local_irq_restore(flags);
+}
+
static void sparc_pmu_read(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -774,15 +803,6 @@
sparc_perf_event_update(event, hwc, idx);
}
-static void sparc_pmu_unthrottle(struct perf_event *event)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- int idx = active_event_index(cpuc, event);
- struct hw_perf_event *hwc = &event->hw;
-
- sparc_pmu_enable_event(cpuc, hwc, idx);
-}
-
static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmc_grab_mutex);
@@ -877,7 +897,7 @@
if (!n_ev)
return 0;
- if (n_ev > perf_max_events)
+ if (n_ev > MAX_HWEVENTS)
return -1;
msk0 = perf_event_get_msk(events[0]);
@@ -984,23 +1004,27 @@
return n;
}
-static int sparc_pmu_enable(struct perf_event *event)
+static int sparc_pmu_add(struct perf_event *event, int ef_flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int n0, ret = -EAGAIN;
unsigned long flags;
local_irq_save(flags);
- perf_disable();
+ perf_pmu_disable(event->pmu);
n0 = cpuc->n_events;
- if (n0 >= perf_max_events)
+ if (n0 >= MAX_HWEVENTS)
goto out;
cpuc->event[n0] = event;
cpuc->events[n0] = event->hw.event_base;
cpuc->current_idx[n0] = PIC_NO_INDEX;
+ event->hw.state = PERF_HES_UPTODATE;
+ if (!(ef_flags & PERF_EF_START))
+ event->hw.state |= PERF_HES_STOPPED;
+
/*
* If group events scheduling transaction was started,
* skip the schedulability test here, it will be performed
@@ -1020,12 +1044,12 @@
ret = 0;
out:
- perf_enable();
+ perf_pmu_enable(event->pmu);
local_irq_restore(flags);
return ret;
}
-static int __hw_perf_event_init(struct perf_event *event)
+static int sparc_pmu_event_init(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
struct perf_event *evts[MAX_HWEVENTS];
@@ -1038,16 +1062,37 @@
if (atomic_read(&nmi_active) < 0)
return -ENODEV;
- if (attr->type == PERF_TYPE_HARDWARE) {
+ switch (attr->type) {
+ case PERF_TYPE_HARDWARE:
if (attr->config >= sparc_pmu->max_events)
return -EINVAL;
pmap = sparc_pmu->event_map(attr->config);
- } else if (attr->type == PERF_TYPE_HW_CACHE) {
+ break;
+
+ case PERF_TYPE_HW_CACHE:
pmap = sparc_map_cache_event(attr->config);
if (IS_ERR(pmap))
return PTR_ERR(pmap);
- } else
- return -EOPNOTSUPP;
+ break;
+
+ case PERF_TYPE_RAW:
+ pmap = NULL;
+ break;
+
+ default:
+ return -ENOENT;
+
+ }
+
+ if (pmap) {
+ hwc->event_base = perf_event_encode(pmap);
+ } else {
+ /*
+ * User gives us "(encoding << 16) | pic_mask" for
+ * PERF_TYPE_RAW events.
+ */
+ hwc->event_base = attr->config;
+ }
/* We save the enable bits in the config_base. */
hwc->config_base = sparc_pmu->irq_bit;
@@ -1058,12 +1103,10 @@
if (!attr->exclude_hv)
hwc->config_base |= sparc_pmu->hv_bit;
- hwc->event_base = perf_event_encode(pmap);
-
n = 0;
if (event->group_leader != event) {
n = collect_events(event->group_leader,
- perf_max_events - 1,
+ MAX_HWEVENTS - 1,
evts, events, current_idx_dmy);
if (n < 0)
return -EINVAL;
@@ -1099,10 +1142,11 @@
* Set the flag to make pmu::enable() not perform the
* schedulability test, it will be performed at commit time
*/
-static void sparc_pmu_start_txn(const struct pmu *pmu)
+static void sparc_pmu_start_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+ perf_pmu_disable(pmu);
cpuhw->group_flag |= PERF_EVENT_TXN;
}
@@ -1111,11 +1155,12 @@
* Clear the flag and pmu::enable() will perform the
* schedulability test.
*/
-static void sparc_pmu_cancel_txn(const struct pmu *pmu)
+static void sparc_pmu_cancel_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
cpuhw->group_flag &= ~PERF_EVENT_TXN;
+ perf_pmu_enable(pmu);
}
/*
@@ -1123,7 +1168,7 @@
* Perform the group schedulability test as a whole
* Return 0 if success
*/
-static int sparc_pmu_commit_txn(const struct pmu *pmu)
+static int sparc_pmu_commit_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int n;
@@ -1139,28 +1184,24 @@
return -EAGAIN;
cpuc->group_flag &= ~PERF_EVENT_TXN;
+ perf_pmu_enable(pmu);
return 0;
}
-static const struct pmu pmu = {
- .enable = sparc_pmu_enable,
- .disable = sparc_pmu_disable,
+static struct pmu pmu = {
+ .pmu_enable = sparc_pmu_enable,
+ .pmu_disable = sparc_pmu_disable,
+ .event_init = sparc_pmu_event_init,
+ .add = sparc_pmu_add,
+ .del = sparc_pmu_del,
+ .start = sparc_pmu_start,
+ .stop = sparc_pmu_stop,
.read = sparc_pmu_read,
- .unthrottle = sparc_pmu_unthrottle,
.start_txn = sparc_pmu_start_txn,
.cancel_txn = sparc_pmu_cancel_txn,
.commit_txn = sparc_pmu_commit_txn,
};
-const struct pmu *hw_perf_event_init(struct perf_event *event)
-{
- int err = __hw_perf_event_init(event);
-
- if (err)
- return ERR_PTR(err);
- return &pmu;
-}
-
void perf_event_print_debug(void)
{
unsigned long flags;
@@ -1236,7 +1277,7 @@
continue;
if (perf_event_overflow(event, 1, &data, regs))
- sparc_pmu_disable_event(cpuc, hwc, idx);
+ sparc_pmu_stop(event, 0);
}
return NOTIFY_STOP;
@@ -1277,28 +1318,21 @@
pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);
- /* All sparc64 PMUs currently have 2 events. */
- perf_max_events = 2;
-
+ perf_pmu_register(&pmu);
register_die_notifier(&perf_event_nmi_notifier);
}
-static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
-{
- if (entry->nr < PERF_MAX_STACK_DEPTH)
- entry->ip[entry->nr++] = ip;
-}
-
-static void perf_callchain_kernel(struct pt_regs *regs,
- struct perf_callchain_entry *entry)
+void perf_callchain_kernel(struct perf_callchain_entry *entry,
+ struct pt_regs *regs)
{
unsigned long ksp, fp;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
int graph = 0;
#endif
- callchain_store(entry, PERF_CONTEXT_KERNEL);
- callchain_store(entry, regs->tpc);
+ stack_trace_flush();
+
+ perf_callchain_store(entry, regs->tpc);
ksp = regs->u_regs[UREG_I6];
fp = ksp + STACK_BIAS;
@@ -1322,13 +1356,13 @@
pc = sf->callers_pc;
fp = (unsigned long)sf->fp + STACK_BIAS;
}
- callchain_store(entry, pc);
+ perf_callchain_store(entry, pc);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if ((pc + 8UL) == (unsigned long) &return_to_handler) {
int index = current->curr_ret_stack;
if (current->ret_stack && index >= graph) {
pc = current->ret_stack[index - graph].ret;
- callchain_store(entry, pc);
+ perf_callchain_store(entry, pc);
graph++;
}
}
@@ -1336,13 +1370,12 @@
} while (entry->nr < PERF_MAX_STACK_DEPTH);
}
-static void perf_callchain_user_64(struct pt_regs *regs,
- struct perf_callchain_entry *entry)
+static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+ struct pt_regs *regs)
{
unsigned long ufp;
- callchain_store(entry, PERF_CONTEXT_USER);
- callchain_store(entry, regs->tpc);
+ perf_callchain_store(entry, regs->tpc);
ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
do {
@@ -1355,17 +1388,16 @@
pc = sf.callers_pc;
ufp = (unsigned long)sf.fp + STACK_BIAS;
- callchain_store(entry, pc);
+ perf_callchain_store(entry, pc);
} while (entry->nr < PERF_MAX_STACK_DEPTH);
}
-static void perf_callchain_user_32(struct pt_regs *regs,
- struct perf_callchain_entry *entry)
+static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+ struct pt_regs *regs)
{
unsigned long ufp;
- callchain_store(entry, PERF_CONTEXT_USER);
- callchain_store(entry, regs->tpc);
+ perf_callchain_store(entry, regs->tpc);
ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
do {
@@ -1378,34 +1410,16 @@
pc = sf.callers_pc;
ufp = (unsigned long)sf.fp;
- callchain_store(entry, pc);
+ perf_callchain_store(entry, pc);
} while (entry->nr < PERF_MAX_STACK_DEPTH);
}
-/* Like powerpc we can't get PMU interrupts within the PMU handler,
- * so no need for separate NMI and IRQ chains as on x86.
- */
-static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
-
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
- struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
-
- entry->nr = 0;
- if (!user_mode(regs)) {
- stack_trace_flush();
- perf_callchain_kernel(regs, entry);
- if (current->mm)
- regs = task_pt_regs(current);
- else
- regs = NULL;
- }
- if (regs) {
- flushw_user();
- if (test_thread_flag(TIF_32BIT))
- perf_callchain_user_32(regs, entry);
- else
- perf_callchain_user_64(regs, entry);
- }
- return entry;
+ flushw_user();
+ if (test_thread_flag(TIF_32BIT))
+ perf_callchain_user_32(entry, regs);
+ else
+ perf_callchain_user_64(entry, regs);
}
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 485f547..c158a95 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -303,7 +303,7 @@
#ifdef CONFIG_MAGIC_SYSRQ
-static void sysrq_handle_globreg(int key, struct tty_struct *tty)
+static void sysrq_handle_globreg(int key)
{
arch_trigger_all_cpu_backtrace();
}
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index ea22cd3..75fad42 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -453,8 +453,66 @@
return err;
}
-static void setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
- int signo, sigset_t *oldset)
+/* The I-cache flush instruction only works in the primary ASI, which
+ * right now is the nucleus, aka. kernel space.
+ *
+ * Therefore we have to kick the instructions out using the kernel
+ * side linear mapping of the physical address backing the user
+ * instructions.
+ */
+static void flush_signal_insns(unsigned long address)
+{
+ unsigned long pstate, paddr;
+ pte_t *ptep, pte;
+ pgd_t *pgdp;
+ pud_t *pudp;
+ pmd_t *pmdp;
+
+ /* Commit all stores of the instructions we are about to flush. */
+ wmb();
+
+ /* Disable cross-call reception. In this way even a very wide
+ * munmap() on another cpu can't tear down the page table
+ * hierarchy from underneath us, since that can't complete
+ * until the IPI tlb flush returns.
+ */
+
+ __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
+ __asm__ __volatile__("wrpr %0, %1, %%pstate"
+ : : "r" (pstate), "i" (PSTATE_IE));
+
+ pgdp = pgd_offset(current->mm, address);
+ if (pgd_none(*pgdp))
+ goto out_irqs_on;
+ pudp = pud_offset(pgdp, address);
+ if (pud_none(*pudp))
+ goto out_irqs_on;
+ pmdp = pmd_offset(pudp, address);
+ if (pmd_none(*pmdp))
+ goto out_irqs_on;
+
+ ptep = pte_offset_map(pmdp, address);
+ pte = *ptep;
+ if (!pte_present(pte))
+ goto out_unmap;
+
+ paddr = (unsigned long) page_address(pte_page(pte));
+
+ __asm__ __volatile__("flush %0 + %1"
+ : /* no outputs */
+ : "r" (paddr),
+ "r" (address & (PAGE_SIZE - 1))
+ : "memory");
+
+out_unmap:
+ pte_unmap(ptep);
+out_irqs_on:
+ __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
+
+}
+
+static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
+ int signo, sigset_t *oldset)
{
struct signal_frame32 __user *sf;
int sigframe_size;
@@ -547,13 +605,7 @@
if (ka->ka_restorer) {
regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
} else {
- /* Flush instruction space. */
unsigned long address = ((unsigned long)&(sf->insns[0]));
- pgd_t *pgdp = pgd_offset(current->mm, address);
- pud_t *pudp = pud_offset(pgdp, address);
- pmd_t *pmdp = pmd_offset(pudp, address);
- pte_t *ptep;
- pte_t pte;
regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
@@ -562,34 +614,22 @@
if (err)
goto sigsegv;
- preempt_disable();
- ptep = pte_offset_map(pmdp, address);
- pte = *ptep;
- if (pte_present(pte)) {
- unsigned long page = (unsigned long)
- page_address(pte_page(pte));
-
- wmb();
- __asm__ __volatile__("flush %0 + %1"
- : /* no outputs */
- : "r" (page),
- "r" (address & (PAGE_SIZE - 1))
- : "memory");
- }
- pte_unmap(ptep);
- preempt_enable();
+ flush_signal_insns(address);
}
- return;
+ return 0;
sigill:
do_exit(SIGILL);
+ return -EINVAL;
+
sigsegv:
force_sigsegv(signo, current);
+ return -EFAULT;
}
-static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
- unsigned long signr, sigset_t *oldset,
- siginfo_t *info)
+static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
+ unsigned long signr, sigset_t *oldset,
+ siginfo_t *info)
{
struct rt_signal_frame32 __user *sf;
int sigframe_size;
@@ -687,12 +727,7 @@
if (ka->ka_restorer)
regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
else {
- /* Flush instruction space. */
unsigned long address = ((unsigned long)&(sf->insns[0]));
- pgd_t *pgdp = pgd_offset(current->mm, address);
- pud_t *pudp = pud_offset(pgdp, address);
- pmd_t *pmdp = pmd_offset(pudp, address);
- pte_t *ptep;
regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
@@ -704,38 +739,32 @@
if (err)
goto sigsegv;
- preempt_disable();
- ptep = pte_offset_map(pmdp, address);
- if (pte_present(*ptep)) {
- unsigned long page = (unsigned long)
- page_address(pte_page(*ptep));
-
- wmb();
- __asm__ __volatile__("flush %0 + %1"
- : /* no outputs */
- : "r" (page),
- "r" (address & (PAGE_SIZE - 1))
- : "memory");
- }
- pte_unmap(ptep);
- preempt_enable();
+ flush_signal_insns(address);
}
- return;
+ return 0;
sigill:
do_exit(SIGILL);
+ return -EINVAL;
+
sigsegv:
force_sigsegv(signr, current);
+ return -EFAULT;
}
-static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka,
- siginfo_t *info,
- sigset_t *oldset, struct pt_regs *regs)
+static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka,
+ siginfo_t *info,
+ sigset_t *oldset, struct pt_regs *regs)
{
+ int err;
+
if (ka->sa.sa_flags & SA_SIGINFO)
- setup_rt_frame32(ka, regs, signr, oldset, info);
+ err = setup_rt_frame32(ka, regs, signr, oldset, info);
else
- setup_frame32(ka, regs, signr, oldset);
+ err = setup_frame32(ka, regs, signr, oldset);
+
+ if (err)
+ return err;
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -743,6 +772,10 @@
sigaddset(&current->blocked,signr);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
+
+ tracehook_signal_handler(signr, info, ka, regs, 0);
+
+ return 0;
}
static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs,
@@ -789,16 +822,14 @@
if (signr > 0) {
if (restart_syscall)
syscall_restart32(orig_i0, regs, &ka.sa);
- handle_signal32(signr, &ka, &info, oldset, regs);
-
- /* A signal was successfully delivered; the saved
- * sigmask will have been stored in the signal frame,
- * and will be restored by sigreturn, so we can simply
- * clear the TS_RESTORE_SIGMASK flag.
- */
- current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-
- tracehook_signal_handler(signr, &info, &ka, regs, 0);
+ if (handle_signal32(signr, &ka, &info, oldset, regs) == 0) {
+ /* A signal was successfully delivered; the saved
+ * sigmask will have been stored in the signal frame,
+ * and will be restored by sigreturn, so we can simply
+ * clear the TS_RESTORE_SIGMASK flag.
+ */
+ current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+ }
return;
}
if (restart_syscall &&
@@ -809,12 +840,14 @@
regs->u_regs[UREG_I0] = orig_i0;
regs->tpc -= 4;
regs->tnpc -= 4;
+ pt_regs_clear_syscall(regs);
}
if (restart_syscall &&
regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->tpc -= 4;
regs->tnpc -= 4;
+ pt_regs_clear_syscall(regs);
}
/* If there's no signal to deliver, we just put the saved sigmask
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 9882df9..5e5c5fd 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -315,8 +315,8 @@
return err;
}
-static void setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
- int signo, sigset_t *oldset)
+static int setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
+ int signo, sigset_t *oldset)
{
struct signal_frame __user *sf;
int sigframe_size, err;
@@ -384,16 +384,19 @@
/* Flush instruction space. */
flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
}
- return;
+ return 0;
sigill_and_return:
do_exit(SIGILL);
+ return -EINVAL;
+
sigsegv:
force_sigsegv(signo, current);
+ return -EFAULT;
}
-static void setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
- int signo, sigset_t *oldset, siginfo_t *info)
+static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
+ int signo, sigset_t *oldset, siginfo_t *info)
{
struct rt_signal_frame __user *sf;
int sigframe_size;
@@ -466,22 +469,30 @@
/* Flush instruction space. */
flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
}
- return;
+ return 0;
sigill:
do_exit(SIGILL);
+ return -EINVAL;
+
sigsegv:
force_sigsegv(signo, current);
+ return -EFAULT;
}
-static inline void
+static inline int
handle_signal(unsigned long signr, struct k_sigaction *ka,
siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
{
+ int err;
+
if (ka->sa.sa_flags & SA_SIGINFO)
- setup_rt_frame(ka, regs, signr, oldset, info);
+ err = setup_rt_frame(ka, regs, signr, oldset, info);
else
- setup_frame(ka, regs, signr, oldset);
+ err = setup_frame(ka, regs, signr, oldset);
+
+ if (err)
+ return err;
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -489,6 +500,10 @@
sigaddset(&current->blocked, signr);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
+
+ tracehook_signal_handler(signr, info, ka, regs, 0);
+
+ return 0;
}
static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
@@ -546,17 +561,15 @@
if (signr > 0) {
if (restart_syscall)
syscall_restart(orig_i0, regs, &ka.sa);
- handle_signal(signr, &ka, &info, oldset, regs);
-
- /* a signal was successfully delivered; the saved
- * sigmask will have been stored in the signal frame,
- * and will be restored by sigreturn, so we can simply
- * clear the TIF_RESTORE_SIGMASK flag.
- */
- if (test_thread_flag(TIF_RESTORE_SIGMASK))
- clear_thread_flag(TIF_RESTORE_SIGMASK);
-
- tracehook_signal_handler(signr, &info, &ka, regs, 0);
+ if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
+ /* a signal was successfully delivered; the saved
+ * sigmask will have been stored in the signal frame,
+ * and will be restored by sigreturn, so we can simply
+ * clear the TIF_RESTORE_SIGMASK flag.
+ */
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+ }
return;
}
if (restart_syscall &&
@@ -567,12 +580,14 @@
regs->u_regs[UREG_I0] = orig_i0;
regs->pc -= 4;
regs->npc -= 4;
+ pt_regs_clear_syscall(regs);
}
if (restart_syscall &&
regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->pc -= 4;
regs->npc -= 4;
+ pt_regs_clear_syscall(regs);
}
/* if there's no signal to deliver, we just put the saved sigmask
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index 9fa48c3..006fe45 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -409,7 +409,7 @@
return (void __user *) sp;
}
-static inline void
+static inline int
setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
int signo, sigset_t *oldset, siginfo_t *info)
{
@@ -483,26 +483,37 @@
}
/* 4. return to kernel instructions */
regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
- return;
+ return 0;
sigill:
do_exit(SIGILL);
+ return -EINVAL;
+
sigsegv:
force_sigsegv(signo, current);
+ return -EFAULT;
}
-static inline void handle_signal(unsigned long signr, struct k_sigaction *ka,
- siginfo_t *info,
- sigset_t *oldset, struct pt_regs *regs)
+static inline int handle_signal(unsigned long signr, struct k_sigaction *ka,
+ siginfo_t *info,
+ sigset_t *oldset, struct pt_regs *regs)
{
- setup_rt_frame(ka, regs, signr, oldset,
- (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
+ int err;
+
+ err = setup_rt_frame(ka, regs, signr, oldset,
+ (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
+ if (err)
+ return err;
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NOMASK))
sigaddset(&current->blocked,signr);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
+
+ tracehook_signal_handler(signr, info, ka, regs, 0);
+
+ return 0;
}
static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
@@ -571,16 +582,14 @@
if (signr > 0) {
if (restart_syscall)
syscall_restart(orig_i0, regs, &ka.sa);
- handle_signal(signr, &ka, &info, oldset, regs);
-
- /* A signal was successfully delivered; the saved
- * sigmask will have been stored in the signal frame,
- * and will be restored by sigreturn, so we can simply
- * clear the TS_RESTORE_SIGMASK flag.
- */
- current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-
- tracehook_signal_handler(signr, &info, &ka, regs, 0);
+ if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
+ /* A signal was successfully delivered; the saved
+ * sigmask will have been stored in the signal frame,
+ * and will be restored by sigreturn, so we can simply
+ * clear the TS_RESTORE_SIGMASK flag.
+ */
+ current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+ }
return;
}
if (restart_syscall &&
@@ -591,12 +600,14 @@
regs->u_regs[UREG_I0] = orig_i0;
regs->tpc -= 4;
regs->tnpc -= 4;
+ pt_regs_clear_syscall(regs);
}
if (restart_syscall &&
regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->tpc -= 4;
regs->tnpc -= 4;
+ pt_regs_clear_syscall(regs);
}
/* If there's no signal to deliver, we just put the saved sigmask
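[ All three signal paths (signal32.c, signal_32.c, signal_64.c) receive
  the same treatment: frame setup now returns 0 or a -errno instead of
  void, and the caller performs the blocked-mask update, the
  TS_/TIF_RESTORE_SIGMASK clear and the tracehook notification only on
  success.  A compilable userspace toy of that control flow, with
  purely illustrative names:

	#include <errno.h>
	#include <stdio.h>

	static int setup_frame(int simulate_fault)
	{
		if (simulate_fault)
			return -EFAULT;	/* kernel: force_sigsegv(), then -EFAULT */
		return 0;		/* frame written and flushed */
	}

	int main(void)
	{
		int restore_pending = 1;	/* stands in for TS_RESTORE_SIGMASK */

		if (setup_frame(0) == 0)
			restore_pending = 0;	/* cleared only on success */
		printf("restore_pending=%d\n", restore_pending);
		return 0;
	}
]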
diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
index 5079413..675c9e1 100644
--- a/arch/sparc/kernel/sys_sparc_32.c
+++ b/arch/sparc/kernel/sys_sparc_32.c
@@ -166,7 +166,6 @@
{
siginfo_t info;
- lock_kernel();
#ifdef DEBUG_SPARC_BREAKPOINT
printk ("TRAP: Entering kernel PC=%x, nPC=%x\n", regs->pc, regs->npc);
#endif
@@ -180,7 +179,6 @@
#ifdef DEBUG_SPARC_BREAKPOINT
printk ("TRAP: Returning to space: PC=%x nPC=%x\n", regs->pc, regs->npc);
#endif
- unlock_kernel();
}
asmlinkage int
diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c
index f8514e2..12b9f35 100644
--- a/arch/sparc/kernel/unaligned_32.c
+++ b/arch/sparc/kernel/unaligned_32.c
@@ -323,7 +323,6 @@
{
enum direction dir;
- lock_kernel();
if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) ||
(((insn >> 30) & 3) != 3))
goto kill_user;
@@ -377,5 +376,5 @@
kill_user:
user_mna_trap_fault(regs, insn);
out:
- unlock_kernel();
+ ;
}
diff --git a/arch/sparc/kernel/windows.c b/arch/sparc/kernel/windows.c
index f24d298..b351770 100644
--- a/arch/sparc/kernel/windows.c
+++ b/arch/sparc/kernel/windows.c
@@ -112,7 +112,6 @@
struct thread_info *tp = current_thread_info();
int window;
- lock_kernel();
flush_user_windows();
for(window = 0; window < tp->w_saved; window++) {
unsigned long sp = tp->rwbuf_stkptrs[window];
@@ -123,5 +122,4 @@
do_exit(SIGILL);
}
tp->w_saved = 0;
- unlock_kernel();
}
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index c4b5e03..846d1c4 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -15,7 +15,7 @@
lib-$(CONFIG_SPARC32) += copy_user.o locks.o
lib-y += atomic_$(BITS).o
lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o
-lib-y += rwsem_$(BITS).o
+lib-$(CONFIG_SPARC32) += rwsem_32.o
lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o
diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
index 0268210..59186e0 100644
--- a/arch/sparc/lib/atomic_64.S
+++ b/arch/sparc/lib/atomic_64.S
@@ -21,7 +21,7 @@
add %g1, %o0, %g7
cas [%o1], %g1, %g7
cmp %g1, %g7
- bne,pn %icc, 2f
+ bne,pn %icc, BACKOFF_LABEL(2f, 1b)
nop
retl
nop
@@ -36,7 +36,7 @@
sub %g1, %o0, %g7
cas [%o1], %g1, %g7
cmp %g1, %g7
- bne,pn %icc, 2f
+ bne,pn %icc, BACKOFF_LABEL(2f, 1b)
nop
retl
nop
@@ -51,11 +51,10 @@
add %g1, %o0, %g7
cas [%o1], %g1, %g7
cmp %g1, %g7
- bne,pn %icc, 2f
- add %g7, %o0, %g7
- sra %g7, 0, %o0
+ bne,pn %icc, BACKOFF_LABEL(2f, 1b)
+ add %g1, %o0, %g1
retl
- nop
+ sra %g1, 0, %o0
2: BACKOFF_SPIN(%o2, %o3, 1b)
.size atomic_add_ret, .-atomic_add_ret
@@ -67,11 +66,10 @@
sub %g1, %o0, %g7
cas [%o1], %g1, %g7
cmp %g1, %g7
- bne,pn %icc, 2f
- sub %g7, %o0, %g7
- sra %g7, 0, %o0
+ bne,pn %icc, BACKOFF_LABEL(2f, 1b)
+ sub %g1, %o0, %g1
retl
- nop
+ sra %g1, 0, %o0
2: BACKOFF_SPIN(%o2, %o3, 1b)
.size atomic_sub_ret, .-atomic_sub_ret
@@ -83,7 +81,7 @@
add %g1, %o0, %g7
casx [%o1], %g1, %g7
cmp %g1, %g7
- bne,pn %xcc, 2f
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
nop
retl
nop
@@ -98,7 +96,7 @@
sub %g1, %o0, %g7
casx [%o1], %g1, %g7
cmp %g1, %g7
- bne,pn %xcc, 2f
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
nop
retl
nop
@@ -113,11 +111,10 @@
add %g1, %o0, %g7
casx [%o1], %g1, %g7
cmp %g1, %g7
- bne,pn %xcc, 2f
- add %g7, %o0, %g7
- mov %g7, %o0
- retl
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
nop
+ retl
+ add %g1, %o0, %o0
2: BACKOFF_SPIN(%o2, %o3, 1b)
.size atomic64_add_ret, .-atomic64_add_ret
@@ -129,10 +126,9 @@
sub %g1, %o0, %g7
casx [%o1], %g1, %g7
cmp %g1, %g7
- bne,pn %xcc, 2f
- sub %g7, %o0, %g7
- mov %g7, %o0
- retl
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
nop
+ retl
+ sub %g1, %o0, %o0
2: BACKOFF_SPIN(%o2, %o3, 1b)
.size atomic64_sub_ret, .-atomic64_sub_ret
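[ BACKOFF_LABEL() is what makes these branch targets dual-purpose: on
  SMP a failed cas branches to the backoff-spin slow path, while on UP
  -- where the only way to lose the race is an interrupt -- it branches
  straight back to the retry label.  The macro is presumably along
  these lines (a sketch, not quoted from this patch):

	#ifdef CONFIG_SMP
	#define BACKOFF_LABEL(spin_label, continue_label)	spin_label
	#else
	#define BACKOFF_LABEL(spin_label, continue_label)	continue_label
	#endif
]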
diff --git a/arch/sparc/lib/bitops.S b/arch/sparc/lib/bitops.S
index 2b7228c..3dc61d5 100644
--- a/arch/sparc/lib/bitops.S
+++ b/arch/sparc/lib/bitops.S
@@ -22,7 +22,7 @@
or %g7, %o2, %g1
casx [%o1], %g7, %g1
cmp %g7, %g1
- bne,pn %xcc, 2f
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
and %g7, %o2, %g2
clr %o0
movrne %g2, 1, %o0
@@ -45,7 +45,7 @@
andn %g7, %o2, %g1
casx [%o1], %g7, %g1
cmp %g7, %g1
- bne,pn %xcc, 2f
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
and %g7, %o2, %g2
clr %o0
movrne %g2, 1, %o0
@@ -68,7 +68,7 @@
xor %g7, %o2, %g1
casx [%o1], %g7, %g1
cmp %g7, %g1
- bne,pn %xcc, 2f
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
and %g7, %o2, %g2
clr %o0
movrne %g2, 1, %o0
@@ -91,7 +91,7 @@
or %g7, %o2, %g1
casx [%o1], %g7, %g1
cmp %g7, %g1
- bne,pn %xcc, 2f
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
nop
retl
nop
@@ -112,7 +112,7 @@
andn %g7, %o2, %g1
casx [%o1], %g7, %g1
cmp %g7, %g1
- bne,pn %xcc, 2f
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
nop
retl
nop
@@ -133,7 +133,7 @@
xor %g7, %o2, %g1
casx [%o1], %g7, %g1
cmp %g7, %g1
- bne,pn %xcc, 2f
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
nop
retl
nop
diff --git a/arch/sparc/lib/rwsem_64.S b/arch/sparc/lib/rwsem_64.S
deleted file mode 100644
index 91a7d29..0000000
--- a/arch/sparc/lib/rwsem_64.S
+++ /dev/null
@@ -1,163 +0,0 @@
-/* rwsem.S: RW semaphore assembler.
- *
- * Written by David S. Miller (davem@redhat.com), 2001.
- * Derived from asm-i386/rwsem.h
- */
-
-#include <asm/rwsem-const.h>
-
- .section .sched.text, "ax"
-
- .globl __down_read
-__down_read:
-1: lduw [%o0], %g1
- add %g1, 1, %g7
- cas [%o0], %g1, %g7
- cmp %g1, %g7
- bne,pn %icc, 1b
- add %g7, 1, %g7
- cmp %g7, 0
- bl,pn %icc, 3f
- nop
-2:
- retl
- nop
-3:
- save %sp, -192, %sp
- call rwsem_down_read_failed
- mov %i0, %o0
- ret
- restore
- .size __down_read, .-__down_read
-
- .globl __down_read_trylock
-__down_read_trylock:
-1: lduw [%o0], %g1
- add %g1, 1, %g7
- cmp %g7, 0
- bl,pn %icc, 2f
- mov 0, %o1
- cas [%o0], %g1, %g7
- cmp %g1, %g7
- bne,pn %icc, 1b
- mov 1, %o1
-2: retl
- mov %o1, %o0
- .size __down_read_trylock, .-__down_read_trylock
-
- .globl __down_write
-__down_write:
- sethi %hi(RWSEM_ACTIVE_WRITE_BIAS), %g1
- or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
-1:
- lduw [%o0], %g3
- add %g3, %g1, %g7
- cas [%o0], %g3, %g7
- cmp %g3, %g7
- bne,pn %icc, 1b
- cmp %g7, 0
- bne,pn %icc, 3f
- nop
-2: retl
- nop
-3:
- save %sp, -192, %sp
- call rwsem_down_write_failed
- mov %i0, %o0
- ret
- restore
- .size __down_write, .-__down_write
-
- .globl __down_write_trylock
-__down_write_trylock:
- sethi %hi(RWSEM_ACTIVE_WRITE_BIAS), %g1
- or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
-1:
- lduw [%o0], %g3
- cmp %g3, 0
- bne,pn %icc, 2f
- mov 0, %o1
- add %g3, %g1, %g7
- cas [%o0], %g3, %g7
- cmp %g3, %g7
- bne,pn %icc, 1b
- mov 1, %o1
-2: retl
- mov %o1, %o0
- .size __down_write_trylock, .-__down_write_trylock
-
- .globl __up_read
-__up_read:
-1:
- lduw [%o0], %g1
- sub %g1, 1, %g7
- cas [%o0], %g1, %g7
- cmp %g1, %g7
- bne,pn %icc, 1b
- cmp %g7, 0
- bl,pn %icc, 3f
- nop
-2: retl
- nop
-3: sethi %hi(RWSEM_ACTIVE_MASK), %g1
- sub %g7, 1, %g7
- or %g1, %lo(RWSEM_ACTIVE_MASK), %g1
- andcc %g7, %g1, %g0
- bne,pn %icc, 2b
- nop
- save %sp, -192, %sp
- call rwsem_wake
- mov %i0, %o0
- ret
- restore
- .size __up_read, .-__up_read
-
- .globl __up_write
-__up_write:
- sethi %hi(RWSEM_ACTIVE_WRITE_BIAS), %g1
- or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
-1:
- lduw [%o0], %g3
- sub %g3, %g1, %g7
- cas [%o0], %g3, %g7
- cmp %g3, %g7
- bne,pn %icc, 1b
- sub %g7, %g1, %g7
- cmp %g7, 0
- bl,pn %icc, 3f
- nop
-2:
- retl
- nop
-3:
- save %sp, -192, %sp
- call rwsem_wake
- mov %i0, %o0
- ret
- restore
- .size __up_write, .-__up_write
-
- .globl __downgrade_write
-__downgrade_write:
- sethi %hi(RWSEM_WAITING_BIAS), %g1
- or %g1, %lo(RWSEM_WAITING_BIAS), %g1
-1:
- lduw [%o0], %g3
- sub %g3, %g1, %g7
- cas [%o0], %g3, %g7
- cmp %g3, %g7
- bne,pn %icc, 1b
- sub %g7, %g1, %g7
- cmp %g7, 0
- bl,pn %icc, 3f
- nop
-2:
- retl
- nop
-3:
- save %sp, -192, %sp
- call rwsem_downgrade_wake
- mov %i0, %o0
- ret
- restore
- .size __downgrade_write, .-__downgrade_write
diff --git a/arch/sparc/prom/cif.S b/arch/sparc/prom/cif.S
index 5f27ad7..9c86b4b 100644
--- a/arch/sparc/prom/cif.S
+++ b/arch/sparc/prom/cif.S
@@ -9,18 +9,18 @@
#include <asm/thread_info.h>
.text
- .globl prom_cif_interface
-prom_cif_interface:
- sethi %hi(p1275buf), %o0
- or %o0, %lo(p1275buf), %o0
- ldx [%o0 + 0x010], %o1 ! prom_cif_stack
- save %o1, -192, %sp
- ldx [%i0 + 0x008], %l2 ! prom_cif_handler
+ .globl prom_cif_direct
+prom_cif_direct:
+ sethi %hi(p1275buf), %o1
+ or %o1, %lo(p1275buf), %o1
+ ldx [%o1 + 0x0010], %o2 ! prom_cif_stack
+ save %o2, -192, %sp
+ ldx [%i1 + 0x0008], %l2 ! prom_cif_handler
mov %g4, %l0
mov %g5, %l1
mov %g6, %l3
call %l2
- add %i0, 0x018, %o0 ! prom_args
+ mov %i0, %o0 ! prom_args
mov %l0, %g4
mov %l1, %g5
mov %l3, %g6
diff --git a/arch/sparc/prom/console_64.c b/arch/sparc/prom/console_64.c
index f55d58a..10322dc 100644
--- a/arch/sparc/prom/console_64.c
+++ b/arch/sparc/prom/console_64.c
@@ -21,14 +21,22 @@
inline int
prom_nbgetchar(void)
{
+ unsigned long args[7];
char inc;
- if (p1275_cmd("read", P1275_ARG(1,P1275_ARG_OUT_BUF)|
- P1275_INOUT(3,1),
- prom_stdin, &inc, P1275_SIZE(1)) == 1)
+ args[0] = (unsigned long) "read";
+ args[1] = 3;
+ args[2] = 1;
+ args[3] = (unsigned int) prom_stdin;
+ args[4] = (unsigned long) &inc;
+ args[5] = 1;
+ args[6] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
+
+ if (args[6] == 1)
return inc;
- else
- return -1;
+ return -1;
}
/* Non blocking put character to console device, returns -1 if
@@ -37,12 +45,22 @@
inline int
prom_nbputchar(char c)
{
+ unsigned long args[7];
char outc;
outc = c;
- if (p1275_cmd("write", P1275_ARG(1,P1275_ARG_IN_BUF)|
- P1275_INOUT(3,1),
- prom_stdout, &outc, P1275_SIZE(1)) == 1)
+
+ args[0] = (unsigned long) "write";
+ args[1] = 3;
+ args[2] = 1;
+ args[3] = (unsigned int) prom_stdout;
+ args[4] = (unsigned long) &outc;
+ args[5] = 1;
+ args[6] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
+
+ if (args[6] == 1)
return 0;
else
return -1;
@@ -67,7 +85,15 @@
void
prom_puts(const char *s, int len)
{
- p1275_cmd("write", P1275_ARG(1,P1275_ARG_IN_BUF)|
- P1275_INOUT(3,1),
- prom_stdout, s, P1275_SIZE(len));
+ unsigned long args[7];
+
+ args[0] = (unsigned long) "write";
+ args[1] = 3;
+ args[2] = 1;
+ args[3] = (unsigned int) prom_stdout;
+ args[4] = (unsigned long) s;
+ args[5] = len;
+ args[6] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
}
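[ Every p1275_cmd_direct() conversion in this series uses the same
  fixed args[] layout: cell 0 is the service name, cell 1 the number of
  input cells, cell 2 the number of return cells, then the inputs, then
  one cell per return value pre-filled with -1 for the PROM to
  overwrite.  A hypothetical wrapper showing the shape (the service
  name here is made up):

	static int prom_example_call(int input)
	{
		unsigned long args[5];

		args[0] = (unsigned long) "example-service";
		args[1] = 1;			/* # input cells  */
		args[2] = 1;			/* # return cells */
		args[3] = (unsigned int) input;
		args[4] = (unsigned long) -1;	/* return slot    */

		p1275_cmd_direct(args);

		return (int) args[4];
	}

  Services with several return values simply pre-fill more trailing
  cells; prom_retain() below declares three and reads them back
  positionally. ]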
diff --git a/arch/sparc/prom/devops_64.c b/arch/sparc/prom/devops_64.c
index 9dbd803..a017119 100644
--- a/arch/sparc/prom/devops_64.c
+++ b/arch/sparc/prom/devops_64.c
@@ -18,16 +18,32 @@
int
prom_devopen(const char *dstr)
{
- return p1275_cmd ("open", P1275_ARG(0,P1275_ARG_IN_STRING)|
- P1275_INOUT(1,1),
- dstr);
+ unsigned long args[5];
+
+ args[0] = (unsigned long) "open";
+ args[1] = 1;
+ args[2] = 1;
+ args[3] = (unsigned long) dstr;
+ args[4] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
+
+ return (int) args[4];
}
/* Close the device described by device handle 'dhandle'. */
int
prom_devclose(int dhandle)
{
- p1275_cmd ("close", P1275_INOUT(1,0), dhandle);
+ unsigned long args[4];
+
+ args[0] = (unsigned long) "close";
+ args[1] = 1;
+ args[2] = 0;
+ args[3] = (unsigned int) dhandle;
+
+ p1275_cmd_direct(args);
+
return 0;
}
@@ -37,5 +53,15 @@
void
prom_seek(int dhandle, unsigned int seekhi, unsigned int seeklo)
{
- p1275_cmd ("seek", P1275_INOUT(3,1), dhandle, seekhi, seeklo);
+ unsigned long args[7];
+
+ args[0] = (unsigned long) "seek";
+ args[1] = 3;
+ args[2] = 1;
+ args[3] = (unsigned int) dhandle;
+ args[4] = seekhi;
+ args[5] = seeklo;
+ args[6] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
}
diff --git a/arch/sparc/prom/misc_64.c b/arch/sparc/prom/misc_64.c
index 39fc6af..6cb1581 100644
--- a/arch/sparc/prom/misc_64.c
+++ b/arch/sparc/prom/misc_64.c
@@ -20,10 +20,17 @@
int prom_service_exists(const char *service_name)
{
- int err = p1275_cmd("test", P1275_ARG(0, P1275_ARG_IN_STRING) |
- P1275_INOUT(1, 1), service_name);
+ unsigned long args[5];
- if (err)
+ args[0] = (unsigned long) "test";
+ args[1] = 1;
+ args[2] = 1;
+ args[3] = (unsigned long) service_name;
+ args[4] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
+
+ if (args[4])
return 0;
return 1;
}
@@ -31,30 +38,47 @@
void prom_sun4v_guest_soft_state(void)
{
const char *svc = "SUNW,soft-state-supported";
+ unsigned long args[3];
if (!prom_service_exists(svc))
return;
- p1275_cmd(svc, P1275_INOUT(0, 0));
+ args[0] = (unsigned long) svc;
+ args[1] = 0;
+ args[2] = 0;
+ p1275_cmd_direct(args);
}
/* Reset and reboot the machine with the command 'bcommand'. */
void prom_reboot(const char *bcommand)
{
+ unsigned long args[4];
+
#ifdef CONFIG_SUN_LDOMS
if (ldom_domaining_enabled)
ldom_reboot(bcommand);
#endif
- p1275_cmd("boot", P1275_ARG(0, P1275_ARG_IN_STRING) |
- P1275_INOUT(1, 0), bcommand);
+ args[0] = (unsigned long) "boot";
+ args[1] = 1;
+ args[2] = 0;
+ args[3] = (unsigned long) bcommand;
+
+ p1275_cmd_direct(args);
}
/* Forth evaluate the expression contained in 'fstring'. */
void prom_feval(const char *fstring)
{
+ unsigned long args[5];
+
if (!fstring || fstring[0] == 0)
return;
- p1275_cmd("interpret", P1275_ARG(0, P1275_ARG_IN_STRING) |
- P1275_INOUT(1, 1), fstring);
+ args[0] = (unsigned long) "interpret";
+ args[1] = 1;
+ args[2] = 1;
+ args[3] = (unsigned long) fstring;
+ args[4] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
}
EXPORT_SYMBOL(prom_feval);
@@ -68,6 +92,7 @@
*/
void prom_cmdline(void)
{
+ unsigned long args[3];
unsigned long flags;
local_irq_save(flags);
@@ -76,7 +101,11 @@
smp_capture();
#endif
- p1275_cmd("enter", P1275_INOUT(0, 0));
+ args[0] = (unsigned long) "enter";
+ args[1] = 0;
+ args[2] = 0;
+
+ p1275_cmd_direct(args);
#ifdef CONFIG_SMP
smp_release();
@@ -90,22 +119,32 @@
*/
void notrace prom_halt(void)
{
+ unsigned long args[3];
+
#ifdef CONFIG_SUN_LDOMS
if (ldom_domaining_enabled)
ldom_power_off();
#endif
again:
- p1275_cmd("exit", P1275_INOUT(0, 0));
+ args[0] = (unsigned long) "exit";
+ args[1] = 0;
+ args[2] = 0;
+ p1275_cmd_direct(args);
goto again; /* PROM is out to get me -DaveM */
}
void prom_halt_power_off(void)
{
+ unsigned long args[3];
+
#ifdef CONFIG_SUN_LDOMS
if (ldom_domaining_enabled)
ldom_power_off();
#endif
- p1275_cmd("SUNW,power-off", P1275_INOUT(0, 0));
+ args[0] = (unsigned long) "SUNW,power-off";
+ args[1] = 0;
+ args[2] = 0;
+ p1275_cmd_direct(args);
/* if nothing else helps, we just halt */
prom_halt();
@@ -114,10 +153,15 @@
/* Set prom sync handler to call function 'funcp'. */
void prom_setcallback(callback_func_t funcp)
{
+ unsigned long args[5];
if (!funcp)
return;
- p1275_cmd("set-callback", P1275_ARG(0, P1275_ARG_IN_FUNCTION) |
- P1275_INOUT(1, 1), funcp);
+ args[0] = (unsigned long) "set-callback";
+ args[1] = 1;
+ args[2] = 1;
+ args[3] = (unsigned long) funcp;
+ args[4] = (unsigned long) -1;
+ p1275_cmd_direct(args);
}
/* Get the idprom and stuff it into buffer 'idbuf'. Returns the
@@ -173,57 +217,61 @@
}
/* Load explicit I/D TLB entries. */
+static long tlb_load(const char *type, unsigned long index,
+ unsigned long tte_data, unsigned long vaddr)
+{
+ unsigned long args[9];
+
+ args[0] = (unsigned long) prom_callmethod_name;
+ args[1] = 5;
+ args[2] = 1;
+ args[3] = (unsigned long) type;
+ args[4] = (unsigned int) prom_get_mmu_ihandle();
+ args[5] = vaddr;
+ args[6] = tte_data;
+ args[7] = index;
+ args[8] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
+
+ return (long) args[8];
+}
+
long prom_itlb_load(unsigned long index,
unsigned long tte_data,
unsigned long vaddr)
{
- return p1275_cmd(prom_callmethod_name,
- (P1275_ARG(0, P1275_ARG_IN_STRING) |
- P1275_ARG(2, P1275_ARG_IN_64B) |
- P1275_ARG(3, P1275_ARG_IN_64B) |
- P1275_INOUT(5, 1)),
- "SUNW,itlb-load",
- prom_get_mmu_ihandle(),
- /* And then our actual args are pushed backwards. */
- vaddr,
- tte_data,
- index);
+ return tlb_load("SUNW,itlb-load", index, tte_data, vaddr);
}
long prom_dtlb_load(unsigned long index,
unsigned long tte_data,
unsigned long vaddr)
{
- return p1275_cmd(prom_callmethod_name,
- (P1275_ARG(0, P1275_ARG_IN_STRING) |
- P1275_ARG(2, P1275_ARG_IN_64B) |
- P1275_ARG(3, P1275_ARG_IN_64B) |
- P1275_INOUT(5, 1)),
- "SUNW,dtlb-load",
- prom_get_mmu_ihandle(),
- /* And then our actual args are pushed backwards. */
- vaddr,
- tte_data,
- index);
+ return tlb_load("SUNW,dtlb-load", index, tte_data, vaddr);
}
int prom_map(int mode, unsigned long size,
unsigned long vaddr, unsigned long paddr)
{
- int ret = p1275_cmd(prom_callmethod_name,
- (P1275_ARG(0, P1275_ARG_IN_STRING) |
- P1275_ARG(3, P1275_ARG_IN_64B) |
- P1275_ARG(4, P1275_ARG_IN_64B) |
- P1275_ARG(6, P1275_ARG_IN_64B) |
- P1275_INOUT(7, 1)),
- prom_map_name,
- prom_get_mmu_ihandle(),
- mode,
- size,
- vaddr,
- 0,
- paddr);
+ unsigned long args[11];
+ int ret;
+ args[0] = (unsigned long) prom_callmethod_name;
+ args[1] = 7;
+ args[2] = 1;
+ args[3] = (unsigned long) prom_map_name;
+ args[4] = (unsigned int) prom_get_mmu_ihandle();
+ args[5] = (unsigned int) mode;
+ args[6] = size;
+ args[7] = vaddr;
+ args[8] = 0;
+ args[9] = paddr;
+ args[10] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
+
+ ret = (int) args[10];
if (ret == 0)
ret = -1;
return ret;
@@ -231,40 +279,51 @@
void prom_unmap(unsigned long size, unsigned long vaddr)
{
- p1275_cmd(prom_callmethod_name,
- (P1275_ARG(0, P1275_ARG_IN_STRING) |
- P1275_ARG(2, P1275_ARG_IN_64B) |
- P1275_ARG(3, P1275_ARG_IN_64B) |
- P1275_INOUT(4, 0)),
- prom_unmap_name,
- prom_get_mmu_ihandle(),
- size,
- vaddr);
+ unsigned long args[7];
+
+ args[0] = (unsigned long) prom_callmethod_name;
+ args[1] = 4;
+ args[2] = 0;
+ args[3] = (unsigned long) prom_unmap_name;
+ args[4] = (unsigned int) prom_get_mmu_ihandle();
+ args[5] = size;
+ args[6] = vaddr;
+
+ p1275_cmd_direct(args);
}
/* Set aside physical memory which is not touched or modified
* across soft resets.
*/
-unsigned long prom_retain(const char *name,
- unsigned long pa_low, unsigned long pa_high,
- long size, long align)
+int prom_retain(const char *name, unsigned long size,
+ unsigned long align, unsigned long *paddr)
{
- /* XXX I don't think we return multiple values correctly.
- * XXX OBP supposedly returns pa_low/pa_high here, how does
- * XXX it work?
- */
+ unsigned long args[11];
- /* If align is zero, the pa_low/pa_high args are passed,
- * else they are not.
+ args[0] = (unsigned long) prom_callmethod_name;
+ args[1] = 5;
+ args[2] = 3;
+ args[3] = (unsigned long) "SUNW,retain";
+ args[4] = (unsigned int) prom_get_memory_ihandle();
+ args[5] = align;
+ args[6] = size;
+ args[7] = (unsigned long) name;
+ args[8] = (unsigned long) -1;
+ args[9] = (unsigned long) -1;
+ args[10] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
+
+ if (args[8])
+ return (int) args[8];
+
+ /* Next we get "phys_high" then "phys_low". On 64-bit
+ * the phys_high cell is don't care since the phys_low
+ * cell has the full value.
*/
- if (align == 0)
- return p1275_cmd("SUNW,retain",
- (P1275_ARG(0, P1275_ARG_IN_BUF) | P1275_INOUT(5, 2)),
- name, pa_low, pa_high, size, align);
- else
- return p1275_cmd("SUNW,retain",
- (P1275_ARG(0, P1275_ARG_IN_BUF) | P1275_INOUT(3, 2)),
- name, size, align);
+ *paddr = args[10];
+
+ return 0;
}
/* Get "Unumber" string for the SIMM at the given
@@ -277,62 +336,129 @@
unsigned long phys_addr,
char *buf, int buflen)
{
- return p1275_cmd(prom_callmethod_name,
- (P1275_ARG(0, P1275_ARG_IN_STRING) |
- P1275_ARG(3, P1275_ARG_OUT_BUF) |
- P1275_ARG(6, P1275_ARG_IN_64B) |
- P1275_INOUT(8, 2)),
- "SUNW,get-unumber", prom_get_memory_ihandle(),
- buflen, buf, P1275_SIZE(buflen),
- 0, phys_addr, syndrome_code);
+ unsigned long args[12];
+
+ args[0] = (unsigned long) prom_callmethod_name;
+ args[1] = 7;
+ args[2] = 2;
+ args[3] = (unsigned long) "SUNW,get-unumber";
+ args[4] = (unsigned int) prom_get_memory_ihandle();
+ args[5] = buflen;
+ args[6] = (unsigned long) buf;
+ args[7] = 0;
+ args[8] = phys_addr;
+ args[9] = (unsigned int) syndrome_code;
+ args[10] = (unsigned long) -1;
+ args[11] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
+
+ return (int) args[10];
}
/* Power management extensions. */
void prom_sleepself(void)
{
- p1275_cmd("SUNW,sleep-self", P1275_INOUT(0, 0));
+ unsigned long args[3];
+
+ args[0] = (unsigned long) "SUNW,sleep-self";
+ args[1] = 0;
+ args[2] = 0;
+ p1275_cmd_direct(args);
}
int prom_sleepsystem(void)
{
- return p1275_cmd("SUNW,sleep-system", P1275_INOUT(0, 1));
+ unsigned long args[4];
+
+ args[0] = (unsigned long) "SUNW,sleep-system";
+ args[1] = 0;
+ args[2] = 1;
+ args[3] = (unsigned long) -1;
+ p1275_cmd_direct(args);
+
+ return (int) args[3];
}
int prom_wakeupsystem(void)
{
- return p1275_cmd("SUNW,wakeup-system", P1275_INOUT(0, 1));
+ unsigned long args[4];
+
+ args[0] = (unsigned long) "SUNW,wakeup-system";
+ args[1] = 0;
+ args[2] = 1;
+ args[3] = (unsigned long) -1;
+ p1275_cmd_direct(args);
+
+ return (int) args[3];
}
#ifdef CONFIG_SMP
void prom_startcpu(int cpunode, unsigned long pc, unsigned long arg)
{
- p1275_cmd("SUNW,start-cpu", P1275_INOUT(3, 0), cpunode, pc, arg);
+ unsigned long args[6];
+
+ args[0] = (unsigned long) "SUNW,start-cpu";
+ args[1] = 3;
+ args[2] = 0;
+ args[3] = (unsigned int) cpunode;
+ args[4] = pc;
+ args[5] = arg;
+ p1275_cmd_direct(args);
}
void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg)
{
- p1275_cmd("SUNW,start-cpu-by-cpuid", P1275_INOUT(3, 0),
- cpuid, pc, arg);
+ unsigned long args[6];
+
+ args[0] = (unsigned long) "SUNW,start-cpu-by-cpuid";
+ args[1] = 3;
+ args[2] = 0;
+ args[3] = (unsigned int) cpuid;
+ args[4] = pc;
+ args[5] = arg;
+ p1275_cmd_direct(args);
}
void prom_stopcpu_cpuid(int cpuid)
{
- p1275_cmd("SUNW,stop-cpu-by-cpuid", P1275_INOUT(1, 0),
- cpuid);
+ unsigned long args[4];
+
+ args[0] = (unsigned long) "SUNW,stop-cpu-by-cpuid";
+ args[1] = 1;
+ args[2] = 0;
+ args[3] = (unsigned int) cpuid;
+ p1275_cmd_direct(args);
}
void prom_stopself(void)
{
- p1275_cmd("SUNW,stop-self", P1275_INOUT(0, 0));
+ unsigned long args[3];
+
+ args[0] = (unsigned long) "SUNW,stop-self";
+ args[1] = 0;
+ args[2] = 0;
+ p1275_cmd_direct(args);
}
void prom_idleself(void)
{
- p1275_cmd("SUNW,idle-self", P1275_INOUT(0, 0));
+ unsigned long args[3];
+
+ args[0] = (unsigned long) "SUNW,idle-self";
+ args[1] = 0;
+ args[2] = 0;
+ p1275_cmd_direct(args);
}
void prom_resumecpu(int cpunode)
{
- p1275_cmd("SUNW,resume-cpu", P1275_INOUT(1, 0), cpunode);
+ unsigned long args[4];
+
+ args[0] = (unsigned long) "SUNW,resume-cpu";
+ args[1] = 1;
+ args[2] = 0;
+ args[3] = (unsigned int) cpunode;
+ p1275_cmd_direct(args);
}
#endif
diff --git a/arch/sparc/prom/p1275.c b/arch/sparc/prom/p1275.c
index 2d8b70d..fa6e4e2 100644
--- a/arch/sparc/prom/p1275.c
+++ b/arch/sparc/prom/p1275.c
@@ -22,13 +22,11 @@
long prom_callback; /* 0x00 */
void (*prom_cif_handler)(long *); /* 0x08 */
unsigned long prom_cif_stack; /* 0x10 */
- unsigned long prom_args [23]; /* 0x18 */
- char prom_buffer [3000];
} p1275buf;
extern void prom_world(int);
-extern void prom_cif_interface(void);
+extern void prom_cif_direct(unsigned long *args);
extern void prom_cif_callback(void);
/*
@@ -36,114 +34,20 @@
*/
DEFINE_RAW_SPINLOCK(prom_entry_lock);
-long p1275_cmd(const char *service, long fmt, ...)
+void p1275_cmd_direct(unsigned long *args)
{
- char *p, *q;
unsigned long flags;
- int nargs, nrets, i;
- va_list list;
- long attrs, x;
-
- p = p1275buf.prom_buffer;
raw_local_save_flags(flags);
raw_local_irq_restore(PIL_NMI);
raw_spin_lock(&prom_entry_lock);
- p1275buf.prom_args[0] = (unsigned long)p; /* service */
- strcpy (p, service);
- p = (char *)(((long)(strchr (p, 0) + 8)) & ~7);
- p1275buf.prom_args[1] = nargs = (fmt & 0x0f); /* nargs */
- p1275buf.prom_args[2] = nrets = ((fmt & 0xf0) >> 4); /* nrets */
- attrs = fmt >> 8;
- va_start(list, fmt);
- for (i = 0; i < nargs; i++, attrs >>= 3) {
- switch (attrs & 0x7) {
- case P1275_ARG_NUMBER:
- p1275buf.prom_args[i + 3] =
- (unsigned)va_arg(list, long);
- break;
- case P1275_ARG_IN_64B:
- p1275buf.prom_args[i + 3] =
- va_arg(list, unsigned long);
- break;
- case P1275_ARG_IN_STRING:
- strcpy (p, va_arg(list, char *));
- p1275buf.prom_args[i + 3] = (unsigned long)p;
- p = (char *)(((long)(strchr (p, 0) + 8)) & ~7);
- break;
- case P1275_ARG_OUT_BUF:
- (void) va_arg(list, char *);
- p1275buf.prom_args[i + 3] = (unsigned long)p;
- x = va_arg(list, long);
- i++; attrs >>= 3;
- p = (char *)(((long)(p + (int)x + 7)) & ~7);
- p1275buf.prom_args[i + 3] = x;
- break;
- case P1275_ARG_IN_BUF:
- q = va_arg(list, char *);
- p1275buf.prom_args[i + 3] = (unsigned long)p;
- x = va_arg(list, long);
- i++; attrs >>= 3;
- memcpy (p, q, (int)x);
- p = (char *)(((long)(p + (int)x + 7)) & ~7);
- p1275buf.prom_args[i + 3] = x;
- break;
- case P1275_ARG_OUT_32B:
- (void) va_arg(list, char *);
- p1275buf.prom_args[i + 3] = (unsigned long)p;
- p += 32;
- break;
- case P1275_ARG_IN_FUNCTION:
- p1275buf.prom_args[i + 3] =
- (unsigned long)prom_cif_callback;
- p1275buf.prom_callback = va_arg(list, long);
- break;
- }
- }
- va_end(list);
-
prom_world(1);
- prom_cif_interface();
+ prom_cif_direct(args);
prom_world(0);
- attrs = fmt >> 8;
- va_start(list, fmt);
- for (i = 0; i < nargs; i++, attrs >>= 3) {
- switch (attrs & 0x7) {
- case P1275_ARG_NUMBER:
- (void) va_arg(list, long);
- break;
- case P1275_ARG_IN_STRING:
- (void) va_arg(list, char *);
- break;
- case P1275_ARG_IN_FUNCTION:
- (void) va_arg(list, long);
- break;
- case P1275_ARG_IN_BUF:
- (void) va_arg(list, char *);
- (void) va_arg(list, long);
- i++; attrs >>= 3;
- break;
- case P1275_ARG_OUT_BUF:
- p = va_arg(list, char *);
- x = va_arg(list, long);
- memcpy (p, (char *)(p1275buf.prom_args[i + 3]), (int)x);
- i++; attrs >>= 3;
- break;
- case P1275_ARG_OUT_32B:
- p = va_arg(list, char *);
- memcpy (p, (char *)(p1275buf.prom_args[i + 3]), 32);
- break;
- }
- }
- va_end(list);
- x = p1275buf.prom_args [nargs + 3];
-
raw_spin_unlock(&prom_entry_lock);
raw_local_irq_restore(flags);
-
- return x;
}
void prom_cif_init(void *cif_handler, void *cif_stack)
diff --git a/arch/sparc/prom/tree_64.c b/arch/sparc/prom/tree_64.c
index 3c0d2dd..9d3f913 100644
--- a/arch/sparc/prom/tree_64.c
+++ b/arch/sparc/prom/tree_64.c
@@ -16,22 +16,39 @@
#include <asm/oplib.h>
#include <asm/ldc.h>
+static int prom_node_to_node(const char *type, int node)
+{
+ unsigned long args[5];
+
+ args[0] = (unsigned long) type;
+ args[1] = 1;
+ args[2] = 1;
+ args[3] = (unsigned int) node;
+ args[4] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
+
+ return (int) args[4];
+}
+
/* Return the child of node 'node' or zero if this node has no
* direct descendent.
*/
inline int __prom_getchild(int node)
{
- return p1275_cmd ("child", P1275_INOUT(1, 1), node);
+ return prom_node_to_node("child", node);
}
inline int prom_getchild(int node)
{
int cnode;
- if(node == -1) return 0;
+ if (node == -1)
+ return 0;
cnode = __prom_getchild(node);
- if(cnode == -1) return 0;
- return (int)cnode;
+ if (cnode == -1)
+ return 0;
+ return cnode;
}
EXPORT_SYMBOL(prom_getchild);
@@ -39,10 +56,12 @@
{
int cnode;
- if(node == -1) return 0;
- cnode = p1275_cmd ("parent", P1275_INOUT(1, 1), node);
- if(cnode == -1) return 0;
- return (int)cnode;
+ if (node == -1)
+ return 0;
+ cnode = prom_node_to_node("parent", node);
+ if (cnode == -1)
+ return 0;
+ return cnode;
}
/* Return the next sibling of node 'node' or zero if no more siblings
@@ -50,7 +69,7 @@
*/
inline int __prom_getsibling(int node)
{
- return p1275_cmd(prom_peer_name, P1275_INOUT(1, 1), node);
+ return prom_node_to_node(prom_peer_name, node);
}
inline int prom_getsibling(int node)
@@ -72,11 +91,21 @@
*/
inline int prom_getproplen(int node, const char *prop)
{
- if((!node) || (!prop)) return -1;
- return p1275_cmd ("getproplen",
- P1275_ARG(1,P1275_ARG_IN_STRING)|
- P1275_INOUT(2, 1),
- node, prop);
+ unsigned long args[6];
+
+ if (!node || !prop)
+ return -1;
+
+ args[0] = (unsigned long) "getproplen";
+ args[1] = 2;
+ args[2] = 1;
+ args[3] = (unsigned int) node;
+ args[4] = (unsigned long) prop;
+ args[5] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
+
+ return (int) args[5];
}
EXPORT_SYMBOL(prom_getproplen);
@@ -87,19 +116,25 @@
inline int prom_getproperty(int node, const char *prop,
char *buffer, int bufsize)
{
+ unsigned long args[8];
int plen;
plen = prom_getproplen(node, prop);
- if ((plen > bufsize) || (plen == 0) || (plen == -1)) {
+ if ((plen > bufsize) || (plen == 0) || (plen == -1))
return -1;
- } else {
- /* Ok, things seem all right. */
- return p1275_cmd(prom_getprop_name,
- P1275_ARG(1,P1275_ARG_IN_STRING)|
- P1275_ARG(2,P1275_ARG_OUT_BUF)|
- P1275_INOUT(4, 1),
- node, prop, buffer, P1275_SIZE(plen));
- }
+
+ args[0] = (unsigned long) prom_getprop_name;
+ args[1] = 4;
+ args[2] = 1;
+ args[3] = (unsigned int) node;
+ args[4] = (unsigned long) prop;
+ args[5] = (unsigned long) buffer;
+ args[6] = bufsize;
+ args[7] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
+
+ return (int) args[7];
}
EXPORT_SYMBOL(prom_getproperty);
@@ -110,7 +145,7 @@
{
int intprop;
- if(prom_getproperty(node, prop, (char *) &intprop, sizeof(int)) != -1)
+ if (prom_getproperty(node, prop, (char *) &intprop, sizeof(int)) != -1)
return intprop;
return -1;
@@ -126,7 +161,8 @@
int retval;
retval = prom_getint(node, property);
- if(retval == -1) return deflt;
+ if (retval == -1)
+ return deflt;
return retval;
}
@@ -138,7 +174,8 @@
int retval;
retval = prom_getproplen(node, prop);
- if(retval == -1) return 0;
+ if (retval == -1)
+ return 0;
return 1;
}
EXPORT_SYMBOL(prom_getbool);
@@ -152,7 +189,8 @@
int len;
len = prom_getproperty(node, prop, user_buf, ubuf_size);
- if(len != -1) return;
+ if (len != -1)
+ return;
user_buf[0] = 0;
}
EXPORT_SYMBOL(prom_getstring);
@@ -164,7 +202,8 @@
{
char namebuf[128];
prom_getproperty(node, "name", namebuf, sizeof(namebuf));
- if(strcmp(namebuf, name) == 0) return 1;
+ if (strcmp(namebuf, name) == 0)
+ return 1;
return 0;
}
@@ -190,16 +229,29 @@
}
EXPORT_SYMBOL(prom_searchsiblings);
+static const char *prom_nextprop_name = "nextprop";
+
/* Return the first property type for node 'node'.
* buffer should be at least 32B in length
*/
inline char *prom_firstprop(int node, char *buffer)
{
+ unsigned long args[7];
+
*buffer = 0;
- if(node == -1) return buffer;
- p1275_cmd ("nextprop", P1275_ARG(2,P1275_ARG_OUT_32B)|
- P1275_INOUT(3, 0),
- node, (char *) 0x0, buffer);
+ if (node == -1)
+ return buffer;
+
+ args[0] = (unsigned long) prom_nextprop_name;
+ args[1] = 3;
+ args[2] = 1;
+ args[3] = (unsigned int) node;
+ args[4] = 0;
+ args[5] = (unsigned long) buffer;
+ args[6] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
+
return buffer;
}
EXPORT_SYMBOL(prom_firstprop);
@@ -210,9 +262,10 @@
*/
inline char *prom_nextprop(int node, const char *oprop, char *buffer)
{
+ unsigned long args[7];
char buf[32];
- if(node == -1) {
+ if (node == -1) {
*buffer = 0;
return buffer;
}
@@ -220,10 +273,17 @@
strcpy (buf, oprop);
oprop = buf;
}
- p1275_cmd ("nextprop", P1275_ARG(1,P1275_ARG_IN_STRING)|
- P1275_ARG(2,P1275_ARG_OUT_32B)|
- P1275_INOUT(3, 0),
- node, oprop, buffer);
+
+ args[0] = (unsigned long) prom_nextprop_name;
+ args[1] = 3;
+ args[2] = 1;
+ args[3] = (unsigned int) node;
+ args[4] = (unsigned long) oprop;
+ args[5] = (unsigned long) buffer;
+ args[6] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
+
return buffer;
}
EXPORT_SYMBOL(prom_nextprop);
@@ -231,12 +291,19 @@
int
prom_finddevice(const char *name)
{
+ unsigned long args[5];
+
if (!name)
return 0;
- return p1275_cmd(prom_finddev_name,
- P1275_ARG(0,P1275_ARG_IN_STRING)|
- P1275_INOUT(1, 1),
- name);
+ args[0] = (unsigned long) "finddevice";
+ args[1] = 1;
+ args[2] = 1;
+ args[3] = (unsigned long) name;
+ args[4] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
+
+ return (int) args[4];
}
EXPORT_SYMBOL(prom_finddevice);
@@ -247,7 +314,7 @@
*buf = 0;
do {
prom_nextprop(node, buf, buf);
- if(!strcmp(buf, prop))
+ if (!strcmp(buf, prop))
return 1;
} while (*buf);
return 0;
@@ -260,6 +327,8 @@
int
prom_setprop(int node, const char *pname, char *value, int size)
{
+ unsigned long args[8];
+
if (size == 0)
return 0;
if ((pname == 0) || (value == 0))
@@ -271,19 +340,37 @@
return 0;
}
#endif
- return p1275_cmd ("setprop", P1275_ARG(1,P1275_ARG_IN_STRING)|
- P1275_ARG(2,P1275_ARG_IN_BUF)|
- P1275_INOUT(4, 1),
- node, pname, value, P1275_SIZE(size));
+ args[0] = (unsigned long) "setprop";
+ args[1] = 4;
+ args[2] = 1;
+ args[3] = (unsigned int) node;
+ args[4] = (unsigned long) pname;
+ args[5] = (unsigned long) value;
+ args[6] = size;
+ args[7] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
+
+ return (int) args[7];
}
EXPORT_SYMBOL(prom_setprop);
inline int prom_inst2pkg(int inst)
{
+ unsigned long args[5];
int node;
- node = p1275_cmd ("instance-to-package", P1275_INOUT(1, 1), inst);
- if (node == -1) return 0;
+ args[0] = (unsigned long) "instance-to-package";
+ args[1] = 1;
+ args[2] = 1;
+ args[3] = (unsigned int) inst;
+ args[4] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
+
+ node = (int) args[4];
+ if (node == -1)
+ return 0;
return node;
}
@@ -296,17 +383,28 @@
int node, inst;
inst = prom_devopen (path);
- if (inst == 0) return 0;
- node = prom_inst2pkg (inst);
- prom_devclose (inst);
- if (node == -1) return 0;
+ if (inst == 0)
+ return 0;
+ node = prom_inst2pkg(inst);
+ prom_devclose(inst);
+ if (node == -1)
+ return 0;
return node;
}
int prom_ihandle2path(int handle, char *buffer, int bufsize)
{
- return p1275_cmd("instance-to-path",
- P1275_ARG(1,P1275_ARG_OUT_BUF)|
- P1275_INOUT(3, 1),
- handle, buffer, P1275_SIZE(bufsize));
+ unsigned long args[7];
+
+ args[0] = (unsigned long) "instance-to-path";
+ args[1] = 3;
+ args[2] = 1;
+ args[3] = (unsigned int) handle;
+ args[4] = (unsigned long) buffer;
+ args[5] = bufsize;
+ args[6] = (unsigned long) -1;
+
+ p1275_cmd_direct(args);
+
+ return (int) args[6];
}
diff --git a/arch/tile/include/arch/chip_tile64.h b/arch/tile/include/arch/chip_tile64.h
index 1246573..261aaba 100644
--- a/arch/tile/include/arch/chip_tile64.h
+++ b/arch/tile/include/arch/chip_tile64.h
@@ -150,6 +150,9 @@
/** Is the PROC_STATUS SPR supported? */
#define CHIP_HAS_PROC_STATUS_SPR() 0
+/** Is the DSTREAM_PF SPR supported? */
+#define CHIP_HAS_DSTREAM_PF() 0
+
/** Log of the number of mshims we have. */
#define CHIP_LOG_NUM_MSHIMS() 2
diff --git a/arch/tile/include/arch/chip_tilepro.h b/arch/tile/include/arch/chip_tilepro.h
index e864c47..7001769 100644
--- a/arch/tile/include/arch/chip_tilepro.h
+++ b/arch/tile/include/arch/chip_tilepro.h
@@ -150,6 +150,9 @@
/** Is the PROC_STATUS SPR supported? */
#define CHIP_HAS_PROC_STATUS_SPR() 1
+/** Is the DSTREAM_PF SPR supported? */
+#define CHIP_HAS_DSTREAM_PF() 0
+
/** Log of the number of mshims we have. */
#define CHIP_LOG_NUM_MSHIMS() 2
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index 5a34da6..8b60ec8 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -195,7 +195,7 @@
return (long)(int)(long __force)uptr;
}
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
{
struct pt_regs *regs = task_pt_regs(current);
return (void __user *)regs->sp - len;
@@ -214,8 +214,9 @@
struct compat_sigaction;
struct compat_siginfo;
struct compat_sigaltstack;
-long compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
- compat_uptr_t __user *envp);
+long compat_sys_execve(const char __user *path,
+ const compat_uptr_t __user *argv,
+ const compat_uptr_t __user *envp);
long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act,
struct compat_sigaction __user *oact,
size_t sigsetsize);
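[ The compat_alloc_user_space() -> arch_compat_alloc_user_space()
  rename implies the generic header now wraps the arch helper with a
  range check, roughly as below; this generic side is an assumption,
  it is not part of this hunk:

	static inline void __user *compat_alloc_user_space(unsigned long len)
	{
		void __user *ptr = arch_compat_alloc_user_space(len);

		if (unlikely(!access_ok(VERIFY_WRITE, ptr, len)))
			return NULL;
		return ptr;
	}
]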
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h
index 8c95bef..ee43328 100644
--- a/arch/tile/include/asm/io.h
+++ b/arch/tile/include/asm/io.h
@@ -164,22 +164,22 @@
#define iowrite32 writel
#define iowrite64 writeq
-static inline void *memcpy_fromio(void *dst, void *src, int len)
+static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
+ size_t len)
{
int x;
BUG_ON((unsigned long)src & 0x3);
for (x = 0; x < len; x += 4)
*(u32 *)(dst + x) = readl(src + x);
- return dst;
}
-static inline void *memcpy_toio(void *dst, void *src, int len)
+static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
+ size_t len)
{
int x;
BUG_ON((unsigned long)dst & 0x3);
for (x = 0; x < len; x += 4)
writel(*(u32 *)(src + x), dst + x);
- return dst;
}
/*
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index d942d09..ccd5f84 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -103,6 +103,18 @@
/* Any other miscellaneous processor state bits */
unsigned long proc_status;
#endif
+#if !CHIP_HAS_FIXED_INTVEC_BASE()
+ /* Interrupt base for PL0 interrupts */
+ unsigned long interrupt_vector_base;
+#endif
+#if CHIP_HAS_TILE_RTF_HWM()
+ /* Tile cache retry fifo high-water mark */
+ unsigned long tile_rtf_hwm;
+#endif
+#if CHIP_HAS_DSTREAM_PF()
+ /* Data stream prefetch control */
+ unsigned long dstream_pf;
+#endif
#ifdef CONFIG_HARDWALL
/* Is this task tied to an activated hardwall? */
struct hardwall_info *hardwall;
diff --git a/arch/tile/include/asm/ptrace.h b/arch/tile/include/asm/ptrace.h
index acdae81..4a02bb0 100644
--- a/arch/tile/include/asm/ptrace.h
+++ b/arch/tile/include/asm/ptrace.h
@@ -51,10 +51,7 @@
/*
* This struct defines the way the registers are stored on the stack during a
- * system call/exception. It should be a multiple of 8 bytes to preserve
- * normal stack alignment rules.
- *
- * Must track <sys/ucontext.h> and <sys/procfs.h>
+ * system call or exception. "struct sigcontext" has the same shape.
*/
struct pt_regs {
/* Saved main processor registers; 56..63 are special. */
@@ -80,11 +77,6 @@
#endif /* __ASSEMBLY__ */
-/* Flag bits in pt_regs.flags */
-#define PT_FLAGS_DISABLE_IRQ 1 /* on return to kernel, disable irqs */
-#define PT_FLAGS_CALLER_SAVES 2 /* caller-save registers are valid */
-#define PT_FLAGS_RESTORE_REGS 4 /* restore callee-save regs on return */
-
#define PTRACE_GETREGS 12
#define PTRACE_SETREGS 13
#define PTRACE_GETFPREGS 14
@@ -101,6 +93,11 @@
#ifdef __KERNEL__
+/* Flag bits in pt_regs.flags */
+#define PT_FLAGS_DISABLE_IRQ 1 /* on return to kernel, disable irqs */
+#define PT_FLAGS_CALLER_SAVES 2 /* caller-save registers are valid */
+#define PT_FLAGS_RESTORE_REGS 4 /* restore callee-save regs on return */
+
#ifndef __ASSEMBLY__
#define instruction_pointer(regs) ((regs)->pc)
diff --git a/arch/tile/include/asm/sigcontext.h b/arch/tile/include/asm/sigcontext.h
index 7cd7672..5e2d033 100644
--- a/arch/tile/include/asm/sigcontext.h
+++ b/arch/tile/include/asm/sigcontext.h
@@ -15,13 +15,21 @@
#ifndef _ASM_TILE_SIGCONTEXT_H
#define _ASM_TILE_SIGCONTEXT_H
-/* NOTE: we can't include <linux/ptrace.h> due to #include dependencies. */
-#include <asm/ptrace.h>
+#include <arch/abi.h>
-/* Must track <sys/ucontext.h> */
-
+/*
+ * struct sigcontext has the same shape as struct pt_regs,
+ * but is simplified since we know the fault is from userspace.
+ */
struct sigcontext {
- struct pt_regs regs;
+ uint_reg_t gregs[53]; /* General-purpose registers. */
+ uint_reg_t tp; /* Aliases gregs[TREG_TP]. */
+ uint_reg_t sp; /* Aliases gregs[TREG_SP]. */
+ uint_reg_t lr; /* Aliases gregs[TREG_LR]. */
+ uint_reg_t pc; /* Program counter. */
+ uint_reg_t ics; /* In Interrupt Critical Section? */
+ uint_reg_t faultnum; /* Fault number. */
+ uint_reg_t pad[5];
};
#endif /* _ASM_TILE_SIGCONTEXT_H */
diff --git a/arch/tile/include/asm/signal.h b/arch/tile/include/asm/signal.h
index eb0253f..c1ee1d6 100644
--- a/arch/tile/include/asm/signal.h
+++ b/arch/tile/include/asm/signal.h
@@ -24,6 +24,7 @@
#include <asm-generic/signal.h>
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+struct pt_regs;
int restore_sigcontext(struct pt_regs *, struct sigcontext __user *, long *);
int setup_sigcontext(struct sigcontext __user *, struct pt_regs *);
void do_signal(struct pt_regs *regs);
diff --git a/arch/tile/include/asm/syscalls.h b/arch/tile/include/asm/syscalls.h
index af165a7..ce99ffe 100644
--- a/arch/tile/include/asm/syscalls.h
+++ b/arch/tile/include/asm/syscalls.h
@@ -62,10 +62,12 @@
long _sys_fork(struct pt_regs *regs);
long sys_vfork(void);
long _sys_vfork(struct pt_regs *regs);
-long sys_execve(char __user *filename, char __user * __user *argv,
- char __user * __user *envp);
-long _sys_execve(char __user *filename, char __user * __user *argv,
- char __user * __user *envp, struct pt_regs *regs);
+long sys_execve(const char __user *filename,
+ const char __user *const __user *argv,
+ const char __user *const __user *envp);
+long _sys_execve(const char __user *filename,
+ const char __user *const __user *argv,
+ const char __user *const __user *envp, struct pt_regs *regs);
/* kernel/signal.c */
long sys_sigaltstack(const stack_t __user *, stack_t __user *);
@@ -86,10 +88,13 @@
#endif
#ifdef CONFIG_COMPAT
-long compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
- compat_uptr_t __user *envp);
-long _compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
- compat_uptr_t __user *envp, struct pt_regs *regs);
+long compat_sys_execve(const char __user *path,
+ const compat_uptr_t __user *argv,
+ const compat_uptr_t __user *envp);
+long _compat_sys_execve(const char __user *path,
+ const compat_uptr_t __user *argv,
+ const compat_uptr_t __user *envp,
+ struct pt_regs *regs);
long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
struct compat_sigaltstack __user *uoss_ptr);
long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index 84f296c..8f58bdf 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -1506,13 +1506,6 @@
}
STD_ENDPROC(handle_ill)
- .pushsection .rodata, "a"
- .align 8
-bpt_code:
- bpt
- ENDPROC(bpt_code)
- .popsection
-
/* Various stub interrupt handlers and syscall handlers */
STD_ENTRY_LOCAL(_kernel_double_fault)
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 985cc28..84c2911 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -408,6 +408,15 @@
#if CHIP_HAS_PROC_STATUS_SPR()
t->proc_status = __insn_mfspr(SPR_PROC_STATUS);
#endif
+#if !CHIP_HAS_FIXED_INTVEC_BASE()
+ t->interrupt_vector_base = __insn_mfspr(SPR_INTERRUPT_VECTOR_BASE_0);
+#endif
+#if CHIP_HAS_TILE_RTF_HWM()
+ t->tile_rtf_hwm = __insn_mfspr(SPR_TILE_RTF_HWM);
+#endif
+#if CHIP_HAS_DSTREAM_PF()
+ t->dstream_pf = __insn_mfspr(SPR_DSTREAM_PF);
+#endif
}
static void restore_arch_state(const struct thread_struct *t)
@@ -428,14 +437,14 @@
#if CHIP_HAS_PROC_STATUS_SPR()
__insn_mtspr(SPR_PROC_STATUS, t->proc_status);
#endif
+#if !CHIP_HAS_FIXED_INTVEC_BASE()
+ __insn_mtspr(SPR_INTERRUPT_VECTOR_BASE_0, t->interrupt_vector_base);
+#endif
#if CHIP_HAS_TILE_RTF_HWM()
- /*
- * Clear this whenever we switch back to a process in case
- * the previous process was monkeying with it. Even if enabled
- * in CBOX_MSR1 via TILE_RTF_HWM_MIN, it's still just a
- * performance hint, so isn't worth a full save/restore.
- */
- __insn_mtspr(SPR_TILE_RTF_HWM, 0);
+ __insn_mtspr(SPR_TILE_RTF_HWM, t->tile_rtf_hwm);
+#endif
+#if CHIP_HAS_DSTREAM_PF()
+ __insn_mtspr(SPR_DSTREAM_PF, t->dstream_pf);
#endif
}
@@ -561,8 +570,9 @@
}
#ifdef CONFIG_COMPAT
-long _compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
- compat_uptr_t __user *envp, struct pt_regs *regs)
+long _compat_sys_execve(const char __user *path,
+ const compat_uptr_t __user *argv,
+ const compat_uptr_t __user *envp, struct pt_regs *regs)
{
long error;
char *filename;
@@ -657,7 +667,7 @@
regs->regs[51], regs->regs[52], regs->tp);
pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
#else
- for (i = 0; i < 52; i += 3)
+ for (i = 0; i < 52; i += 4)
pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
" r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
i, regs->regs[i], i+1, regs->regs[i+1],
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c
index 45b66a3..ce183aa 100644
--- a/arch/tile/kernel/signal.c
+++ b/arch/tile/kernel/signal.c
@@ -61,13 +61,19 @@
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ /*
+ * Enforce that sigcontext is like pt_regs, and doesn't mess
+ * up our stack alignment rules.
+ */
+ BUILD_BUG_ON(sizeof(struct sigcontext) != sizeof(struct pt_regs));
+ BUILD_BUG_ON(sizeof(struct sigcontext) % 8 != 0);
+
for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
- err |= __get_user(((long *)regs)[i],
- &((long __user *)(&sc->regs))[i]);
+ err |= __get_user(regs->regs[i], &sc->gregs[i]);
regs->faultnum = INT_SWINT_1_SIGRETURN;
- err |= __get_user(*pr0, &sc->regs.regs[0]);
+ err |= __get_user(*pr0, &sc->gregs[0]);
return err;
}
@@ -112,8 +118,7 @@
int i, err = 0;
for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
- err |= __put_user(((long *)regs)[i],
- &((long __user *)(&sc->regs))[i]);
+ err |= __put_user(regs->regs[i], &sc->gregs[i]);
return err;
}
@@ -203,19 +208,17 @@
* Set up registers for signal handler.
* Registers that we don't modify keep the value they had from
* user-space at the time we took the signal.
+ * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
+ * since some things rely on this (e.g. glibc's debug/segfault.c).
*/
regs->pc = (unsigned long) ka->sa.sa_handler;
regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */
regs->sp = (unsigned long) frame;
regs->lr = restorer;
regs->regs[0] = (unsigned long) usig;
-
- if (ka->sa.sa_flags & SA_SIGINFO) {
- /* Need extra arguments, so mark to restore caller-saves. */
- regs->regs[1] = (unsigned long) &frame->info;
- regs->regs[2] = (unsigned long) &frame->uc;
- regs->flags |= PT_FLAGS_CALLER_SAVES;
- }
+ regs->regs[1] = (unsigned long) &frame->info;
+ regs->regs[2] = (unsigned long) &frame->uc;
+ regs->flags |= PT_FLAGS_CALLER_SAVES;
/*
* Notify any tracer that was single-stepping it.
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index 38a68b0b4..ea2e0ce 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -175,7 +175,7 @@
pr_err(" <received signal %d>\n",
frame->info.si_signo);
}
- return &frame->uc.uc_mcontext.regs;
+ return (struct pt_regs *)&frame->uc.uc_mcontext;
}
return NULL;
}
diff --git a/arch/um/drivers/hostaudio_kern.c b/arch/um/drivers/hostaudio_kern.c
index 0c46e39..63c740a 100644
--- a/arch/um/drivers/hostaudio_kern.c
+++ b/arch/um/drivers/hostaudio_kern.c
@@ -40,6 +40,11 @@
" This is used to specify the host mixer device to the hostaudio driver.\n"\
" The default is \"" HOSTAUDIO_DEV_MIXER "\".\n\n"
+module_param(dsp, charp, 0644);
+MODULE_PARM_DESC(dsp, DSP_HELP);
+module_param(mixer, charp, 0644);
+MODULE_PARM_DESC(mixer, MIXER_HELP);
+
#ifndef MODULE
static int set_dsp(char *name, int *add)
{
@@ -56,15 +61,6 @@
}
__uml_setup("mixer=", set_mixer, "mixer=<mixer device>\n" MIXER_HELP);
-
-#else /*MODULE*/
-
-module_param(dsp, charp, 0644);
-MODULE_PARM_DESC(dsp, DSP_HELP);
-
-module_param(mixer, charp, 0644);
-MODULE_PARM_DESC(mixer, MIXER_HELP);
-
#endif
/* /dev/dsp file operations */
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index de317d0..ebc6807 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -690,7 +690,7 @@
static void sysrq_proc(void *arg)
{
char *op = arg;
- handle_sysrq(*op, NULL);
+ handle_sysrq(*op);
}
void mconsole_sysrq(struct mc_request *req)
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index 2ab233b..47d0c37 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -255,18 +255,6 @@
netif_wake_queue(dev);
}
-static int uml_net_set_mac(struct net_device *dev, void *addr)
-{
- struct uml_net_private *lp = netdev_priv(dev);
- struct sockaddr *hwaddr = addr;
-
- spin_lock_irq(&lp->lock);
- eth_mac_addr(dev, hwaddr->sa_data);
- spin_unlock_irq(&lp->lock);
-
- return 0;
-}
-
static int uml_net_change_mtu(struct net_device *dev, int new_mtu)
{
dev->mtu = new_mtu;
@@ -373,7 +361,7 @@
.ndo_start_xmit = uml_net_start_xmit,
.ndo_set_multicast_list = uml_net_set_multicast_list,
.ndo_tx_timeout = uml_net_tx_timeout,
- .ndo_set_mac_address = uml_net_set_mac,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = uml_net_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};
@@ -472,7 +460,8 @@
((*transport->user->init)(&lp->user, dev) != 0))
goto out_unregister;
- eth_mac_addr(dev, device->mac);
+ /* don't use eth_mac_addr, it will not work here */
+ memcpy(dev->dev_addr, device->mac, ETH_ALEN);
dev->mtu = transport->user->mtu;
dev->netdev_ops = &uml_netdev_ops;
dev->ethtool_ops = &uml_net_ethtool_ops;
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 1bcd208..9734994 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -163,6 +163,7 @@
struct scatterlist sg[MAX_SG];
struct request *request;
int start_sg, end_sg;
+ sector_t rq_pos;
};
#define DEFAULT_COW { \
@@ -187,6 +188,7 @@
.request = NULL, \
.start_sg = 0, \
.end_sg = 0, \
+ .rq_pos = 0, \
}
/* Protected by ubd_lock */
@@ -1228,7 +1230,6 @@
{
struct io_thread_req *io_req;
struct request *req;
- sector_t sector;
int n;
while(1){
@@ -1239,12 +1240,12 @@
return;
dev->request = req;
+ dev->rq_pos = blk_rq_pos(req);
dev->start_sg = 0;
dev->end_sg = blk_rq_map_sg(q, req, dev->sg);
}
req = dev->request;
- sector = blk_rq_pos(req);
while(dev->start_sg < dev->end_sg){
struct scatterlist *sg = &dev->sg[dev->start_sg];
@@ -1256,10 +1257,9 @@
return;
}
prepare_request(req, io_req,
- (unsigned long long)sector << 9,
+ (unsigned long long)dev->rq_pos << 9,
sg->offset, sg->length, sg_page(sg));
- sector += sg->length >> 9;
n = os_write_file(thread_fd, &io_req,
sizeof(struct io_thread_req *));
if(n != sizeof(struct io_thread_req *)){
@@ -1272,6 +1272,7 @@
return;
}
+ dev->rq_pos += sg->length >> 9;
dev->start_sg++;
}
dev->end_sg = 0;
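
The hunk above fixes a position-tracking bug: the sector offset used to be a local recomputed from blk_rq_pos() on each entry, so when the function bailed out mid-request (e.g. the write to the I/O thread failed) and was re-entered later, the partially submitted request restarted at the wrong sector. Keeping rq_pos in the per-device state and advancing it only after a successful submission preserves progress. A schematic sketch of that bookkeeping, with shortened hypothetical names and the actual I/O elided:

struct ubd_dev_state {
	unsigned long long rq_pos;	/* in sectors; persists across calls */
	int start_sg, end_sg;
};

static void submit_one_sg(struct ubd_dev_state *dev, unsigned int sg_length)
{
	/* issue the I/O at byte offset dev->rq_pos << 9 ... */

	/* advance only once the submission is known to have gone out */
	dev->rq_pos += sg_length >> 9;
	dev->start_sg++;
}
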
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c
index cd145ed..49b5e1e 100644
--- a/arch/um/kernel/exec.c
+++ b/arch/um/kernel/exec.c
@@ -62,7 +62,7 @@
return error;
}
-long um_execve(const char *file, char __user *__user *argv, char __user *__user *env)
+long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env)
{
long err;
@@ -72,8 +72,8 @@
return err;
}
-long sys_execve(const char __user *file, char __user *__user *argv,
- char __user *__user *env)
+long sys_execve(const char __user *file, const char __user *const __user *argv,
+ const char __user *const __user *env)
{
long error;
char *filename;
diff --git a/arch/um/kernel/internal.h b/arch/um/kernel/internal.h
index 1303a10..5bf97db 100644
--- a/arch/um/kernel/internal.h
+++ b/arch/um/kernel/internal.h
@@ -1 +1 @@
-extern long um_execve(const char *file, char __user *__user *argv, char __user *__user *env);
+extern long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env);
diff --git a/arch/um/kernel/syscall.c b/arch/um/kernel/syscall.c
index 5ddb246..f958cb8 100644
--- a/arch/um/kernel/syscall.c
+++ b/arch/um/kernel/syscall.c
@@ -60,8 +60,8 @@
fs = get_fs();
set_fs(KERNEL_DS);
- ret = um_execve(filename, (char __user *__user *)argv,
- (char __user *__user *) envp);
+ ret = um_execve(filename, (const char __user *const __user *)argv,
+ (const char __user *const __user *) envp);
set_fs(fs);
return ret;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index cea0cd9..b867649 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -25,6 +25,7 @@
select HAVE_IDE
select HAVE_OPROFILE
select HAVE_PERF_EVENTS if (!M386 && !M486)
+ select HAVE_IRQ_WORK
select HAVE_IOREMAP_PROT
select HAVE_KPROBES
select ARCH_WANT_OPTIONAL_GPIOLIB
@@ -33,6 +34,7 @@
select HAVE_KRETPROBES
select HAVE_OPTPROBES
select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_C_RECORDMCOUNT
select HAVE_DYNAMIC_FTRACE
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
@@ -59,6 +61,8 @@
select ANON_INODES
select HAVE_ARCH_KMEMCHECK
select HAVE_USER_RETURN_NOTIFIER
+ select HAVE_ARCH_JUMP_LABEL
+ select HAVE_TEXT_POKE_SMP
config INSTRUCTION_DECODER
def_bool (KPROBES || PERF_EVENTS)
@@ -670,7 +674,7 @@
bool "GART IOMMU support" if EMBEDDED
default y
select SWIOTLB
- depends on X86_64 && PCI && K8_NB
+ depends on X86_64 && PCI && AMD_NB
---help---
Support for full DMA access of devices with 32bit memory access only
on systems with more than 3GB. This is usually needed for USB,
@@ -795,6 +799,17 @@
making when dealing with multi-core CPU chips at a cost of slightly
increased overhead in some places. If unsure say N here.
+config IRQ_TIME_ACCOUNTING
+ bool "Fine granularity task level IRQ time accounting"
+ default n
+ ---help---
+ Select this option to enable fine granularity task irq time
+ accounting. This is done by reading a timestamp on each
+ transition between softirq and hardirq state, so there can be a
+ small performance impact.
+
+ If in doubt, say N here.
+
source "kernel/Kconfig.preempt"
config X86_UP_APIC
@@ -1326,25 +1341,34 @@
Set whether the default state of memory_corruption_check is
on or off.
-config X86_RESERVE_LOW_64K
- bool "Reserve low 64K of RAM on AMI/Phoenix BIOSen"
- default y
+config X86_RESERVE_LOW
+ int "Amount of low memory, in kilobytes, to reserve for the BIOS"
+ default 64
+ range 4 640
---help---
- Reserve the first 64K of physical RAM on BIOSes that are known
- to potentially corrupt that memory range. A numbers of BIOSes are
- known to utilize this area during suspend/resume, so it must not
- be used by the kernel.
+ Specify the amount of low memory to reserve for the BIOS.
- Set this to N if you are absolutely sure that you trust the BIOS
- to get all its memory reservations and usages right.
+ The first page contains BIOS data structures that the kernel
+ must not use, so that page must always be reserved.
- If you have doubts about the BIOS (e.g. suspend/resume does not
- work or there's kernel crashes after certain hardware hotplug
- events) and it's not AMI or Phoenix, then you might want to enable
- X86_CHECK_BIOS_CORRUPTION=y to allow the kernel to check typical
- corruption patterns.
+ By default we reserve the first 64K of physical RAM, as a
+ number of BIOSes are known to corrupt that memory range
+ during events such as suspend/resume or monitor cable
+ insertion, so it must not be used by the kernel.
- Say Y if unsure.
+ You can set this to 4 if you are absolutely sure that you
+ trust the BIOS to get all its memory reservations and usages
+ right. If you know your BIOS has problems beyond the
+ default 64K area, you can set this to 640 to avoid using the
+ entire low memory range.
+
+ If you have doubts about the BIOS (e.g. suspend/resume does
+ not work or there's kernel crashes after certain hardware
+ hotplug events) then you might want to enable
+ X86_CHECK_BIOS_CORRUPTION=y to allow the kernel to check
+ typical corruption patterns.
+
+ Leave this to the default value of 64 if you are unsure.
config MATH_EMULATION
bool
@@ -2076,7 +2100,7 @@
endif # X86_32
-config K8_NB
+config AMD_NB
def_bool y
depends on CPU_SUP_AMD && PCI
@@ -2125,6 +2149,10 @@
def_bool y
depends on X86_32
+config HAVE_TEXT_POKE_SMP
+ bool
+ select STOP_MACHINE if SMP
+
source "net/Kconfig"
source "drivers/Kconfig"
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 8aa1b59..b02e509 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -74,7 +74,7 @@
ifdef CONFIG_CC_STACKPROTECTOR
cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
- ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(biarch)),y)
+ ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y)
stackp-y := -fstack-protector
KBUILD_CFLAGS += $(stackp-y)
else
@@ -96,8 +96,12 @@
# is .cfi_signal_frame supported too?
cfi-sigframe := $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1)
cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTIONS=1)
-KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections)
-KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections)
+
+# does binutils support specific instructions?
+asinstr := $(call as-instr,fxsaveq (%rax),-DCONFIG_AS_FXSAVEQ=1)
+
+KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr)
+KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr)
LDFLAGS := -m elf_$(UTS_MACHINE)
diff --git a/arch/x86/boot/early_serial_console.c b/arch/x86/boot/early_serial_console.c
index 030f4b9..5df2869 100644
--- a/arch/x86/boot/early_serial_console.c
+++ b/arch/x86/boot/early_serial_console.c
@@ -58,7 +58,19 @@
if (arg[pos] == ',')
pos++;
- if (!strncmp(arg, "ttyS", 4)) {
+ /*
+ * make sure we have
+ * "serial,0x3f8,115200"
+ * "serial,ttyS0,115200"
+ * "ttyS0,115200"
+ */
+ if (pos == 7 && !strncmp(arg + pos, "0x", 2)) {
+ port = simple_strtoull(arg + pos, &e, 16);
+ if (port == 0 || arg + pos == e)
+ port = DEFAULT_SERIAL_PORT;
+ else
+ pos = e - arg;
+ } else if (!strncmp(arg + pos, "ttyS", 4)) {
static const int bases[] = { 0x3f8, 0x2f8 };
int idx = 0;
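
The added branch lets the early console option name a raw hex I/O port as well as a ttyS device, per the forms in the comment; note that the hex form is only accepted after a "serial," prefix (pos == 7). A compilable userspace approximation of the dispatch, a sketch with hypothetical names (the kernel version also validates the strtoull end pointer before trusting the number):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEFAULT_PORT 0x3f8	/* stands in for DEFAULT_SERIAL_PORT */

static unsigned int parse_earlyprintk_port(const char *arg)
{
	int pos = 0;

	if (!strncmp(arg, "serial,", 7))
		pos = 7;

	if (pos == 7 && !strncmp(arg + pos, "0x", 2))
		return (unsigned int)strtoul(arg + pos, NULL, 16);
	if (!strncmp(arg + pos, "ttyS", 4))
		return arg[pos + 4] == '1' ? 0x2f8 : 0x3f8;
	return DEFAULT_PORT;
}

int main(void)
{
	printf("0x%x\n", parse_earlyprintk_port("serial,0x3f8,115200"));
	printf("0x%x\n", parse_earlyprintk_port("serial,ttyS0,115200"));
	printf("0x%x\n", parse_earlyprintk_port("ttyS1,115200"));
	return 0;
}
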
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index 0350311..2d93bdb 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -34,7 +34,7 @@
#include <asm/ia32.h>
#undef WARN_OLD
-#undef CORE_DUMP /* probably broken */
+#undef CORE_DUMP /* definitely broken */
static int load_aout_binary(struct linux_binprm *, struct pt_regs *regs);
static int load_aout_library(struct file *);
@@ -131,21 +131,15 @@
* macros to write out all the necessary info.
*/
-static int dump_write(struct file *file, const void *addr, int nr)
-{
- return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
-}
+#include <linux/coredump.h>
#define DUMP_WRITE(addr, nr) \
if (!dump_write(file, (void *)(addr), (nr))) \
goto end_coredump;
-#define DUMP_SEEK(offset) \
- if (file->f_op->llseek) { \
- if (file->f_op->llseek(file, (offset), 0) != (offset)) \
- goto end_coredump; \
- } else \
- file->f_pos = (offset)
+#define DUMP_SEEK(offset) \
+ if (!dump_seek(file, offset)) \
+ goto end_coredump;
#define START_DATA() (u.u_tsize << PAGE_SHIFT)
#define START_STACK(u) (u.start_stack)
@@ -217,12 +211,6 @@
dump_size = dump.u_ssize << PAGE_SHIFT;
DUMP_WRITE(dump_start, dump_size);
}
- /*
- * Finally dump the task struct. Not be used by gdb, but
- * could be useful
- */
- set_fs(KERNEL_DS);
- DUMP_WRITE(current, sizeof(*current));
end_coredump:
set_fs(fs);
return has_dumped;
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index b86feab..518bb99 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -50,7 +50,12 @@
/*
* Reload arg registers from stack in case ptrace changed them.
* We don't reload %eax because syscall_trace_enter() returned
- * the value it wants us to use in the table lookup.
+ * the %rax value we should see. Instead, we just truncate that
+ * value to 32 bits again as we did on entry from user mode.
+ * If it's a new value set by user_regset during entry tracing,
+ * this matches the normal truncation of the user-mode value.
+ * If it's -1 to make us punt the syscall, then (u32)-1 is still
+ * an appropriately invalid value.
*/
.macro LOAD_ARGS32 offset, _r9=0
.if \_r9
@@ -60,6 +65,7 @@
movl \offset+48(%rsp),%edx
movl \offset+56(%rsp),%esi
movl \offset+64(%rsp),%edi
+ movl %eax,%eax /* zero extension */
.endm
.macro CFI_STARTPROC32 simple
@@ -153,7 +159,7 @@
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
CFI_REMEMBER_STATE
jnz sysenter_tracesys
- cmpl $(IA32_NR_syscalls-1),%eax
+ cmpq $(IA32_NR_syscalls-1),%rax
ja ia32_badsys
sysenter_do_call:
IA32_ARG_FIXUP
@@ -195,7 +201,7 @@
movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
call audit_syscall_entry
movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
- cmpl $(IA32_NR_syscalls-1),%eax
+ cmpq $(IA32_NR_syscalls-1),%rax
ja ia32_badsys
movl %ebx,%edi /* reload 1st syscall arg */
movl RCX-ARGOFFSET(%rsp),%esi /* reload 2nd syscall arg */
@@ -248,7 +254,7 @@
call syscall_trace_enter
LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
RESTORE_REST
- cmpl $(IA32_NR_syscalls-1),%eax
+ cmpq $(IA32_NR_syscalls-1),%rax
ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
jmp sysenter_do_call
CFI_ENDPROC
@@ -314,7 +320,7 @@
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
CFI_REMEMBER_STATE
jnz cstar_tracesys
- cmpl $IA32_NR_syscalls-1,%eax
+ cmpq $IA32_NR_syscalls-1,%rax
ja ia32_badsys
cstar_do_call:
IA32_ARG_FIXUP 1
@@ -367,7 +373,7 @@
LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
RESTORE_REST
xchgl %ebp,%r9d
- cmpl $(IA32_NR_syscalls-1),%eax
+ cmpq $(IA32_NR_syscalls-1),%rax
ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
jmp cstar_do_call
END(ia32_cstar_target)
@@ -425,7 +431,7 @@
orl $TS_COMPAT,TI_status(%r10)
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
jnz ia32_tracesys
- cmpl $(IA32_NR_syscalls-1),%eax
+ cmpq $(IA32_NR_syscalls-1),%rax
ja ia32_badsys
ia32_do_call:
IA32_ARG_FIXUP
@@ -444,7 +450,7 @@
call syscall_trace_enter
LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
RESTORE_REST
- cmpl $(IA32_NR_syscalls-1),%eax
+ cmpq $(IA32_NR_syscalls-1),%rax
ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
jmp ia32_do_call
END(ia32_syscall)
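
All of the cmpl-to-cmpq changes in this file close the same hole: the bound check compared only %eax while the later table dispatch indexed with the full %rax, so a tracer that wrote a syscall number with garbage above bit 31 could pass the check yet index out of bounds. (The added movl %eax,%eax zero-extension closes the window from the other side.) A plain-C illustration of the mismatch, with a made-up syscall count:

#include <stdio.h>
#include <stdint.h>

#define FAKE_NR_SYSCALLS 341	/* made-up; stands in for IA32_NR_syscalls */

int main(void)
{
	uint64_t rax = 0x100000000ULL | 5;	/* garbage above bit 31 */

	/* cmpl: only the low 32 bits are compared -- the check passes */
	int passes_32bit = (uint32_t)rax <= FAKE_NR_SYSCALLS - 1;

	/* cmpq: the full 64-bit value is compared -- correctly rejected */
	int passes_64bit = rax <= FAKE_NR_SYSCALLS - 1;

	printf("32-bit check: %d, 64-bit check: %d\n",
	       passes_32bit, passes_64bit);
	return 0;
}
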
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index bc6abb7..76561d2 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -4,6 +4,7 @@
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/stringify.h>
+#include <linux/jump_label.h>
#include <asm/asm.h>
/*
@@ -160,6 +161,8 @@
#define __parainstructions_end NULL
#endif
+extern void *text_poke_early(void *addr, const void *opcode, size_t len);
+
/*
* Clear and restore the kernel write-protection flag on the local CPU.
* Allows the kernel to edit read-only pages.
@@ -180,4 +183,12 @@
extern void *text_poke(void *addr, const void *opcode, size_t len);
extern void *text_poke_smp(void *addr, const void *opcode, size_t len);
+#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
+#define IDEAL_NOP_SIZE_5 5
+extern unsigned char ideal_nop5[IDEAL_NOP_SIZE_5];
+extern void arch_init_ideal_nop5(void);
+#else
+static inline void arch_init_ideal_nop5(void) {}
+#endif
+
#endif /* _ASM_X86_ALTERNATIVE_H */
diff --git a/arch/x86/include/asm/amd_iommu.h b/arch/x86/include/asm/amd_iommu.h
index 5af2982..f16a2ca 100644
--- a/arch/x86/include/asm/amd_iommu.h
+++ b/arch/x86/include/asm/amd_iommu.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
+ * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
* Author: Joerg Roedel <joerg.roedel@amd.com>
* Leo Duran <leo.duran@amd.com>
*
diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/arch/x86/include/asm/amd_iommu_proto.h
index d2544f1..916bc81 100644
--- a/arch/x86/include/asm/amd_iommu_proto.h
+++ b/arch/x86/include/asm/amd_iommu_proto.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009 Advanced Micro Devices, Inc.
+ * Copyright (C) 2009-2010 Advanced Micro Devices, Inc.
* Author: Joerg Roedel <joerg.roedel@amd.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -38,4 +38,10 @@
#endif /* !CONFIG_AMD_IOMMU_STATS */
+static inline bool is_rd890_iommu(struct pci_dev *pdev)
+{
+ return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
+ (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
+}
+
#endif /* _ASM_X86_AMD_IOMMU_PROTO_H */
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 7014e88..e3509fc 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
+ * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
* Author: Joerg Roedel <joerg.roedel@amd.com>
* Leo Duran <leo.duran@amd.com>
*
@@ -368,6 +368,9 @@
/* capabilities of that IOMMU read from ACPI */
u32 cap;
+ /* flags read from acpi table */
+ u8 acpi_flags;
+
/*
* Capability pointer. There could be more than one IOMMU per PCI
* device function if there are more than one AMD IOMMU capability
@@ -411,6 +414,24 @@
/* default dma_ops domain for that IOMMU */
struct dma_ops_domain *default_dom;
+
+ /*
+ * We can't rely on the BIOS to restore all values on reinit, so we
+ * need to stash them
+ */
+
+ /* The iommu BAR */
+ u32 stored_addr_lo;
+ u32 stored_addr_hi;
+
+ /*
+ * Each iommu has 6 l1s, each of which is documented as having 0x12
+ * registers
+ */
+ u32 stored_l1[6][0x12];
+
+ /* The l2 indirect registers */
+ u32 stored_l2[0x83];
};
/*
diff --git a/arch/x86/include/asm/k8.h b/arch/x86/include/asm/amd_nb.h
similarity index 61%
rename from arch/x86/include/asm/k8.h
rename to arch/x86/include/asm/amd_nb.h
index af00bd1..c8517f8 100644
--- a/arch/x86/include/asm/k8.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_K8_H
-#define _ASM_X86_K8_H
+#ifndef _ASM_X86_AMD_NB_H
+#define _ASM_X86_AMD_NB_H
#include <linux/pci.h>
@@ -7,24 +7,27 @@
struct bootnode;
extern int early_is_k8_nb(u32 value);
-extern struct pci_dev **k8_northbridges;
-extern int num_k8_northbridges;
extern int cache_k8_northbridges(void);
extern void k8_flush_garts(void);
extern int k8_get_nodes(struct bootnode *nodes);
extern int k8_numa_init(unsigned long start_pfn, unsigned long end_pfn);
extern int k8_scan_nodes(void);
-#ifdef CONFIG_K8_NB
-extern int num_k8_northbridges;
+struct k8_northbridge_info {
+ u16 num;
+ u8 gart_supported;
+ struct pci_dev **nb_misc;
+};
+extern struct k8_northbridge_info k8_northbridges;
+
+#ifdef CONFIG_AMD_NB
static inline struct pci_dev *node_to_k8_nb_misc(int node)
{
- return (node < num_k8_northbridges) ? k8_northbridges[node] : NULL;
+ return (node < k8_northbridges.num) ? k8_northbridges.nb_misc[node] : NULL;
}
#else
-#define num_k8_northbridges 0
static inline struct pci_dev *node_to_k8_nb_misc(int node)
{
@@ -33,4 +36,4 @@
#endif
-#endif /* _ASM_X86_K8_H */
+#endif /* _ASM_X86_AMD_NB_H */
diff --git a/arch/x86/include/asm/apb_timer.h b/arch/x86/include/asm/apb_timer.h
index a69b1ac..2fefa50 100644
--- a/arch/x86/include/asm/apb_timer.h
+++ b/arch/x86/include/asm/apb_timer.h
@@ -54,7 +54,6 @@
extern unsigned long apbt_quick_calibrate(void);
extern int arch_setup_apbt_irqs(int irq, int trigger, int mask, int cpu);
extern void apbt_setup_secondary_clock(void);
-extern unsigned int boot_cpu_id;
extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint);
extern void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr);
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 545776e..bafd80d 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -309,7 +309,7 @@
static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
{
return ((1UL << (nr % BITS_PER_LONG)) &
- (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
+ (addr[nr / BITS_PER_LONG])) != 0;
}
static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
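
The one-line change above matters because the old cast to plain unsigned long * discarded the parameter's volatile qualifier, permitting the compiler to cache or hoist the load. Indexing through the original pointer keeps every call a real memory read. A self-contained sketch of the corrected shape, with BITS_PER_LONG spelled out by hand:

#include <stdio.h>

static int my_test_bit(unsigned int nr, const volatile unsigned long *addr)
{
	/* index through the volatile pointer; no qualifier-stripping cast */
	return ((1UL << (nr % (8 * sizeof(long)))) &
		addr[nr / (8 * sizeof(long))]) != 0;
}

int main(void)
{
	volatile unsigned long word = 1UL << 5;

	printf("bit 5: %d, bit 6: %d\n",
	       my_test_bit(5, &word), my_test_bit(6, &word));
	return 0;
}
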
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index 306160e..1d9cd27 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -205,7 +205,7 @@
return (u32)(unsigned long)uptr;
}
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
{
struct pt_regs *regs = task_pt_regs(current);
return (void __user *)regs->sp - len;
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index b185091..4fab24d 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -32,6 +32,5 @@
DECLARE_PER_CPU(int, cpu_state);
-extern unsigned int boot_cpu_id;
#endif /* _ASM_X86_CPU_H */
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index bffeab7..220e2ea 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -172,6 +172,7 @@
#define X86_FEATURE_XSAVEOPT (7*32+ 4) /* Optimized Xsave */
#define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */
#define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */
+#define X86_FEATURE_DTS (7*32+ 7) /* Digital Thermal Sensor */
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */
@@ -307,6 +308,7 @@
#endif /* CONFIG_X86_64 */
+#if __GNUC__ >= 4
/*
* Static testing of CPU features. Used the same as boot_cpu_has().
* These are only valid after alternatives have run, but will statically
@@ -315,7 +317,7 @@
*/
static __always_inline __pure bool __static_cpu_has(u16 bit)
{
-#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
+#if __GNUC__ > 4 || __GNUC_MINOR__ >= 5
asm goto("1: jmp %l[t_no]\n"
"2:\n"
".section .altinstructions,\"a\"\n"
@@ -356,7 +358,6 @@
#endif
}
-#if __GNUC__ >= 4
#define static_cpu_has(bit) \
( \
__builtin_constant_p(boot_cpu_has(bit)) ? \
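
Hoisting the #if __GNUC__ >= 4 guard above __static_cpu_has() is what lets the inner test reduce to __GNUC_MINOR__ >= 5, the gcc level that introduced asm goto. That construct is what makes the fast path possible: the asm may branch directly to a C label, so a patched feature test costs one jmp-or-nop instead of a load plus conditional branch. A minimal sketch of the idiom (gcc >= 4.5 only; the runtime patching via .altinstructions is omitted):

#include <stdio.h>

static inline __attribute__((always_inline)) int fake_feature_test(void)
{
	/* at boot this jmp would be patched to a 5-byte nop when the
	   feature is present */
	asm goto("jmp %l[t_no]" : : : : t_no);
	return 1;				/* feature present */
t_no:
	return 0;				/* feature absent */
}

int main(void)
{
	printf("feature: %d\n", fake_feature_test());
	return 0;
}
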
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index 8e8ec66..b8e96a1 100644
--- a/arch/x86/include/asm/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -49,8 +49,8 @@
BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
-#ifdef CONFIG_PERF_EVENTS
-BUILD_INTERRUPT(perf_pending_interrupt, LOCAL_PENDING_VECTOR)
+#ifdef CONFIG_IRQ_WORK
+BUILD_INTERRUPT(irq_work_interrupt, IRQ_WORK_VECTOR)
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
diff --git a/arch/x86/include/asm/gart.h b/arch/x86/include/asm/gart.h
index 4ac5b0f..bf357f9 100644
--- a/arch/x86/include/asm/gart.h
+++ b/arch/x86/include/asm/gart.h
@@ -17,6 +17,7 @@
#define GARTEN (1<<0)
#define DISGARTCPU (1<<4)
#define DISGARTIO (1<<5)
+#define DISTLBWALKPRB (1<<6)
/* GART cache control register bits. */
#define INVGART (1<<0)
@@ -27,7 +28,6 @@
#define AMD64_GARTAPERTUREBASE 0x94
#define AMD64_GARTTABLEBASE 0x98
#define AMD64_GARTCACHECTL 0x9c
-#define AMD64_GARTEN (1<<0)
#ifdef CONFIG_GART_IOMMU
extern int gart_iommu_aperture;
@@ -57,6 +57,19 @@
extern int agp_amd64_init(void);
+static inline void gart_set_size_and_enable(struct pci_dev *dev, u32 order)
+{
+ u32 ctl;
+
+ /*
+ * Don't enable translation but enable GART IO and CPU accesses.
+ * Also, set DISTLBWALKPRB since GART table memory is UC.
+ */
+ ctl = DISTLBWALKPRB | order << 1;
+
+ pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
+}
+
static inline void enable_gart_translation(struct pci_dev *dev, u64 addr)
{
u32 tmp, ctl;
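
gart_set_size_and_enable() above packs the aperture order into bits 3:1 of the control register and sets DISTLBWALKPRB (bit 6) because the GART table lives in uncached memory. The matching decode appears in aperture_64.c later in this series: aper_size = 32 MiB << order. A small program showing the correspondence, values illustrative:

#include <stdio.h>

int main(void)
{
	for (unsigned int order = 0; order <= 5; order++) {
		unsigned int ctl = (1u << 6) /* DISTLBWALKPRB */ | order << 1;
		unsigned long long size = 32ULL * 1024 * 1024 << order;

		printf("order %u -> ctl 0x%02x, aperture %4llu MiB\n",
		       order, ctl, size >> 20);
	}
	return 0;
}
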
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index aeab29a..55e4de6 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -14,7 +14,7 @@
#endif
unsigned int x86_platform_ipis; /* arch dependent */
unsigned int apic_perf_irqs;
- unsigned int apic_pending_irqs;
+ unsigned int apic_irq_work_irqs;
#ifdef CONFIG_SMP
unsigned int irq_resched_count;
unsigned int irq_call_count;
diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h
index 004e6e2..1d5c08a 100644
--- a/arch/x86/include/asm/hpet.h
+++ b/arch/x86/include/asm/hpet.h
@@ -68,7 +68,6 @@
extern u8 hpet_blockid;
extern int hpet_force_user;
extern u8 hpet_msi_disable;
-extern u8 hpet_readback_cmp;
extern int is_hpet_enabled(void);
extern int hpet_enable(void);
extern void hpet_disable(void);
diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h
index 528a11e..824ca07 100644
--- a/arch/x86/include/asm/hw_breakpoint.h
+++ b/arch/x86/include/asm/hw_breakpoint.h
@@ -20,7 +20,7 @@
#include <linux/list.h>
/* Available HW breakpoint length encodings */
-#define X86_BREAKPOINT_LEN_X 0x00
+#define X86_BREAKPOINT_LEN_X 0x40
#define X86_BREAKPOINT_LEN_1 0x40
#define X86_BREAKPOINT_LEN_2 0x44
#define X86_BREAKPOINT_LEN_4 0x4c
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 46c0fe0..3a54a1c 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -29,7 +29,7 @@
extern void apic_timer_interrupt(void);
extern void x86_platform_ipi(void);
extern void error_interrupt(void);
-extern void perf_pending_interrupt(void);
+extern void irq_work_interrupt(void);
extern void spurious_interrupt(void);
extern void thermal_interrupt(void);
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index a73a8d5..70f105b 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -175,7 +175,7 @@
uses any extended registers for addressing, a second REX prefix
will be generated (to the assembler, rex64 followed by semicolon
is a separate instruction), and hence the 64-bitness is lost. */
-#if 0
+#ifdef CONFIG_AS_FXSAVEQ
/* Using "fxsaveq %0" would be the ideal choice, but is only supported
starting with gas 2.16. */
__asm__ __volatile__("fxsaveq %0"
diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h
index f35eb45..c4191b3 100644
--- a/arch/x86/include/asm/iomap.h
+++ b/arch/x86/include/asm/iomap.h
@@ -26,11 +26,11 @@
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
-void *
+void __iomem *
iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
void
-iounmap_atomic(void *kvaddr, enum km_type type);
+iounmap_atomic(void __iomem *kvaddr, enum km_type type);
int
iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index e2ca300..6af0894 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -114,9 +114,9 @@
#define X86_PLATFORM_IPI_VECTOR 0xed
/*
- * Performance monitoring pending work vector:
+ * IRQ work vector:
*/
-#define LOCAL_PENDING_VECTOR 0xec
+#define IRQ_WORK_VECTOR 0xec
#define UV_BAU_MESSAGE 0xea
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
new file mode 100644
index 0000000..f52d42e
--- /dev/null
+++ b/arch/x86/include/asm/jump_label.h
@@ -0,0 +1,37 @@
+#ifndef _ASM_X86_JUMP_LABEL_H
+#define _ASM_X86_JUMP_LABEL_H
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <asm/nops.h>
+
+#define JUMP_LABEL_NOP_SIZE 5
+
+# define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t"
+
+# define JUMP_LABEL(key, label) \
+ do { \
+ asm goto("1:" \
+ JUMP_LABEL_INITIAL_NOP \
+ ".pushsection __jump_table, \"a\" \n\t"\
+ _ASM_PTR "1b, %l[" #label "], %c0 \n\t" \
+ ".popsection \n\t" \
+ : : "i" (key) : : label); \
+ } while (0)
+
+#endif /* __KERNEL__ */
+
+#ifdef CONFIG_X86_64
+typedef u64 jump_label_t;
+#else
+typedef u32 jump_label_t;
+#endif
+
+struct jump_entry {
+ jump_label_t code;
+ jump_label_t target;
+ jump_label_t key;
+};
+
+#endif
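
JUMP_LABEL_INITIAL_NOP above assembles to e9 00 00 00 00: a jmp rel32 whose zero displacement simply falls through to the next instruction. The __jump_table entry records where that 5-byte slot sits (code), where the label lives (target), and the controlling key, so the patching code can later swap in either ideal_nop5 or a real jump. Computing such a rel32 displacement, with made-up addresses:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t code   = 0x1000;	/* made-up address of the 5-byte slot */
	uint64_t target = 0x1040;	/* made-up address of the label */
	int32_t rel = (int32_t)(target - (code + 5)); /* rel32 is IP-relative */

	printf("e9 %02x %02x %02x %02x\n",
	       rel & 0xff, (rel >> 8) & 0xff,
	       (rel >> 16) & 0xff, (rel >> 24) & 0xff);
	return 0;
}
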
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 51cfd73..1f99ecf 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -152,9 +152,14 @@
struct operand {
enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type;
unsigned int bytes;
- unsigned long orig_val, *ptr;
+ union {
+ unsigned long orig_val;
+ u64 orig_val64;
+ };
+ unsigned long *ptr;
union {
unsigned long val;
+ u64 val64;
char valptr[sizeof(unsigned long) + 2];
};
};
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 502e53f..c52e2eb 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -652,20 +652,6 @@
return (struct kvm_mmu_page *)page_private(page);
}
-static inline u16 kvm_read_fs(void)
-{
- u16 seg;
- asm("mov %%fs, %0" : "=g"(seg));
- return seg;
-}
-
-static inline u16 kvm_read_gs(void)
-{
- u16 seg;
- asm("mov %%gs, %0" : "=g"(seg));
- return seg;
-}
-
static inline u16 kvm_read_ldt(void)
{
u16 ldt;
@@ -673,16 +659,6 @@
return ldt;
}
-static inline void kvm_load_fs(u16 sel)
-{
- asm("mov %0, %%fs" : : "rm"(sel));
-}
-
-static inline void kvm_load_gs(u16 sel)
-{
- asm("mov %0, %%gs" : : "rm"(sel));
-}
-
static inline void kvm_load_ldt(u16 sel)
{
asm("lldt %0" : : "rm"(sel));
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index 404a880..d395540 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -27,6 +27,9 @@
int node);
extern struct pci_bus *pci_scan_bus_with_sysdata(int busno);
+#ifdef CONFIG_PCI
+
+#ifdef CONFIG_PCI_DOMAINS
static inline int pci_domain_nr(struct pci_bus *bus)
{
struct pci_sysdata *sd = bus->sysdata;
@@ -37,13 +40,12 @@
{
return pci_domain_nr(bus);
}
-
+#endif
/* Can be used to override the logic in pci_scan_bus for skipping
already-configured bus numbers - to be used for buggy BIOSes
or architectures with incomplete PCI setup by the loader */
-#ifdef CONFIG_PCI
extern unsigned int pcibios_assign_all_busses(void);
extern int pci_legacy_init(void);
# ifdef CONFIG_ACPI
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h
index def5007..a70cd21 100644
--- a/arch/x86/include/asm/perf_event_p4.h
+++ b/arch/x86/include/asm/perf_event_p4.h
@@ -36,19 +36,6 @@
#define P4_ESCR_EMASK(v) ((v) << P4_ESCR_EVENTMASK_SHIFT)
#define P4_ESCR_TAG(v) ((v) << P4_ESCR_TAG_SHIFT)
-/* Non HT mask */
-#define P4_ESCR_MASK \
- (P4_ESCR_EVENT_MASK | \
- P4_ESCR_EVENTMASK_MASK | \
- P4_ESCR_TAG_MASK | \
- P4_ESCR_TAG_ENABLE | \
- P4_ESCR_T0_OS | \
- P4_ESCR_T0_USR)
-
-/* HT mask */
-#define P4_ESCR_MASK_HT \
- (P4_ESCR_MASK | P4_ESCR_T1_OS | P4_ESCR_T1_USR)
-
#define P4_CCCR_OVF 0x80000000U
#define P4_CCCR_CASCADE 0x40000000U
#define P4_CCCR_OVF_PMI_T0 0x04000000U
@@ -70,23 +57,6 @@
#define P4_CCCR_THRESHOLD(v) ((v) << P4_CCCR_THRESHOLD_SHIFT)
#define P4_CCCR_ESEL(v) ((v) << P4_CCCR_ESCR_SELECT_SHIFT)
-/* Non HT mask */
-#define P4_CCCR_MASK \
- (P4_CCCR_OVF | \
- P4_CCCR_CASCADE | \
- P4_CCCR_OVF_PMI_T0 | \
- P4_CCCR_FORCE_OVF | \
- P4_CCCR_EDGE | \
- P4_CCCR_THRESHOLD_MASK | \
- P4_CCCR_COMPLEMENT | \
- P4_CCCR_COMPARE | \
- P4_CCCR_ESCR_SELECT_MASK | \
- P4_CCCR_ENABLE)
-
-/* HT mask */
-#define P4_CCCR_MASK_HT \
- (P4_CCCR_MASK | P4_CCCR_OVF_PMI_T1 | P4_CCCR_THREAD_ANY)
-
#define P4_GEN_ESCR_EMASK(class, name, bit) \
class##__##name = ((1 << bit) << P4_ESCR_EVENTMASK_SHIFT)
#define P4_ESCR_EMASK_BIT(class, name) class##__##name
@@ -127,6 +97,28 @@
#define P4_CONFIG_HT_SHIFT 63
#define P4_CONFIG_HT (1ULL << P4_CONFIG_HT_SHIFT)
+/*
+ * The bits we allow to pass for RAW events
+ */
+#define P4_CONFIG_MASK_ESCR \
+ P4_ESCR_EVENT_MASK | \
+ P4_ESCR_EVENTMASK_MASK | \
+ P4_ESCR_TAG_MASK | \
+ P4_ESCR_TAG_ENABLE
+
+#define P4_CONFIG_MASK_CCCR \
+ P4_CCCR_EDGE | \
+ P4_CCCR_THRESHOLD_MASK | \
+ P4_CCCR_COMPLEMENT | \
+ P4_CCCR_COMPARE | \
+ P4_CCCR_THREAD_ANY | \
+ P4_CCCR_RESERVED
+
+/* some dangerous bits are reserved for kernel internals */
+#define P4_CONFIG_MASK \
+ (p4_config_pack_escr(P4_CONFIG_MASK_ESCR)) | \
+ (p4_config_pack_cccr(P4_CONFIG_MASK_CCCR))
+
static inline bool p4_is_event_cascaded(u64 config)
{
u32 cccr = p4_config_unpack_cccr(config);
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 325b7bd..69e80c2 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -110,6 +110,8 @@
u16 phys_proc_id;
/* Core id: */
u16 cpu_core_id;
+ /* Compute unit id */
+ u8 compute_unit_id;
/* Index into per_cpu list: */
u16 cpu_index;
#endif
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index c042729..1ca132f 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -59,5 +59,7 @@
extern void check_tsc_sync_target(void);
extern int notsc_setup(char *);
+extern void save_sched_clock_state(void);
+extern void restore_sched_clock_state(void);
#endif /* _ASM_X86_TSC_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 0925676..f49257e 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -11,6 +11,8 @@
CFLAGS_REMOVE_tsc.o = -pg
CFLAGS_REMOVE_rtc.o = -pg
CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
+CFLAGS_REMOVE_pvclock.o = -pg
+CFLAGS_REMOVE_kvmclock.o = -pg
CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_early_printk.o = -pg
endif
@@ -32,7 +34,8 @@
obj-y := process_$(BITS).o signal.o entry_$(BITS).o
obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
obj-y += time.o ioport.o ldt.o dumpstack.o
-obj-y += setup.o x86_init.o i8259.o irqinit.o
+obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
+obj-$(CONFIG_IRQ_WORK) += irq_work.o
obj-$(CONFIG_X86_VISWS) += visws_quirks.o
obj-$(CONFIG_X86_32) += probe_roms_32.o
obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
@@ -87,7 +90,7 @@
obj-$(CONFIG_HPET_TIMER) += hpet.o
obj-$(CONFIG_APB_TIMER) += apb_timer.o
-obj-$(CONFIG_K8_NB) += k8.o
+obj-$(CONFIG_AMD_NB) += amd_nb.o
obj-$(CONFIG_DEBUG_RODATA_TEST) += test_rodata.o
obj-$(CONFIG_DEBUG_NX_TEST) += test_nx.o
@@ -120,7 +123,6 @@
# 64 bit specific files
ifeq ($(CONFIG_X86_64),y)
obj-$(CONFIG_X86_UV) += tlb_uv.o bios_uv.o uv_irq.o uv_sysfs.o uv_time.o
- obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o
obj-$(CONFIG_AUDIT) += audit_64.o
obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index fb7a5f0..fb16f17 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -61,7 +61,7 @@
unsigned int ecx;
} states[ACPI_PROCESSOR_MAX_POWER];
};
-static struct cstate_entry *cpu_cstate_entry; /* per CPU ptr */
+static struct cstate_entry __percpu *cpu_cstate_entry; /* per CPU ptr */
static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index f65ab8b..a36bb90 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -195,7 +195,7 @@
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
-static void *text_poke_early(void *addr, const void *opcode, size_t len);
+void *text_poke_early(void *addr, const void *opcode, size_t len);
/* Replace instructions with better alternatives for this CPU type.
This runs before SMP is initialized to avoid SMP problems with
@@ -522,7 +522,7 @@
* instructions. And on the local CPU you need to be protected against NMI or MCE
* handlers seeing an inconsistent instruction while you patch.
*/
-static void *__init_or_module text_poke_early(void *addr, const void *opcode,
+void *__init_or_module text_poke_early(void *addr, const void *opcode,
size_t len)
{
unsigned long flags;
@@ -637,7 +637,72 @@
tpp.len = len;
atomic_set(&stop_machine_first, 1);
wrote_text = 0;
- stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
+ /* Use __stop_machine() because the caller already got online_cpus. */
+ __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
return addr;
}
+#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
+
+unsigned char ideal_nop5[IDEAL_NOP_SIZE_5];
+
+void __init arch_init_ideal_nop5(void)
+{
+ extern const unsigned char ftrace_test_p6nop[];
+ extern const unsigned char ftrace_test_nop5[];
+ extern const unsigned char ftrace_test_jmp[];
+ int faulted = 0;
+
+ /*
+ * There is no good nop for all x86 archs.
+ * We will default to using the P6_NOP5, but first we
+ * will test to make sure that the nop will actually
+ * work on this CPU. If it faults, we will then
+ * go to a less efficient 5-byte nop. If that fails
+ * we then just use a jmp as our nop. This isn't the most
+ * efficient nop, but we cannot use a multi-part nop
+ * since we would then risk being preempted in the middle
+ * of that nop, and if we then enabled tracing, it might
+ * cause a system crash.
+ *
+ * TODO: check the cpuid to determine the best nop.
+ */
+ asm volatile (
+ "ftrace_test_jmp:"
+ "jmp ftrace_test_p6nop\n"
+ "nop\n"
+ "nop\n"
+ "nop\n" /* 2 byte jmp + 3 bytes */
+ "ftrace_test_p6nop:"
+ P6_NOP5
+ "jmp 1f\n"
+ "ftrace_test_nop5:"
+ ".byte 0x66,0x66,0x66,0x66,0x90\n"
+ "1:"
+ ".section .fixup, \"ax\"\n"
+ "2: movl $1, %0\n"
+ " jmp ftrace_test_nop5\n"
+ "3: movl $2, %0\n"
+ " jmp 1b\n"
+ ".previous\n"
+ _ASM_EXTABLE(ftrace_test_p6nop, 2b)
+ _ASM_EXTABLE(ftrace_test_nop5, 3b)
+ : "=r"(faulted) : "0" (faulted));
+
+ switch (faulted) {
+ case 0:
+ pr_info("converting mcount calls to 0f 1f 44 00 00\n");
+ memcpy(ideal_nop5, ftrace_test_p6nop, IDEAL_NOP_SIZE_5);
+ break;
+ case 1:
+ pr_info("converting mcount calls to 66 66 66 66 90\n");
+ memcpy(ideal_nop5, ftrace_test_nop5, IDEAL_NOP_SIZE_5);
+ break;
+ case 2:
+ pr_info("converting mcount calls to jmp . + 5\n");
+ memcpy(ideal_nop5, ftrace_test_jmp, IDEAL_NOP_SIZE_5);
+ break;
+ }
+
+}
+#endif
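
For reference, the three 5-byte nop candidates the probe above selects from, using the byte encodings named in the pr_info() messages (a sketch that just prints them; the "prefixed xchg" label is descriptive, not a kernel name):

#include <stdio.h>

int main(void)
{
	/* each candidate is a single atomic 5-byte instruction; a
	   multi-part nop could be preempted halfway through patching */
	static const unsigned char nop5[3][5] = {
		{ 0x0f, 0x1f, 0x44, 0x00, 0x00 },	/* P6_NOP5 */
		{ 0x66, 0x66, 0x66, 0x66, 0x90 },	/* prefixed xchg nop */
		{ 0xe9, 0x00, 0x00, 0x00, 0x00 },	/* jmp . + 5 */
	};

	for (int i = 0; i < 3; i++) {
		for (int j = 0; j < 5; j++)
			printf("%02x ", nop5[i][j]);
		printf("\n");
	}
	return 0;
}
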
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index fa044e1..d2fdb08 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
+ * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
* Author: Joerg Roedel <joerg.roedel@amd.com>
* Leo Duran <leo.duran@amd.com>
*
@@ -1953,6 +1953,7 @@
size_t size,
int dir)
{
+ dma_addr_t flush_addr;
dma_addr_t i, start;
unsigned int pages;
@@ -1960,6 +1961,7 @@
(dma_addr + size > dma_dom->aperture_size))
return;
+ flush_addr = dma_addr;
pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
dma_addr &= PAGE_MASK;
start = dma_addr;
@@ -1974,7 +1976,7 @@
dma_ops_free_addresses(dma_dom, dma_addr, pages);
if (amd_iommu_unmap_flush || dma_dom->need_flush) {
- iommu_flush_pages(&dma_dom->domain, dma_addr, size);
+ iommu_flush_pages(&dma_dom->domain, flush_addr, size);
dma_dom->need_flush = false;
}
}
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 3cc63e2..3cb482e 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
+ * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
* Author: Joerg Roedel <joerg.roedel@amd.com>
* Leo Duran <leo.duran@amd.com>
*
@@ -194,6 +194,39 @@
return 1UL << shift;
}
+/* Access to l1 and l2 indexed register spaces */
+
+static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
+{
+ u32 val;
+
+ pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
+ pci_read_config_dword(iommu->dev, 0xfc, &val);
+ return val;
+}
+
+static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
+{
+ pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
+ pci_write_config_dword(iommu->dev, 0xfc, val);
+ pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
+}
+
+static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
+{
+ u32 val;
+
+ pci_write_config_dword(iommu->dev, 0xf0, address);
+ pci_read_config_dword(iommu->dev, 0xf4, &val);
+ return val;
+}
+
+static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
+{
+ pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
+ pci_write_config_dword(iommu->dev, 0xf4, val);
+}
+
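
These helpers use a classic index/data-pair scheme: one config-space dword selects the indirect address (bit 31 for L1, bit 8 for L2, acting as a write enable), and the neighbouring dword carries the data. A userspace model of the L1 flavour, a sketch against a faked config space that mirrors the sequencing above:

#include <stdio.h>
#include <stdint.h>

static uint32_t cfg[64];		/* fake config space, dword-indexed */
static uint32_t l1[8][0x100];		/* fake indirect L1 register file */

static void cfg_write(unsigned int off, uint32_t val)
{
	cfg[off / 4] = val;
	if (off == 0xfc) {			/* data port */
		uint32_t idx = cfg[0xf8 / 4];
		if (idx & (1u << 31))		/* honoured only with write enable */
			l1[(idx >> 16) & 7][idx & 0xff] = val;
	}
}

static uint32_t cfg_read(unsigned int off)
{
	if (off == 0xfc) {
		uint32_t idx = cfg[0xf8 / 4];
		return l1[(idx >> 16) & 7][idx & 0xff];
	}
	return cfg[off / 4];
}

int main(void)
{
	/* mirrors iommu_write_l1(iommu, 2, 0x10, 0x12345678) */
	cfg_write(0xf8, 0x10 | 2 << 16 | 1u << 31);
	cfg_write(0xfc, 0x12345678);
	cfg_write(0xf8, 0x10 | 2 << 16);	/* drop the write enable */

	/* mirrors iommu_read_l1(iommu, 2, 0x10) */
	cfg_write(0xf8, 0x10 | 2 << 16);
	printf("0x%x\n", cfg_read(0xfc));
	return 0;
}
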
/****************************************************************************
*
* AMD IOMMU MMIO register space handling functions
@@ -619,6 +652,7 @@
{
int cap_ptr = iommu->cap_ptr;
u32 range, misc;
+ int i, j;
pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
&iommu->cap);
@@ -632,6 +666,30 @@
iommu->last_device = calc_devid(MMIO_GET_BUS(range),
MMIO_GET_LD(range));
iommu->evt_msi_num = MMIO_MSI_NUM(misc);
+
+ if (!is_rd890_iommu(iommu->dev))
+ return;
+
+ /*
+ * Some rd890 systems may not be fully reconfigured by the BIOS, so
+ * it's necessary for us to store this information so it can be
+ * reprogrammed on resume
+ */
+
+ pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
+ &iommu->stored_addr_lo);
+ pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
+ &iommu->stored_addr_hi);
+
+ /* Low bit locks writes to configuration space */
+ iommu->stored_addr_lo &= ~1;
+
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 0x12; j++)
+ iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
+
+ for (i = 0; i < 0x83; i++)
+ iommu->stored_l2[i] = iommu_read_l2(iommu, i);
}
/*
@@ -649,29 +707,9 @@
struct ivhd_entry *e;
/*
- * First set the recommended feature enable bits from ACPI
- * into the IOMMU control registers
+ * First save the recommended feature enable bits from ACPI
*/
- h->flags & IVHD_FLAG_HT_TUN_EN_MASK ?
- iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
- iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
-
- h->flags & IVHD_FLAG_PASSPW_EN_MASK ?
- iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
- iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
-
- h->flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
- iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
- iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
-
- h->flags & IVHD_FLAG_ISOC_EN_MASK ?
- iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
- iommu_feature_disable(iommu, CONTROL_ISOC_EN);
-
- /*
- * make IOMMU memory accesses cache coherent
- */
- iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
+ iommu->acpi_flags = h->flags;
/*
* Done. Now parse the device entries
@@ -1116,6 +1154,79 @@
}
}
+static void iommu_init_flags(struct amd_iommu *iommu)
+{
+ iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
+ iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
+ iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
+
+ iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
+ iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
+ iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
+
+ iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
+ iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
+ iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
+
+ iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
+ iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
+ iommu_feature_disable(iommu, CONTROL_ISOC_EN);
+
+ /*
+ * make IOMMU memory accesses cache coherent
+ */
+ iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
+}
+
+static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
+{
+ int i, j;
+ u32 ioc_feature_control;
+ struct pci_dev *pdev = NULL;
+
+ /* RD890 BIOSes may not have completely reconfigured the iommu */
+ if (!is_rd890_iommu(iommu->dev))
+ return;
+
+ /*
+ * First, we need to ensure that the iommu is enabled. This is
+ * controlled by a register in the northbridge
+ */
+ pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0));
+
+ if (!pdev)
+ return;
+
+ /* Select Northbridge indirect register 0x75 and enable writing */
+ pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
+ pci_read_config_dword(pdev, 0x64, &ioc_feature_control);
+
+ /* Enable the iommu */
+ if (!(ioc_feature_control & 0x1))
+ pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
+
+ pci_dev_put(pdev);
+
+ /* Restore the iommu BAR */
+ pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
+ iommu->stored_addr_lo);
+ pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
+ iommu->stored_addr_hi);
+
+ /* Restore the l1 indirect regs for each of the 6 l1s */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 0x12; j++)
+ iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);
+
+ /* Restore the l2 indirect regs */
+ for (i = 0; i < 0x83; i++)
+ iommu_write_l2(iommu, i, iommu->stored_l2[i]);
+
+ /* Lock PCI setup registers */
+ pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
+ iommu->stored_addr_lo | 1);
+}
+
/*
* This function finally enables all IOMMUs found in the system after
* they have been initialized
@@ -1126,6 +1237,7 @@
for_each_iommu(iommu) {
iommu_disable(iommu);
+ iommu_init_flags(iommu);
iommu_set_device_table(iommu);
iommu_enable_command_buffer(iommu);
iommu_enable_event_buffer(iommu);
@@ -1150,6 +1262,11 @@
static int amd_iommu_resume(struct sys_device *dev)
{
+ struct amd_iommu *iommu;
+
+ for_each_iommu(iommu)
+ iommu_apply_resume_quirks(iommu);
+
/* re-load the hardware */
enable_iommus();
diff --git a/arch/x86/kernel/k8.c b/arch/x86/kernel/amd_nb.c
similarity index 66%
rename from arch/x86/kernel/k8.c
rename to arch/x86/kernel/amd_nb.c
index 0f7bc20..8f6463d 100644
--- a/arch/x86/kernel/k8.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -8,21 +8,19 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
-#include <asm/k8.h>
-
-int num_k8_northbridges;
-EXPORT_SYMBOL(num_k8_northbridges);
+#include <asm/amd_nb.h>
static u32 *flush_words;
struct pci_device_id k8_nb_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) },
{}
};
EXPORT_SYMBOL(k8_nb_ids);
-struct pci_dev **k8_northbridges;
+struct k8_northbridge_info k8_northbridges;
EXPORT_SYMBOL(k8_northbridges);
static struct pci_dev *next_k8_northbridge(struct pci_dev *dev)
@@ -40,36 +38,45 @@
int i;
struct pci_dev *dev;
- if (num_k8_northbridges)
+ if (k8_northbridges.num)
return 0;
dev = NULL;
while ((dev = next_k8_northbridge(dev)) != NULL)
- num_k8_northbridges++;
+ k8_northbridges.num++;
- k8_northbridges = kmalloc((num_k8_northbridges + 1) * sizeof(void *),
- GFP_KERNEL);
- if (!k8_northbridges)
+ /* some CPU families (e.g. family 0x11) do not support GART */
+ if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
+ boot_cpu_data.x86 == 0x15)
+ k8_northbridges.gart_supported = 1;
+
+ k8_northbridges.nb_misc = kmalloc((k8_northbridges.num + 1) *
+ sizeof(void *), GFP_KERNEL);
+ if (!k8_northbridges.nb_misc)
return -ENOMEM;
- if (!num_k8_northbridges) {
- k8_northbridges[0] = NULL;
+ if (!k8_northbridges.num) {
+ k8_northbridges.nb_misc[0] = NULL;
return 0;
}
- flush_words = kmalloc(num_k8_northbridges * sizeof(u32), GFP_KERNEL);
- if (!flush_words) {
- kfree(k8_northbridges);
- return -ENOMEM;
+ if (k8_northbridges.gart_supported) {
+ flush_words = kmalloc(k8_northbridges.num * sizeof(u32),
+ GFP_KERNEL);
+ if (!flush_words) {
+ kfree(k8_northbridges.nb_misc);
+ return -ENOMEM;
+ }
}
dev = NULL;
i = 0;
while ((dev = next_k8_northbridge(dev)) != NULL) {
- k8_northbridges[i] = dev;
- pci_read_config_dword(dev, 0x9c, &flush_words[i++]);
+ k8_northbridges.nb_misc[i] = dev;
+ if (k8_northbridges.gart_supported)
+ pci_read_config_dword(dev, 0x9c, &flush_words[i++]);
}
- k8_northbridges[i] = NULL;
+ k8_northbridges.nb_misc[i] = NULL;
return 0;
}
EXPORT_SYMBOL_GPL(cache_k8_northbridges);
@@ -93,22 +100,25 @@
unsigned long flags;
static DEFINE_SPINLOCK(gart_lock);
+ if (!k8_northbridges.gart_supported)
+ return;
+
/* Avoid races between AGP and IOMMU. In theory it's not needed
but I'm not sure if the hardware won't lose flush requests
when another is pending. This whole thing is so expensive anyways
that it doesn't matter to serialize more. -AK */
spin_lock_irqsave(&gart_lock, flags);
flushed = 0;
- for (i = 0; i < num_k8_northbridges; i++) {
- pci_write_config_dword(k8_northbridges[i], 0x9c,
+ for (i = 0; i < k8_northbridges.num; i++) {
+ pci_write_config_dword(k8_northbridges.nb_misc[i], 0x9c,
flush_words[i]|1);
flushed++;
}
- for (i = 0; i < num_k8_northbridges; i++) {
+ for (i = 0; i < k8_northbridges.num; i++) {
u32 w;
/* Make sure the hardware actually executed the flush*/
for (;;) {
- pci_read_config_dword(k8_northbridges[i],
+ pci_read_config_dword(k8_northbridges.nb_misc[i],
0x9c, &w);
if (!(w & 1))
break;
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 8dd7780..08f75fb 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -343,7 +343,7 @@
/* Don't register boot CPU clockevent */
cpu = smp_processor_id();
- if (cpu == boot_cpu_id)
+ if (!cpu)
return;
/*
* We need to calculate the scaled math multiplication factor for
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index a2e0caf..377f5db 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -27,7 +27,7 @@
#include <asm/gart.h>
#include <asm/pci-direct.h>
#include <asm/dma.h>
-#include <asm/k8.h>
+#include <asm/amd_nb.h>
#include <asm/x86_init.h>
int gart_iommu_aperture;
@@ -307,7 +307,7 @@
continue;
ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL);
- aper_enabled = ctl & AMD64_GARTEN;
+ aper_enabled = ctl & GARTEN;
aper_order = (ctl >> 1) & 7;
aper_size = (32 * 1024 * 1024) << aper_order;
aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff;
@@ -362,7 +362,7 @@
continue;
ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL);
- ctl &= ~AMD64_GARTEN;
+ ctl &= ~GARTEN;
write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
}
}
@@ -505,8 +505,13 @@
/* Fix up the north bridges */
for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
- int bus;
- int dev_base, dev_limit;
+ int bus, dev_base, dev_limit;
+
+ /*
+ * Don't enable translation yet but enable GART IO and CPU
+ * accesses and set DISTLBWALKPRB since GART table memory is UC.
+ */
+ u32 ctl = DISTLBWALKPRB | aper_order << 1;
bus = bus_dev_ranges[i].bus;
dev_base = bus_dev_ranges[i].dev_base;
@@ -515,10 +520,7 @@
if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00)))
continue;
- /* Don't enable translation yet. That is done later.
- Assume this BIOS didn't initialise the GART so
- just overwrite all previous bits */
- write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, aper_order << 1);
+ write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
write_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE, aper_alloc >> 25);
}
}
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index f1efeba..f1e7894 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -162,7 +162,7 @@
cfg = irq_cfgx;
count = ARRAY_SIZE(irq_cfgx);
- node= cpu_to_node(boot_cpu_id);
+ node = cpu_to_node(0);
for (i = 0; i < count; i++) {
desc = irq_to_desc(i);
@@ -306,14 +306,19 @@
old_cfg = old_desc->chip_data;
- memcpy(cfg, old_cfg, sizeof(struct irq_cfg));
+ cfg->vector = old_cfg->vector;
+ cfg->move_in_progress = old_cfg->move_in_progress;
+ cpumask_copy(cfg->domain, old_cfg->domain);
+ cpumask_copy(cfg->old_domain, old_cfg->old_domain);
init_copy_irq_2_pin(old_cfg, cfg, node);
}
-static void free_irq_cfg(struct irq_cfg *old_cfg)
+static void free_irq_cfg(struct irq_cfg *cfg)
{
- kfree(old_cfg);
+ free_cpumask_var(cfg->domain);
+ free_cpumask_var(cfg->old_domain);
+ kfree(cfg);
}
void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
@@ -1483,7 +1488,7 @@
int notcon = 0;
struct irq_desc *desc;
struct irq_cfg *cfg;
- int node = cpu_to_node(boot_cpu_id);
+ int node = cpu_to_node(0);
apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
@@ -1548,7 +1553,7 @@
void setup_IO_APIC_irq_extra(u32 gsi)
{
int apic_id = 0, pin, idx, irq;
- int node = cpu_to_node(boot_cpu_id);
+ int node = cpu_to_node(0);
struct irq_desc *desc;
struct irq_cfg *cfg;
@@ -2927,7 +2932,7 @@
{
struct irq_desc *desc = irq_to_desc(0);
struct irq_cfg *cfg = desc->chip_data;
- int node = cpu_to_node(boot_cpu_id);
+ int node = cpu_to_node(0);
int apic1, pin1, apic2, pin2;
unsigned long flags;
int no_pin1 = 0;
@@ -3281,7 +3286,7 @@
int create_irq(void)
{
- int node = cpu_to_node(boot_cpu_id);
+ int node = cpu_to_node(0);
unsigned int irq_want;
int irq;
@@ -3903,7 +3908,7 @@
if (dev)
node = dev_to_node(dev);
else
- node = cpu_to_node(boot_cpu_id);
+ node = cpu_to_node(0);
desc = irq_to_desc_alloc_node(irq, node);
if (!desc) {
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 7b598b8..f744f54 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -698,9 +698,11 @@
for (j = 0; j < 64; j++) {
if (!test_bit(j, &present))
continue;
- uv_blade_info[blade].pnode = (i * 64 + j);
+ pnode = (i * 64 + j);
+ uv_blade_info[blade].pnode = pnode;
uv_blade_info[blade].nr_possible_cpus = 0;
uv_blade_info[blade].nr_online_cpus = 0;
+ max_pnode = max(pnode, max_pnode);
blade++;
}
}
@@ -738,7 +740,6 @@
uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid);
uv_node_to_blade[nid] = blade;
uv_cpu_to_blade[cpu] = blade;
- max_pnode = max(pnode, max_pnode);
}
/* Add blade/pnode info for nodes without cpus */
@@ -750,7 +751,6 @@
pnode = (paddr >> m_val) & pnode_mask;
blade = boot_pnode_to_blade(pnode);
uv_node_to_blade[nid] = blade;
- max_pnode = max(pnode, max_pnode);
}
map_gru_high(max_pnode);
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 0f0ace5d..9e093f8 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -148,7 +148,7 @@
{
#ifdef CONFIG_SMP
/* calling is from identify_secondary_cpu() ? */
- if (c->cpu_index == boot_cpu_id)
+ if (!c->cpu_index)
return;
/*
@@ -253,37 +253,51 @@
#endif
/*
- * Fixup core topology information for AMD multi-node processors.
- * Assumption: Number of cores in each internal node is the same.
+ * Fixup core topology information for
+ * (1) AMD multi-node processors
+ * Assumption: Number of cores in each internal node is the same.
+ * (2) AMD processors supporting compute units
*/
#ifdef CONFIG_X86_HT
-static void __cpuinit amd_fixup_dcm(struct cpuinfo_x86 *c)
+static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
{
- unsigned long long value;
- u32 nodes, cores_per_node;
+ u32 nodes;
+ u8 node_id;
int cpu = smp_processor_id();
- if (!cpu_has(c, X86_FEATURE_NODEID_MSR))
+ /* get information required for multi-node processors */
+ if (cpu_has(c, X86_FEATURE_TOPOEXT)) {
+ u32 eax, ebx, ecx, edx;
+
+ cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
+ nodes = ((ecx >> 8) & 7) + 1;
+ node_id = ecx & 7;
+
+ /* get compute unit information */
+ smp_num_siblings = ((ebx >> 8) & 3) + 1;
+ c->compute_unit_id = ebx & 0xff;
+ } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
+ u64 value;
+
+ rdmsrl(MSR_FAM10H_NODE_ID, value);
+ nodes = ((value >> 3) & 7) + 1;
+ node_id = value & 7;
+ } else
return;
- /* fixup topology information only once for a core */
- if (cpu_has(c, X86_FEATURE_AMD_DCM))
- return;
+ /* fixup multi-node processor information */
+ if (nodes > 1) {
+ u32 cores_per_node;
- rdmsrl(MSR_FAM10H_NODE_ID, value);
+ set_cpu_cap(c, X86_FEATURE_AMD_DCM);
+ cores_per_node = c->x86_max_cores / nodes;
- nodes = ((value >> 3) & 7) + 1;
- if (nodes == 1)
- return;
+ /* store NodeID, use llc_shared_map to store sibling info */
+ per_cpu(cpu_llc_id, cpu) = node_id;
- set_cpu_cap(c, X86_FEATURE_AMD_DCM);
- cores_per_node = c->x86_max_cores / nodes;
-
- /* store NodeID, use llc_shared_map to store sibling info */
- per_cpu(cpu_llc_id, cpu) = value & 7;
-
- /* fixup core id to be in range from 0 to (cores_per_node - 1) */
- c->cpu_core_id = c->cpu_core_id % cores_per_node;
+ /* core id to be in range from 0 to (cores_per_node - 1) */
+ c->cpu_core_id = c->cpu_core_id % cores_per_node;
+ }
}
#endif
@@ -304,9 +318,7 @@
c->phys_proc_id = c->initial_apicid >> bits;
/* use socket ID also for last level cache */
per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
- /* fixup topology information on multi-node processors */
- if ((c->x86 == 0x10) && (c->x86_model == 9))
- amd_fixup_dcm(c);
+ amd_get_topology(c);
#endif
}
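
For reference, the topology fields amd_get_topology() pulls out of CPUID leaf 0x8000001e can be checked from user space. A minimal sketch, assuming GCC's cpuid.h and a TOPOEXT-capable CPU; the variable names are illustrative, only the bit extraction mirrors the patch:

    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
    	unsigned int eax, ebx, ecx, edx;
    	unsigned int nodes, node_id, cu_id;

    	if (!__get_cpuid(0x8000001e, &eax, &ebx, &ecx, &edx))
    		return 1;	/* leaf not available on this CPU */

    	/* same field extraction as amd_get_topology() above */
    	nodes   = ((ecx >> 8) & 7) + 1;	/* nodes per processor */
    	node_id = ecx & 7;
    	cu_id   = ebx & 0xff;		/* compute unit id */

    	printf("node %u/%u, compute unit %u, %u cores per CU\n",
    	       node_id, nodes, cu_id, ((ebx >> 8) & 3) + 1);
    	return 0;
    }
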
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 927e630..92af7b4 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -665,7 +665,7 @@
this_cpu->c_early_init(c);
#ifdef CONFIG_SMP
- c->cpu_index = boot_cpu_id;
+ c->cpu_index = 0;
#endif
filter_cpuid_features(c, false);
}
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index c16456b..e765633 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -34,5 +34,6 @@
extern void get_cpu_cap(struct cpuinfo_x86 *c);
extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
+extern void get_cpu_cap(struct cpuinfo_x86 *c);
#endif
diff --git a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
index 994230d..4f6f679 100644
--- a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
@@ -368,16 +368,22 @@
return -ENODEV;
out_obj = output.pointer;
- if (out_obj->type != ACPI_TYPE_BUFFER)
- return -ENODEV;
+ if (out_obj->type != ACPI_TYPE_BUFFER) {
+ ret = -ENODEV;
+ goto out_free;
+ }
errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
- if (errors)
- return -ENODEV;
+ if (errors) {
+ ret = -ENODEV;
+ goto out_free;
+ }
supported = *((u32 *)(out_obj->buffer.pointer + 4));
- if (!(supported & 0x1))
- return -ENODEV;
+ if (!(supported & 0x1)) {
+ ret = -ENODEV;
+ goto out_free;
+ }
out_free:
kfree(output.pointer);
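
The pcc-cpufreq hunk above converts early returns into jumps to a single out_free label so the ACPI output buffer is always released. The idiom in isolation, as a hedged user-space sketch with placeholder names:

    #include <stdlib.h>

    /* hypothetical helpers standing in for the ACPI calls in the patch */
    struct buffer { void *pointer; };
    static int fetch(struct buffer *b) { b->pointer = malloc(16); return b->pointer ? 0 : -1; }
    static int looks_valid(const struct buffer *b) { return b->pointer != NULL; }

    static int probe(void)
    {
    	struct buffer output;
    	int ret = 0;

    	if (fetch(&output))
    		return -1;	/* nothing allocated yet: plain return is fine */

    	if (!looks_valid(&output)) {
    		ret = -1;	/* record the error ... */
    		goto out_free;	/* ... but still run the cleanup */
    	}

    	/* further validation goes here, each failure jumping to out_free */

    out_free:
    	free(output.pointer);	/* single point that releases the buffer */
    	return ret;
    }

    int main(void) { return probe() ? 1 : 0; }
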
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index b438944..695f177 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -170,7 +170,7 @@
{
#ifdef CONFIG_SMP
/* calling is from identify_secondary_cpu() ? */
- if (c->cpu_index == boot_cpu_id)
+ if (!c->cpu_index)
return;
/*
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 898c2f4..12cd823 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -17,7 +17,7 @@
#include <asm/processor.h>
#include <linux/smp.h>
-#include <asm/k8.h>
+#include <asm/amd_nb.h>
#include <asm/smp.h>
#define LVL_1_INST 1
@@ -306,7 +306,7 @@
ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};
-#ifdef CONFIG_CPU_SUP_AMD
+#ifdef CONFIG_AMD_NB
/*
* L3 cache descriptors
@@ -369,7 +369,7 @@
return;
/* not in virtualized environments */
- if (num_k8_northbridges == 0)
+ if (k8_northbridges.num == 0)
return;
/*
@@ -377,7 +377,7 @@
* never freed but this is done only on shutdown so it doesn't matter.
*/
if (!l3_caches) {
- int size = num_k8_northbridges * sizeof(struct amd_l3_cache *);
+ int size = k8_northbridges.num * sizeof(struct amd_l3_cache *);
l3_caches = kzalloc(size, GFP_ATOMIC);
if (!l3_caches)
@@ -556,12 +556,12 @@
static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
show_cache_disable_1, store_cache_disable_1);
-#else /* CONFIG_CPU_SUP_AMD */
+#else /* CONFIG_AMD_NB */
static void __cpuinit
amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf, int index)
{
};
-#endif /* CONFIG_CPU_SUP_AMD */
+#endif /* CONFIG_AMD_NB */
static int
__cpuinit cpuid4_cache_lookup_regs(int index,
@@ -1000,7 +1000,7 @@
static struct attribute *default_l3_attrs[] = {
DEFAULT_SYSFS_CACHE_ATTRS,
-#ifdef CONFIG_CPU_SUP_AMD
+#ifdef CONFIG_AMD_NB
&cache_disable_0.attr,
&cache_disable_1.attr,
#endif
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 224392d..39aaee5 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -141,6 +141,7 @@
address = (low & MASK_BLKPTR_LO) >> 21;
if (!address)
break;
+
address += MCG_XBLK_ADDR;
} else
++address;
@@ -148,12 +149,8 @@
if (rdmsr_safe(address, &low, &high))
break;
- if (!(high & MASK_VALID_HI)) {
- if (block)
- continue;
- else
- break;
- }
+ if (!(high & MASK_VALID_HI))
+ continue;
if (!(high & MASK_CNTP_HI) ||
(high & MASK_LOCKED_HI))
@@ -530,7 +527,7 @@
err = -ENOMEM;
goto out;
}
- if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
+ if (!zalloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
kfree(b);
err = -ENOMEM;
goto out;
@@ -543,7 +540,7 @@
#ifndef CONFIG_SMP
cpumask_setall(b->cpus);
#else
- cpumask_copy(b->cpus, c->llc_shared_map);
+ cpumask_set_cpu(cpu, b->cpus);
#endif
per_cpu(threshold_banks, cpu)[bank] = b;
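
Switching to zalloc_cpumask_var() matters here because the mask is now populated with a single cpumask_set_cpu() instead of a full cpumask_copy(), so any stale bits from the allocator would leak through. A hedged user-space analogue:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
    	/* zalloc_cpumask_var ~ calloc: the mask starts out empty */
    	unsigned long *mask = calloc(1, sizeof(*mask));

    	if (!mask)
    		return 1;

    	*mask |= 1ul << 2;		/* ~ cpumask_set_cpu(2, mask) */
    	printf("mask = %#lx\n", *mask);	/* 0x4: only CPU 2 is set */

    	free(mask);
    	return 0;
    }
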
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index c2a8b26..169d880 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -202,10 +202,11 @@
#ifdef CONFIG_SYSFS
/* Add/Remove thermal_throttle interface for CPU device: */
-static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev)
+static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev,
+ unsigned int cpu)
{
int err;
- struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
err = sysfs_create_group(&sys_dev->kobj, &thermal_attr_group);
if (err)
@@ -215,7 +216,7 @@
err = sysfs_add_file_to_group(&sys_dev->kobj,
&attr_core_power_limit_count.attr,
thermal_attr_group.name);
- if (cpu_has(c, X86_FEATURE_PTS))
+ if (cpu_has(c, X86_FEATURE_PTS)) {
err = sysfs_add_file_to_group(&sys_dev->kobj,
&attr_package_throttle_count.attr,
thermal_attr_group.name);
@@ -223,6 +224,7 @@
err = sysfs_add_file_to_group(&sys_dev->kobj,
&attr_package_power_limit_count.attr,
thermal_attr_group.name);
+ }
return err;
}
@@ -251,7 +253,7 @@
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
mutex_lock(&therm_cpu_lock);
- err = thermal_throttle_add_dev(sys_dev);
+ err = thermal_throttle_add_dev(sys_dev, cpu);
mutex_unlock(&therm_cpu_lock);
WARN_ON(err);
break;
@@ -287,7 +289,7 @@
#endif
/* connect live CPUs to sysfs */
for_each_online_cpu(cpu) {
- err = thermal_throttle_add_dev(get_cpu_sysdev(cpu));
+ err = thermal_throttle_add_dev(get_cpu_sysdev(cpu), cpu);
WARN_ON(err);
}
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index c5f59d0..ac140c7 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -827,7 +827,7 @@
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
return 0;
- if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
+ if (boot_cpu_data.x86 < 0xf)
return 0;
/* In case some hypervisor doesn't pass SYSCFG through: */
if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index f2da20f..fe73c18 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -102,6 +102,7 @@
*/
struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+ unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
int enabled;
int n_events;
@@ -530,7 +531,7 @@
/*
* Setup the hardware configuration for a given attr_type
*/
-static int __hw_perf_event_init(struct perf_event *event)
+static int __x86_pmu_event_init(struct perf_event *event)
{
int err;
@@ -583,7 +584,7 @@
}
}
-void hw_perf_disable(void)
+static void x86_pmu_disable(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -618,7 +619,7 @@
}
}
-static const struct pmu pmu;
+static struct pmu pmu;
static inline int is_x86_event(struct perf_event *event)
{
@@ -800,10 +801,10 @@
hwc->last_tag == cpuc->tags[i];
}
-static int x86_pmu_start(struct perf_event *event);
-static void x86_pmu_stop(struct perf_event *event);
+static void x86_pmu_start(struct perf_event *event, int flags);
+static void x86_pmu_stop(struct perf_event *event, int flags);
-void hw_perf_enable(void)
+static void x86_pmu_enable(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct perf_event *event;
@@ -839,7 +840,14 @@
match_prev_assignment(hwc, cpuc, i))
continue;
- x86_pmu_stop(event);
+ /*
+ * Ensure we don't accidentally enable a stopped
+ * counter simply because we rescheduled.
+ */
+ if (hwc->state & PERF_HES_STOPPED)
+ hwc->state |= PERF_HES_ARCH;
+
+ x86_pmu_stop(event, PERF_EF_UPDATE);
}
for (i = 0; i < cpuc->n_events; i++) {
@@ -851,7 +859,10 @@
else if (i < n_running)
continue;
- x86_pmu_start(event);
+ if (hwc->state & PERF_HES_ARCH)
+ continue;
+
+ x86_pmu_start(event, PERF_EF_RELOAD);
}
cpuc->n_added = 0;
perf_events_lapic_init();
@@ -952,15 +963,12 @@
}
/*
- * activate a single event
+ * Add a single event to the PMU.
*
* The event is added to the group of enabled events
 * but only if it can be scheduled with existing events.
- *
- * Called with PMU disabled. If successful and return value 1,
- * then guaranteed to call perf_enable() and hw_perf_enable()
*/
-static int x86_pmu_enable(struct perf_event *event)
+static int x86_pmu_add(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc;
@@ -969,57 +977,67 @@
hwc = &event->hw;
+ perf_pmu_disable(event->pmu);
n0 = cpuc->n_events;
- n = collect_events(cpuc, event, false);
- if (n < 0)
- return n;
+ ret = n = collect_events(cpuc, event, false);
+ if (ret < 0)
+ goto out;
+
+ hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+ if (!(flags & PERF_EF_START))
+ hwc->state |= PERF_HES_ARCH;
/*
* If group events scheduling transaction was started,
 * skip the schedulability test here, it will be performed
- * at commit time(->commit_txn) as a whole
+ * at commit time (->commit_txn) as a whole
*/
if (cpuc->group_flag & PERF_EVENT_TXN)
- goto out;
+ goto done_collect;
ret = x86_pmu.schedule_events(cpuc, n, assign);
if (ret)
- return ret;
+ goto out;
/*
* copy new assignment, now we know it is possible
* will be used by hw_perf_enable()
*/
memcpy(cpuc->assign, assign, n*sizeof(int));
-out:
+done_collect:
cpuc->n_events = n;
cpuc->n_added += n - n0;
cpuc->n_txn += n - n0;
- return 0;
+ ret = 0;
+out:
+ perf_pmu_enable(event->pmu);
+ return ret;
}
-static int x86_pmu_start(struct perf_event *event)
+static void x86_pmu_start(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx = event->hw.idx;
- if (idx == -1)
- return -EAGAIN;
+ if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+ return;
- x86_perf_event_set_period(event);
+ if (WARN_ON_ONCE(idx == -1))
+ return;
+
+ if (flags & PERF_EF_RELOAD) {
+ WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+ x86_perf_event_set_period(event);
+ }
+
+ event->hw.state = 0;
+
cpuc->events[idx] = event;
__set_bit(idx, cpuc->active_mask);
+ __set_bit(idx, cpuc->running);
x86_pmu.enable(event);
perf_event_update_userpage(event);
-
- return 0;
-}
-
-static void x86_pmu_unthrottle(struct perf_event *event)
-{
- int ret = x86_pmu_start(event);
- WARN_ON_ONCE(ret);
}
void perf_event_print_debug(void)
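
The x86_pmu_add()/x86_pmu_start() rework above replaces the old start-returns-error protocol with explicit PERF_HES_* state bits on each event. A toy model of that state machine; the flag names are copied from the patch, everything else is hypothetical:

    #include <assert.h>

    #define PERF_HES_STOPPED  (1 << 0)	/* counter is not on the hardware */
    #define PERF_HES_UPTODATE (1 << 1)	/* the event count is current */
    #define PERF_HES_ARCH     (1 << 2)	/* do not auto-start on enable */

    #define PERF_EF_START  0x01
    #define PERF_EF_RELOAD 0x02
    #define PERF_EF_UPDATE 0x04

    struct toy_event { int state; };

    static void toy_add(struct toy_event *e, int flags)
    {
    	/* freshly added events start stopped, with a current count */
    	e->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
    	if (!(flags & PERF_EF_START))
    		e->state |= PERF_HES_ARCH;	/* caller starts it later */
    }

    static void toy_start(struct toy_event *e, int flags)
    {
    	assert(e->state & PERF_HES_STOPPED);
    	if (flags & PERF_EF_RELOAD)
    		assert(e->state & PERF_HES_UPTODATE);
    	e->state = 0;				/* running */
    }

    static void toy_stop(struct toy_event *e, int flags)
    {
    	e->state |= PERF_HES_STOPPED;
    	if ((flags & PERF_EF_UPDATE) && !(e->state & PERF_HES_UPTODATE))
    		e->state |= PERF_HES_UPTODATE;	/* drain the count once */
    }

    int main(void)
    {
    	struct toy_event e;

    	toy_add(&e, PERF_EF_START);
    	toy_start(&e, PERF_EF_RELOAD);
    	toy_stop(&e, PERF_EF_UPDATE);
    	return 0;
    }
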
@@ -1076,27 +1094,29 @@
local_irq_restore(flags);
}
-static void x86_pmu_stop(struct perf_event *event)
+static void x86_pmu_stop(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
- int idx = hwc->idx;
- if (!__test_and_clear_bit(idx, cpuc->active_mask))
- return;
+ if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
+ x86_pmu.disable(event);
+ cpuc->events[hwc->idx] = NULL;
+ WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+ hwc->state |= PERF_HES_STOPPED;
+ }
- x86_pmu.disable(event);
-
- /*
- * Drain the remaining delta count out of a event
- * that we are disabling:
- */
- x86_perf_event_update(event);
-
- cpuc->events[idx] = NULL;
+ if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+ /*
+ * Drain the remaining delta count out of an event
+ * that we are disabling:
+ */
+ x86_perf_event_update(event);
+ hwc->state |= PERF_HES_UPTODATE;
+ }
}
-static void x86_pmu_disable(struct perf_event *event)
+static void x86_pmu_del(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int i;
@@ -1109,7 +1129,7 @@
if (cpuc->group_flag & PERF_EVENT_TXN)
return;
- x86_pmu_stop(event);
+ x86_pmu_stop(event, PERF_EF_UPDATE);
for (i = 0; i < cpuc->n_events; i++) {
if (event == cpuc->event_list[i]) {
@@ -1132,7 +1152,6 @@
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
struct perf_event *event;
- struct hw_perf_event *hwc;
int idx, handled = 0;
u64 val;
@@ -1141,11 +1160,18 @@
cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
- if (!test_bit(idx, cpuc->active_mask))
+ if (!test_bit(idx, cpuc->active_mask)) {
+ /*
+ * Though we deactivated the counter, some CPUs
+ * might still deliver interrupts that were
+ * already in flight. Catch them:
+ */
+ if (__test_and_clear_bit(idx, cpuc->running))
+ handled++;
continue;
+ }
event = cpuc->events[idx];
- hwc = &event->hw;
val = x86_perf_event_update(event);
if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
@@ -1154,14 +1180,14 @@
/*
* event overflow
*/
- handled = 1;
+ handled++;
data.period = event->hw.last_period;
if (!x86_perf_event_set_period(event))
continue;
if (perf_event_overflow(event, 1, &data, regs))
- x86_pmu_stop(event);
+ x86_pmu_stop(event, 0);
}
if (handled)
@@ -1170,25 +1196,6 @@
return handled;
}
-void smp_perf_pending_interrupt(struct pt_regs *regs)
-{
- irq_enter();
- ack_APIC_irq();
- inc_irq_stat(apic_pending_irqs);
- perf_event_do_pending();
- irq_exit();
-}
-
-void set_perf_event_pending(void)
-{
-#ifdef CONFIG_X86_LOCAL_APIC
- if (!x86_pmu.apic || !x86_pmu_initialized())
- return;
-
- apic->send_IPI_self(LOCAL_PENDING_VECTOR);
-#endif
-}
-
void perf_events_lapic_init(void)
{
if (!x86_pmu.apic || !x86_pmu_initialized())
@@ -1200,12 +1207,20 @@
apic_write(APIC_LVTPC, APIC_DM_NMI);
}
+struct pmu_nmi_state {
+ unsigned int marked;
+ int handled;
+};
+
+static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
+
static int __kprobes
perf_event_nmi_handler(struct notifier_block *self,
unsigned long cmd, void *__args)
{
struct die_args *args = __args;
- struct pt_regs *regs;
+ unsigned int this_nmi;
+ int handled;
if (!atomic_read(&active_events))
return NOTIFY_DONE;
@@ -1214,22 +1229,47 @@
case DIE_NMI:
case DIE_NMI_IPI:
break;
-
+ case DIE_NMIUNKNOWN:
+ this_nmi = percpu_read(irq_stat.__nmi_count);
+ if (this_nmi != __get_cpu_var(pmu_nmi).marked)
+ /* let the kernel handle the unknown nmi */
+ return NOTIFY_DONE;
+ /*
+ * This one is a PMU back-to-back nmi. Two events
+ * trigger 'simultaneously' raising two back-to-back
+ * NMIs. If the first NMI handles both, the latter
+ * will be empty and daze the CPU. So, we drop it to
+ * avoid false-positive 'unknown nmi' messages.
+ */
+ return NOTIFY_STOP;
default:
return NOTIFY_DONE;
}
- regs = args->regs;
-
apic_write(APIC_LVTPC, APIC_DM_NMI);
- /*
- * Can't rely on the handled return value to say it was our NMI, two
- * events could trigger 'simultaneously' raising two back-to-back NMIs.
- *
- * If the first NMI handles both, the latter will be empty and daze
- * the CPU.
- */
- x86_pmu.handle_irq(regs);
+
+ handled = x86_pmu.handle_irq(args->regs);
+ if (!handled)
+ return NOTIFY_DONE;
+
+ this_nmi = percpu_read(irq_stat.__nmi_count);
+ if ((handled > 1) ||
+ /* the next nmi could be a back-to-back nmi */
+ ((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
+ (__get_cpu_var(pmu_nmi).handled > 1))) {
+ /*
+ * We could have two subsequent back-to-back nmis: The
+ * first handles more than one counter, the 2nd
+ * handles only one counter and the 3rd handles no
+ * counter.
+ *
+ * This is the 2nd nmi because the previous was
+ * handling more than one counter. We will mark the
+ * next (3rd) and then drop it if unhandled.
+ */
+ __get_cpu_var(pmu_nmi).marked = this_nmi + 1;
+ __get_cpu_var(pmu_nmi).handled = handled;
+ }
return NOTIFY_STOP;
}
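
The notifier above only claims a DIE_NMIUNKNOWN if it had previously marked the following NMI as a possible PMU back-to-back NMI. A stripped-down model of the marking logic; the names are hypothetical, only the two per-CPU fields mirror struct pmu_nmi_state:

    #include <stdio.h>

    static unsigned int marked;	/* nmi count at which a spurious NMI is expected */
    static int          handled;	/* counters the previous NMI handled */

    static unsigned int nmi_count;	/* stand-in for irq_stat.__nmi_count */

    /* returns 1 if an otherwise-unknown NMI should be swallowed */
    static int is_marked_back_to_back(void)
    {
    	return nmi_count == marked;
    }

    static void pmu_nmi(int counters_handled)
    {
    	nmi_count++;
    	if (counters_handled > 1 ||
    	    (marked == nmi_count && handled > 1)) {
    		/* the next NMI may already have been serviced: mark it */
    		marked  = nmi_count + 1;
    		handled = counters_handled;
    	}
    }

    int main(void)
    {
    	pmu_nmi(2);		/* one NMI services two counters ...   */
    	nmi_count++;		/* ... the second NMI arrives empty     */
    	printf("swallow: %d\n", is_marked_back_to_back());	/* 1 */
    	return 0;
    }
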
@@ -1345,7 +1385,6 @@
x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
}
x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
- perf_max_events = x86_pmu.num_counters;
if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
@@ -1381,6 +1420,7 @@
pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
+ perf_pmu_register(&pmu);
perf_cpu_notifier(x86_pmu_notifier);
}
@@ -1394,10 +1434,11 @@
* Set the flag to make pmu::enable() not perform the
* schedulability test, it will be performed at commit time
*/
-static void x86_pmu_start_txn(const struct pmu *pmu)
+static void x86_pmu_start_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ perf_pmu_disable(pmu);
cpuc->group_flag |= PERF_EVENT_TXN;
cpuc->n_txn = 0;
}
@@ -1407,7 +1448,7 @@
* Clear the flag and pmu::enable() will perform the
* schedulability test.
*/
-static void x86_pmu_cancel_txn(const struct pmu *pmu)
+static void x86_pmu_cancel_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -1417,6 +1458,7 @@
*/
cpuc->n_added -= cpuc->n_txn;
cpuc->n_events -= cpuc->n_txn;
+ perf_pmu_enable(pmu);
}
/*
@@ -1424,7 +1466,7 @@
* Perform the group schedulability test as a whole
* Return 0 if success
*/
-static int x86_pmu_commit_txn(const struct pmu *pmu)
+static int x86_pmu_commit_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int assign[X86_PMC_IDX_MAX];
@@ -1446,22 +1488,10 @@
memcpy(cpuc->assign, assign, n*sizeof(int));
cpuc->group_flag &= ~PERF_EVENT_TXN;
-
+ perf_pmu_enable(pmu);
return 0;
}
-static const struct pmu pmu = {
- .enable = x86_pmu_enable,
- .disable = x86_pmu_disable,
- .start = x86_pmu_start,
- .stop = x86_pmu_stop,
- .read = x86_pmu_read,
- .unthrottle = x86_pmu_unthrottle,
- .start_txn = x86_pmu_start_txn,
- .cancel_txn = x86_pmu_cancel_txn,
- .commit_txn = x86_pmu_commit_txn,
-};
-
/*
* validate that we can schedule this event
*/
@@ -1536,12 +1566,22 @@
return ret;
}
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+int x86_pmu_event_init(struct perf_event *event)
{
- const struct pmu *tmp;
+ struct pmu *tmp;
int err;
- err = __hw_perf_event_init(event);
+ switch (event->attr.type) {
+ case PERF_TYPE_RAW:
+ case PERF_TYPE_HARDWARE:
+ case PERF_TYPE_HW_CACHE:
+ break;
+
+ default:
+ return -ENOENT;
+ }
+
+ err = __x86_pmu_event_init(event);
if (!err) {
/*
* we temporarily connect event to its pmu
@@ -1561,27 +1601,32 @@
if (err) {
if (event->destroy)
event->destroy(event);
- return ERR_PTR(err);
}
- return &pmu;
+ return err;
}
+static struct pmu pmu = {
+ .pmu_enable = x86_pmu_enable,
+ .pmu_disable = x86_pmu_disable,
+
+ .event_init = x86_pmu_event_init,
+
+ .add = x86_pmu_add,
+ .del = x86_pmu_del,
+ .start = x86_pmu_start,
+ .stop = x86_pmu_stop,
+ .read = x86_pmu_read,
+
+ .start_txn = x86_pmu_start_txn,
+ .cancel_txn = x86_pmu_cancel_txn,
+ .commit_txn = x86_pmu_commit_txn,
+};
+
/*
* callchain support
*/
-static inline
-void callchain_store(struct perf_callchain_entry *entry, u64 ip)
-{
- if (entry->nr < PERF_MAX_STACK_DEPTH)
- entry->ip[entry->nr++] = ip;
-}
-
-static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
-static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
-
-
static void
backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
@@ -1602,7 +1647,7 @@
{
struct perf_callchain_entry *entry = data;
- callchain_store(entry, addr);
+ perf_callchain_store(entry, addr);
}
static const struct stacktrace_ops backtrace_ops = {
@@ -1613,11 +1658,15 @@
.walk_stack = print_context_stack_bp,
};
-static void
-perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
- callchain_store(entry, PERF_CONTEXT_KERNEL);
- callchain_store(entry, regs->ip);
+ if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+ /* TODO: We don't support guest os callchain now */
+ return;
+ }
+
+ perf_callchain_store(entry, regs->ip);
dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
}
@@ -1646,7 +1695,7 @@
if (fp < compat_ptr(regs->sp))
break;
- callchain_store(entry, frame.return_address);
+ perf_callchain_store(entry, frame.return_address);
fp = compat_ptr(frame.next_frame);
}
return 1;
@@ -1659,19 +1708,20 @@
}
#endif
-static void
-perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
struct stack_frame frame;
const void __user *fp;
- if (!user_mode(regs))
- regs = task_pt_regs(current);
+ if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+ /* TODO: We don't support guest os callchain now */
+ return;
+ }
fp = (void __user *)regs->bp;
- callchain_store(entry, PERF_CONTEXT_USER);
- callchain_store(entry, regs->ip);
+ perf_callchain_store(entry, regs->ip);
if (perf_callchain_user32(regs, entry))
return;
@@ -1688,52 +1738,11 @@
if ((unsigned long)fp < regs->sp)
break;
- callchain_store(entry, frame.return_address);
+ perf_callchain_store(entry, frame.return_address);
fp = frame.next_frame;
}
}
-static void
-perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
-{
- int is_user;
-
- if (!regs)
- return;
-
- is_user = user_mode(regs);
-
- if (is_user && current->state != TASK_RUNNING)
- return;
-
- if (!is_user)
- perf_callchain_kernel(regs, entry);
-
- if (current->mm)
- perf_callchain_user(regs, entry);
-}
-
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
-{
- struct perf_callchain_entry *entry;
-
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
- /* TODO: We don't support guest os callchain now */
- return NULL;
- }
-
- if (in_nmi())
- entry = &__get_cpu_var(pmc_nmi_entry);
- else
- entry = &__get_cpu_var(pmc_irq_entry);
-
- entry->nr = 0;
-
- perf_do_callchain(regs, entry);
-
- return entry;
-}
-
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
unsigned long ip;
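
Taken together, the perf_event.c changes retire the global hw_perf_enable()/hw_perf_disable() hooks in favour of a struct pmu full of callbacks that is handed to perf_pmu_register(). The shape of that interface, modelled as a hypothetical mini framework rather than the real perf API:

    #include <stdio.h>

    /* cut-down, invented version of the new callback table */
    struct mini_pmu {
    	void (*pmu_enable)(struct mini_pmu *);
    	void (*pmu_disable)(struct mini_pmu *);
    	int  (*add)(struct mini_pmu *, int event, int flags);
    	void (*del)(struct mini_pmu *, int event, int flags);
    };

    static void demo_enable(struct mini_pmu *p)  { printf("enable\n"); }
    static void demo_disable(struct mini_pmu *p) { printf("disable\n"); }

    static int demo_add(struct mini_pmu *p, int event, int flags)
    {
    	/* add disables the PMU around scheduling, as x86_pmu_add()
    	 * now does with perf_pmu_disable()/perf_pmu_enable() */
    	p->pmu_disable(p);
    	printf("add event %d\n", event);
    	p->pmu_enable(p);
    	return 0;
    }

    static void demo_del(struct mini_pmu *p, int event, int flags)
    {
    	printf("del event %d\n", event);
    }

    int main(void)
    {
    	struct mini_pmu pmu = {
    		.pmu_enable  = demo_enable,
    		.pmu_disable = demo_disable,
    		.add         = demo_add,
    		.del         = demo_del,
    	};

    	pmu.add(&pmu, 1, 0);
    	pmu.del(&pmu, 1, 0);
    	return 0;
    }
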
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index c2897b7..46d5844 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -52,7 +52,7 @@
[ C(DTLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
- [ C(RESULT_MISS) ] = 0x0046, /* L1 DTLB and L2 DLTB Miss */
+ [ C(RESULT_MISS) ] = 0x0746, /* L1_DTLB_AND_L2_DLTB_MISS.ALL */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0,
@@ -66,7 +66,7 @@
[ C(ITLB) ] = {
[ C(OP_READ) ] = {
 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */
- [ C(RESULT_MISS) ] = 0x0085, /* Instr. fetch ITLB misses */
+ [ C(RESULT_MISS) ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index d8d86d0..c8f5c08 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -712,22 +712,24 @@
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
int bit, loops;
- u64 ack, status;
+ u64 status;
+ int handled;
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
intel_pmu_disable_all();
- intel_pmu_drain_bts_buffer();
+ handled = intel_pmu_drain_bts_buffer();
status = intel_pmu_get_status();
if (!status) {
intel_pmu_enable_all(0);
- return 0;
+ return handled;
}
loops = 0;
again:
+ intel_pmu_ack_status(status);
if (++loops > 100) {
WARN_ONCE(1, "perfevents: irq loop stuck!\n");
perf_event_print_debug();
@@ -736,19 +738,22 @@
}
inc_irq_stat(apic_perf_irqs);
- ack = status;
intel_pmu_lbr_read();
/*
* PEBS overflow sets bit 62 in the global status register
*/
- if (__test_and_clear_bit(62, (unsigned long *)&status))
+ if (__test_and_clear_bit(62, (unsigned long *)&status)) {
+ handled++;
x86_pmu.drain_pebs(regs);
+ }
for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
struct perf_event *event = cpuc->events[bit];
+ handled++;
+
if (!test_bit(bit, cpuc->active_mask))
continue;
@@ -758,11 +763,9 @@
data.period = event->hw.last_period;
if (perf_event_overflow(event, 1, &data, regs))
- x86_pmu_stop(event);
+ x86_pmu_stop(event, 0);
}
- intel_pmu_ack_status(ack);
-
/*
* Repeat if there is more work to be done:
*/
@@ -772,7 +775,7 @@
done:
intel_pmu_enable_all(0);
- return 1;
+ return handled;
}
static struct event_constraint *
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 18018d1..4977f9c 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -214,7 +214,7 @@
update_debugctlmsr(debugctlmsr);
}
-static void intel_pmu_drain_bts_buffer(void)
+static int intel_pmu_drain_bts_buffer(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct debug_store *ds = cpuc->ds;
@@ -231,16 +231,16 @@
struct pt_regs regs;
if (!event)
- return;
+ return 0;
if (!ds)
- return;
+ return 0;
at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
top = (struct bts_record *)(unsigned long)ds->bts_index;
if (top <= at)
- return;
+ return 0;
ds->bts_index = ds->bts_buffer_base;
@@ -256,7 +256,7 @@
perf_prepare_sample(&header, &data, event, ®s);
if (perf_output_begin(&handle, event, header.size * (top - at), 1, 1))
- return;
+ return 1;
for (; at < top; at++) {
data.ip = at->from;
@@ -270,6 +270,7 @@
/* There's new data available. */
event->hw.interrupts++;
event->pending_kill = POLL_IN;
+ return 1;
}
/*
@@ -491,7 +492,7 @@
regs.flags &= ~PERF_EFLAGS_EXACT;
if (perf_event_overflow(event, 1, &data, ®s))
- x86_pmu_stop(event);
+ x86_pmu_stop(event, 0);
}
static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index febb12c..81400b9 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -18,6 +18,8 @@
struct p4_event_bind {
unsigned int opcode; /* Event code and ESCR selector */
unsigned int escr_msr[2]; /* ESCR MSR for this event */
+ unsigned int escr_emask; /* valid ESCR EventMask bits */
+ unsigned int shared; /* event is shared across threads */
 char cntr[2][P4_CNTR_LIMIT]; /* counter index (offset), -1 on absence */
};
@@ -66,231 +68,435 @@
[P4_EVENT_TC_DELIVER_MODE] = {
.opcode = P4_OPCODE(P4_EVENT_TC_DELIVER_MODE),
.escr_msr = { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DD) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DB) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DI) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, BD) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, BB) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, BI) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, ID),
+ .shared = 1,
.cntr = { {4, 5, -1}, {6, 7, -1} },
},
[P4_EVENT_BPU_FETCH_REQUEST] = {
.opcode = P4_OPCODE(P4_EVENT_BPU_FETCH_REQUEST),
.escr_msr = { MSR_P4_BPU_ESCR0, MSR_P4_BPU_ESCR1 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_BPU_FETCH_REQUEST, TCMISS),
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_ITLB_REFERENCE] = {
.opcode = P4_OPCODE(P4_EVENT_ITLB_REFERENCE),
.escr_msr = { MSR_P4_ITLB_ESCR0, MSR_P4_ITLB_ESCR1 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_ITLB_REFERENCE, HIT) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_ITLB_REFERENCE, MISS) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_ITLB_REFERENCE, HIT_UK),
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_MEMORY_CANCEL] = {
.opcode = P4_OPCODE(P4_EVENT_MEMORY_CANCEL),
.escr_msr = { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_CANCEL, ST_RB_FULL) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_CANCEL, 64K_CONF),
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_MEMORY_COMPLETE] = {
.opcode = P4_OPCODE(P4_EVENT_MEMORY_COMPLETE),
.escr_msr = { MSR_P4_SAAT_ESCR0 , MSR_P4_SAAT_ESCR1 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_COMPLETE, LSC) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_COMPLETE, SSC),
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_LOAD_PORT_REPLAY] = {
.opcode = P4_OPCODE(P4_EVENT_LOAD_PORT_REPLAY),
.escr_msr = { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_LOAD_PORT_REPLAY, SPLIT_LD),
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_STORE_PORT_REPLAY] = {
.opcode = P4_OPCODE(P4_EVENT_STORE_PORT_REPLAY),
.escr_msr = { MSR_P4_SAAT_ESCR0 , MSR_P4_SAAT_ESCR1 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_STORE_PORT_REPLAY, SPLIT_ST),
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_MOB_LOAD_REPLAY] = {
.opcode = P4_OPCODE(P4_EVENT_MOB_LOAD_REPLAY),
.escr_msr = { MSR_P4_MOB_ESCR0, MSR_P4_MOB_ESCR1 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, NO_STA) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, NO_STD) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, PARTIAL_DATA) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, UNALGN_ADDR),
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_PAGE_WALK_TYPE] = {
.opcode = P4_OPCODE(P4_EVENT_PAGE_WALK_TYPE),
.escr_msr = { MSR_P4_PMH_ESCR0, MSR_P4_PMH_ESCR1 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_PAGE_WALK_TYPE, DTMISS) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_PAGE_WALK_TYPE, ITMISS),
+ .shared = 1,
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_BSQ_CACHE_REFERENCE] = {
.opcode = P4_OPCODE(P4_EVENT_BSQ_CACHE_REFERENCE),
.escr_msr = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR1 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITS) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITE) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITM) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITS) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITE) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITM) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_MISS) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_MISS) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, WR_2ndL_MISS),
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_IOQ_ALLOCATION] = {
.opcode = P4_OPCODE(P4_EVENT_IOQ_ALLOCATION),
.escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, DEFAULT) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, ALL_READ) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, ALL_WRITE) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_UC) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WC) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WT) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WP) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WB) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, OWN) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, OTHER) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, PREFETCH),
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_IOQ_ACTIVE_ENTRIES] = { /* shared ESCR */
.opcode = P4_OPCODE(P4_EVENT_IOQ_ACTIVE_ENTRIES),
.escr_msr = { MSR_P4_FSB_ESCR1, MSR_P4_FSB_ESCR1 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, DEFAULT) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, ALL_READ) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, ALL_WRITE) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_UC) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WC) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WT) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WP) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WB) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, OWN) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, OTHER) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, PREFETCH),
.cntr = { {2, -1, -1}, {3, -1, -1} },
},
[P4_EVENT_FSB_DATA_ACTIVITY] = {
.opcode = P4_OPCODE(P4_EVENT_FSB_DATA_ACTIVITY),
.escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_DRV) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OWN) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OTHER) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_DRV) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_OWN) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_OTHER),
+ .shared = 1,
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_BSQ_ALLOCATION] = { /* shared ESCR, broken CCCR1 */
.opcode = P4_OPCODE(P4_EVENT_BSQ_ALLOCATION),
.escr_msr = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR0 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_TYPE0) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_TYPE1) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_LEN0) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_LEN1) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_IO_TYPE) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_LOCK_TYPE) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_CACHE_TYPE) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_SPLIT_TYPE) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_DEM_TYPE) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_ORD_TYPE) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE0) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE1) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE2),
.cntr = { {0, -1, -1}, {1, -1, -1} },
},
[P4_EVENT_BSQ_ACTIVE_ENTRIES] = { /* shared ESCR */
.opcode = P4_OPCODE(P4_EVENT_BSQ_ACTIVE_ENTRIES),
.escr_msr = { MSR_P4_BSU_ESCR1 , MSR_P4_BSU_ESCR1 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_TYPE0) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_TYPE1) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LEN0) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LEN1) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_IO_TYPE) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LOCK_TYPE) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_CACHE_TYPE) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_SPLIT_TYPE) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_DEM_TYPE) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_ORD_TYPE) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE0) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE1) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE2),
.cntr = { {2, -1, -1}, {3, -1, -1} },
},
[P4_EVENT_SSE_INPUT_ASSIST] = {
.opcode = P4_OPCODE(P4_EVENT_SSE_INPUT_ASSIST),
.escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_SSE_INPUT_ASSIST, ALL),
+ .shared = 1,
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_PACKED_SP_UOP] = {
.opcode = P4_OPCODE(P4_EVENT_PACKED_SP_UOP),
.escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_PACKED_SP_UOP, ALL),
+ .shared = 1,
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_PACKED_DP_UOP] = {
.opcode = P4_OPCODE(P4_EVENT_PACKED_DP_UOP),
.escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_PACKED_DP_UOP, ALL),
+ .shared = 1,
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_SCALAR_SP_UOP] = {
.opcode = P4_OPCODE(P4_EVENT_SCALAR_SP_UOP),
.escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_SCALAR_SP_UOP, ALL),
+ .shared = 1,
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_SCALAR_DP_UOP] = {
.opcode = P4_OPCODE(P4_EVENT_SCALAR_DP_UOP),
.escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_SCALAR_DP_UOP, ALL),
+ .shared = 1,
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_64BIT_MMX_UOP] = {
.opcode = P4_OPCODE(P4_EVENT_64BIT_MMX_UOP),
.escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_64BIT_MMX_UOP, ALL),
+ .shared = 1,
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_128BIT_MMX_UOP] = {
.opcode = P4_OPCODE(P4_EVENT_128BIT_MMX_UOP),
.escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_128BIT_MMX_UOP, ALL),
+ .shared = 1,
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_X87_FP_UOP] = {
.opcode = P4_OPCODE(P4_EVENT_X87_FP_UOP),
.escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_X87_FP_UOP, ALL),
+ .shared = 1,
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_TC_MISC] = {
.opcode = P4_OPCODE(P4_EVENT_TC_MISC),
.escr_msr = { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_TC_MISC, FLUSH),
.cntr = { {4, 5, -1}, {6, 7, -1} },
},
[P4_EVENT_GLOBAL_POWER_EVENTS] = {
.opcode = P4_OPCODE(P4_EVENT_GLOBAL_POWER_EVENTS),
.escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING),
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_TC_MS_XFER] = {
.opcode = P4_OPCODE(P4_EVENT_TC_MS_XFER),
.escr_msr = { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_TC_MS_XFER, CISC),
.cntr = { {4, 5, -1}, {6, 7, -1} },
},
[P4_EVENT_UOP_QUEUE_WRITES] = {
.opcode = P4_OPCODE(P4_EVENT_UOP_QUEUE_WRITES),
.escr_msr = { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_UOP_QUEUE_WRITES, FROM_TC_BUILD) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_UOP_QUEUE_WRITES, FROM_TC_DELIVER) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_UOP_QUEUE_WRITES, FROM_ROM),
.cntr = { {4, 5, -1}, {6, 7, -1} },
},
[P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE] = {
.opcode = P4_OPCODE(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE),
.escr_msr = { MSR_P4_TBPU_ESCR0 , MSR_P4_TBPU_ESCR0 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, CONDITIONAL) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, CALL) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, RETURN) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, INDIRECT),
.cntr = { {4, 5, -1}, {6, 7, -1} },
},
[P4_EVENT_RETIRED_BRANCH_TYPE] = {
.opcode = P4_OPCODE(P4_EVENT_RETIRED_BRANCH_TYPE),
.escr_msr = { MSR_P4_TBPU_ESCR0 , MSR_P4_TBPU_ESCR1 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CONDITIONAL) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CALL) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, RETURN) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, INDIRECT),
.cntr = { {4, 5, -1}, {6, 7, -1} },
},
[P4_EVENT_RESOURCE_STALL] = {
.opcode = P4_OPCODE(P4_EVENT_RESOURCE_STALL),
.escr_msr = { MSR_P4_ALF_ESCR0, MSR_P4_ALF_ESCR1 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_RESOURCE_STALL, SBFULL),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
[P4_EVENT_WC_BUFFER] = {
.opcode = P4_OPCODE(P4_EVENT_WC_BUFFER),
.escr_msr = { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_WC_BUFFER, WCB_EVICTS) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_WC_BUFFER, WCB_FULL_EVICTS),
+ .shared = 1,
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_B2B_CYCLES] = {
.opcode = P4_OPCODE(P4_EVENT_B2B_CYCLES),
.escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+ .escr_emask = 0,
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_BNR] = {
.opcode = P4_OPCODE(P4_EVENT_BNR),
.escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+ .escr_emask = 0,
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_SNOOP] = {
.opcode = P4_OPCODE(P4_EVENT_SNOOP),
.escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+ .escr_emask = 0,
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_RESPONSE] = {
.opcode = P4_OPCODE(P4_EVENT_RESPONSE),
.escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+ .escr_emask = 0,
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_FRONT_END_EVENT] = {
.opcode = P4_OPCODE(P4_EVENT_FRONT_END_EVENT),
.escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_FRONT_END_EVENT, NBOGUS) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_FRONT_END_EVENT, BOGUS),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
[P4_EVENT_EXECUTION_EVENT] = {
.opcode = P4_OPCODE(P4_EVENT_EXECUTION_EVENT),
.escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS0) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS1) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS2) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS3) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS0) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS1) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS2) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS3),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
[P4_EVENT_REPLAY_EVENT] = {
.opcode = P4_OPCODE(P4_EVENT_REPLAY_EVENT),
.escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_REPLAY_EVENT, NBOGUS) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_REPLAY_EVENT, BOGUS),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
[P4_EVENT_INSTR_RETIRED] = {
.opcode = P4_OPCODE(P4_EVENT_INSTR_RETIRED),
.escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSNTAG) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSTAG) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSNTAG) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSTAG),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
[P4_EVENT_UOPS_RETIRED] = {
.opcode = P4_OPCODE(P4_EVENT_UOPS_RETIRED),
.escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_UOPS_RETIRED, NBOGUS) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_UOPS_RETIRED, BOGUS),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
[P4_EVENT_UOP_TYPE] = {
.opcode = P4_OPCODE(P4_EVENT_UOP_TYPE),
.escr_msr = { MSR_P4_RAT_ESCR0, MSR_P4_RAT_ESCR1 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_UOP_TYPE, TAGLOADS) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_UOP_TYPE, TAGSTORES),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
[P4_EVENT_BRANCH_RETIRED] = {
.opcode = P4_OPCODE(P4_EVENT_BRANCH_RETIRED),
.escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMNP) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMNM) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMTP) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMTM),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
[P4_EVENT_MISPRED_BRANCH_RETIRED] = {
.opcode = P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED),
.escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
[P4_EVENT_X87_ASSIST] = {
.opcode = P4_OPCODE(P4_EVENT_X87_ASSIST),
.escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, FPSU) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, FPSO) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, POAO) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, POAU) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, PREA),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
[P4_EVENT_MACHINE_CLEAR] = {
.opcode = P4_OPCODE(P4_EVENT_MACHINE_CLEAR),
.escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_MACHINE_CLEAR, CLEAR) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_MACHINE_CLEAR, MOCLEAR) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_MACHINE_CLEAR, SMCLEAR),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
[P4_EVENT_INSTR_COMPLETED] = {
.opcode = P4_OPCODE(P4_EVENT_INSTR_COMPLETED),
.escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
+ .escr_emask =
+ P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_COMPLETED, NBOGUS) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_COMPLETED, BOGUS),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
};
@@ -428,29 +634,73 @@
return config;
}
-static int p4_validate_raw_event(struct perf_event *event)
+/* check cpu model specifics */
+static bool p4_event_match_cpu_model(unsigned int event_idx)
{
- unsigned int v;
-
- /* user data may have out-of-bound event index */
- v = p4_config_unpack_event(event->attr.config);
- if (v >= ARRAY_SIZE(p4_event_bind_map)) {
- pr_warning("P4 PMU: Unknown event code: %d\n", v);
- return -EINVAL;
+ /* INSTR_COMPLETED event only exists on models 3, 4 and 6 (Prescott) */
+ if (event_idx == P4_EVENT_INSTR_COMPLETED) {
+ if (boot_cpu_data.x86_model != 3 &&
+ boot_cpu_data.x86_model != 4 &&
+ boot_cpu_data.x86_model != 6)
+ return false;
}
/*
- * it may have some screwed PEBS bits
+ * For info
+ * - IQ_ESCR0, IQ_ESCR1 only for models 1 and 2
*/
- if (p4_config_pebs_has(event->attr.config, P4_PEBS_CONFIG_ENABLE)) {
- pr_warning("P4 PMU: PEBS are not supported yet\n");
+
+ return true;
+}
+
+static int p4_validate_raw_event(struct perf_event *event)
+{
+ unsigned int v, emask;
+
+ /* User data may have an out-of-bounds event index */
+ v = p4_config_unpack_event(event->attr.config);
+ if (v >= ARRAY_SIZE(p4_event_bind_map))
return -EINVAL;
+
+ /* It may be unsupported: */
+ if (!p4_event_match_cpu_model(v))
+ return -EINVAL;
+
+ /*
+ * NOTE: P4_CCCR_THREAD_ANY does not have the same meaning
+ * as in Architectural Performance Monitoring: it selects not
+ * _which_ logical cpu to count on but rather _when_, i.e. it
+ * depends on logical cpu state -- count the event if one cpu
+ * is active, none, both or any -- so we just allow the user
+ * to pass any value desired.
+ *
+ * In turn we always set the Tx_OS/Tx_USR bits bound to the
+ * logical cpu without propagating them to the other cpu.
+ */
+
+ /*
+ * if an event is shared across the logical threads
+ * the user needs special permissions to be able to use it
+ */
+ if (p4_event_bind_map[v].shared) {
+ if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
+ return -EACCES;
}
+
+ /* ESCR EventMask bits may be invalid */
+ emask = p4_config_unpack_escr(event->attr.config) & P4_ESCR_EVENTMASK_MASK;
+ if (emask & ~p4_event_bind_map[v].escr_emask)
+ return -EINVAL;
+
+ /*
+ * it may have some invalid PEBS bits
+ */
+ if (p4_config_pebs_has(event->attr.config, P4_PEBS_CONFIG_ENABLE))
+ return -EINVAL;
+
v = p4_config_unpack_metric(event->attr.config);
- if (v >= ARRAY_SIZE(p4_pebs_bind_map)) {
- pr_warning("P4 PMU: Unknown metric code: %d\n", v);
+ if (v >= ARRAY_SIZE(p4_pebs_bind_map))
return -EINVAL;
- }
return 0;
}
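
The new escr_emask validation reduces to a single mask test: every EventMask bit the user supplied must lie within the set the event binding declares valid. A self-contained sketch of that check, with made-up masks:

    #include <stdio.h>

    /* hypothetical stand-in for the per-event escr_emask in the patch */
    #define VALID_EMASK 0x0007u	/* bits this event actually defines */

    static int validate_emask(unsigned int user_emask)
    {
    	/* any bit outside the declared set rejects the event (-EINVAL) */
    	if (user_emask & ~VALID_EMASK)
    		return -1;
    	return 0;
    }

    int main(void)
    {
    	printf("%d\n", validate_emask(0x0005));	/* ok: subset  -> 0  */
    	printf("%d\n", validate_emask(0x0009));	/* bad: bit 3  -> -1 */
    	return 0;
    }
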
@@ -478,25 +728,21 @@
if (event->attr.type == PERF_TYPE_RAW) {
+ /*
+ * Clear the bits we reserve to be managed by the kernel
+ * itself and never allow from user space
+ */
+ event->attr.config &= P4_CONFIG_MASK;
+
rc = p4_validate_raw_event(event);
if (rc)
goto out;
/*
- * We don't control raw events so it's up to the caller
- * to pass sane values (and we don't count the thread number
- * on HT machine but allow HT-compatible specifics to be
- * passed on)
- *
* Note that for RAW events we allow user to use P4_CCCR_RESERVED
* bits since we keep additional info here (for cache events and etc)
- *
- * XXX: HT wide things should check perf_paranoid_cpu() &&
- * CAP_SYS_ADMIN
*/
- event->hw.config |= event->attr.config &
- (p4_config_pack_escr(P4_ESCR_MASK_HT) |
- p4_config_pack_cccr(P4_CCCR_MASK_HT | P4_CCCR_RESERVED));
+ event->hw.config |= event->attr.config;
}
rc = x86_setup_perfctr(event);
@@ -658,8 +904,12 @@
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
int overflow;
- if (!test_bit(idx, cpuc->active_mask))
+ if (!test_bit(idx, cpuc->active_mask)) {
+ /* catch in-flight IRQs */
+ if (__test_and_clear_bit(idx, cpuc->running))
+ handled++;
continue;
+ }
event = cpuc->events[idx];
hwc = &event->hw;
@@ -690,7 +940,7 @@
inc_irq_stat(apic_perf_irqs);
}
- return handled > 0;
+ return handled;
}
/*
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index fb329e9..d9f4ff8 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -700,11 +700,10 @@
{
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
- if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15 &&
- boot_cpu_data.x86 != 16 && boot_cpu_data.x86 != 17)
- return;
- wd_ops = &k7_wd_ops;
- break;
+ if (boot_cpu_data.x86 == 6 ||
+ (boot_cpu_data.x86 >= 0xf && boot_cpu_data.x86 <= 0x15))
+ wd_ops = &k7_wd_ops;
+ return;
case X86_VENDOR_INTEL:
/* Work around where perfctr1 doesn't have a working enable
* bit as described in the following errata:
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 2c77931..c7f64e6 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -31,6 +31,7 @@
const struct cpuid_bit *cb;
static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
+ { X86_FEATURE_DTS, CR_EAX, 0, 0x00000006, 0 },
{ X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 },
{ X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 },
{ X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 },
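
Each cpuid_bits[] entry names a feature plus the register, bit and leaf to probe, so the new DTS line means bit 0 of EAX of leaf 0x6. The same probe from user space, as a sketch assuming GCC's cpuid.h:

    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
    	unsigned int eax, ebx, ecx, edx;

    	/* leaf 0x6: thermal/power management, per the table above */
    	if (!__get_cpuid(0x6, &eax, &ebx, &ecx, &edx))
    		return 1;

    	printf("DTS  (eax bit 0): %u\n", (eax >> 0) & 1);
    	printf("IDA  (eax bit 1): %u\n", (eax >> 1) & 1);
    	printf("ARAT (eax bit 2): %u\n", (eax >> 2) & 1);
    	return 0;
    }
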
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index e5cc7e8..76b8cd9 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -18,7 +18,6 @@
#include <asm/apic.h>
#include <asm/iommu.h>
#include <asm/gart.h>
-#include <asm/hpet.h>
static void __init fix_hypertransport_config(int num, int slot, int func)
{
@@ -98,7 +97,6 @@
}
#if defined(CONFIG_ACPI) && defined(CONFIG_X86_IO_APIC)
-#if defined(CONFIG_ACPI) && defined(CONFIG_X86_IO_APIC)
static u32 __init ati_ixp4x0_rev(int num, int slot, int func)
{
u32 d;
@@ -116,7 +114,6 @@
d &= 0xff;
return d;
}
-#endif
static void __init ati_bugs(int num, int slot, int func)
{
@@ -192,21 +189,6 @@
}
#endif
-/*
- * Force the read back of the CMP register in hpet_next_event()
- * to work around the problem that the CMP register write seems to be
- * delayed. See hpet_next_event() for details.
- *
- * We do this on all SMBUS incarnations for now until we have more
- * information about the affected chipsets.
- */
-static void __init ati_hpet_bugs(int num, int slot, int func)
-{
-#ifdef CONFIG_HPET_TIMER
- hpet_readback_cmp = 1;
-#endif
-}
-
#define QFLAG_APPLY_ONCE 0x1
#define QFLAG_APPLIED 0x2
#define QFLAG_DONE (QFLAG_APPLY_ONCE|QFLAG_APPLIED)
@@ -236,8 +218,6 @@
PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs },
{ PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd },
- { PCI_VENDOR_ID_ATI, PCI_ANY_ID,
- PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_hpet_bugs },
{}
};
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 17be5ec..c375c79 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1023,9 +1023,9 @@
apicinterrupt SPURIOUS_APIC_VECTOR \
spurious_interrupt smp_spurious_interrupt
-#ifdef CONFIG_PERF_EVENTS
-apicinterrupt LOCAL_PENDING_VECTOR \
- perf_pending_interrupt smp_perf_pending_interrupt
+#ifdef CONFIG_IRQ_WORK
+apicinterrupt IRQ_WORK_VECTOR \
+ irq_work_interrupt smp_irq_work_interrupt
#endif
/*
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index cd37469..3afb33f 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -257,14 +257,9 @@
return mod_code_status;
}
-
-
-
-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
-
static unsigned char *ftrace_nop_replace(void)
{
- return ftrace_nop;
+ return ideal_nop5;
}
static int
@@ -338,62 +333,6 @@
int __init ftrace_dyn_arch_init(void *data)
{
- extern const unsigned char ftrace_test_p6nop[];
- extern const unsigned char ftrace_test_nop5[];
- extern const unsigned char ftrace_test_jmp[];
- int faulted = 0;
-
- /*
- * There is no good nop for all x86 archs.
- * We will default to using the P6_NOP5, but first we
- * will test to make sure that the nop will actually
- * work on this CPU. If it faults, we will then
- * go to a lesser efficient 5 byte nop. If that fails
- * we then just use a jmp as our nop. This isn't the most
- * efficient nop, but we can not use a multi part nop
- * since we would then risk being preempted in the middle
- * of that nop, and if we enabled tracing then, it might
- * cause a system crash.
- *
- * TODO: check the cpuid to determine the best nop.
- */
- asm volatile (
- "ftrace_test_jmp:"
- "jmp ftrace_test_p6nop\n"
- "nop\n"
- "nop\n"
- "nop\n" /* 2 byte jmp + 3 bytes */
- "ftrace_test_p6nop:"
- P6_NOP5
- "jmp 1f\n"
- "ftrace_test_nop5:"
- ".byte 0x66,0x66,0x66,0x66,0x90\n"
- "1:"
- ".section .fixup, \"ax\"\n"
- "2: movl $1, %0\n"
- " jmp ftrace_test_nop5\n"
- "3: movl $2, %0\n"
- " jmp 1b\n"
- ".previous\n"
- _ASM_EXTABLE(ftrace_test_p6nop, 2b)
- _ASM_EXTABLE(ftrace_test_nop5, 3b)
- : "=r"(faulted) : "0" (faulted));
-
- switch (faulted) {
- case 0:
- pr_info("converting mcount calls to 0f 1f 44 00 00\n");
- memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
- break;
- case 1:
- pr_info("converting mcount calls to 66 66 66 66 90\n");
- memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
- break;
- case 2:
- pr_info("converting mcount calls to jmp . + 5\n");
- memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
- break;
- }
-
 /* The return code is returned via data */
*(unsigned long *)data = 0;
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 351f9c0..7494999 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -35,7 +35,6 @@
unsigned long hpet_address;
u8 hpet_blockid; /* OS timer block num */
u8 hpet_msi_disable;
-u8 hpet_readback_cmp;
#ifdef CONFIG_PCI_MSI
static unsigned long hpet_num_timers;
@@ -395,23 +394,27 @@
* at that point and we would wait for the next hpet interrupt
* forever. We found out that reading the CMP register back
* forces the transfer so we can rely on the comparison with
- * the counter register below.
+ * the counter register below. If the read back from the
+ * compare register does not match the value we programmed
+ * then we might have a real hardware problem. We can not do
+ * much about it here, but at least alert the user/admin with
+ * a prominent warning.
*
- * That works fine on those ATI chipsets, but on newer Intel
- * chipsets (ICH9...) this triggers due to an erratum: Reading
- * the comparator immediately following a write is returning
- * the old value.
+ * An erratum on some chipsets (ICH9, ...) results in the
+ * comparator read immediately following a write returning the
+ * old value. The workaround is to read the register a second
+ * time when the first read returns the old value.
*
- * We restrict the read back to the affected ATI chipsets (set
- * by quirks) and also run it with hpet=verbose for debugging
- * purposes.
+ * In fact the write to the comparator register is delayed by
+ * up to two HPET cycles, so the earlier workaround of
+ * restricting the readback to the ATI chipsets known to be
+ * borked failed miserably. So we give up on optimizations forever
+ * and penalize all HPET incarnations unconditionally.
*/
- if (hpet_readback_cmp || hpet_verbose) {
- u32 cmp = hpet_readl(HPET_Tn_CMP(timer));
-
- if (cmp != cnt)
+ if (unlikely((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt)) {
+ if (hpet_readl(HPET_Tn_CMP(timer)) != cnt)
printk_once(KERN_WARNING
- "hpet: compare register read back failed.\n");
+ "hpet: compare register read back failed.\n");
}
return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
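
The rewritten tail of hpet_next_event() does two independent things: it rereads the comparator to verify the delayed write, and it reports -ETIME when the programmed deadline has already passed. A hedged model of the -ETIME check only, with a fake free-running counter:

    #include <stdio.h>
    #include <stdint.h>

    #define ETIME 62	/* numeric value used only for the demo printout */

    /* hypothetical stand-in for hpet_readl(HPET_COUNTER) */
    static uint32_t counter;

    static int hpet_next_event_model(uint32_t cnt)
    {
    	/*
    	 * Signed comparison of the 32-bit difference, as in the patch:
    	 * if the counter already passed cnt, the interrupt was missed
    	 * and we must return -ETIME so the caller retries.
    	 */
    	return (int32_t)(counter - cnt) >= 0 ? -ETIME : 0;
    }

    int main(void)
    {
    	counter = 1000;
    	printf("%d\n", hpet_next_event_model(2000));	/* future: 0      */
    	printf("%d\n", hpet_next_event_model(500));	/* missed: -ETIME */
    	return 0;
    }
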
@@ -503,7 +506,7 @@
{
unsigned int irq;
- irq = create_irq();
+ irq = create_irq_nr(0, -1);
if (!irq)
return -EINVAL;
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index a474ec3..ff15c9d 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -206,11 +206,27 @@
int arch_bp_generic_fields(int x86_len, int x86_type,
int *gen_len, int *gen_type)
{
+ /* Type */
+ switch (x86_type) {
+ case X86_BREAKPOINT_EXECUTE:
+ if (x86_len != X86_BREAKPOINT_LEN_X)
+ return -EINVAL;
+
+ *gen_type = HW_BREAKPOINT_X;
+ *gen_len = sizeof(long);
+ return 0;
+ case X86_BREAKPOINT_WRITE:
+ *gen_type = HW_BREAKPOINT_W;
+ break;
+ case X86_BREAKPOINT_RW:
+ *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
+ break;
+ default:
+ return -EINVAL;
+ }
+
/* Len */
switch (x86_len) {
- case X86_BREAKPOINT_LEN_X:
- *gen_len = sizeof(long);
- break;
case X86_BREAKPOINT_LEN_1:
*gen_len = HW_BREAKPOINT_LEN_1;
break;
@@ -229,21 +245,6 @@
return -EINVAL;
}
- /* Type */
- switch (x86_type) {
- case X86_BREAKPOINT_EXECUTE:
- *gen_type = HW_BREAKPOINT_X;
- break;
- case X86_BREAKPOINT_WRITE:
- *gen_type = HW_BREAKPOINT_W;
- break;
- case X86_BREAKPOINT_RW:
- *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
- break;
- default:
- return -EINVAL;
- }
-
return 0;
}
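
Reordering the switches lets the execute case consume both the type and the length in one place, since an execute breakpoint only ever carries LEN_X. The control flow in isolation, with invented constants; only the structure mirrors arch_bp_generic_fields():

    #include <stdio.h>

    enum bp_type { BP_EXECUTE, BP_WRITE, BP_RW };
    enum bp_len  { LEN_X, LEN_1, LEN_2, LEN_4 };

    #define GEN_X 1
    #define GEN_W 2
    #define GEN_R 4

    static int to_generic(enum bp_type type, enum bp_len len,
    		      int *gen_type, int *gen_len)
    {
    	/* type first: the execute case consumes the length too */
    	switch (type) {
    	case BP_EXECUTE:
    		if (len != LEN_X)
    			return -1;	/* execute implies LEN_X */
    		*gen_type = GEN_X;
    		*gen_len = (int)sizeof(long);
    		return 0;		/* skip the length switch */
    	case BP_WRITE:
    		*gen_type = GEN_W;
    		break;
    	case BP_RW:
    		*gen_type = GEN_W | GEN_R;
    		break;
    	default:
    		return -1;
    	}

    	switch (len) {
    	case LEN_1: *gen_len = 1; break;
    	case LEN_2: *gen_len = 2; break;
    	case LEN_4: *gen_len = 4; break;
    	default:    return -1;	/* LEN_X is only valid for execute */
    	}
    	return 0;
    }

    int main(void)
    {
    	int t, l;
    	printf("%d\n", to_generic(BP_EXECUTE, LEN_X, &t, &l)); /* 0  */
    	printf("%d\n", to_generic(BP_WRITE, LEN_X, &t, &l));   /* -1 */
    	return 0;
    }
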
@@ -316,9 +317,6 @@
ret = -EINVAL;
switch (info->len) {
- case X86_BREAKPOINT_LEN_X:
- align = sizeof(long) -1;
- break;
case X86_BREAKPOINT_LEN_1:
align = 0;
break;
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 91fd0c7..44edb03 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -67,10 +67,10 @@
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
seq_printf(p, " Performance monitoring interrupts\n");
- seq_printf(p, "%*s: ", prec, "PND");
+ seq_printf(p, "%*s: ", prec, "IWI");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", irq_stats(j)->apic_pending_irqs);
- seq_printf(p, " Performance pending work\n");
+ seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
+ seq_printf(p, " IRQ work interrupts\n");
#endif
if (x86_platform_ipi_callback) {
seq_printf(p, "%*s: ", prec, "PLT");
@@ -185,7 +185,7 @@
sum += irq_stats(cpu)->apic_timer_irqs;
sum += irq_stats(cpu)->irq_spurious_count;
sum += irq_stats(cpu)->apic_perf_irqs;
- sum += irq_stats(cpu)->apic_pending_irqs;
+ sum += irq_stats(cpu)->apic_irq_work_irqs;
#endif
if (x86_platform_ipi_callback)
sum += irq_stats(cpu)->x86_platform_ipis;
diff --git a/arch/x86/kernel/irq_work.c b/arch/x86/kernel/irq_work.c
new file mode 100644
index 0000000..ca8f703
--- /dev/null
+++ b/arch/x86/kernel/irq_work.c
@@ -0,0 +1,30 @@
+/*
+ * x86 specific code for irq_work
+ *
+ * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/irq_work.h>
+#include <linux/hardirq.h>
+#include <asm/apic.h>
+
+void smp_irq_work_interrupt(struct pt_regs *regs)
+{
+ irq_enter();
+ ack_APIC_irq();
+ inc_irq_stat(apic_irq_work_irqs);
+ irq_work_run();
+ irq_exit();
+}
+
+void arch_irq_work_raise(void)
+{
+#ifdef CONFIG_X86_LOCAL_APIC
+ if (!cpu_has_apic)
+ return;
+
+ apic->send_IPI_self(IRQ_WORK_VECTOR);
+ apic_wait_icr_idle();
+#endif
+}
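The new file above supplies only the arch hooks; as a hedged sketch of the consumer side, assuming the generic init_irq_work()/irq_work_queue() interface this series introduces (the callback and call sites are hypothetical):

	#include <linux/irq_work.h>

	static void deferred_wakeup(struct irq_work *work)
	{
		/* Runs from smp_irq_work_interrupt() above, i.e. in
		 * hard-IRQ context, shortly after being queued. */
	}

	static struct irq_work wakeup_work;

	static void example_setup(void)
	{
		init_irq_work(&wakeup_work, deferred_wakeup);
	}

	static void example_from_nmi(void)
	{
		/* NMI-safe: queues the entry and raises the self-IPI on
		 * IRQ_WORK_VECTOR via arch_irq_work_raise(). */
		irq_work_queue(&wakeup_work);
	}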
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 990ae7c..713969b 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -224,9 +224,9 @@
alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
- /* Performance monitoring interrupts: */
-# ifdef CONFIG_PERF_EVENTS
- alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt);
+ /* IRQ work interrupts: */
+# ifdef CONFIG_IRQ_WORK
+ alloc_intr_gate(IRQ_WORK_VECTOR, irq_work_interrupt);
# endif
#endif
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
new file mode 100644
index 0000000..961b6b3
--- /dev/null
+++ b/arch/x86/kernel/jump_label.c
@@ -0,0 +1,50 @@
+/*
+ * jump label x86 support
+ *
+ * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
+ *
+ */
+#include <linux/jump_label.h>
+#include <linux/memory.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/jhash.h>
+#include <linux/cpu.h>
+#include <asm/kprobes.h>
+#include <asm/alternative.h>
+
+#ifdef HAVE_JUMP_LABEL
+
+union jump_code_union {
+ char code[JUMP_LABEL_NOP_SIZE];
+ struct {
+ char jump;
+ int offset;
+ } __attribute__((packed));
+};
+
+void arch_jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ union jump_code_union code;
+
+ if (type == JUMP_LABEL_ENABLE) {
+ code.jump = 0xe9;
+ code.offset = entry->target -
+ (entry->code + JUMP_LABEL_NOP_SIZE);
+ } else
+ memcpy(&code, ideal_nop5, JUMP_LABEL_NOP_SIZE);
+ get_online_cpus();
+ mutex_lock(&text_mutex);
+ text_poke_smp((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE);
+ mutex_unlock(&text_mutex);
+ put_online_cpus();
+}
+
+void arch_jump_label_text_poke_early(jump_label_t addr)
+{
+ text_poke_early((void *)addr, ideal_nop5, JUMP_LABEL_NOP_SIZE);
+}
+
+#endif
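A standalone sketch (plain C, illustrative only) of the encoding the JUMP_LABEL_ENABLE branch builds through jump_code_union: opcode 0xe9 followed by a displacement relative to the end of the 5-byte instruction.

	#include <stdint.h>
	#include <string.h>

	/* Mirrors the arithmetic in arch_jump_label_transform():
	 * offset = target - (code + JUMP_LABEL_NOP_SIZE). */
	static void encode_jmp_rel32(uint8_t buf[5], unsigned long site,
				     unsigned long target)
	{
		int32_t offset = (int32_t)(target - (site + 5));

		buf[0] = 0xe9;			/* opcode: JMP rel32 */
		memcpy(buf + 1, &offset, 4);	/* little-endian disp. */
	}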
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 770ebfb..1cbd54c 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -230,9 +230,6 @@
return 0;
}
-/* Dummy buffers for kallsyms_lookup */
-static char __dummy_buf[KSYM_NAME_LEN];
-
/* Check if paddr is at an instruction boundary */
static int __kprobes can_probe(unsigned long paddr)
{
@@ -241,7 +238,7 @@
struct insn insn;
kprobe_opcode_t buf[MAX_INSN_SIZE];
- if (!kallsyms_lookup(paddr, NULL, &offset, NULL, __dummy_buf))
+ if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
return 0;
/* Decode instructions */
@@ -1129,7 +1126,7 @@
*(unsigned long *)addr = val;
}
-void __kprobes kprobes_optinsn_template_holder(void)
+static void __used __kprobes kprobes_optinsn_template_holder(void)
{
asm volatile (
".global optprobe_template_entry\n"
@@ -1221,7 +1218,8 @@
}
/* Check whether the address range is reserved */
if (ftrace_text_reserved(src, src + len - 1) ||
- alternatives_text_reserved(src, src + len - 1))
+ alternatives_text_reserved(src, src + len - 1) ||
+ jump_label_text_reserved(src, src + len - 1))
return -EBUSY;
return len;
@@ -1269,11 +1267,9 @@
unsigned long addr, size = 0, offset = 0;
struct insn insn;
kprobe_opcode_t buf[MAX_INSN_SIZE];
- /* Dummy buffers for lookup_symbol_attrs */
- static char __dummy_buf[KSYM_NAME_LEN];
/* Lookup symbol including addr */
- if (!kallsyms_lookup(paddr, &size, &offset, NULL, __dummy_buf))
+ if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
return 0;
/* Check there is enough space for a relative jump. */
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 035c8c5..b3ea9db 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -36,7 +36,7 @@
if (!page)
goto out;
pud = (pud_t *)page_address(page);
- memset(pud, 0, PAGE_SIZE);
+ clear_page(pud);
set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
}
pud = pud_offset(pgd, addr);
@@ -45,7 +45,7 @@
if (!page)
goto out;
pmd = (pmd_t *)page_address(page);
- memset(pmd, 0, PAGE_SIZE);
+ clear_page(pmd);
set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
}
pmd = pmd_offset(pud, addr);
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index e0bc186..8f29560 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -239,11 +239,13 @@
apply_paravirt(pseg, pseg + para->sh_size);
}
- return module_bug_finalize(hdr, sechdrs, me);
+ /* make jump label nops */
+ jump_label_apply_nops(me);
+
+ return 0;
}
void module_arch_cleanup(struct module *mod)
{
alternatives_smp_module_del(mod);
- module_bug_cleanup(mod);
}
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 0f7f130..c562207 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -39,7 +39,7 @@
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
-#include <asm/k8.h>
+#include <asm/amd_nb.h>
#include <asm/x86_init.h>
static unsigned long iommu_bus_base; /* GART remapping area (physical) */
@@ -560,8 +560,11 @@
{
int i;
- for (i = 0; i < num_k8_northbridges; i++) {
- struct pci_dev *dev = k8_northbridges[i];
+ if (!k8_northbridges.gart_supported)
+ return;
+
+ for (i = 0; i < k8_northbridges.num; i++) {
+ struct pci_dev *dev = k8_northbridges.nb_misc[i];
enable_gart_translation(dev, __pa(agp_gatt_table));
}
@@ -592,16 +595,19 @@
if (!fix_up_north_bridges)
return;
+ if (!k8_northbridges.gart_supported)
+ return;
+
pr_info("PCI-DMA: Restoring GART aperture settings\n");
- for (i = 0; i < num_k8_northbridges; i++) {
- struct pci_dev *dev = k8_northbridges[i];
+ for (i = 0; i < k8_northbridges.num; i++) {
+ struct pci_dev *dev = k8_northbridges.nb_misc[i];
/*
* Don't enable translations just yet. That is the next
* step. Restore the pre-suspend aperture settings.
*/
- pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, aperture_order << 1);
+ gart_set_size_and_enable(dev, aperture_order);
pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25);
}
}
@@ -649,8 +655,8 @@
aper_size = aper_base = info->aper_size = 0;
dev = NULL;
- for (i = 0; i < num_k8_northbridges; i++) {
- dev = k8_northbridges[i];
+ for (i = 0; i < k8_northbridges.num; i++) {
+ dev = k8_northbridges.nb_misc[i];
new_aper_base = read_aperture(dev, &new_aper_size);
if (!new_aper_base)
goto nommu;
@@ -718,10 +724,13 @@
if (!no_agp)
return;
- for (i = 0; i < num_k8_northbridges; i++) {
+ if (!k8_northbridges.gart_supported)
+ return;
+
+ for (i = 0; i < k8_northbridges.num; i++) {
u32 ctl;
- dev = k8_northbridges[i];
+ dev = k8_northbridges.nb_misc[i];
pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
ctl &= ~GARTEN;
@@ -739,7 +748,7 @@
unsigned long scratch;
long i;
- if (num_k8_northbridges == 0)
+ if (!k8_northbridges.gart_supported)
return 0;
#ifndef CONFIG_AGP_AMD64
diff --git a/arch/x86/kernel/pmtimer_64.c b/arch/x86/kernel/pmtimer_64.c
deleted file mode 100644
index b112406..0000000
--- a/arch/x86/kernel/pmtimer_64.c
+++ /dev/null
@@ -1,69 +0,0 @@
-/* Ported over from i386 by AK, original copyright was:
- *
- * (C) Dominik Brodowski <linux@brodo.de> 2003
- *
- * Driver to use the Power Management Timer (PMTMR) available in some
- * southbridges as primary timing source for the Linux kernel.
- *
- * Based on parts of linux/drivers/acpi/hardware/hwtimer.c, timer_pit.c,
- * timer_hpet.c, and on Arjan van de Ven's implementation for 2.4.
- *
- * This file is licensed under the GPL v2.
- *
- * Dropped all the hardware bug workarounds for now. Hopefully they
- * are not needed on 64bit chipsets.
- */
-
-#include <linux/jiffies.h>
-#include <linux/kernel.h>
-#include <linux/time.h>
-#include <linux/init.h>
-#include <linux/cpumask.h>
-#include <linux/acpi_pmtmr.h>
-
-#include <asm/io.h>
-#include <asm/proto.h>
-#include <asm/msr.h>
-#include <asm/vsyscall.h>
-
-static inline u32 cyc2us(u32 cycles)
-{
- /* The Power Management Timer ticks at 3.579545 ticks per microsecond.
- * 1 / PM_TIMER_FREQUENCY == 0.27936511 =~ 286/1024 [error: 0.024%]
- *
- * Even with HZ = 100, delta is at maximum 35796 ticks, so it can
- * easily be multiplied with 286 (=0x11E) without having to fear
- * u32 overflows.
- */
- cycles *= 286;
- return (cycles >> 10);
-}
-
-static unsigned pmtimer_wait_tick(void)
-{
- u32 a, b;
- for (a = b = inl(pmtmr_ioport) & ACPI_PM_MASK;
- a == b;
- b = inl(pmtmr_ioport) & ACPI_PM_MASK)
- cpu_relax();
- return b;
-}
-
-/* note: wait time is rounded up to one tick */
-void pmtimer_wait(unsigned us)
-{
- u32 a, b;
- a = pmtimer_wait_tick();
- do {
- b = inl(pmtmr_ioport);
- cpu_relax();
- } while (cyc2us(b - a) < us);
-}
-
-static int __init nopmtimer_setup(char *s)
-{
- pmtmr_ioport = 0;
- return 1;
-}
-
-__setup("nopmtimer", nopmtimer_setup);
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index e3af342..7a4cf14 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -84,7 +84,7 @@
}
/* we will leave sorting out the final value
when we are ready to reboot, since we might not
- have set up boot_cpu_id or smp_num_cpu */
+ have detected the BSP APIC ID or smp_num_cpu */
break;
#endif /* CONFIG_SMP */
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index c3a4fbb..9e5cdaf 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -107,11 +107,12 @@
#include <asm/percpu.h>
#include <asm/topology.h>
#include <asm/apicdef.h>
-#include <asm/k8.h>
+#include <asm/amd_nb.h>
#ifdef CONFIG_X86_64
#include <asm/numa_64.h>
#endif
#include <asm/mce.h>
+#include <asm/alternative.h>
/*
* end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
@@ -125,7 +126,6 @@
RESERVE_BRK(dmi_alloc, 65536);
#endif
-unsigned int boot_cpu_id __read_mostly;
static __initdata unsigned long _brk_start = (unsigned long)__brk_base;
unsigned long _brk_end = (unsigned long)__brk_base;
@@ -618,79 +618,7 @@
reserve_early_overlap_ok(addr, addr + size, "ibft");
}
-#ifdef CONFIG_X86_RESERVE_LOW_64K
-static int __init dmi_low_memory_corruption(const struct dmi_system_id *d)
-{
- printk(KERN_NOTICE
- "%s detected: BIOS may corrupt low RAM, working around it.\n",
- d->ident);
-
- e820_update_range(0, 0x10000, E820_RAM, E820_RESERVED);
- sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
-
- return 0;
-}
-#endif
-
-/* List of systems that have known low memory corruption BIOS problems */
-static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
-#ifdef CONFIG_X86_RESERVE_LOW_64K
- {
- .callback = dmi_low_memory_corruption,
- .ident = "AMI BIOS",
- .matches = {
- DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
- },
- },
- {
- .callback = dmi_low_memory_corruption,
- .ident = "Phoenix BIOS",
- .matches = {
- DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies"),
- },
- },
- {
- .callback = dmi_low_memory_corruption,
- .ident = "Phoenix/MSC BIOS",
- .matches = {
- DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix/MSC"),
- },
- },
- /*
- * AMI BIOS with low memory corruption was found on Intel DG45ID and
- * DG45FC boards.
- * It has a different DMI_BIOS_VENDOR = "Intel Corp.", for now we will
- * match only DMI_BOARD_NAME and see if there is more bad products
- * with this vendor.
- */
- {
- .callback = dmi_low_memory_corruption,
- .ident = "AMI BIOS",
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "DG45ID"),
- },
- },
- {
- .callback = dmi_low_memory_corruption,
- .ident = "AMI BIOS",
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "DG45FC"),
- },
- },
- /*
- * The Dell Inspiron Mini 1012 has DMI_BIOS_VENDOR = "Dell Inc.", so
- * match on the product name.
- */
- {
- .callback = dmi_low_memory_corruption,
- .ident = "Phoenix BIOS",
- .matches = {
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
- },
- },
-#endif
- {}
-};
+static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;
static void __init trim_bios_range(void)
{
@@ -698,8 +626,14 @@
* A special case is the first 4Kb of memory;
* This is a BIOS owned area, not kernel ram, but generally
* not listed as such in the E820 table.
+ *
+ * This typically reserves additional memory (64KiB by default)
+ * since some BIOSes are known to corrupt low memory. See the
+ * Kconfig help text for X86_RESERVE_LOW.
*/
- e820_update_range(0, PAGE_SIZE, E820_RAM, E820_RESERVED);
+ e820_update_range(0, ALIGN(reserve_low, PAGE_SIZE),
+ E820_RAM, E820_RESERVED);
+
/*
* special case: Some BIOSen report the PC BIOS
* area (640->1Mb) as ram even though it is not.
@@ -709,6 +643,28 @@
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
}
+static int __init parse_reservelow(char *p)
+{
+ unsigned long long size;
+
+ if (!p)
+ return -EINVAL;
+
+ size = memparse(p, &p);
+
+ if (size < 4096)
+ size = 4096;
+
+ if (size > 640*1024)
+ size = 640*1024;
+
+ reserve_low = size;
+
+ return 0;
+}
+
+early_param("reservelow", parse_reservelow);
+
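The DMI quirk table is thus replaced by a tunable. Illustrative boot options (memparse() accepts the usual K/M suffixes, and parse_reservelow() clamps the result to the [4096, 640*1024] range):

	reservelow=64K		reserve the first 64KiB of RAM
	reservelow=2K		clamped up to 4096 bytes
	reservelow=1M		clamped down to 640*1024 bytes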
/*
* Determine if we were loaded by an EFI loader. If so, then we have also been
* passed the efi memmap, systab, etc., so we should use these data structures
@@ -726,6 +682,7 @@
{
int acpi = 0;
int k8 = 0;
+ unsigned long flags;
#ifdef CONFIG_X86_32
memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
@@ -863,8 +820,6 @@
dmi_scan_machine();
- dmi_check_system(bad_bios_dmi_table);
-
/*
* VMware detection requires dmi to be available, so this
* needs to be done after dmi_scan_machine, for the BP.
@@ -1071,6 +1026,10 @@
x86_init.oem.banner();
mcheck_init();
+
+ local_irq_save(flags);
+ arch_init_ideal_nop5();
+ local_irq_restore(flags);
}
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index a60df9a..2335c15 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -253,7 +253,7 @@
* Up to this point, the boot CPU has been using .init.data
* area. Reload any changed state for the boot CPU.
*/
- if (cpu == boot_cpu_id)
+ if (!cpu)
switch_to_new_gdt(cpu);
}
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 8b3bfc4..bc2cc44 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -397,6 +397,19 @@
identify_secondary_cpu(c);
}
+static void __cpuinit link_thread_siblings(int cpu1, int cpu2)
+{
+ struct cpuinfo_x86 *c1 = &cpu_data(cpu1);
+ struct cpuinfo_x86 *c2 = &cpu_data(cpu2);
+
+ cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2));
+ cpumask_set_cpu(cpu2, cpu_sibling_mask(cpu1));
+ cpumask_set_cpu(cpu1, cpu_core_mask(cpu2));
+ cpumask_set_cpu(cpu2, cpu_core_mask(cpu1));
+ cpumask_set_cpu(cpu1, c2->llc_shared_map);
+ cpumask_set_cpu(cpu2, c1->llc_shared_map);
+}
+
void __cpuinit set_cpu_sibling_map(int cpu)
{
@@ -409,14 +422,13 @@
for_each_cpu(i, cpu_sibling_setup_mask) {
struct cpuinfo_x86 *o = &cpu_data(i);
- if (c->phys_proc_id == o->phys_proc_id &&
- c->cpu_core_id == o->cpu_core_id) {
- cpumask_set_cpu(i, cpu_sibling_mask(cpu));
- cpumask_set_cpu(cpu, cpu_sibling_mask(i));
- cpumask_set_cpu(i, cpu_core_mask(cpu));
- cpumask_set_cpu(cpu, cpu_core_mask(i));
- cpumask_set_cpu(i, c->llc_shared_map);
- cpumask_set_cpu(cpu, o->llc_shared_map);
+ if (cpu_has(c, X86_FEATURE_TOPOEXT)) {
+ if (c->phys_proc_id == o->phys_proc_id &&
+ c->compute_unit_id == o->compute_unit_id)
+ link_thread_siblings(cpu, i);
+ } else if (c->phys_proc_id == o->phys_proc_id &&
+ c->cpu_core_id == o->cpu_core_id) {
+ link_thread_siblings(cpu, i);
}
}
} else {
diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
index d5e0662..0b0cb5f 100644
--- a/arch/x86/kernel/sys_i386_32.c
+++ b/arch/x86/kernel/sys_i386_32.c
@@ -33,8 +33,8 @@
const char *const envp[])
{
long __res;
- asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
+ asm volatile ("int $0x80"
: "=a" (__res)
- : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
+ : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
return __res;
}
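For reference, the constraints the slimmed-down asm relies on (a summary; the PIC remark is background, not part of this patch):

	/*
	 * "=a" (__res)        output: %eax, the syscall return value
	 * "0" (__NR_execve)   input tied to operand 0: %eax = syscall nr
	 * "b", "c", "d"       inputs in %ebx/%ecx/%edx: args 1/2/3
	 *
	 * The removed push/pop dance loaded %ebx by hand, a workaround
	 * for contexts where %ebx is reserved as the PIC register; the
	 * kernel is not built PIC, so a plain "b" constraint suffices.
	 */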
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
index a874495..e2a5952 100644
--- a/arch/x86/kernel/trampoline.c
+++ b/arch/x86/kernel/trampoline.c
@@ -45,8 +45,7 @@
/* Copy kernel address range */
clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY,
swapper_pg_dir + KERNEL_PGD_BOUNDARY,
- min_t(unsigned long, KERNEL_PGD_PTRS,
- KERNEL_PGD_BOUNDARY));
+ KERNEL_PGD_PTRS);
/* Initialize low mappings */
clone_pgd_range(trampoline_pg_dir,
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 13b6a6c..0c40d8b 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -104,10 +104,14 @@
__setup("notsc", notsc_setup);
+static int no_sched_irq_time;
+
static int __init tsc_setup(char *str)
{
if (!strcmp(str, "reliable"))
tsc_clocksource_reliable = 1;
+ if (!strncmp(str, "noirqtime", 9))
+ no_sched_irq_time = 1;
return 1;
}
@@ -626,6 +630,44 @@
local_irq_restore(flags);
}
+static unsigned long long cyc2ns_suspend;
+
+void save_sched_clock_state(void)
+{
+ if (!sched_clock_stable)
+ return;
+
+ cyc2ns_suspend = sched_clock();
+}
+
+/*
+ * Even on processors with invariant TSC, the TSC gets reset in some of
+ * the ACPI system sleep states. And in some systems the BIOS seems to
+ * reinit the TSC to an arbitrary value (still sync'd across cpus) during
+ * resume from such sleep states. To cope with this, recompute the
+ * cyc2ns_offset for each cpu so that sched_clock() continues from the
+ * point where it left off during suspend.
+ */
+void restore_sched_clock_state(void)
+{
+ unsigned long long offset;
+ unsigned long flags;
+ int cpu;
+
+ if (!sched_clock_stable)
+ return;
+
+ local_irq_save(flags);
+
+ __get_cpu_var(cyc2ns_offset) = 0;
+ offset = cyc2ns_suspend - sched_clock();
+
+ for_each_possible_cpu(cpu)
+ per_cpu(cyc2ns_offset, cpu) = offset;
+
+ local_irq_restore(flags);
+}
+
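The restore path relies on sched_clock() being an affine function of the TSC. A simplified model of the conversion (the real per-cpu code in asm/timer.h splits the multiply to avoid overflow; this sketch is illustrative):

	#define CYC2NS_SCALE_FACTOR 10	/* scale is ns-per-cycle << 10 */

	static unsigned long cyc2ns_scale;	/* set at calibration */
	static unsigned long long cyc2ns_offset;

	static unsigned long long cycles_2_ns(unsigned long long cyc)
	{
		return ((cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR) +
			cyc2ns_offset;
	}

restore_sched_clock_state() zeroes the offset, samples the post-resume clock, and installs offset = cyc2ns_suspend - sched_clock() on every cpu, so cycles_2_ns() continues from the pre-suspend value.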
#ifdef CONFIG_CPU_FREQ
/* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
@@ -763,6 +805,7 @@
if (!tsc_unstable) {
tsc_unstable = 1;
sched_clock_stable = 0;
+ disable_sched_clock_irqtime();
printk(KERN_INFO "Marking TSC unstable due to %s\n", reason);
/* Change only the rating, when not registered */
if (clocksource_tsc.mult)
@@ -891,6 +934,9 @@
/* now allow native_sched_clock() to use rdtsc */
tsc_disabled = 0;
+ if (!no_sched_irq_time)
+ enable_sched_clock_irqtime();
+
lpj = ((u64)tsc_khz * 1000);
do_div(lpj, HZ);
lpj_fine = lpj;
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index b38bd8b..66ca98a 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -1870,17 +1870,16 @@
struct x86_emulate_ops *ops)
{
struct decode_cache *c = &ctxt->decode;
- u64 old = c->dst.orig_val;
+ u64 old = c->dst.orig_val64;
if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
-
c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
ctxt->eflags &= ~EFLG_ZF;
} else {
- c->dst.val = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
- (u32) c->regs[VCPU_REGS_RBX];
+ c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
+ (u32) c->regs[VCPU_REGS_RBX];
ctxt->eflags |= EFLG_ZF;
}
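For reference, the architectural behaviour of CMPXCHG8B that this hunk emulates, with the 64-bit destination now carried in c->dst.val64/orig_val64 (pseudo-code):

	/*
	 * CMPXCHG8B m64:
	 *
	 *	if (EDX:EAX == m64) {
	 *		m64 = ECX:EBX;	// dst.val64 = RCX<<32 | RBX
	 *		ZF = 1;
	 *	} else {
	 *		EDX:EAX = m64;	// regs load the old memory value
	 *		ZF = 0;
	 *	}
	 */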
@@ -2616,7 +2615,7 @@
c->src.valptr, c->src.bytes);
if (rc != X86EMUL_CONTINUE)
goto done;
- c->src.orig_val = c->src.val;
+ c->src.orig_val64 = c->src.val64;
}
if (c->src2.type == OP_MEM) {
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 8d10c06..4b7b73c 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -64,6 +64,9 @@
if (!found)
found = s->kvm->bsp_vcpu;
+ if (!found)
+ return;
+
kvm_vcpu_kick(found);
}
}
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index ffed068..63c3145 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -43,7 +43,6 @@
u8 irr; /* interrupt request register */
u8 imr; /* interrupt mask register */
u8 isr; /* interrupt service register */
- u8 isr_ack; /* interrupt ack detection */
u8 priority_add; /* highest irq priority */
u8 irq_base;
u8 read_reg_select;
@@ -56,6 +55,7 @@
u8 init4; /* true if 4 byte init */
u8 elcr; /* PIIX edge/trigger selection */
u8 elcr_mask;
+ u8 isr_ack; /* interrupt ack detection */
struct kvm_pic *pics_state;
};
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 77d8c0f..22b06f7 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1056,14 +1056,13 @@
vcpu->arch.apic = apic;
- apic->regs_page = alloc_page(GFP_KERNEL);
+ apic->regs_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
if (apic->regs_page == NULL) {
printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
vcpu->vcpu_id);
goto nomem_free_apic;
}
apic->regs = page_address(apic->regs_page);
- memset(apic->regs, 0, PAGE_SIZE);
apic->vcpu = vcpu;
hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index bc5b9b8..8a3f9f6 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -766,7 +766,6 @@
control->iopm_base_pa = iopm_base;
control->msrpm_base_pa = __pa(svm->msrpm);
- control->tsc_offset = 0;
control->int_ctl = V_INTR_MASKING_MASK;
init_seg(&save->es);
@@ -902,6 +901,7 @@
svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
svm->asid_generation = 0;
init_vmcb(svm);
+ svm->vmcb->control.tsc_offset = 0-native_read_tsc();
err = fx_init(&svm->vcpu);
if (err)
@@ -3163,8 +3163,8 @@
sync_lapic_to_cr8(vcpu);
save_host_msrs(vcpu);
- fs_selector = kvm_read_fs();
- gs_selector = kvm_read_gs();
+ savesegment(fs, fs_selector);
+ savesegment(gs, gs_selector);
ldt_selector = kvm_read_ldt();
svm->vmcb->save.cr2 = vcpu->arch.cr2;
/* required for live migration with NPT */
@@ -3251,10 +3251,15 @@
vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
- kvm_load_fs(fs_selector);
- kvm_load_gs(gs_selector);
- kvm_load_ldt(ldt_selector);
load_host_msrs(vcpu);
+ loadsegment(fs, fs_selector);
+#ifdef CONFIG_X86_64
+ load_gs_index(gs_selector);
+ wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
+#else
+ loadsegment(gs, gs_selector);
+#endif
+ kvm_load_ldt(ldt_selector);
reload_tss(vcpu);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 49b25ee..7bddfab 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -803,7 +803,7 @@
*/
vmx->host_state.ldt_sel = kvm_read_ldt();
vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
- vmx->host_state.fs_sel = kvm_read_fs();
+ savesegment(fs, vmx->host_state.fs_sel);
if (!(vmx->host_state.fs_sel & 7)) {
vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
vmx->host_state.fs_reload_needed = 0;
@@ -811,7 +811,7 @@
vmcs_write16(HOST_FS_SELECTOR, 0);
vmx->host_state.fs_reload_needed = 1;
}
- vmx->host_state.gs_sel = kvm_read_gs();
+ savesegment(gs, vmx->host_state.gs_sel);
if (!(vmx->host_state.gs_sel & 7))
vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
else {
@@ -841,27 +841,21 @@
static void __vmx_load_host_state(struct vcpu_vmx *vmx)
{
- unsigned long flags;
-
if (!vmx->host_state.loaded)
return;
++vmx->vcpu.stat.host_state_reload;
vmx->host_state.loaded = 0;
if (vmx->host_state.fs_reload_needed)
- kvm_load_fs(vmx->host_state.fs_sel);
+ loadsegment(fs, vmx->host_state.fs_sel);
if (vmx->host_state.gs_ldt_reload_needed) {
kvm_load_ldt(vmx->host_state.ldt_sel);
- /*
- * If we have to reload gs, we must take care to
- * preserve our gs base.
- */
- local_irq_save(flags);
- kvm_load_gs(vmx->host_state.gs_sel);
#ifdef CONFIG_X86_64
- wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
+ load_gs_index(vmx->host_state.gs_sel);
+ wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
+#else
+ loadsegment(gs, vmx->host_state.gs_sel);
#endif
- local_irq_restore(flags);
}
reload_tss();
#ifdef CONFIG_X86_64
@@ -2589,8 +2583,8 @@
vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
- vmcs_write16(HOST_FS_SELECTOR, kvm_read_fs()); /* 22.2.4 */
- vmcs_write16(HOST_GS_SELECTOR, kvm_read_gs()); /* 22.2.4 */
+ vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */
+ vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */
vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
#ifdef CONFIG_X86_64
rdmsrl(MSR_FS_BASE, a);
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 9257510..9d5f558 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -324,9 +324,8 @@
}
/*
- * For a single GDT entry which changes, we do the lazy thing: alter our GDT,
- * then tell the Host to reload the entire thing. This operation is so rare
- * that this naive implementation is reasonable.
+ * For a single GDT entry which changes, we simply change our copy and
+ * then tell the host about it.
*/
static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
const void *desc, int type)
@@ -338,9 +337,13 @@
}
/*
- * OK, I lied. There are three "thread local storage" GDT entries which change
+ * There are three "thread local storage" GDT entries which change
* on every context switch (these three entries are how glibc implements
- * __thread variables). So we have a hypercall specifically for this case.
+ * __thread variables). As an optimization, we have a hypercall
+ * specifically for this case.
+ *
+ * Wouldn't it be nicer to have a general LOAD_GDT_ENTRIES hypercall
+ * which took a range of entries?
*/
static void lguest_load_tls(struct thread_struct *t, unsigned int cpu)
{
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 4c4508e..a24c6cf 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -251,6 +251,8 @@
if (!(address >= VMALLOC_START && address < VMALLOC_END))
return -1;
+ WARN_ON_ONCE(in_nmi());
+
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.
@@ -369,6 +371,8 @@
if (!(address >= VMALLOC_START && address < VMALLOC_END))
return -1;
+ WARN_ON_ONCE(in_nmi());
+
/*
* Copy kernel mappings over when needed. This can also
* happen within a race in page table update. In the later
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index bca7909..558f2d3 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -67,7 +67,7 @@
panic("alloc_low_page: ran out of memory");
adr = __va(pfn * PAGE_SIZE);
- memset(adr, 0, PAGE_SIZE);
+ clear_page(adr);
return adr;
}
@@ -558,7 +558,7 @@
static inline void save_pg_dir(void)
{
- memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
+ copy_page(swsusp_pg_dir, swapper_pg_dir);
}
#else /* !CONFIG_ACPI_SLEEP */
static inline void save_pg_dir(void)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 9a66746..7c48ad4 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -293,7 +293,7 @@
panic("alloc_low_page: ran out of memory");
adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
- memset(adr, 0, PAGE_SIZE);
+ clear_page(adr);
*phys = pfn * PAGE_SIZE;
return adr;
}
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 84e236c..72fc70c 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -74,7 +74,7 @@
/*
* Map 'pfn' using fixed map 'type' and protections 'prot'
*/
-void *
+void __iomem *
iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
{
/*
@@ -86,12 +86,12 @@
if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
prot = PAGE_KERNEL_UC_MINUS;
- return kmap_atomic_prot_pfn(pfn, type, prot);
+ return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, type, prot);
}
EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
void
-iounmap_atomic(void *kvaddr, enum km_type type)
+iounmap_atomic(void __iomem *kvaddr, enum km_type type)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
diff --git a/arch/x86/mm/k8topology_64.c b/arch/x86/mm/k8topology_64.c
index 970ed57..52d54bf 100644
--- a/arch/x86/mm/k8topology_64.c
+++ b/arch/x86/mm/k8topology_64.c
@@ -22,7 +22,7 @@
#include <asm/numa.h>
#include <asm/mpspec.h>
#include <asm/apic.h>
-#include <asm/k8.h>
+#include <asm/amd_nb.h>
static struct bootnode __initdata nodes[8];
static nodemask_t __initdata nodes_parsed = NODE_MASK_NONE;
@@ -54,8 +54,8 @@
static __init void early_get_boot_cpu_id(void)
{
/*
- * need to get boot_cpu_id so can use that to create apicid_to_node
- * in k8_scan_nodes()
+ * need to get the APIC ID of the BSP so can use that to
+ * create apicid_to_node in k8_scan_nodes()
*/
#ifdef CONFIG_X86_MPPARSE
/*
@@ -212,7 +212,7 @@
bits = boot_cpu_data.x86_coreid_bits;
cores = (1<<bits);
apicid_base = 0;
- /* need to get boot_cpu_id early for system with apicid lifting */
+ /* get the APIC ID of the BSP early for systems with apicid lifting */
early_get_boot_cpu_id();
if (boot_cpu_physical_apicid > 0) {
pr_info("BSP APIC ID: %02x\n", boot_cpu_physical_apicid);
diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
index b3b531a..d87dd6d 100644
--- a/arch/x86/mm/kmemcheck/kmemcheck.c
+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
@@ -631,6 +631,8 @@
if (!pte)
return false;
+ WARN_ON_ONCE(in_nmi());
+
if (error_code & 2)
kmemcheck_access(regs, address, KMEMCHECK_WRITE);
else
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index a7bcc23..4962f1a 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -18,7 +18,7 @@
#include <asm/dma.h>
#include <asm/numa.h>
#include <asm/acpi.h>
-#include <asm/k8.h>
+#include <asm/amd_nb.h>
struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index f9897f7..9c0d0d3 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -420,9 +420,11 @@
return -1;
}
- for_each_node_mask(i, nodes_parsed)
- e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
- nodes[i].end >> PAGE_SHIFT);
+ for (i = 0; i < num_node_memblks; i++)
+ e820_register_active_regions(memblk_nodeid[i],
+ node_memblk_range[i].start >> PAGE_SHIFT,
+ node_memblk_range[i].end >> PAGE_SHIFT);
+
/* for out of order entries in SRAT */
sort_node_map();
if (!nodes_cover_memory(nodes)) {
diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
index 3855096..2d49d4e 100644
--- a/arch/x86/oprofile/backtrace.c
+++ b/arch/x86/oprofile/backtrace.c
@@ -14,6 +14,7 @@
#include <asm/ptrace.h>
#include <asm/uaccess.h>
#include <asm/stacktrace.h>
+#include <linux/compat.h>
static void backtrace_warning_symbol(void *data, char *msg,
unsigned long symbol)
@@ -48,14 +49,12 @@
.walk_stack = print_context_stack,
};
-struct frame_head {
- struct frame_head *bp;
- unsigned long ret;
-} __attribute__((packed));
-
-static struct frame_head *dump_user_backtrace(struct frame_head *head)
+#ifdef CONFIG_COMPAT
+static struct stack_frame_ia32 *
+dump_user_backtrace_32(struct stack_frame_ia32 *head)
{
- struct frame_head bufhead[2];
+ struct stack_frame_ia32 bufhead[2];
+ struct stack_frame_ia32 *fp;
/* Also check accessibility of one struct frame_head beyond */
if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
@@ -63,20 +62,66 @@
if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
return NULL;
- oprofile_add_trace(bufhead[0].ret);
+ fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
+
+ oprofile_add_trace(bufhead[0].return_address);
+
+ /* frame pointers should strictly progress back up the stack
+ * (towards higher addresses) */
+ if (head >= fp)
+ return NULL;
+
+ return fp;
+}
+
+static inline int
+x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
+{
+ struct stack_frame_ia32 *head;
+
+ /* User process is 32-bit */
+ if (!current || !test_thread_flag(TIF_IA32))
+ return 0;
+
+ head = (struct stack_frame_ia32 *) regs->bp;
+ while (depth-- && head)
+ head = dump_user_backtrace_32(head);
+
+ return 1;
+}
+
+#else
+static inline int
+x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
+{
+ return 0;
+}
+#endif /* CONFIG_COMPAT */
+
+static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
+{
+ struct stack_frame bufhead[2];
+
+ /* Also check accessibility of one struct stack_frame beyond */
+ if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
+ return NULL;
+ if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
+ return NULL;
+
+ oprofile_add_trace(bufhead[0].return_address);
/* frame pointers should strictly progress back up the stack
* (towards higher addresses) */
- if (head >= bufhead[0].bp)
+ if (head >= bufhead[0].next_frame)
return NULL;
- return bufhead[0].bp;
+ return bufhead[0].next_frame;
}
void
x86_backtrace(struct pt_regs * const regs, unsigned int depth)
{
- struct frame_head *head = (struct frame_head *)frame_pointer(regs);
+ struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
if (!user_mode_vm(regs)) {
unsigned long stack = kernel_stack_pointer(regs);
@@ -86,6 +131,9 @@
return;
}
+ if (x86_backtrace_32(regs, depth))
+ return;
+
while (depth-- && head)
head = dump_user_backtrace(head);
}
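The walker assumes the classic frame-pointer chain; a sketch of the per-frame record it copies from user space (field names match the stack_frame structs used above; the diagram is illustrative):

	/*
	 * Record created by a "push %bp; mov %sp, %bp" prologue:
	 *
	 *	struct stack_frame {
	 *		struct stack_frame *next_frame;	// saved frame ptr
	 *		unsigned long return_address;	// pushed by call
	 *	};
	 *
	 * dump_user_backtrace() copies one record (plus one beyond, to
	 * validate accessibility), reports return_address, then follows
	 * next_frame, bailing out unless the chain moves to strictly
	 * higher addresses.
	 */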
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index f6b48f6..bd1489c 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -568,8 +568,13 @@
int error;
error = sysdev_class_register(&oprofile_sysclass);
- if (!error)
- error = sysdev_register(&device_oprofile);
+ if (error)
+ return error;
+
+ error = sysdev_register(&device_oprofile);
+ if (error)
+ sysdev_class_unregister(&oprofile_sysclass);
+
return error;
}
@@ -580,8 +585,10 @@
}
#else
-#define init_sysfs() do { } while (0)
-#define exit_sysfs() do { } while (0)
+
+static inline int init_sysfs(void) { return 0; }
+static inline void exit_sysfs(void) { }
+
#endif /* CONFIG_PM */
static int __init p4_init(char **cpu_type)
@@ -664,7 +671,10 @@
case 14:
*cpu_type = "i386/core";
break;
- case 15: case 23:
+ case 0x0f:
+ case 0x16:
+ case 0x17:
+ case 0x1d:
*cpu_type = "i386/core_2";
break;
case 0x1a:
@@ -685,9 +695,6 @@
return 1;
}
-/* in order to get sysfs right */
-static int using_nmi;
-
int __init op_nmi_init(struct oprofile_operations *ops)
{
__u8 vendor = boot_cpu_data.x86_vendor;
@@ -774,14 +781,15 @@
mux_init(ops);
- init_sysfs();
- using_nmi = 1;
+ ret = init_sysfs();
+ if (ret)
+ return ret;
+
printk(KERN_INFO "oprofile: using NMI interrupt.\n");
return 0;
}
void op_nmi_exit(void)
{
- if (using_nmi)
- exit_sysfs();
+ exit_sysfs();
}
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index e7e8c5f..87bb35e 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -113,6 +113,7 @@
void save_processor_state(void)
{
__save_processor_state(&saved_context);
+ save_sched_clock_state();
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(save_processor_state);
@@ -229,6 +230,7 @@
void restore_processor_state(void)
{
__restore_processor_state(&saved_context);
+ restore_sched_clock_state();
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(restore_processor_state);
diff --git a/arch/x86/xen/platform-pci-unplug.c b/arch/x86/xen/platform-pci-unplug.c
index 554c002..0f45638 100644
--- a/arch/x86/xen/platform-pci-unplug.c
+++ b/arch/x86/xen/platform-pci-unplug.c
@@ -72,13 +72,17 @@
{
int r;
+ /* user explicitly requested no unplug */
+ if (xen_emul_unplug & XEN_UNPLUG_NEVER)
+ return;
/* check the version of the xen platform PCI device */
r = check_platform_magic();
/* If the version matches enable the Xen platform PCI driver.
- * Also enable the Xen platform PCI driver if the version is really old
- * and the user told us to ignore it. */
+ * Also enable the Xen platform PCI driver if the host does
+ * not support the unplug protocol (XEN_PLATFORM_ERR_MAGIC)
+ * but the user told us that unplugging is unnecessary. */
if (r && !(r == XEN_PLATFORM_ERR_MAGIC &&
- (xen_emul_unplug & XEN_UNPLUG_IGNORE)))
+ (xen_emul_unplug & XEN_UNPLUG_UNNECESSARY)))
return;
/* Set the default value of xen_emul_unplug depending on whether or
* not the Xen PV frontends and the Xen platform PCI driver have
@@ -99,7 +103,7 @@
}
}
/* Now unplug the emulated devices */
- if (!(xen_emul_unplug & XEN_UNPLUG_IGNORE))
+ if (!(xen_emul_unplug & XEN_UNPLUG_UNNECESSARY))
outw(xen_emul_unplug, XEN_IOPORT_UNPLUG);
xen_platform_pci_unplug = xen_emul_unplug;
}
@@ -125,8 +129,10 @@
xen_emul_unplug |= XEN_UNPLUG_AUX_IDE_DISKS;
else if (!strncmp(p, "nics", l))
xen_emul_unplug |= XEN_UNPLUG_ALL_NICS;
- else if (!strncmp(p, "ignore", l))
- xen_emul_unplug |= XEN_UNPLUG_IGNORE;
+ else if (!strncmp(p, "unnecessary", l))
+ xen_emul_unplug |= XEN_UNPLUG_UNNECESSARY;
+ else if (!strncmp(p, "never", l))
+ xen_emul_unplug |= XEN_UNPLUG_NEVER;
else
printk(KERN_WARNING "unrecognised option '%s' "
"in parameter 'xen_emul_unplug'\n", p);
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 1a5353a..b2bb5aa 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -489,8 +489,9 @@
__init void xen_hvm_init_time_ops(void)
{
/* vector callback is needed otherwise we cannot receive interrupts
- * on cpu > 0 */
- if (!xen_have_vector_callback && num_present_cpus() > 1)
+ * on cpu > 0 and at this point we don't know how many cpus are
+ * available */
+ if (!xen_have_vector_callback)
return;
if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
printk(KERN_INFO "Xen doesn't support pvclock on HVM,"
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index a680964..2fef1ef 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -966,7 +966,7 @@
/* Currently we do not support hierarchy deeper than two levels (0,1) */
if (parent != cgroup->top_cgroup)
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-EPERM);
blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
if (!blkcg)
diff --git a/block/blk-core.c b/block/blk-core.c
index ee1a1e7..32a1c12 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1198,9 +1198,9 @@
int el_ret;
unsigned int bytes = bio->bi_size;
const unsigned short prio = bio_prio(bio);
- const bool sync = (bio->bi_rw & REQ_SYNC);
- const bool unplug = (bio->bi_rw & REQ_UNPLUG);
- const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+ const bool sync = !!(bio->bi_rw & REQ_SYNC);
+ const bool unplug = !!(bio->bi_rw & REQ_UNPLUG);
+ const unsigned long ff = bio->bi_rw & REQ_FAILFAST_MASK;
int rw_flags;
if ((bio->bi_rw & REQ_HARDBARRIER) &&
diff --git a/block/blk-map.c b/block/blk-map.c
index c65d759..ade0a08 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -307,7 +307,7 @@
return PTR_ERR(bio);
if (rq_data_dir(rq) == WRITE)
- bio->bi_rw |= (1 << REQ_WRITE);
+ bio->bi_rw |= REQ_WRITE;
if (do_copy)
rq->cmd_flags |= REQ_COPY_USER;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 3b0cd42..eafc94f 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -362,6 +362,18 @@
return 0;
/*
+ * Don't merge file system requests and discard requests
+ */
+ if ((req->cmd_flags & REQ_DISCARD) != (next->cmd_flags & REQ_DISCARD))
+ return 0;
+
+ /*
+ * Don't merge discard requests and secure discard requests
+ */
+ if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE))
+ return 0;
+
+ /*
* not contiguous
*/
if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 001ab18..0749b89 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -511,6 +511,7 @@
kobject_uevent(&q->kobj, KOBJ_REMOVE);
kobject_del(&q->kobj);
blk_trace_remove_sysfs(disk_to_dev(disk));
+ kobject_put(&dev->kobj);
return ret;
}
diff --git a/block/blk.h b/block/blk.h
index 6e7dc87..d6b911a 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -142,14 +142,18 @@
static inline int blk_cpu_to_group(int cpu)
{
+ int group = NR_CPUS;
#ifdef CONFIG_SCHED_MC
const struct cpumask *mask = cpu_coregroup_mask(cpu);
- return cpumask_first(mask);
+ group = cpumask_first(mask);
#elif defined(CONFIG_SCHED_SMT)
- return cpumask_first(topology_thread_cpumask(cpu));
+ group = cpumask_first(topology_thread_cpumask(cpu));
#else
return cpu;
#endif
+ if (likely(group < NR_CPUS))
+ return group;
+ return cpu;
}
/*
diff --git a/block/bsg.c b/block/bsg.c
index 82d5882..0c00870 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -426,7 +426,7 @@
/*
* fill in all the output members
*/
- hdr->device_status = status_byte(rq->errors);
+ hdr->device_status = rq->errors & 0xff;
hdr->transport_status = host_byte(rq->errors);
hdr->driver_status = driver_byte(rq->errors);
hdr->info = 0;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index eb4086f..9eba291 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -30,6 +30,7 @@
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;
+static int cfq_group_idle = HZ / 125;
static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
static const int cfq_hist_divisor = 4;
@@ -147,6 +148,8 @@
struct cfq_queue *new_cfqq;
struct cfq_group *cfqg;
struct cfq_group *orig_cfqg;
+ /* Number of sectors dispatched from queue in a single dispatch round */
+ unsigned long nr_sectors;
};
/*
@@ -198,6 +201,8 @@
struct hlist_node cfqd_node;
atomic_t ref;
#endif
+ /* number of requests that are on the dispatch list or inside driver */
+ int dispatched;
};
/*
@@ -271,6 +276,7 @@
unsigned int cfq_slice[2];
unsigned int cfq_slice_async_rq;
unsigned int cfq_slice_idle;
+ unsigned int cfq_group_idle;
unsigned int cfq_latency;
unsigned int cfq_group_isolation;
@@ -378,6 +384,21 @@
&cfqg->service_trees[i][j]: NULL) \
+static inline bool iops_mode(struct cfq_data *cfqd)
+{
+ /*
+ * If we are not idling on queues and it is an NCQ drive, requests
+ * execute in parallel and measuring time is not possible in most
+ * cases, unless we drive shallower queue depths, which itself
+ * becomes a performance bottleneck. In such cases switch to
+ * providing fairness in terms of number of IOs.
+ */
+ if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
+ return true;
+ else
+ return false;
+}
+
static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
{
if (cfq_class_idle(cfqq))
@@ -906,7 +927,6 @@
slice_used = cfqq->allocated_slice;
}
- cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u", slice_used);
return slice_used;
}
@@ -914,19 +934,21 @@
struct cfq_queue *cfqq)
{
struct cfq_rb_root *st = &cfqd->grp_service_tree;
- unsigned int used_sl, charge_sl;
+ unsigned int used_sl, charge;
int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
- cfqg->service_tree_idle.count;
BUG_ON(nr_sync < 0);
- used_sl = charge_sl = cfq_cfqq_slice_usage(cfqq);
+ used_sl = charge = cfq_cfqq_slice_usage(cfqq);
- if (!cfq_cfqq_sync(cfqq) && !nr_sync)
- charge_sl = cfqq->allocated_slice;
+ if (iops_mode(cfqd))
+ charge = cfqq->slice_dispatch;
+ else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
+ charge = cfqq->allocated_slice;
/* Can't update vdisktime while group is on service tree */
cfq_rb_erase(&cfqg->rb_node, st);
- cfqg->vdisktime += cfq_scale_slice(charge_sl, cfqg);
+ cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
__cfq_group_service_tree_add(st, cfqg);
/* This group is being expired. Save the context */
@@ -940,6 +962,9 @@
cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
st->min_vdisktime);
+ cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u"
+ " sect=%u", used_sl, cfqq->slice_dispatch, charge,
+ iops_mode(cfqd), cfqq->nr_sectors);
cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
}
@@ -994,10 +1019,20 @@
*/
atomic_set(&cfqg->ref, 1);
- /* Add group onto cgroup list */
- sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
- cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
+ /*
+ * Add group onto cgroup list. It might happen that bdi->dev is
+ * not initialized yet. Initialize this new group without major
+ * and minor info and this info will be filled in once a new thread
+ * comes for IO. See code above.
+ */
+ if (bdi->dev) {
+ sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
+ cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
MKDEV(major, minor));
+ } else
+ cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
+ 0);
+
cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
/* Add group on cfqd list */
@@ -1587,6 +1622,7 @@
cfqq->allocated_slice = 0;
cfqq->slice_end = 0;
cfqq->slice_dispatch = 0;
+ cfqq->nr_sectors = 0;
cfq_clear_cfqq_wait_request(cfqq);
cfq_clear_cfqq_must_dispatch(cfqq);
@@ -1839,6 +1875,9 @@
BUG_ON(!service_tree);
BUG_ON(!service_tree->count);
+ if (!cfqd->cfq_slice_idle)
+ return false;
+
/* We never do for idle class queues. */
if (prio == IDLE_WORKLOAD)
return false;
@@ -1863,7 +1902,7 @@
{
struct cfq_queue *cfqq = cfqd->active_queue;
struct cfq_io_context *cic;
- unsigned long sl;
+ unsigned long sl, group_idle = 0;
/*
* SSD device without seek penalty, disable idling. But only do so
@@ -1879,8 +1918,13 @@
/*
* idle is disabled, either manually or by past process history
*/
- if (!cfqd->cfq_slice_idle || !cfq_should_idle(cfqd, cfqq))
- return;
+ if (!cfq_should_idle(cfqd, cfqq)) {
+ /* no queue idling. Check for group idling */
+ if (cfqd->cfq_group_idle)
+ group_idle = cfqd->cfq_group_idle;
+ else
+ return;
+ }
/*
* still active requests from this queue, don't idle
@@ -1907,13 +1951,21 @@
return;
}
+ /* There are other queues in the group, don't do group idle */
+ if (group_idle && cfqq->cfqg->nr_cfqq > 1)
+ return;
+
cfq_mark_cfqq_wait_request(cfqq);
- sl = cfqd->cfq_slice_idle;
+ if (group_idle)
+ sl = cfqd->cfq_group_idle;
+ else
+ sl = cfqd->cfq_slice_idle;
mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
- cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
+ cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
+ group_idle ? 1 : 0);
}
/*
@@ -1929,9 +1981,11 @@
cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
cfq_remove_request(rq);
cfqq->dispatched++;
+ (RQ_CFQG(rq))->dispatched++;
elv_dispatch_sort(q, rq);
cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
+ cfqq->nr_sectors += blk_rq_sectors(rq);
cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
rq_data_dir(rq), rq_is_sync(rq));
}
@@ -2198,7 +2252,7 @@
cfqq = NULL;
goto keep_queue;
} else
- goto expire;
+ goto check_group_idle;
}
/*
@@ -2226,8 +2280,23 @@
* flight or is idling for a new request, allow either of these
* conditions to happen (or time out) before selecting a new queue.
*/
- if (timer_pending(&cfqd->idle_slice_timer) ||
- (cfqq->dispatched && cfq_should_idle(cfqd, cfqq))) {
+ if (timer_pending(&cfqd->idle_slice_timer)) {
+ cfqq = NULL;
+ goto keep_queue;
+ }
+
+ if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
+ cfqq = NULL;
+ goto keep_queue;
+ }
+
+ /*
+ * If group idle is enabled and there are requests dispatched from
+ * this group, wait for requests to complete.
+ */
+check_group_idle:
+ if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1
+ && cfqq->cfqg->dispatched) {
cfqq = NULL;
goto keep_queue;
}
@@ -3375,6 +3444,7 @@
WARN_ON(!cfqq->dispatched);
cfqd->rq_in_driver--;
cfqq->dispatched--;
+ (RQ_CFQG(rq))->dispatched--;
cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
rq_start_time_ns(rq), rq_io_start_time_ns(rq),
rq_data_dir(rq), rq_is_sync(rq));
@@ -3404,7 +3474,10 @@
* the queue.
*/
if (cfq_should_wait_busy(cfqd, cfqq)) {
- cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
+ unsigned long extend_sl = cfqd->cfq_slice_idle;
+ if (!cfqd->cfq_slice_idle)
+ extend_sl = cfqd->cfq_group_idle;
+ cfqq->slice_end = jiffies + extend_sl;
cfq_mark_cfqq_wait_busy(cfqq);
cfq_log_cfqq(cfqd, cfqq, "will busy wait");
}
@@ -3850,6 +3923,7 @@
cfqd->cfq_slice[1] = cfq_slice_sync;
cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
cfqd->cfq_slice_idle = cfq_slice_idle;
+ cfqd->cfq_group_idle = cfq_group_idle;
cfqd->cfq_latency = 1;
cfqd->cfq_group_isolation = 0;
cfqd->hw_tag = -1;
@@ -3922,6 +3996,7 @@
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
+SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
@@ -3954,6 +4029,7 @@
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
+STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
@@ -3975,6 +4051,7 @@
CFQ_ATTR(slice_async),
CFQ_ATTR(slice_async_rq),
CFQ_ATTR(slice_idle),
+ CFQ_ATTR(group_idle),
CFQ_ATTR(low_latency),
CFQ_ATTR(group_isolation),
__ATTR_NULL
@@ -4028,6 +4105,12 @@
if (!cfq_slice_idle)
cfq_slice_idle = 1;
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+ if (!cfq_group_idle)
+ cfq_group_idle = 1;
+#else
+ cfq_group_idle = 0;
+#endif
if (cfq_slab_setup())
return -ENOMEM;
diff --git a/block/elevator.c b/block/elevator.c
index ec585c9..4e11559 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -938,6 +938,7 @@
}
}
kobject_uevent(&e->kobj, KOBJ_ADD);
+ e->registered = 1;
}
return error;
}
@@ -947,6 +948,7 @@
{
kobject_uevent(&e->kobj, KOBJ_REMOVE);
kobject_del(&e->kobj);
+ e->registered = 0;
}
void elv_unregister_queue(struct request_queue *q)
@@ -1009,18 +1011,19 @@
{
struct elevator_queue *old_elevator, *e;
void *data;
+ int err;
/*
* Allocate new elevator
*/
e = elevator_alloc(q, new_e);
if (!e)
- return 0;
+ return -ENOMEM;
data = elevator_init_queue(q, e);
if (!data) {
kobject_put(&e->kobj);
- return 0;
+ return -ENOMEM;
}
/*
@@ -1041,10 +1044,13 @@
spin_unlock_irq(q->queue_lock);
- __elv_unregister_queue(old_elevator);
+ if (old_elevator->registered) {
+ __elv_unregister_queue(old_elevator);
- if (elv_register_queue(q))
- goto fail_register;
+ err = elv_register_queue(q);
+ if (err)
+ goto fail_register;
+ }
/*
* finally exit old elevator and turn off BYPASS.
@@ -1056,7 +1062,7 @@
blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);
- return 1;
+ return 0;
fail_register:
/*
@@ -1071,17 +1077,19 @@
queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
spin_unlock_irq(q->queue_lock);
- return 0;
+ return err;
}
-ssize_t elv_iosched_store(struct request_queue *q, const char *name,
- size_t count)
+/*
+ * Switch this queue to the given IO scheduler.
+ */
+int elevator_change(struct request_queue *q, const char *name)
{
char elevator_name[ELV_NAME_MAX];
struct elevator_type *e;
if (!q->elevator)
- return count;
+ return -ENXIO;
strlcpy(elevator_name, name, sizeof(elevator_name));
e = elevator_get(strstrip(elevator_name));
@@ -1092,13 +1100,27 @@
if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
elevator_put(e);
- return count;
+ return 0;
}
- if (!elevator_switch(q, e))
- printk(KERN_ERR "elevator: switch to %s failed\n",
- elevator_name);
- return count;
+ return elevator_switch(q, e);
+}
+EXPORT_SYMBOL(elevator_change);
+
+ssize_t elv_iosched_store(struct request_queue *q, const char *name,
+ size_t count)
+{
+ int ret;
+
+ if (!q->elevator)
+ return count;
+
+ ret = elevator_change(q, name);
+ if (!ret)
+ return count;
+
+ printk(KERN_ERR "elevator: switch to %s failed\n", name);
+ return ret;
}
ssize_t elv_iosched_show(struct request_queue *q, char *name)
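A hedged sketch of an in-kernel caller of the newly exported elevator_change(); the driver hook is hypothetical, and the error handling mirrors elv_iosched_store() above:

	#include <linux/blkdev.h>
	#include <linux/elevator.h>

	/* Hypothetical driver hook: prefer "noop" for a queue whose
	 * device does its own internal scheduling. */
	static int prefer_noop(struct request_queue *q)
	{
		int err = elevator_change(q, "noop");

		if (err)
			printk(KERN_WARNING
			       "elevator: switch to noop failed: %d\n", err);
		return err;
	}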
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 1cd497d..e573077 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -101,13 +101,13 @@
select CRYPTO_BLKCIPHER2
select CRYPTO_PCOMP2
-config CRYPTO_MANAGER_TESTS
- bool "Run algolithms' self-tests"
+config CRYPTO_MANAGER_DISABLE_TESTS
+ bool "Disable run-time self tests"
default y
depends on CRYPTO_MANAGER2
help
- Run cryptomanager's tests for the new crypto algorithms being
- registered.
+ Disable run-time self tests that normally take place at
+ algorithm registration.
config CRYPTO_GF128MUL
tristate "GF(2^128) multiplication functions (EXPERIMENTAL)"
diff --git a/crypto/ahash.c b/crypto/ahash.c
index b8c59b8..f669822 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -47,8 +47,11 @@
walk->data = crypto_kmap(walk->pg, 0);
walk->data += offset;
- if (offset & alignmask)
- nbytes = alignmask + 1 - (offset & alignmask);
+ if (offset & alignmask) {
+ unsigned int unaligned = alignmask + 1 - (offset & alignmask);
+ if (nbytes > unaligned)
+ nbytes = unaligned;
+ }
walk->entrylen -= nbytes;
return nbytes;
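A worked example of the clamp (numbers chosen for illustration):

	/*
	 * alignmask = 3 (4-byte alignment), offset = 5 into the page,
	 * nbytes = 2 bytes left in this walk entry:
	 *
	 *	unaligned = alignmask + 1 - (offset & alignmask)
	 *	          = 4 - (5 & 3) = 3
	 *
	 * The old code set nbytes = 3 unconditionally, overrunning the
	 * two bytes that actually remain; the fix only ever shrinks
	 * nbytes, so here it stays 2.
	 */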
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 40bd391..791d194 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -206,13 +206,16 @@
return NOTIFY_OK;
}
-#ifdef CONFIG_CRYPTO_MANAGER_TESTS
static int cryptomgr_test(void *data)
{
struct crypto_test_param *param = data;
u32 type = param->type;
int err = 0;
+#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
+ goto skiptest;
+#endif
+
if (type & CRYPTO_ALG_TESTED)
goto skiptest;
@@ -267,7 +270,6 @@
err:
return NOTIFY_OK;
}
-#endif /* CONFIG_CRYPTO_MANAGER_TESTS */
static int cryptomgr_notify(struct notifier_block *this, unsigned long msg,
void *data)
@@ -275,10 +277,8 @@
switch (msg) {
case CRYPTO_MSG_ALG_REQUEST:
return cryptomgr_schedule_probe(data);
-#ifdef CONFIG_CRYPTO_MANAGER_TESTS
case CRYPTO_MSG_ALG_REGISTER:
return cryptomgr_schedule_test(data);
-#endif
}
return NOTIFY_DONE;
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index abd980c..fa8c8f7 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -23,7 +23,7 @@
#include "internal.h"
-#ifndef CONFIG_CRYPTO_MANAGER_TESTS
+#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
/* a perfect nop */
int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
@@ -2542,6 +2542,6 @@
return -EINVAL;
}
-#endif /* CONFIG_CRYPTO_MANAGER_TESTS */
+#endif /* CONFIG_CRYPTO_MANAGER_DISABLE_TESTS */
EXPORT_SYMBOL_GPL(alg_test);
diff --git a/drivers/Makefile b/drivers/Makefile
index ae47344..a2aea53 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -50,7 +50,7 @@
obj-y += net/
obj-$(CONFIG_ATM) += atm/
obj-$(CONFIG_FUSION) += message/
-obj-$(CONFIG_FIREWIRE) += firewire/
+obj-y += firewire/
obj-y += ieee1394/
obj-$(CONFIG_UIO) += uio/
obj-y += cdrom/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index b811f21..88681ac 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -105,7 +105,7 @@
Be aware that using this interface can confuse your Embedded
Controller in a way that a normal reboot is not enough. You then
- have to power of your system, and remove the laptop battery for
+ have to power off your system, and remove the laptop battery for
some seconds.
An Embedded Controller typically is available on laptops and reads
sensor values like battery state and temperature.
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index b76848c..6b115f6 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -382,31 +382,32 @@
device_remove_file(&device->dev, &dev_attr_rrtime);
}
-/* Query firmware how many CPUs should be idle */
-static int acpi_pad_pur(acpi_handle handle, int *num_cpus)
+/*
+ * Query firmware for how many CPUs should be idle.
+ * Returns -1 on failure.
+ */
+static int acpi_pad_pur(acpi_handle handle)
{
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
union acpi_object *package;
- int rev, num, ret = -EINVAL;
+ int num = -1;
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer)))
- return -EINVAL;
+ return num;
if (!buffer.length || !buffer.pointer)
- return -EINVAL;
+ return num;
package = buffer.pointer;
- if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 2)
- goto out;
- rev = package->package.elements[0].integer.value;
- num = package->package.elements[1].integer.value;
- if (rev != 1 || num < 0)
- goto out;
- *num_cpus = num;
- ret = 0;
-out:
+
+ if (package->type == ACPI_TYPE_PACKAGE &&
+ package->package.count == 2 &&
+ package->package.elements[0].integer.value == 1) /* rev 1 */
+
+ num = package->package.elements[1].integer.value;
+
kfree(buffer.pointer);
- return ret;
+ return num;
}
/* Notify firmware how many CPUs are idle */
@@ -433,7 +434,8 @@
uint32_t idle_cpus;
mutex_lock(&isolated_cpus_lock);
- if (acpi_pad_pur(handle, &num_cpus)) {
+ num_cpus = acpi_pad_pur(handle);
+ if (num_cpus < 0) {
mutex_unlock(&isolated_cpus_lock);
return;
}
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index df85b53..7dad916 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -854,6 +854,7 @@
ACPI_BITMASK_POWER_BUTTON_STATUS | \
ACPI_BITMASK_SLEEP_BUTTON_STATUS | \
ACPI_BITMASK_RT_CLOCK_STATUS | \
+ ACPI_BITMASK_PCIEXP_WAKE_DISABLE | \
ACPI_BITMASK_WAKE_STATUS)
#define ACPI_BITMASK_TIMER_ENABLE 0x0001
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index 74c24d5..4093522 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -109,7 +109,7 @@
*
* DESCRIPTION: Reacquire the interpreter execution region from within the
* interpreter code. Failure to enter the interpreter region is a
- * fatal system error. Used in conjuction with
+ * fatal system error. Used in conjunction with
* relinquish_interpreter
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index 22cfcfb..491191e 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -149,7 +149,7 @@
/*
* 16-, 32-, and 64-bit cases must use the move macros that perform
- * endian conversion and/or accomodate hardware that cannot perform
+ * endian conversion and/or accommodate hardware that cannot perform
* misaligned memory transfers
*/
case ACPI_RSC_MOVE16:
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index 907e350..fca34cc 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -34,6 +34,6 @@
depends on ACPI_APEI
help
ERST is a way provided by APEI to save and retrieve hardware
- error infomation to and from a persistent store. Enable this
+ error information to and from a persistent store. Enable this
	  if you want to debug and test the ERST kernel support
and firmware implementation.
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 73fd0c7..4a904a4 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -445,11 +445,15 @@
int apei_resources_request(struct apei_resources *resources,
const char *desc)
{
- struct apei_res *res, *res_bak;
+ struct apei_res *res, *res_bak = NULL;
struct resource *r;
+ int rc;
- apei_resources_sub(resources, &apei_resources_all);
+ rc = apei_resources_sub(resources, &apei_resources_all);
+ if (rc)
+ return rc;
+ rc = -EINVAL;
list_for_each_entry(res, &resources->iomem, list) {
r = request_mem_region(res->start, res->end - res->start,
desc);
@@ -475,7 +479,11 @@
}
}
- apei_resources_merge(&apei_resources_all, resources);
+ rc = apei_resources_merge(&apei_resources_all, resources);
+ if (rc) {
+ pr_err(APEI_PFX "Fail to merge resources!\n");
+ goto err_unmap_ioport;
+ }
return 0;
err_unmap_ioport:
@@ -491,12 +499,13 @@
break;
release_mem_region(res->start, res->end - res->start);
}
- return -EINVAL;
+ return rc;
}
EXPORT_SYMBOL_GPL(apei_resources_request);
void apei_resources_release(struct apei_resources *resources)
{
+ int rc;
struct apei_res *res;
list_for_each_entry(res, &resources->iomem, list)
@@ -504,7 +513,9 @@
list_for_each_entry(res, &resources->ioport, list)
release_region(res->start, res->end - res->start);
- apei_resources_sub(&apei_resources_all, resources);
+ rc = apei_resources_sub(&apei_resources_all, resources);
+ if (rc)
+ pr_err(APEI_PFX "Fail to sub resources!\n");
}
EXPORT_SYMBOL_GPL(apei_resources_release);
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index 465c885..cf29df6 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -426,7 +426,9 @@
static int einj_check_table(struct acpi_table_einj *einj_tab)
{
- if (einj_tab->header_length != sizeof(struct acpi_table_einj))
+ if ((einj_tab->header_length !=
+ (sizeof(struct acpi_table_einj) - sizeof(einj_tab->header)))
+ && (einj_tab->header_length != sizeof(struct acpi_table_einj)))
return -EINVAL;
if (einj_tab->header.length < sizeof(struct acpi_table_einj))
return -EINVAL;
diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c
index 5281ddd..da1228a 100644
--- a/drivers/acpi/apei/erst-dbg.c
+++ b/drivers/acpi/apei/erst-dbg.c
@@ -2,7 +2,7 @@
* APEI Error Record Serialization Table debug support
*
* ERST is a way provided by APEI to save and retrieve hardware error
- * infomation to and from a persistent store. This file provide the
+ * information to and from a persistent store. This file provide the
* debugging/testing support for ERST kernel support and firmware
* implementation.
*
@@ -111,11 +111,13 @@
goto out;
}
if (len > erst_dbg_buf_len) {
- kfree(erst_dbg_buf);
+ void *p;
rc = -ENOMEM;
- erst_dbg_buf = kmalloc(len, GFP_KERNEL);
- if (!erst_dbg_buf)
+ p = kmalloc(len, GFP_KERNEL);
+ if (!p)
goto out;
+ kfree(erst_dbg_buf);
+ erst_dbg_buf = p;
erst_dbg_buf_len = len;
goto retry;
}
@@ -150,11 +152,13 @@
if (mutex_lock_interruptible(&erst_dbg_mutex))
return -EINTR;
if (usize > erst_dbg_buf_len) {
- kfree(erst_dbg_buf);
+ void *p;
rc = -ENOMEM;
- erst_dbg_buf = kmalloc(usize, GFP_KERNEL);
- if (!erst_dbg_buf)
+ p = kmalloc(usize, GFP_KERNEL);
+ if (!p)
goto out;
+ kfree(erst_dbg_buf);
+ erst_dbg_buf = p;
erst_dbg_buf_len = usize;
}
rc = copy_from_user(erst_dbg_buf, ubuf, usize);
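The two hunks above switch erst-dbg to an allocate-before-free pattern: the replacement buffer is obtained first, and the old one is released only once the allocation has succeeded, so a failed kmalloc() no longer leaves erst_dbg_buf freed and dangling. A minimal userspace sketch of the same idiom (names hypothetical; the ERST buffer is scratch space, so contents are deliberately not preserved):

	#include <stdlib.h>

	/*
	 * Replace a scratch buffer, keeping the old one if allocation fails.
	 * Contents are not preserved, matching the erst-dbg usage above.
	 */
	static int buf_regrow(void **bufp, size_t *lenp, size_t newlen)
	{
		void *p = malloc(newlen);

		if (!p)
			return -1;	/* old buffer and old length remain valid */
		free(*bufp);		/* release the old buffer only on success */
		*bufp = p;
		*lenp = newlen;
		return 0;
	}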
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 18645f4..1211c03 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -2,7 +2,7 @@
* APEI Error Record Serialization Table support
*
* ERST is a way provided by APEI to save and retrieve hardware error
- * infomation to and from a persistent store.
+ * information to and from a persistent store.
*
* For more information about ERST, please refer to ACPI Specification
* version 4.0, section 17.4.
@@ -266,13 +266,30 @@
{
int rc;
u64 offset;
+ void *src, *dst;
+
+ /* ioremap does not work in interrupt context */
+ if (in_interrupt()) {
+ pr_warning(ERST_PFX
+ "MOVE_DATA can not be used in interrupt context");
+ return -EBUSY;
+ }
rc = __apei_exec_read_register(entry, &offset);
if (rc)
return rc;
- memmove((void *)ctx->dst_base + offset,
- (void *)ctx->src_base + offset,
- ctx->var2);
+
+ src = ioremap(ctx->src_base + offset, ctx->var2);
+ if (!src)
+ return -ENOMEM;
+	dst = ioremap(ctx->dst_base + offset, ctx->var2);
+	if (!dst) {
+		iounmap(src);
+		return -ENOMEM;
+	}
+
+ memmove(dst, src, ctx->var2);
+
+ iounmap(src);
+ iounmap(dst);
return 0;
}
@@ -750,7 +767,9 @@
static int erst_check_table(struct acpi_table_erst *erst_tab)
{
- if (erst_tab->header_length != sizeof(struct acpi_table_erst))
+ if ((erst_tab->header_length !=
+ (sizeof(struct acpi_table_erst) - sizeof(erst_tab->header)))
+	    && (erst_tab->header_length != sizeof(struct acpi_table_erst)))
return -EINVAL;
if (erst_tab->header.length < sizeof(struct acpi_table_erst))
return -EINVAL;
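The MOVE_DATA handler above acquires two mappings in sequence, so each exit path must release exactly the mappings obtained so far; the failure branch for the second ioremap() unmaps the first. A generic sketch of this staged acquire/unwind idiom, with malloc()/free() standing in for ioremap()/iounmap():

	#include <stdlib.h>
	#include <string.h>

	/* Acquire two resources, copy, and unwind exactly what was acquired. */
	static int copy_between(size_t n)
	{
		void *src, *dst;
		int rc = -1;

		src = malloc(n);		/* stands in for ioremap(src_base, n) */
		if (!src)
			goto out;
		dst = malloc(n);		/* stands in for ioremap(dst_base, n) */
		if (!dst)
			goto out_free_src;	/* release src, which we already hold */

		memmove(dst, src, n);
		rc = 0;
		free(dst);			/* stands in for iounmap(dst) */
	out_free_src:
		free(src);			/* stands in for iounmap(src) */
	out:
		return rc;
	}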
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 385a605..0d505e5 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -302,7 +302,7 @@
struct ghes *ghes = NULL;
int rc = -EINVAL;
- generic = ghes_dev->dev.platform_data;
+ generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
if (!generic->enabled)
return -ENODEV;
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 343168d..1a3508a 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -137,20 +137,23 @@
static int hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data)
{
- struct acpi_hest_generic *generic;
struct platform_device *ghes_dev;
struct ghes_arr *ghes_arr = data;
int rc;
if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR)
return 0;
- generic = (struct acpi_hest_generic *)hest_hdr;
- if (!generic->enabled)
+
+ if (!((struct acpi_hest_generic *)hest_hdr)->enabled)
return 0;
ghes_dev = platform_device_alloc("GHES", hest_hdr->source_id);
if (!ghes_dev)
return -ENOMEM;
- ghes_dev->dev.platform_data = generic;
+
+ rc = platform_device_add_data(ghes_dev, &hest_hdr, sizeof(void *));
+ if (rc)
+ goto err;
+
rc = platform_device_add(ghes_dev);
if (rc)
goto err;
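The hest.c change relies on platform_device_add_data() copying the buffer it is given into memory owned by the platform device. To hand a pointer to the driver, the pointer itself is treated as the buffer, and the consumer (see the ghes.c hunk above) recovers it by dereferencing platform_data as a pointer-to-pointer. A hedged sketch with a hypothetical payload type:

	#include <linux/platform_device.h>

	struct foo;				/* hypothetical payload type */

	/* Producer side: platform_device_add_data() copies the buffer it is
	 * given, so to hand over a pointer we copy the pointer itself. */
	static int stash_foo(struct platform_device *pdev, struct foo *obj)
	{
		return platform_device_add_data(pdev, &obj, sizeof(obj));
	}

	/* Consumer side: platform_data now points at a copy of the pointer,
	 * not at the object, so it is dereferenced as a pointer-to-pointer. */
	static struct foo *fetch_foo(struct platform_device *pdev)
	{
		return *(struct foo **)pdev->dev.platform_data;
	}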
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
index 8f8bd73..542e539 100644
--- a/drivers/acpi/atomicio.c
+++ b/drivers/acpi/atomicio.c
@@ -142,7 +142,7 @@
list_add_tail_rcu(&map->list, &acpi_iomaps);
spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
- return vaddr + (paddr - pg_off);
+ return map->vaddr + (paddr - map->paddr);
err_unmap:
iounmap(vaddr);
return NULL;
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index dc58402..9841720 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -273,7 +273,6 @@
POWER_SUPPLY_PROP_CYCLE_COUNT,
POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
- POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_POWER_NOW,
POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
POWER_SUPPLY_PROP_ENERGY_FULL,
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 2bb28b9..af308d0 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -183,6 +183,8 @@
{
printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
acpi_osi_setup("!Windows 2006");
+ acpi_osi_setup("!Windows 2006 SP1");
+ acpi_osi_setup("!Windows 2006 SP2");
return 0;
}
static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
@@ -202,6 +204,23 @@
},
},
{
+ /*
+	 * The MSI GX723's DSDT contains an NVIF method that the Nvidia
+	 * driver (e.g. nouveau) must handle when the user presses the
+	 * brightness hotkey. Nouveau does not implement it yet, so the
+	 * hotkey currently triggers an infinite while loop in the DSDT.
+	 * Add the MSI GX723's DMI information to this table as a
+	 * workaround until nouveau grows support, at which point the
+	 * entry can be removed.
+ */
+ .callback = dmi_disable_osi_vista,
+ .ident = "MSI GX723",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GX723"),
+ },
+ },
+ {
.callback = dmi_disable_osi_vista,
.ident = "Sony VGN-NS10J_S",
.matches = {
@@ -226,6 +245,14 @@
},
},
{
+ .callback = dmi_disable_osi_vista,
+ .ident = "Toshiba Satellite L355",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Satellite L355"),
+ },
+ },
+ {
.callback = dmi_disable_osi_win7,
.ident = "ASUS K50IJ",
.matches = {
@@ -233,6 +260,14 @@
DMI_MATCH(DMI_PRODUCT_NAME, "K50IJ"),
},
},
+ {
+ .callback = dmi_disable_osi_vista,
+ .ident = "Toshiba P305D",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P305D"),
+ },
+ },
/*
* BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 5c221ab..310e3b9 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -55,7 +55,7 @@
static int set_power_nocheck(const struct dmi_system_id *id)
{
printk(KERN_NOTICE PREFIX "%s detected - "
- "disable power check in power transistion\n", id->ident);
+ "disable power check in power transition\n", id->ident);
acpi_power_nocheck = 1;
return 0;
}
@@ -80,23 +80,15 @@
static struct dmi_system_id dsdt_dmi_table[] __initdata = {
/*
- * Insyde BIOS on some TOSHIBA machines corrupt the DSDT.
+	 * Invoke the DSDT corruption work-around on all Toshiba Satellite models.
* https://bugzilla.kernel.org/show_bug.cgi?id=14679
*/
{
.callback = set_copy_dsdt,
- .ident = "TOSHIBA Satellite A505",
+ .ident = "TOSHIBA Satellite",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A505"),
- },
- },
- {
- .callback = set_copy_dsdt,
- .ident = "TOSHIBA Satellite L505D",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L505D"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite"),
},
},
{}
@@ -1027,7 +1019,7 @@
/*
* If the laptop falls into the DMI check table, the power state check
- * will be disabled in the course of device power transistion.
+ * will be disabled in the course of device power transition.
*/
dmi_check_system(power_nocheck_dmi_table);
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 8a3b840..d94d295 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -369,7 +369,9 @@
acpi_bus_unregister_driver(&acpi_fan_driver);
+#ifdef CONFIG_ACPI_PROCFS
remove_proc_entry(ACPI_FAN_CLASS, acpi_root_dir);
+#endif
return;
}
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 1f67057..3ba8d1f 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -33,7 +33,6 @@
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
-#include <linux/pci-aspm.h>
#include <linux/acpi.h>
#include <linux/slab.h>
#include <acpi/acpi_bus.h>
@@ -226,22 +225,31 @@
return status;
}
-static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root, u32 flags)
+static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root,
+ u32 support,
+ u32 *control)
{
acpi_status status;
- u32 support_set, result, capbuf[3];
+ u32 result, capbuf[3];
- /* do _OSC query for all possible controls */
- support_set = root->osc_support_set | (flags & OSC_PCI_SUPPORT_MASKS);
+ support &= OSC_PCI_SUPPORT_MASKS;
+ support |= root->osc_support_set;
+
capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
- capbuf[OSC_SUPPORT_TYPE] = support_set;
- capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS;
+ capbuf[OSC_SUPPORT_TYPE] = support;
+ if (control) {
+ *control &= OSC_PCI_CONTROL_MASKS;
+ capbuf[OSC_CONTROL_TYPE] = *control | root->osc_control_set;
+ } else {
+ /* Run _OSC query for all possible controls. */
+ capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS;
+ }
status = acpi_pci_run_osc(root->device->handle, capbuf, &result);
if (ACPI_SUCCESS(status)) {
- root->osc_support_set = support_set;
- root->osc_control_qry = result;
- root->osc_queried = 1;
+ root->osc_support_set = support;
+ if (control)
+ *control = result;
}
return status;
}
@@ -255,7 +263,7 @@
if (ACPI_FAILURE(status))
return status;
mutex_lock(&osc_lock);
- status = acpi_pci_query_osc(root, flags);
+ status = acpi_pci_query_osc(root, flags, NULL);
mutex_unlock(&osc_lock);
return status;
}
@@ -365,55 +373,70 @@
EXPORT_SYMBOL_GPL(acpi_get_pci_dev);
/**
- * acpi_pci_osc_control_set - commit requested control to Firmware
- * @handle: acpi_handle for the target ACPI object
- * @flags: driver's requested control bits
+ * acpi_pci_osc_control_set - Request control of PCI root _OSC features.
+ * @handle: ACPI handle of a PCI root bridge (or PCIe Root Complex).
+ * @mask: Mask of _OSC bits to request control of, place to store control mask.
+ * @req: Mask of _OSC bits whose control is essential to the caller.
*
- * Attempt to take control from Firmware on requested control bits.
+ * Run _OSC query for @mask and if that is successful, compare the returned
+ * mask of control bits with @req. If all of the @req bits are set in the
+ * returned mask, run _OSC request for it.
+ *
+ * The variable at the @mask address may be modified regardless of whether or
+ * not the function returns success. On success it will contain the mask of
+ * _OSC bits the BIOS has granted control of, but its contents are meaningless
+ * on failure.
**/
-acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags)
+acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req)
{
- acpi_status status;
- u32 control_req, result, capbuf[3];
- acpi_handle tmp;
struct acpi_pci_root *root;
+ acpi_status status;
+ u32 ctrl, capbuf[3];
+ acpi_handle tmp;
- status = acpi_get_handle(handle, "_OSC", &tmp);
- if (ACPI_FAILURE(status))
- return status;
+ if (!mask)
+ return AE_BAD_PARAMETER;
- control_req = (flags & OSC_PCI_CONTROL_MASKS);
- if (!control_req)
+ ctrl = *mask & OSC_PCI_CONTROL_MASKS;
+ if ((ctrl & req) != req)
return AE_TYPE;
root = acpi_pci_find_root(handle);
if (!root)
return AE_NOT_EXIST;
+ status = acpi_get_handle(handle, "_OSC", &tmp);
+ if (ACPI_FAILURE(status))
+ return status;
+
mutex_lock(&osc_lock);
+
+ *mask = ctrl | root->osc_control_set;
/* No need to evaluate _OSC if the control was already granted. */
- if ((root->osc_control_set & control_req) == control_req)
+ if ((root->osc_control_set & ctrl) == ctrl)
goto out;
- /* Need to query controls first before requesting them */
- if (!root->osc_queried) {
- status = acpi_pci_query_osc(root, root->osc_support_set);
+ /* Need to check the available controls bits before requesting them. */
+ while (*mask) {
+ status = acpi_pci_query_osc(root, root->osc_support_set, mask);
if (ACPI_FAILURE(status))
goto out;
+ if (ctrl == *mask)
+ break;
+ ctrl = *mask;
}
- if ((root->osc_control_qry & control_req) != control_req) {
- printk(KERN_DEBUG
- "Firmware did not grant requested _OSC control\n");
+
+ if ((ctrl & req) != req) {
status = AE_SUPPORT;
goto out;
}
capbuf[OSC_QUERY_TYPE] = 0;
capbuf[OSC_SUPPORT_TYPE] = root->osc_support_set;
- capbuf[OSC_CONTROL_TYPE] = root->osc_control_set | control_req;
- status = acpi_pci_run_osc(handle, capbuf, &result);
+ capbuf[OSC_CONTROL_TYPE] = ctrl;
+ status = acpi_pci_run_osc(handle, capbuf, mask);
if (ACPI_SUCCESS(status))
- root->osc_control_set = result;
+ root->osc_control_set = *mask;
out:
mutex_unlock(&osc_lock);
return status;
@@ -544,14 +567,6 @@
if (flags != base_flags)
acpi_pci_osc_support(root, flags);
- status = acpi_pci_osc_control_set(root->device->handle,
- OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
-
- if (ACPI_FAILURE(status)) {
- printk(KERN_INFO "Unable to assume PCIe control: Disabling ASPM\n");
- pcie_no_aspm();
- }
-
pci_acpi_add_bus_pm_notifier(device, root->bus);
if (device->wakeup.flags.run_wake)
device_set_run_wake(root->bus->bridge, true);
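Under the reworked interface, a caller puts the full set of control bits it would like into *mask and the subset it cannot operate without into req; on success *mask is updated to what the BIOS actually granted. A sketch of a hypothetical caller, assuming the usual pci-acpi.h declarations and the PCIe capability-structure control bit seen above:

	#include <linux/acpi.h>
	#include <linux/pci-acpi.h>

	static int request_pcie_cap_control(acpi_handle handle)
	{
		u32 flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL;
		acpi_status status;

		/* Request, and insist on, PCIe capability structure control. */
		status = acpi_pci_osc_control_set(handle, &flags,
					OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
		if (ACPI_FAILURE(status))
			return -ENODEV;	/* flags is meaningless on failure */

		/* flags now holds every control bit firmware granted. */
		return 0;
	}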
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index e9699aa..bec561c 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -29,12 +29,6 @@
static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = {
{
- set_no_mwait, "IFL91 board", {
- DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
- DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"),
- DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL},
- {
set_no_mwait, "Extensa 5220", {
DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
@@ -352,4 +346,5 @@
acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX,
early_init_pdc, NULL, NULL, NULL);
+ acpi_get_devices("ACPI0007", early_init_pdc, NULL, NULL);
}
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 1560218..347eb21 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -850,7 +850,7 @@
printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
acpi_idle_driver.name);
} else {
- printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s",
+ printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s\n",
cpuidle_get_driver()->name);
}
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index ba1bd26..3a73a93 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -447,8 +447,8 @@
if (!try_module_get(calling_module))
return -EINVAL;
- /* is_done is set to negative if an error occured,
- * and to postitive if _no_ error occured, but SMM
+ /* is_done is set to negative if an error occurred,
+	 * and to positive if _no_ error occurred, but SMM
* was already notified. This avoids double notification
* which might lead to unexpected results...
*/
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index cf82989..4754ff6 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -363,6 +363,12 @@
return 0;
}
+static int __init init_nvs_nosave(const struct dmi_system_id *d)
+{
+ acpi_nvs_nosave();
+ return 0;
+}
+
static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
{
.callback = init_old_suspend_ordering,
@@ -397,6 +403,22 @@
DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
},
},
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Sony Vaio VGN-SR11M",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Everex StepNote Series",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
+ },
+ },
{},
};
#endif /* CONFIG_SUSPEND */
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 68e2e45..f8588f8 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -100,7 +100,7 @@
ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
};
-static int param_get_debug_layer(char *buffer, struct kernel_param *kp)
+static int param_get_debug_layer(char *buffer, const struct kernel_param *kp)
{
int result = 0;
int i;
@@ -128,7 +128,7 @@
return result;
}
-static int param_get_debug_level(char *buffer, struct kernel_param *kp)
+static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
{
int result = 0;
int i;
@@ -149,10 +149,18 @@
return result;
}
-module_param_call(debug_layer, param_set_uint, param_get_debug_layer,
- &acpi_dbg_layer, 0644);
-module_param_call(debug_level, param_set_uint, param_get_debug_level,
- &acpi_dbg_level, 0644);
+static struct kernel_param_ops param_ops_debug_layer = {
+ .set = param_set_uint,
+ .get = param_get_debug_layer,
+};
+
+static struct kernel_param_ops param_ops_debug_level = {
+ .set = param_set_uint,
+ .get = param_get_debug_level,
+};
+
+module_param_cb(debug_layer, &param_ops_debug_layer, &acpi_dbg_layer, 0644);
+module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644);
static char trace_method_name[6];
module_param_string(trace_method_name, trace_method_name, 6, 0644);
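module_param_call(), used by the old code, takes bare get/set functions; module_param_cb() instead bundles them into a struct kernel_param_ops and passes const-qualified struct kernel_param pointers, as the converted code above shows. A minimal sketch of the pattern for an unsigned parameter (names hypothetical):

	#include <linux/module.h>
	#include <linux/moduleparam.h>

	static unsigned int my_counter;

	static int my_counter_get(char *buffer, const struct kernel_param *kp)
	{
		/* custom formatting could go here; reuse the stock helper */
		return param_get_uint(buffer, kp);
	}

	static struct kernel_param_ops my_counter_ops = {
		.set = param_set_uint,		/* stock parser */
		.get = my_counter_get,		/* custom reader */
	};

	module_param_cb(my_counter, &my_counter_ops, &my_counter, 0644);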
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index c5fef01..b836761 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -59,8 +59,8 @@
"support\n"));
*cap |= ACPI_VIDEO_BACKLIGHT;
if (ACPI_FAILURE(acpi_get_handle(handle, "_BQC", &h_dummy)))
- printk(KERN_WARNING FW_BUG PREFIX "ACPI brightness "
- "control misses _BQC function\n");
+ printk(KERN_WARNING FW_BUG PREFIX "No _BQC method, "
+ "cannot determine initial brightness\n");
/* We have backlight support, no need to scan further */
return AE_CTRL_TERMINATE;
}
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 65e3e27..11ec911 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -828,6 +828,7 @@
config PATA_WINBOND_VLB
tristate "Winbond W83759A VLB PATA support (Experimental)"
depends on ISA && EXPERIMENTAL
+ select PATA_LEGACY
help
Support for the Winbond W83759A controller on Vesa Local Bus
systems.
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 158eaa9..d5df04a 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -89,7 +89,6 @@
obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o
obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o
obj-$(CONFIG_PATA_SAMSUNG_CF) += pata_samsung_cf.o
-obj-$(CONFIG_PATA_WINBOND_VLB) += pata_winbond.o
obj-$(CONFIG_PATA_PXA) += pata_pxa.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index fe75d8b..99d0e5a 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -60,6 +60,7 @@
board_ahci,
board_ahci_ign_iferr,
board_ahci_nosntf,
+ board_ahci_yes_fbs,
/* board IDs for specific chipsets in alphabetical order */
board_ahci_mcp65,
@@ -89,6 +90,10 @@
static int ahci_pci_device_resume(struct pci_dev *pdev);
#endif
+static struct scsi_host_template ahci_sht = {
+ AHCI_SHT("ahci"),
+};
+
static struct ata_port_operations ahci_vt8251_ops = {
.inherits = &ahci_ops,
.hardreset = ahci_vt8251_hardreset,
@@ -132,6 +137,14 @@
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
+ [board_ahci_yes_fbs] =
+ {
+ AHCI_HFLAGS (AHCI_HFLAG_YES_FBS),
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
/* by chipsets */
[board_ahci_mcp65] =
{
@@ -244,6 +257,9 @@
{ PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
{ PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
{ PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
+ { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
+ { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
+ { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -362,6 +378,8 @@
/* Marvell */
{ PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
{ PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
+ { PCI_DEVICE(0x1b4b, 0x9123),
+ .driver_data = board_ahci_yes_fbs }, /* 88se9128 */
/* Promise */
{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 7113c57..e5fdeeb 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -209,6 +209,7 @@
link offline */
AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */
AHCI_HFLAG_NO_FPDMA_AA = (1 << 13), /* no FPDMA AA */
+ AHCI_HFLAG_YES_FBS = (1 << 14), /* force FBS cap on */
/* ap->flags bits */
@@ -297,7 +298,17 @@
extern int ahci_ignore_sss;
-extern struct scsi_host_template ahci_sht;
+extern struct device_attribute *ahci_shost_attrs[];
+extern struct device_attribute *ahci_sdev_attrs[];
+
+#define AHCI_SHT(drv_name) \
+ ATA_NCQ_SHT(drv_name), \
+ .can_queue = AHCI_MAX_CMDS - 1, \
+ .sg_tablesize = AHCI_MAX_SG, \
+ .dma_boundary = AHCI_DMA_BOUNDARY, \
+ .shost_attrs = ahci_shost_attrs, \
+ .sdev_attrs = ahci_sdev_attrs
+
extern struct ata_port_operations ahci_ops;
void ahci_save_initial_config(struct device *dev,
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 4e97f33..84b6432 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -23,6 +23,10 @@
#include <linux/ahci_platform.h>
#include "ahci.h"
+static struct scsi_host_template ahci_platform_sht = {
+ AHCI_SHT("ahci_platform"),
+};
+
static int __init ahci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -145,7 +149,7 @@
ahci_print_info(host, "platform");
rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
- &ahci_sht);
+ &ahci_platform_sht);
if (rc)
goto err0;
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 3971bc0..d712675 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -302,6 +302,10 @@
{ 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (CPT) */
{ 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (PBG) */
+ { 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+ /* SATA Controller IDE (PBG) */
+ { 0x8086, 0x1d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
{ } /* terminate list */
};
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 81e772a..8eea309 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -121,7 +121,7 @@
static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
ahci_read_em_buffer, ahci_store_em_buffer);
-static struct device_attribute *ahci_shost_attrs[] = {
+struct device_attribute *ahci_shost_attrs[] = {
&dev_attr_link_power_management_policy,
&dev_attr_em_message_type,
&dev_attr_em_message,
@@ -132,22 +132,14 @@
&dev_attr_em_buffer,
NULL
};
+EXPORT_SYMBOL_GPL(ahci_shost_attrs);
-static struct device_attribute *ahci_sdev_attrs[] = {
+struct device_attribute *ahci_sdev_attrs[] = {
&dev_attr_sw_activity,
&dev_attr_unload_heads,
NULL
};
-
-struct scsi_host_template ahci_sht = {
- ATA_NCQ_SHT("ahci"),
- .can_queue = AHCI_MAX_CMDS - 1,
- .sg_tablesize = AHCI_MAX_SG,
- .dma_boundary = AHCI_DMA_BOUNDARY,
- .shost_attrs = ahci_shost_attrs,
- .sdev_attrs = ahci_sdev_attrs,
-};
-EXPORT_SYMBOL_GPL(ahci_sht);
+EXPORT_SYMBOL_GPL(ahci_sdev_attrs);
struct ata_port_operations ahci_ops = {
.inherits = &sata_pmp_port_ops,
@@ -430,6 +422,12 @@
cap &= ~HOST_CAP_SNTF;
}
+ if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) {
+ dev_printk(KERN_INFO, dev,
+ "controller can do FBS, turning on CAP_FBS\n");
+ cap |= HOST_CAP_FBS;
+ }
+
if (force_port_map && port_map != force_port_map) {
dev_printk(KERN_INFO, dev, "forcing port_map 0x%x -> 0x%x\n",
port_map, force_port_map);
@@ -1320,7 +1318,7 @@
/* issue the first D2H Register FIS */
msecs = 0;
now = jiffies;
- if (time_after(now, deadline))
+ if (time_after(deadline, now))
msecs = jiffies_to_msecs(deadline - now);
tf.ctl |= ATA_SRST;
@@ -2036,9 +2034,15 @@
u32 cmd = readl(port_mmio + PORT_CMD);
if (cmd & PORT_CMD_FBSCP)
pp->fbs_supported = true;
- else
+ else if (hpriv->flags & AHCI_HFLAG_YES_FBS) {
+ dev_printk(KERN_INFO, dev,
+ "port %d can do FBS, forcing FBSCP\n",
+ ap->port_no);
+ pp->fbs_supported = true;
+ } else
dev_printk(KERN_WARNING, dev,
- "The port is not capable of FBS\n");
+ "port %d is not capable of FBS\n",
+ ap->port_no);
}
if (pp->fbs_supported) {
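The hardreset fix above turns on the argument order of time_after(): time_after(a, b) is true when a is later than b in wraparound-safe jiffies arithmetic, so the remaining wait is computed only while the deadline still lies ahead. The corrected pattern in isolation:

	#include <linux/jiffies.h>

	static unsigned long remaining_msecs(unsigned long deadline)
	{
		unsigned long now = jiffies;

		/* time_after(a, b): true iff a is later than b, wrap-safe */
		if (time_after(deadline, now))
			return jiffies_to_msecs(deadline - now);

		return 0;	/* deadline already passed */
	}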
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 7ef7c4f..932eaee 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5111,15 +5111,18 @@
qc->flags |= ATA_QCFLAG_ACTIVE;
ap->qc_active |= 1 << qc->tag;
- /* We guarantee to LLDs that they will have at least one
+ /*
+ * We guarantee to LLDs that they will have at least one
* non-zero sg if the command is a data command.
*/
- BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
+ if (WARN_ON_ONCE(ata_is_data(prot) &&
+ (!qc->sg || !qc->n_elem || !qc->nbytes)))
+ goto sys_err;
if (ata_is_dma(prot) || (ata_is_pio(prot) &&
(ap->flags & ATA_FLAG_PIO_DMA)))
if (ata_sg_setup(qc))
- goto sg_err;
+ goto sys_err;
/* if device is sleeping, schedule reset and abort the link */
if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
@@ -5136,7 +5139,7 @@
goto err;
return;
-sg_err:
+sys_err:
qc->err_mask |= AC_ERR_SYSTEM;
err:
ata_qc_complete(qc);
@@ -5415,6 +5418,7 @@
*/
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
+ unsigned int ehi_flags = ATA_EHI_QUIET;
int rc;
/*
@@ -5423,7 +5427,18 @@
*/
ata_lpm_enable(host);
- rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
+ /*
+ * On some hardware, device fails to respond after spun down
+ * for suspend. As the device won't be used before being
+ * resumed, we don't need to touch the device. Ask EH to skip
+ * the usual stuff and proceed directly to suspend.
+ *
+ * http://thread.gmane.org/gmane.linux.ide/46764
+ */
+ if (mesg.event == PM_EVENT_SUSPEND)
+ ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
+
+ rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1);
if (rc == 0)
host->dev->power.power_state = mesg;
return rc;
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index c9ae299..e48302e 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -3235,6 +3235,10 @@
if (link->flags & ATA_LFLAG_DISABLED)
return 1;
+ /* skip if explicitly requested */
+ if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
+ return 1;
+
/* thaw frozen port and recover failed devices */
if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
return 0;
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 674c143..e30c537 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -418,6 +418,7 @@
if (ioaddr->ctl_addr)
iowrite8(tf->ctl, ioaddr->ctl_addr);
ap->last_ctl = tf->ctl;
+ ata_wait_idle(ap);
}
if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
@@ -453,6 +454,8 @@
iowrite8(tf->device, ioaddr->device_addr);
VPRINTK("device 0x%X\n", tf->device);
}
+
+ ata_wait_idle(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_tf_load);
@@ -1042,7 +1045,8 @@
int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
u8 status, int in_wq)
{
- struct ata_eh_info *ehi = &ap->link.eh_info;
+ struct ata_link *link = qc->dev->link;
+ struct ata_eh_info *ehi = &link->eh_info;
unsigned long flags = 0;
int poll_next;
@@ -1298,8 +1302,14 @@
}
EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
-void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay)
+void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
{
+ struct ata_port *ap = link->ap;
+
+ WARN_ON((ap->sff_pio_task_link != NULL) &&
+ (ap->sff_pio_task_link != link));
+ ap->sff_pio_task_link = link;
+
/* may fail if ata_sff_flush_pio_task() in progress */
queue_delayed_work(ata_sff_wq, &ap->sff_pio_task,
msecs_to_jiffies(delay));
@@ -1321,14 +1331,18 @@
{
struct ata_port *ap =
container_of(work, struct ata_port, sff_pio_task.work);
+ struct ata_link *link = ap->sff_pio_task_link;
struct ata_queued_cmd *qc;
u8 status;
int poll_next;
+ BUG_ON(ap->sff_pio_task_link == NULL);
/* qc can be NULL if timeout occurred */
- qc = ata_qc_from_tag(ap, ap->link.active_tag);
- if (!qc)
+ qc = ata_qc_from_tag(ap, link->active_tag);
+ if (!qc) {
+ ap->sff_pio_task_link = NULL;
return;
+ }
fsm_start:
WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
@@ -1345,11 +1359,16 @@
msleep(2);
status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
if (status & ATA_BUSY) {
- ata_sff_queue_pio_task(ap, ATA_SHORT_PAUSE);
+ ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
return;
}
}
+ /*
+ * hsm_move() may trigger another command to be processed.
+ * clean the link beforehand.
+ */
+ ap->sff_pio_task_link = NULL;
/* move the HSM */
poll_next = ata_sff_hsm_move(ap, qc, status, 1);
@@ -1376,6 +1395,7 @@
unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
+ struct ata_link *link = qc->dev->link;
/* Use polling pio if the LLD doesn't handle
* interrupt driven pio and atapi CDB interrupt.
@@ -1396,7 +1416,7 @@
ap->hsm_task_state = HSM_ST_LAST;
if (qc->tf.flags & ATA_TFLAG_POLLING)
- ata_sff_queue_pio_task(ap, 0);
+ ata_sff_queue_pio_task(link, 0);
break;
@@ -1409,7 +1429,7 @@
if (qc->tf.flags & ATA_TFLAG_WRITE) {
/* PIO data out protocol */
ap->hsm_task_state = HSM_ST_FIRST;
- ata_sff_queue_pio_task(ap, 0);
+ ata_sff_queue_pio_task(link, 0);
/* always send first data block using the
* ata_sff_pio_task() codepath.
@@ -1419,7 +1439,7 @@
ap->hsm_task_state = HSM_ST;
if (qc->tf.flags & ATA_TFLAG_POLLING)
- ata_sff_queue_pio_task(ap, 0);
+ ata_sff_queue_pio_task(link, 0);
/* if polling, ata_sff_pio_task() handles the
* rest. otherwise, interrupt handler takes
@@ -1441,7 +1461,7 @@
/* send cdb by polling if no cdb interrupt */
if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
(qc->tf.flags & ATA_TFLAG_POLLING))
- ata_sff_queue_pio_task(ap, 0);
+ ata_sff_queue_pio_task(link, 0);
break;
default:
@@ -2734,10 +2754,7 @@
unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
-
- /* see ata_dma_blacklisted() */
- BUG_ON((ap->flags & ATA_FLAG_PIO_POLLING) &&
- qc->tf.protocol == ATAPI_PROT_DMA);
+ struct ata_link *link = qc->dev->link;
/* defer PIO handling to sff_qc_issue */
if (!ata_is_dma(qc->tf.protocol))
@@ -2766,7 +2783,7 @@
/* send cdb by polling if no cdb interrupt */
if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
- ata_sff_queue_pio_task(ap, 0);
+ ata_sff_queue_pio_task(link, 0);
break;
default:
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index ba43f0f..2215632 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -74,7 +74,8 @@
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
/* Odd numbered device ids are the units with enable bits (the -R cards) */
- if (pdev->device % 1 && !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
+ if ((pdev->device & 1) &&
+ !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
return -ENOENT;
return ata_sff_prereset(link, deadline);
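The pata_artop prereset check was dead code before this fix: pdev->device % 1 is the remainder after division by one, which is always zero, so the odd-device-id test never fired; parity needs & 1 (or % 2). A two-line illustration:

	#include <assert.h>

	int main(void)
	{
		unsigned int id = 0x0005;	/* an odd device id */

		assert((id % 1) == 0);		/* mod 1 is always 0: never "odd" */
		assert((id & 1) == 1);		/* low-bit test: correctly odd */
		return 0;
	}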
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index 9f5da1c..905ff76 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -121,14 +121,8 @@
if (pair) {
struct ata_timing tp;
-
ata_timing_compute(pair, pair->pio_mode, &tp, T, 0);
ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
- if (pair->dma_mode) {
- ata_timing_compute(pair, pair->dma_mode,
- &tp, T, 0);
- ata_timing_merge(&tp, &t, &t, ATA_TIMING_SETUP);
- }
}
}
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index 9df1ff7..eaf1941 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -44,6 +44,9 @@
* Specific support is included for the ht6560a/ht6560b/opti82c611a/
* opti82c465mv/promise 20230c/20630/qdi65x0/winbond83759A
*
+ * Support for the Winbond 83759A when operating in advanced mode.
+ * Multichip mode is not currently supported.
+ *
* Use the autospeed and pio_mask options with:
* Appian ADI/2 aka CLPD7220 or AIC25VL01.
* Use the jumpers, autospeed and set pio_mask to the mode on the jumpers with
@@ -135,12 +138,18 @@
static int opti82c611a; /* Opti82c611A on primary 1, sec 2, both 3 */
static int opti82c46x; /* Opti 82c465MV present(pri/sec autodetect) */
static int qdi; /* Set to probe QDI controllers */
-static int winbond; /* Set to probe Winbond controllers,
- give I/O port if non standard */
static int autospeed; /* Chip present which snoops speed changes */
static int pio_mask = ATA_PIO4; /* PIO range for autospeed devices */
static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */
#ifdef CONFIG_PATA_WINBOND_VLB_MODULE
+static int winbond = 1; /* Set to probe Winbond controllers,
+ give I/O port if non standard */
+#else
+static int winbond; /* Set to probe Winbond controllers,
+ give I/O port if non standard */
+#endif
+
/**
* legacy_probe_add - Add interface to probe list
* @port: Controller port
@@ -1297,6 +1306,7 @@
MODULE_DESCRIPTION("low-level driver for legacy ATA");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
+MODULE_ALIAS("pata_winbond");
module_param(probe_all, int, 0);
module_param(autospeed, int, 0);
@@ -1305,6 +1315,7 @@
module_param(opti82c611a, int, 0);
module_param(opti82c46x, int, 0);
module_param(qdi, int, 0);
+module_param(winbond, int, 0);
module_param(pio_mask, int, 0);
module_param(iordy_mask, int, 0);
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 5e65988..ac8d7d9 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -417,6 +417,8 @@
tf->lbam,
tf->lbah);
}
+
+ ata_wait_idle(ap);
}
static int via_port_start(struct ata_port *ap)
diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
deleted file mode 100644
index 6d8619b..0000000
--- a/drivers/ata/pata_winbond.c
+++ /dev/null
@@ -1,282 +0,0 @@
-/*
- * pata_winbond.c - Winbond VLB ATA controllers
- * (C) 2006 Red Hat
- *
- * Support for the Winbond 83759A when operating in advanced mode.
- * Multichip mode is not currently supported.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/blkdev.h>
-#include <linux/delay.h>
-#include <scsi/scsi_host.h>
-#include <linux/libata.h>
-#include <linux/platform_device.h>
-
-#define DRV_NAME "pata_winbond"
-#define DRV_VERSION "0.0.3"
-
-#define NR_HOST 4 /* Two winbond controllers, two channels each */
-
-struct winbond_data {
- unsigned long config;
- struct platform_device *platform_dev;
-};
-
-static struct ata_host *winbond_host[NR_HOST];
-static struct winbond_data winbond_data[NR_HOST];
-static int nr_winbond_host;
-
-#ifdef MODULE
-static int probe_winbond = 1;
-#else
-static int probe_winbond;
-#endif
-
-static DEFINE_SPINLOCK(winbond_lock);
-
-static void winbond_writecfg(unsigned long port, u8 reg, u8 val)
-{
- unsigned long flags;
- spin_lock_irqsave(&winbond_lock, flags);
- outb(reg, port + 0x01);
- outb(val, port + 0x02);
- spin_unlock_irqrestore(&winbond_lock, flags);
-}
-
-static u8 winbond_readcfg(unsigned long port, u8 reg)
-{
- u8 val;
-
- unsigned long flags;
- spin_lock_irqsave(&winbond_lock, flags);
- outb(reg, port + 0x01);
- val = inb(port + 0x02);
- spin_unlock_irqrestore(&winbond_lock, flags);
-
- return val;
-}
-
-static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
-{
- struct ata_timing t;
- struct winbond_data *winbond = ap->host->private_data;
- int active, recovery;
- u8 reg;
- int timing = 0x88 + (ap->port_no * 4) + (adev->devno * 2);
-
- reg = winbond_readcfg(winbond->config, 0x81);
-
- /* Get the timing data in cycles */
- if (reg & 0x40) /* Fast VLB bus, assume 50MHz */
- ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
- else
- ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
-
- active = (clamp_val(t.active, 3, 17) - 1) & 0x0F;
- recovery = (clamp_val(t.recover, 1, 15) + 1) & 0x0F;
- timing = (active << 4) | recovery;
- winbond_writecfg(winbond->config, timing, reg);
-
- /* Load the setup timing */
-
- reg = 0x35;
- if (adev->class != ATA_DEV_ATA)
- reg |= 0x08; /* FIFO off */
- if (!ata_pio_need_iordy(adev))
- reg |= 0x02; /* IORDY off */
- reg |= (clamp_val(t.setup, 0, 3) << 6);
- winbond_writecfg(winbond->config, timing + 1, reg);
-}
-
-
-static unsigned int winbond_data_xfer(struct ata_device *dev,
- unsigned char *buf, unsigned int buflen, int rw)
-{
- struct ata_port *ap = dev->link->ap;
- int slop = buflen & 3;
-
- if (ata_id_has_dword_io(dev->id)) {
- if (rw == READ)
- ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
- else
- iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
-
- if (unlikely(slop)) {
- __le32 pad;
- if (rw == READ) {
- pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
- memcpy(buf + buflen - slop, &pad, slop);
- } else {
- memcpy(&pad, buf + buflen - slop, slop);
- iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
- }
- buflen += 4 - slop;
- }
- } else
- buflen = ata_sff_data_xfer(dev, buf, buflen, rw);
-
- return buflen;
-}
-
-static struct scsi_host_template winbond_sht = {
- ATA_PIO_SHT(DRV_NAME),
-};
-
-static struct ata_port_operations winbond_port_ops = {
- .inherits = &ata_sff_port_ops,
- .sff_data_xfer = winbond_data_xfer,
- .cable_detect = ata_cable_40wire,
- .set_piomode = winbond_set_piomode,
-};
-
-/**
- * winbond_init_one - attach a winbond interface
- * @type: Type to display
- * @io: I/O port start
- * @irq: interrupt line
- * @fast: True if on a > 33Mhz VLB
- *
- * Register a VLB bus IDE interface. Such interfaces are PIO and we
- * assume do not support IRQ sharing.
- */
-
-static __init int winbond_init_one(unsigned long port)
-{
- struct platform_device *pdev;
- u8 reg;
- int i, rc;
-
- reg = winbond_readcfg(port, 0x81);
- reg |= 0x80; /* jumpered mode off */
- winbond_writecfg(port, 0x81, reg);
- reg = winbond_readcfg(port, 0x83);
- reg |= 0xF0; /* local control */
- winbond_writecfg(port, 0x83, reg);
- reg = winbond_readcfg(port, 0x85);
- reg |= 0xF0; /* programmable timing */
- winbond_writecfg(port, 0x85, reg);
-
- reg = winbond_readcfg(port, 0x81);
-
- if (!(reg & 0x03)) /* Disabled */
- return -ENODEV;
-
- for (i = 0; i < 2 ; i ++) {
- unsigned long cmd_port = 0x1F0 - (0x80 * i);
- unsigned long ctl_port = cmd_port + 0x206;
- struct ata_host *host;
- struct ata_port *ap;
- void __iomem *cmd_addr, *ctl_addr;
-
- if (!(reg & (1 << i)))
- continue;
-
- pdev = platform_device_register_simple(DRV_NAME, nr_winbond_host, NULL, 0);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
-
- rc = -ENOMEM;
- host = ata_host_alloc(&pdev->dev, 1);
- if (!host)
- goto err_unregister;
- ap = host->ports[0];
-
- rc = -ENOMEM;
- cmd_addr = devm_ioport_map(&pdev->dev, cmd_port, 8);
- ctl_addr = devm_ioport_map(&pdev->dev, ctl_port, 1);
- if (!cmd_addr || !ctl_addr)
- goto err_unregister;
-
- ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", cmd_port, ctl_port);
-
- ap->ops = &winbond_port_ops;
- ap->pio_mask = ATA_PIO4;
- ap->flags |= ATA_FLAG_SLAVE_POSS;
- ap->ioaddr.cmd_addr = cmd_addr;
- ap->ioaddr.altstatus_addr = ctl_addr;
- ap->ioaddr.ctl_addr = ctl_addr;
- ata_sff_std_ports(&ap->ioaddr);
-
- /* hook in a private data structure per channel */
- host->private_data = &winbond_data[nr_winbond_host];
- winbond_data[nr_winbond_host].config = port;
- winbond_data[nr_winbond_host].platform_dev = pdev;
-
- /* activate */
- rc = ata_host_activate(host, 14 + i, ata_sff_interrupt, 0,
- &winbond_sht);
- if (rc)
- goto err_unregister;
-
- winbond_host[nr_winbond_host++] = dev_get_drvdata(&pdev->dev);
- }
-
- return 0;
-
- err_unregister:
- platform_device_unregister(pdev);
- return rc;
-}
-
-/**
- * winbond_init - attach winbond interfaces
- *
- * Attach winbond IDE interfaces by scanning the ports it may occupy.
- */
-
-static __init int winbond_init(void)
-{
- static const unsigned long config[2] = { 0x130, 0x1B0 };
-
- int ct = 0;
- int i;
-
- if (probe_winbond == 0)
- return -ENODEV;
-
- /*
- * Check both base addresses
- */
-
- for (i = 0; i < 2; i++) {
- if (probe_winbond & (1<<i)) {
- int ret = 0;
- unsigned long port = config[i];
-
- if (request_region(port, 2, "pata_winbond")) {
- ret = winbond_init_one(port);
- if (ret <= 0)
- release_region(port, 2);
- else ct+= ret;
- }
- }
- }
- if (ct != 0)
- return 0;
- return -ENODEV;
-}
-
-static __exit void winbond_exit(void)
-{
- int i;
-
- for (i = 0; i < nr_winbond_host; i++) {
- ata_host_detach(winbond_host[i]);
- release_region(winbond_data[i].config, 2);
- platform_device_unregister(winbond_data[i].platform_dev);
- }
-}
-
-MODULE_AUTHOR("Alan Cox");
-MODULE_DESCRIPTION("low-level driver for Winbond VL ATA");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
-
-module_init(winbond_init);
-module_exit(winbond_exit);
-
-module_param(probe_winbond, int, 0);
-
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 2673a3d..6cf57c5 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -1459,7 +1459,7 @@
{
struct scatterlist *sg = qc->sg;
struct ata_port *ap = qc->ap;
- u32 dma_chan;
+ int dma_chan;
struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
int err;
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 9463c71..a9fd970 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1898,19 +1898,25 @@
* LOCKING:
* Inherited from caller.
*/
-static void mv_bmdma_stop(struct ata_queued_cmd *qc)
+static void mv_bmdma_stop_ap(struct ata_port *ap)
{
- struct ata_port *ap = qc->ap;
void __iomem *port_mmio = mv_ap_base(ap);
u32 cmd;
/* clear start/stop bit */
cmd = readl(port_mmio + BMDMA_CMD);
- cmd &= ~ATA_DMA_START;
- writelfl(cmd, port_mmio + BMDMA_CMD);
+ if (cmd & ATA_DMA_START) {
+ cmd &= ~ATA_DMA_START;
+ writelfl(cmd, port_mmio + BMDMA_CMD);
- /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
- ata_sff_dma_pause(ap);
+ /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+ ata_sff_dma_pause(ap);
+ }
+}
+
+static void mv_bmdma_stop(struct ata_queued_cmd *qc)
+{
+ mv_bmdma_stop_ap(qc->ap);
}
/**
@@ -1934,8 +1940,21 @@
reg = readl(port_mmio + BMDMA_STATUS);
if (reg & ATA_DMA_ACTIVE)
status = ATA_DMA_ACTIVE;
- else
+ else if (reg & ATA_DMA_ERR)
status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
+ else {
+ /*
+ * Just because DMA_ACTIVE is 0 (DMA completed),
+ * this does _not_ mean the device is "done".
+ * So we should not yet be signalling ATA_DMA_INTR
+			 * in some cases. E.g. DSM/TRIM, and perhaps others.
+ */
+ mv_bmdma_stop_ap(ap);
+ if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
+ status = 0;
+ else
+ status = ATA_DMA_INTR;
+ }
return status;
}
@@ -1995,6 +2014,9 @@
switch (tf->protocol) {
case ATA_PROT_DMA:
+ if (tf->command == ATA_CMD_DSM)
+ return;
+ /* fall-thru */
case ATA_PROT_NCQ:
break; /* continue below */
case ATA_PROT_PIO:
@@ -2094,6 +2116,8 @@
if ((tf->protocol != ATA_PROT_DMA) &&
(tf->protocol != ATA_PROT_NCQ))
return;
+ if (tf->command == ATA_CMD_DSM)
+ return; /* use bmdma for this */
/* Fill in Gen IIE command request block */
if (!(tf->flags & ATA_TFLAG_WRITE))
@@ -2260,7 +2284,7 @@
}
if (qc->tf.flags & ATA_TFLAG_POLLING)
- ata_sff_queue_pio_task(ap, 0);
+ ata_sff_queue_pio_task(link, 0);
return 0;
}
@@ -2289,6 +2313,12 @@
switch (qc->tf.protocol) {
case ATA_PROT_DMA:
+ if (qc->tf.command == ATA_CMD_DSM) {
+ if (!ap->ops->bmdma_setup) /* no bmdma on GEN_I */
+ return AC_ERR_OTHER;
+ break; /* use bmdma for this */
+ }
+ /* fall thru */
case ATA_PROT_NCQ:
mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index ee9ddeb..8cb0347 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -3156,7 +3156,6 @@
{
struct atm_dev *dev;
IADEV *iadev;
- unsigned long flags;
int ret;
iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
@@ -3188,19 +3187,14 @@
ia_dev[iadev_count] = iadev;
_ia_dev[iadev_count] = dev;
iadev_count++;
- spin_lock_init(&iadev->misc_lock);
- /* First fixes first. I don't want to think about this now. */
- spin_lock_irqsave(&iadev->misc_lock, flags);
if (ia_init(dev) || ia_start(dev)) {
IF_INIT(printk("IA register failed!\n");)
iadev_count--;
ia_dev[iadev_count] = NULL;
_ia_dev[iadev_count] = NULL;
- spin_unlock_irqrestore(&iadev->misc_lock, flags);
ret = -EINVAL;
goto err_out_deregister_dev;
}
- spin_unlock_irqrestore(&iadev->misc_lock, flags);
IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
iadev->next_board = ia_boards;
diff --git a/drivers/atm/iphase.h b/drivers/atm/iphase.h
index b2cd20f..077735e 100644
--- a/drivers/atm/iphase.h
+++ b/drivers/atm/iphase.h
@@ -1022,7 +1022,7 @@
struct dle_q rx_dle_q;
struct free_desc_q *rx_free_desc_qhead;
struct sk_buff_head rx_dma_q;
- spinlock_t rx_lock, misc_lock;
+ spinlock_t rx_lock;
struct atm_vcc **rx_open; /* list of all open VCs */
u16 num_rx_desc, rx_buf_sz, rxing;
u32 rx_pkt_ram, rx_tmp_cnt;
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index f916ddf..f46138a 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -444,6 +444,7 @@
struct atm_dev *atmdev = container_of(dev, struct atm_dev, class_dev);
struct solos_card *card = atmdev->dev_data;
struct sk_buff *skb;
+ unsigned int len;
spin_lock(&card->cli_queue_lock);
skb = skb_dequeue(&card->cli_queue[SOLOS_CHAN(atmdev)]);
@@ -451,11 +452,12 @@
if(skb == NULL)
return sprintf(buf, "No data.\n");
- memcpy(buf, skb->data, skb->len);
- dev_dbg(&card->dev->dev, "len: %d\n", skb->len);
+ len = skb->len;
+ memcpy(buf, skb->data, len);
+ dev_dbg(&card->dev->dev, "len: %d\n", len);
kfree_skb(skb);
- return skb->len;
+ return len;
}
static int send_command(struct solos_card *card, int dev, const char *buf, size_t size)
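The solos-pci hunk fixes a use-after-free: the old code returned skb->len after kfree_skb() had already released the buffer. Anything still needed from an object must be copied out before it is freed; the same trap in plain C, with a hypothetical buffer type:

	#include <stdlib.h>
	#include <string.h>

	struct buf {
		size_t len;
		char data[];
	};

	static size_t consume(char *dst, struct buf *b)
	{
		size_t len = b->len;	/* copy out everything needed first */

		memcpy(dst, b->data, len);
		free(b);
		return len;		/* "return b->len;" here would be UB */
	}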
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index c8a44f5..40af43e 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -568,7 +568,7 @@
out:
if (retval) {
release_firmware(firmware);
- firmware_p = NULL;
+ *firmware_p = NULL;
}
return retval;
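The firmware_class fix is a classic out-parameter bug: _request_firmware() receives the caller's pointer by address, so clearing it on failure must write through the double pointer; firmware_p = NULL only changed the local copy and left the caller holding freed memory. In miniature:

	#include <stdlib.h>

	/* On failure, clear the caller's pointer through the double pointer;
	 * assigning to the parameter itself is invisible to the caller. */
	static int get_obj(int **objp, int fail)
	{
		*objp = malloc(sizeof(int));
		if (!*objp)
			return -1;
		if (fail) {
			free(*objp);
			*objp = NULL;	/* correct: writes the caller's variable */
			return -1;	/* "objp = NULL;" would leave it dangling */
		}
		return 0;
	}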
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 5419a49..276d5a7 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -59,6 +59,7 @@
{
dev->power.status = DPM_ON;
init_completion(&dev->power.completion);
+ complete_all(&dev->power.completion);
dev->power.wakeup_count = 0;
pm_runtime_init(dev);
}
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 9fc630c..f6f37a0 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -45,7 +45,8 @@
return sprintf(buf, "%d\n", topology_##name(cpu)); \
}
-#if defined(topology_thread_cpumask) || defined(topology_core_cpumask)
+#if defined(topology_thread_cpumask) || defined(topology_core_cpumask) || \
+ defined(topology_book_cpumask)
static ssize_t show_cpumap(int type, const struct cpumask *mask, char *buf)
{
ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
@@ -114,6 +115,14 @@
define_one_ro_named(core_siblings, show_core_cpumask);
define_one_ro_named(core_siblings_list, show_core_cpumask_list);
+#ifdef CONFIG_SCHED_BOOK
+define_id_show_func(book_id);
+define_one_ro(book_id);
+define_siblings_show_func(book_cpumask);
+define_one_ro_named(book_siblings, show_book_cpumask);
+define_one_ro_named(book_siblings_list, show_book_cpumask_list);
+#endif
+
static struct attribute *default_attrs[] = {
&attr_physical_package_id.attr,
&attr_core_id.attr,
@@ -121,6 +130,11 @@
&attr_thread_siblings_list.attr,
&attr_core_siblings.attr,
&attr_core_siblings_list.attr,
+#ifdef CONFIG_SCHED_BOOK
+ &attr_book_id.attr,
+ &attr_book_siblings.attr,
+ &attr_book_siblings_list.attr,
+#endif
NULL
};
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index de27768..4b9359a 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -488,4 +488,21 @@
If unsure, say N.
+config BLK_DEV_RBD
+ tristate "Rados block device (RBD)"
+ depends on INET && EXPERIMENTAL && BLOCK
+ select CEPH_LIB
+ select LIBCRC32C
+ select CRYPTO_AES
+ select CRYPTO
+ default n
+ help
+	  Say Y here if you want to include the Rados block device, which stripes
+ a block device over objects stored in the Ceph distributed object
+ store.
+
+ More information at http://ceph.newdream.net/.
+
+ If unsure, say N.
+
endif # BLK_DEV
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index aff5ac9..d7f463d 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -37,5 +37,6 @@
obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o
obj-$(CONFIG_BLK_DEV_DRBD) += drbd/
+obj-$(CONFIG_BLK_DEV_RBD) += rbd.o
swim_mod-objs := swim.o swim_asm.o
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 31064df..5e4fadc 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -297,6 +297,8 @@
spin_lock_irqsave(&h->lock, flags);
addQ(&h->reqQ, c);
h->Qdepth++;
+ if (h->Qdepth > h->maxQsinceinit)
+ h->maxQsinceinit = h->Qdepth;
start_io(h);
spin_unlock_irqrestore(&h->lock, flags);
}
@@ -4519,6 +4521,12 @@
misc_fw_support = readl(&cfgtable->misc_fw_support);
use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
+ /* The doorbell reset seems to cause lockups on some Smart
+ * Arrays (e.g. P410, P410i, maybe others). Until this is
+ * fixed or at least isolated, avoid the doorbell reset.
+ */
+ use_doorbell = 0;
+
rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell);
if (rc)
goto unmap_cfgtable;
@@ -4712,6 +4720,9 @@
h->scatter_list = kmalloc(h->max_commands *
sizeof(struct scatterlist *),
GFP_KERNEL);
+ if (!h->scatter_list)
+ goto clean4;
+
for (k = 0; k < h->nr_cmds; k++) {
h->scatter_list[k] = kmalloc(sizeof(struct scatterlist) *
h->maxsgentries,
@@ -4781,7 +4792,7 @@
clean4:
kfree(h->cmd_pool_bits);
/* Free up sg elements */
- for (k = 0; k < h->nr_cmds; k++)
+	for (k--; k >= 0; k--)
kfree(h->scatter_list[k]);
kfree(h->scatter_list);
cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
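The clean4 fix above is partial-allocation cleanup: the loop previously freed all nr_cmds slots even when allocation had stopped early, touching pointers that were never set. Walking the index back down releases exactly what was allocated. The shape of the idiom:

	#include <stdlib.h>

	static int alloc_all(void **slots, int n)
	{
		int k;

		for (k = 0; k < n; k++) {
			slots[k] = malloc(64);
			if (!slots[k])
				goto unwind;
		}
		return 0;

	unwind:
		for (k--; k >= 0; k--)	/* free only what was allocated */
			free(slots[k]);
		return -1;
	}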
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f3c636d..91797bb 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -477,7 +477,7 @@
pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
if (bio_rw(bio) == WRITE) {
- bool barrier = (bio->bi_rw & REQ_HARDBARRIER);
+ bool barrier = !!(bio->bi_rw & REQ_HARDBARRIER);
struct file *file = lo->lo_backing_file;
if (barrier) {
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index b82c5ce..76fa3de 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -974,8 +974,7 @@
host->breq->queuedata = host;
/* mflash is random device, thanx for the noop */
- elevator_exit(host->breq->elevator);
- err = elevator_init(host->breq, "noop");
+ err = elevator_change(host->breq, "noop");
if (err) {
printk(KERN_ERR "%s:%d (elevator_init) fail\n",
__func__, __LINE__);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index b1cbeb5..37a2bb5 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2369,7 +2369,7 @@
pkt_shrink_pktlist(pd);
}
-static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor)
+static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
{
if (dev_minor >= MAX_WRITERS)
return NULL;
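Widening dev_minor to unsigned int makes the single range check sufficient: a negative value from a confused caller becomes a huge unsigned number and is rejected by the >= MAX_WRITERS test, instead of slipping past it and indexing the array with a negative subscript. A sketch:

	#include <stddef.h>

	#define MAX_WRITERS 8
	static void *pkt_devs[MAX_WRITERS];

	static void *find_dev(unsigned int dev_minor)
	{
		/* a stray -1 arrives as a huge unsigned value and is rejected */
		if (dev_minor >= MAX_WRITERS)
			return NULL;
		return pkt_devs[dev_minor];
	}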
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index e9da874..03688c2 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -113,7 +113,7 @@
memcpy(buf, dev->bounce_buf+offset, size);
offset += size;
flush_kernel_dcache_page(bvec->bv_page);
- bvec_kunmap_irq(bvec, &flags);
+ bvec_kunmap_irq(buf, &flags);
i++;
}
}
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
new file mode 100644
index 0000000..6ec9d53
--- /dev/null
+++ b/drivers/block/rbd.c
@@ -0,0 +1,1841 @@
+/*
+ rbd.c -- Export ceph rados objects as a Linux block device
+
+
+ based on drivers/block/osdblk.c:
+
+ Copyright 2009 Red Hat, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING. If not, write to
+ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+
+
+
+ Instructions for use
+ --------------------
+
+ 1) Map a Linux block device to an existing rbd image.
+
+ Usage: <mon ip addr> <options> <pool name> <rbd image name> [snap name]
+
+ $ echo "192.168.0.1 name=admin rbd foo" > /sys/class/rbd/add
+
+ The snapshot name can be "-" or omitted to map the image read/write.
+
+ 2) List all active blkdev<->object mappings.
+
+ In this example, we have performed step #1 once, creating one blkdev
+ mapped to a rados object in the rados rbd pool.
+
+ $ cat /sys/class/rbd/list
+ #id major client_name pool name snap KB
+ 0 254 client4143 rbd foo - 1024000
+
+ The columns, in order, are:
+ - blkdev unique id
+ - blkdev assigned major
+ - rados client id
+ - rados pool name
+ - rados block device name
+ - mapped snapshot ("-" if none)
+ - device size in KB
+
+
+ 3) Create a snapshot.
+
+ Usage: <blkdev id> <snapname>
+
+ $ echo "0 mysnap" > /sys/class/rbd/snap_create
+
+
+ 4) List snapshots.
+
+ $ cat /sys/class/rbd/snaps_list
+ #id snap KB
+ 0 - 1024000 (*)
+ 0 mysnap 1024000
+
+ The columns, in order, are:
+ - blkdev unique id
+ - snapshot name, '-' means none (active read/write version)
+ - size of device at time of snapshot
+ - the (*) indicates this is the active version
+
+ 5) Roll back to a snapshot.
+
+ Usage: <blkdev id> <snapname>
+
+ $ echo "0 mysnap" > /sys/class/rbd/snap_rollback
+
+
+ 6) Map an image using a snapshot.
+
+ A snapshot mapping is read-only. This is done by passing
+ snap=<snapname> in the options when adding a device.
+
+ $ echo "192.168.0.1 name=admin,snap=mysnap rbd foo" > /sys/class/rbd/add
+
+
+ 7) Remove an active blkdev<->rbd image mapping.
+
+ In this example, we remove the mapping with blkdev unique id 1.
+
+ $ echo 1 > /sys/class/rbd/remove
+
+
+ NOTE: The actual creation and deletion of rados objects are outside the scope
+ of this driver.
+
+ */
+
+#include <linux/ceph/libceph.h>
+#include <linux/ceph/osd_client.h>
+#include <linux/ceph/mon_client.h>
+#include <linux/ceph/decode.h>
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+
+#include "rbd_types.h"
+
+#define DRV_NAME "rbd"
+#define DRV_NAME_LONG "rbd (rados block device)"
+
+#define RBD_MINORS_PER_MAJOR 256 /* max minors per blkdev */
+
+#define RBD_MAX_MD_NAME_LEN (96 + sizeof(RBD_SUFFIX))
+#define RBD_MAX_POOL_NAME_LEN 64
+#define RBD_MAX_SNAP_NAME_LEN 32
+#define RBD_MAX_OPT_LEN 1024
+
+#define RBD_SNAP_HEAD_NAME "-"
+
+#define DEV_NAME_LEN 32
+
+/*
+ * block device image metadata (in-memory version)
+ */
+struct rbd_image_header {
+ u64 image_size;
+ char block_name[32];
+ __u8 obj_order;
+ __u8 crypt_type;
+ __u8 comp_type;
+ struct rw_semaphore snap_rwsem;
+ struct ceph_snap_context *snapc;
+ size_t snap_names_len;
+ u64 snap_seq;
+ u32 total_snaps;
+
+ char *snap_names;
+ u64 *snap_sizes;
+};
+
+/*
+ * an instance of the client. multiple devices may share a client.
+ */
+struct rbd_client {
+ struct ceph_client *client;
+ struct kref kref;
+ struct list_head node;
+};
+
+/*
+ * a single io request
+ */
+struct rbd_request {
+ struct request *rq; /* blk layer request */
+ struct bio *bio; /* cloned bio */
+ struct page **pages; /* list of used pages */
+ u64 len;
+};
+
+/*
+ * a single device
+ */
+struct rbd_device {
+ int id; /* blkdev unique id */
+
+ int major; /* blkdev assigned major */
+ struct gendisk *disk; /* blkdev's gendisk and rq */
+ struct request_queue *q;
+
+ struct ceph_client *client;
+ struct rbd_client *rbd_client;
+
+ char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
+
+ spinlock_t lock; /* queue lock */
+
+ struct rbd_image_header header;
+ char obj[RBD_MAX_OBJ_NAME_LEN]; /* rbd image name */
+ int obj_len;
+ char obj_md_name[RBD_MAX_MD_NAME_LEN]; /* header name */
+ char pool_name[RBD_MAX_POOL_NAME_LEN];
+ int poolid;
+
+ char snap_name[RBD_MAX_SNAP_NAME_LEN];
+ u32 cur_snap; /* index+1 of current snapshot within the snap
+ context; 0 means the head */
+ int read_only;
+
+ struct list_head node;
+};
+
+static spinlock_t node_lock; /* protects client get/put */
+
+static struct class *class_rbd; /* /sys/class/rbd */
+static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */
+static LIST_HEAD(rbd_dev_list); /* devices */
+static LIST_HEAD(rbd_client_list); /* clients */
+
+
+static int rbd_open(struct block_device *bdev, fmode_t mode)
+{
+ struct gendisk *disk = bdev->bd_disk;
+ struct rbd_device *rbd_dev = disk->private_data;
+
+ set_device_ro(bdev, rbd_dev->read_only);
+
+ if ((mode & FMODE_WRITE) && rbd_dev->read_only)
+ return -EROFS;
+
+ return 0;
+}
+
+static const struct block_device_operations rbd_bd_ops = {
+ .owner = THIS_MODULE,
+ .open = rbd_open,
+};
+
+/*
+ * Initialize an rbd client instance.
+ * We own *opt.
+ */
+static struct rbd_client *rbd_client_create(struct ceph_options *opt)
+{
+ struct rbd_client *rbdc;
+ int ret = -ENOMEM;
+
+ dout("rbd_client_create\n");
+ rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
+ if (!rbdc)
+ goto out_opt;
+
+ kref_init(&rbdc->kref);
+ INIT_LIST_HEAD(&rbdc->node);
+
+ rbdc->client = ceph_create_client(opt, rbdc);
+ if (IS_ERR(rbdc->client))
+ goto out_rbdc;
+ opt = NULL; /* Now rbdc->client is responsible for opt */
+
+ ret = ceph_open_session(rbdc->client);
+ if (ret < 0)
+ goto out_err;
+
+ spin_lock(&node_lock);
+ list_add_tail(&rbdc->node, &rbd_client_list);
+ spin_unlock(&node_lock);
+
+ dout("rbd_client_create created %p\n", rbdc);
+ return rbdc;
+
+out_err:
+ ceph_destroy_client(rbdc->client);
+out_rbdc:
+ kfree(rbdc);
+out_opt:
+ if (opt)
+ ceph_destroy_options(opt);
+ return ERR_PTR(ret);
+}
+
+/*
+ * Find a ceph client with specific addr and configuration.
+ */
+static struct rbd_client *__rbd_client_find(struct ceph_options *opt)
+{
+ struct rbd_client *client_node;
+
+ if (opt->flags & CEPH_OPT_NOSHARE)
+ return NULL;
+
+ list_for_each_entry(client_node, &rbd_client_list, node)
+ if (ceph_compare_options(opt, client_node->client) == 0)
+ return client_node;
+ return NULL;
+}
+
+/*
+ * Get a ceph client with specific addr and configuration; if one does
+ * not exist, create it.
+ */
+static int rbd_get_client(struct rbd_device *rbd_dev, const char *mon_addr,
+ char *options)
+{
+ struct rbd_client *rbdc;
+ struct ceph_options *opt;
+ int ret;
+
+ ret = ceph_parse_options(&opt, options, mon_addr,
+ mon_addr + strlen(mon_addr), NULL, NULL);
+ if (ret < 0)
+ return ret;
+
+ spin_lock(&node_lock);
+ rbdc = __rbd_client_find(opt);
+ if (rbdc) {
+ ceph_destroy_options(opt);
+
+ /* using an existing client */
+ kref_get(&rbdc->kref);
+ rbd_dev->rbd_client = rbdc;
+ rbd_dev->client = rbdc->client;
+ spin_unlock(&node_lock);
+ return 0;
+ }
+ spin_unlock(&node_lock);
+
+ rbdc = rbd_client_create(opt);
+ if (IS_ERR(rbdc))
+ return PTR_ERR(rbdc);
+
+ rbd_dev->rbd_client = rbdc;
+ rbd_dev->client = rbdc->client;
+ return 0;
+}
+
+/*
+ * Destroy ceph client
+ */
+static void rbd_client_release(struct kref *kref)
+{
+ struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
+
+ dout("rbd_release_client %p\n", rbdc);
+ spin_lock(&node_lock);
+ list_del(&rbdc->node);
+ spin_unlock(&node_lock);
+
+ ceph_destroy_client(rbdc->client);
+ kfree(rbdc);
+}
+
+/*
+ * Drop reference to ceph client node. If it's not referenced anymore, release
+ * it.
+ */
+static void rbd_put_client(struct rbd_device *rbd_dev)
+{
+ kref_put(&rbd_dev->rbd_client->kref, rbd_client_release);
+ rbd_dev->rbd_client = NULL;
+ rbd_dev->client = NULL;
+}
+
+
+/*
+ * Create a new header structure, translate header format from the on-disk
+ * header.
+ */
+static int rbd_header_from_disk(struct rbd_image_header *header,
+ struct rbd_image_header_ondisk *ondisk,
+ int allocated_snaps,
+ gfp_t gfp_flags)
+{
+ int i;
+ u32 snap_count = le32_to_cpu(ondisk->snap_count);
+ int ret = -ENOMEM;
+
+ init_rwsem(&header->snap_rwsem);
+
+ header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
+ header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
+ snap_count *
+ sizeof(struct rbd_image_snap_ondisk),
+ gfp_flags);
+ if (!header->snapc)
+ return -ENOMEM;
+ if (snap_count) {
+ header->snap_names = kmalloc(header->snap_names_len,
+ gfp_flags);
+ if (!header->snap_names)
+ goto err_snapc;
+ header->snap_sizes = kmalloc(snap_count * sizeof(u64),
+ gfp_flags);
+ if (!header->snap_sizes)
+ goto err_names;
+ } else {
+ header->snap_names = NULL;
+ header->snap_sizes = NULL;
+ }
+ memcpy(header->block_name, ondisk->block_name,
+ sizeof(ondisk->block_name));
+
+ header->image_size = le64_to_cpu(ondisk->image_size);
+ header->obj_order = ondisk->options.order;
+ header->crypt_type = ondisk->options.crypt_type;
+ header->comp_type = ondisk->options.comp_type;
+
+ atomic_set(&header->snapc->nref, 1);
+ header->snap_seq = le64_to_cpu(ondisk->snap_seq);
+ header->snapc->num_snaps = snap_count;
+ header->total_snaps = snap_count;
+
+ if (snap_count &&
+ allocated_snaps == snap_count) {
+ for (i = 0; i < snap_count; i++) {
+ header->snapc->snaps[i] =
+ le64_to_cpu(ondisk->snaps[i].id);
+ header->snap_sizes[i] =
+ le64_to_cpu(ondisk->snaps[i].image_size);
+ }
+
+ /* copy snapshot names */
+ memcpy(header->snap_names, &ondisk->snaps[i],
+ header->snap_names_len);
+ }
+
+ return 0;
+
+err_names:
+ kfree(header->snap_names);
+err_snapc:
+ kfree(header->snapc);
+ return ret;
+}
+
+static int snap_index(struct rbd_image_header *header, int snap_num)
+{
+ return header->total_snaps - snap_num;
+}
+
+static u64 cur_snap_id(struct rbd_device *rbd_dev)
+{
+ struct rbd_image_header *header = &rbd_dev->header;
+
+ if (!rbd_dev->cur_snap)
+ return 0;
+
+ return header->snapc->snaps[snap_index(header, rbd_dev->cur_snap)];
+}
+
+static int snap_by_name(struct rbd_image_header *header, const char *snap_name,
+ u64 *seq, u64 *size)
+{
+ int i;
+ char *p = header->snap_names;
+
+ for (i = 0; i < header->total_snaps; i++, p += strlen(p) + 1) {
+ if (strcmp(snap_name, p) == 0)
+ break;
+ }
+ if (i == header->total_snaps)
+ return -ENOENT;
+ if (seq)
+ *seq = header->snapc->snaps[i];
+
+ if (size)
+ *size = header->snap_sizes[i];
+
+ return i;
+}
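snap_by_name() relies on the packed layout of snap_names: total_snaps NUL-terminated strings stored back to back, walked with p += strlen(p) + 1. A small standalone sketch of that walk (made-up names for illustration):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char names[] = "first\0mysnap\0last";  /* packed list */
            int total_snaps = 3, i;
            const char *p = names;

            for (i = 0; i < total_snaps; i++, p += strlen(p) + 1)
                    if (strcmp("mysnap", p) == 0)
                            break;
            printf("%s\n", i == total_snaps ? "not found" : "found");
            return 0;
    }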
+
+static int rbd_header_set_snap(struct rbd_device *dev,
+ const char *snap_name,
+ u64 *size)
+{
+ struct rbd_image_header *header = &dev->header;
+ struct ceph_snap_context *snapc = header->snapc;
+ int ret = -ENOENT;
+
+ down_write(&header->snap_rwsem);
+
+ if (!snap_name ||
+ !*snap_name ||
+ strcmp(snap_name, "-") == 0 ||
+ strcmp(snap_name, RBD_SNAP_HEAD_NAME) == 0) {
+ if (header->total_snaps)
+ snapc->seq = header->snap_seq;
+ else
+ snapc->seq = 0;
+ dev->cur_snap = 0;
+ dev->read_only = 0;
+ if (size)
+ *size = header->image_size;
+ } else {
+ ret = snap_by_name(header, snap_name, &snapc->seq, size);
+ if (ret < 0)
+ goto done;
+
+ dev->cur_snap = header->total_snaps - ret;
+ dev->read_only = 1;
+ }
+
+ ret = 0;
+done:
+ up_write(&header->snap_rwsem);
+ return ret;
+}
+
+static void rbd_header_free(struct rbd_image_header *header)
+{
+ kfree(header->snapc);
+ kfree(header->snap_names);
+ kfree(header->snap_sizes);
+}
+
+/*
+ * get the actual striped segment name, offset and length
+ */
+static u64 rbd_get_segment(struct rbd_image_header *header,
+ const char *block_name,
+ u64 ofs, u64 len,
+ char *seg_name, u64 *segofs)
+{
+ u64 seg = ofs >> header->obj_order;
+
+ if (seg_name)
+ snprintf(seg_name, RBD_MAX_SEG_NAME_LEN,
+ "%s.%012llx", block_name, seg);
+
+ ofs = ofs & ((1 << header->obj_order) - 1);
+ len = min_t(u64, len, (1 << header->obj_order) - ofs);
+
+ if (segofs)
+ *segofs = ofs;
+
+ return len;
+}
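rbd_get_segment() is the striping rule for the whole driver: the segment number is ofs >> obj_order, the offset within it is the low obj_order bits, and the length is clipped at the segment boundary. A standalone sketch of the arithmetic, assuming the default obj_order of 22 (4 MB objects) and a hypothetical image block name "foo":

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            int order = 22;                         /* 4 MB objects */
            uint64_t ofs = (5ULL << order) + 1000;  /* 1000 B into seg 5 */
            uint64_t len = 8 * 1024 * 1024;         /* caller wants 8 MB */
            uint64_t seg = ofs >> order;
            uint64_t seg_ofs = ofs & ((1ULL << order) - 1);
            uint64_t room = (1ULL << order) - seg_ofs;
            uint64_t seg_len = len < room ? len : room;

            /* prints "foo.000000000005 ofs=1000 len=4193304" */
            printf("foo.%012llx ofs=%llu len=%llu\n",
                   (unsigned long long)seg,
                   (unsigned long long)seg_ofs,
                   (unsigned long long)seg_len);
            return 0;
    }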
+
+/*
+ * bio helpers
+ */
+
+static void bio_chain_put(struct bio *chain)
+{
+ struct bio *tmp;
+
+ while (chain) {
+ tmp = chain;
+ chain = chain->bi_next;
+ bio_put(tmp);
+ }
+}
+
+/*
+ * zeros a bio chain, starting at a specific offset
+ */
+static void zero_bio_chain(struct bio *chain, int start_ofs)
+{
+ struct bio_vec *bv;
+ unsigned long flags;
+ void *buf;
+ int i;
+ int pos = 0;
+
+ while (chain) {
+ bio_for_each_segment(bv, chain, i) {
+ if (pos + bv->bv_len > start_ofs) {
+ int remainder = max(start_ofs - pos, 0);
+ buf = bvec_kmap_irq(bv, &flags);
+ memset(buf + remainder, 0,
+ bv->bv_len - remainder);
+ bvec_kunmap_irq(buf, &flags);
+ }
+ pos += bv->bv_len;
+ }
+
+ chain = chain->bi_next;
+ }
+}
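The remainder computation above is what keeps the partially valid segment intact: in the segment that straddles start_ofs, only the tail past start_ofs is cleared, while every later segment is cleared entirely. A worked standalone example with two toy "segments":

    #include <stdio.h>

    int main(void)
    {
            char data[9] = "xxxxxxxx";
            int seg_len[] = {3, 5};         /* two segments: 3 B and 5 B */
            int start_ofs = 4, pos = 0, off = 0, i, j;

            for (j = 0; j < 2; j++) {
                    if (pos + seg_len[j] > start_ofs) {
                            int rem = start_ofs > pos ? start_ofs - pos : 0;
                            for (i = rem; i < seg_len[j]; i++)
                                    data[off + i] = '.';  /* memset stand-in */
                    }
                    pos += seg_len[j];
                    off += seg_len[j];
            }
            printf("%s\n", data);           /* prints "xxxx...." */
            return 0;
    }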
+
+/*
+ * bio_chain_clone - clone a chain of bios up to a certain length.
+ * May return a bio_pair that will need to be released.
+ */
+static struct bio *bio_chain_clone(struct bio **old, struct bio **next,
+ struct bio_pair **bp,
+ int len, gfp_t gfpmask)
+{
+ struct bio *tmp, *old_chain = *old, *new_chain = NULL, *tail = NULL;
+ int total = 0;
+
+ if (*bp) {
+ bio_pair_release(*bp);
+ *bp = NULL;
+ }
+
+ while (old_chain && (total < len)) {
+ tmp = bio_kmalloc(gfpmask, old_chain->bi_max_vecs);
+ if (!tmp)
+ goto err_out;
+
+ if (total + old_chain->bi_size > len) {
+ struct bio_pair *bp;
+
+ /*
+ * this split can only happen with a single-page bio;
+ * bio_split will BUG_ON if this is not the case
+ */
+ dout("bio_chain_clone split! total=%d remaining=%d"
+ "bi_size=%d\n",
+ (int)total, (int)len-total,
+ (int)old_chain->bi_size);
+
+ /* split the bio. We'll release it either in the next
+ call, or it will have to be released outside */
+ bp = bio_split(old_chain, (len - total) / 512ULL);
+ if (!bp)
+ goto err_out;
+
+ __bio_clone(tmp, &bp->bio1);
+
+ *next = &bp->bio2;
+ } else {
+ __bio_clone(tmp, old_chain);
+ *next = old_chain->bi_next;
+ }
+
+ tmp->bi_bdev = NULL;
+ gfpmask &= ~__GFP_WAIT;
+ tmp->bi_next = NULL;
+
+ if (!new_chain) {
+ new_chain = tail = tmp;
+ } else {
+ tail->bi_next = tmp;
+ tail = tmp;
+ }
+ old_chain = old_chain->bi_next;
+
+ total += tmp->bi_size;
+ }
+
+ BUG_ON(total < len);
+
+ if (tail)
+ tail->bi_next = NULL;
+
+ *old = old_chain;
+
+ return new_chain;
+
+err_out:
+ dout("bio_chain_clone with err\n");
+ bio_chain_put(new_chain);
+ return NULL;
+}
+
+/*
+ * helpers for osd request op vectors.
+ */
+static int rbd_create_rw_ops(struct ceph_osd_req_op **ops,
+ int num_ops,
+ int opcode,
+ u32 payload_len)
+{
+ *ops = kzalloc(sizeof(struct ceph_osd_req_op) * (num_ops + 1),
+ GFP_NOIO);
+ if (!*ops)
+ return -ENOMEM;
+ (*ops)[0].op = opcode;
+ /*
+ * op extent offset and length will be set later on
+ * in calc_raw_layout()
+ */
+ (*ops)[0].payload_len = payload_len;
+ return 0;
+}
+
+static void rbd_destroy_ops(struct ceph_osd_req_op *ops)
+{
+ kfree(ops);
+}
+
+/*
+ * Send ceph osd request
+ */
+static int rbd_do_request(struct request *rq,
+ struct rbd_device *dev,
+ struct ceph_snap_context *snapc,
+ u64 snapid,
+ const char *obj, u64 ofs, u64 len,
+ struct bio *bio,
+ struct page **pages,
+ int num_pages,
+ int flags,
+ struct ceph_osd_req_op *ops,
+ int num_reply,
+ void (*rbd_cb)(struct ceph_osd_request *req,
+ struct ceph_msg *msg))
+{
+ struct ceph_osd_request *req;
+ struct ceph_file_layout *layout;
+ int ret;
+ u64 bno;
+ struct timespec mtime = CURRENT_TIME;
+ struct rbd_request *req_data;
+ struct ceph_osd_request_head *reqhead;
+ struct rbd_image_header *header = &dev->header;
+
+ ret = -ENOMEM;
+ req_data = kzalloc(sizeof(*req_data), GFP_NOIO);
+ if (!req_data)
+ goto done;
+
+ dout("rbd_do_request len=%lld ofs=%lld\n", len, ofs);
+
+ down_read(&header->snap_rwsem);
+
+ req = ceph_osdc_alloc_request(&dev->client->osdc, flags,
+ snapc,
+ ops,
+ false,
+ GFP_NOIO, pages, bio);
+ if (IS_ERR(req)) {
+ up_read(&header->snap_rwsem);
+ ret = PTR_ERR(req);
+ goto done_pages;
+ }
+
+ req->r_callback = rbd_cb;
+
+ req_data->rq = rq;
+ req_data->bio = bio;
+ req_data->pages = pages;
+ req_data->len = len;
+
+ req->r_priv = req_data;
+
+ reqhead = req->r_request->front.iov_base;
+ reqhead->snapid = cpu_to_le64(CEPH_NOSNAP);
+
+ strncpy(req->r_oid, obj, sizeof(req->r_oid));
+ req->r_oid_len = strlen(req->r_oid);
+
+ layout = &req->r_file_layout;
+ memset(layout, 0, sizeof(*layout));
+ layout->fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
+ layout->fl_stripe_count = cpu_to_le32(1);
+ layout->fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
+ layout->fl_pg_preferred = cpu_to_le32(-1);
+ layout->fl_pg_pool = cpu_to_le32(dev->poolid);
+ ceph_calc_raw_layout(&dev->client->osdc, layout, snapid,
+ ofs, &len, &bno, req, ops);
+
+ ceph_osdc_build_request(req, ofs, &len,
+ ops,
+ snapc,
+ &mtime,
+ req->r_oid, req->r_oid_len);
+ up_read(&header->snap_rwsem);
+
+ ret = ceph_osdc_start_request(&dev->client->osdc, req, false);
+ if (ret < 0)
+ goto done_err;
+
+ if (!rbd_cb) {
+ ret = ceph_osdc_wait_request(&dev->client->osdc, req);
+ ceph_osdc_put_request(req);
+ }
+ return ret;
+
+done_err:
+ bio_chain_put(req_data->bio);
+ ceph_osdc_put_request(req);
+done_pages:
+ kfree(req_data);
+done:
+ if (rq)
+ blk_end_request(rq, ret, len);
+ return ret;
+}
+
+/*
+ * Ceph osd op callback
+ */
+static void rbd_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg)
+{
+ struct rbd_request *req_data = req->r_priv;
+ struct ceph_osd_reply_head *replyhead;
+ struct ceph_osd_op *op;
+ __s32 rc;
+ u64 bytes;
+ int read_op;
+
+ /* parse reply */
+ replyhead = msg->front.iov_base;
+ WARN_ON(le32_to_cpu(replyhead->num_ops) == 0);
+ op = (void *)(replyhead + 1);
+ rc = le32_to_cpu(replyhead->result);
+ bytes = le64_to_cpu(op->extent.length);
+ read_op = (le32_to_cpu(op->op) == CEPH_OSD_OP_READ);
+
+ dout("rbd_req_cb bytes=%lld readop=%d rc=%d\n", bytes, read_op, rc);
+
+ if (rc == -ENOENT && read_op) {
+ zero_bio_chain(req_data->bio, 0);
+ rc = 0;
+ } else if (rc == 0 && read_op && bytes < req_data->len) {
+ zero_bio_chain(req_data->bio, bytes);
+ bytes = req_data->len;
+ }
+
+ blk_end_request(req_data->rq, rc, bytes);
+
+ if (req_data->bio)
+ bio_chain_put(req_data->bio);
+
+ ceph_osdc_put_request(req);
+ kfree(req_data);
+}
+
+/*
+ * Do a synchronous ceph osd operation
+ */
+static int rbd_req_sync_op(struct rbd_device *dev,
+ struct ceph_snap_context *snapc,
+ u64 snapid,
+ int opcode,
+ int flags,
+ struct ceph_osd_req_op *orig_ops,
+ int num_reply,
+ const char *obj,
+ u64 ofs, u64 len,
+ char *buf)
+{
+ int ret;
+ struct page **pages;
+ int num_pages;
+ struct ceph_osd_req_op *ops = orig_ops;
+ u32 payload_len;
+
+ num_pages = calc_pages_for(ofs, len);
+ pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+
+ if (!orig_ops) {
+ payload_len = (flags & CEPH_OSD_FLAG_WRITE ? len : 0);
+ ret = rbd_create_rw_ops(&ops, 1, opcode, payload_len);
+ if (ret < 0)
+ goto done;
+
+ if ((flags & CEPH_OSD_FLAG_WRITE) && buf) {
+ ret = ceph_copy_to_page_vector(pages, buf, ofs, len);
+ if (ret < 0)
+ goto done_ops;
+ }
+ }
+
+ ret = rbd_do_request(NULL, dev, snapc, snapid,
+ obj, ofs, len, NULL,
+ pages, num_pages,
+ flags,
+ ops,
+ 2,
+ NULL);
+ if (ret < 0)
+ goto done_ops;
+
+ if ((flags & CEPH_OSD_FLAG_READ) && buf)
+ ret = ceph_copy_from_page_vector(pages, buf, ofs, ret);
+
+done_ops:
+ if (!orig_ops)
+ rbd_destroy_ops(ops);
+done:
+ ceph_release_page_vector(pages, num_pages);
+ return ret;
+}
+
+/*
+ * Do an asynchronous ceph osd operation
+ */
+static int rbd_do_op(struct request *rq,
+ struct rbd_device *rbd_dev,
+ struct ceph_snap_context *snapc,
+ u64 snapid,
+ int opcode, int flags, int num_reply,
+ u64 ofs, u64 len,
+ struct bio *bio)
+{
+ char *seg_name;
+ u64 seg_ofs;
+ u64 seg_len;
+ int ret;
+ struct ceph_osd_req_op *ops;
+ u32 payload_len;
+
+ seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
+ if (!seg_name)
+ return -ENOMEM;
+
+ seg_len = rbd_get_segment(&rbd_dev->header,
+ rbd_dev->header.block_name,
+ ofs, len,
+ seg_name, &seg_ofs);
+
+ payload_len = (flags & CEPH_OSD_FLAG_WRITE ? seg_len : 0);
+
+ ret = rbd_create_rw_ops(&ops, 1, opcode, payload_len);
+ if (ret < 0)
+ goto done;
+
+ /* we've taken care of segment sizes earlier when we
+ cloned the bios. We should never have a segment
+ truncated at this point */
+ BUG_ON(seg_len < len);
+
+ ret = rbd_do_request(rq, rbd_dev, snapc, snapid,
+ seg_name, seg_ofs, seg_len,
+ bio,
+ NULL, 0,
+ flags,
+ ops,
+ num_reply,
+ rbd_req_cb);
+done:
+ kfree(seg_name);
+ return ret;
+}
+
+/*
+ * Request async osd write
+ */
+static int rbd_req_write(struct request *rq,
+ struct rbd_device *rbd_dev,
+ struct ceph_snap_context *snapc,
+ u64 ofs, u64 len,
+ struct bio *bio)
+{
+ return rbd_do_op(rq, rbd_dev, snapc, CEPH_NOSNAP,
+ CEPH_OSD_OP_WRITE,
+ CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
+ 2,
+ ofs, len, bio);
+}
+
+/*
+ * Request async osd read
+ */
+static int rbd_req_read(struct request *rq,
+ struct rbd_device *rbd_dev,
+ u64 snapid,
+ u64 ofs, u64 len,
+ struct bio *bio)
+{
+ return rbd_do_op(rq, rbd_dev, NULL,
+ (snapid ? snapid : CEPH_NOSNAP),
+ CEPH_OSD_OP_READ,
+ CEPH_OSD_FLAG_READ,
+ 2,
+ ofs, len, bio);
+}
+
+/*
+ * Request sync osd read
+ */
+static int rbd_req_sync_read(struct rbd_device *dev,
+ struct ceph_snap_context *snapc,
+ u64 snapid,
+ const char *obj,
+ u64 ofs, u64 len,
+ char *buf)
+{
+ return rbd_req_sync_op(dev, NULL,
+ (snapid ? snapid : CEPH_NOSNAP),
+ CEPH_OSD_OP_READ,
+ CEPH_OSD_FLAG_READ,
+ NULL,
+ 1, obj, ofs, len, buf);
+}
+
+/*
+ * Request sync osd rollback
+ */
+static int rbd_req_sync_rollback_obj(struct rbd_device *dev,
+ u64 snapid,
+ const char *obj)
+{
+ struct ceph_osd_req_op *ops;
+ int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_ROLLBACK, 0);
+ if (ret < 0)
+ return ret;
+
+ ops[0].snap.snapid = snapid;
+
+ ret = rbd_req_sync_op(dev, NULL,
+ CEPH_NOSNAP,
+ 0,
+ CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
+ ops,
+ 1, obj, 0, 0, NULL);
+
+ rbd_destroy_ops(ops);
+
+ return ret;
+}
+
+/*
+ * Request sync osd class method call
+ */
+static int rbd_req_sync_exec(struct rbd_device *dev,
+ const char *obj,
+ const char *cls,
+ const char *method,
+ const char *data,
+ int len)
+{
+ struct ceph_osd_req_op *ops;
+ int cls_len = strlen(cls);
+ int method_len = strlen(method);
+ int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_CALL,
+ cls_len + method_len + len);
+ if (ret < 0)
+ return ret;
+
+ ops[0].cls.class_name = cls;
+ ops[0].cls.class_len = (__u8)cls_len;
+ ops[0].cls.method_name = method;
+ ops[0].cls.method_len = (__u8)method_len;
+ ops[0].cls.argc = 0;
+ ops[0].cls.indata = data;
+ ops[0].cls.indata_len = len;
+
+ ret = rbd_req_sync_op(dev, NULL,
+ CEPH_NOSNAP,
+ 0,
+ CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
+ ops,
+ 1, obj, 0, 0, NULL);
+
+ rbd_destroy_ops(ops);
+
+ dout("cls_exec returned %d\n", ret);
+ return ret;
+}
+
+/*
+ * block device queue callback
+ */
+static void rbd_rq_fn(struct request_queue *q)
+{
+ struct rbd_device *rbd_dev = q->queuedata;
+ struct request *rq;
+ struct bio_pair *bp = NULL;
+
+ rq = blk_fetch_request(q);
+
+ while (1) {
+ struct bio *bio;
+ struct bio *rq_bio, *next_bio = NULL;
+ bool do_write;
+ int size, op_size = 0;
+ u64 ofs;
+
+ /* peek at request from block layer */
+ if (!rq)
+ break;
+
+ dout("fetched request\n");
+
+ /* filter out block requests we don't understand */
+ if ((rq->cmd_type != REQ_TYPE_FS)) {
+ __blk_end_request_all(rq, 0);
+ goto next;
+ }
+
+ /* deduce our operation (read, write) */
+ do_write = (rq_data_dir(rq) == WRITE);
+
+ size = blk_rq_bytes(rq);
+ ofs = blk_rq_pos(rq) * 512ULL;
+ rq_bio = rq->bio;
+ if (do_write && rbd_dev->read_only) {
+ __blk_end_request_all(rq, -EROFS);
+ goto next;
+ }
+
+ spin_unlock_irq(q->queue_lock);
+
+ dout("%s 0x%x bytes at 0x%llx\n",
+ do_write ? "write" : "read",
+ size, blk_rq_pos(rq) * 512ULL);
+
+ do {
+ /* a bio clone to be passed down to OSD req */
+ dout("rq->bio->bi_vcnt=%d\n", rq->bio->bi_vcnt);
+ op_size = rbd_get_segment(&rbd_dev->header,
+ rbd_dev->header.block_name,
+ ofs, size,
+ NULL, NULL);
+ bio = bio_chain_clone(&rq_bio, &next_bio, &bp,
+ op_size, GFP_ATOMIC);
+ if (!bio) {
+ spin_lock_irq(q->queue_lock);
+ __blk_end_request_all(rq, -ENOMEM);
+ goto next;
+ }
+
+ /* init OSD command: write or read */
+ if (do_write)
+ rbd_req_write(rq, rbd_dev,
+ rbd_dev->header.snapc,
+ ofs,
+ op_size, bio);
+ else
+ rbd_req_read(rq, rbd_dev,
+ cur_snap_id(rbd_dev),
+ ofs,
+ op_size, bio);
+
+ size -= op_size;
+ ofs += op_size;
+
+ rq_bio = next_bio;
+ } while (size > 0);
+
+ if (bp)
+ bio_pair_release(bp);
+
+ spin_lock_irq(q->queue_lock);
+next:
+ rq = blk_fetch_request(q);
+ }
+}
+
+/*
+ * a queue callback. Makes sure that we don't create a bio that spans across
+ * multiple osd objects. One exception would be a single-page bio,
+ * which we handle later in bio_chain_clone
+ */
+static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
+ struct bio_vec *bvec)
+{
+ struct rbd_device *rbd_dev = q->queuedata;
+ unsigned int chunk_sectors = 1 << (rbd_dev->header.obj_order - 9);
+ sector_t sector = bmd->bi_sector + get_start_sect(bmd->bi_bdev);
+ unsigned int bio_sectors = bmd->bi_size >> 9;
+ int max;
+
+ max = (chunk_sectors - ((sector & (chunk_sectors - 1))
+ + bio_sectors)) << 9;
+ if (max < 0)
+ max = 0; /* bio_add cannot handle a negative return */
+ if (max <= bvec->bv_len && bio_sectors == 0)
+ return bvec->bv_len;
+ return max;
+}
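The arithmetic in rbd_merge_bvec() is just "room left before the object boundary, in bytes". A standalone sketch assuming obj_order 22, so chunk_sectors = 1 << (22 - 9) = 8192 sectors:

    #include <stdio.h>

    int main(void)
    {
            unsigned int chunk_sectors = 1u << (22 - 9);  /* 8192 */
            unsigned long long sector = 8000;  /* bio starts here */
            unsigned int bio_sectors = 100;    /* sectors already in bio */
            int max;

            max = (int)((chunk_sectors -
                  ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9);
            if (max < 0)
                    max = 0;  /* bio_add cannot handle a negative return */
            printf("%d more bytes fit before the boundary\n", max);  /* 47104 */
            return 0;
    }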
+
+static void rbd_free_disk(struct rbd_device *rbd_dev)
+{
+ struct gendisk *disk = rbd_dev->disk;
+
+ if (!disk)
+ return;
+
+ rbd_header_free(&rbd_dev->header);
+
+ if (disk->flags & GENHD_FL_UP)
+ del_gendisk(disk);
+ if (disk->queue)
+ blk_cleanup_queue(disk->queue);
+ put_disk(disk);
+}
+
+/*
+ * reload the on-disk header
+ */
+static int rbd_read_header(struct rbd_device *rbd_dev,
+ struct rbd_image_header *header)
+{
+ ssize_t rc;
+ struct rbd_image_header_ondisk *dh;
+ int snap_count = 0;
+ u64 snap_names_len = 0;
+
+ while (1) {
+ int len = sizeof(*dh) +
+ snap_count * sizeof(struct rbd_image_snap_ondisk) +
+ snap_names_len;
+
+ rc = -ENOMEM;
+ dh = kmalloc(len, GFP_KERNEL);
+ if (!dh)
+ return -ENOMEM;
+
+ rc = rbd_req_sync_read(rbd_dev,
+ NULL, CEPH_NOSNAP,
+ rbd_dev->obj_md_name,
+ 0, len,
+ (char *)dh);
+ if (rc < 0)
+ goto out_dh;
+
+ rc = rbd_header_from_disk(header, dh, snap_count, GFP_KERNEL);
+ if (rc < 0)
+ goto out_dh;
+
+ if (snap_count != header->total_snaps) {
+ snap_count = header->total_snaps;
+ snap_names_len = header->snap_names_len;
+ rbd_header_free(header);
+ kfree(dh);
+ continue;
+ }
+ break;
+ }
+
+out_dh:
+ kfree(dh);
+ return rc;
+}
+
+/*
+ * create a snapshot
+ */
+static int rbd_header_add_snap(struct rbd_device *dev,
+ const char *snap_name,
+ gfp_t gfp_flags)
+{
+ int name_len = strlen(snap_name);
+ u64 new_snapid;
+ int ret;
+ void *data, *data_start, *data_end;
+
+ /* we should create a snapshot only if we're pointing at the head */
+ if (dev->cur_snap)
+ return -EINVAL;
+
+ ret = ceph_monc_create_snapid(&dev->client->monc, dev->poolid,
+ &new_snapid);
+ dout("created snapid=%lld\n", new_snapid);
+ if (ret < 0)
+ return ret;
+
+ data = kmalloc(name_len + 16, gfp_flags);
+ if (!data)
+ return -ENOMEM;
+
+ data_start = data;
+ data_end = data + name_len + 16;
+
+ ceph_encode_string_safe(&data, data_end, snap_name, name_len, bad);
+ ceph_encode_64_safe(&data, data_end, new_snapid, bad);
+
+ ret = rbd_req_sync_exec(dev, dev->obj_md_name, "rbd", "snap_add",
+ data_start, data - data_start);
+
+ kfree(data_start);
+
+ if (ret < 0)
+ return ret;
+
+ dev->header.snapc->seq = new_snapid;
+
+ return 0;
+bad:
+ return -ERANGE;
+}
+
+/*
+ * re-read the on-disk header and refresh the in-memory snapshot info
+ */
+static int rbd_update_snaps(struct rbd_device *rbd_dev)
+{
+ int ret;
+ struct rbd_image_header h;
+ u64 snap_seq;
+
+ ret = rbd_read_header(rbd_dev, &h);
+ if (ret < 0)
+ return ret;
+
+ down_write(&rbd_dev->header.snap_rwsem);
+
+ snap_seq = rbd_dev->header.snapc->seq;
+
+ kfree(rbd_dev->header.snapc);
+ kfree(rbd_dev->header.snap_names);
+ kfree(rbd_dev->header.snap_sizes);
+
+ rbd_dev->header.total_snaps = h.total_snaps;
+ rbd_dev->header.snapc = h.snapc;
+ rbd_dev->header.snap_names = h.snap_names;
+ rbd_dev->header.snap_sizes = h.snap_sizes;
+ rbd_dev->header.snapc->seq = snap_seq;
+
+ up_write(&rbd_dev->header.snap_rwsem);
+
+ return 0;
+}
+
+static int rbd_init_disk(struct rbd_device *rbd_dev)
+{
+ struct gendisk *disk;
+ struct request_queue *q;
+ int rc;
+ u64 total_size = 0;
+
+ /* contact OSD, request size info about the object being mapped */
+ rc = rbd_read_header(rbd_dev, &rbd_dev->header);
+ if (rc)
+ return rc;
+
+ rc = rbd_header_set_snap(rbd_dev, rbd_dev->snap_name, &total_size);
+ if (rc)
+ return rc;
+
+ /* create gendisk info */
+ rc = -ENOMEM;
+ disk = alloc_disk(RBD_MINORS_PER_MAJOR);
+ if (!disk)
+ goto out;
+
+ sprintf(disk->disk_name, DRV_NAME "%d", rbd_dev->id);
+ disk->major = rbd_dev->major;
+ disk->first_minor = 0;
+ disk->fops = &rbd_bd_ops;
+ disk->private_data = rbd_dev;
+
+ /* init rq */
+ rc = -ENOMEM;
+ q = blk_init_queue(rbd_rq_fn, &rbd_dev->lock);
+ if (!q)
+ goto out_disk;
+ blk_queue_merge_bvec(q, rbd_merge_bvec);
+ disk->queue = q;
+
+ q->queuedata = rbd_dev;
+
+ rbd_dev->disk = disk;
+ rbd_dev->q = q;
+
+ /* finally, announce the disk to the world */
+ set_capacity(disk, total_size / 512ULL);
+ add_disk(disk);
+
+ pr_info("%s: added with size 0x%llx\n",
+ disk->disk_name, (unsigned long long)total_size);
+ return 0;
+
+out_disk:
+ put_disk(disk);
+out:
+ return rc;
+}
+
+/********************************************************************
+ * /sys/class/rbd/
+ * add map rados objects to blkdev
+ * remove unmap rados objects
+ * list show mappings
+ *******************************************************************/
+
+static void class_rbd_release(struct class *cls)
+{
+ kfree(cls);
+}
+
+static ssize_t class_rbd_list(struct class *c,
+ struct class_attribute *attr,
+ char *data)
+{
+ int n = 0;
+ struct list_head *tmp;
+ int max = PAGE_SIZE;
+
+ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+
+ n += snprintf(data, max,
+ "#id\tmajor\tclient_name\tpool\tname\tsnap\tKB\n");
+
+ list_for_each(tmp, &rbd_dev_list) {
+ struct rbd_device *rbd_dev;
+
+ rbd_dev = list_entry(tmp, struct rbd_device, node);
+ n += snprintf(data+n, max-n,
+ "%d\t%d\tclient%lld\t%s\t%s\t%s\t%lld\n",
+ rbd_dev->id,
+ rbd_dev->major,
+ ceph_client_id(rbd_dev->client),
+ rbd_dev->pool_name,
+ rbd_dev->obj, rbd_dev->snap_name,
+ rbd_dev->header.image_size >> 10);
+ if (n == max)
+ break;
+ }
+
+ mutex_unlock(&ctl_mutex);
+ return n;
+}
+
+static ssize_t class_rbd_add(struct class *c,
+ struct class_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ceph_osd_client *osdc;
+ struct rbd_device *rbd_dev;
+ ssize_t rc = -ENOMEM;
+ int irc, new_id = 0;
+ struct list_head *tmp;
+ char *mon_dev_name;
+ char *options;
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
+
+ mon_dev_name = kmalloc(RBD_MAX_OPT_LEN, GFP_KERNEL);
+ if (!mon_dev_name)
+ goto err_out_mod;
+
+ options = kmalloc(RBD_MAX_OPT_LEN, GFP_KERNEL);
+ if (!options)
+ goto err_mon_dev;
+
+ /* new rbd_device object */
+ rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
+ if (!rbd_dev)
+ goto err_out_opt;
+
+ /* static rbd_device initialization */
+ spin_lock_init(&rbd_dev->lock);
+ INIT_LIST_HEAD(&rbd_dev->node);
+
+ /* generate unique id: find highest unique id, add one */
+ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+
+ list_for_each(tmp, &rbd_dev_list) {
+ struct rbd_device *rbd_dev;
+
+ rbd_dev = list_entry(tmp, struct rbd_device, node);
+ if (rbd_dev->id >= new_id)
+ new_id = rbd_dev->id + 1;
+ }
+
+ rbd_dev->id = new_id;
+
+ /* add to global list */
+ list_add_tail(&rbd_dev->node, &rbd_dev_list);
+
+ /* parse add command */
+ if (sscanf(buf, "%" __stringify(RBD_MAX_OPT_LEN) "s "
+ "%" __stringify(RBD_MAX_OPT_LEN) "s "
+ "%" __stringify(RBD_MAX_POOL_NAME_LEN) "s "
+ "%" __stringify(RBD_MAX_OBJ_NAME_LEN) "s"
+ "%" __stringify(RBD_MAX_SNAP_NAME_LEN) "s",
+ mon_dev_name, options, rbd_dev->pool_name,
+ rbd_dev->obj, rbd_dev->snap_name) < 4) {
+ rc = -EINVAL;
+ goto err_out_slot;
+ }
+
+ if (rbd_dev->snap_name[0] == 0)
+ rbd_dev->snap_name[0] = '-';
+
+ rbd_dev->obj_len = strlen(rbd_dev->obj);
+ snprintf(rbd_dev->obj_md_name, sizeof(rbd_dev->obj_md_name), "%s%s",
+ rbd_dev->obj, RBD_SUFFIX);
+
+ /* initialize rest of new object */
+ snprintf(rbd_dev->name, DEV_NAME_LEN, DRV_NAME "%d", rbd_dev->id);
+ rc = rbd_get_client(rbd_dev, mon_dev_name, options);
+ if (rc < 0)
+ goto err_out_slot;
+
+ mutex_unlock(&ctl_mutex);
+
+ /* pick the pool */
+ osdc = &rbd_dev->client->osdc;
+ rc = ceph_pg_poolid_by_name(osdc->osdmap, rbd_dev->pool_name);
+ if (rc < 0)
+ goto err_out_client;
+ rbd_dev->poolid = rc;
+
+ /* register our block device */
+ irc = register_blkdev(0, rbd_dev->name);
+ if (irc < 0) {
+ rc = irc;
+ goto err_out_client;
+ }
+ rbd_dev->major = irc;
+
+ /* set up and announce blkdev mapping */
+ rc = rbd_init_disk(rbd_dev);
+ if (rc)
+ goto err_out_blkdev;
+
+ return count;
+
+err_out_blkdev:
+ unregister_blkdev(rbd_dev->major, rbd_dev->name);
+err_out_client:
+ rbd_put_client(rbd_dev);
+ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+err_out_slot:
+ list_del_init(&rbd_dev->node);
+ mutex_unlock(&ctl_mutex);
+
+ kfree(rbd_dev);
+err_out_opt:
+ kfree(options);
+err_mon_dev:
+ kfree(mon_dev_name);
+err_out_mod:
+ dout("Error adding device %s\n", buf);
+ module_put(THIS_MODULE);
+ return rc;
+}
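The sscanf format in class_rbd_add() uses __stringify() to turn each length macro into a %s field width, so an over-long token cannot overrun its destination buffer. A standalone sketch of the trick (the macro is reproduced here; in the kernel it comes from linux/stringify.h):

    #include <stdio.h>

    #define __stringify_1(x)  #x
    #define __stringify(x)    __stringify_1(x)
    #define NAME_LEN 32

    int main(void)
    {
            char name[NAME_LEN + 1];  /* +1 for the terminating NUL */

            /* "%" __stringify(NAME_LEN) "s" pastes into "%32s" */
            if (sscanf("myimage trailing-junk",
                       "%" __stringify(NAME_LEN) "s", name) == 1)
                    printf("parsed: %s\n", name);  /* "parsed: myimage" */
            return 0;
    }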
+
+static struct rbd_device *__rbd_get_dev(unsigned long id)
+{
+ struct list_head *tmp;
+ struct rbd_device *rbd_dev;
+
+ list_for_each(tmp, &rbd_dev_list) {
+ rbd_dev = list_entry(tmp, struct rbd_device, node);
+ if (rbd_dev->id == id)
+ return rbd_dev;
+ }
+ return NULL;
+}
+
+static ssize_t class_rbd_remove(struct class *c,
+ struct class_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct rbd_device *rbd_dev = NULL;
+ int target_id, rc;
+ unsigned long ul;
+
+ rc = strict_strtoul(buf, 10, &ul);
+ if (rc)
+ return rc;
+
+ /* convert to int; abort if we lost anything in the conversion */
+ target_id = (int) ul;
+ if (target_id != ul)
+ return -EINVAL;
+
+ /* remove object from list immediately */
+ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+
+ rbd_dev = __rbd_get_dev(target_id);
+ if (rbd_dev)
+ list_del_init(&rbd_dev->node);
+
+ mutex_unlock(&ctl_mutex);
+
+ if (!rbd_dev)
+ return -ENOENT;
+
+ rbd_put_client(rbd_dev);
+
+ /* clean up and free blkdev */
+ rbd_free_disk(rbd_dev);
+ unregister_blkdev(rbd_dev->major, rbd_dev->name);
+ kfree(rbd_dev);
+
+ /* release module ref */
+ module_put(THIS_MODULE);
+
+ return count;
+}
+
+static ssize_t class_rbd_snaps_list(struct class *c,
+ struct class_attribute *attr,
+ char *data)
+{
+ struct rbd_device *rbd_dev = NULL;
+ struct list_head *tmp;
+ struct rbd_image_header *header;
+ int i, n = 0, max = PAGE_SIZE;
+ int ret;
+
+ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+
+ n += snprintf(data, max, "#id\tsnap\tKB\n");
+
+ list_for_each(tmp, &rbd_dev_list) {
+ char *names, *p;
+ struct ceph_snap_context *snapc;
+
+ rbd_dev = list_entry(tmp, struct rbd_device, node);
+ header = &rbd_dev->header;
+
+ down_read(&header->snap_rwsem);
+
+ names = header->snap_names;
+ snapc = header->snapc;
+
+ n += snprintf(data + n, max - n, "%d\t%s\t%lld%s\n",
+ rbd_dev->id, RBD_SNAP_HEAD_NAME,
+ header->image_size >> 10,
+ (!rbd_dev->cur_snap ? " (*)" : ""));
+ if (n == max)
+ break;
+
+ p = names;
+ for (i = 0; i < header->total_snaps; i++, p += strlen(p) + 1) {
+ n += snprintf(data + n, max - n, "%d\t%s\t%lld%s\n",
+ rbd_dev->id, p, header->snap_sizes[i] >> 10,
+ (rbd_dev->cur_snap &&
+ (snap_index(header, i) == rbd_dev->cur_snap) ?
+ " (*)" : ""));
+ if (n == max)
+ break;
+ }
+
+ up_read(&header->snap_rwsem);
+ }
+
+
+ ret = n;
+ mutex_unlock(&ctl_mutex);
+ return ret;
+}
+
+static ssize_t class_rbd_snaps_refresh(struct class *c,
+ struct class_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct rbd_device *rbd_dev = NULL;
+ int target_id, rc;
+ unsigned long ul;
+ int ret = count;
+
+ rc = strict_strtoul(buf, 10, &ul);
+ if (rc)
+ return rc;
+
+ /* convert to int; abort if we lost anything in the conversion */
+ target_id = (int) ul;
+ if (target_id != ul)
+ return -EINVAL;
+
+ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+
+ rbd_dev = __rbd_get_dev(target_id);
+ if (!rbd_dev) {
+ ret = -ENOENT;
+ goto done;
+ }
+
+ rc = rbd_update_snaps(rbd_dev);
+ if (rc < 0)
+ ret = rc;
+
+done:
+ mutex_unlock(&ctl_mutex);
+ return ret;
+}
+
+static ssize_t class_rbd_snap_create(struct class *c,
+ struct class_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct rbd_device *rbd_dev = NULL;
+ int target_id, ret;
+ char *name;
+
+ name = kmalloc(RBD_MAX_SNAP_NAME_LEN + 1, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ /* parse snaps add command */
+ if (sscanf(buf, "%d "
+ "%" __stringify(RBD_MAX_SNAP_NAME_LEN) "s",
+ &target_id,
+ name) != 2) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+
+ rbd_dev = __rbd_get_dev(target_id);
+ if (!rbd_dev) {
+ ret = -ENOENT;
+ goto done_unlock;
+ }
+
+ ret = rbd_header_add_snap(rbd_dev,
+ name, GFP_KERNEL);
+ if (ret < 0)
+ goto done_unlock;
+
+ ret = rbd_update_snaps(rbd_dev);
+ if (ret < 0)
+ goto done_unlock;
+
+ ret = count;
+done_unlock:
+ mutex_unlock(&ctl_mutex);
+done:
+ kfree(name);
+ return ret;
+}
+
+static ssize_t class_rbd_rollback(struct class *c,
+ struct class_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct rbd_device *rbd_dev = NULL;
+ int target_id, ret;
+ u64 snapid;
+ char snap_name[RBD_MAX_SNAP_NAME_LEN];
+ u64 cur_ofs;
+ char *seg_name;
+
+ /* parse snap rollback command */
+ if (sscanf(buf, "%d "
+ "%" __stringify(RBD_MAX_SNAP_NAME_LEN) "s",
+ &target_id,
+ snap_name) != 2) {
+ return -EINVAL;
+ }
+
+ ret = -ENOMEM;
+ seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
+ if (!seg_name)
+ return ret;
+
+ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+
+ rbd_dev = __rbd_get_dev(target_id);
+ if (!rbd_dev) {
+ ret = -ENOENT;
+ goto done_unlock;
+ }
+
+ ret = snap_by_name(&rbd_dev->header, snap_name, &snapid, NULL);
+ if (ret < 0)
+ goto done_unlock;
+
+ dout("snapid=%lld\n", snapid);
+
+ cur_ofs = 0;
+ while (cur_ofs < rbd_dev->header.image_size) {
+ cur_ofs += rbd_get_segment(&rbd_dev->header,
+ rbd_dev->obj,
+ cur_ofs, (u64)-1,
+ seg_name, NULL);
+ dout("seg_name=%s\n", seg_name);
+
+ ret = rbd_req_sync_rollback_obj(rbd_dev, snapid, seg_name);
+ if (ret < 0)
+ pr_warning("could not roll back obj %s err=%d\n",
+ seg_name, ret);
+ }
+
+ ret = rbd_update_snaps(rbd_dev);
+ if (ret < 0)
+ goto done_unlock;
+
+ ret = count;
+
+done_unlock:
+ mutex_unlock(&ctl_mutex);
+ kfree(seg_name);
+
+ return ret;
+}
+
+static struct class_attribute class_rbd_attrs[] = {
+ __ATTR(add, 0200, NULL, class_rbd_add),
+ __ATTR(remove, 0200, NULL, class_rbd_remove),
+ __ATTR(list, 0444, class_rbd_list, NULL),
+ __ATTR(snaps_refresh, 0200, NULL, class_rbd_snaps_refresh),
+ __ATTR(snap_create, 0200, NULL, class_rbd_snap_create),
+ __ATTR(snaps_list, 0444, class_rbd_snaps_list, NULL),
+ __ATTR(snap_rollback, 0200, NULL, class_rbd_rollback),
+ __ATTR_NULL
+};
+
+/*
+ * create control files in sysfs
+ * /sys/class/rbd/...
+ */
+static int rbd_sysfs_init(void)
+{
+ int ret = -ENOMEM;
+
+ class_rbd = kzalloc(sizeof(*class_rbd), GFP_KERNEL);
+ if (!class_rbd)
+ goto out;
+
+ class_rbd->name = DRV_NAME;
+ class_rbd->owner = THIS_MODULE;
+ class_rbd->class_release = class_rbd_release;
+ class_rbd->class_attrs = class_rbd_attrs;
+
+ ret = class_register(class_rbd);
+ if (ret)
+ goto out_class;
+ return 0;
+
+out_class:
+ kfree(class_rbd);
+ class_rbd = NULL;
+ pr_err(DRV_NAME ": failed to create class rbd\n");
+out:
+ return ret;
+}
+
+static void rbd_sysfs_cleanup(void)
+{
+ if (class_rbd)
+ class_destroy(class_rbd);
+ class_rbd = NULL;
+}
+
+int __init rbd_init(void)
+{
+ int rc;
+
+ spin_lock_init(&node_lock);
+ rc = rbd_sysfs_init();
+ if (rc)
+ return rc;
+ pr_info("loaded " DRV_NAME_LONG "\n");
+ return 0;
+}
+
+void __exit rbd_exit(void)
+{
+ rbd_sysfs_cleanup();
+}
+
+module_init(rbd_init);
+module_exit(rbd_exit);
+
+MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
+MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
+MODULE_DESCRIPTION("rados block device");
+
+/* following authorship retained from original osdblk.c */
+MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/block/rbd_types.h b/drivers/block/rbd_types.h
new file mode 100644
index 0000000..fc6c678
--- /dev/null
+++ b/drivers/block/rbd_types.h
@@ -0,0 +1,73 @@
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright (C) 2004-2010 Sage Weil <sage@newdream.net>
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#ifndef CEPH_RBD_TYPES_H
+#define CEPH_RBD_TYPES_H
+
+#include <linux/types.h>
+
+/*
+ * rbd image 'foo' consists of objects
+ * foo.rbd - image metadata
+ * foo.00000000
+ * foo.00000001
+ * ... - data
+ */
+
+#define RBD_SUFFIX ".rbd"
+#define RBD_DIRECTORY "rbd_directory"
+#define RBD_INFO "rbd_info"
+
+#define RBD_DEFAULT_OBJ_ORDER 22 /* 4MB */
+#define RBD_MIN_OBJ_ORDER 16
+#define RBD_MAX_OBJ_ORDER 30
+
+#define RBD_MAX_OBJ_NAME_LEN 96
+#define RBD_MAX_SEG_NAME_LEN 128
+
+#define RBD_COMP_NONE 0
+#define RBD_CRYPT_NONE 0
+
+#define RBD_HEADER_TEXT "<<< Rados Block Device Image >>>\n"
+#define RBD_HEADER_SIGNATURE "RBD"
+#define RBD_HEADER_VERSION "001.005"
+
+struct rbd_info {
+ __le64 max_id;
+} __attribute__ ((packed));
+
+struct rbd_image_snap_ondisk {
+ __le64 id;
+ __le64 image_size;
+} __attribute__((packed));
+
+struct rbd_image_header_ondisk {
+ char text[40];
+ char block_name[24];
+ char signature[4];
+ char version[8];
+ struct {
+ __u8 order;
+ __u8 crypt_type;
+ __u8 comp_type;
+ __u8 unused;
+ } __attribute__((packed)) options;
+ __le64 image_size;
+ __le64 snap_seq;
+ __le32 snap_count;
+ __le32 reserved;
+ __le64 snap_names_len;
+ struct rbd_image_snap_ondisk snaps[0];
+} __attribute__((packed));
+
+
+#endif
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 2aafafc..8320490 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -2,7 +2,6 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
-#include <linux/smp_lock.h>
#include <linux/hdreg.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
@@ -202,6 +201,7 @@
struct virtio_blk *vblk = disk->private_data;
struct request *req;
struct bio *bio;
+ int err;
bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES,
GFP_KERNEL);
@@ -215,11 +215,14 @@
}
req->cmd_type = REQ_TYPE_SPECIAL;
- return blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
+ err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
+ blk_put_request(req);
+
+ return err;
}
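The added blk_put_request() is the point of this hunk: blk_execute_rq() does not consume the request, so returning its result directly leaked one request per serial-number query. A hedged sketch of the corrected lifetime, assuming the 2.6.36-era block API:

    #include <linux/blkdev.h>

    /* Issue a special request and always drop our reference,
     * whether or not the execution succeeded. */
    static int issue_special(struct request_queue *q, struct gendisk *disk,
                             struct request *req)
    {
            int err;

            req->cmd_type = REQ_TYPE_SPECIAL;
            err = blk_execute_rq(q, disk, req, false);
            blk_put_request(req);   /* was leaked before this fix */
            return err;
    }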
-static int virtblk_locked_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned cmd, unsigned long data)
+static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long data)
{
struct gendisk *disk = bdev->bd_disk;
struct virtio_blk *vblk = disk->private_data;
@@ -234,18 +237,6 @@
(void __user *)data);
}
-static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long param)
-{
- int ret;
-
- lock_kernel();
- ret = virtblk_locked_ioctl(bdev, mode, cmd, param);
- unlock_kernel();
-
- return ret;
-}
-
/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index ac1b682e..ab735a6 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -834,7 +834,7 @@
char *type;
int len;
/* no unplug has been done: do not hook devices != xen vbds */
- if (xen_platform_pci_unplug & XEN_UNPLUG_IGNORE) {
+ if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY) {
int major;
if (!VDEV_IS_EXTENDED(vdevice))
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index 4b66c69..5ddf67e 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -57,7 +57,7 @@
config AGP_AMD64
tristate "AMD Opteron/Athlon64 on-CPU GART support"
- depends on AGP && X86 && K8_NB
+ depends on AGP && X86 && AMD_NB
help
This option gives you AGP support for the GLX component of
X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs.
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 70312da..42396df 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -15,7 +15,7 @@
#include <linux/mmzone.h>
#include <asm/page.h> /* PAGE_SIZE */
#include <asm/e820.h>
-#include <asm/k8.h>
+#include <asm/amd_nb.h>
#include <asm/gart.h>
#include "agp.h"
@@ -124,7 +124,7 @@
u32 temp;
struct aper_size_info_32 *values;
- dev = k8_northbridges[0];
+ dev = k8_northbridges.nb_misc[0];
if (dev==NULL)
return 0;
@@ -181,10 +181,14 @@
unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real);
int i;
+ if (!k8_northbridges.gart_supported)
+ return 0;
+
/* Configure AGP regs in each x86-64 host bridge. */
- for (i = 0; i < num_k8_northbridges; i++) {
+ for (i = 0; i < k8_northbridges.num; i++) {
agp_bridge->gart_bus_addr =
- amd64_configure(k8_northbridges[i], gatt_bus);
+ amd64_configure(k8_northbridges.nb_misc[i],
+ gatt_bus);
}
k8_flush_garts();
return 0;
@@ -195,11 +199,15 @@
{
u32 tmp;
int i;
- for (i = 0; i < num_k8_northbridges; i++) {
- struct pci_dev *dev = k8_northbridges[i];
+
+ if (!k8_northbridges.gart_supported)
+ return;
+
+ for (i = 0; i < k8_northbridges.num; i++) {
+ struct pci_dev *dev = k8_northbridges.nb_misc[i];
/* disable gart translation */
pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &tmp);
- tmp &= ~AMD64_GARTEN;
+ tmp &= ~GARTEN;
pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, tmp);
}
}
@@ -313,22 +321,25 @@
if (order < 0 || !agp_aperture_valid(aper, (32*1024*1024)<<order))
return -1;
- pci_write_config_dword(nb, AMD64_GARTAPERTURECTL, order << 1);
+ gart_set_size_and_enable(nb, order);
pci_write_config_dword(nb, AMD64_GARTAPERTUREBASE, aper >> 25);
return 0;
}
-static __devinit int cache_nbs (struct pci_dev *pdev, u32 cap_ptr)
+static __devinit int cache_nbs(struct pci_dev *pdev, u32 cap_ptr)
{
int i;
if (cache_k8_northbridges() < 0)
return -ENODEV;
+ if (!k8_northbridges.gart_supported)
+ return -ENODEV;
+
i = 0;
- for (i = 0; i < num_k8_northbridges; i++) {
- struct pci_dev *dev = k8_northbridges[i];
+ for (i = 0; i < k8_northbridges.num; i++) {
+ struct pci_dev *dev = k8_northbridges.nb_misc[i];
if (fix_northbridge(dev, pdev, cap_ptr) < 0) {
dev_err(&dev->dev, "no usable aperture found\n");
#ifdef __x86_64__
@@ -405,7 +416,8 @@
}
/* shadow x86-64 registers into ULi registers */
- pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &httfea);
+ pci_read_config_dword (k8_northbridges.nb_misc[0], AMD64_GARTAPERTUREBASE,
+ &httfea);
/* if x86-64 aperture base is beyond 4G, exit here */
if ((httfea & 0x7fff) >> (32 - 25)) {
@@ -472,7 +484,8 @@
pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp);
/* shadow x86-64 registers into NVIDIA registers */
- pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &apbase);
+ pci_read_config_dword (k8_northbridges.nb_misc[0], AMD64_GARTAPERTUREBASE,
+ &apbase);
/* if x86-64 aperture base is beyond 4G, exit here */
if ( (apbase & 0x7fff) >> (32 - 25) ) {
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index d2abf51..64255ce 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -984,7 +984,9 @@
bridge->driver->cache_flush();
#ifdef CONFIG_X86
- set_memory_uc((unsigned long)table, 1 << page_order);
+ if (set_memory_uc((unsigned long)table, 1 << page_order))
+ printk(KERN_WARNING "Could not set GATT table memory to UC!");
+
bridge->gatt_table = (void *)table;
#else
bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 710af89..cd18493 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -12,6 +12,7 @@
#include <asm/smp.h>
#include "agp.h"
#include "intel-agp.h"
+#include <linux/intel-gtt.h>
#include "intel-gtt.c"
@@ -805,6 +806,8 @@
"G45/G43", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG,
"B43", NULL, &intel_i965_driver },
+ { PCI_DEVICE_ID_INTEL_B43_1_HB, PCI_DEVICE_ID_INTEL_B43_1_IG,
+ "B43", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG,
"G41", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
@@ -815,11 +818,19 @@
"HD Graphics", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
"HD Graphics", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG,
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
"Sandybridge", NULL, &intel_gen6_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG,
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
"Sandybridge", NULL, &intel_gen6_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_D0_IG,
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
+ "Sandybridge", NULL, &intel_gen6_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
+ "Sandybridge", NULL, &intel_gen6_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
+ "Sandybridge", NULL, &intel_gen6_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
+ "Sandybridge", NULL, &intel_gen6_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
"Sandybridge", NULL, &intel_gen6_driver },
{ 0, 0, NULL, NULL, NULL }
};
@@ -1044,6 +1055,7 @@
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB),
ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB),
+ ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB),
{ }
};
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 08d4753..d09b1ab 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -1,6 +1,8 @@
/*
* Common Intel AGPGART and GTT definitions.
*/
+#ifndef _INTEL_AGP_H
+#define _INTEL_AGP_H
/* Intel registers */
#define INTEL_APSIZE 0xb4
@@ -184,6 +186,8 @@
#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2
#define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40
#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42
+#define PCI_DEVICE_ID_INTEL_B43_1_HB 0x2E90
+#define PCI_DEVICE_ID_INTEL_B43_1_IG 0x2E92
#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40
#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42
#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00
@@ -200,11 +204,16 @@
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_D0_IG 0x0126
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 /* Desktop */
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG 0x0102
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG 0x0112
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG 0x0122
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 /* Mobile */
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG 0x0106
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG 0x0116
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG 0x0126
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB 0x0108 /* Server */
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG 0x010A
/* cover 915 and 945 variants */
#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
@@ -231,7 +240,8 @@
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB)
#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
@@ -244,3 +254,5 @@
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
IS_SNB)
+
+#endif
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index d22ffb8..75e0a34 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -49,6 +49,26 @@
.type = INTEL_AGP_CACHED_MEMORY}
};
+#define INTEL_AGP_UNCACHED_MEMORY 0
+#define INTEL_AGP_CACHED_MEMORY_LLC 1
+#define INTEL_AGP_CACHED_MEMORY_LLC_GFDT 2
+#define INTEL_AGP_CACHED_MEMORY_LLC_MLC 3
+#define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT 4
+
+static struct gatt_mask intel_gen6_masks[] =
+{
+ {.mask = I810_PTE_VALID | GEN6_PTE_UNCACHED,
+ .type = INTEL_AGP_UNCACHED_MEMORY },
+ {.mask = I810_PTE_VALID | GEN6_PTE_LLC,
+ .type = INTEL_AGP_CACHED_MEMORY_LLC },
+ {.mask = I810_PTE_VALID | GEN6_PTE_LLC | GEN6_PTE_GFDT,
+ .type = INTEL_AGP_CACHED_MEMORY_LLC_GFDT },
+ {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC,
+ .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC },
+ {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC | GEN6_PTE_GFDT,
+ .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT },
+};
+
static struct _intel_private {
struct pci_dev *pcidev; /* device one */
u8 __iomem *registers;
@@ -178,13 +198,6 @@
off_t pg_start, int mask_type)
{
int i, j;
- u32 cache_bits = 0;
-
- if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
- {
- cache_bits = GEN6_PTE_LLC_MLC;
- }
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
writel(agp_bridge->driver->mask_memory(agp_bridge,
@@ -317,6 +330,23 @@
return 0;
}
+static int intel_gen6_type_to_mask_type(struct agp_bridge_data *bridge,
+ int type)
+{
+ unsigned int type_mask = type & ~AGP_USER_CACHED_MEMORY_GFDT;
+ unsigned int gfdt = type & AGP_USER_CACHED_MEMORY_GFDT;
+
+ if (type_mask == AGP_USER_UNCACHED_MEMORY)
+ return INTEL_AGP_UNCACHED_MEMORY;
+ else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC)
+ return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT :
+ INTEL_AGP_CACHED_MEMORY_LLC_MLC;
+ else /* set 'normal'/'cached' to LLC by default */
+ return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_GFDT :
+ INTEL_AGP_CACHED_MEMORY_LLC;
+}
+
+
static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
int type)
{
@@ -588,8 +618,7 @@
gtt_entries = 0;
break;
}
- } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
+ } else if (IS_SNB) {
/*
* SandyBridge has new memory control reg at 0x50.w
*/
@@ -1068,11 +1097,11 @@
intel_i915_setup_chipset_flush();
}
- if (intel_private.ifp_resource.start) {
+ if (intel_private.ifp_resource.start)
intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
- if (!intel_private.i9xx_flush_page)
- dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing");
- }
+ if (!intel_private.i9xx_flush_page)
+ dev_err(&intel_private.pcidev->dev,
+ "can't ioremap flush page - no chipset flushing\n");
}
static int intel_i9xx_configure(void)
@@ -1163,7 +1192,7 @@
mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
- if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
+ if (!IS_SNB && mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
mask_type != INTEL_AGP_CACHED_MEMORY)
goto out_err;
@@ -1333,8 +1362,8 @@
static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge,
dma_addr_t addr, int type)
{
- /* Shift high bits down */
- addr |= (addr >> 28) & 0xff;
+ /* gen6 has bit11-4 for physical addr bit39-32 */
+ addr |= (addr >> 28) & 0xff0;
/* Type checking must be done elsewhere */
return addr | bridge->driver->masks[type].mask;
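The mask change above is easiest to see in a worked example: physical address bits 39:32 belong in PTE bits 11:4, i.e. (addr >> 28) & 0xff0, and the old 0xff mask silently dropped the top bit of that field. A standalone sketch:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t addr = 0x12ULL << 32;        /* phys bits 39:32 = 0x12 */
            uint64_t bad  = (addr >> 28) & 0xff;  /* old mask */
            uint64_t good = (addr >> 28) & 0xff0; /* fixed mask */

            /* 0x12 belongs at PTE bits 11:4, i.e. 0x120 */
            printf("old: 0x%03llx, fixed: 0x%03llx\n",
                   (unsigned long long)bad,
                   (unsigned long long)good);     /* old: 0x020, fixed: 0x120 */
            return 0;
    }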
@@ -1359,6 +1388,7 @@
break;
case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
+ case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB:
*gtt_offset = MB(2);
pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
@@ -1563,7 +1593,7 @@
.fetch_size = intel_i9xx_fetch_size,
.cleanup = intel_i915_cleanup,
.mask_memory = intel_gen6_mask_memory,
- .masks = intel_i810_masks,
+ .masks = intel_gen6_masks,
.agp_enable = intel_i810_agp_enable,
.cache_flush = global_cache_flush,
.create_gatt_table = intel_i965_create_gatt_table,
@@ -1576,7 +1606,7 @@
.agp_alloc_pages = agp_generic_alloc_pages,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
- .agp_type_to_mask_type = intel_i830_type_to_mask_type,
+ .agp_type_to_mask_type = intel_gen6_type_to_mask_type,
.chipset_flush = intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
.agp_map_page = intel_agp_map_page,
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
index e024972..f953c96 100644
--- a/drivers/char/hangcheck-timer.c
+++ b/drivers/char/hangcheck-timer.c
@@ -159,7 +159,7 @@
if (hangcheck_dump_tasks) {
printk(KERN_CRIT "Hangcheck: Task state:\n");
#ifdef CONFIG_MAGIC_SYSRQ
- handle_sysrq('t', NULL);
+ handle_sysrq('t');
#endif /* CONFIG_MAGIC_SYSRQ */
}
if (hangcheck_reboot) {
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index fa27d16..3afd62e 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -651,7 +651,7 @@
if (sysrq_pressed)
continue;
} else if (sysrq_pressed) {
- handle_sysrq(buf[i], tty);
+ handle_sysrq(buf[i]);
sysrq_pressed = 0;
continue;
}
diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c
index 1f4b6de..a2bc885 100644
--- a/drivers/char/hvsi.c
+++ b/drivers/char/hvsi.c
@@ -403,7 +403,7 @@
hp->sysrq = 1;
continue;
} else if (hp->sysrq) {
- handle_sysrq(c, hp->tty);
+ handle_sysrq(c);
hp->sysrq = 0;
continue;
}
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index 1acdb25..a3f5e38 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -387,7 +387,7 @@
static int n2rng_data_read(struct hwrng *rng, u32 *data)
{
- struct n2rng *np = rng->priv;
+ struct n2rng *np = (struct n2rng *) rng->priv;
unsigned long ra = __pa(&np->test_data);
int len;
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
index 07f3ea3..d4b71e8 100644
--- a/drivers/char/ip2/ip2main.c
+++ b/drivers/char/ip2/ip2main.c
@@ -1650,7 +1650,7 @@
/* disable DSS reporting */
i2QueueCommands(PTYPE_INLINE, pCh, 100, 4,
CMD_DCD_NREP, CMD_CTS_NREP, CMD_DSR_NREP, CMD_RI_NREP);
- if ( !tty || (tty->termios->c_cflag & HUPCL) ) {
+ if (tty->termios->c_cflag & HUPCL) {
i2QueueCommands(PTYPE_INLINE, pCh, 100, 2, CMD_RTSDN, CMD_DTRDN);
pCh->dataSetOut &= ~(I2_DTR | I2_RTS);
i2QueueCommands( PTYPE_INLINE, pCh, 100, 1, CMD_PAUSE(25));
@@ -2930,6 +2930,8 @@
if ( pCh )
{
rc = copy_to_user(argp, pCh, sizeof(i2ChanStr));
+ if (rc)
+ rc = -EFAULT;
} else {
rc = -ENODEV;
}
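The ip2 hunk above maps a nonzero copy_to_user() return value to -EFAULT instead of handing the raw "bytes not copied" count back to the ioctl caller. A minimal sketch of the idiom, with a hypothetical payload struct:

    /* copy_to_user() returns the number of bytes NOT copied; any
     * nonzero result must become -EFAULT, never the raw count. */
    static long example_copy_out(void __user *argp,
                                 const struct example_state *st)
    {
            if (copy_to_user(argp, st, sizeof(*st)))
                    return -EFAULT;
            return 0;
    }
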
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 3822b4f..7bd7c45 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -305,6 +305,9 @@
#ifdef CONFIG_PCI
static int pci_registered;
#endif
+#ifdef CONFIG_ACPI
+static int pnp_registered;
+#endif
#ifdef CONFIG_PPC_OF
static int of_registered;
#endif
@@ -2126,7 +2129,7 @@
{
struct acpi_device *acpi_dev;
struct smi_info *info;
- struct resource *res;
+ struct resource *res, *res_second;
acpi_handle handle;
acpi_status status;
unsigned long long tmp;
@@ -2182,13 +2185,13 @@
info->io.addr_data = res->start;
info->io.regspacing = DEFAULT_REGSPACING;
- res = pnp_get_resource(dev,
+ res_second = pnp_get_resource(dev,
(info->io.addr_type == IPMI_IO_ADDR_SPACE) ?
IORESOURCE_IO : IORESOURCE_MEM,
1);
- if (res) {
- if (res->start > info->io.addr_data)
- info->io.regspacing = res->start - info->io.addr_data;
+ if (res_second) {
+ if (res_second->start > info->io.addr_data)
+ info->io.regspacing = res_second->start - info->io.addr_data;
}
info->io.regsize = DEFAULT_REGSPACING;
info->io.regshift = 0;
@@ -3359,6 +3362,7 @@
#ifdef CONFIG_ACPI
pnp_register_driver(&ipmi_pnp_driver);
+ pnp_registered = 1;
#endif
#ifdef CONFIG_DMI
@@ -3526,7 +3530,8 @@
pci_unregister_driver(&ipmi_pci_driver);
#endif
#ifdef CONFIG_ACPI
- pnp_unregister_driver(&ipmi_pnp_driver);
+ if (pnp_registered)
+ pnp_unregister_driver(&ipmi_pnp_driver);
#endif
#ifdef CONFIG_PPC_OF
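The ipmi_si fix above records whether pnp_register_driver() actually ran so module cleanup only calls pnp_unregister_driver() when init registered the driver. A hedged sketch of the guard pattern (hypothetical driver; this variant also checks the registration result):

    static bool example_pnp_registered;

    static int __init example_init(void)
    {
    #ifdef CONFIG_ACPI
            if (!pnp_register_driver(&example_pnp_driver))
                    example_pnp_registered = true;
    #endif
            return 0;
    }

    static void __exit example_exit(void)
    {
    #ifdef CONFIG_ACPI
            if (example_pnp_registered)     /* undo only what init did */
                    pnp_unregister_driver(&example_pnp_driver);
    #endif
    }
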
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index a398ecd..1f528fa 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -788,10 +788,11 @@
/*
* capabilities for /dev/zero
* - permits private mappings, "copies" are taken of the source of zeros
+ * - no writeback happens
*/
static struct backing_dev_info zero_bdi = {
.name = "char/mem",
- .capabilities = BDI_CAP_MAP_COPY,
+ .capabilities = BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
static const struct file_operations full_fops = {
diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
index 79c3bc6..7c79d24 100644
--- a/drivers/char/rocket.c
+++ b/drivers/char/rocket.c
@@ -1244,6 +1244,7 @@
}
info->flags = ((info->flags & ~ROCKET_USR_MASK) | (new_serial.flags & ROCKET_USR_MASK));
configure_r_port(tty, info, NULL);
+ mutex_unlock(&info->port.mutex);
return 0;
}
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index fef80cf..e63b830 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -691,8 +691,10 @@
if (info->port.count == 1) {
/* 1st open on this device, init hardware */
retval = startup(info);
- if (retval < 0)
+ if (retval < 0) {
+ mutex_unlock(&info->port.mutex);
goto cleanup;
+ }
}
mutex_unlock(&info->port.mutex);
retval = block_til_ready(tty, filp, info);
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 878ac0c..ef31bb8 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -18,7 +18,6 @@
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/fs.h>
-#include <linux/tty.h>
#include <linux/mount.h>
#include <linux/kdev_t.h>
#include <linux/major.h>
@@ -76,7 +75,7 @@
__setup("sysrq_always_enabled", sysrq_always_enabled_setup);
-static void sysrq_handle_loglevel(int key, struct tty_struct *tty)
+static void sysrq_handle_loglevel(int key)
{
int i;
@@ -93,7 +92,7 @@
};
#ifdef CONFIG_VT
-static void sysrq_handle_SAK(int key, struct tty_struct *tty)
+static void sysrq_handle_SAK(int key)
{
struct work_struct *SAK_work = &vc_cons[fg_console].SAK_work;
schedule_work(SAK_work);
@@ -109,7 +108,7 @@
#endif
#ifdef CONFIG_VT
-static void sysrq_handle_unraw(int key, struct tty_struct *tty)
+static void sysrq_handle_unraw(int key)
{
struct kbd_struct *kbd = &kbd_table[fg_console];
@@ -126,7 +125,7 @@
#define sysrq_unraw_op (*(struct sysrq_key_op *)NULL)
#endif /* CONFIG_VT */
-static void sysrq_handle_crash(int key, struct tty_struct *tty)
+static void sysrq_handle_crash(int key)
{
char *killer = NULL;
@@ -141,7 +140,7 @@
.enable_mask = SYSRQ_ENABLE_DUMP,
};
-static void sysrq_handle_reboot(int key, struct tty_struct *tty)
+static void sysrq_handle_reboot(int key)
{
lockdep_off();
local_irq_enable();
@@ -154,7 +153,7 @@
.enable_mask = SYSRQ_ENABLE_BOOT,
};
-static void sysrq_handle_sync(int key, struct tty_struct *tty)
+static void sysrq_handle_sync(int key)
{
emergency_sync();
}
@@ -165,7 +164,7 @@
.enable_mask = SYSRQ_ENABLE_SYNC,
};
-static void sysrq_handle_show_timers(int key, struct tty_struct *tty)
+static void sysrq_handle_show_timers(int key)
{
sysrq_timer_list_show();
}
@@ -176,7 +175,7 @@
.action_msg = "Show clockevent devices & pending hrtimers (no others)",
};
-static void sysrq_handle_mountro(int key, struct tty_struct *tty)
+static void sysrq_handle_mountro(int key)
{
emergency_remount();
}
@@ -188,7 +187,7 @@
};
#ifdef CONFIG_LOCKDEP
-static void sysrq_handle_showlocks(int key, struct tty_struct *tty)
+static void sysrq_handle_showlocks(int key)
{
debug_show_all_locks();
}
@@ -226,7 +225,7 @@
static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus);
-static void sysrq_handle_showallcpus(int key, struct tty_struct *tty)
+static void sysrq_handle_showallcpus(int key)
{
/*
* Fall back to the workqueue based printing if the
@@ -252,7 +251,7 @@
};
#endif
-static void sysrq_handle_showregs(int key, struct tty_struct *tty)
+static void sysrq_handle_showregs(int key)
{
struct pt_regs *regs = get_irq_regs();
if (regs)
@@ -266,7 +265,7 @@
.enable_mask = SYSRQ_ENABLE_DUMP,
};
-static void sysrq_handle_showstate(int key, struct tty_struct *tty)
+static void sysrq_handle_showstate(int key)
{
show_state();
}
@@ -277,7 +276,7 @@
.enable_mask = SYSRQ_ENABLE_DUMP,
};
-static void sysrq_handle_showstate_blocked(int key, struct tty_struct *tty)
+static void sysrq_handle_showstate_blocked(int key)
{
show_state_filter(TASK_UNINTERRUPTIBLE);
}
@@ -291,7 +290,7 @@
#ifdef CONFIG_TRACING
#include <linux/ftrace.h>
-static void sysrq_ftrace_dump(int key, struct tty_struct *tty)
+static void sysrq_ftrace_dump(int key)
{
ftrace_dump(DUMP_ALL);
}
@@ -305,7 +304,7 @@
#define sysrq_ftrace_dump_op (*(struct sysrq_key_op *)NULL)
#endif
-static void sysrq_handle_showmem(int key, struct tty_struct *tty)
+static void sysrq_handle_showmem(int key)
{
show_mem();
}
@@ -330,7 +329,7 @@
}
}
-static void sysrq_handle_term(int key, struct tty_struct *tty)
+static void sysrq_handle_term(int key)
{
send_sig_all(SIGTERM);
console_loglevel = 8;
@@ -349,7 +348,7 @@
static DECLARE_WORK(moom_work, moom_callback);
-static void sysrq_handle_moom(int key, struct tty_struct *tty)
+static void sysrq_handle_moom(int key)
{
schedule_work(&moom_work);
}
@@ -361,7 +360,7 @@
};
#ifdef CONFIG_BLOCK
-static void sysrq_handle_thaw(int key, struct tty_struct *tty)
+static void sysrq_handle_thaw(int key)
{
emergency_thaw_all();
}
@@ -373,7 +372,7 @@
};
#endif
-static void sysrq_handle_kill(int key, struct tty_struct *tty)
+static void sysrq_handle_kill(int key)
{
send_sig_all(SIGKILL);
console_loglevel = 8;
@@ -385,7 +384,7 @@
.enable_mask = SYSRQ_ENABLE_SIGNAL,
};
-static void sysrq_handle_unrt(int key, struct tty_struct *tty)
+static void sysrq_handle_unrt(int key)
{
normalize_rt_tasks();
}
@@ -493,7 +492,7 @@
sysrq_key_table[i] = op_p;
}
-void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
+void __handle_sysrq(int key, bool check_mask)
{
struct sysrq_key_op *op_p;
int orig_log_level;
@@ -520,7 +519,7 @@
if (!check_mask || sysrq_on_mask(op_p->enable_mask)) {
printk("%s\n", op_p->action_msg);
console_loglevel = orig_log_level;
- op_p->handler(key, tty);
+ op_p->handler(key);
} else {
printk("This sysrq operation is disabled.\n");
}
@@ -545,10 +544,10 @@
spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
}
-void handle_sysrq(int key, struct tty_struct *tty)
+void handle_sysrq(int key)
{
if (sysrq_on())
- __handle_sysrq(key, tty, 1);
+ __handle_sysrq(key, true);
}
EXPORT_SYMBOL(handle_sysrq);
@@ -597,7 +596,7 @@
default:
if (sysrq_down && value && value != 2)
- __handle_sysrq(sysrq_xlate[code], NULL, 1);
+ __handle_sysrq(sysrq_xlate[code], true);
break;
}
@@ -765,7 +764,7 @@
if (get_user(c, buf))
return -EFAULT;
- __handle_sysrq(c, NULL, 0);
+ __handle_sysrq(c, false);
}
return count;
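All sysrq handlers above lose the struct tty_struct * argument; handle_sysrq() now takes only the key, and __handle_sysrq() takes the key plus a bool mask check. A minimal sketch of a custom key op against the new signature (key binding and messages are hypothetical):

    static void sysrq_handle_example(int key)       /* no tty argument */
    {
            pr_info("sysrq: example handler fired for '%c'\n", key);
    }

    static struct sysrq_key_op sysrq_example_op = {
            .handler        = sysrq_handle_example,
            .help_msg       = "example(x)",
            .action_msg     = "Run example handler",
            .enable_mask    = SYSRQ_ENABLE_DUMP,
    };

    /* registered/unregistered with register_sysrq_key(key, &op) and
     * unregister_sysrq_key(key, &op), as before this change. */
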
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 05ad4a1..7c41335 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -47,6 +47,16 @@
#define TPM_MAX_PROTECTED_ORDINAL 12
#define TPM_PROTECTED_ORDINAL_MASK 0xFF
+/*
+ * Bug workaround - some TPM's don't flush the most
+ * recently changed pcr on suspend, so force the flush
+ * with an extend to the selected _unused_ non-volatile pcr.
+ */
+static int tpm_suspend_pcr;
+module_param_named(suspend_pcr, tpm_suspend_pcr, uint, 0644);
+MODULE_PARM_DESC(suspend_pcr,
+ "PCR to use for dummy writes to faciltate flush on suspend.");
+
static LIST_HEAD(tpm_chip_list);
static DEFINE_SPINLOCK(driver_lock);
static DECLARE_BITMAP(dev_mask, TPM_NUM_DEVICES);
@@ -1077,18 +1087,6 @@
.ordinal = TPM_ORD_SAVESTATE
};
-/* Bug workaround - some TPM's don't flush the most
- * recently changed pcr on suspend, so force the flush
- * with an extend to the selected _unused_ non-volatile pcr.
- */
-static int tpm_suspend_pcr;
-static int __init tpm_suspend_setup(char *str)
-{
- get_option(&str, &tpm_suspend_pcr);
- return 1;
-}
-__setup("tpm_suspend_pcr=", tpm_suspend_setup);
-
/*
* We are about to suspend. Save the TPM state
* so that it can be restored.
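The tpm.c hunk above converts the suspend PCR knob from a __setup() hook, which is parsed only for built-in code, to module_param_named(), which works both built in (tpm.suspend_pcr=N on the command line) and as a module, and is root-writable at runtime through sysfs (mode 0644). The same conversion on a hypothetical parameter:

    static unsigned int example_pcr;

    /* visible as /sys/module/<mod>/parameters/pcr; settable at load
     * time via "<mod>.pcr=N" on the kernel command line */
    module_param_named(pcr, example_pcr, uint, 0644);
    MODULE_PARM_DESC(pcr, "PCR index used for the dummy extend on suspend");
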
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 949067a..613c852 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -355,7 +355,7 @@
if (*stp == '\0')
stp = NULL;
- if (tty_line >= 0 && tty_line <= p->num && p->ops &&
+ if (tty_line >= 0 && tty_line < p->num && p->ops &&
p->ops->poll_init && !p->ops->poll_init(p, tty_line, stp)) {
res = tty_driver_kref_get(p);
*line = tty_line;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 942a982..6c1b676 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -48,6 +48,9 @@
/* Used for exporting per-port information to debugfs */
struct dentry *debugfs_dir;
+ /* List of all the devices we're handling */
+ struct list_head portdevs;
+
/* Number of devices this driver is handling */
unsigned int index;
@@ -108,6 +111,9 @@
* ports for that device (vdev->priv).
*/
struct ports_device {
+ /* Next portdev in the list, head is in the pdrvdata struct */
+ struct list_head list;
+
/*
* Workqueue handlers where we process deferred work after
* notification
@@ -178,15 +184,21 @@
struct console cons;
/* Each port associates with a separate char device */
- struct cdev cdev;
+ struct cdev *cdev;
struct device *dev;
+ /* Reference-counting to handle port hot-unplugs and file operations */
+ struct kref kref;
+
/* A waitqueue for poll() or blocking read operations */
wait_queue_head_t waitqueue;
/* The 'name' of the port that we expose via sysfs properties */
char *name;
+ /* We can notify apps of host connect / disconnect events via SIGIO */
+ struct fasync_struct *async_queue;
+
/* The 'id' to identify the port with the Host */
u32 id;
@@ -221,6 +233,41 @@
return port;
}
+static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev,
+ dev_t dev)
+{
+ struct port *port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&portdev->ports_lock, flags);
+ list_for_each_entry(port, &portdev->ports, list)
+ if (port->cdev->dev == dev)
+ goto out;
+ port = NULL;
+out:
+ spin_unlock_irqrestore(&portdev->ports_lock, flags);
+
+ return port;
+}
+
+static struct port *find_port_by_devt(dev_t dev)
+{
+ struct ports_device *portdev;
+ struct port *port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdrvdata_lock, flags);
+ list_for_each_entry(portdev, &pdrvdata.portdevs, list) {
+ port = find_port_by_devt_in_portdev(portdev, dev);
+ if (port)
+ goto out;
+ }
+ port = NULL;
+out:
+ spin_unlock_irqrestore(&pdrvdata_lock, flags);
+ return port;
+}
+
static struct port *find_port_by_id(struct ports_device *portdev, u32 id)
{
struct port *port;
@@ -410,7 +457,10 @@
static ssize_t send_control_msg(struct port *port, unsigned int event,
unsigned int value)
{
- return __send_control_msg(port->portdev, port->id, event, value);
+ /* Did the port get unplugged before userspace closed it? */
+ if (port->portdev)
+ return __send_control_msg(port->portdev, port->id, event, value);
+ return 0;
}
/* Callers must take the port->outvq_lock */
@@ -459,9 +509,12 @@
/*
* Wait till the host acknowledges it pushed out the data we
- * sent. This is done for ports in blocking mode or for data
- * from the hvc_console; the tty operations are performed with
- * spinlocks held so we can't sleep here.
+ * sent. This is done for data from the hvc_console; the tty
+ * operations are performed with spinlocks held so we can't
+ * sleep here. An alternative would be to copy the data to a
+ * buffer and relax the spinning requirement. The downside is
+ * we need to kmalloc a GFP_ATOMIC buffer each time the
+ * console driver writes something out.
*/
while (!virtqueue_get_buf(out_vq, &len))
cpu_relax();
@@ -522,6 +575,10 @@
/* The condition that must be true for polling to end */
static bool will_read_block(struct port *port)
{
+ if (!port->guest_connected) {
+ /* Port got hot-unplugged. Let's exit. */
+ return false;
+ }
return !port_has_data(port) && port->host_connected;
}
@@ -572,6 +629,9 @@
if (ret < 0)
return ret;
}
+ /* Port got hot-unplugged. */
+ if (!port->guest_connected)
+ return -ENODEV;
/*
* We could've received a disconnection message while we were
* waiting for more data.
@@ -596,6 +656,10 @@
ssize_t ret;
bool nonblock;
+ /* Userspace could be out to fool us */
+ if (!count)
+ return 0;
+
port = filp->private_data;
nonblock = filp->f_flags & O_NONBLOCK;
@@ -609,6 +673,9 @@
if (ret < 0)
return ret;
}
+ /* Port got hot-unplugged. */
+ if (!port->guest_connected)
+ return -ENODEV;
count = min((size_t)(32 * 1024), count);
@@ -622,6 +689,14 @@
goto free_buf;
}
+ /*
+ * We now ask send_buf() to not spin for generic ports -- we
+ * can re-use the same code path that non-blocking file
+ * descriptors take for blocking file descriptors since the
+ * wait is already done and we're certain the write will go
+ * through to the host.
+ */
+ nonblock = true;
ret = send_buf(port, buf, count, nonblock);
if (nonblock && ret > 0)
@@ -641,8 +716,12 @@
port = filp->private_data;
poll_wait(filp, &port->waitqueue, wait);
+ if (!port->guest_connected) {
+ /* Port got unplugged */
+ return POLLHUP;
+ }
ret = 0;
- if (port->inbuf)
+ if (!will_read_block(port))
ret |= POLLIN | POLLRDNORM;
if (!will_write_block(port))
ret |= POLLOUT;
@@ -652,6 +731,8 @@
return ret;
}
+static void remove_port(struct kref *kref);
+
static int port_fops_release(struct inode *inode, struct file *filp)
{
struct port *port;
@@ -672,6 +753,16 @@
reclaim_consumed_buffers(port);
spin_unlock_irq(&port->outvq_lock);
+ /*
+ * Locks aren't necessary here as a port can't be opened after
+ * unplug, and if a port isn't unplugged, a kref would already
+ * exist for the port. Plus, taking ports_lock here would
+ * create a dependency on other locks taken by functions
+ * inside remove_port if we're the last holder of the port,
+ * creating many problems.
+ */
+ kref_put(&port->kref, remove_port);
+
return 0;
}
@@ -679,22 +770,31 @@
{
struct cdev *cdev = inode->i_cdev;
struct port *port;
+ int ret;
- port = container_of(cdev, struct port, cdev);
+ port = find_port_by_devt(cdev->dev);
filp->private_data = port;
+ /* Prevent against a port getting hot-unplugged at the same time */
+ spin_lock_irq(&port->portdev->ports_lock);
+ kref_get(&port->kref);
+ spin_unlock_irq(&port->portdev->ports_lock);
+
/*
* Don't allow opening of console port devices -- that's done
* via /dev/hvc
*/
- if (is_console_port(port))
- return -ENXIO;
+ if (is_console_port(port)) {
+ ret = -ENXIO;
+ goto out;
+ }
/* Allow only one process to open a particular port at a time */
spin_lock_irq(&port->inbuf_lock);
if (port->guest_connected) {
spin_unlock_irq(&port->inbuf_lock);
- return -EMFILE;
+ ret = -EMFILE;
+ goto out;
}
port->guest_connected = true;
@@ -709,10 +809,23 @@
reclaim_consumed_buffers(port);
spin_unlock_irq(&port->outvq_lock);
+ nonseekable_open(inode, filp);
+
/* Notify host of port being opened */
send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1);
return 0;
+out:
+ kref_put(&port->kref, remove_port);
+ return ret;
+}
+
+static int port_fops_fasync(int fd, struct file *filp, int mode)
+{
+ struct port *port;
+
+ port = filp->private_data;
+ return fasync_helper(fd, filp, mode, &port->async_queue);
}
/*
@@ -728,6 +841,8 @@
.write = port_fops_write,
.poll = port_fops_poll,
.release = port_fops_release,
+ .fasync = port_fops_fasync,
+ .llseek = no_llseek,
};
/*
@@ -986,6 +1101,12 @@
return nr_added_bufs;
}
+static void send_sigio_to_port(struct port *port)
+{
+ if (port->async_queue && port->guest_connected)
+ kill_fasync(&port->async_queue, SIGIO, POLL_OUT);
+}
+
static int add_port(struct ports_device *portdev, u32 id)
{
char debugfs_name[16];
@@ -1000,6 +1121,7 @@
err = -ENOMEM;
goto fail;
}
+ kref_init(&port->kref);
port->portdev = portdev;
port->id = id;
@@ -1007,6 +1129,7 @@
port->name = NULL;
port->inbuf = NULL;
port->cons.hvc = NULL;
+ port->async_queue = NULL;
port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
@@ -1017,14 +1140,20 @@
port->in_vq = portdev->in_vqs[port->id];
port->out_vq = portdev->out_vqs[port->id];
- cdev_init(&port->cdev, &port_fops);
+ port->cdev = cdev_alloc();
+ if (!port->cdev) {
+ dev_err(&port->portdev->vdev->dev, "Error allocating cdev\n");
+ err = -ENOMEM;
+ goto free_port;
+ }
+ port->cdev->ops = &port_fops;
devt = MKDEV(portdev->chr_major, id);
- err = cdev_add(&port->cdev, devt, 1);
+ err = cdev_add(port->cdev, devt, 1);
if (err < 0) {
dev_err(&port->portdev->vdev->dev,
"Error %d adding cdev for port %u\n", err, id);
- goto free_port;
+ goto free_cdev;
}
port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev,
devt, port, "vport%up%u",
@@ -1089,7 +1218,7 @@
free_device:
device_destroy(pdrvdata.class, port->dev->devt);
free_cdev:
- cdev_del(&port->cdev);
+ cdev_del(port->cdev);
free_port:
kfree(port);
fail:
@@ -1098,21 +1227,45 @@
return err;
}
-/* Remove all port-specific data. */
-static int remove_port(struct port *port)
+/* No users remain; remove all port-specific data. */
+static void remove_port(struct kref *kref)
+{
+ struct port *port;
+
+ port = container_of(kref, struct port, kref);
+
+ sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
+ device_destroy(pdrvdata.class, port->dev->devt);
+ cdev_del(port->cdev);
+
+ kfree(port->name);
+
+ debugfs_remove(port->debugfs_file);
+
+ kfree(port);
+}
+
+/*
+ * Port got unplugged. Remove port from portdev's list and drop the
+ * kref reference. If no userspace has this port opened, it will
+ * result in immediate removal of the port.
+ */
+static void unplug_port(struct port *port)
{
struct port_buffer *buf;
+ spin_lock_irq(&port->portdev->ports_lock);
+ list_del(&port->list);
+ spin_unlock_irq(&port->portdev->ports_lock);
+
if (port->guest_connected) {
port->guest_connected = false;
port->host_connected = false;
wake_up_interruptible(&port->waitqueue);
- send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0);
- }
- spin_lock_irq(&port->portdev->ports_lock);
- list_del(&port->list);
- spin_unlock_irq(&port->portdev->ports_lock);
+ /* Let the app know the port is going down. */
+ send_sigio_to_port(port);
+ }
if (is_console_port(port)) {
spin_lock_irq(&pdrvdata_lock);
@@ -1131,9 +1284,6 @@
hvc_remove(port->cons.hvc);
#endif
}
- sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
- device_destroy(pdrvdata.class, port->dev->devt);
- cdev_del(&port->cdev);
/* Remove unused data this port might have received. */
discard_port_data(port);
@@ -1144,12 +1294,19 @@
while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
free_buf(buf);
- kfree(port->name);
+ /*
+ * We should just assume the device itself has gone off --
+ * else a close on an open port later will try to send out a
+ * control message.
+ */
+ port->portdev = NULL;
- debugfs_remove(port->debugfs_file);
-
- kfree(port);
- return 0;
+ /*
+ * Locks around here are not necessary - a port can't be
+ * opened after we removed the port struct from ports_list
+ * above.
+ */
+ kref_put(&port->kref, remove_port);
}
/* Any private messages that the Host and Guest want to share */
@@ -1188,7 +1345,7 @@
add_port(portdev, cpkt->id);
break;
case VIRTIO_CONSOLE_PORT_REMOVE:
- remove_port(port);
+ unplug_port(port);
break;
case VIRTIO_CONSOLE_CONSOLE_PORT:
if (!cpkt->value)
@@ -1230,6 +1387,12 @@
spin_lock_irq(&port->outvq_lock);
reclaim_consumed_buffers(port);
spin_unlock_irq(&port->outvq_lock);
+
+ /*
+ * If the guest is connected, it'll be interested in
+ * knowing the host connection state changed.
+ */
+ send_sigio_to_port(port);
break;
case VIRTIO_CONSOLE_PORT_NAME:
/*
@@ -1326,6 +1489,9 @@
wake_up_interruptible(&port->waitqueue);
+ /* Send a SIGIO indicating new data in case the process asked for it */
+ send_sigio_to_port(port);
+
if (is_console_port(port) && hvc_poll(port->cons.hvc))
hvc_kick();
}
@@ -1562,6 +1728,10 @@
add_port(portdev, 0);
}
+ spin_lock_irq(&pdrvdata_lock);
+ list_add_tail(&portdev->list, &pdrvdata.portdevs);
+ spin_unlock_irq(&pdrvdata_lock);
+
__send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
VIRTIO_CONSOLE_DEVICE_READY, 1);
return 0;
@@ -1585,23 +1755,41 @@
{
struct ports_device *portdev;
struct port *port, *port2;
- struct port_buffer *buf;
- unsigned int len;
portdev = vdev->priv;
+ spin_lock_irq(&pdrvdata_lock);
+ list_del(&portdev->list);
+ spin_unlock_irq(&pdrvdata_lock);
+
+ /* Disable interrupts for vqs */
+ vdev->config->reset(vdev);
+ /* Finish up work that's lined up */
cancel_work_sync(&portdev->control_work);
list_for_each_entry_safe(port, port2, &portdev->ports, list)
- remove_port(port);
+ unplug_port(port);
unregister_chrdev(portdev->chr_major, "virtio-portsdev");
- while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
- free_buf(buf);
+ /*
+ * When yanking out a device, we immediately lose the
+ * (device-side) queues. So there's no point in keeping the
+ * guest side around till we drop our final reference. This
+ * also means that any ports which are in an open state will
+ * have to just stop using the port, as the vqs are going
+ * away.
+ */
+ if (use_multiport(portdev)) {
+ struct port_buffer *buf;
+ unsigned int len;
- while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
- free_buf(buf);
+ while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
+ free_buf(buf);
+
+ while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
+ free_buf(buf);
+ }
vdev->config->del_vqs(vdev);
kfree(portdev->in_vqs);
@@ -1648,6 +1836,7 @@
PTR_ERR(pdrvdata.debugfs_dir));
}
INIT_LIST_HEAD(&pdrvdata.consoles);
+ INIT_LIST_HEAD(&pdrvdata.portdevs);
return register_virtio_driver(&virtio_console);
}
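The virtio_console rework above ties port lifetime to a kref: add_port() takes the initial reference, open() pins the port under ports_lock, and release() and unplug_port() each drop one reference, so remove_port() runs only after the last user is gone. The shape of that pattern, reduced to a hypothetical object:

    struct example_port {
            struct kref kref;
            struct list_head list;
    };

    static void example_remove(struct kref *kref)
    {
            struct example_port *p =
                    container_of(kref, struct example_port, kref);
            kfree(p);               /* last reference gone: safe to free */
    }

    /* hot-unplug: unlink, then drop the list's reference; a concurrent
     * open() holds its own reference and defers the actual free */
    static void example_unplug(struct example_port *p)
    {
            list_del(&p->list);
            kref_put(&p->kref, example_remove);
    }

    static void example_release(struct example_port *p)
    {
            kref_put(&p->kref, example_remove);     /* drop open()'s pin */
    }
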
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 50590c7..281aada 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -906,22 +906,16 @@
* bottom of buffer
*/
old_origin += (old_rows - new_rows) * old_row_size;
- end = vc->vc_scr_end;
} else {
/*
* Cursor is in no man's land, copy 1/2 screenful
* from the top and bottom of cursor position
*/
old_origin += (vc->vc_y - new_rows/2) * old_row_size;
- end = old_origin + (old_row_size * new_rows);
}
- } else
- /*
- * Cursor near the top, copy contents from the top of buffer
- */
- end = (old_rows > new_rows) ? old_origin +
- (old_row_size * new_rows) :
- vc->vc_scr_end;
+ }
+
+ end = old_origin + old_row_size * min(old_rows, new_rows);
update_attr(vc);
@@ -3075,8 +3069,7 @@
old_was_color = vc->vc_can_do_color;
vc->vc_sw->con_deinit(vc);
- if (!vc->vc_origin)
- vc->vc_origin = (unsigned long)vc->vc_screenbuf;
+ vc->vc_origin = (unsigned long)vc->vc_screenbuf;
visual_init(vc, i, 0);
set_origin(vc);
update_attr(vc);
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index 2bbeaae..38df8c1 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -533,11 +533,14 @@
case KIOCSOUND:
if (!perm)
goto eperm;
- /* FIXME: This is an old broken API but we need to keep it
- supported and somehow separate the historic advertised
- tick rate from any real one */
+ /*
+ * The use of PIT_TICK_RATE is historic, it used to be
+ * the platform-dependent CLOCK_TICK_RATE between 2.6.12
+ * and 2.6.36, which was a minor but unfortunate ABI
+ * change.
+ */
if (arg)
- arg = CLOCK_TICK_RATE / arg;
+ arg = PIT_TICK_RATE / arg;
kd_mksound(arg, 0);
break;
@@ -553,11 +556,8 @@
*/
ticks = HZ * ((arg >> 16) & 0xffff) / 1000;
count = ticks ? (arg & 0xffff) : 0;
- /* FIXME: This is an old broken API but we need to keep it
- supported and somehow separate the historic advertised
- tick rate from any real one */
if (count)
- count = CLOCK_TICK_RATE / count;
+ count = PIT_TICK_RATE / count;
kd_mksound(count, ticks);
break;
}
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index c2408bb..f508690 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -80,7 +80,7 @@
* Limiting Performance Impact
* ---------------------------
* C states, especially those with large exit latencies, can have a real
- * noticable impact on workloads, which is not acceptable for most sysadmins,
+ * noticeable impact on workloads, which is not acceptable for most sysadmins,
* and in addition, less performance has a power price of its own.
*
* As a general rule of thumb, menu assumes that the following heuristic
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index 8661c84..b98c676 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -39,6 +39,10 @@
static LIST_HEAD(dca_domains);
+static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);
+
+static int dca_providers_blocked;
+
static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -70,6 +74,60 @@
kfree(domain);
}
+static int dca_provider_ioat_ver_3_0(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
+ ((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
+ (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
+ (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
+ (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
+ (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
+ (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
+ (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
+ (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
+}
+
+static void unregister_dca_providers(void)
+{
+ struct dca_provider *dca, *_dca;
+ struct list_head unregistered_providers;
+ struct dca_domain *domain;
+ unsigned long flags;
+
+ blocking_notifier_call_chain(&dca_provider_chain,
+ DCA_PROVIDER_REMOVE, NULL);
+
+ INIT_LIST_HEAD(&unregistered_providers);
+
+ spin_lock_irqsave(&dca_lock, flags);
+
+ if (list_empty(&dca_domains)) {
+ spin_unlock_irqrestore(&dca_lock, flags);
+ return;
+ }
+
+ /* at this point only one domain in the list is expected */
+ domain = list_first_entry(&dca_domains, struct dca_domain, node);
+ if (!domain)
+ return;
+
+ list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node) {
+ list_del(&dca->node);
+ list_add(&dca->node, &unregistered_providers);
+ }
+
+ dca_free_domain(domain);
+
+ spin_unlock_irqrestore(&dca_lock, flags);
+
+ list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
+ dca_sysfs_remove_provider(dca);
+ list_del(&dca->node);
+ }
+}
+
static struct dca_domain *dca_find_domain(struct pci_bus *rc)
{
struct dca_domain *domain;
@@ -90,9 +148,13 @@
domain = dca_find_domain(rc);
if (!domain) {
- domain = dca_allocate_domain(rc);
- if (domain)
- list_add(&domain->node, &dca_domains);
+ if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) {
+ dca_providers_blocked = 1;
+ } else {
+ domain = dca_allocate_domain(rc);
+ if (domain)
+ list_add(&domain->node, &dca_domains);
+ }
}
return domain;
@@ -293,8 +355,6 @@
}
EXPORT_SYMBOL_GPL(free_dca_provider);
-static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);
-
/**
* register_dca_provider - register a dca provider
* @dca - struct created by alloc_dca_provider()
@@ -306,6 +366,13 @@
unsigned long flags;
struct dca_domain *domain;
+ spin_lock_irqsave(&dca_lock, flags);
+ if (dca_providers_blocked) {
+ spin_unlock_irqrestore(&dca_lock, flags);
+ return -ENODEV;
+ }
+ spin_unlock_irqrestore(&dca_lock, flags);
+
err = dca_sysfs_add_provider(dca, dev);
if (err)
return err;
@@ -313,7 +380,13 @@
spin_lock_irqsave(&dca_lock, flags);
domain = dca_get_domain(dev);
if (!domain) {
- spin_unlock_irqrestore(&dca_lock, flags);
+ if (dca_providers_blocked) {
+ spin_unlock_irqrestore(&dca_lock, flags);
+ dca_sysfs_remove_provider(dca);
+ unregister_dca_providers();
+ } else {
+ spin_unlock_irqrestore(&dca_lock, flags);
+ }
return -ENODEV;
}
list_add(&dca->node, &domain->dca_providers);
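unregister_dca_providers() above moves every provider to a private list while holding dca_lock and only then performs the sysfs teardown, since dca_sysfs_remove_provider() may sleep and must not run under a spinlock. A sketch of that splice-then-teardown idiom with hypothetical node types:

    struct example_node {
            struct list_head node;
    };

    static void example_teardown(struct list_head *live, spinlock_t *lock)
    {
            LIST_HEAD(doomed);
            struct example_node *n, *tmp;
            unsigned long flags;

            spin_lock_irqsave(lock, flags);
            list_for_each_entry_safe(n, tmp, live, node)
                    list_move(&n->node, &doomed);   /* unlink under lock */
            spin_unlock_irqrestore(lock, flags);

            /* sleeping cleanup runs outside the spinlock */
            list_for_each_entry_safe(n, tmp, &doomed, node) {
                    list_del(&n->node);
                    kfree(n);
            }
    }
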
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 216f9d3..effd140 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -879,7 +879,7 @@
dma->device_issue_pending = ioat2_issue_pending;
dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
dma->device_free_chan_resources = ioat2_free_chan_resources;
- dma->device_tx_status = ioat_tx_status;
+ dma->device_tx_status = ioat_dma_tx_status;
err = ioat_probe(device);
if (err)
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 86c5ae9..411d5bf 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -162,7 +162,7 @@
static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
- u32 val = (1 << (1 + (chan->idx * 16)));
+ u32 val = ~(1 << (chan->idx * 16));
dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
__raw_writel(val, XOR_INTR_CAUSE(chan));
}
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index fb64cf3..eb6b54d 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -580,7 +580,6 @@
sh_chan = to_sh_chan(chan);
param = chan->private;
- slave_addr = param->config->addr;
/* Someone calling slave DMA on a public channel? */
if (!param || !sg_len) {
@@ -589,6 +588,8 @@
return NULL;
}
+ slave_addr = param->config->addr;
+
/*
* if (param != NULL), this is a successfully requested slave channel,
* therefore param->config != NULL too.
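The shdma fix above moves the param->config->addr load below the !param check: chan->private is NULL when slave DMA is requested on a public channel, so the old order dereferenced a NULL pointer before rejecting the call. The corrected ordering in miniature (hypothetical types):

    struct example_cfg   { dma_addr_t addr; };
    struct example_slave { struct example_cfg *config; };

    static dma_addr_t example_slave_addr(struct example_slave *param,
                                         int sg_len)
    {
            /* validate first: param may be NULL on public channels */
            if (!param || !sg_len)
                    return 0;
            /* only now is param->config known to be valid */
            return param->config->addr;
    }
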
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 70bb350..734e2e0 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -66,7 +66,7 @@
config EDAC_AMD64
tristate "AMD64 (Opteron, Athlon64) K8, F10h, F11h"
- depends on EDAC_MM_EDAC && K8_NB && X86_64 && PCI && EDAC_DECODE_MCE
+ depends on EDAC_MM_EDAC && AMD_NB && X86_64 && PCI && EDAC_DECODE_MCE
help
Support for error detection and correction on the AMD 64
Families of Memory Controllers (K8, F10h and F11h)
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 670239a..09fcc52 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1,5 +1,5 @@
#include "amd64_edac.h"
-#include <asm/k8.h>
+#include <asm/amd_nb.h>
static struct edac_pci_ctl_info *amd64_ctl_pci;
@@ -2071,16 +2071,6 @@
amd64_handle_ce(mci, info);
else if (ecc_type == 1)
amd64_handle_ue(mci, info);
-
- /*
- * If main error is CE then overflow must be CE. If main error is UE
- * then overflow is unknown. We'll call the overflow a CE - if
- * panic_on_ue is set then we're already panic'ed and won't arrive
- * here. Else, then apparently someone doesn't think that UE's are
- * catastrophic.
- */
- if (info->nbsh & K8_NBSH_OVERFLOW)
- edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR " Error Overflow");
}
void amd64_decode_bus_error(int node_id, struct err_regs *regs)
@@ -2937,7 +2927,7 @@
* to finish initialization of the MC instances.
*/
err = -ENODEV;
- for (nb = 0; nb < num_k8_northbridges; nb++) {
+ for (nb = 0; nb < k8_northbridges.num; nb++) {
if (!pvt_lookup[nb])
continue;
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 3630308..6b21e25 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -339,6 +339,9 @@
{
int status;
+ if (mci->op_state != OP_RUNNING_POLL)
+ return;
+
status = cancel_delayed_work(&mci->work);
if (status == 0) {
debugf0("%s() not canceled, flush the queue\n",
diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c
index bae9351..9014df6 100644
--- a/drivers/edac/edac_mce_amd.c
+++ b/drivers/edac/edac_mce_amd.c
@@ -365,11 +365,10 @@
pr_emerg("MC%d_STATUS: ", m->bank);
- pr_cont("%sorrected error, report: %s, MiscV: %svalid, "
+ pr_cont("%sorrected error, other errors lost: %s, "
"CPU context corrupt: %s",
((m->status & MCI_STATUS_UC) ? "Unc" : "C"),
- ((m->status & MCI_STATUS_EN) ? "yes" : "no"),
- ((m->status & MCI_STATUS_MISCV) ? "" : "in"),
+ ((m->status & MCI_STATUS_OVER) ? "yes" : "no"),
((m->status & MCI_STATUS_PCC) ? "yes" : "no"));
/* do the two bits[14:13] together */
@@ -426,11 +425,15 @@
static int __init mce_amd_init(void)
{
/*
- * We can decode MCEs for Opteron and later CPUs:
+ * We can decode MCEs for K8, F10h and F11h CPUs:
*/
- if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
- (boot_cpu_data.x86 >= 0xf))
- atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb);
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ return 0;
+
+ if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
+ return 0;
+
+ atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb);
return 0;
}
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index e0187d1..0fd5b85 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1140,6 +1140,7 @@
ATTR_COUNTER(0),
ATTR_COUNTER(1),
ATTR_COUNTER(2),
+ { .attr = { .name = NULL } }
};
static struct mcidev_sysfs_group i7core_udimm_counters = {
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index ca7ca56..b42a0bd 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -81,6 +81,10 @@
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(t, &card->transaction_list, link) {
if (t == transaction) {
+ if (!del_timer(&t->split_timeout_timer)) {
+ spin_unlock_irqrestore(&card->lock, flags);
+ goto timed_out;
+ }
list_del_init(&t->link);
card->tlabel_mask &= ~(1ULL << t->tlabel);
break;
@@ -89,11 +93,11 @@
spin_unlock_irqrestore(&card->lock, flags);
if (&t->link != &card->transaction_list) {
- del_timer_sync(&t->split_timeout_timer);
t->callback(card, rcode, NULL, 0, t->callback_data);
return 0;
}
+ timed_out:
return -ENOENT;
}
@@ -921,6 +925,10 @@
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(t, &card->transaction_list, link) {
if (t->node_id == source && t->tlabel == tlabel) {
+ if (!del_timer(&t->split_timeout_timer)) {
+ spin_unlock_irqrestore(&card->lock, flags);
+ goto timed_out;
+ }
list_del_init(&t->link);
card->tlabel_mask &= ~(1ULL << t->tlabel);
break;
@@ -929,6 +937,7 @@
spin_unlock_irqrestore(&card->lock, flags);
if (&t->link == &card->transaction_list) {
+ timed_out:
fw_notify("Unsolicited response (source %x, tlabel %x)\n",
source, tlabel);
return;
@@ -963,8 +972,6 @@
break;
}
- del_timer_sync(&t->split_timeout_timer);
-
/*
* The response handler may be executed while the request handler
* is still pending. Cancel the request handler.
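Both transaction lookups above now claim a transaction by deleting its split-timeout timer while still under card->lock; when del_timer() returns 0 the timeout handler has already fired and owns the completion, so the late response is dropped instead of being completed twice. A generic sketch of using del_timer()'s return value as the ownership test:

    struct example_txn {
            struct timer_list timer;
            struct list_head link;
    };

    /* true: we won the race and may complete @t;
     * false: the timeout handler already fired and owns @t */
    static bool example_claim(struct example_txn *t, spinlock_t *lock)
    {
            unsigned long flags;
            bool claimed;

            spin_lock_irqsave(lock, flags);
            claimed = del_timer(&t->timer) != 0;
            if (claimed)
                    list_del_init(&t->link);
            spin_unlock_irqrestore(lock, flags);

            return claimed;
    }
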
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index da17d40..33f8421 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -579,7 +579,7 @@
if (!peer) {
fw_notify("No peer for ARP packet from %016llx\n",
(unsigned long long)peer_guid);
- goto failed_proto;
+ goto no_peer;
}
/*
@@ -656,7 +656,7 @@
return 0;
- failed_proto:
+ no_peer:
net->stats.rx_errors++;
net->stats.rx_dropped++;
@@ -664,7 +664,7 @@
if (netif_queue_stopped(net))
netif_wake_queue(net);
- return 0;
+ return -ENOENT;
}
static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
@@ -701,7 +701,7 @@
fw_error("out of memory\n");
net->stats.rx_dropped++;
- return -1;
+ return -ENOMEM;
}
skb_reserve(skb, (net->hard_header_len + 15) & ~15);
memcpy(skb_put(skb, len), buf, len);
@@ -726,8 +726,10 @@
spin_lock_irqsave(&dev->lock, flags);
peer = fwnet_peer_find_by_node_id(dev, source_node_id, generation);
- if (!peer)
- goto bad_proto;
+ if (!peer) {
+ retval = -ENOENT;
+ goto fail;
+ }
pd = fwnet_pd_find(peer, datagram_label);
if (pd == NULL) {
@@ -741,7 +743,7 @@
dg_size, buf, fg_off, len);
if (pd == NULL) {
retval = -ENOMEM;
- goto bad_proto;
+ goto fail;
}
peer->pdg_size++;
} else {
@@ -755,9 +757,9 @@
pd = fwnet_pd_new(net, peer, datagram_label,
dg_size, buf, fg_off, len);
if (pd == NULL) {
- retval = -ENOMEM;
peer->pdg_size--;
- goto bad_proto;
+ retval = -ENOMEM;
+ goto fail;
}
} else {
if (!fwnet_pd_update(peer, pd, buf, fg_off, len)) {
@@ -768,7 +770,8 @@
*/
fwnet_pd_delete(pd);
peer->pdg_size--;
- goto bad_proto;
+ retval = -ENOMEM;
+ goto fail;
}
}
} /* new datagram or add to existing one */
@@ -794,14 +797,13 @@
spin_unlock_irqrestore(&dev->lock, flags);
return 0;
-
- bad_proto:
+ fail:
spin_unlock_irqrestore(&dev->lock, flags);
if (netif_queue_stopped(net))
netif_wake_queue(net);
- return 0;
+ return retval;
}
static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 7f03540..9dcb17d 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -263,6 +263,7 @@
{PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, QUIRK_NO_MSI},
{PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
{PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
+ {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
{PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS},
};
@@ -694,7 +695,15 @@
log_ar_at_event('R', p.speed, p.header, evt);
/*
- * The OHCI bus reset handler synthesizes a phy packet with
+ * Several controllers, notably from NEC and VIA, forget to
+ * write ack_complete status at PHY packet reception.
+ */
+ if (evt == OHCI1394_evt_no_status &&
+ (p.header[0] & 0xff) == (OHCI1394_phy_tcode << 4))
+ p.ack = ACK_COMPLETE;
+
+ /*
+ * The OHCI bus reset handler synthesizes a PHY packet with
* the new generation number when a bus reset happens (see
* section 8.4.2.3). This helps us determine when a request
* was received and make sure we send the response in the same
@@ -2831,7 +2840,7 @@
const struct pci_device_id *ent)
{
struct fw_ohci *ohci;
- u32 bus_options, max_receive, link_speed, version, link_enh;
+ u32 bus_options, max_receive, link_speed, version;
u64 guid;
int i, err, n_ir, n_it;
size_t size;
@@ -2885,23 +2894,6 @@
if (param_quirks)
ohci->quirks = param_quirks;
- /* TI OHCI-Lynx and compatible: set recommended configuration bits. */
- if (dev->vendor == PCI_VENDOR_ID_TI) {
- pci_read_config_dword(dev, PCI_CFG_TI_LinkEnh, &link_enh);
-
- /* adjust latency of ATx FIFO: use 1.7 KB threshold */
- link_enh &= ~TI_LinkEnh_atx_thresh_mask;
- link_enh |= TI_LinkEnh_atx_thresh_1_7K;
-
- /* use priority arbitration for asynchronous responses */
- link_enh |= TI_LinkEnh_enab_unfair;
-
- /* required for aPhyEnhanceEnable to work */
- link_enh |= TI_LinkEnh_enab_accel;
-
- pci_write_config_dword(dev, PCI_CFG_TI_LinkEnh, link_enh);
- }
-
ar_context_init(&ohci->ar_request_ctx, ohci,
OHCI1394_AsReqRcvContextControlSet);
diff --git a/drivers/firewire/ohci.h b/drivers/firewire/ohci.h
index 0e6c5a4..ef5e733 100644
--- a/drivers/firewire/ohci.h
+++ b/drivers/firewire/ohci.h
@@ -155,12 +155,4 @@
#define OHCI1394_phy_tcode 0xe
-/* TI extensions */
-
-#define PCI_CFG_TI_LinkEnh 0xf4
-#define TI_LinkEnh_enab_accel 0x00000002
-#define TI_LinkEnh_enab_unfair 0x00000080
-#define TI_LinkEnh_atx_thresh_mask 0x00003000
-#define TI_LinkEnh_atx_thresh_1_7K 0x00001000
-
#endif /* _FIREWIRE_OHCI_H */
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 9f76171..bfae4b3 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -450,7 +450,7 @@
if (&orb->link != &lu->orb_list) {
orb->callback(orb, &status);
- kref_put(&orb->kref, free_orb);
+ kref_put(&orb->kref, free_orb); /* orb callback reference */
} else {
fw_error("status write for unknown orb\n");
}
@@ -472,20 +472,28 @@
* So this callback only sets the rcode if it hasn't already
* been set and only does the cleanup if the transaction
* failed and we didn't already get a status write.
+ *
+ * Here we treat RCODE_CANCELLED like RCODE_COMPLETE because some
+ * OXUF936QSE firmwares occasionally respond after Split_Timeout and
+ * complete the ORB just fine. Note, we also get RCODE_CANCELLED
+ * from sbp2_cancel_orbs() if fw_cancel_transaction() == 0.
*/
spin_lock_irqsave(&card->lock, flags);
if (orb->rcode == -1)
orb->rcode = rcode;
- if (orb->rcode != RCODE_COMPLETE) {
+
+ if (orb->rcode != RCODE_COMPLETE && orb->rcode != RCODE_CANCELLED) {
list_del(&orb->link);
spin_unlock_irqrestore(&card->lock, flags);
+
orb->callback(orb, NULL);
+ kref_put(&orb->kref, free_orb); /* orb callback reference */
} else {
spin_unlock_irqrestore(&card->lock, flags);
}
- kref_put(&orb->kref, free_orb);
+ kref_put(&orb->kref, free_orb); /* transaction callback reference */
}
static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
@@ -501,9 +509,8 @@
list_add_tail(&orb->link, &lu->orb_list);
spin_unlock_irqrestore(&device->card->lock, flags);
- /* Take a ref for the orb list and for the transaction callback. */
- kref_get(&orb->kref);
- kref_get(&orb->kref);
+ kref_get(&orb->kref); /* transaction callback reference */
+ kref_get(&orb->kref); /* orb callback reference */
fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST,
node_id, generation, device->max_speed, offset,
@@ -525,11 +532,11 @@
list_for_each_entry_safe(orb, next, &list, link) {
retval = 0;
- if (fw_cancel_transaction(device->card, &orb->t) == 0)
- continue;
+ fw_cancel_transaction(device->card, &orb->t);
orb->rcode = RCODE_CANCELLED;
orb->callback(orb, NULL);
+ kref_put(&orb->kref, free_orb); /* orb callback reference */
}
return retval;
diff --git a/drivers/gpio/sx150x.c b/drivers/gpio/sx150x.c
index b42f42c..823559a 100644
--- a/drivers/gpio/sx150x.c
+++ b/drivers/gpio/sx150x.c
@@ -459,16 +459,32 @@
return err;
}
+static int sx150x_reset(struct sx150x_chip *chip)
+{
+ int err;
+
+ err = i2c_smbus_write_byte_data(chip->client,
+ chip->dev_cfg->reg_reset,
+ 0x12);
+ if (err < 0)
+ return err;
+
+ err = i2c_smbus_write_byte_data(chip->client,
+ chip->dev_cfg->reg_reset,
+ 0x34);
+ return err;
+}
+
static int sx150x_init_hw(struct sx150x_chip *chip,
struct sx150x_platform_data *pdata)
{
int err = 0;
- err = i2c_smbus_write_word_data(chip->client,
- chip->dev_cfg->reg_reset,
- 0x3412);
- if (err < 0)
- return err;
+ if (pdata->reset_during_probe) {
+ err = sx150x_reset(chip);
+ if (err < 0)
+ return err;
+ }
err = sx150x_i2c_write(chip->client,
chip->dev_cfg->reg_misc,
diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c
index 55d03ed..529a0db 100644
--- a/drivers/gpu/drm/drm_buffer.c
+++ b/drivers/gpu/drm/drm_buffer.c
@@ -98,8 +98,8 @@
 * user_data: A pointer to the data that is copied to the buffer.
 * size: The number of bytes to copy.
*/
-extern int drm_buffer_copy_from_user(struct drm_buffer *buf,
- void __user *user_data, int size)
+int drm_buffer_copy_from_user(struct drm_buffer *buf,
+ void __user *user_data, int size)
{
int nr_pages = size / PAGE_SIZE + 1;
int idx;
@@ -163,7 +163,7 @@
{
int idx = drm_buffer_index(buf);
int page = drm_buffer_page(buf);
- void *obj = 0;
+ void *obj = NULL;
if (idx + objsize <= PAGE_SIZE) {
obj = &buf->data[page][idx];
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 7e31d43..dcbeb98 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -34,6 +34,9 @@
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
+static bool drm_kms_helper_poll = true;
+module_param_named(poll, drm_kms_helper_poll, bool, 0600);
+
static void drm_mode_validate_flag(struct drm_connector *connector,
int flags)
{
@@ -99,8 +102,10 @@
connector->status = connector_status_disconnected;
if (connector->funcs->force)
connector->funcs->force(connector);
- } else
- connector->status = connector->funcs->detect(connector);
+ } else {
+ connector->status = connector->funcs->detect(connector, true);
+ drm_kms_helper_poll_enable(dev);
+ }
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
@@ -110,11 +115,10 @@
}
count = (*connector_funcs->get_modes)(connector);
- if (!count) {
+ if (count == 0 && connector->status == connector_status_connected)
count = drm_add_modes_noedid(connector, 1024, 768);
- if (!count)
- return 0;
- }
+ if (count == 0)
+ goto prune;
drm_mode_connector_list_update(connector);
@@ -633,13 +637,13 @@
mode_changed = true;
if (mode_changed) {
- old_fb = set->crtc->fb;
- set->crtc->fb = set->fb;
set->crtc->enabled = (set->mode != NULL);
if (set->mode != NULL) {
DRM_DEBUG_KMS("attempting to set mode from"
" userspace\n");
drm_mode_debug_printmodeline(set->mode);
+ old_fb = set->crtc->fb;
+ set->crtc->fb = set->fb;
if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
set->x, set->y,
old_fb)) {
@@ -840,6 +844,9 @@
enum drm_connector_status old_status, status;
bool repoll = false, changed = false;
+ if (!drm_kms_helper_poll)
+ return;
+
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -859,7 +866,7 @@
!(connector->polled & DRM_CONNECTOR_POLL_HPD))
continue;
- status = connector->funcs->detect(connector);
+ status = connector->funcs->detect(connector, false);
if (old_status != status)
changed = true;
}
@@ -890,6 +897,9 @@
bool poll = false;
struct drm_connector *connector;
+ if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
+ return;
+
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->polled)
poll = true;
@@ -919,8 +929,10 @@
{
if (!dev->mode_config.poll_enabled)
return;
+
/* kill timer and schedule immediate execution, this doesn't block */
cancel_delayed_work(&dev->mode_config.output_poll_work);
- queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
+ if (drm_kms_helper_poll)
+ queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 90288ec..84da748 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -55,6 +55,9 @@
static int drm_version(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
+ [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0}
+
/** Ioctl table */
static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
@@ -421,6 +424,7 @@
int retcode = -EINVAL;
char stack_kdata[128];
char *kdata = NULL;
+ unsigned int usize, asize;
dev = file_priv->minor->dev;
atomic_inc(&dev->ioctl_count);
@@ -436,11 +440,18 @@
((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
goto err_i1;
if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
- (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
+ (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
+ u32 drv_size;
ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+ drv_size = _IOC_SIZE(ioctl->cmd_drv);
+ usize = asize = _IOC_SIZE(cmd);
+ if (drv_size > asize)
+ asize = drv_size;
+ }
else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
ioctl = &drm_ioctls[nr];
cmd = ioctl->cmd;
+ usize = asize = _IOC_SIZE(cmd);
} else
goto err_i1;
@@ -460,10 +471,10 @@
retcode = -EACCES;
} else {
if (cmd & (IOC_IN | IOC_OUT)) {
- if (_IOC_SIZE(cmd) <= sizeof(stack_kdata)) {
+ if (asize <= sizeof(stack_kdata)) {
kdata = stack_kdata;
} else {
- kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
+ kdata = kmalloc(asize, GFP_KERNEL);
if (!kdata) {
retcode = -ENOMEM;
goto err_i1;
@@ -473,11 +484,13 @@
if (cmd & IOC_IN) {
if (copy_from_user(kdata, (void __user *)arg,
- _IOC_SIZE(cmd)) != 0) {
+ usize) != 0) {
retcode = -EFAULT;
goto err_i1;
}
- }
+ } else
+ memset(kdata, 0, usize);
+
if (ioctl->flags & DRM_UNLOCKED)
retcode = func(dev, kdata, file_priv);
else {
@@ -488,7 +501,7 @@
if (cmd & IOC_OUT) {
if (copy_to_user((void __user *)arg, kdata,
- _IOC_SIZE(cmd)) != 0)
+ usize) != 0)
retcode = -EFAULT;
}
}
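The ioctl path above separates the size userspace encoded in cmd (usize) from the size the driver declared in cmd_drv (asize): the kernel buffer is sized for the larger asize, but only usize bytes cross the user boundary. A hedged sketch of the copy-in discipline (this variant additionally zeroes the tail when the driver struct has grown past what old userspace passed):

    static int example_fetch_args(void *kdata, void __user *uarg,
                                  unsigned int usize, unsigned int asize,
                                  unsigned int cmd)
    {
            if (cmd & IOC_IN) {
                    if (copy_from_user(kdata, uarg, usize))
                            return -EFAULT;
                    if (asize > usize)      /* old userspace, grown struct */
                            memset(kdata + usize, 0, asize - usize);
            } else {
                    memset(kdata, 0, usize);
            }
            return 0;
    }
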
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index de82e20..6a5e403 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -94,10 +94,11 @@
int i;
enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
struct drm_fb_helper_cmdline_mode *cmdline_mode;
- struct drm_connector *connector = fb_helper_conn->connector;
+ struct drm_connector *connector;
if (!fb_helper_conn)
return false;
+ connector = fb_helper_conn->connector;
cmdline_mode = &fb_helper_conn->cmdline_mode;
if (!mode_option)
@@ -369,7 +370,7 @@
}
static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
-static void drm_fb_helper_sysrq(int dummy1, struct tty_struct *dummy3)
+static void drm_fb_helper_sysrq(int dummy1)
{
schedule_work(&drm_fb_helper_restore_work);
}
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 3a652a6..b744dad 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -41,6 +41,7 @@
/* from BKL pushdown: note that nothing else serializes idr_find() */
DEFINE_MUTEX(drm_global_mutex);
+EXPORT_SYMBOL(drm_global_mutex);
static int drm_open_helper(struct inode *inode, struct file *filp,
struct drm_device * dev);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index bf92d07..5663d27 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -148,7 +148,7 @@
return -ENOMEM;
kref_init(&obj->refcount);
- kref_init(&obj->handlecount);
+ atomic_set(&obj->handle_count, 0);
obj->size = size;
atomic_inc(&dev->object_count);
@@ -462,28 +462,6 @@
}
EXPORT_SYMBOL(drm_gem_object_free);
-/**
- * Called after the last reference to the object has been lost.
- * Must be called without holding struct_mutex
- *
- * Frees the object
- */
-void
-drm_gem_object_free_unlocked(struct kref *kref)
-{
- struct drm_gem_object *obj = (struct drm_gem_object *) kref;
- struct drm_device *dev = obj->dev;
-
- if (dev->driver->gem_free_object_unlocked != NULL)
- dev->driver->gem_free_object_unlocked(obj);
- else if (dev->driver->gem_free_object != NULL) {
- mutex_lock(&dev->struct_mutex);
- dev->driver->gem_free_object(obj);
- mutex_unlock(&dev->struct_mutex);
- }
-}
-EXPORT_SYMBOL(drm_gem_object_free_unlocked);
-
static void drm_gem_object_ref_bug(struct kref *list_kref)
{
BUG();
@@ -496,12 +474,8 @@
* called before drm_gem_object_free or we'll be touching
* freed memory
*/
-void
-drm_gem_object_handle_free(struct kref *kref)
+void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
- struct drm_gem_object *obj = container_of(kref,
- struct drm_gem_object,
- handlecount);
struct drm_device *dev = obj->dev;
/* Remove any name for this object */
@@ -528,6 +502,10 @@
struct drm_gem_object *obj = vma->vm_private_data;
drm_gem_object_reference(obj);
+
+ mutex_lock(&obj->dev->struct_mutex);
+ drm_vm_open_locked(vma);
+ mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);
@@ -535,7 +513,10 @@
{
struct drm_gem_object *obj = vma->vm_private_data;
- drm_gem_object_unreference_unlocked(obj);
+ mutex_lock(&obj->dev->struct_mutex);
+ drm_vm_close_locked(vma);
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 2ef2c78..974e970 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -255,7 +255,7 @@
seq_printf(m, "%6d %8zd %7d %8d\n",
obj->name, obj->size,
- atomic_read(&obj->handlecount.refcount),
+ atomic_read(&obj->handle_count),
atomic_read(&obj->refcount.refcount));
return 0;
}
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index e2f70a5..9bf93bc 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -92,7 +92,9 @@
}
/* Contention */
+ mutex_unlock(&drm_global_mutex);
schedule();
+ mutex_lock(&drm_global_mutex);
if (signal_pending(current)) {
ret = -EINTR;
break;
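Sleeping in schedule() with drm_global_mutex held would stall every other DRM ioctl for as long as the lock stays contended, so the loop above drops the mutex across the sleep and retakes it before rechecking. The general shape, with hypothetical wq, lock_data and try_acquire() names:

    DEFINE_WAIT(wait);
    int ret = 0;

    for (;;) {
            prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
            if (try_acquire(lock_data))       /* hypothetical predicate */
                    break;
            mutex_unlock(&drm_global_mutex);  /* never sleep holding it */
            schedule();
            mutex_lock(&drm_global_mutex);
            if (signal_pending(current)) {
                    ret = -EINTR;
                    break;
            }
    }
    finish_wait(&wq, &wait);
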
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index da99edc..a6bfc30 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -285,21 +285,21 @@
EXPORT_SYMBOL(drm_mm_put_block);
-static int check_free_mm_node(struct drm_mm_node *entry, unsigned long size,
- unsigned alignment)
+static int check_free_hole(unsigned long start, unsigned long end,
+ unsigned long size, unsigned alignment)
{
unsigned wasted = 0;
- if (entry->size < size)
+ if (end - start < size)
return 0;
if (alignment) {
- register unsigned tmp = entry->start % alignment;
+ unsigned tmp = start % alignment;
if (tmp)
wasted = alignment - tmp;
}
- if (entry->size >= size + wasted) {
+ if (end >= start + size + wasted) {
return 1;
}
@@ -320,7 +320,8 @@
best_size = ~0UL;
list_for_each_entry(entry, &mm->free_stack, free_stack) {
- if (!check_free_mm_node(entry, size, alignment))
+ if (!check_free_hole(entry->start, entry->start + entry->size,
+ size, alignment))
continue;
if (!best_match)
@@ -353,10 +354,12 @@
best_size = ~0UL;
list_for_each_entry(entry, &mm->free_stack, free_stack) {
- if (entry->start > end || (entry->start+entry->size) < start)
- continue;
+ unsigned long adj_start = entry->start < start ?
+ start : entry->start;
+ unsigned long adj_end = entry->start + entry->size > end ?
+ end : entry->start + entry->size;
- if (!check_free_mm_node(entry, size, alignment))
+ if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
if (!best_match)
@@ -449,7 +452,8 @@
node->free_stack.prev = prev_free;
node->free_stack.next = next_free;
- if (check_free_mm_node(node, mm->scan_size, mm->scan_alignment)) {
+ if (check_free_hole(node->start, node->start + node->size,
+ mm->scan_size, mm->scan_alignment)) {
mm->scan_hit_start = node->start;
mm->scan_hit_size = node->size;
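check_free_hole() above takes an explicit [start, end) range instead of a node, letting range-restricted searches clamp each hole to the allowed window before testing the fit; the alignment cost is the padding needed to round start up to the next boundary. The arithmetic in isolation:

    /* Does [start, end) hold size bytes at the given alignment? */
    static int example_hole_fits(unsigned long start, unsigned long end,
                                 unsigned long size, unsigned alignment)
    {
            unsigned long wasted = 0;

            if (alignment) {
                    unsigned long rem = start % alignment;
                    if (rem)
                            wasted = alignment - rem;  /* pad to boundary */
            }
            /* e.g. start = 0x1003, alignment = 16 -> wasted = 13 */
            return end >= start + size + wasted;
    }
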
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index f1f473e..949326d 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -251,7 +251,10 @@
drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK;
/* Fill in HSync values */
drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2;
- drm_mode->hsync_start = drm_mode->hsync_end = CVT_RB_H_SYNC;
+ drm_mode->hsync_start = drm_mode->hsync_end - CVT_RB_H_SYNC;
+ /* Fill in VSync values */
+ drm_mode->vsync_start = drm_mode->vdisplay + CVT_RB_VFPORCH;
+ drm_mode->vsync_end = drm_mode->vsync_start + vsync;
}
/* 15/13. Find pixel clock frequency (kHz for xf86) */
drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod;
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index e20f78b..f5bd9e5 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -164,6 +164,8 @@
dev->hose = pdev->sysdata;
#endif
+ mutex_lock(&drm_global_mutex);
+
if ((ret = drm_fill_in_dev(dev, ent, driver))) {
printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
goto err_g2;
@@ -199,6 +201,7 @@
driver->name, driver->major, driver->minor, driver->patchlevel,
driver->date, pci_name(pdev), dev->primary->index);
+ mutex_unlock(&drm_global_mutex);
return 0;
err_g4:
@@ -210,6 +213,7 @@
pci_disable_device(pdev);
err_g1:
kfree(dev);
+ mutex_unlock(&drm_global_mutex);
return ret;
}
EXPORT_SYMBOL(drm_get_pci_dev);
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index 460e9a3..92d1d0f 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -53,6 +53,8 @@
dev->platformdev = platdev;
dev->dev = &platdev->dev;
+ mutex_lock(&drm_global_mutex);
+
ret = drm_fill_in_dev(dev, NULL, driver);
if (ret) {
@@ -87,6 +89,8 @@
list_add_tail(&dev->driver_item, &driver->device_list);
+ mutex_unlock(&drm_global_mutex);
+
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
driver->name, driver->major, driver->minor, driver->patchlevel,
driver->date, dev->primary->index);
@@ -100,6 +104,7 @@
drm_put_minor(&dev->control);
err_g1:
kfree(dev);
+ mutex_unlock(&drm_global_mutex);
return ret;
}
EXPORT_SYMBOL(drm_get_platform_dev);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 86118a7..85da4c4 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -159,7 +159,7 @@
struct drm_connector *connector = to_drm_connector(device);
enum drm_connector_status status;
- status = connector->funcs->detect(connector);
+ status = connector->funcs->detect(connector, true);
return snprintf(buf, PAGE_SIZE, "%s\n",
drm_get_connector_status_name(status));
}
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 3778360..5df4506 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -138,7 +138,7 @@
break;
}
- if (!agpmem)
+ if (&agpmem->head == &dev->agp->memory)
goto vm_fault_error;
/*
@@ -433,6 +433,25 @@
mutex_unlock(&dev->struct_mutex);
}
+void drm_vm_close_locked(struct vm_area_struct *vma)
+{
+ struct drm_file *priv = vma->vm_file->private_data;
+ struct drm_device *dev = priv->minor->dev;
+ struct drm_vma_entry *pt, *temp;
+
+ DRM_DEBUG("0x%08lx,0x%08lx\n",
+ vma->vm_start, vma->vm_end - vma->vm_start);
+ atomic_dec(&dev->vma_count);
+
+ list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
+ if (pt->vma == vma) {
+ list_del(&pt->head);
+ kfree(pt);
+ break;
+ }
+ }
+}
+
/**
* \c close method for all virtual memory types.
*
@@ -445,20 +464,9 @@
{
struct drm_file *priv = vma->vm_file->private_data;
struct drm_device *dev = priv->minor->dev;
- struct drm_vma_entry *pt, *temp;
-
- DRM_DEBUG("0x%08lx,0x%08lx\n",
- vma->vm_start, vma->vm_end - vma->vm_start);
- atomic_dec(&dev->vma_count);
mutex_lock(&dev->struct_mutex);
- list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
- if (pt->vma == vma) {
- list_del(&pt->head);
- kfree(pt);
- break;
- }
- }
+ drm_vm_close_locked(vma);
mutex_unlock(&dev->struct_mutex);
}
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 0e6c131..fb07e73 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -116,7 +116,7 @@
static const struct file_operations i810_buffer_fops = {
.open = drm_open,
.release = drm_release,
- .unlocked_ioctl = drm_ioctl,
+ .unlocked_ioctl = i810_ioctl,
.mmap = i810_mmap_buffers,
.fasync = drm_fasync,
};
@@ -1255,21 +1255,21 @@
}
struct drm_ioctl_desc i810_ioctls[] = {
- DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I810_GETAGE, i810_getage, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I810_GETBUF, i810_getbuf, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I810_SWAP, i810_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I810_COPY, i810_copybuf, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I810_DOCOPY, i810_docopy, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I810_OV0INFO, i810_ov0_info, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I810_FSTATUS, i810_fstatus, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I810_OV0FLIP, i810_ov0_flip, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I810_RSTATUS, i810_rstatus, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_FLUSH, i810_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_GETAGE, i810_getage, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_GETBUF, i810_getbuf, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_SWAP, i810_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_COPY, i810_copybuf, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_DOCOPY, i810_docopy, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_OV0INFO, i810_ov0_info, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_FSTATUS, i810_fstatus, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_OV0FLIP, i810_ov0_flip, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_RSTATUS, i810_rstatus, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
};
int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
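
The DRM_IOCTL_DEF_DRV() conversion (the macro itself lives in drmP.h,
outside this diff) makes the driver name the ioctl suffix exactly once, so
the table index and the full command number are derived from the same token
and cannot drift apart. A sketch of the shape such a macro takes; the real
definition may differ in detail:

        #define DEMO_IOCTL_DEF_DRV(ioctl, _func, _flags)                \
                [DRM_IOCTL_NR(DRM_##ioctl) - DRM_COMMAND_BASE] = {      \
                        .cmd = DRM_##ioctl,                             \
                        .func = _func,                                  \
                        .flags = _flags,                                \
                        .cmd_drv = DRM_IOCTL_##ioctl,                   \
                }
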
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c
index 5168862..cc92c7e 100644
--- a/drivers/gpu/drm/i830/i830_dma.c
+++ b/drivers/gpu/drm/i830/i830_dma.c
@@ -118,7 +118,7 @@
static const struct file_operations i830_buffer_fops = {
.open = drm_open,
.release = drm_release,
- .unlocked_ioctl = drm_ioctl,
+ .unlocked_ioctl = i830_ioctl,
.mmap = i830_mmap_buffers,
.fasync = drm_fasync,
};
@@ -1524,20 +1524,20 @@
}
struct drm_ioctl_desc i830_ioctls[] = {
- DRM_IOCTL_DEF(DRM_I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I830_VERTEX, i830_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I830_CLEAR, i830_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I830_FLUSH, i830_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I830_GETAGE, i830_getage, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I830_GETBUF, i830_getbuf, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I830_SWAP, i830_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I830_COPY, i830_copybuf, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I830_DOCOPY, i830_docopy, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I830_FLIP, i830_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I830_GETPARAM, i830_getparam, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I830_SETPARAM, i830_setparam, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_VERTEX, i830_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_CLEAR, i830_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_FLUSH, i830_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_GETAGE, i830_getage, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_GETBUF, i830_getbuf, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_SWAP, i830_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_COPY, i830_copybuf, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_DOCOPY, i830_docopy, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_FLIP, i830_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_GETPARAM, i830_getparam, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I830_SETPARAM, i830_setparam, DRM_AUTH|DRM_UNLOCKED),
};
int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 92d5605..5e43d70 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -31,6 +31,7 @@
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
+#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
@@ -121,6 +122,54 @@
return 0;
}
+static int i915_gem_pageflip_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ unsigned long flags;
+ struct intel_crtc *crtc;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+ const char *pipe = crtc->pipe ? "B" : "A";
+ const char *plane = crtc->plane ? "B" : "A";
+ struct intel_unpin_work *work;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ work = crtc->unpin_work;
+ if (work == NULL) {
+ seq_printf(m, "No flip due on pipe %s (plane %s)\n",
+ pipe, plane);
+ } else {
+ if (!work->pending) {
+ seq_printf(m, "Flip queued on pipe %s (plane %s)\n",
+ pipe, plane);
+ } else {
+ seq_printf(m, "Flip pending (waiting for vsync) on pipe %s (plane %s)\n",
+ pipe, plane);
+ }
+ if (work->enable_stall_check)
+ seq_printf(m, "Stall check enabled, ");
+ else
+ seq_printf(m, "Stall check waiting for page flip ioctl, ");
+ seq_printf(m, "%d prepares\n", work->pending);
+
+ if (work->old_fb_obj) {
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj);
+ if(obj_priv)
+ seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
+ }
+ if (work->pending_flip_obj) {
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj);
+ if(obj_priv)
+ seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
+ }
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+
+ return 0;
+}
+
static int i915_gem_request_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -777,6 +826,7 @@
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
+ {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
{"i915_gem_request", i915_gem_request_info, 0},
{"i915_gem_seqno", i915_gem_seqno_info, 0},
{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 44af317..2dd2c93 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -620,8 +620,10 @@
ret = copy_from_user(cliprects, batch->cliprects,
batch->num_cliprects *
sizeof(struct drm_clip_rect));
- if (ret != 0)
+ if (ret != 0) {
+ ret = -EFAULT;
goto fail_free;
+ }
}
mutex_lock(&dev->struct_mutex);
@@ -662,8 +664,10 @@
return -ENOMEM;
ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
- if (ret != 0)
+ if (ret != 0) {
+ ret = -EFAULT;
goto fail_batch_free;
+ }
if (cmdbuf->num_cliprects) {
cliprects = kcalloc(cmdbuf->num_cliprects,
@@ -676,8 +680,10 @@
ret = copy_from_user(cliprects, cmdbuf->cliprects,
cmdbuf->num_cliprects *
sizeof(struct drm_clip_rect));
- if (ret != 0)
+ if (ret != 0) {
+ ret = -EFAULT;
goto fail_clip_free;
+ }
}
mutex_lock(&dev->struct_mutex);
@@ -885,7 +891,7 @@
int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
u32 temp_lo, temp_hi = 0;
u64 mchbar_addr;
- int ret = 0;
+ int ret;
if (IS_I965G(dev))
pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
@@ -895,22 +901,23 @@
/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
if (mchbar_addr &&
- pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
- ret = 0;
- goto out;
- }
+ pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
+ return 0;
#endif
/* Get some space for it */
- ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res,
+ dev_priv->mch_res.name = "i915 MCHBAR";
+ dev_priv->mch_res.flags = IORESOURCE_MEM;
+ ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
+ &dev_priv->mch_res,
MCHBAR_SIZE, MCHBAR_SIZE,
PCIBIOS_MIN_MEM,
- 0, pcibios_align_resource,
+ 0, pcibios_align_resource,
dev_priv->bridge_dev);
if (ret) {
DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
dev_priv->mch_res.start = 0;
- goto out;
+ return ret;
}
if (IS_I965G(dev))
@@ -919,8 +926,7 @@
pci_write_config_dword(dev_priv->bridge_dev, reg,
lower_32_bits(dev_priv->mch_res.start));
-out:
- return ret;
+ return 0;
}
/* Setup MCHBAR if possible, return true if we should disable it again */
@@ -1781,9 +1787,9 @@
}
}
- div_u64(diff, diff1);
+ diff = div_u64(diff, diff1);
ret = ((m * diff) + c);
- div_u64(ret, 10);
+ ret = div_u64(ret, 10);
dev_priv->last_count1 = total_count;
dev_priv->last_time1 = now;
@@ -1852,7 +1858,7 @@
/* More magic constants... */
diff = diff * 1181;
- div_u64(diff, diffms * 10);
+ diff = div_u64(diff, diffms * 10);
dev_priv->gfx_power = diff;
}
@@ -2082,6 +2088,10 @@
goto free_priv;
}
+ /* overlay on gen2 is broken and can't address above 1G */
+ if (IS_GEN2(dev))
+ dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
+
dev_priv->regs = ioremap(base, size);
if (!dev_priv->regs) {
DRM_ERROR("failed to map registers\n");
@@ -2221,6 +2231,9 @@
dev_priv->mchdev_lock = &mchdev_lock;
spin_unlock(&mchdev_lock);
+ /* XXX Prevent module unload due to memory corruption bugs. */
+ __module_get(THIS_MODULE);
+
return 0;
out_workqueue_free:
@@ -2367,46 +2380,46 @@
}
struct drm_ioctl_desc i915_ioctls[] = {
- DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
- DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
- DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ),
- DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_ALLOC, i915_mem_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_FREE, i915_mem_free, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
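
Two recurring fixes in this file deserve a gloss. copy_from_user() returns
the number of bytes it failed to copy, never an errno, so propagating its
result straight out of an ioctl hands userspace a meaningless positive
count; the hunks above translate any short copy to -EFAULT. Similarly,
div_u64() returns the quotient rather than dividing in place, so a bare
call silently discards the result. In miniature:

        if (copy_from_user(dst, src, len))      /* non-zero: short copy */
                return -EFAULT;                 /* translate explicitly */

        diff = div_u64(diff, diff1);            /* must assign: div_u64()
                                                 * leaves diff untouched */
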
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 00befce..6dbe14c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -61,91 +61,86 @@
.driver_data = (unsigned long) info }
static const struct intel_device_info intel_i830_info = {
- .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+ .gen = 2, .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
};
static const struct intel_device_info intel_845g_info = {
- .is_i8xx = 1,
+ .gen = 2, .is_i8xx = 1,
};
static const struct intel_device_info intel_i85x_info = {
- .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
+ .gen = 2, .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
.cursor_needs_physical = 1,
};
static const struct intel_device_info intel_i865g_info = {
- .is_i8xx = 1,
+ .gen = 2, .is_i8xx = 1,
};
static const struct intel_device_info intel_i915g_info = {
- .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
+ .gen = 3, .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
};
static const struct intel_device_info intel_i915gm_info = {
- .is_i9xx = 1, .is_mobile = 1,
+ .gen = 3, .is_i9xx = 1, .is_mobile = 1,
.cursor_needs_physical = 1,
};
static const struct intel_device_info intel_i945g_info = {
- .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
+ .gen = 3, .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
};
static const struct intel_device_info intel_i945gm_info = {
- .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
+ .gen = 3, .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
.has_hotplug = 1, .cursor_needs_physical = 1,
};
static const struct intel_device_info intel_i965g_info = {
- .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
+ .gen = 4, .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1,
+ .has_hotplug = 1,
};
static const struct intel_device_info intel_i965gm_info = {
- .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
- .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
- .has_hotplug = 1,
+ .gen = 4, .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
+ .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
};
static const struct intel_device_info intel_g33_info = {
- .is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1,
- .has_hotplug = 1,
+ .gen = 3, .is_g33 = 1, .is_i9xx = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
};
static const struct intel_device_info intel_g45_info = {
- .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
- .has_pipe_cxsr = 1,
- .has_hotplug = 1,
+ .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+ .has_pipe_cxsr = 1, .has_hotplug = 1,
};
static const struct intel_device_info intel_gm45_info = {
- .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
+ .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
- .has_pipe_cxsr = 1,
- .has_hotplug = 1,
+ .has_pipe_cxsr = 1, .has_hotplug = 1,
};
static const struct intel_device_info intel_pineview_info = {
- .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
- .need_gfx_hws = 1,
- .has_hotplug = 1,
+ .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
};
static const struct intel_device_info intel_ironlake_d_info = {
- .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
- .has_pipe_cxsr = 1,
- .has_hotplug = 1,
+ .gen = 5, .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1,
+ .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
};
static const struct intel_device_info intel_ironlake_m_info = {
- .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
- .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
- .has_hotplug = 1,
+ .gen = 5, .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
+ .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
};
static const struct intel_device_info intel_sandybridge_d_info = {
- .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
- .has_hotplug = 1, .is_gen6 = 1,
+ .gen = 6, .is_i965g = 1, .is_i9xx = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
};
static const struct intel_device_info intel_sandybridge_m_info = {
- .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1,
- .has_hotplug = 1, .is_gen6 = 1,
+ .gen = 6, .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
};
static const struct pci_device_id pciidlist[] = { /* aka */
@@ -175,13 +170,18 @@
INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */
INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */
INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */
+ INTEL_VGA_DEVICE(0x2e92, &intel_g45_info), /* B43_G.1 */
INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
+ INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
+ INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
+ INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
+ INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
{0, 0, 0}
};
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 047cd7c..af4a263 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -191,6 +191,7 @@
};
struct intel_device_info {
+ u8 gen;
u8 is_mobile : 1;
u8 is_i8xx : 1;
u8 is_i85x : 1;
@@ -206,7 +207,6 @@
u8 is_broadwater : 1;
u8 is_crestline : 1;
u8 is_ironlake : 1;
- u8 is_gen6 : 1;
u8 has_fbc : 1;
u8 has_rc6 : 1;
u8 has_pipe_cxsr : 1;
@@ -1162,7 +1162,6 @@
#define IS_845G(dev) ((dev)->pci_device == 0x2562)
#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
-#define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx)
#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
@@ -1181,27 +1180,13 @@
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
#define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake)
#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx)
-#define IS_GEN6(dev) (INTEL_INFO(dev)->is_gen6)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
-#define IS_GEN3(dev) (IS_I915G(dev) || \
- IS_I915GM(dev) || \
- IS_I945G(dev) || \
- IS_I945GM(dev) || \
- IS_G33(dev) || \
- IS_PINEVIEW(dev))
-#define IS_GEN4(dev) ((dev)->pci_device == 0x2972 || \
- (dev)->pci_device == 0x2982 || \
- (dev)->pci_device == 0x2992 || \
- (dev)->pci_device == 0x29A2 || \
- (dev)->pci_device == 0x2A02 || \
- (dev)->pci_device == 0x2A12 || \
- (dev)->pci_device == 0x2E02 || \
- (dev)->pci_device == 0x2E12 || \
- (dev)->pci_device == 0x2E22 || \
- (dev)->pci_device == 0x2E32 || \
- (dev)->pci_device == 0x2A42 || \
- (dev)->pci_device == 0x2E42)
+#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
+#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
+#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
+#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
+#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
#define HAS_BSD(dev) (IS_IRONLAKE(dev) || IS_G4X(dev))
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
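
Collapsing the per-chipset flags and open-coded PCI-ID lists into one
numeric gen field also permits ordered comparisons, not just equality,
which the display code later in this series leans on:

        /* A numeric generation supports range tests (sketch; the helper
         * names are illustrative): */
        if (INTEL_INFO(dev)->gen >= 4)
                poll_pipeconf_state_bit();      /* gen4 and every later part */
        else
                watch_scanline_counter();       /* gen2/gen3 fallback */
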
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index df5a713..90b1d67 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -34,6 +34,7 @@
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
+#include <linux/intel-gtt.h>
static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
@@ -135,12 +136,13 @@
return -ENOMEM;
ret = drm_gem_handle_create(file_priv, obj, &handle);
+ /* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(obj);
- if (ret)
+ if (ret) {
return ret;
+ }
args->handle = handle;
-
return 0;
}
@@ -467,14 +469,17 @@
return -ENOENT;
obj_priv = to_intel_bo(obj);
- /* Bounds check source.
- *
- * XXX: This could use review for overflow issues...
- */
- if (args->offset > obj->size || args->size > obj->size ||
- args->offset + args->size > obj->size) {
- drm_gem_object_unreference_unlocked(obj);
- return -EINVAL;
+ /* Bounds check source. */
+ if (args->offset > obj->size || args->size > obj->size - args->offset) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (!access_ok(VERIFY_WRITE,
+ (char __user *)(uintptr_t)args->data_ptr,
+ args->size)) {
+ ret = -EFAULT;
+ goto err;
}
if (i915_gem_object_needs_bit17_swizzle(obj)) {
@@ -486,8 +491,8 @@
file_priv);
}
+err:
drm_gem_object_unreference_unlocked(obj);
-
return ret;
}
@@ -576,8 +581,6 @@
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
- if (!access_ok(VERIFY_READ, user_data, remain))
- return -EFAULT;
mutex_lock(&dev->struct_mutex);
@@ -930,14 +933,17 @@
return -ENOENT;
obj_priv = to_intel_bo(obj);
- /* Bounds check destination.
- *
- * XXX: This could use review for overflow issues...
- */
- if (args->offset > obj->size || args->size > obj->size ||
- args->offset + args->size > obj->size) {
- drm_gem_object_unreference_unlocked(obj);
- return -EINVAL;
+ /* Bounds check destination. */
+ if (args->offset > obj->size || args->size > obj->size - args->offset) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (!access_ok(VERIFY_READ,
+ (char __user *)(uintptr_t)args->data_ptr,
+ args->size)) {
+ ret = -EFAULT;
+ goto err;
}
/* We can only do the GTT pwrite on untiled buffers, as otherwise
@@ -971,8 +977,8 @@
DRM_INFO("pwrite failed %d\n", ret);
#endif
+err:
drm_gem_object_unreference_unlocked(obj);
-
return ret;
}
@@ -2347,14 +2353,21 @@
reg->obj = obj;
- if (IS_GEN6(dev))
+ switch (INTEL_INFO(dev)->gen) {
+ case 6:
sandybridge_write_fence_reg(reg);
- else if (IS_I965G(dev))
+ break;
+ case 5:
+ case 4:
i965_write_fence_reg(reg);
- else if (IS_I9XX(dev))
+ break;
+ case 3:
i915_write_fence_reg(reg);
- else
+ break;
+ case 2:
i830_write_fence_reg(reg);
+ break;
+ }
trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
obj_priv->tiling_mode);
@@ -2377,22 +2390,26 @@
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_i915_fence_reg *reg =
&dev_priv->fence_regs[obj_priv->fence_reg];
+ uint32_t fence_reg;
- if (IS_GEN6(dev)) {
+ switch (INTEL_INFO(dev)->gen) {
+ case 6:
I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
(obj_priv->fence_reg * 8), 0);
- } else if (IS_I965G(dev)) {
+ break;
+ case 5:
+ case 4:
I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
- } else {
- uint32_t fence_reg;
-
- if (obj_priv->fence_reg < 8)
- fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
+ break;
+ case 3:
+ if (obj_priv->fence_reg >= 8)
+ fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
else
- fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
- 8) * 4;
+ case 2:
+ fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
I915_WRITE(fence_reg, 0);
+ break;
}
reg->obj = NULL;
@@ -3243,6 +3260,8 @@
(int) reloc->offset,
reloc->read_domains,
reloc->write_domain);
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
return -EINVAL;
}
if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
@@ -3585,6 +3604,7 @@
if (ret != 0) {
DRM_ERROR("copy %d cliprects failed: %d\n",
args->num_cliprects, ret);
+ ret = -EFAULT;
goto pre_mutex_err;
}
}
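
The reworked pread/pwrite bounds check closes an integer-overflow hole:
args->offset + args->size can wrap and sneak past a naive comparison
against obj->size. Once args->offset > obj->size has been ruled out,
obj->size - args->offset cannot underflow, so the second clause is safe.
A minimal demonstration with 32-bit values:

        u32 size = 0x1000, offset = 0x10, len = 0xFFFFFFF8;

        /* Broken form: offset + len wraps to 0x8, so 0x8 > 0x1000 is
         * false and a wildly oversized transfer would be allowed. */
        if (offset + len > size)
                ; /* never taken */

        /* Safe form: no addition, nothing can wrap or underflow. */
        if (offset > size || len > size - offset)
                ; /* correctly rejected: 0xFFFFFFF8 > 0xFF0 */
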
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 72cae3c..5c428fa 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -79,6 +79,7 @@
struct list_head *unwind)
{
list_add(&obj_priv->evict_list, unwind);
+ drm_gem_object_reference(&obj_priv->base);
return drm_mm_scan_add_block(obj_priv->gtt_space);
}
@@ -92,7 +93,7 @@
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head eviction_list, unwind_list;
- struct drm_i915_gem_object *obj_priv, *tmp_obj_priv;
+ struct drm_i915_gem_object *obj_priv;
struct list_head *render_iter, *bsd_iter;
int ret = 0;
@@ -165,6 +166,7 @@
list_for_each_entry(obj_priv, &unwind_list, evict_list) {
ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
BUG_ON(ret);
+ drm_gem_object_unreference(&obj_priv->base);
}
/* We expect the caller to unpin, evict all and try again, or give up.
@@ -173,36 +175,34 @@
return -ENOSPC;
found:
+ /* drm_mm doesn't allow any other operations while
+ * scanning, therefore store to be evicted objects on a
+ * temporary list. */
INIT_LIST_HEAD(&eviction_list);
- list_for_each_entry_safe(obj_priv, tmp_obj_priv,
- &unwind_list, evict_list) {
+ while (!list_empty(&unwind_list)) {
+ obj_priv = list_first_entry(&unwind_list,
+ struct drm_i915_gem_object,
+ evict_list);
if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
- /* drm_mm doesn't allow any other other operations while
- * scanning, therefore store to be evicted objects on a
- * temporary list. */
list_move(&obj_priv->evict_list, &eviction_list);
+ continue;
}
+ list_del(&obj_priv->evict_list);
+ drm_gem_object_unreference(&obj_priv->base);
}
/* Unbinding will emit any required flushes */
- list_for_each_entry_safe(obj_priv, tmp_obj_priv,
- &eviction_list, evict_list) {
-#if WATCH_LRU
- DRM_INFO("%s: evicting %p\n", __func__, obj);
-#endif
- ret = i915_gem_object_unbind(&obj_priv->base);
- if (ret)
- return ret;
+ while (!list_empty(&eviction_list)) {
+ obj_priv = list_first_entry(&eviction_list,
+ struct drm_i915_gem_object,
+ evict_list);
+ if (ret == 0)
+ ret = i915_gem_object_unbind(&obj_priv->base);
+ list_del(&obj_priv->evict_list);
+ drm_gem_object_unreference(&obj_priv->base);
}
- /* The just created free hole should be on the top of the free stack
- * maintained by drm_mm, so this BUG_ON actually executes in O(1).
- * Furthermore all accessed data has just recently been used, so it
- * should be really fast, too. */
- BUG_ON(!drm_mm_search_free(&dev_priv->mm.gtt_space, min_size,
- alignment, 0));
-
- return 0;
+ return ret;
}
int
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 16861b8..744225e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -887,6 +887,49 @@
queue_work(dev_priv->wq, &dev_priv->error_work);
}
+static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_gem_object *obj_priv;
+ struct intel_unpin_work *work;
+ unsigned long flags;
+ bool stall_detected;
+
+ /* Ignore early vblank irqs */
+ if (intel_crtc == NULL)
+ return;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ work = intel_crtc->unpin_work;
+
+ if (work == NULL || work->pending || !work->enable_stall_check) {
+ /* Either the pending flip IRQ arrived, or we're too early. Don't check */
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ return;
+ }
+
+ /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
+ obj_priv = to_intel_bo(work->pending_flip_obj);
+ if(IS_I965G(dev)) {
+ int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
+ stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset;
+ } else {
+ int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR;
+ stall_detected = I915_READ(dspaddr) == (obj_priv->gtt_offset +
+ crtc->y * crtc->fb->pitch +
+ crtc->x * crtc->fb->bits_per_pixel/8);
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ if (stall_detected) {
+ DRM_DEBUG_DRIVER("Pageflip stall detected\n");
+ intel_prepare_page_flip(dev, intel_crtc->plane);
+ }
+}
+
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
@@ -1004,15 +1047,19 @@
if (pipea_stats & vblank_status) {
vblank++;
drm_handle_vblank(dev, 0);
- if (!dev_priv->flip_pending_is_done)
+ if (!dev_priv->flip_pending_is_done) {
+ i915_pageflip_stall_check(dev, 0);
intel_finish_page_flip(dev, 0);
+ }
}
if (pipeb_stats & vblank_status) {
vblank++;
drm_handle_vblank(dev, 1);
- if (!dev_priv->flip_pending_is_done)
+ if (!dev_priv->flip_pending_is_done) {
+ i915_pageflip_stall_check(dev, 1);
intel_finish_page_flip(dev, 1);
+ }
}
if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
@@ -1303,17 +1350,25 @@
i915_seqno_passed(i915_get_gem_seqno(dev,
&dev_priv->render_ring),
i915_get_tail_request(dev)->seqno)) {
+ bool missed_wakeup = false;
+
dev_priv->hangcheck_count = 0;
/* Issue a wake-up to catch stuck h/w. */
- if (dev_priv->render_ring.waiting_gem_seqno |
- dev_priv->bsd_ring.waiting_gem_seqno) {
- DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
- if (dev_priv->render_ring.waiting_gem_seqno)
- DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
- if (dev_priv->bsd_ring.waiting_gem_seqno)
- DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+ if (dev_priv->render_ring.waiting_gem_seqno &&
+ waitqueue_active(&dev_priv->render_ring.irq_queue)) {
+ DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
+ missed_wakeup = true;
}
+
+ if (dev_priv->bsd_ring.waiting_gem_seqno &&
+ waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
+ DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+ missed_wakeup = true;
+ }
+
+ if (missed_wakeup)
+ DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
return;
}
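
The hangcheck rework above only reports a missed IRQ when somebody is
actually asleep on the ring's wait queue: waitqueue_active() gates both the
wakeup and the error message, so an idle GPU with no waiters no longer
produces log spam. Distilled (field names follow the diff):

        if (ring->waiting_gem_seqno &&
            waitqueue_active(&ring->irq_queue)) {
                DRM_WAKEUP(&ring->irq_queue);
                missed_wakeup = true;
        }
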
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 67e3ec1..4f5e155 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -319,6 +319,7 @@
#define MI_MODE 0x0209c
# define VS_TIMER_DISPATCH (1 << 6)
+# define MI_FLUSH_ENABLE (1 << 11)
#define SCPD0 0x0209c /* 915+ only */
#define IER 0x020a0
@@ -2205,9 +2206,17 @@
#define WM1_LP_SR_EN (1<<31)
#define WM1_LP_LATENCY_SHIFT 24
#define WM1_LP_LATENCY_MASK (0x7f<<24)
+#define WM1_LP_FBC_LP1_MASK (0xf<<20)
+#define WM1_LP_FBC_LP1_SHIFT 20
#define WM1_LP_SR_MASK (0x1ff<<8)
#define WM1_LP_SR_SHIFT 8
#define WM1_LP_CURSOR_MASK (0x3f)
+#define WM2_LP_ILK 0x4510c
+#define WM2_LP_EN (1<<31)
+#define WM3_LP_ILK 0x45110
+#define WM3_LP_EN (1<<31)
+#define WM1S_LP_ILK 0x45120
+#define WM1S_LP_EN (1<<31)
/* Memory latency timer register */
#define MLTR_ILK 0x11222
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 2c6b98f..31f0858 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -789,16 +789,25 @@
dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
/* Fences */
- if (IS_I965G(dev)) {
+ switch (INTEL_INFO(dev)->gen) {
+ case 6:
+ for (i = 0; i < 16; i++)
+ dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+ break;
+ case 5:
+ case 4:
for (i = 0; i < 16; i++)
dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
- } else {
- for (i = 0; i < 8; i++)
- dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
-
+ break;
+ case 3:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
+ case 2:
+ for (i = 0; i < 8; i++)
+ dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+ break;
+
}
return 0;
@@ -815,15 +824,24 @@
I915_WRITE(HWS_PGA, dev_priv->saveHWS);
/* Fences */
- if (IS_I965G(dev)) {
+ switch (INTEL_INFO(dev)->gen) {
+ case 6:
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
+ break;
+ case 5:
+ case 4:
for (i = 0; i < 16; i++)
I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
- } else {
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
+ break;
+ case 3:
+ case 2:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
+ break;
}
i915_restore_display(dev);
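
The gen3 save case falls through to gen2 deliberately: 945-class parts add
fences 8-15 at FENCE_REG_945_8 but keep fences 0-7 at the gen2
FENCE_REG_830_0 layout, so after reading the extra registers the code
continues into the common loop. Annotated:

        case 3:
                if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                        for (i = 0; i < 8; i++)
                                dev_priv->saveFENCE[i+8] =
                                        I915_READ(FENCE_REG_945_8 + (i * 4));
                /* deliberate fall through: fences 0-7 use the gen2 layout */
        case 2:
                for (i = 0; i < 8; i++)
                        dev_priv->saveFENCE[i] =
                                I915_READ(FENCE_REG_830_0 + (i * 4));
                break;
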
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 4b77351..197d4f3 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -188,7 +188,7 @@
if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
1000, 1))
- DRM_ERROR("timed out waiting for FORCE_TRIGGER");
+ DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
if (turn_off_dac) {
I915_WRITE(PCH_ADPA, temp);
@@ -245,7 +245,7 @@
if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
CRT_HOTPLUG_FORCE_DETECT) == 0,
1000, 1))
- DRM_ERROR("timed out waiting for FORCE_DETECT to go off");
+ DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
}
stat = I915_READ(PORT_HOTPLUG_STAT);
@@ -400,7 +400,8 @@
return status;
}
-static enum drm_connector_status intel_crt_detect(struct drm_connector *connector)
+static enum drm_connector_status
+intel_crt_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
struct drm_encoder *encoder = intel_attached_encoder(connector);
@@ -419,6 +420,9 @@
if (intel_crt_detect_ddc(encoder))
return connector_status_connected;
+ if (!force)
+ return connector->status;
+
/* for pre-945g platforms use load detect */
if (encoder->crtc && encoder->crtc->enabled) {
status = intel_crt_load_detect(encoder->crtc, intel_encoder);
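
The new force parameter on ->detect() separates userspace-requested probes
from background polling: CRT load detection has to borrow a live CRTC and
visibly disturbs the display, so an unforced poll now returns the cached
connector status instead. Roughly, with hypothetical demo_ helpers:

        static enum drm_connector_status
        demo_detect(struct drm_connector *connector, bool force)
        {
                if (demo_hotplug_or_ddc_present())  /* cheap, side-effect free */
                        return connector_status_connected;

                if (!force)                     /* background poll: no flicker */
                        return connector->status;   /* last known state */

                return demo_load_detect(connector); /* expensive probe */
        }
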
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 23157e1..9792285 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -990,15 +990,31 @@
struct drm_i915_private *dev_priv = dev->dev_private;
int pipestat_reg = (pipe == 0 ? PIPEASTAT : PIPEBSTAT);
+ /* Clear existing vblank status. Note this will clear any other
+ * sticky status fields as well.
+ *
+ * This races with i915_driver_irq_handler() with the result
+ * that either function could miss a vblank event. Here it is not
+ * fatal, as we will either wait upon the next vblank interrupt or
+ * timeout. Generally speaking intel_wait_for_vblank() is only
+ * called during modeset at which time the GPU should be idle and
+ * should *not* be performing page flips and thus not waiting on
+ * vblanks...
+ * Currently, the result of us stealing a vblank from the irq
+ * handler is that a single frame will be skipped during swapbuffers.
+ */
+ I915_WRITE(pipestat_reg,
+ I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
+
/* Wait for vblank interrupt bit to set */
if (wait_for((I915_READ(pipestat_reg) &
- PIPE_VBLANK_INTERRUPT_STATUS) == 0,
+ PIPE_VBLANK_INTERRUPT_STATUS),
50, 0))
DRM_DEBUG_KMS("vblank wait timed out\n");
}
-/**
- * intel_wait_for_vblank_off - wait for vblank after disabling a pipe
+/*
+ * intel_wait_for_pipe_off - wait for pipe to turn off
* @dev: drm device
* @pipe: pipe to wait for
*
@@ -1006,25 +1022,39 @@
* spinning on the vblank interrupt status bit, since we won't actually
* see an interrupt when the pipe is disabled.
*
- * So this function waits for the display line value to settle (it
- * usually ends up stopping at the start of the next frame).
+ * On Gen4 and above:
+ * wait for the pipe register state bit to turn off
+ *
+ * Otherwise:
+ * wait for the display line value to settle (it usually
+ * ends up stopping at the start of the next frame).
+ *
*/
-void intel_wait_for_vblank_off(struct drm_device *dev, int pipe)
+static void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL);
- unsigned long timeout = jiffies + msecs_to_jiffies(100);
- u32 last_line;
- /* Wait for the display line to settle */
- do {
- last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK;
- mdelay(5);
- } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) &&
- time_after(timeout, jiffies));
+ if (INTEL_INFO(dev)->gen >= 4) {
+ int pipeconf_reg = (pipe == 0 ? PIPEACONF : PIPEBCONF);
- if (time_after(jiffies, timeout))
- DRM_DEBUG_KMS("vblank wait timed out\n");
+ /* Wait for the Pipe State to go off */
+ if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0,
+ 100, 0))
+ DRM_DEBUG_KMS("pipe_off wait timed out\n");
+ } else {
+ u32 last_line;
+ int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL);
+ unsigned long timeout = jiffies + msecs_to_jiffies(100);
+
+ /* Wait for the display line to settle */
+ do {
+ last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK;
+ mdelay(5);
+ } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) &&
+ time_after(timeout, jiffies));
+ if (time_after(jiffies, timeout))
+ DRM_DEBUG_KMS("pipe_off wait timed out\n");
+ }
}
/* Parameters have changed, update FBC info */
@@ -1486,7 +1516,7 @@
dspcntr &= ~DISPPLANE_TILED;
}
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
/* must disable */
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
@@ -1495,20 +1525,19 @@
Start = obj_priv->gtt_offset;
Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
- DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
+ DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
+ Start, Offset, x, y, fb->pitch);
I915_WRITE(dspstride, fb->pitch);
if (IS_I965G(dev)) {
- I915_WRITE(dspbase, Offset);
- I915_READ(dspbase);
I915_WRITE(dspsurf, Start);
- I915_READ(dspsurf);
I915_WRITE(dsptileoff, (y << 16) | x);
+ I915_WRITE(dspbase, Offset);
} else {
I915_WRITE(dspbase, Start + Offset);
- I915_READ(dspbase);
}
+ POSTING_READ(dspbase);
- if ((IS_I965G(dev) || plane == 0))
+ if (IS_I965G(dev) || plane == 0)
intel_update_fbc(crtc, &crtc->mode);
intel_wait_for_vblank(dev, intel_crtc->pipe);
@@ -1522,7 +1551,6 @@
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_framebuffer *intel_fb;
@@ -1530,13 +1558,6 @@
struct drm_gem_object *obj;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
- unsigned long Start, Offset;
- int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR);
- int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF);
- int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
- int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
- int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
- u32 dspcntr;
int ret;
/* no fb bound */
@@ -1572,71 +1593,18 @@
return ret;
}
- dspcntr = I915_READ(dspcntr_reg);
- /* Mask out pixel format bits in case we change it */
- dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
- switch (crtc->fb->bits_per_pixel) {
- case 8:
- dspcntr |= DISPPLANE_8BPP;
- break;
- case 16:
- if (crtc->fb->depth == 15)
- dspcntr |= DISPPLANE_15_16BPP;
- else
- dspcntr |= DISPPLANE_16BPP;
- break;
- case 24:
- case 32:
- if (crtc->fb->depth == 30)
- dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
- else
- dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
- break;
- default:
- DRM_ERROR("Unknown color depth\n");
+ ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y);
+ if (ret) {
i915_gem_object_unpin(obj);
mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
+ return ret;
}
- if (IS_I965G(dev)) {
- if (obj_priv->tiling_mode != I915_TILING_NONE)
- dspcntr |= DISPPLANE_TILED;
- else
- dspcntr &= ~DISPPLANE_TILED;
- }
-
- if (HAS_PCH_SPLIT(dev))
- /* must disable */
- dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
-
- I915_WRITE(dspcntr_reg, dspcntr);
-
- Start = obj_priv->gtt_offset;
- Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
-
- DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
- Start, Offset, x, y, crtc->fb->pitch);
- I915_WRITE(dspstride, crtc->fb->pitch);
- if (IS_I965G(dev)) {
- I915_WRITE(dspsurf, Start);
- I915_WRITE(dsptileoff, (y << 16) | x);
- I915_WRITE(dspbase, Offset);
- } else {
- I915_WRITE(dspbase, Start + Offset);
- }
- POSTING_READ(dspbase);
-
- if ((IS_I965G(dev) || plane == 0))
- intel_update_fbc(crtc, &crtc->mode);
-
- intel_wait_for_vblank(dev, pipe);
if (old_fb) {
intel_fb = to_intel_framebuffer(old_fb);
obj_priv = to_intel_bo(intel_fb->obj);
i915_gem_object_unpin(intel_fb->obj);
}
- intel_increase_pllclock(crtc, true);
mutex_unlock(&dev->struct_mutex);
@@ -1911,9 +1879,6 @@
int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
- int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1;
- int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ;
- int pf_win_pos = (pipe == 0) ? PFA_WIN_POS : PFB_WIN_POS;
int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
@@ -1982,15 +1947,19 @@
}
/* Enable panel fitting for LVDS */
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
- || HAS_eDP || intel_pch_has_edp(crtc)) {
- if (dev_priv->pch_pf_size) {
- temp = I915_READ(pf_ctl_reg);
- I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3);
- I915_WRITE(pf_win_pos, dev_priv->pch_pf_pos);
- I915_WRITE(pf_win_size, dev_priv->pch_pf_size);
- } else
- I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
+ if (dev_priv->pch_pf_size &&
+ (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
+ || HAS_eDP || intel_pch_has_edp(crtc))) {
+ /* Force use of hard-coded filter coefficients
+ * as some pre-programmed values are broken,
+ * e.g. x201.
+ */
+ I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1,
+ PF_ENABLE | PF_FILTER_MED_3x3);
+ I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS,
+ dev_priv->pch_pf_pos);
+ I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ,
+ dev_priv->pch_pf_size);
}
/* Enable CPU pipe */
@@ -2115,7 +2084,7 @@
I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
I915_READ(transconf_reg);
- if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 10, 0))
+ if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 100, 1))
DRM_ERROR("failed to enable transcoder\n");
}
@@ -2155,14 +2124,8 @@
udelay(100);
/* Disable PF */
- temp = I915_READ(pf_ctl_reg);
- if ((temp & PF_ENABLE) != 0) {
- I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
- I915_READ(pf_ctl_reg);
- }
- I915_WRITE(pf_win_size, 0);
- POSTING_READ(pf_win_size);
-
+ I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0);
+ I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0);
/* disable CPU FDI tx and PCH FDI rx */
temp = I915_READ(fdi_tx_reg);
@@ -2379,13 +2342,13 @@
I915_READ(dspbase_reg);
}
- /* Wait for vblank for the disable to take effect */
- intel_wait_for_vblank_off(dev, pipe);
-
/* Don't disable pipe A or pipe A PLLs if needed */
if (pipeconf_reg == PIPEACONF &&
- (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+ (dev_priv->quirks & QUIRK_PIPEA_FORCE)) {
+ /* Wait for vblank for the disable to take effect */
+ intel_wait_for_vblank(dev, pipe);
goto skip_pipe_off;
+ }
/* Next, disable display pipes */
temp = I915_READ(pipeconf_reg);
@@ -2394,8 +2357,8 @@
I915_READ(pipeconf_reg);
}
- /* Wait for vblank for the disable to take effect. */
- intel_wait_for_vblank_off(dev, pipe);
+ /* Wait for the pipe to turn off */
+ intel_wait_for_pipe_off(dev, pipe);
temp = I915_READ(dpll_reg);
if ((temp & DPLL_VCO_ENABLE) != 0) {
@@ -2421,6 +2384,9 @@
int pipe = intel_crtc->pipe;
bool enabled;
+ if (intel_crtc->dpms_mode == mode)
+ return;
+
intel_crtc->dpms_mode = mode;
intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON;
@@ -2511,11 +2477,19 @@
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
+
if (HAS_PCH_SPLIT(dev)) {
/* FDI link clock is fixed at 2.7G */
if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
return false;
}
+
+ /* XXX some encoders set the crtcinfo, others don't.
+ * Obviously we need some form of conflict resolution here...
+ */
+ if (adjusted_mode->crtc_htotal == 0)
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+
return true;
}
@@ -2815,14 +2789,8 @@
/* Don't promote wm_size to unsigned... */
if (wm_size > (long)wm->max_wm)
wm_size = wm->max_wm;
- if (wm_size <= 0) {
+ if (wm_size <= 0)
wm_size = wm->default_wm;
- DRM_ERROR("Insufficient FIFO for plane, expect flickering:"
- " entries required = %ld, available = %lu.\n",
- entries_required + wm->guard_size,
- wm->fifo_size);
- }
-
return wm_size;
}
@@ -3436,8 +3404,7 @@
reg_value = I915_READ(WM1_LP_ILK);
reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK |
WM1_LP_CURSOR_MASK);
- reg_value |= WM1_LP_SR_EN |
- (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
+ reg_value |= (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
(sr_wm << WM1_LP_SR_SHIFT) | cursor_wm;
I915_WRITE(WM1_LP_ILK, reg_value);
@@ -3554,10 +3521,9 @@
u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf;
bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
- bool is_edp = false;
+ struct intel_encoder *has_edp_encoder = NULL;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_encoder *encoder;
- struct intel_encoder *intel_encoder = NULL;
const intel_limit_t *limit;
int ret;
struct fdi_m_n m_n = {0};
@@ -3578,12 +3544,12 @@
drm_vblank_pre_modeset(dev, pipe);
list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+ struct intel_encoder *intel_encoder;
- if (!encoder || encoder->crtc != crtc)
+ if (encoder->crtc != crtc)
continue;
intel_encoder = enc_to_intel_encoder(encoder);
-
switch (intel_encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
@@ -3607,7 +3573,7 @@
is_dp = true;
break;
case INTEL_OUTPUT_EDP:
- is_edp = true;
+ has_edp_encoder = intel_encoder;
break;
}
@@ -3685,10 +3651,10 @@
int lane = 0, link_bw, bpp;
/* eDP doesn't require FDI link, so just set DP M/N
according to current link config */
- if (is_edp) {
+ if (has_edp_encoder) {
target_clock = mode->clock;
- intel_edp_link_config(intel_encoder,
- &lane, &link_bw);
+ intel_edp_link_config(has_edp_encoder,
+ &lane, &link_bw);
} else {
/* DP over FDI requires target mode clock
instead of link clock */
@@ -3709,7 +3675,7 @@
temp |= PIPE_8BPC;
else
temp |= PIPE_6BPC;
- } else if (is_edp || (is_dp && intel_pch_has_edp(crtc))) {
+ } else if (has_edp_encoder || (is_dp && intel_pch_has_edp(crtc))) {
switch (dev_priv->edp_bpp/3) {
case 8:
temp |= PIPE_8BPC;
@@ -3782,7 +3748,7 @@
udelay(200);
- if (is_edp) {
+ if (has_edp_encoder) {
if (dev_priv->lvds_use_ssc) {
temp |= DREF_SSC1_ENABLE;
I915_WRITE(PCH_DREF_CONTROL, temp);
@@ -3931,7 +3897,7 @@
dpll_reg = pch_dpll_reg;
}
- if (!is_edp) {
+ if (!has_edp_encoder) {
I915_WRITE(fp_reg, fp);
I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
I915_READ(dpll_reg);
@@ -4026,7 +3992,7 @@
}
}
- if (!is_edp) {
+ if (!has_edp_encoder) {
I915_WRITE(fp_reg, fp);
I915_WRITE(dpll_reg, dpll);
I915_READ(dpll_reg);
@@ -4105,7 +4071,7 @@
I915_WRITE(link_m1_reg, m_n.link_m);
I915_WRITE(link_n1_reg, m_n.link_n);
- if (is_edp) {
+ if (has_edp_encoder) {
ironlake_set_pll_edp(crtc, adjusted_mode->clock);
} else {
/* enable FDI RX PLL too */
@@ -4911,15 +4877,6 @@
kfree(intel_crtc);
}
-struct intel_unpin_work {
- struct work_struct work;
- struct drm_device *dev;
- struct drm_gem_object *old_fb_obj;
- struct drm_gem_object *pending_flip_obj;
- struct drm_pending_vblank_event *event;
- int pending;
-};
-
static void intel_unpin_work_fn(struct work_struct *__work)
{
struct intel_unpin_work *work =
@@ -5007,7 +4964,8 @@
spin_lock_irqsave(&dev->event_lock, flags);
if (intel_crtc->unpin_work) {
- intel_crtc->unpin_work->pending = 1;
+ if ((++intel_crtc->unpin_work->pending) > 1)
+ DRM_ERROR("Prepared flip multiple times\n");
} else {
DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
}
@@ -5026,9 +4984,9 @@
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
unsigned long flags, offset;
- int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
- int ret, pipesrc;
- u32 flip_mask;
+ int pipe = intel_crtc->pipe;
+ u32 pf, pipesrc;
+ int ret;
work = kzalloc(sizeof *work, GFP_KERNEL);
if (work == NULL)
@@ -5077,42 +5035,73 @@
atomic_inc(&obj_priv->pending_flip);
work->pending_flip_obj = obj;
- if (intel_crtc->plane)
- flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
- else
- flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-
if (IS_GEN3(dev) || IS_GEN2(dev)) {
+ u32 flip_mask;
+
+ if (intel_crtc->plane)
+ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ else
+ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+
BEGIN_LP_RING(2);
OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
OUT_RING(0);
ADVANCE_LP_RING();
}
+ work->enable_stall_check = true;
+
/* Offset into the new buffer for cases of shared fbs between CRTCs */
- offset = obj_priv->gtt_offset;
- offset += (crtc->y * fb->pitch) + (crtc->x * (fb->bits_per_pixel) / 8);
+ offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
BEGIN_LP_RING(4);
- if (IS_I965G(dev)) {
+ switch(INTEL_INFO(dev)->gen) {
+ case 2:
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
- OUT_RING(offset | obj_priv->tiling_mode);
- pipesrc = I915_READ(pipesrc_reg);
- OUT_RING(pipesrc & 0x0fff0fff);
- } else if (IS_GEN3(dev)) {
+ OUT_RING(obj_priv->gtt_offset + offset);
+ OUT_RING(MI_NOOP);
+ break;
+
+ case 3:
OUT_RING(MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
- OUT_RING(offset);
+ OUT_RING(obj_priv->gtt_offset + offset);
OUT_RING(MI_NOOP);
- } else {
+ break;
+
+ case 4:
+ case 5:
+ /* i965+ uses the linear or tiled offsets from the
+ * Display Registers (which do not change across a page-flip)
+ * so we need only reprogram the base address.
+ */
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
- OUT_RING(offset);
- OUT_RING(MI_NOOP);
+ OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
+
+ /* XXX Enabling the panel-fitter across page-flip is so far
+ * untested on non-native modes, so ignore it for now.
+ * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
+ */
+ pf = 0;
+ pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
+ OUT_RING(pf | pipesrc);
+ break;
+
+ case 6:
+ OUT_RING(MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ OUT_RING(fb->pitch | obj_priv->tiling_mode);
+ OUT_RING(obj_priv->gtt_offset);
+
+ pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
+ pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
+ OUT_RING(pf | pipesrc);
+ break;
}
ADVANCE_LP_RING();
@@ -5193,7 +5182,7 @@
dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
intel_crtc->cursor_addr = 0;
- intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
+ intel_crtc->dpms_mode = -1;
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
intel_crtc->busy = false;
@@ -5701,6 +5690,9 @@
I915_WRITE(DISP_ARB_CTL,
(I915_READ(DISP_ARB_CTL) |
DISP_FBC_WM_DIS));
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
}
/*
* Based on the document from hardware guys the following bits
@@ -5722,8 +5714,7 @@
ILK_DPFC_DIS2 |
ILK_CLK_FBC);
}
- if (IS_GEN6(dev))
- return;
+ return;
} else if (IS_G4X(dev)) {
uint32_t dspclk_gate;
I915_WRITE(RENCLK_GATE_D1, 0);
@@ -5784,11 +5775,9 @@
OUT_RING(MI_FLUSH);
ADVANCE_LP_RING();
}
- } else {
+ } else
DRM_DEBUG_KMS("Failed to allocate render context."
- "Disable RC6\n");
- return;
- }
+ "Disable RC6\n");
}
if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
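A note on the page-flip rework above: it replaces an IS_I965G()/IS_GEN3() if-ladder with one switch on INTEL_INFO(dev)->gen, grouping gen4/gen5 (which only need the base address reprogrammed) and giving gen6 its own layout with the tiling mode in the pitch word. A minimal, self-contained sketch of that dispatch shape — the emit_flip_gen*() helpers are hypothetical stand-ins for the BEGIN_LP_RING()/OUT_RING() sequences, not driver functions:

	#include <errno.h>

	/* Hypothetical per-generation emitters; stand-ins for the
	 * BEGIN_LP_RING()/OUT_RING() sequences in the patch. */
	static int emit_flip_gen2(void) { return 0; }
	static int emit_flip_gen3(void) { return 0; }
	static int emit_flip_gen4(void) { return 0; }	/* also gen5 */
	static int emit_flip_gen6(void) { return 0; }

	/* One switch on the generation number; in this sketch, unknown
	 * generations fail loudly instead of taking a catch-all branch. */
	static int emit_flip(int gen)
	{
		switch (gen) {
		case 2: return emit_flip_gen2();
		case 3: return emit_flip_gen3();
		case 4:
		case 5: return emit_flip_gen4();
		case 6: return emit_flip_gen6();
		default: return -ENODEV;
		}
	}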
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 9caccd0..9ab8708 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -239,7 +239,6 @@
uint32_t ch_data = ch_ctl + 4;
int i;
int recv_bytes;
- uint32_t ctl;
uint32_t status;
uint32_t aux_clock_divider;
int try, precharge;
@@ -263,41 +262,43 @@
else
precharge = 5;
+ if (I915_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) {
+ DRM_ERROR("dp_aux_ch not started status 0x%08x\n",
+ I915_READ(ch_ctl));
+ return -EBUSY;
+ }
+
/* Must try at least 3 times according to DP spec */
for (try = 0; try < 5; try++) {
/* Load the send data into the aux channel data registers */
- for (i = 0; i < send_bytes; i += 4) {
- uint32_t d = pack_aux(send + i, send_bytes - i);
-
- I915_WRITE(ch_data + i, d);
- }
-
- ctl = (DP_AUX_CH_CTL_SEND_BUSY |
- DP_AUX_CH_CTL_TIME_OUT_400us |
- (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
- (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
- (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_RECEIVE_ERROR);
+ for (i = 0; i < send_bytes; i += 4)
+ I915_WRITE(ch_data + i,
+ pack_aux(send + i, send_bytes - i));
/* Send the command and wait for it to complete */
- I915_WRITE(ch_ctl, ctl);
- (void) I915_READ(ch_ctl);
+ I915_WRITE(ch_ctl,
+ DP_AUX_CH_CTL_SEND_BUSY |
+ DP_AUX_CH_CTL_TIME_OUT_400us |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
for (;;) {
- udelay(100);
status = I915_READ(ch_ctl);
if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
break;
+ udelay(100);
}
/* Clear done status and any errors */
- I915_WRITE(ch_ctl, (status |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_RECEIVE_ERROR));
- (void) I915_READ(ch_ctl);
- if ((status & DP_AUX_CH_CTL_TIME_OUT_ERROR) == 0)
+ I915_WRITE(ch_ctl,
+ status |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
+ if (status & DP_AUX_CH_CTL_DONE)
break;
}
@@ -324,15 +325,12 @@
/* Unload any bytes sent back from the other side */
recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
-
if (recv_bytes > recv_size)
recv_bytes = recv_size;
- for (i = 0; i < recv_bytes; i += 4) {
- uint32_t d = I915_READ(ch_data + i);
-
- unpack_aux(d, recv + i, recv_bytes - i);
- }
+ for (i = 0; i < recv_bytes; i += 4)
+ unpack_aux(I915_READ(ch_data + i),
+ recv + i, recv_bytes - i);
return recv_bytes;
}
@@ -1140,18 +1138,14 @@
intel_dp_set_link_train(struct intel_dp *intel_dp,
uint32_t dp_reg_value,
uint8_t dp_train_pat,
- uint8_t train_set[4],
- bool first)
+ uint8_t train_set[4])
{
struct drm_device *dev = intel_dp->base.enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc);
int ret;
I915_WRITE(intel_dp->output_reg, dp_reg_value);
POSTING_READ(intel_dp->output_reg);
- if (first)
- intel_wait_for_vblank(dev, intel_crtc->pipe);
intel_dp_aux_native_write_1(intel_dp,
DP_TRAINING_PATTERN_SET,
@@ -1176,10 +1170,15 @@
uint8_t voltage;
bool clock_recovery = false;
bool channel_eq = false;
- bool first = true;
int tries;
u32 reg;
uint32_t DP = intel_dp->DP;
+ struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc);
+
+ /* Enable output, wait for it to become active */
+ I915_WRITE(intel_dp->output_reg, intel_dp->DP);
+ POSTING_READ(intel_dp->output_reg);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
/* Write the link configuration data */
intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
@@ -1212,9 +1211,8 @@
reg = DP | DP_LINK_TRAIN_PAT_1;
if (!intel_dp_set_link_train(intel_dp, reg,
- DP_TRAINING_PATTERN_1, train_set, first))
+ DP_TRAINING_PATTERN_1, train_set))
break;
- first = false;
/* Set training pattern 1 */
udelay(100);
@@ -1268,8 +1266,7 @@
/* channel eq pattern */
if (!intel_dp_set_link_train(intel_dp, reg,
- DP_TRAINING_PATTERN_2, train_set,
- false))
+ DP_TRAINING_PATTERN_2, train_set))
break;
udelay(400);
@@ -1388,7 +1385,7 @@
* \return false if DP port is disconnected.
*/
static enum drm_connector_status
-intel_dp_detect(struct drm_connector *connector)
+intel_dp_detect(struct drm_connector *connector, bool force)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
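The reworked dp_aux_ch above tightens three things: it refuses to start while SEND_BUSY is already set, it checks the status before delaying rather than after, and it retries until the DONE bit is actually seen instead of settling for the absence of a timeout error. The control flow, reduced to a compilable sketch — the register layout and helpers here are invented for illustration, not the i915 definitions:

	#include <errno.h>
	#include <stdint.h>

	#define CTL_SEND_BUSY	(1u << 31)	/* illustrative bit layout */
	#define CTL_DONE	(1u << 30)

	static uint32_t fake_ctl;		/* stand-in for the MMIO reg */
	static uint32_t reg_read(void) { return fake_ctl; }
	static void reg_write(uint32_t v) { fake_ctl = v & ~CTL_SEND_BUSY; }
	static void delay_us(unsigned us) { (void)us; }

	static int aux_transfer(uint32_t cmd)
	{
		uint32_t status;
		int try;

		if (reg_read() & CTL_SEND_BUSY)	/* channel already in use */
			return -EBUSY;

		/* The DP spec demands at least 3 attempts; the patch uses 5. */
		for (try = 0; try < 5; try++) {
			reg_write(cmd | CTL_SEND_BUSY);
			for (;;) {
				status = reg_read();
				if (!(status & CTL_SEND_BUSY))
					break;
				delay_us(100);
			}
			/* Clear the sticky status bits, then test for real
			 * completion, not just "no timeout error". */
			reg_write(status | CTL_DONE);
			if (status & CTL_DONE)
				return 0;
		}
		return -EIO;
	}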
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 0e92aa0..8828b3a 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -176,6 +176,16 @@
#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
+struct intel_unpin_work {
+ struct work_struct work;
+ struct drm_device *dev;
+ struct drm_gem_object *old_fb_obj;
+ struct drm_gem_object *pending_flip_obj;
+ struct drm_pending_vblank_event *event;
+ int pending;
+ bool enable_stall_check;
+};
+
struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
const char *name);
void intel_i2c_destroy(struct i2c_adapter *adapter);
@@ -219,7 +229,6 @@
struct drm_crtc *crtc);
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern void intel_wait_for_vblank_off(struct drm_device *dev, int pipe);
extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index a399f4b..7c9ec14 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -221,7 +221,8 @@
*
* Unimplemented.
*/
-static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
+static enum drm_connector_status
+intel_dvo_detect(struct drm_connector *connector, bool force)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 7bdc962..b61966c 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -237,8 +237,10 @@
drm_fb_helper_fini(&ifbdev->helper);
drm_framebuffer_cleanup(&ifb->base);
- if (ifb->obj)
+ if (ifb->obj) {
drm_gem_object_unreference(ifb->obj);
+ ifb->obj = NULL;
+ }
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index ccd4c97..926934a 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -139,7 +139,7 @@
}
static enum drm_connector_status
-intel_hdmi_detect(struct drm_connector *connector)
+intel_hdmi_detect(struct drm_connector *connector, bool force)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b819c10..6ec39a8 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -445,7 +445,8 @@
* connected and closed means disconnected. We also send hotplug events as
* needed, using lid status notification from the input layer.
*/
-static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector)
+static enum drm_connector_status
+intel_lvds_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
enum drm_connector_status status = connector_status_connected;
@@ -540,7 +541,9 @@
* the LID notification event.
*/
if (connector)
- connector->status = connector->funcs->detect(connector);
+ connector->status = connector->funcs->detect(connector,
+ false);
+
/* Don't force modeset on machines where it causes a GPU lockup */
if (dmi_check_system(intel_no_modeset_on_lid))
return NOTIFY_OK;
@@ -875,8 +878,6 @@
intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
intel_encoder->crtc_mask = (1 << 1);
- if (IS_I965G(dev))
- intel_encoder->crtc_mask |= (1 << 0);
drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 4f00390..1d306a4 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -25,6 +25,8 @@
*
* Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
*/
+
+#include <linux/seq_file.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 51e9c9e7..cb3508f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -220,9 +220,13 @@
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret = init_ring_common(dev, ring);
+ int mode;
+
if (IS_I9XX(dev) && !IS_GEN3(dev)) {
- I915_WRITE(MI_MODE,
- (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
+ mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
+ if (IS_GEN6(dev))
+ mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
+ I915_WRITE(MI_MODE, mode);
}
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 093e914..ee73e42 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1061,8 +1061,9 @@
if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode))
return false;
- if (!intel_sdvo_set_input_timings_for_mode(intel_sdvo, mode, adjusted_mode))
- return false;
+ (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
+ mode,
+ adjusted_mode);
} else if (intel_sdvo->is_lvds) {
drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0);
@@ -1070,8 +1071,9 @@
intel_sdvo->sdvo_lvds_fixed_mode))
return false;
- if (!intel_sdvo_set_input_timings_for_mode(intel_sdvo, mode, adjusted_mode))
- return false;
+ (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
+ mode,
+ adjusted_mode);
}
/* Make the CRTC code factor in the SDVO pixel multiplier. The
@@ -1108,10 +1110,9 @@
in_out.in0 = intel_sdvo->attached_output;
in_out.in1 = 0;
- if (!intel_sdvo_set_value(intel_sdvo,
- SDVO_CMD_SET_IN_OUT_MAP,
- &in_out, sizeof(in_out)))
- return;
+ intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_IN_OUT_MAP,
+ &in_out, sizeof(in_out));
if (intel_sdvo->is_hdmi) {
if (!intel_sdvo_set_avi_infoframe(intel_sdvo, mode))
@@ -1122,11 +1123,9 @@
/* We have tried to get input timing in mode_fixup, and filled into
adjusted_mode */
- if (intel_sdvo->is_tv || intel_sdvo->is_lvds) {
- intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+ intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+ if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
input_dtd.part2.sdvo_flags = intel_sdvo->sdvo_flags;
- } else
- intel_sdvo_get_dtd_from_mode(&input_dtd, mode);
/* If it's a TV, we already set the output timing in mode_fixup.
* Otherwise, the output timing is equal to the input timing.
@@ -1137,8 +1136,7 @@
intel_sdvo->attached_output))
return;
- if (!intel_sdvo_set_output_timing(intel_sdvo, &input_dtd))
- return;
+ (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd);
}
/* Set the input timing to the screen. Assume always input 0. */
@@ -1165,8 +1163,7 @@
intel_sdvo_set_input_timing(encoder, &input_dtd);
}
#else
- if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
- return;
+ (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
#endif
sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
@@ -1420,7 +1417,7 @@
if (!analog_connector)
return false;
- if (analog_connector->funcs->detect(analog_connector) ==
+ if (analog_connector->funcs->detect(analog_connector, false) ==
connector_status_disconnected)
return false;
@@ -1489,7 +1486,8 @@
return status;
}
-static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector)
+static enum drm_connector_status
+intel_sdvo_detect(struct drm_connector *connector, bool force)
{
uint16_t response;
struct drm_encoder *encoder = intel_attached_encoder(connector);
@@ -1932,6 +1930,41 @@
.destroy = intel_sdvo_enc_destroy,
};
+static void
+intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
+{
+ uint16_t mask = 0;
+ unsigned int num_bits;
+
+ /* Make a mask of outputs less than or equal to our own priority in the
+ * list.
+ */
+ switch (sdvo->controlled_output) {
+ case SDVO_OUTPUT_LVDS1:
+ mask |= SDVO_OUTPUT_LVDS1;
+ case SDVO_OUTPUT_LVDS0:
+ mask |= SDVO_OUTPUT_LVDS0;
+ case SDVO_OUTPUT_TMDS1:
+ mask |= SDVO_OUTPUT_TMDS1;
+ case SDVO_OUTPUT_TMDS0:
+ mask |= SDVO_OUTPUT_TMDS0;
+ case SDVO_OUTPUT_RGB1:
+ mask |= SDVO_OUTPUT_RGB1;
+ case SDVO_OUTPUT_RGB0:
+ mask |= SDVO_OUTPUT_RGB0;
+ break;
+ }
+
+ /* Count bits to find what number we are in the priority list. */
+ mask &= sdvo->caps.output_flags;
+ num_bits = hweight16(mask);
+ /* If more than 3 outputs, default to DDC bus 3 for now. */
+ if (num_bits > 3)
+ num_bits = 3;
+
+ /* Corresponds to SDVO_CONTROL_BUS_DDCx */
+ sdvo->ddc_bus = 1 << num_bits;
+}
/**
* Choose the appropriate DDC bus for control bus switch command for this
@@ -1951,7 +1984,10 @@
else
mapping = &(dev_priv->sdvo_mappings[1]);
- sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
+ if (mapping->initialized)
+ sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
+ else
+ intel_sdvo_guess_ddc_bus(sdvo);
}
static bool
@@ -2134,8 +2170,7 @@
return true;
err:
- intel_sdvo_destroy_enhance_property(connector);
- kfree(intel_sdvo_connector);
+ intel_sdvo_destroy(connector);
return false;
}
@@ -2207,8 +2242,7 @@
return true;
err:
- intel_sdvo_destroy_enhance_property(connector);
- kfree(intel_sdvo_connector);
+ intel_sdvo_destroy(connector);
return false;
}
@@ -2486,11 +2520,10 @@
uint16_t response;
} enhancements;
- if (!intel_sdvo_get_value(intel_sdvo,
- SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
- &enhancements, sizeof(enhancements)))
- return false;
-
+ enhancements.response = 0;
+ intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
+ &enhancements, sizeof(enhancements));
if (enhancements.response == 0) {
DRM_DEBUG_KMS("No enhancement is supported\n");
return true;
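The new intel_sdvo_guess_ddc_bus() above leans on two C idioms worth noting: a deliberately fall-through switch that accumulates every output at or below the encoder's own priority into a mask, and hweight16() to turn the surviving bits into a position in that priority list. A standalone sketch of the same computation — the flag values and the popcount helper are illustrative, not the SDVO definitions:

	#include <stdint.h>

	#define OUT_LVDS1 (1u << 5)	/* illustrative, not SDVO values */
	#define OUT_LVDS0 (1u << 4)
	#define OUT_TMDS1 (1u << 3)
	#define OUT_TMDS0 (1u << 2)
	#define OUT_RGB1  (1u << 1)
	#define OUT_RGB0  (1u << 0)

	static unsigned popcount16(uint16_t v)	/* stand-in for hweight16() */
	{
		unsigned n = 0;
		for (; v; v >>= 1)
			n += v & 1;
		return n;
	}

	static unsigned guess_ddc_bus(uint16_t own, uint16_t present)
	{
		uint16_t mask = 0;
		unsigned n;

		switch (own) {		/* falls through on purpose */
		case OUT_LVDS1: mask |= OUT_LVDS1;
		case OUT_LVDS0: mask |= OUT_LVDS0;
		case OUT_TMDS1: mask |= OUT_TMDS1;
		case OUT_TMDS0: mask |= OUT_TMDS0;
		case OUT_RGB1:  mask |= OUT_RGB1;
		case OUT_RGB0:  mask |= OUT_RGB0;
			break;
		}

		n = popcount16(mask & present);
		if (n > 3)		/* >3 outputs: default to bus 3 */
			n = 3;
		return 1u << n;		/* SDVO_CONTROL_BUS_DDCx style */
	}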
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index d2029ef..4a117e3 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1231,7 +1231,6 @@
struct drm_encoder *encoder = &intel_tv->base.enc;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
unsigned long irqflags;
u32 tv_ctl, save_tv_ctl;
u32 tv_dac, save_tv_dac;
@@ -1268,11 +1267,15 @@
DAC_C_0_7_V);
I915_WRITE(TV_CTL, tv_ctl);
I915_WRITE(TV_DAC, tv_dac);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ POSTING_READ(TV_DAC);
+ msleep(20);
+
tv_dac = I915_READ(TV_DAC);
I915_WRITE(TV_DAC, save_tv_dac);
I915_WRITE(TV_CTL, save_tv_ctl);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ POSTING_READ(TV_CTL);
+ msleep(20);
+
/*
* A B C
* 0 1 1 Composite
@@ -1338,7 +1341,7 @@
* we have a pipe programmed in order to probe the TV.
*/
static enum drm_connector_status
-intel_tv_detect(struct drm_connector *connector)
+intel_tv_detect(struct drm_connector *connector, bool force)
{
struct drm_display_mode mode;
struct drm_encoder *encoder = intel_attached_encoder(connector);
@@ -1350,7 +1353,7 @@
if (encoder->crtc && encoder->crtc->enabled) {
type = intel_tv_detect_type(intel_tv);
- } else {
+ } else if (force) {
struct drm_crtc *crtc;
int dpms_mode;
@@ -1361,10 +1364,9 @@
intel_release_load_detect_pipe(&intel_tv->base, connector,
dpms_mode);
} else
- type = -1;
- }
-
- intel_tv->type = type;
+ return connector_status_unknown;
+ } else
+ return connector->status;
if (type < 0)
return connector_status_disconnected;
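The bool force parameter threaded through every detect() callback in this series exists so that polled probes stay cheap: intel_tv_detect() above now performs its load-detect dance (which borrows a CRTC) only when forced, and otherwise reports the cached status. The contract, as a compilable sketch with stand-in types rather than the DRM ones:

	enum status { DISCONNECTED, CONNECTED, UNKNOWN };

	struct conn {
		enum status status;	/* last reported state */
		int has_active_crtc;	/* already driving a pipe? */
	};

	/* Stubs standing in for hardware probing and load-detection. */
	static enum status probe_active(struct conn *c) { (void)c; return CONNECTED; }
	static enum status load_detect(struct conn *c) { (void)c; return UNKNOWN; }

	static enum status detect(struct conn *c, int force)
	{
		if (c->has_active_crtc)
			return probe_active(c);	/* cheap: pipe is running */
		if (force)
			return load_detect(c);	/* intrusive; user asked */
		return c->status;		/* polling: stay passive */
	}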
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
index fff8204..9ce2827f 100644
--- a/drivers/gpu/drm/mga/mga_state.c
+++ b/drivers/gpu/drm/mga/mga_state.c
@@ -1085,19 +1085,19 @@
}
struct drm_ioctl_desc mga_ioctls[] = {
- DRM_IOCTL_DEF(DRM_MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_MGA_FLUSH, mga_dma_flush, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_RESET, mga_dma_reset, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_SWAP, mga_dma_swap, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_CLEAR, mga_dma_clear, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_VERTEX, mga_dma_vertex, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_INDICES, mga_dma_indices, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_ILOAD, mga_dma_iload, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_BLIT, mga_dma_blit, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_GETPARAM, mga_getparam, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_SET_FENCE, mga_set_fence, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(MGA_FLUSH, mga_dma_flush, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_RESET, mga_dma_reset, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_SWAP, mga_dma_swap, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_CLEAR, mga_dma_clear, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_VERTEX, mga_dma_vertex, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_INDICES, mga_dma_indices, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_ILOAD, mga_dma_iload, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_BLIT, mga_dma_blit, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_GETPARAM, mga_getparam, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_SET_FENCE, mga_set_fence, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
};
int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);
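The DRM_IOCTL_DEF to DRM_IOCTL_DEF_DRV conversion, repeated here for mga and below for nouveau and r128, moves the base-offset arithmetic into the macro so each table entry is built from a single driver-local token. A simplified sketch of how such a macro can work — this is not the drmP.h definition, just the token-pasting mechanics:

	/* Simplified sketch (not the drmP.h definition): token pasting
	 * derives both the table index and the recorded command from one
	 * name, so the two can never drift apart. */
	struct ioctl_desc {
		unsigned int cmd;
		int (*func)(void *data);
		unsigned int flags;
	};

	enum { DRV_INIT, DRV_FLUSH, DRV_COUNT };

	#define IOCTL_DEF_DRV(name, _func, _flags) \
		[DRV_##name] = { .cmd = DRV_##name, .func = _func, .flags = _flags }

	static int drv_init(void *data)  { (void)data; return 0; }
	static int drv_flush(void *data) { (void)data; return 0; }

	static const struct ioctl_desc ioctls[] = {
		IOCTL_DEF_DRV(INIT,  drv_init,  0),
		IOCTL_DEF_DRV(FLUSH, drv_flush, 0),
	};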
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 0b69a96..974b0f8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -2166,7 +2166,7 @@
uint32_t val = 0;
if (off < pci_resource_len(dev->pdev, 1)) {
- uint32_t __iomem *p =
+ uint8_t __iomem *p =
io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
val = ioread32(p + (off & ~PAGE_MASK));
@@ -2182,7 +2182,7 @@
uint32_t off, uint32_t val)
{
if (off < pci_resource_len(dev->pdev, 1)) {
- uint32_t __iomem *p =
+ uint8_t __iomem *p =
io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
iowrite32(val, p + (off & ~PAGE_MASK));
@@ -3869,27 +3869,10 @@
}
#ifdef __powerpc__
/* Powerbook specific quirks */
- if ((dev->pci_device & 0xffff) == 0x0179 ||
- (dev->pci_device & 0xffff) == 0x0189 ||
- (dev->pci_device & 0xffff) == 0x0329) {
- if (script == LVDS_RESET) {
- nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
-
- } else if (script == LVDS_PANEL_ON) {
- bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
- bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
- | (1 << 31));
- bios_wr32(bios, NV_PCRTC_GPIO_EXT,
- bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1);
-
- } else if (script == LVDS_PANEL_OFF) {
- bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
- bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
- & ~(1 << 31));
- bios_wr32(bios, NV_PCRTC_GPIO_EXT,
- bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3);
- }
- }
+ if (script == LVDS_RESET &&
+ (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 ||
+ dev->pci_device == 0x0329))
+ nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
#endif
return 0;
@@ -4381,11 +4364,8 @@
*
* For the moment, a quirk will do :)
*/
- if ((dev->pdev->device == 0x01d7) &&
- (dev->pdev->subsystem_vendor == 0x1028) &&
- (dev->pdev->subsystem_device == 0x01c2)) {
+ if (nv_match_device(dev, 0x01d7, 0x1028, 0x01c2))
bios->fp.duallink_transition_clk = 80000;
- }
/* set dual_link flag for EDID case */
if (pxclk && (chip_version < 0x25 || chip_version > 0x28))
@@ -4587,7 +4567,7 @@
return 1;
}
- NV_TRACE(dev, "0x%04X: parsing output script 0\n", script);
+ NV_DEBUG_KMS(dev, "0x%04X: parsing output script 0\n", script);
nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk == -1) {
@@ -4597,7 +4577,7 @@
return 1;
}
- NV_TRACE(dev, "0x%04X: parsing output script 1\n", script);
+ NV_DEBUG_KMS(dev, "0x%04X: parsing output script 1\n", script);
nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk == -2) {
@@ -4610,7 +4590,7 @@
return 1;
}
- NV_TRACE(dev, "0x%04X: parsing output script 2\n", script);
+ NV_DEBUG_KMS(dev, "0x%04X: parsing output script 2\n", script);
nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk > 0) {
@@ -4622,7 +4602,7 @@
return 1;
}
- NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script);
+ NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 0\n", script);
nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk < 0) {
@@ -4634,7 +4614,7 @@
return 1;
}
- NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script);
+ NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 1\n", script);
nouveau_bios_run_init_table(dev, script, dcbent);
}
@@ -5357,19 +5337,17 @@
}
tmdstableptr = ROM16(bios->data[bitentry->offset]);
-
- if (tmdstableptr == 0x0) {
+ if (!tmdstableptr) {
NV_ERROR(dev, "Pointer to TMDS table invalid\n");
return -EINVAL;
}
+ NV_INFO(dev, "TMDS table version %d.%d\n",
+ bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf);
+
/* nv50+ has v2.0, but we don't parse it atm */
- if (bios->data[tmdstableptr] != 0x11) {
- NV_WARN(dev,
- "TMDS table revision %d.%d not currently supported\n",
- bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf);
+ if (bios->data[tmdstableptr] != 0x11)
return -ENOSYS;
- }
/*
* These two scripts are odd: they don't seem to get run even when
@@ -5809,6 +5787,20 @@
gpio->line = tvdac_gpio[1] >> 4;
gpio->invert = tvdac_gpio[0] & 2;
}
+ } else {
+ /*
+ * No systematic way to store GPIO info on pre-v2.2
+ * DCBs, try to match the PCI device IDs.
+ */
+
+ /* Apple iMac G4 NV18 */
+ if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
+ struct dcb_gpio_entry *gpio = new_gpio_entry(bios);
+
+ gpio->tag = DCB_GPIO_TVDAC0;
+ gpio->line = 4;
+ }
+
}
if (!gpio_table_ptr)
@@ -5884,9 +5876,7 @@
struct drm_device *dev = bios->dev;
/* Gigabyte NX85T */
- if ((dev->pdev->device == 0x0421) &&
- (dev->pdev->subsystem_vendor == 0x1458) &&
- (dev->pdev->subsystem_device == 0x344c)) {
+ if (nv_match_device(dev, 0x0421, 0x1458, 0x344c)) {
if (cte->type == DCB_CONNECTOR_HDMI_1)
cte->type = DCB_CONNECTOR_DVI_I;
}
@@ -6139,7 +6129,7 @@
entry->tmdsconf.slave_addr = (conf & 0x00000070) >> 4;
break;
- case 0xe:
+ case OUTPUT_EOL:
/* weird g80 mobile type that "nv" treats as a terminator */
dcb->entries--;
return false;
@@ -6176,23 +6166,15 @@
entry->type = OUTPUT_TV;
break;
case 2:
+ case 4:
+ if (conn & 0x10)
+ entry->type = OUTPUT_LVDS;
+ else
+ entry->type = OUTPUT_TMDS;
+ break;
case 3:
entry->type = OUTPUT_LVDS;
break;
- case 4:
- switch ((conn & 0x000000f0) >> 4) {
- case 0:
- entry->type = OUTPUT_TMDS;
- break;
- case 1:
- entry->type = OUTPUT_LVDS;
- break;
- default:
- NV_ERROR(dev, "Unknown DCB subtype 4/%d\n",
- (conn & 0x000000f0) >> 4);
- return false;
- }
- break;
default:
NV_ERROR(dev, "Unknown DCB type %d\n", conn & 0x0000000f);
return false;
@@ -6307,9 +6289,7 @@
* nasty problems until this is sorted (assuming it's not a
* VBIOS bug).
*/
- if ((dev->pdev->device == 0x040d) &&
- (dev->pdev->subsystem_vendor == 0x1028) &&
- (dev->pdev->subsystem_device == 0x019b)) {
+ if (nv_match_device(dev, 0x040d, 0x1028, 0x019b)) {
if (*conn == 0x02026312 && *conf == 0x00000020)
return false;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index fd14dfd..c1de2f3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -95,6 +95,7 @@
OUTPUT_TMDS = 2,
OUTPUT_LVDS = 3,
OUTPUT_DP = 6,
+ OUTPUT_EOL = 14, /* DCB 4.0+, appears to be end-of-list */
OUTPUT_ANY = -1
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 84f8518..f6f4477 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -36,6 +36,21 @@
#include <linux/log2.h>
#include <linux/slab.h>
+int
+nouveau_bo_sync_gpu(struct nouveau_bo *nvbo, struct nouveau_channel *chan)
+{
+ struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
+ int ret;
+
+ if (!prev_fence || nouveau_fence_channel(prev_fence) == chan)
+ return 0;
+
+ spin_lock(&nvbo->bo.lock);
+ ret = ttm_bo_wait(&nvbo->bo, false, false, false);
+ spin_unlock(&nvbo->bo.lock);
+ return ret;
+}
+
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 90fdcda..0480f06 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -426,18 +426,18 @@
***********************************/
struct drm_ioctl_desc nouveau_ioctls[] = {
- DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
};
int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index b1b22ba..fc73703 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -104,7 +104,7 @@
int i;
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- struct nouveau_i2c_chan *i2c;
+ struct nouveau_i2c_chan *i2c = NULL;
struct nouveau_encoder *nv_encoder;
struct drm_mode_object *obj;
int id;
@@ -117,7 +117,9 @@
if (!obj)
continue;
nv_encoder = nouveau_encoder(obj_to_encoder(obj));
- i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
+
+ if (nv_encoder->dcb->i2c_index < 0xf)
+ i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
if (i2c && nouveau_probe_i2c_addr(i2c, 0x50)) {
*pnv_encoder = nv_encoder;
@@ -166,7 +168,7 @@
}
static enum drm_connector_status
-nouveau_connector_detect(struct drm_connector *connector)
+nouveau_connector_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
struct nouveau_connector *nv_connector = nouveau_connector(connector);
@@ -244,7 +246,7 @@
}
static enum drm_connector_status
-nouveau_connector_detect_lvds(struct drm_connector *connector)
+nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -265,7 +267,7 @@
/* Try retrieving EDID via DDC */
if (!dev_priv->vbios.fp_no_ddc) {
- status = nouveau_connector_detect(connector);
+ status = nouveau_connector_detect(connector, force);
if (status == connector_status_connected)
goto out;
}
@@ -556,8 +558,10 @@
if (nv_encoder->dcb->type == OUTPUT_LVDS &&
(nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) {
- nv_connector->native_mode = drm_mode_create(dev);
- nouveau_bios_fp_mode(dev, nv_connector->native_mode);
+ struct drm_display_mode mode;
+
+ nouveau_bios_fp_mode(dev, &mode);
+ nv_connector->native_mode = drm_mode_duplicate(dev, &mode);
}
/* Find the native mode if this is a digital panel, if we didn't
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index e424bf7..b1be617 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -1165,6 +1165,7 @@
extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val);
+extern int nouveau_bo_sync_gpu(struct nouveau_bo *, struct nouveau_channel *);
/* nouveau_fence.c */
struct nouveau_fence;
@@ -1388,6 +1389,15 @@
return false;
}
+static inline bool
+nv_match_device(struct drm_device *dev, unsigned device,
+ unsigned sub_vendor, unsigned sub_device)
+{
+ return dev->pdev->device == device &&
+ dev->pdev->subsystem_vendor == sub_vendor &&
+ dev->pdev->subsystem_device == sub_device;
+}
+
#define NV_SW 0x0000506e
#define NV_SW_DMA_SEMAPHORE 0x00000060
#define NV_SW_SEMAPHORE_OFFSET 0x00000064
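The nv_match_device() helper added above exists purely to collapse the repeated three-field PCI ID comparisons at quirk sites (the Dell transition-clock and Gigabyte NX85T hunks earlier in nouveau_bios.c, the Zotac and MSI TV-detect hunks later in nv17_tv.c) into one readable predicate. Outside the kernel the same shape looks like this — the struct is a stand-in for pci_dev:

	struct pci_ids { unsigned device, sub_vendor, sub_device; };

	static int match_device(const struct pci_ids *dev, unsigned device,
				unsigned sub_vendor, unsigned sub_device)
	{
		return dev->device == device &&
		       dev->sub_vendor == sub_vendor &&
		       dev->sub_device == sub_device;
	}

	/* A quirk site then reads as a single condition, e.g. the
	 * Gigabyte NX85T IDs quoted in this diff: */
	static int is_nx85t(const struct pci_ids *dev)
	{
		return match_device(dev, 0x0421, 0x1458, 0x344c);
	}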
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 6b208ff..87ac21e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -64,16 +64,17 @@
struct nouveau_fence *fence;
uint32_t sequence;
+ spin_lock(&chan->fence.lock);
+
if (USE_REFCNT)
sequence = nvchan_rd32(chan, 0x48);
else
sequence = atomic_read(&chan->fence.last_sequence_irq);
if (chan->fence.sequence_ack == sequence)
- return;
+ goto out;
chan->fence.sequence_ack = sequence;
- spin_lock(&chan->fence.lock);
list_for_each_safe(entry, tmp, &chan->fence.pending) {
fence = list_entry(entry, struct nouveau_fence, entry);
@@ -85,6 +86,7 @@
if (sequence == chan->fence.sequence_ack)
break;
}
+out:
spin_unlock(&chan->fence.lock);
}
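The nouveau_fence.c hunk above widens the lock to cover the sequence read itself, which is why the bare return becomes goto out: every exit taken after the lock is acquired must pass through the unlock. The same discipline in a userspace sketch, with a pthread mutex standing in for the kernel spinlock:

	#include <pthread.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned last_ack, hw_sequence;

	static void fence_update(void)
	{
		unsigned seq;

		pthread_mutex_lock(&lock);
		seq = hw_sequence;	/* shared state read under the lock */
		if (last_ack == seq)
			goto out;	/* early exit still unlocks */
		last_ack = seq;
		/* ... walk and signal the pending list here ... */
	out:
		pthread_mutex_unlock(&lock);
	}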
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 0f417ac..19620a6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -167,11 +167,9 @@
goto out;
ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_unreference_unlocked(nvbo->gem);
out:
- drm_gem_object_handle_unreference_unlocked(nvbo->gem);
-
- if (ret)
- drm_gem_object_unreference_unlocked(nvbo->gem);
return ret;
}
@@ -245,7 +243,7 @@
list_del(&nvbo->entry);
nvbo->reserved_by = NULL;
ttm_bo_unreserve(&nvbo->bo);
- drm_gem_object_unreference(nvbo->gem);
+ drm_gem_object_unreference_unlocked(nvbo->gem);
}
}
@@ -300,7 +298,7 @@
validate_fini(op, NULL);
if (ret == -EAGAIN)
ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
- drm_gem_object_unreference(gem);
+ drm_gem_object_unreference_unlocked(gem);
if (ret) {
NV_ERROR(dev, "fail reserve\n");
return ret;
@@ -337,7 +335,9 @@
return -EINVAL;
}
+ mutex_unlock(&drm_global_mutex);
ret = ttm_bo_wait_cpu(&nvbo->bo, false);
+ mutex_lock(&drm_global_mutex);
if (ret) {
NV_ERROR(dev, "fail wait_cpu\n");
return ret;
@@ -361,16 +361,11 @@
list_for_each_entry(nvbo, list, entry) {
struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
- struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
- if (prev_fence && nouveau_fence_channel(prev_fence) != chan) {
- spin_lock(&nvbo->bo.lock);
- ret = ttm_bo_wait(&nvbo->bo, false, false, false);
- spin_unlock(&nvbo->bo.lock);
- if (unlikely(ret)) {
- NV_ERROR(dev, "fail wait other chan\n");
- return ret;
- }
+ ret = nouveau_bo_sync_gpu(nvbo, chan);
+ if (unlikely(ret)) {
+ NV_ERROR(dev, "fail pre-validate sync\n");
+ return ret;
}
ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
@@ -381,7 +376,7 @@
return ret;
}
- nvbo->channel = chan;
+ nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
false, false, false);
nvbo->channel = NULL;
@@ -390,6 +385,12 @@
return ret;
}
+ ret = nouveau_bo_sync_gpu(nvbo, chan);
+ if (unlikely(ret)) {
+ NV_ERROR(dev, "fail post-validate sync\n");
+ return ret;
+ }
+
if (nvbo->bo.offset == b->presumed.offset &&
((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
@@ -613,7 +614,20 @@
return PTR_ERR(bo);
}
- mutex_lock(&dev->struct_mutex);
+ /* Mark push buffers as being used on PFIFO; the validation code
+ * will then make sure that if the pushbuf bo moves, the move
+ * happens on the kernel channel, which will in turn cause a sync
+ * to happen before we try and submit the push buffer.
+ */
+ for (i = 0; i < req->nr_push; i++) {
+ if (push[i].bo_index >= req->nr_buffers) {
+ NV_ERROR(dev, "push %d buffer not in list\n", i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ bo[push[i].bo_index].read_domains |= (1 << 31);
+ }
/* Validate buffer list */
ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
@@ -647,7 +661,7 @@
push[i].length);
}
} else
- if (dev_priv->card_type >= NV_20) {
+ if (dev_priv->chipset >= 0x25) {
ret = RING_SPACE(chan, req->nr_push * 2);
if (ret) {
NV_ERROR(dev, "cal_space: %d\n", ret);
@@ -713,7 +727,6 @@
out:
validate_fini(&op, fence);
nouveau_fence_unref((void**)&fence);
- mutex_unlock(&dev->struct_mutex);
kfree(bo);
kfree(push);
@@ -722,7 +735,7 @@
req->suffix0 = 0x00000000;
req->suffix1 = 0x00000000;
} else
- if (dev_priv->card_type >= NV_20) {
+ if (dev_priv->chipset >= 0x25) {
req->suffix0 = 0x00020000;
req->suffix1 = 0x00000000;
} else {
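The nouveau_gem_ioctl_new hunk earlier in this file ("drop reference from allocate - handle holds it now") is an ownership-transfer pattern: allocation returns one reference, handle creation takes its own, so the allocator's reference is dropped unconditionally rather than only on the error path. In miniature, with stand-in types:

	#include <stdlib.h>

	struct obj { int refcnt; };

	static void obj_get(struct obj *o) { o->refcnt++; }
	static void obj_put(struct obj *o) { if (--o->refcnt == 0) free(o); }

	/* Stub: a real handle table would take its own reference. */
	static int handle_create(struct obj *o) { obj_get(o); return 0; }

	static int obj_new(void)
	{
		struct obj *o = calloc(1, sizeof(*o));
		int ret;

		if (!o)
			return -1;
		o->refcnt = 1;		/* reference from allocation */
		ret = handle_create(o);	/* handle takes its own ref on success */
		obj_put(o);		/* drop ours either way: no leak, no double-put */
		return ret;
	}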
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index 0bd407c..8461485 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -163,7 +163,7 @@
if (entry->chan)
return -EEXIST;
- if (dev_priv->card_type == NV_C0 && entry->read >= NV50_I2C_PORTS) {
+ if (dev_priv->card_type >= NV_50 && entry->read >= NV50_I2C_PORTS) {
NV_ERROR(dev, "unknown i2c port %d\n", entry->read);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 491767f..6b9187d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -214,6 +214,7 @@
nouveau_sgdma_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = dev->pdev;
struct nouveau_gpuobj *gpuobj = NULL;
uint32_t aper_size, obj_size;
int i, ret;
@@ -239,10 +240,19 @@
dev_priv->gart_info.sg_dummy_page =
alloc_page(GFP_KERNEL|__GFP_DMA32);
+ if (!dev_priv->gart_info.sg_dummy_page) {
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return -ENOMEM;
+ }
+
set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
dev_priv->gart_info.sg_dummy_bus =
- pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
+ pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) {
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return -EFAULT;
+ }
if (dev_priv->card_type < NV_50) {
/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index a5dcf768..0d3206a 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -444,6 +444,7 @@
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct dcb_entry *dcbe = nv_encoder->dcb;
int head = nouveau_crtc(encoder->crtc)->index;
+ struct drm_encoder *slave_encoder;
if (dcbe->type == OUTPUT_TMDS)
run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock);
@@ -462,9 +463,10 @@
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
/* Init external transmitters */
- if (get_tmds_slave(encoder))
- get_slave_funcs(get_tmds_slave(encoder))->mode_set(
- encoder, &nv_encoder->mode, &nv_encoder->mode);
+ slave_encoder = get_tmds_slave(encoder);
+ if (slave_encoder)
+ get_slave_funcs(slave_encoder)->mode_set(
+ slave_encoder, &nv_encoder->mode, &nv_encoder->mode);
helper->dpms(encoder, DRM_MODE_DPMS_ON);
@@ -473,6 +475,27 @@
nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
}
+static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
+{
+#ifdef __powerpc__
+ struct drm_device *dev = encoder->dev;
+
+ /* BIOS scripts usually take care of the backlight, thanks
+ * Apple for your consistency.
+ */
+ if (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 ||
+ dev->pci_device == 0x0329) {
+ if (mode == DRM_MODE_DPMS_ON) {
+ nv_mask(dev, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31);
+ nv_mask(dev, NV_PCRTC_GPIO_EXT, 3, 1);
+ } else {
+ nv_mask(dev, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0);
+ nv_mask(dev, NV_PCRTC_GPIO_EXT, 3, 0);
+ }
+ }
+#endif
+}
+
static inline bool is_powersaving_dpms(int mode)
{
return (mode != DRM_MODE_DPMS_ON);
@@ -520,6 +543,7 @@
LVDS_PANEL_OFF, 0);
}
+ nv04_dfp_update_backlight(encoder, mode);
nv04_dfp_update_fp_control(encoder, mode);
if (mode == DRM_MODE_DPMS_ON)
@@ -543,6 +567,7 @@
NV_INFO(dev, "Setting dpms mode %d on tmds encoder (output %d)\n",
mode, nv_encoder->dcb->index);
+ nv04_dfp_update_backlight(encoder, mode);
nv04_dfp_update_fp_control(encoder, mode);
}
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 44fefb0..13cdc05 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -121,10 +121,14 @@
get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask)
{
/* Zotac FX5200 */
- if (dev->pdev->device == 0x0322 &&
- dev->pdev->subsystem_vendor == 0x19da &&
- (dev->pdev->subsystem_device == 0x1035 ||
- dev->pdev->subsystem_device == 0x2035)) {
+ if (nv_match_device(dev, 0x0322, 0x19da, 0x1035) ||
+ nv_match_device(dev, 0x0322, 0x19da, 0x2035)) {
+ *pin_mask = 0xc;
+ return false;
+ }
+
+ /* MSI nForce2 IGP */
+ if (nv_match_device(dev, 0x01f0, 0x1462, 0x5710)) {
*pin_mask = 0xc;
return false;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 37c7b48..91ef93c 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -139,6 +139,8 @@
chan->file_priv = (struct drm_file *)-2;
dev_priv->fifos[0] = dev_priv->fifos[127] = chan;
+ INIT_LIST_HEAD(&chan->ramht_refs);
+
/* Channel's PRAMIN object + heap */
ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0,
NULL, &chan->ramin);
@@ -278,7 +280,7 @@
/*XXX: incorrect, but needed to make hash func "work" */
dev_priv->ramht_offset = 0x10000;
dev_priv->ramht_bits = 9;
- dev_priv->ramht_size = (1 << dev_priv->ramht_bits);
+ dev_priv->ramht_size = (1 << dev_priv->ramht_bits) * 8;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c
index 3ab3cdc..6b451f8 100644
--- a/drivers/gpu/drm/nouveau/nvc0_instmem.c
+++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c
@@ -142,14 +142,16 @@
nvc0_instmem_suspend(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 *buf;
int i;
dev_priv->susres.ramin_copy = vmalloc(65536);
if (!dev_priv->susres.ramin_copy)
return -ENOMEM;
+ buf = dev_priv->susres.ramin_copy;
- for (i = 0x700000; i < 0x710000; i += 4)
- dev_priv->susres.ramin_copy[i/4] = nv_rd32(dev, i);
+ for (i = 0; i < 65536; i += 4)
+ buf[i/4] = nv_rd32(dev, NV04_PRAMIN + i);
return 0;
}
@@ -157,14 +159,15 @@
nvc0_instmem_resume(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 *buf = dev_priv->susres.ramin_copy;
u64 chan;
int i;
chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
nv_wr32(dev, 0x001700, chan >> 16);
- for (i = 0x700000; i < 0x710000; i += 4)
- nv_wr32(dev, i, dev_priv->susres.ramin_copy[i/4]);
+ for (i = 0; i < 65536; i += 4)
+ nv_wr32(dev, NV04_PRAMIN + i, buf[i/4]);
vfree(dev_priv->susres.ramin_copy);
dev_priv->susres.ramin_copy = NULL;
@@ -221,7 +224,7 @@
/*XXX: incorrect, but needed to make hash func "work" */
dev_priv->ramht_offset = 0x10000;
dev_priv->ramht_bits = 9;
- dev_priv->ramht_size = (1 << dev_priv->ramht_bits);
+ dev_priv->ramht_size = (1 << dev_priv->ramht_bits) * 8;
return 0;
}
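The nvc0_instmem suspend/resume hunks above fix a buffer overrun: the old loops walked absolute register offsets (0x700000..0x710000) while indexing the 64 KiB save buffer with i/4, landing 0x700000/4 entries out of bounds. Iterating a relative offset and adding the base only at the register access keeps the index in range. In miniature, with invented names:

	#include <stdint.h>
	#include <stdlib.h>

	#define RAMIN_BASE 0x700000u
	#define RAMIN_SIZE 65536u

	static uint32_t mmio_rd32(uint32_t reg) { return reg; }	/* stub */

	static uint32_t *save_ramin(void)
	{
		uint32_t *buf = malloc(RAMIN_SIZE);
		uint32_t i;

		if (!buf)
			return NULL;
		for (i = 0; i < RAMIN_SIZE; i += 4)	/* relative offset */
			buf[i / 4] = mmio_rd32(RAMIN_BASE + i);	/* base added here */
		return buf;
	}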
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index 077af1f..a9e33ce 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -1639,30 +1639,29 @@
r128_do_cleanup_pageflip(dev);
}
}
-
void r128_driver_lastclose(struct drm_device *dev)
{
r128_do_cleanup_cce(dev);
}
struct drm_ioctl_desc r128_ioctls[] = {
- DRM_IOCTL_DEF(DRM_R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_R128_CCE_IDLE, r128_cce_idle, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_R128_RESET, r128_engine_reset, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_R128_FULLSCREEN, r128_fullscreen, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_R128_SWAP, r128_cce_swap, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_R128_FLIP, r128_cce_flip, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_R128_CLEAR, r128_cce_clear, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_R128_VERTEX, r128_cce_vertex, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_R128_INDICES, r128_cce_indices, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_R128_BLIT, r128_cce_blit, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_R128_DEPTH, r128_cce_depth, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_R128_STIPPLE, r128_cce_stipple, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_R128_GETPARAM, r128_getparam, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(R128_CCE_IDLE, r128_cce_idle, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_RESET, r128_engine_reset, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_FULLSCREEN, r128_fullscreen, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_SWAP, r128_cce_swap, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_FLIP, r128_cce_flip, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_CLEAR, r128_cce_clear, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_VERTEX, r128_cce_vertex, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_INDICES, r128_cce_indices, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_BLIT, r128_cce_blit, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_DEPTH, r128_cce_depth, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_STIPPLE, r128_cce_stipple, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(R128_GETPARAM, r128_getparam, DRM_AUTH),
};
int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 1bc72c3..fe359a2 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -4999,7 +4999,7 @@
#define SW_I2C_CNTL_WRITE1BIT 6
//==============================VESA definition Portion===============================
-#define VESA_OEM_PRODUCT_REV '01.00'
+#define VESA_OEM_PRODUCT_REV "01.00"
#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support
#define VESA_MODE_WIN_ATTRIBUTE 7
#define VESA_WIN_SIZE 64
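The quoting fix above matters: '01.00' is a multi-character character constant with type int and an implementation-defined value, while "01.00" is the NUL-terminated string this OEM-revision define is meant to be. A two-line demonstration:

	#include <stdio.h>

	int main(void)
	{
		int bogus = 'ab';	/* multi-char constant: an int with an
					 * implementation-defined value;
					 * compilers typically warn */
		const char *rev = "01.00";	/* what was intended */

		printf("%d %s\n", bogus, rev);
		return 0;
	}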
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 12ad512..cd0290f 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -332,6 +332,11 @@
args.usV_SyncWidth =
cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
+ args.ucOverscanRight = radeon_crtc->h_border;
+ args.ucOverscanLeft = radeon_crtc->h_border;
+ args.ucOverscanBottom = radeon_crtc->v_border;
+ args.ucOverscanTop = radeon_crtc->v_border;
+
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
misc |= ATOM_VSYNC_POLARITY;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
@@ -471,6 +476,8 @@
struct radeon_encoder *radeon_encoder = NULL;
u32 adjusted_clock = mode->clock;
int encoder_mode = 0;
+ u32 dp_clock = mode->clock;
+ int bpc = 8;
/* reset the pll flags */
pll->flags = 0;
@@ -513,6 +520,17 @@
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
encoder_mode = atombios_get_encoder_mode(encoder);
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) {
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_connector->con_priv;
+
+ dp_clock = dig_connector->dp_clock;
+ }
+ }
+
if (ASIC_IS_AVIVO(rdev)) {
/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
@@ -521,6 +539,21 @@
pll->algo = PLL_ALGO_LEGACY;
pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
}
+ /* There is some evidence (often anecdotal) that RV515/RV620 LVDS
+ * (on some boards at least) prefers the legacy algo. I'm not
+ * sure whether this should be handled generically or on a
+ * case-by-case quirk basis. Both algos should work fine in the
+ * majority of cases.
+ */
+ if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) &&
+ ((rdev->family == CHIP_RV515) ||
+ (rdev->family == CHIP_RV620))) {
+ /* allow the user to override just in case */
+ if (radeon_new_pll == 1)
+ pll->algo = PLL_ALGO_NEW;
+ else
+ pll->algo = PLL_ALGO_LEGACY;
+ }
} else {
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -555,6 +588,14 @@
args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
args.v1.ucTransmitterID = radeon_encoder->encoder_id;
args.v1.ucEncodeMode = encoder_mode;
+ if (encoder_mode == ATOM_ENCODER_MODE_DP) {
+ /* may want to enable SS on DP eventually */
+ /* args.v1.ucConfig |=
+ ADJUST_DISPLAY_CONFIG_SS_ENABLE;*/
+ } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
+ args.v1.ucConfig |=
+ ADJUST_DISPLAY_CONFIG_SS_ENABLE;
+ }
atom_execute_table(rdev->mode_info.atom_context,
index, (uint32_t *)&args);
@@ -568,10 +609,20 @@
if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- if (encoder_mode == ATOM_ENCODER_MODE_DP)
+ if (encoder_mode == ATOM_ENCODER_MODE_DP) {
+ /* may want to enable SS on DP/eDP eventually */
+ /*args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_SS_ENABLE;*/
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_COHERENT_MODE;
- else {
+ /* 16200 or 27000 */
+ args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
+ } else {
+ if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
+ /* deep color support */
+ args.v3.sInput.usPixelClock =
+ cpu_to_le16((mode->clock * bpc / 8) / 10);
+ }
if (dig->coherent_mode)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_COHERENT_MODE;
@@ -580,13 +631,19 @@
DISPPLL_CONFIG_DUAL_LINK;
}
} else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- /* may want to enable SS on DP/eDP eventually */
- /*args.v3.sInput.ucDispPllConfig |=
- DISPPLL_CONFIG_SS_ENABLE;*/
- if (encoder_mode == ATOM_ENCODER_MODE_DP)
+ if (encoder_mode == ATOM_ENCODER_MODE_DP) {
+ /* may want to enable SS on DP/eDP eventually */
+ /*args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_SS_ENABLE;*/
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_COHERENT_MODE;
- else {
+ /* 16200 or 27000 */
+ args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
+ } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
+ /* want to enable SS on LVDS eventually */
+ /*args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_SS_ENABLE;*/
+ } else {
if (mode->clock > 165000)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_DUAL_LINK;
@@ -1019,11 +1076,11 @@
if (rdev->family >= CHIP_RV770) {
if (radeon_crtc->crtc_id) {
- WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0);
- WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0);
+ WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
+ WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
} else {
- WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0);
- WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0);
+ WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
+ WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
}
}
WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
@@ -1160,8 +1217,18 @@
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
+ struct drm_encoder *encoder;
+ bool is_tvcv = false;
- /* TODO color tiling */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ /* find tv std */
+ if (encoder->crtc == crtc) {
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ if (radeon_encoder->active_device &
+ (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+ is_tvcv = true;
+ }
+ }
atombios_disable_ss(crtc);
/* always set DCPLL */
@@ -1170,9 +1237,14 @@
atombios_crtc_set_pll(crtc, adjusted_mode);
atombios_enable_ss(crtc);
- if (ASIC_IS_AVIVO(rdev))
+ if (ASIC_IS_DCE4(rdev))
atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
- else {
+ else if (ASIC_IS_AVIVO(rdev)) {
+ if (is_tvcv)
+ atombios_crtc_set_timing(crtc, adjusted_mode);
+ else
+ atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
+ } else {
atombios_crtc_set_timing(crtc, adjusted_mode);
if (radeon_crtc->crtc_id == 0)
atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 36e0d4b..4e7778d 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -610,7 +610,7 @@
enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
else
enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
- if (dig_connector->linkb)
+ if (dig->linkb)
enc_id |= ATOM_DP_CONFIG_LINK_B;
else
enc_id |= ATOM_DP_CONFIG_LINK_A;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 957d506..2f93d46 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -675,6 +675,43 @@
return 0;
}
+static int evergreen_cp_start(struct radeon_device *rdev)
+{
+ int r;
+ uint32_t cp_me;
+
+ r = radeon_ring_lock(rdev, 7);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ return r;
+ }
+ radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
+ radeon_ring_write(rdev, 0x1);
+ radeon_ring_write(rdev, 0x0);
+ radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
+ radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_unlock_commit(rdev);
+
+ cp_me = 0xff;
+ WREG32(CP_ME_CNTL, cp_me);
+
+ r = radeon_ring_lock(rdev, 4);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ return r;
+ }
+ /* init some VGT regs */
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(rdev, (VGT_VERTEX_REUSE_BLOCK_CNTL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(rdev, 0xe);
+ radeon_ring_write(rdev, 0x10);
+ radeon_ring_unlock_commit(rdev);
+
+ return 0;
+}
+
int evergreen_cp_resume(struct radeon_device *rdev)
{
u32 tmp;
@@ -719,7 +756,7 @@
rdev->cp.rptr = RREG32(CP_RB_RPTR);
rdev->cp.wptr = RREG32(CP_RB_WPTR);
- r600_cp_start(rdev);
+ evergreen_cp_start(rdev);
rdev->cp.ready = true;
r = radeon_ring_test(rdev);
if (r) {
@@ -1100,7 +1137,7 @@
WREG32(RCU_IND_INDEX, 0x203);
efuse_straps_3 = RREG32(RCU_IND_DATA);
- efuse_box_bit_127_124 = (u8)(efuse_straps_3 & 0xF0000000) >> 28;
+ efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);
switch(efuse_box_bit_127_124) {
case 0x0:
@@ -1123,14 +1160,25 @@
EVERGREEN_MAX_BACKENDS_MASK));
break;
}
- } else
- gb_backend_map =
- evergreen_get_tile_pipe_to_backend_map(rdev,
- rdev->config.evergreen.max_tile_pipes,
- rdev->config.evergreen.max_backends,
- ((EVERGREEN_MAX_BACKENDS_MASK <<
- rdev->config.evergreen.max_backends) &
- EVERGREEN_MAX_BACKENDS_MASK));
+ } else {
+ switch (rdev->family) {
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ gb_backend_map = 0x66442200;
+ break;
+ case CHIP_JUNIPER:
+ gb_backend_map = 0x00006420;
+ break;
+ default:
+ gb_backend_map =
+ evergreen_get_tile_pipe_to_backend_map(rdev,
+ rdev->config.evergreen.max_tile_pipes,
+ rdev->config.evergreen.max_backends,
+ ((EVERGREEN_MAX_BACKENDS_MASK <<
+ rdev->config.evergreen.max_backends) &
+ EVERGREEN_MAX_BACKENDS_MASK));
+ }
+ }
rdev->config.evergreen.tile_config = gb_addr_config;
WREG32(GB_BACKEND_MAP, gb_backend_map);
@@ -1359,6 +1407,7 @@
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
r600_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
@@ -1472,7 +1521,7 @@
{
u32 tmp;
- WREG32(CP_INT_CNTL, 0);
+ WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
WREG32(GRBM_INT_CNTL, 0);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -2054,11 +2103,6 @@
*/
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
- /* Initialize clocks */
- r = radeon_clocks_init(rdev);
- if (r) {
- return r;
- }
r = evergreen_startup(rdev);
if (r) {
@@ -2164,9 +2208,6 @@
radeon_surface_init(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- r = radeon_clocks_init(rdev);
- if (r)
- return r;
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
@@ -2236,7 +2277,6 @@
evergreen_pcie_gart_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_clocks_fini(rdev);
radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index e817a0b..e594223 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1030,6 +1030,7 @@
return r;
}
rdev->cp.ready = true;
+ rdev->mc.active_vram_size = rdev->mc.real_vram_size;
return 0;
}
@@ -1047,6 +1048,7 @@
void r100_cp_disable(struct radeon_device *rdev)
{
/* Disable ring */
+ rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
rdev->cp.ready = false;
WREG32(RADEON_CP_CSQ_MODE, 0);
WREG32(RADEON_CP_CSQ_CNTL, 0);
@@ -2020,18 +2022,7 @@
return false;
}
elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
- if (elapsed >= 3000) {
- /* very likely the improbable case where current
- * rptr is equal to last recorded, a while ago, rptr
- * this is more likely a false positive update tracking
- * information which should force us to be recall at
- * latter point
- */
- lockup->last_cp_rptr = cp->rptr;
- lockup->last_jiffies = jiffies;
- return false;
- }
- if (elapsed >= 1000) {
+ if (elapsed >= 10000) {
dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
return true;
}
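
The old two-stage heuristic (re-arm the tracker after 3000 ms, report a lockup after 1000 ms) could flag long-running but healthy workloads; it is replaced by a single ten-second threshold. The elapsed-time computation is unchanged and is safe across jiffies wraparound because the subtraction is unsigned; the resulting check is simply:

	/* ms since the CP read pointer last moved */
	elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
	if (elapsed >= 10000)	/* ten seconds without CP progress */
		return true;	/* report a real lockup */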
@@ -2306,6 +2297,7 @@
/* FIXME we don't use the second aperture yet when we could use it */
if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
if (rdev->flags & RADEON_IS_IGP) {
uint32_t tom;
@@ -3308,13 +3300,14 @@
unsigned long size;
unsigned prim_walk;
unsigned nverts;
+ unsigned num_cb = track->num_cb;
- for (i = 0; i < track->num_cb; i++) {
+ if (!track->zb_cb_clear && !track->color_channel_mask &&
+ !track->blend_read_enable)
+ num_cb = 0;
+
+ for (i = 0; i < num_cb; i++) {
if (track->cb[i].robj == NULL) {
- if (!(track->zb_cb_clear || track->color_channel_mask ||
- track->blend_read_enable)) {
- continue;
- }
DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index d0ebae9..7b65e4ef 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1248,6 +1248,7 @@
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
r600_vram_gtt_location(rdev, &rdev->mc);
if (rdev->flags & RADEON_IS_IGP) {
@@ -1917,6 +1918,7 @@
*/
void r600_cp_stop(struct radeon_device *rdev)
{
+ rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
}
@@ -2119,10 +2121,7 @@
}
radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
radeon_ring_write(rdev, 0x1);
- if (rdev->family >= CHIP_CEDAR) {
- radeon_ring_write(rdev, 0x0);
- radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
- } else if (rdev->family >= CHIP_RV770) {
+ if (rdev->family >= CHIP_RV770) {
radeon_ring_write(rdev, 0x0);
radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
} else {
@@ -2489,11 +2488,6 @@
*/
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
- /* Initialize clocks */
- r = radeon_clocks_init(rdev);
- if (r) {
- return r;
- }
r = r600_startup(rdev);
if (r) {
@@ -2586,9 +2580,6 @@
radeon_surface_init(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- r = radeon_clocks_init(rdev);
- if (r)
- return r;
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
@@ -2663,7 +2654,6 @@
radeon_agp_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_clocks_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
@@ -2741,7 +2731,7 @@
if (i < rdev->usec_timeout) {
DRM_INFO("ib test succeeded in %u usecs\n", i);
} else {
- DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n",
+ DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
scratch, tmp);
r = -EINVAL;
}
@@ -2922,7 +2912,7 @@
{
u32 tmp;
- WREG32(CP_INT_CNTL, 0);
+ WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
WREG32(GRBM_INT_CNTL, 0);
WREG32(DxMODE_INT_MASK, 0);
if (ASIC_IS_DCE3(rdev)) {
@@ -3540,8 +3530,9 @@
/* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
* rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
*/
- if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) {
- void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
+ if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
+ rdev->vram_scratch.ptr) {
+ void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
u32 tmp;
WREG32(HDP_DEBUG1, 0);
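
The r7xx HDP-flush workaround previously read back through the GART table's VRAM mapping; it now uses a dedicated scratch page (rdev->vram_scratch, declared in the radeon.h hunk below) and is skipped entirely until that page exists. A sketch of the full sequence, assuming the readl-based readback used by the surrounding code:

	/* r7xx erratum: poke HDP_DEBUG1, then read one dword back from
	 * VRAM so the flush actually completes */
	WREG32(HDP_DEBUG1, 0);
	tmp = readl((void __iomem *)rdev->vram_scratch.ptr);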
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index d13622a..3473c00 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -1,3 +1,28 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
@@ -507,6 +532,7 @@
memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4);
radeon_bo_kunmap(rdev->r600_blit.shader_obj);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ rdev->mc.active_vram_size = rdev->mc.real_vram_size;
return 0;
}
@@ -514,6 +540,7 @@
{
int r;
+ rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
if (rdev->r600_blit.shader_obj == NULL)
return;
/* If we can't reserve the bo, unref should be enough to destroy
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.h b/drivers/gpu/drm/radeon/r600_blit_shaders.h
index fdc3b37..f437d36 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.h
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.h
@@ -1,3 +1,27 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
#ifndef R600_BLIT_SHADERS_H
#define R600_BLIT_SHADERS_H
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index d886494..250a3a9 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -1170,9 +1170,8 @@
/* using get ib will give us the offset into the mipmap bo */
word0 = radeon_get_ib_value(p, idx + 3) << 8;
if ((mipmap_size + word0) > radeon_bo_size(mipmap)) {
- dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
- w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture));
- return -EINVAL;
+ /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
+ w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture));*/
}
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 3dfcfa3..9ff38c9 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -344,6 +344,7 @@
* about vram size near mc fb location */
u64 mc_vram_size;
u64 visible_vram_size;
+ u64 active_vram_size;
u64 gtt_size;
u64 gtt_start;
u64 gtt_end;
@@ -1013,6 +1014,11 @@
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
+/* VRAM scratch page for HDP bug */
+struct r700_vram_scratch {
+ struct radeon_bo *robj;
+ volatile uint32_t *ptr;
+};
/*
* Core structure, functions and helpers.
@@ -1079,6 +1085,7 @@
const struct firmware *pfp_fw; /* r6/700 PFP firmware */
const struct firmware *rlc_fw; /* r6/700 RLC firmware */
struct r600_blit r600_blit;
+ struct r700_vram_scratch vram_scratch;
int msi_enabled; /* msi enabled */
struct r600_ih ih; /* r6/700 interrupt ring */
struct workqueue_struct *wq;
@@ -1333,8 +1340,6 @@
extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
extern void radeon_update_display_priority(struct radeon_device *rdev);
extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
-extern int radeon_clocks_init(struct radeon_device *rdev);
-extern void radeon_clocks_fini(struct radeon_device *rdev);
extern void radeon_scratch_init(struct radeon_device *rdev);
extern void radeon_surface_init(struct radeon_device *rdev);
extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index f40dfb7..bd2f33e 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -156,7 +156,13 @@
}
mode.mode = info.mode;
- agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
+ /* chips with the agp to pcie bridge don't have the AGP_STATUS register
+	 * Just use whatever mode the host sets up.
+ */
+ if (rdev->family <= CHIP_RV350)
+ agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
+ else
+ agp_status = mode.mode;
is_v3 = !!(agp_status & RADEON_AGPv3_MODE);
if (is_v3) {
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 646f96f..25e1dd1 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -733,6 +733,7 @@
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
@@ -857,21 +858,3 @@
return 0;
}
-/*
- * Wrapper around modesetting bits. Move to radeon_clocks.c?
- */
-int radeon_clocks_init(struct radeon_device *rdev)
-{
- int r;
-
- r = radeon_static_clocks_init(rdev->ddev);
- if (r) {
- return r;
- }
- DRM_INFO("Clocks initialized !\n");
- return 0;
-}
-
-void radeon_clocks_fini(struct radeon_device *rdev)
-{
-}
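
radeon_clocks_init() was only a wrapper around radeon_static_clocks_init() plus its legacy clock-gating quirks, and radeon_clocks_fini() was empty, so both are removed from every ASIC init/fini path above. Default engine/memory clocks are instead seeded when the BIOS clock tables are parsed; a sketch of the new init flow, with the call names taken from this patch:

	radeon_get_clock_info(rdev->ddev);	/* parses BIOS tables and now also
						 * seeds default_sclk/default_mclk
						 * and pm.current_sclk/current_mclk */
	/* ... no separate radeon_clocks_init() step ... */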
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 6d30868..8e43dda 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -32,11 +32,11 @@
/* from radeon_encoder.c */
extern uint32_t
-radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device,
- uint8_t dac);
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
+ uint8_t dac);
extern void radeon_link_encoder_connector(struct drm_device *dev);
extern void
-radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id,
+radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
uint32_t supported_device);
/* from radeon_connector.c */
@@ -46,14 +46,14 @@
uint32_t supported_device,
int connector_type,
struct radeon_i2c_bus_rec *i2c_bus,
- bool linkb, uint32_t igp_lane_info,
+ uint32_t igp_lane_info,
uint16_t connector_object_id,
struct radeon_hpd *hpd,
struct radeon_router *router);
/* from radeon_legacy_encoder.c */
extern void
-radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id,
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
uint32_t supported_device);
union atom_supported_devices {
@@ -85,6 +85,19 @@
for (i = 0; i < num_indices; i++) {
gpio = &i2c_info->asGPIO_Info[i];
+ /* some evergreen boards have bad data for this entry */
+ if (ASIC_IS_DCE4(rdev)) {
+ if ((i == 7) &&
+ (gpio->usClkMaskRegisterIndex == 0x1936) &&
+ (gpio->sucI2cId.ucAccess == 0)) {
+ gpio->sucI2cId.ucAccess = 0x97;
+ gpio->ucDataMaskShift = 8;
+ gpio->ucDataEnShift = 8;
+ gpio->ucDataY_Shift = 8;
+ gpio->ucDataA_Shift = 8;
+ }
+ }
+
if (gpio->sucI2cId.ucAccess == id) {
i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
@@ -147,6 +160,20 @@
for (i = 0; i < num_indices; i++) {
gpio = &i2c_info->asGPIO_Info[i];
i2c.valid = false;
+
+ /* some evergreen boards have bad data for this entry */
+ if (ASIC_IS_DCE4(rdev)) {
+ if ((i == 7) &&
+ (gpio->usClkMaskRegisterIndex == 0x1936) &&
+ (gpio->sucI2cId.ucAccess == 0)) {
+ gpio->sucI2cId.ucAccess = 0x97;
+ gpio->ucDataMaskShift = 8;
+ gpio->ucDataEnShift = 8;
+ gpio->ucDataY_Shift = 8;
+ gpio->ucDataA_Shift = 8;
+ }
+ }
+
i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
@@ -226,6 +253,8 @@
struct radeon_hpd hpd;
u32 reg;
+ memset(&hpd, 0, sizeof(struct radeon_hpd));
+
if (ASIC_IS_DCE4(rdev))
reg = EVERGREEN_DC_GPIO_HPD_A;
else
@@ -288,6 +317,15 @@
*connector_type = DRM_MODE_CONNECTOR_DVID;
}
+ /* MSI K9A2GM V2/V3 board has no HDMI or DVI */
+ if ((dev->pdev->device == 0x796e) &&
+ (dev->pdev->subsystem_vendor == 0x1462) &&
+ (dev->pdev->subsystem_device == 0x7302)) {
+ if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) ||
+ (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
+ return false;
+ }
+
/* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
if ((dev->pdev->device == 0x7941) &&
(dev->pdev->subsystem_vendor == 0x147b) &&
@@ -477,7 +515,6 @@
int i, j, k, path_size, device_support;
int connector_type;
u16 igp_lane_info, conn_id, connector_object_id;
- bool linkb;
struct radeon_i2c_bus_rec ddc_bus;
struct radeon_router router;
struct radeon_gpio_rec gpio;
@@ -510,7 +547,7 @@
addr += path_size;
path = (ATOM_DISPLAY_OBJECT_PATH *) addr;
path_size += le16_to_cpu(path->usSize);
- linkb = false;
+
if (device_support & le16_to_cpu(path->usDeviceTag)) {
uint8_t con_obj_id, con_obj_num, con_obj_type;
@@ -601,13 +638,10 @@
OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
- if (grph_obj_num == 2)
- linkb = true;
- else
- linkb = false;
+ u16 encoder_obj = le16_to_cpu(path->usGraphicObjIds[j]);
radeon_add_atom_encoder(dev,
- grph_obj_id,
+ encoder_obj,
le16_to_cpu
(path->
usDeviceTag));
@@ -744,7 +778,7 @@
le16_to_cpu(path->
usDeviceTag),
connector_type, &ddc_bus,
- linkb, igp_lane_info,
+ igp_lane_info,
connector_object_id,
&hpd,
&router);
@@ -933,13 +967,13 @@
if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom)
radeon_add_atom_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
(1 << i),
dac),
(1 << i));
else
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
(1 << i),
dac),
(1 << i));
@@ -996,7 +1030,7 @@
bios_connectors[i].
connector_type,
&bios_connectors[i].ddc_bus,
- false, 0,
+ 0,
connector_object_id,
&bios_connectors[i].hpd,
&router);
@@ -1183,7 +1217,7 @@
return true;
break;
case 2:
- if (igp_info->info_2.ucMemoryType & 0x0f)
+ if (igp_info->info_2.ulBootUpSidePortClock)
return true;
break;
default:
@@ -1305,6 +1339,7 @@
union lvds_info *lvds_info;
uint8_t frev, crev;
struct radeon_encoder_atom_dig *lvds = NULL;
+ int encoder_enum = (encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
@@ -1368,6 +1403,12 @@
}
encoder->native_mode = lvds->native_mode;
+
+ if (encoder_enum == 2)
+ lvds->linkb = true;
+ else
+ lvds->linkb = false;
+
}
return lvds;
}
@@ -1517,39 +1558,39 @@
switch (tv_info->ucTV_BootUpDefaultStandard) {
case ATOM_TV_NTSC:
tv_std = TV_STD_NTSC;
- DRM_INFO("Default TV standard: NTSC\n");
+ DRM_DEBUG_KMS("Default TV standard: NTSC\n");
break;
case ATOM_TV_NTSCJ:
tv_std = TV_STD_NTSC_J;
- DRM_INFO("Default TV standard: NTSC-J\n");
+ DRM_DEBUG_KMS("Default TV standard: NTSC-J\n");
break;
case ATOM_TV_PAL:
tv_std = TV_STD_PAL;
- DRM_INFO("Default TV standard: PAL\n");
+ DRM_DEBUG_KMS("Default TV standard: PAL\n");
break;
case ATOM_TV_PALM:
tv_std = TV_STD_PAL_M;
- DRM_INFO("Default TV standard: PAL-M\n");
+ DRM_DEBUG_KMS("Default TV standard: PAL-M\n");
break;
case ATOM_TV_PALN:
tv_std = TV_STD_PAL_N;
- DRM_INFO("Default TV standard: PAL-N\n");
+ DRM_DEBUG_KMS("Default TV standard: PAL-N\n");
break;
case ATOM_TV_PALCN:
tv_std = TV_STD_PAL_CN;
- DRM_INFO("Default TV standard: PAL-CN\n");
+ DRM_DEBUG_KMS("Default TV standard: PAL-CN\n");
break;
case ATOM_TV_PAL60:
tv_std = TV_STD_PAL_60;
- DRM_INFO("Default TV standard: PAL-60\n");
+ DRM_DEBUG_KMS("Default TV standard: PAL-60\n");
break;
case ATOM_TV_SECAM:
tv_std = TV_STD_SECAM;
- DRM_INFO("Default TV standard: SECAM\n");
+ DRM_DEBUG_KMS("Default TV standard: SECAM\n");
break;
default:
tv_std = TV_STD_NTSC;
- DRM_INFO("Unknown TV standard; defaulting to NTSC\n");
+ DRM_DEBUG_KMS("Unknown TV standard; defaulting to NTSC\n");
break;
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 14448a7..5249af8 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -327,6 +327,14 @@
mpll->max_feedback_div = 0xff;
mpll->best_vco = 0;
+ if (!rdev->clock.default_sclk)
+ rdev->clock.default_sclk = radeon_get_engine_clock(rdev);
+ if ((!rdev->clock.default_mclk) && rdev->asic->get_memory_clock)
+ rdev->clock.default_mclk = radeon_get_memory_clock(rdev);
+
+ rdev->pm.current_sclk = rdev->clock.default_sclk;
+ rdev->pm.current_mclk = rdev->clock.default_mclk;
+
}
/* 10 khz */
@@ -897,53 +905,3 @@
}
}
-static void radeon_apply_clock_quirks(struct radeon_device *rdev)
-{
- uint32_t tmp;
-
- /* XXX make sure engine is idle */
-
- if (rdev->family < CHIP_RS600) {
- tmp = RREG32_PLL(RADEON_SCLK_CNTL);
- if (ASIC_IS_R300(rdev) || ASIC_IS_RV100(rdev))
- tmp |= RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_VIP;
- if ((rdev->family == CHIP_RV250)
- || (rdev->family == CHIP_RV280))
- tmp |=
- RADEON_SCLK_FORCE_DISP1 | RADEON_SCLK_FORCE_DISP2;
- if ((rdev->family == CHIP_RV350)
- || (rdev->family == CHIP_RV380))
- tmp |= R300_SCLK_FORCE_VAP;
- if (rdev->family == CHIP_R420)
- tmp |= R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX;
- WREG32_PLL(RADEON_SCLK_CNTL, tmp);
- } else if (rdev->family < CHIP_R600) {
- tmp = RREG32_PLL(AVIVO_CP_DYN_CNTL);
- tmp |= AVIVO_CP_FORCEON;
- WREG32_PLL(AVIVO_CP_DYN_CNTL, tmp);
-
- tmp = RREG32_PLL(AVIVO_E2_DYN_CNTL);
- tmp |= AVIVO_E2_FORCEON;
- WREG32_PLL(AVIVO_E2_DYN_CNTL, tmp);
-
- tmp = RREG32_PLL(AVIVO_IDCT_DYN_CNTL);
- tmp |= AVIVO_IDCT_FORCEON;
- WREG32_PLL(AVIVO_IDCT_DYN_CNTL, tmp);
- }
-}
-
-int radeon_static_clocks_init(struct drm_device *dev)
-{
- struct radeon_device *rdev = dev->dev_private;
-
- /* XXX make sure engine is idle */
-
- if (radeon_dynclks != -1) {
- if (radeon_dynclks) {
- if (rdev->asic->set_clock_gating)
- radeon_set_clock_gating(rdev, 1);
- }
- }
- radeon_apply_clock_quirks(rdev);
- return 0;
-}
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 885dcfa..7b7ea26 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -39,8 +39,8 @@
/* from radeon_encoder.c */
extern uint32_t
-radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device,
- uint8_t dac);
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
+ uint8_t dac);
extern void radeon_link_encoder_connector(struct drm_device *dev);
/* from radeon_connector.c */
@@ -55,7 +55,7 @@
/* from radeon_legacy_encoder.c */
extern void
-radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id,
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
uint32_t supported_device);
/* old legacy ATI BIOS routines */
@@ -913,47 +913,47 @@
switch (RBIOS8(tv_info + 7) & 0xf) {
case 1:
tv_std = TV_STD_NTSC;
- DRM_INFO("Default TV standard: NTSC\n");
+ DRM_DEBUG_KMS("Default TV standard: NTSC\n");
break;
case 2:
tv_std = TV_STD_PAL;
- DRM_INFO("Default TV standard: PAL\n");
+ DRM_DEBUG_KMS("Default TV standard: PAL\n");
break;
case 3:
tv_std = TV_STD_PAL_M;
- DRM_INFO("Default TV standard: PAL-M\n");
+ DRM_DEBUG_KMS("Default TV standard: PAL-M\n");
break;
case 4:
tv_std = TV_STD_PAL_60;
- DRM_INFO("Default TV standard: PAL-60\n");
+ DRM_DEBUG_KMS("Default TV standard: PAL-60\n");
break;
case 5:
tv_std = TV_STD_NTSC_J;
- DRM_INFO("Default TV standard: NTSC-J\n");
+ DRM_DEBUG_KMS("Default TV standard: NTSC-J\n");
break;
case 6:
tv_std = TV_STD_SCART_PAL;
- DRM_INFO("Default TV standard: SCART-PAL\n");
+ DRM_DEBUG_KMS("Default TV standard: SCART-PAL\n");
break;
default:
tv_std = TV_STD_NTSC;
- DRM_INFO
+ DRM_DEBUG_KMS
("Unknown TV standard; defaulting to NTSC\n");
break;
}
switch ((RBIOS8(tv_info + 9) >> 2) & 0x3) {
case 0:
- DRM_INFO("29.498928713 MHz TV ref clk\n");
+ DRM_DEBUG_KMS("29.498928713 MHz TV ref clk\n");
break;
case 1:
- DRM_INFO("28.636360000 MHz TV ref clk\n");
+ DRM_DEBUG_KMS("28.636360000 MHz TV ref clk\n");
break;
case 2:
- DRM_INFO("14.318180000 MHz TV ref clk\n");
+ DRM_DEBUG_KMS("14.318180000 MHz TV ref clk\n");
break;
case 3:
- DRM_INFO("27.000000000 MHz TV ref clk\n");
+ DRM_DEBUG_KMS("27.000000000 MHz TV ref clk\n");
break;
default:
break;
@@ -1324,7 +1324,7 @@
if (tmds_info) {
ver = RBIOS8(tmds_info);
- DRM_INFO("DFP table revision: %d\n", ver);
+ DRM_DEBUG_KMS("DFP table revision: %d\n", ver);
if (ver == 3) {
n = RBIOS8(tmds_info + 5) + 1;
if (n > 4)
@@ -1408,7 +1408,7 @@
offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
if (offset) {
ver = RBIOS8(offset);
- DRM_INFO("External TMDS Table revision: %d\n", ver);
+ DRM_DEBUG_KMS("External TMDS Table revision: %d\n", ver);
tmds->slave_addr = RBIOS8(offset + 4 + 2);
tmds->slave_addr >>= 1; /* 7 bit addressing */
gpio = RBIOS8(offset + 4 + 3);
@@ -1485,6 +1485,11 @@
/* PowerMac8,1 ? */
/* imac g5 isight */
rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT;
+ } else if ((rdev->pdev->device == 0x4a48) &&
+ (rdev->pdev->subsystem_vendor == 0x1002) &&
+ (rdev->pdev->subsystem_device == 0x4a48)) {
+ /* Mac X800 */
+ rdev->mode_info.connector_table = CT_MAC_X800;
} else
#endif /* CONFIG_PPC_PMAC */
#ifdef CONFIG_PPC64
@@ -1505,7 +1510,7 @@
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -1520,7 +1525,7 @@
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_LCD1_SUPPORT,
0),
ATOM_DEVICE_LCD1_SUPPORT);
@@ -1535,7 +1540,7 @@
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -1550,12 +1555,12 @@
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_1;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_DFP1_SUPPORT,
0),
ATOM_DEVICE_DFP1_SUPPORT);
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
@@ -1571,7 +1576,7 @@
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -1588,7 +1593,7 @@
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1607,7 +1612,7 @@
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_LCD1_SUPPORT,
0),
ATOM_DEVICE_LCD1_SUPPORT);
@@ -1619,7 +1624,7 @@
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
@@ -1631,7 +1636,7 @@
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1648,7 +1653,7 @@
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_LCD1_SUPPORT,
0),
ATOM_DEVICE_LCD1_SUPPORT);
@@ -1660,12 +1665,12 @@
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_2; /* ??? */
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_DFP2_SUPPORT,
0),
ATOM_DEVICE_DFP2_SUPPORT);
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -1680,7 +1685,7 @@
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1697,7 +1702,7 @@
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_LCD1_SUPPORT,
0),
ATOM_DEVICE_LCD1_SUPPORT);
@@ -1709,12 +1714,12 @@
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_1; /* ??? */
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_DFP1_SUPPORT,
0),
ATOM_DEVICE_DFP1_SUPPORT);
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -1728,7 +1733,7 @@
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1745,7 +1750,7 @@
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_LCD1_SUPPORT,
0),
ATOM_DEVICE_LCD1_SUPPORT);
@@ -1757,7 +1762,7 @@
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -1769,7 +1774,7 @@
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1786,12 +1791,12 @@
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
hpd.hpd = RADEON_HPD_2; /* ??? */
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_DFP2_SUPPORT,
0),
ATOM_DEVICE_DFP2_SUPPORT);
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
@@ -1806,7 +1811,7 @@
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1823,12 +1828,12 @@
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
hpd.hpd = RADEON_HPD_1; /* ??? */
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_DFP1_SUPPORT,
0),
ATOM_DEVICE_DFP1_SUPPORT);
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
@@ -1842,7 +1847,7 @@
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1859,7 +1864,7 @@
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
hpd.hpd = RADEON_HPD_1; /* ??? */
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_DFP1_SUPPORT,
0),
ATOM_DEVICE_DFP1_SUPPORT);
@@ -1871,7 +1876,7 @@
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
@@ -1883,7 +1888,7 @@
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1900,7 +1905,7 @@
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -1912,7 +1917,7 @@
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
@@ -1924,7 +1929,7 @@
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
@@ -1941,7 +1946,7 @@
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -1952,7 +1957,7 @@
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
@@ -1961,6 +1966,48 @@
CONNECTOR_OBJECT_ID_VGA,
&hpd);
break;
+ case CT_MAC_X800:
+ DRM_INFO("Connector Table: %d (mac x800)\n",
+ rdev->mode_info.connector_table);
+ /* DVI - primary dac, internal tmds */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+ hpd.hpd = RADEON_HPD_1; /* ??? */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_DFP1_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP1_SUPPORT);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ radeon_add_legacy_connector(dev, 0,
+ ATOM_DEVICE_DFP1_SUPPORT |
+ ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
+ /* DVI - tv dac, dvo */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+ hpd.hpd = RADEON_HPD_2; /* ??? */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_DFP2_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP2_SUPPORT);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT2_SUPPORT,
+ 2),
+ ATOM_DEVICE_CRT2_SUPPORT);
+ radeon_add_legacy_connector(dev, 1,
+ ATOM_DEVICE_DFP2_SUPPORT |
+ ATOM_DEVICE_CRT2_SUPPORT,
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
+ &hpd);
+ break;
default:
DRM_INFO("Connector table: %d (invalid)\n",
rdev->mode_info.connector_table);
@@ -2109,7 +2156,7 @@
else
devices = ATOM_DEVICE_DFP1_SUPPORT;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id
+ radeon_get_encoder_enum
(dev, devices, 0),
devices);
radeon_add_legacy_connector(dev, i, devices,
@@ -2123,7 +2170,7 @@
if (tmp & 0x1) {
devices = ATOM_DEVICE_CRT2_SUPPORT;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id
+ radeon_get_encoder_enum
(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
@@ -2131,7 +2178,7 @@
} else {
devices = ATOM_DEVICE_CRT1_SUPPORT;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id
+ radeon_get_encoder_enum
(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
@@ -2151,7 +2198,7 @@
if (tmp & 0x1) {
devices |= ATOM_DEVICE_CRT2_SUPPORT;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id
+ radeon_get_encoder_enum
(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
@@ -2159,7 +2206,7 @@
} else {
devices |= ATOM_DEVICE_CRT1_SUPPORT;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id
+ radeon_get_encoder_enum
(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
@@ -2168,7 +2215,7 @@
if ((tmp >> 4) & 0x1) {
devices |= ATOM_DEVICE_DFP2_SUPPORT;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id
+ radeon_get_encoder_enum
(dev,
ATOM_DEVICE_DFP2_SUPPORT,
0),
@@ -2177,7 +2224,7 @@
} else {
devices |= ATOM_DEVICE_DFP1_SUPPORT;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id
+ radeon_get_encoder_enum
(dev,
ATOM_DEVICE_DFP1_SUPPORT,
0),
@@ -2202,7 +2249,7 @@
connector_object_id = CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
}
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id
+ radeon_get_encoder_enum
(dev, devices, 0),
devices);
radeon_add_legacy_connector(dev, i, devices,
@@ -2215,7 +2262,7 @@
case CONNECTOR_CTV_LEGACY:
case CONNECTOR_STV_LEGACY:
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id
+ radeon_get_encoder_enum
(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
@@ -2242,12 +2289,12 @@
DRM_DEBUG_KMS("Found DFP table, assuming DVI connector\n");
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_DFP1_SUPPORT,
0),
ATOM_DEVICE_DFP1_SUPPORT);
@@ -2268,7 +2315,7 @@
DRM_DEBUG_KMS("Found CRT table, assuming VGA connector\n");
if (crt_info) {
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
@@ -2297,7 +2344,7 @@
COMBIOS_LCD_DDC_INFO_TABLE);
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id(dev,
+ radeon_get_encoder_enum(dev,
ATOM_DEVICE_LCD1_SUPPORT,
0),
ATOM_DEVICE_LCD1_SUPPORT);
@@ -2351,7 +2398,7 @@
hpd.hpd = RADEON_HPD_NONE;
ddc_i2c.valid = false;
radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id
+ radeon_get_encoder_enum
(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 47c4b27..ecc1a8f 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -481,7 +481,8 @@
return MODE_OK;
}
-static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connector)
+static enum drm_connector_status
+radeon_lvds_detect(struct drm_connector *connector, bool force)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
@@ -594,7 +595,8 @@
return MODE_OK;
}
-static enum drm_connector_status radeon_vga_detect(struct drm_connector *connector)
+static enum drm_connector_status
+radeon_vga_detect(struct drm_connector *connector, bool force)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder;
@@ -691,7 +693,8 @@
return MODE_OK;
}
-static enum drm_connector_status radeon_tv_detect(struct drm_connector *connector)
+static enum drm_connector_status
+radeon_tv_detect(struct drm_connector *connector, bool force)
{
struct drm_encoder *encoder;
struct drm_encoder_helper_funcs *encoder_funcs;
@@ -748,7 +751,8 @@
* we have to check if this analog encoder is shared with anyone else (TV)
* if its shared we have to set the other connector to disconnected.
*/
-static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connector)
+static enum drm_connector_status
+radeon_dvi_detect(struct drm_connector *connector, bool force)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder = NULL;
@@ -972,32 +976,35 @@
return ret;
}
-static enum drm_connector_status radeon_dp_detect(struct drm_connector *connector)
+static enum drm_connector_status
+radeon_dp_detect(struct drm_connector *connector, bool force)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
enum drm_connector_status ret = connector_status_disconnected;
struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
- u8 sink_type;
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
}
- sink_type = radeon_dp_getsinktype(radeon_connector);
- if ((sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
- (sink_type == CONNECTOR_OBJECT_ID_eDP)) {
- if (radeon_dp_getdpcd(radeon_connector)) {
- radeon_dig_connector->dp_sink_type = sink_type;
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ /* eDP is always DP */
+ radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
+ if (radeon_dp_getdpcd(radeon_connector))
ret = connector_status_connected;
- }
} else {
- if (radeon_ddc_probe(radeon_connector)) {
- radeon_dig_connector->dp_sink_type = sink_type;
- ret = connector_status_connected;
+ radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
+ if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+ if (radeon_dp_getdpcd(radeon_connector))
+ ret = connector_status_connected;
+ } else {
+ if (radeon_ddc_probe(radeon_connector))
+ ret = connector_status_connected;
}
}
+ radeon_connector_update_scratch_regs(connector, ret);
return ret;
}
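
All the .detect hooks gain a force parameter, apparently tracking the drm core's updated connector-detect prototype in this kernel cycle; the radeon hooks accept it without using it yet. The assumed core signature:

	/* drm_connector_funcs.detect (assumed prototype) */
	enum drm_connector_status (*detect)(struct drm_connector *connector,
					    bool force);

The DP path is also restructured so eDP connectors are always treated as DP sinks, and the scratch registers are now updated on every detect.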
@@ -1037,7 +1044,6 @@
uint32_t supported_device,
int connector_type,
struct radeon_i2c_bus_rec *i2c_bus,
- bool linkb,
uint32_t igp_lane_info,
uint16_t connector_object_id,
struct radeon_hpd *hpd,
@@ -1050,10 +1056,16 @@
uint32_t subpixel_order = SubPixelNone;
bool shared_ddc = false;
- /* fixme - tv/cv/din */
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
return;
+	/* if the user selected tv=0 don't try to add the connector */
+ if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
+ (connector_type == DRM_MODE_CONNECTOR_Composite) ||
+ (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) &&
+ (radeon_tv == 0))
+ return;
+
/* see if we already added it */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector);
@@ -1128,7 +1140,6 @@
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
if (!radeon_dig_connector)
goto failed;
- radeon_dig_connector->linkb = linkb;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
@@ -1158,7 +1169,6 @@
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
if (!radeon_dig_connector)
goto failed;
- radeon_dig_connector->linkb = linkb;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
@@ -1182,7 +1192,6 @@
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
if (!radeon_dig_connector)
goto failed;
- radeon_dig_connector->linkb = linkb;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
@@ -1211,25 +1220,22 @@
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Composite:
case DRM_MODE_CONNECTOR_9PinDIN:
- if (radeon_tv == 1) {
- drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
- drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
- radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
- rdev->mode_info.load_detect_property,
- 1);
- drm_connector_attach_property(&radeon_connector->base,
- rdev->mode_info.tv_std_property,
- radeon_atombios_get_tv_info(rdev));
- /* no HPD on analog connectors */
- radeon_connector->hpd.hpd = RADEON_HPD_NONE;
- }
+ drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
+ radeon_connector->dac_load_detect = true;
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.load_detect_property,
+ 1);
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.tv_std_property,
+ radeon_atombios_get_tv_info(rdev));
+ /* no HPD on analog connectors */
+ radeon_connector->hpd.hpd = RADEON_HPD_NONE;
break;
case DRM_MODE_CONNECTOR_LVDS:
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
if (!radeon_dig_connector)
goto failed;
- radeon_dig_connector->linkb = linkb;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
@@ -1275,10 +1281,16 @@
struct radeon_connector *radeon_connector;
uint32_t subpixel_order = SubPixelNone;
- /* fixme - tv/cv/din */
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
return;
+	/* if the user selected tv=0 don't try to add the connector */
+ if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
+ (connector_type == DRM_MODE_CONNECTOR_Composite) ||
+ (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) &&
+ (radeon_tv == 0))
+ return;
+
/* see if we already added it */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector);
@@ -1350,26 +1362,24 @@
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Composite:
case DRM_MODE_CONNECTOR_9PinDIN:
- if (radeon_tv == 1) {
- drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
- drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
- radeon_connector->dac_load_detect = true;
- /* RS400,RC410,RS480 chipset seems to report a lot
- * of false positive on load detect, we haven't yet
- * found a way to make load detect reliable on those
- * chipset, thus just disable it for TV.
- */
- if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
- radeon_connector->dac_load_detect = false;
- drm_connector_attach_property(&radeon_connector->base,
- rdev->mode_info.load_detect_property,
- radeon_connector->dac_load_detect);
- drm_connector_attach_property(&radeon_connector->base,
- rdev->mode_info.tv_std_property,
- radeon_combios_get_tv_info(rdev));
- /* no HPD on analog connectors */
- radeon_connector->hpd.hpd = RADEON_HPD_NONE;
- }
+ drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
+ radeon_connector->dac_load_detect = true;
+		/* RS400,RC410,RS480 chipsets seem to report a lot
+		 * of false positives on load detect; we haven't yet
+		 * found a way to make load detect reliable on those
+		 * chipsets, thus just disable it for TV.
+ */
+ if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
+ radeon_connector->dac_load_detect = false;
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.load_detect_property,
+ radeon_connector->dac_load_detect);
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.tv_std_property,
+ radeon_combios_get_tv_info(rdev));
+ /* no HPD on analog connectors */
+ radeon_connector->hpd.hpd = RADEON_HPD_NONE;
break;
case DRM_MODE_CONNECTOR_LVDS:
drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 5731fc9..3eef567 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -203,6 +203,7 @@
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_device *rdev = crtc->dev->dev_private;
int xorigin = 0, yorigin = 0;
+ int w = radeon_crtc->cursor_width;
if (x < 0)
xorigin = -x + 1;
@@ -213,22 +214,7 @@
if (yorigin >= CURSOR_HEIGHT)
yorigin = CURSOR_HEIGHT - 1;
- radeon_lock_cursor(crtc, true);
- if (ASIC_IS_DCE4(rdev)) {
- /* cursors are offset into the total surface */
- x += crtc->x;
- y += crtc->y;
- DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
-
- /* XXX: check if evergreen has the same issues as avivo chips */
- WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset,
- ((xorigin ? 0 : x) << 16) |
- (yorigin ? 0 : y));
- WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
- WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
- ((radeon_crtc->cursor_width - 1) << 16) | (radeon_crtc->cursor_height - 1));
- } else if (ASIC_IS_AVIVO(rdev)) {
- int w = radeon_crtc->cursor_width;
+ if (ASIC_IS_AVIVO(rdev)) {
int i = 0;
struct drm_crtc *crtc_p;
@@ -260,7 +246,17 @@
if (w <= 0)
w = 1;
}
+ }
+ radeon_lock_cursor(crtc, true);
+ if (ASIC_IS_DCE4(rdev)) {
+ WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset,
+ ((xorigin ? 0 : x) << 16) |
+ (yorigin ? 0 : y));
+ WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
+ WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
+ ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
+ } else if (ASIC_IS_AVIVO(rdev)) {
WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset,
((xorigin ? 0 : x) << 16) |
(yorigin ? 0 : y));
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 4f7a170..256d204 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -199,7 +199,7 @@
mc->mc_vram_size = mc->aper_size;
}
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
- if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_end <= mc->gtt_end) {
+ if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
mc->real_vram_size = mc->aper_size;
mc->mc_vram_size = mc->aper_size;
@@ -293,30 +293,20 @@
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
fixed20_12 a;
- u32 sclk, mclk;
+ u32 sclk = rdev->pm.current_sclk;
+ u32 mclk = rdev->pm.current_mclk;
+
+	/* sclk/mclk in MHz */
+ a.full = dfixed_const(100);
+ rdev->pm.sclk.full = dfixed_const(sclk);
+ rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
+ rdev->pm.mclk.full = dfixed_const(mclk);
+ rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
if (rdev->flags & RADEON_IS_IGP) {
- sclk = radeon_get_engine_clock(rdev);
- mclk = rdev->clock.default_mclk;
-
- a.full = dfixed_const(100);
- rdev->pm.sclk.full = dfixed_const(sclk);
- rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
- rdev->pm.mclk.full = dfixed_const(mclk);
- rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
-
a.full = dfixed_const(16);
/* core_bandwidth = sclk(Mhz) * 16 */
rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
- } else {
- sclk = radeon_get_engine_clock(rdev);
- mclk = radeon_get_memory_clock(rdev);
-
- a.full = dfixed_const(100);
- rdev->pm.sclk.full = dfixed_const(sclk);
- rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
- rdev->pm.mclk.full = dfixed_const(mclk);
- rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
}
}
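
With current_sclk/current_mclk seeded at init, the IGP and discrete paths can share one fixed-point conversion. Radeon stores clocks in 10 kHz units, so dividing by 100 yields MHz; fixed20_12 carries 12 fractional bits. A worked example under those assumptions:

	/* current_sclk = 60000 (10 kHz units)
	 * dfixed_const(60000) = 60000 << 12
	 * divided by dfixed_const(100) -> 600.0, i.e. 600 MHz (600 << 12) */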
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 5764f4d..b92d2f2 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -349,6 +349,8 @@
DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]);
if (devices & ATOM_DEVICE_DFP5_SUPPORT)
DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]);
+ if (devices & ATOM_DEVICE_DFP6_SUPPORT)
+ DRM_INFO(" DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]);
if (devices & ATOM_DEVICE_TV1_SUPPORT)
DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]);
if (devices & ATOM_DEVICE_CV_SUPPORT)
@@ -841,8 +843,9 @@
{
struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
- if (radeon_fb->obj)
+ if (radeon_fb->obj) {
drm_gem_object_unreference_unlocked(radeon_fb->obj);
+ }
drm_framebuffer_cleanup(fb);
kfree(radeon_fb);
}
@@ -1094,6 +1097,18 @@
radeon_i2c_fini(rdev);
}
+static bool is_hdtv_mode(struct drm_display_mode *mode)
+{
+	/* try to guess if this is a tv or a monitor */
+ if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
+ (mode->vdisplay == 576) || /* 576p */
+ (mode->vdisplay == 720) || /* 720p */
+ (mode->vdisplay == 1080)) /* 1080p */
+ return true;
+ else
+ return false;
+}
+
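is_hdtv_mode() is a heuristic: modes with TV-style active heights are assumed to be televisions, which (together with the interlace check in the hunk below) keeps auto-underscan borders off ordinary desktop monitors. Illustrative results:

	/* 1280x720  -> true  (720p)
	 * 1920x1080 -> true  (1080p)
	 * 720x480   -> true  (480p)
	 * 1680x1050 -> false (desktop monitor mode) */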
bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -1128,20 +1143,22 @@
radeon_crtc->rmx_type = radeon_encoder->rmx_type;
else
radeon_crtc->rmx_type = RMX_OFF;
- src_v = crtc->mode.vdisplay;
- dst_v = radeon_crtc->native_mode.vdisplay;
- src_h = crtc->mode.hdisplay;
- dst_h = radeon_crtc->native_mode.vdisplay;
/* copy native mode */
memcpy(&radeon_crtc->native_mode,
&radeon_encoder->native_mode,
sizeof(struct drm_display_mode));
+ src_v = crtc->mode.vdisplay;
+ dst_v = radeon_crtc->native_mode.vdisplay;
+ src_h = crtc->mode.hdisplay;
+ dst_h = radeon_crtc->native_mode.hdisplay;
/* fix up for overscan on hdmi */
if (ASIC_IS_AVIVO(rdev) &&
+ (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
- drm_detect_hdmi_monitor(radeon_connector->edid)))) {
+ drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ is_hdtv_mode(mode)))) {
radeon_crtc->h_border = (mode->hdisplay >> 5) + 16;
radeon_crtc->v_border = (mode->vdisplay >> 5) + 16;
radeon_crtc->rmx_type = RMX_FULL;
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 263c809..2c293e8 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -81,7 +81,7 @@
}
uint32_t
-radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
{
struct radeon_device *rdev = dev->dev_private;
uint32_t ret = 0;
@@ -97,59 +97,59 @@
if ((rdev->family == CHIP_RS300) ||
(rdev->family == CHIP_RS400) ||
(rdev->family == CHIP_RS480))
- ret = ENCODER_OBJECT_ID_INTERNAL_DAC2;
+ ret = ENCODER_INTERNAL_DAC2_ENUM_ID1;
else if (ASIC_IS_AVIVO(rdev))
- ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1;
+ ret = ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1;
else
- ret = ENCODER_OBJECT_ID_INTERNAL_DAC1;
+ ret = ENCODER_INTERNAL_DAC1_ENUM_ID1;
break;
case 2: /* dac b */
if (ASIC_IS_AVIVO(rdev))
- ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2;
+ ret = ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1;
else {
/*if (rdev->family == CHIP_R200)
- ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
+ ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
else*/
- ret = ENCODER_OBJECT_ID_INTERNAL_DAC2;
+ ret = ENCODER_INTERNAL_DAC2_ENUM_ID1;
}
break;
case 3: /* external dac */
if (ASIC_IS_AVIVO(rdev))
- ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1;
+ ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1;
else
- ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
+ ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
break;
}
break;
case ATOM_DEVICE_LCD1_SUPPORT:
if (ASIC_IS_AVIVO(rdev))
- ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1;
+ ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1;
else
- ret = ENCODER_OBJECT_ID_INTERNAL_LVDS;
+ ret = ENCODER_INTERNAL_LVDS_ENUM_ID1;
break;
case ATOM_DEVICE_DFP1_SUPPORT:
if ((rdev->family == CHIP_RS300) ||
(rdev->family == CHIP_RS400) ||
(rdev->family == CHIP_RS480))
- ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
+ ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
else if (ASIC_IS_AVIVO(rdev))
- ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1;
+ ret = ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1;
else
- ret = ENCODER_OBJECT_ID_INTERNAL_TMDS1;
+ ret = ENCODER_INTERNAL_TMDS1_ENUM_ID1;
break;
case ATOM_DEVICE_LCD2_SUPPORT:
case ATOM_DEVICE_DFP2_SUPPORT:
if ((rdev->family == CHIP_RS600) ||
(rdev->family == CHIP_RS690) ||
(rdev->family == CHIP_RS740))
- ret = ENCODER_OBJECT_ID_INTERNAL_DDI;
+ ret = ENCODER_INTERNAL_DDI_ENUM_ID1;
else if (ASIC_IS_AVIVO(rdev))
- ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1;
+ ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1;
else
- ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
+ ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
break;
case ATOM_DEVICE_DFP3_SUPPORT:
- ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1;
+ ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1;
break;
}
@@ -228,32 +228,6 @@
return NULL;
}
-static struct radeon_connector_atom_dig *
-radeon_get_atom_connector_priv_from_encoder(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector;
- struct radeon_connector_atom_dig *dig_connector;
-
- if (!rdev->is_atom_bios)
- return NULL;
-
- connector = radeon_get_connector_for_encoder(encoder);
- if (!connector)
- return NULL;
-
- radeon_connector = to_radeon_connector(connector);
-
- if (!radeon_connector->con_priv)
- return NULL;
-
- dig_connector = radeon_connector->con_priv;
-
- return dig_connector;
-}
-
void radeon_panel_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
@@ -512,14 +486,12 @@
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- struct radeon_connector_atom_dig *dig_connector =
- radeon_get_atom_connector_priv_from_encoder(encoder);
union lvds_encoder_control args;
int index = 0;
int hdmi_detected = 0;
uint8_t frev, crev;
- if (!dig || !dig_connector)
+ if (!dig)
return;
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
@@ -562,7 +534,7 @@
if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
args.v1.ucMisc |= (1 << 1);
} else {
- if (dig_connector->linkb)
+ if (dig->linkb)
args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
if (radeon_encoder->pixel_clock > 165000)
args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
@@ -601,7 +573,7 @@
args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
}
} else {
- if (dig_connector->linkb)
+ if (dig->linkb)
args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
if (radeon_encoder->pixel_clock > 165000)
args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
@@ -623,6 +595,8 @@
int
atombios_get_encoder_mode(struct drm_encoder *encoder)
{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
@@ -636,9 +610,13 @@
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
- if (drm_detect_hdmi_monitor(radeon_connector->edid))
- return ATOM_ENCODER_MODE_HDMI;
- else if (radeon_connector->use_digital)
+ if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ /* fix me */
+ if (ASIC_IS_DCE4(rdev))
+ return ATOM_ENCODER_MODE_DVI;
+ else
+ return ATOM_ENCODER_MODE_HDMI;
+ } else if (radeon_connector->use_digital)
return ATOM_ENCODER_MODE_DVI;
else
return ATOM_ENCODER_MODE_CRT;
@@ -646,9 +624,13 @@
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
default:
- if (drm_detect_hdmi_monitor(radeon_connector->edid))
- return ATOM_ENCODER_MODE_HDMI;
- else
+ if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ /* fix me */
+ if (ASIC_IS_DCE4(rdev))
+ return ATOM_ENCODER_MODE_DVI;
+ else
+ return ATOM_ENCODER_MODE_HDMI;
+ } else
return ATOM_ENCODER_MODE_DVI;
break;
case DRM_MODE_CONNECTOR_LVDS:
@@ -660,9 +642,13 @@
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return ATOM_ENCODER_MODE_DP;
- else if (drm_detect_hdmi_monitor(radeon_connector->edid))
- return ATOM_ENCODER_MODE_HDMI;
- else
+ else if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ /* fix me */
+ if (ASIC_IS_DCE4(rdev))
+ return ATOM_ENCODER_MODE_DVI;
+ else
+ return ATOM_ENCODER_MODE_HDMI;
+ } else
return ATOM_ENCODER_MODE_DVI;
break;
case DRM_MODE_CONNECTOR_DVIA:
@@ -729,13 +715,24 @@
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- struct radeon_connector_atom_dig *dig_connector =
- radeon_get_atom_connector_priv_from_encoder(encoder);
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
union dig_encoder_control args;
int index = 0;
uint8_t frev, crev;
+ int dp_clock = 0;
+ int dp_lane_count = 0;
- if (!dig || !dig_connector)
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_connector->con_priv;
+
+ dp_clock = dig_connector->dp_clock;
+ dp_lane_count = dig_connector->dp_lane_count;
+ }
+
+ /* no dig encoder assigned */
+ if (dig->dig_encoder == -1)
return;
memset(&args, 0, sizeof(args));
@@ -757,9 +754,9 @@
args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
- if (dig_connector->dp_clock == 270000)
+ if (dp_clock == 270000)
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
- args.v1.ucLaneNum = dig_connector->dp_lane_count;
+ args.v1.ucLaneNum = dp_lane_count;
} else if (radeon_encoder->pixel_clock > 165000)
args.v1.ucLaneNum = 8;
else
@@ -781,7 +778,7 @@
args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
break;
}
- if (dig_connector->linkb)
+ if (dig->linkb)
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
else
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
@@ -804,38 +801,47 @@
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- struct radeon_connector_atom_dig *dig_connector =
- radeon_get_atom_connector_priv_from_encoder(encoder);
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector;
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
union dig_transmitter_control args;
int index = 0;
uint8_t frev, crev;
bool is_dp = false;
int pll_id = 0;
+ int dp_clock = 0;
+ int dp_lane_count = 0;
+ int connector_object_id = 0;
+ int igp_lane_info = 0;
- if (!dig || !dig_connector)
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_connector->con_priv;
+
+ dp_clock = dig_connector->dp_clock;
+ dp_lane_count = dig_connector->dp_lane_count;
+ connector_object_id =
+ (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+ igp_lane_info = dig_connector->igp_lane_info;
+ }
+
+ /* no dig encoder assigned */
+ if (dig->dig_encoder == -1)
return;
- connector = radeon_get_connector_for_encoder(encoder);
- radeon_connector = to_radeon_connector(connector);
-
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
is_dp = true;
memset(&args, 0, sizeof(args));
- if (ASIC_IS_DCE32(rdev) || ASIC_IS_DCE4(rdev))
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
- else {
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- index = GetIndexIntoMasterTable(COMMAND, DIG1TransmitterControl);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- index = GetIndexIntoMasterTable(COMMAND, DIG2TransmitterControl);
- break;
- }
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl);
+ break;
}
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
@@ -843,14 +849,14 @@
args.v1.ucAction = action;
if (action == ATOM_TRANSMITTER_ACTION_INIT) {
- args.v1.usInitInfo = radeon_connector->connector_object_id;
+ args.v1.usInitInfo = connector_object_id;
} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
args.v1.asMode.ucLaneSel = lane_num;
args.v1.asMode.ucLaneSet = lane_set;
} else {
if (is_dp)
args.v1.usPixelClock =
- cpu_to_le16(dig_connector->dp_clock / 10);
+ cpu_to_le16(dp_clock / 10);
else if (radeon_encoder->pixel_clock > 165000)
args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
else
@@ -858,13 +864,13 @@
}
if (ASIC_IS_DCE4(rdev)) {
if (is_dp)
- args.v3.ucLaneNum = dig_connector->dp_lane_count;
+ args.v3.ucLaneNum = dp_lane_count;
else if (radeon_encoder->pixel_clock > 165000)
args.v3.ucLaneNum = 8;
else
args.v3.ucLaneNum = 4;
- if (dig_connector->linkb) {
+ if (dig->linkb) {
args.v3.acConfig.ucLinkSel = 1;
args.v3.acConfig.ucEncoderSel = 1;
}
@@ -904,7 +910,7 @@
}
} else if (ASIC_IS_DCE32(rdev)) {
args.v2.acConfig.ucEncoderSel = dig->dig_encoder;
- if (dig_connector->linkb)
+ if (dig->linkb)
args.v2.acConfig.ucLinkSel = 1;
switch (radeon_encoder->encoder_id) {
@@ -938,23 +944,23 @@
if ((rdev->flags & RADEON_IS_IGP) &&
(radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
if (is_dp || (radeon_encoder->pixel_clock <= 165000)) {
- if (dig_connector->igp_lane_info & 0x1)
+ if (igp_lane_info & 0x1)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
- else if (dig_connector->igp_lane_info & 0x2)
+ else if (igp_lane_info & 0x2)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
- else if (dig_connector->igp_lane_info & 0x4)
+ else if (igp_lane_info & 0x4)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
- else if (dig_connector->igp_lane_info & 0x8)
+ else if (igp_lane_info & 0x8)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
} else {
- if (dig_connector->igp_lane_info & 0x3)
+ if (igp_lane_info & 0x3)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
- else if (dig_connector->igp_lane_info & 0xc)
+ else if (igp_lane_info & 0xc)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
}
}
- if (dig_connector->linkb)
+ if (dig->linkb)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
else
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
@@ -1072,8 +1078,7 @@
if (is_dig) {
switch (mode) {
case DRM_MODE_DPMS_ON:
- if (!ASIC_IS_DCE4(rdev))
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
@@ -1085,8 +1090,7 @@
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- if (!ASIC_IS_DCE4(rdev))
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF);
@@ -1290,24 +1294,22 @@
uint32_t dig_enc_in_use = 0;
if (ASIC_IS_DCE4(rdev)) {
- struct radeon_connector_atom_dig *dig_connector =
- radeon_get_atom_connector_priv_from_encoder(encoder);
-
+ dig = radeon_encoder->enc_priv;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- if (dig_connector->linkb)
+ if (dig->linkb)
return 1;
else
return 0;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- if (dig_connector->linkb)
+ if (dig->linkb)
return 3;
else
return 2;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- if (dig_connector->linkb)
+ if (dig->linkb)
return 5;
else
return 4;
@@ -1641,6 +1643,7 @@
struct radeon_encoder_atom_dig *
radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
{
+ int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
if (!dig)
@@ -1650,11 +1653,16 @@
dig->coherent_mode = true;
dig->dig_encoder = -1;
+ if (encoder_enum == 2)
+ dig->linkb = true;
+ else
+ dig->linkb = false;
+
return dig;
}
void
-radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
+radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder;
@@ -1663,7 +1671,7 @@
/* see if we already added it */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
- if (radeon_encoder->encoder_id == encoder_id) {
+ if (radeon_encoder->encoder_enum == encoder_enum) {
radeon_encoder->devices |= supported_device;
return;
}
@@ -1691,7 +1699,8 @@
radeon_encoder->enc_priv = NULL;
- radeon_encoder->encoder_id = encoder_id;
+ radeon_encoder->encoder_enum = encoder_enum;
+ radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
radeon_encoder->devices = supported_device;
radeon_encoder->rmx_type = RMX_OFF;
radeon_encoder->underscan_type = UNDERSCAN_OFF;
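The encoder list is now keyed on the full encoder_enum rather than the bare encoder_id, since two encoders can share an object id and differ only in their enum (link A vs. link B). A minimal sketch of the unpacking, assuming the usual ATOM ObjectID layout (object id in the low byte, enum id in bits 8-10; the mask values below are illustrative, not quoted from ObjectID.h):

#include <stdint.h>
#include <stdio.h>

#define OBJECT_ID_SHIFT	0x00	/* assumed layout */
#define OBJECT_ID_MASK	0x00ff
#define ENUM_ID_SHIFT	0x08
#define ENUM_ID_MASK	0x0700

int main(void)
{
	uint32_t encoder_enum = 0x0215;	/* hypothetical: object 0x15, enum 2 */
	uint32_t encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
	uint32_t enum_id = (encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;

	/* enum id 2 selects link B, matching radeon_atombios_set_dig_info() */
	printf("object id 0x%02x, enum %u, linkb=%d\n",
	       (unsigned)encoder_id, (unsigned)enum_id, enum_id == 2);
	return 0;
}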
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index dbf8696..40b0c08 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -94,6 +94,7 @@
ret = radeon_bo_reserve(rbo, false);
if (likely(ret == 0)) {
radeon_bo_kunmap(rbo);
+ radeon_bo_unpin(rbo);
radeon_bo_unreserve(rbo);
}
drm_gem_object_unreference_unlocked(gobj);
@@ -118,7 +119,7 @@
aligned_size = ALIGN(size, PAGE_SIZE);
ret = radeon_gem_object_create(rdev, aligned_size, 0,
RADEON_GEM_DOMAIN_VRAM,
- false, ttm_bo_type_kernel,
+ false, true,
&gobj);
if (ret) {
printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
@@ -325,8 +326,6 @@
{
struct fb_info *info;
struct radeon_framebuffer *rfb = &rfbdev->rfb;
- struct radeon_bo *rbo;
- int r;
if (rfbdev->helper.fbdev) {
info = rfbdev->helper.fbdev;
@@ -338,14 +337,8 @@
}
if (rfb->obj) {
- rbo = rfb->obj->driver_private;
- r = radeon_bo_reserve(rbo, false);
- if (likely(r == 0)) {
- radeon_bo_kunmap(rbo);
- radeon_bo_unpin(rbo);
- radeon_bo_unreserve(rbo);
- }
- drm_gem_object_unreference_unlocked(rfb->obj);
+ radeonfb_destroy_pinned_object(rfb->obj);
+ rfb->obj = NULL;
}
drm_fb_helper_fini(&rfbdev->helper);
drm_framebuffer_cleanup(&rfb->base);
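The open-coded fbdev teardown above is folded into radeonfb_destroy_pinned_object(). The ordering it preserves, reduced to a standalone sketch with stand-in types (the real calls are the radeon_bo_* and drm_gem_object_unreference_unlocked() ones shown in the hunks):

#include <stdio.h>

struct bo { int pinned, mapped, refs; };

static int bo_reserve(struct bo *b) { (void)b; return 0; }
static void bo_unreserve(struct bo *b) { (void)b; }
static void bo_kunmap(struct bo *b) { b->mapped = 0; }
static void bo_unpin(struct bo *b) { b->pinned = 0; }
static void bo_unref(struct bo *b)
{
	if (--b->refs == 0)
		printf("bo freed\n");
}

/* Teardown order the helper enforces: the object must be reserved
 * before the CPU mapping and the pin are dropped; the caller's
 * reference is dropped last. */
static void destroy_pinned_object(struct bo *b)
{
	if (bo_reserve(b) == 0) {
		bo_kunmap(b);
		bo_unpin(b);
		bo_unreserve(b);
	}
	bo_unref(b);
}

int main(void)
{
	struct bo b = { .pinned = 1, .mapped = 1, .refs = 1 };
	destroy_pinned_object(&b);
	return 0;
}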
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index c578f26..d1e595d 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -201,11 +201,11 @@
return r;
}
r = drm_gem_handle_create(filp, gobj, &handle);
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_unreference_unlocked(gobj);
if (r) {
- drm_gem_object_unreference_unlocked(gobj);
return r;
}
- drm_gem_object_handle_unreference_unlocked(gobj);
args->handle = handle;
return 0;
}
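The reordering above fixes the reference flow for GEM object creation: drm_gem_handle_create() takes its own reference, so the allocation reference is dropped unconditionally right after the call, whether or not handle creation succeeded. The pattern in isolation (hypothetical names):

#include <stdio.h>

struct obj { int refs; };

static void obj_get(struct obj *o) { o->refs++; }
static void obj_put(struct obj *o)
{
	if (--o->refs == 0)
		printf("object freed\n");
}

/* stand-in for drm_gem_handle_create(): on success the handle takes
 * its own reference */
static int handle_create(struct obj *o, int fail)
{
	if (fail)
		return -1;
	obj_get(o);
	return 0;
}

int main(void)
{
	struct obj o = { .refs = 1 };	/* reference from allocation */
	int r = handle_create(&o, 0);

	/* drop the allocation reference unconditionally; on success the
	 * handle keeps the object alive, on failure this frees it */
	obj_put(&o);
	return r;
}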
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index bfd2ce5..6a13ee3 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -99,6 +99,13 @@
}
}
+ /* switch the pads to ddc mode */
+ if (ASIC_IS_DCE3(rdev) && rec->hw_capable) {
+ temp = RREG32(rec->mask_clk_reg);
+ temp &= ~(1 << 16);
+ WREG32(rec->mask_clk_reg, temp);
+ }
+
/* clear the output pin values */
temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask;
WREG32(rec->a_clk_reg, temp);
@@ -206,7 +213,7 @@
static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
{
- u32 sclk = radeon_get_engine_clock(rdev);
+ u32 sclk = rdev->pm.current_sclk;
u32 prescale = 0;
u32 nm;
u8 n, m, loop;
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 059bfa4..a108c7e 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -121,11 +121,12 @@
* chips. Disable MSI on them for now.
*/
if ((rdev->family >= CHIP_RV380) &&
- (!(rdev->flags & RADEON_IS_IGP))) {
+ (!(rdev->flags & RADEON_IS_IGP)) &&
+ (!(rdev->flags & RADEON_IS_AGP))) {
int ret = pci_enable_msi(rdev->pdev);
if (!ret) {
rdev->msi_enabled = 1;
- DRM_INFO("radeon: using MSI.\n");
+ dev_info(rdev->dev, "radeon: using MSI.\n");
}
}
rdev->irq.installed = true;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index b1c8ace..8fbbe1c 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -161,6 +161,7 @@
DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
return -EINVAL;
}
+ break;
case RADEON_INFO_WANT_HYPERZ:
/* The "value" here is both an input and output parameter.
* If the input value is 1, filp requests hyper-z access.
@@ -202,6 +203,10 @@
*/
int radeon_driver_firstopen_kms(struct drm_device *dev)
{
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (rdev->powered_down)
+ return -EINVAL;
return 0;
}
@@ -323,45 +328,45 @@
struct drm_ioctl_desc radeon_ioctls_kms[] = {
- DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
/* KMS */
- DRM_IOCTL_DEF(DRM_RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
};
int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
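DRM_IOCTL_DEF keyed the table on a hand-maintained DRM_RADEON_* index; DRM_IOCTL_DEF_DRV pastes everything from one short token, so the table slot, the driver command and the full ioctl encoding cannot drift apart. Judging from the analogous VMW_IOCTL_DEF rework later in this series, the new macro expands roughly as below; the exact drm.h definition and field names may differ, and the encodings here are stubbed so the sketch compiles standalone:

#include <stdio.h>

#define DRM_COMMAND_BASE	0x40
#define DRM_IOCTL_NR(n)		((n) & 0xff)
#define DRM_RADEON_CP_IDLE	0x46			/* hypothetical nr */
#define DRM_IOCTL_RADEON_CP_IDLE (0xc0000000 | DRM_RADEON_CP_IDLE)

struct drm_ioctl_desc {
	unsigned int cmd;	/* driver command, DRM_##ioctl */
	int flags;
	void *func;
	unsigned int cmd_drv;	/* full encoding, DRM_IOCTL_##ioctl */
};

/* Likely shape of the new macro: one token generates the index, the
 * command and the verification encoding. */
#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags)			\
	[DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] =	\
		{DRM_##ioctl, _flags, _func, DRM_IOCTL_##ioctl}

static struct drm_ioctl_desc table[256] = {
	DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, NULL, 0),
};

int main(void)
{
	int slot = DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_IDLE) - DRM_COMMAND_BASE;

	printf("slot %d cmd_drv %#x\n", slot, table[slot].cmd_drv);
	return 0;
}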
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 989df51..305049a 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -272,7 +272,7 @@
if (!ref_div)
return 1;
- vcoFreq = ((unsigned)ref_freq & fb_div) / ref_div;
+ vcoFreq = ((unsigned)ref_freq * fb_div) / ref_div;
/*
* This is horribly crude: the VCO frequency range is divided into
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index b8149cb..0b83970 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -1345,7 +1345,7 @@
}
void
-radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder;
@@ -1354,7 +1354,7 @@
/* see if we already added it */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
- if (radeon_encoder->encoder_id == encoder_id) {
+ if (radeon_encoder->encoder_enum == encoder_enum) {
radeon_encoder->devices |= supported_device;
return;
}
@@ -1374,7 +1374,8 @@
radeon_encoder->enc_priv = NULL;
- radeon_encoder->encoder_id = encoder_id;
+ radeon_encoder->encoder_enum = encoder_enum;
+ radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
radeon_encoder->devices = supported_device;
radeon_encoder->rmx_type = RMX_OFF;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 5bbc086..17a6602 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -204,7 +204,7 @@
/* mostly for macs, but really any system without connector tables */
enum radeon_connector_table {
- CT_NONE,
+ CT_NONE = 0,
CT_GENERIC,
CT_IBOOK,
CT_POWERBOOK_EXTERNAL,
@@ -215,6 +215,7 @@
CT_IMAC_G5_ISIGHT,
CT_EMAC,
CT_RN50_POWER,
+ CT_MAC_X800,
};
enum radeon_dvo_chip {
@@ -342,6 +343,7 @@
};
struct radeon_encoder_atom_dig {
+ bool linkb;
/* atom dig */
bool coherent_mode;
int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */
@@ -360,6 +362,7 @@
struct radeon_encoder {
struct drm_encoder base;
+ uint32_t encoder_enum;
uint32_t encoder_id;
uint32_t devices;
uint32_t active_device;
@@ -378,7 +381,6 @@
struct radeon_connector_atom_dig {
uint32_t igp_lane_info;
- bool linkb;
/* displayport */
struct radeon_i2c_chan *dp_i2c_bus;
u8 dpcd[8];
@@ -599,7 +601,6 @@
void radeon_enc_destroy(struct drm_encoder *encoder);
void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
void radeon_combios_asic_init(struct drm_device *dev);
-extern int radeon_static_clocks_init(struct drm_device *dev);
bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 0afd1e6..b3b5306 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -69,7 +69,7 @@
u32 c = 0;
rbo->placement.fpfn = 0;
- rbo->placement.lpfn = 0;
+ rbo->placement.lpfn = rbo->rdev->mc.active_vram_size >> PAGE_SHIFT;
rbo->placement.placement = rbo->placements;
rbo->placement.busy_placement = rbo->placements;
if (domain & RADEON_GEM_DOMAIN_VRAM)
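Clamping lpfn to active_vram_size keeps buffer placement below the VRAM the CP can currently address; lpfn is a page-frame number, so the byte size is shifted down by PAGE_SHIFT. The conversion in isolation (4 KiB pages and the VRAM size are assumptions for the sketch):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumes 4 KiB pages */

int main(void)
{
	uint64_t active_vram_size = 256ull << 20;	/* hypothetical 256 MiB */
	uint64_t lpfn = active_vram_size >> PAGE_SHIFT;

	/* objects may now only be placed in page frames [fpfn, lpfn) */
	printf("lpfn = %llu pages\n", (unsigned long long)lpfn);
	return 0;
}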
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 353998d..3481bc7 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -124,11 +124,8 @@
int r;
r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
- if (unlikely(r != 0)) {
- if (r != -ERESTARTSYS)
- dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo);
+ if (unlikely(r != 0))
return r;
- }
spin_lock(&bo->tbo.lock);
if (mem_type)
*mem_type = bo->tbo.mem.mem_type;
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 58038f5c..f87efec 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -226,6 +226,11 @@
{
int i;
+ /* no need to take locks, etc. if nothing's going to change */
+ if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
+ (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
+ return;
+
mutex_lock(&rdev->ddev->struct_mutex);
mutex_lock(&rdev->vram_mutex);
mutex_lock(&rdev->cp.mutex);
@@ -632,8 +637,6 @@
}
radeon_hwmon_fini(rdev);
- if (rdev->pm.i2c_bus)
- radeon_i2c_destroy(rdev->pm.i2c_bus);
}
void radeon_pm_compute_clocks(struct radeon_device *rdev)
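The early return added above skips the whole mutex cascade when the requested power state already matches the current one; the unlocked read is safe because this path is the one that updates those fields. The shape of the optimization in isolation (a pthread mutex standing in for the kernel mutexes):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int current_state, requested_state;

static void set_power_state(void)
{
	/* nothing to do: bail before taking any locks */
	if (requested_state == current_state)
		return;

	pthread_mutex_lock(&lock);
	current_state = requested_state;
	printf("switched to state %d\n", current_state);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	requested_state = 1;
	set_power_state();	/* takes the lock, switches */
	set_power_state();	/* early-out, no locking */
	return 0;
}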
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index b3ba44c0..4ae5a3d 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -3228,34 +3228,34 @@
}
struct drm_ioctl_desc radeon_ioctls[] = {
- DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH)
+ DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH)
};
int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index cc05b23..51d5f7b 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -693,6 +693,7 @@
rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
base = RREG32_MC(R_000004_MC_FB_LOCATION);
base = G_000004_MC_FB_START(base) << 16;
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 3e3f757..4dc2a87 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -157,6 +157,7 @@
rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
base = G_000100_MC_FB_START(base) << 16;
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index f1c79681..9490da7 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -267,6 +267,7 @@
*/
void r700_cp_stop(struct radeon_device *rdev)
{
+ rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
}
@@ -905,6 +906,54 @@
}
+static int rv770_vram_scratch_init(struct radeon_device *rdev)
+{
+ int r;
+ u64 gpu_addr;
+
+ if (rdev->vram_scratch.robj == NULL) {
+ r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
+ true, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->vram_scratch.robj);
+ if (r) {
+ return r;
+ }
+ }
+
+ r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->vram_scratch.robj,
+ RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
+ if (r) {
+ radeon_bo_unreserve(rdev->vram_scratch.robj);
+ return r;
+ }
+ r = radeon_bo_kmap(rdev->vram_scratch.robj,
+ (void **)&rdev->vram_scratch.ptr);
+ if (r)
+ radeon_bo_unpin(rdev->vram_scratch.robj);
+ radeon_bo_unreserve(rdev->vram_scratch.robj);
+
+ return r;
+}
+
+static void rv770_vram_scratch_fini(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->vram_scratch.robj == NULL) {
+ return;
+ }
+ r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->vram_scratch.robj);
+ radeon_bo_unpin(rdev->vram_scratch.robj);
+ radeon_bo_unreserve(rdev->vram_scratch.robj);
+ }
+ radeon_bo_unref(&rdev->vram_scratch.robj);
+}
+
int rv770_mc_init(struct radeon_device *rdev)
{
u32 tmp;
@@ -944,6 +993,7 @@
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
r600_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
@@ -970,6 +1020,9 @@
if (r)
return r;
}
+ r = rv770_vram_scratch_init(rdev);
+ if (r)
+ return r;
rv770_gpu_init(rdev);
r = r600_blit_init(rdev);
if (r) {
@@ -1023,11 +1076,6 @@
*/
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
- /* Initialize clocks */
- r = radeon_clocks_init(rdev);
- if (r) {
- return r;
- }
r = rv770_startup(rdev);
if (r) {
@@ -1118,9 +1166,6 @@
radeon_surface_init(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- r = radeon_clocks_init(rdev);
- if (r)
- return r;
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
@@ -1195,9 +1240,9 @@
r600_irq_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
+ rv770_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_clocks_fini(rdev);
radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index 976dc8d..bf5f83e 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -1082,10 +1082,10 @@
}
struct drm_ioctl_desc savage_ioctls[] = {
- DRM_IOCTL_DEF(DRM_SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
};
int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 07d0f29..7fe2b63 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -320,12 +320,12 @@
}
struct drm_ioctl_desc sis_ioctls[] = {
- DRM_IOCTL_DEF(DRM_SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_SIS_FB_FREE, sis_drm_free, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_SIS_AGP_FREE, sis_drm_free, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(SIS_FB_FREE, sis_drm_free, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(SIS_AGP_FREE, sis_drm_free, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
};
int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index cb4cf7e..db809e0 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -442,6 +442,43 @@
}
/**
+ * Call with bo::reserved held and with the lru lock held.
+ * Will release GPU memory type usage on destruction.
+ * This is the place to put in driver-specific hooks.
+ * Will release the bo::reserved lock and the
+ * lru lock on exit.
+ */
+
+static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_global *glob = bo->glob;
+
+ if (bo->ttm) {
+
+ /**
+ * Release the lru_lock, since we don't want to have
+ * an atomic requirement on ttm_tt[unbind|destroy].
+ */
+
+ spin_unlock(&glob->lru_lock);
+ ttm_tt_unbind(bo->ttm);
+ ttm_tt_destroy(bo->ttm);
+ bo->ttm = NULL;
+ spin_lock(&glob->lru_lock);
+ }
+
+ if (bo->mem.mm_node) {
+ drm_mm_put_block(bo->mem.mm_node);
+ bo->mem.mm_node = NULL;
+ }
+
+ atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
+ spin_unlock(&glob->lru_lock);
+}
+
+
+/**
* If bo idle, remove from delayed- and lru lists, and unref.
* If not idle, and already on delayed list, do nothing.
* If not idle, and not on delayed list, put on delayed list,
@@ -456,6 +493,7 @@
int ret;
spin_lock(&bo->lock);
+retry:
(void) ttm_bo_wait(bo, false, false, !remove_all);
if (!bo->sync_obj) {
@@ -464,31 +502,52 @@
spin_unlock(&bo->lock);
spin_lock(&glob->lru_lock);
- put_count = ttm_bo_del_from_lru(bo);
+ ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0);
- ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
- BUG_ON(ret);
- if (bo->ttm)
- ttm_tt_unbind(bo->ttm);
+ /**
+ * Someone else has the object reserved. Bail and retry.
+ */
+
+ if (unlikely(ret == -EBUSY)) {
+ spin_unlock(&glob->lru_lock);
+ spin_lock(&bo->lock);
+ goto requeue;
+ }
+
+ /**
+ * We can re-check for sync object without taking
+ * the bo::lock, since setting the sync object also
+ * requires bo::reserved. A busy object at this point may
+ * be caused by another thread starting an accelerated
+ * eviction.
+ */
+
+ if (unlikely(bo->sync_obj)) {
+ atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
+ spin_unlock(&glob->lru_lock);
+ spin_lock(&bo->lock);
+ if (remove_all)
+ goto retry;
+ else
+ goto requeue;
+ }
+
+ put_count = ttm_bo_del_from_lru(bo);
if (!list_empty(&bo->ddestroy)) {
list_del_init(&bo->ddestroy);
++put_count;
}
- if (bo->mem.mm_node) {
- drm_mm_put_block(bo->mem.mm_node);
- bo->mem.mm_node = NULL;
- }
- spin_unlock(&glob->lru_lock);
- atomic_set(&bo->reserved, 0);
+ ttm_bo_cleanup_memtype_use(bo);
while (put_count--)
kref_put(&bo->list_kref, ttm_bo_ref_bug);
return 0;
}
-
+requeue:
spin_lock(&glob->lru_lock);
if (list_empty(&bo->ddestroy)) {
void *sync_obj = bo->sync_obj;
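The rework turns the destroy path into an explicit three-way decision: destroy when idle and successfully reserved, requeue when the reservation is contended (or when busy without remove_all), and retry from the top when busy with remove_all set. A sketch of just that decision, with the locking and refcounting stripped out:

#include <stdbool.h>
#include <stdio.h>

enum action { DESTROY, RETRY, REQUEUE };

/* reserved_ok: ttm_bo_reserve_locked() did not return -EBUSY
 * busy:        bo->sync_obj was still set after reserving */
static enum action cleanup_decision(bool reserved_ok, bool busy,
				    bool remove_all)
{
	if (!reserved_ok)
		return REQUEUE;	/* someone else holds the reservation */
	if (busy)		/* e.g. a concurrent accelerated eviction */
		return remove_all ? RETRY : REQUEUE;
	return DESTROY;		/* idle and reserved: tear it down */
}

int main(void)
{
	printf("%d %d %d\n",
	       cleanup_decision(true, false, false),	/* 0: DESTROY */
	       cleanup_decision(false, false, false),	/* 2: REQUEUE */
	       cleanup_decision(true, true, true));	/* 1: RETRY */
	return 0;
}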
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 7cffb3e..3451a82 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -351,6 +351,7 @@
INIT_LIST_HEAD(&fbo->lru);
INIT_LIST_HEAD(&fbo->swap);
fbo->vm_node = NULL;
+ atomic_set(&fbo->cpu_writers, 0);
fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
kref_init(&fbo->list_kref);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index ca90479..b1e02ff 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -69,7 +69,7 @@
spinlock_t lock;
bool fill_lock;
struct list_head list;
- int gfp_flags;
+ gfp_t gfp_flags;
unsigned npages;
char *name;
unsigned long nfrees;
@@ -475,7 +475,7 @@
* This function is reentrant if caller updates count depending on number of
* pages returned in pages array.
*/
-static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
+static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
struct page **caching_array;
@@ -666,7 +666,7 @@
{
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
struct page *p = NULL;
- int gfp_flags = GFP_USER;
+ gfp_t gfp_flags = GFP_USER;
int r;
/* set zero flag for page allocation if required */
@@ -818,7 +818,7 @@
return 0;
}
-void ttm_page_alloc_fini()
+void ttm_page_alloc_fini(void)
{
int i;
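Two sparse fixes here: gfp flags are typed gfp_t rather than int, and the empty parameter list becomes (void) — in C, () declares a function with unspecified arguments, not a zero-argument one. gfp_t only bites under sparse, where it carries a bitwise annotation; paraphrasing the kernel's definition (details may differ from include/linux/types.h):

/* Under sparse (__CHECKER__), gfp_t is a distinct "bitwise" type, so
 * mixing it with plain int warns; under a normal compile it is just
 * an unsigned int. */
#ifdef __CHECKER__
#define __bitwise __attribute__((bitwise))
#else
#define __bitwise
#endif
typedef unsigned int __bitwise gfp_t;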
diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c
index 68dda74..cc0ffa9 100644
--- a/drivers/gpu/drm/via/via_dma.c
+++ b/drivers/gpu/drm/via/via_dma.c
@@ -722,20 +722,20 @@
}
struct drm_ioctl_desc via_ioctls[] = {
- DRM_IOCTL_DEF(DRM_VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_VIA_FREEMEM, via_mem_free, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
- DRM_IOCTL_DEF(DRM_VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
- DRM_IOCTL_DEF(DRM_VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
- DRM_IOCTL_DEF(DRM_VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_VIA_DMA_INIT, via_dma_init, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
+ DRM_IOCTL_DEF_DRV(VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_FREEMEM, via_mem_free, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
+ DRM_IOCTL_DEF_DRV(VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
+ DRM_IOCTL_DEF_DRV(VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
+ DRM_IOCTL_DEF_DRV(VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_DMA_INIT, via_dma_init, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
};
int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 9dd395b..a96ed6d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -99,47 +99,47 @@
*/
#define VMW_IOCTL_DEF(ioctl, func, flags) \
- [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func}
+ [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}
/**
* Ioctl definitions.
*/
static struct drm_ioctl_desc vmw_ioctls[] = {
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_GET_PARAM, vmw_getparam_ioctl,
+ VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
+ VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
+ VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_CURSOR_BYPASS,
+ VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
vmw_kms_cursor_bypass_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_CONTROL_STREAM, vmw_overlay_ioctl,
+ VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
+ VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
+ VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
+ VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
+ VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
+ VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
+ VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_REF_SURFACE, vmw_surface_reference_ioctl,
+ VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_EXECBUF, vmw_execbuf_ioctl,
+ VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
+ VMW_IOCTL_DEF(VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
+ VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
DRM_AUTH | DRM_UNLOCKED),
- VMW_IOCTL_DEF(DRM_IOCTL_VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl,
+ VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED)
};
@@ -148,13 +148,16 @@
{0, 0, 0}
};
-static char *vmw_devname = "vmwgfx";
+static int enable_fbdev;
static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
void *ptr);
+MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
+module_param_named(enable_fbdev, enable_fbdev, int, 0600);
+
static void vmw_print_capabilities(uint32_t capabilities)
{
DRM_INFO("Capabilities:\n");
@@ -192,8 +195,6 @@
{
int ret;
- vmw_kms_save_vga(dev_priv);
-
ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
if (unlikely(ret != 0)) {
DRM_ERROR("Unable to initialize FIFO.\n");
@@ -206,9 +207,35 @@
static void vmw_release_device(struct vmw_private *dev_priv)
{
vmw_fifo_release(dev_priv, &dev_priv->fifo);
- vmw_kms_restore_vga(dev_priv);
}
+int vmw_3d_resource_inc(struct vmw_private *dev_priv)
+{
+ int ret = 0;
+
+ mutex_lock(&dev_priv->release_mutex);
+ if (unlikely(dev_priv->num_3d_resources++ == 0)) {
+ ret = vmw_request_device(dev_priv);
+ if (unlikely(ret != 0))
+ --dev_priv->num_3d_resources;
+ }
+ mutex_unlock(&dev_priv->release_mutex);
+ return ret;
+}
+
+
+void vmw_3d_resource_dec(struct vmw_private *dev_priv)
+{
+ int32_t n3d;
+
+ mutex_lock(&dev_priv->release_mutex);
+ if (unlikely(--dev_priv->num_3d_resources == 0))
+ vmw_release_device(dev_priv);
+ n3d = (int32_t) dev_priv->num_3d_resources;
+ mutex_unlock(&dev_priv->release_mutex);
+
+ BUG_ON(n3d < 0);
+}
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
@@ -228,6 +255,7 @@
dev_priv->last_read_sequence = (uint32_t) -100;
mutex_init(&dev_priv->hw_mutex);
mutex_init(&dev_priv->cmdbuf_mutex);
+ mutex_init(&dev_priv->release_mutex);
rwlock_init(&dev_priv->resource_lock);
idr_init(&dev_priv->context_idr);
idr_init(&dev_priv->surface_idr);
@@ -244,6 +272,8 @@
dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
+ dev_priv->enable_fb = enable_fbdev;
+
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
@@ -343,17 +373,6 @@
dev->dev_private = dev_priv;
- if (!dev->devname)
- dev->devname = vmw_devname;
-
- if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
- ret = drm_irq_install(dev);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed installing irq: %d\n", ret);
- goto out_no_irq;
- }
- }
-
ret = pci_request_regions(dev->pdev, "vmwgfx probe");
dev_priv->stealth = (ret != 0);
if (dev_priv->stealth) {
@@ -369,26 +388,52 @@
goto out_no_device;
}
}
- ret = vmw_request_device(dev_priv);
+ ret = vmw_kms_init(dev_priv);
if (unlikely(ret != 0))
- goto out_no_device;
- vmw_kms_init(dev_priv);
+ goto out_no_kms;
vmw_overlay_init(dev_priv);
- vmw_fb_init(dev_priv);
+ if (dev_priv->enable_fb) {
+ ret = vmw_3d_resource_inc(dev_priv);
+ if (unlikely(ret != 0))
+ goto out_no_fifo;
+ vmw_kms_save_vga(dev_priv);
+ vmw_fb_init(dev_priv);
+ DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ?
+ "Detected device 3D availability.\n" :
+ "Detected no device 3D availability.\n");
+ } else {
+ DRM_INFO("Delayed 3D detection since we're not "
+ "running the device in SVGA mode yet.\n");
+ }
+
+ if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
+ ret = drm_irq_install(dev);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed installing irq: %d\n", ret);
+ goto out_no_irq;
+ }
+ }
dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
register_pm_notifier(&dev_priv->pm_nb);
- DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? "Have 3D\n" : "No 3D\n");
-
return 0;
-out_no_device:
- if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
- drm_irq_uninstall(dev_priv->dev);
- if (dev->devname == vmw_devname)
- dev->devname = NULL;
out_no_irq:
+ if (dev_priv->enable_fb) {
+ vmw_fb_close(dev_priv);
+ vmw_kms_restore_vga(dev_priv);
+ vmw_3d_resource_dec(dev_priv);
+ }
+out_no_fifo:
+ vmw_overlay_close(dev_priv);
+ vmw_kms_close(dev_priv);
+out_no_kms:
+ if (dev_priv->stealth)
+ pci_release_region(dev->pdev, 2);
+ else
+ pci_release_regions(dev->pdev);
+out_no_device:
ttm_object_device_release(&dev_priv->tdev);
out_err4:
iounmap(dev_priv->mmio_virt);
@@ -415,19 +460,20 @@
unregister_pm_notifier(&dev_priv->pm_nb);
- vmw_fb_close(dev_priv);
+ if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+ drm_irq_uninstall(dev_priv->dev);
+ if (dev_priv->enable_fb) {
+ vmw_fb_close(dev_priv);
+ vmw_kms_restore_vga(dev_priv);
+ vmw_3d_resource_dec(dev_priv);
+ }
vmw_kms_close(dev_priv);
vmw_overlay_close(dev_priv);
- vmw_release_device(dev_priv);
if (dev_priv->stealth)
pci_release_region(dev->pdev, 2);
else
pci_release_regions(dev->pdev);
- if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
- drm_irq_uninstall(dev_priv->dev);
- if (dev->devname == vmw_devname)
- dev->devname = NULL;
ttm_object_device_release(&dev_priv->tdev);
iounmap(dev_priv->mmio_virt);
drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
@@ -500,7 +546,7 @@
struct drm_ioctl_desc *ioctl =
&vmw_ioctls[nr - DRM_COMMAND_BASE];
- if (unlikely(ioctl->cmd != cmd)) {
+ if (unlikely(ioctl->cmd_drv != cmd)) {
DRM_ERROR("Invalid command format, ioctl %d\n",
nr - DRM_COMMAND_BASE);
return -EINVAL;
@@ -589,6 +635,16 @@
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret = 0;
+ if (!dev_priv->enable_fb) {
+ ret = vmw_3d_resource_inc(dev_priv);
+ if (unlikely(ret != 0))
+ return ret;
+ vmw_kms_save_vga(dev_priv);
+ mutex_lock(&dev_priv->hw_mutex);
+ vmw_write(dev_priv, SVGA_REG_TRACES, 0);
+ mutex_unlock(&dev_priv->hw_mutex);
+ }
+
if (active) {
BUG_ON(active != &dev_priv->fbdev_master);
ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
@@ -617,7 +673,13 @@
return 0;
out_no_active_lock:
- vmw_release_device(dev_priv);
+ if (!dev_priv->enable_fb) {
+ mutex_lock(&dev_priv->hw_mutex);
+ vmw_write(dev_priv, SVGA_REG_TRACES, 1);
+ mutex_unlock(&dev_priv->hw_mutex);
+ vmw_kms_restore_vga(dev_priv);
+ vmw_3d_resource_dec(dev_priv);
+ }
return ret;
}
@@ -645,11 +707,23 @@
ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
+ if (!dev_priv->enable_fb) {
+ ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
+ if (unlikely(ret != 0))
+ DRM_ERROR("Unable to clean VRAM on master drop.\n");
+ mutex_lock(&dev_priv->hw_mutex);
+ vmw_write(dev_priv, SVGA_REG_TRACES, 1);
+ mutex_unlock(&dev_priv->hw_mutex);
+ vmw_kms_restore_vga(dev_priv);
+ vmw_3d_resource_dec(dev_priv);
+ }
+
dev_priv->active_master = &dev_priv->fbdev_master;
ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
ttm_vt_unlock(&dev_priv->fbdev_master.lock);
- vmw_fb_on(dev_priv);
+ if (dev_priv->enable_fb)
+ vmw_fb_on(dev_priv);
}
@@ -722,6 +796,7 @@
.irq_postinstall = vmw_irq_postinstall,
.irq_uninstall = vmw_irq_uninstall,
.irq_handler = vmw_irq_handler,
+ .get_vblank_counter = vmw_get_vblank_counter,
.reclaim_buffers_locked = NULL,
.get_map_ofs = drm_core_get_map_ofs,
.get_reg_ofs = drm_core_get_reg_ofs,
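The driver now brings the SVGA device up lazily: vmw_3d_resource_inc() starts it on the 0-to-1 transition and vmw_3d_resource_dec() releases it on 1-to-0, with fbdev (when enable_fbdev is set, e.g. a hypothetical "modprobe vmwgfx enable_fbdev=1") simply holding one such reference for its lifetime. The counting pattern in isolation — single-threaded here, where the real code serializes both transitions on release_mutex:

#include <stdio.h>

static int num_3d_resources;

static int request_device(void) { printf("SVGA device up\n"); return 0; }
static void release_device(void) { printf("SVGA device down\n"); }

static int resource_inc(void)
{
	int ret = 0;

	if (num_3d_resources++ == 0) {
		ret = request_device();
		if (ret)
			--num_3d_resources;	/* undo on failure */
	}
	return ret;
}

static void resource_dec(void)
{
	if (--num_3d_resources == 0)
		release_device();
}

int main(void)
{
	resource_inc();		/* first user: brings the device up */
	resource_inc();		/* second user: refcount only */
	resource_dec();
	resource_dec();		/* last user: releases the device */
	return 0;
}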
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 429f917..58de639 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -277,6 +277,7 @@
bool stealth;
bool is_opened;
+ bool enable_fb;
/**
* Master management.
@@ -285,6 +286,9 @@
struct vmw_master *active_master;
struct vmw_master fbdev_master;
struct notifier_block pm_nb;
+
+ struct mutex release_mutex;
+ uint32_t num_3d_resources;
};
static inline struct vmw_private *vmw_priv(struct drm_device *dev)
@@ -319,6 +323,9 @@
return val;
}
+int vmw_3d_resource_inc(struct vmw_private *dev_priv);
+void vmw_3d_resource_dec(struct vmw_private *dev_priv);
+
/**
* GMR utilities - vmwgfx_gmr.c
*/
@@ -511,6 +518,7 @@
unsigned bbp, unsigned depth);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
/**
* Overlay control - vmwgfx_overlay.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 870967a..409e172 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -615,6 +615,11 @@
if (unlikely(ret != 0))
goto err_unlock;
+ if (bo->mem.mem_type == TTM_PL_VRAM &&
+ bo->mem.mm_node->start < bo->num_pages)
+ (void) ttm_bo_validate(bo, &vmw_sys_placement, false,
+ false, false);
+
ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
/* Could probably bug on */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index e6a1eb7..0fe3176 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -106,6 +106,7 @@
mutex_lock(&dev_priv->hw_mutex);
dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
+ dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
min = 4;
@@ -175,6 +176,8 @@
dev_priv->config_done_state);
vmw_write(dev_priv, SVGA_REG_ENABLE,
dev_priv->enable_state);
+ vmw_write(dev_priv, SVGA_REG_TRACES,
+ dev_priv->traces_state);
mutex_unlock(&dev_priv->hw_mutex);
vmw_fence_queue_takedown(&fifo->fence_queue);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 64d7f47..e882ba0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -898,7 +898,19 @@
save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+ if (i == 0 && vmw_priv->num_displays == 1 &&
+ save->width == 0 && save->height == 0) {
+
+ /*
+ * It should be fairly safe to assume that these
+ * values are uninitialized.
+ */
+
+ save->width = vmw_priv->vga_width - save->pos_x;
+ save->height = vmw_priv->vga_height - save->pos_y;
+ }
}
+
return 0;
}
@@ -984,3 +996,8 @@
ttm_read_unlock(&vmaster->lock);
return ret;
}
+
+u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+ return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 2ff5cf7..11cb39e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -27,6 +27,8 @@
#include "vmwgfx_kms.h"
+#define VMWGFX_LDU_NUM_DU 8
+
#define vmw_crtc_to_ldu(x) \
container_of(x, struct vmw_legacy_display_unit, base.crtc)
#define vmw_encoder_to_ldu(x) \
@@ -335,7 +337,8 @@
}
static enum drm_connector_status
- vmw_ldu_connector_detect(struct drm_connector *connector)
+ vmw_ldu_connector_detect(struct drm_connector *connector,
+ bool force)
{
if (vmw_connector_to_ldu(connector)->pref_active)
return connector_status_connected;
@@ -516,7 +519,7 @@
drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
- connector->status = vmw_ldu_connector_detect(connector);
+ connector->status = vmw_ldu_connector_detect(connector, true);
drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
DRM_MODE_ENCODER_LVDS);
@@ -535,6 +538,10 @@
int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
{
+ struct drm_device *dev = dev_priv->dev;
+ int i;
+ int ret;
+
if (dev_priv->ldu_priv) {
DRM_INFO("ldu system already on\n");
return -EINVAL;
@@ -552,23 +559,24 @@
drm_mode_create_dirty_info_property(dev_priv->dev);
- vmw_ldu_init(dev_priv, 0);
- /* for old hardware without multimon only enable one display */
if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
- vmw_ldu_init(dev_priv, 1);
- vmw_ldu_init(dev_priv, 2);
- vmw_ldu_init(dev_priv, 3);
- vmw_ldu_init(dev_priv, 4);
- vmw_ldu_init(dev_priv, 5);
- vmw_ldu_init(dev_priv, 6);
- vmw_ldu_init(dev_priv, 7);
+ for (i = 0; i < VMWGFX_LDU_NUM_DU; ++i)
+ vmw_ldu_init(dev_priv, i);
+ ret = drm_vblank_init(dev, VMWGFX_LDU_NUM_DU);
+ } else {
+ /* for old hardware without multimon only enable one display */
+ vmw_ldu_init(dev_priv, 0);
+ ret = drm_vblank_init(dev, 1);
}
- return 0;
+ return ret;
}
int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
{
+ struct drm_device *dev = dev_priv->dev;
+
+ drm_vblank_cleanup(dev);
if (!dev_priv->ldu_priv)
return -ENOSYS;
@@ -610,7 +618,7 @@
ldu->pref_height = 600;
ldu->pref_active = false;
}
- con->status = vmw_ldu_connector_detect(con);
+ con->status = vmw_ldu_connector_detect(con, true);
}
mutex_unlock(&dev->mode_config.mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 5f2d5df..c8c40e9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -211,6 +211,7 @@
cmd->body.cid = cpu_to_le32(res->id);
vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_3d_resource_dec(dev_priv);
}
static int vmw_context_init(struct vmw_private *dev_priv,
@@ -247,6 +248,7 @@
cmd->body.cid = cpu_to_le32(res->id);
vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ (void) vmw_3d_resource_inc(dev_priv);
vmw_resource_activate(res, vmw_hw_context_destroy);
return 0;
}
@@ -406,6 +408,7 @@
cmd->body.sid = cpu_to_le32(res->id);
vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_3d_resource_dec(dev_priv);
}
void vmw_surface_res_free(struct vmw_resource *res)
@@ -473,6 +476,7 @@
}
vmw_fifo_commit(dev_priv, submit_size);
+ (void) vmw_3d_resource_inc(dev_priv);
vmw_resource_activate(res, vmw_hw_surface_destroy);
return 0;
}
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index b87569e..f366f96 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -598,7 +598,7 @@
pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count);
}
-void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace)
+static void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace)
{
struct vga_device *vgadev;
unsigned long flags;
diff --git a/drivers/hid/hid-cando.c b/drivers/hid/hid-cando.c
index 4267a6f..5925bdc 100644
--- a/drivers/hid/hid-cando.c
+++ b/drivers/hid/hid-cando.c
@@ -237,6 +237,8 @@
USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
+ USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) },
{ }
};
MODULE_DEVICE_TABLE(hid, cando_devices);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 0c52899..a0dea3d 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1285,10 +1285,14 @@
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
@@ -1578,7 +1582,6 @@
{ HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT)},
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)},
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)},
{ HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 85c6d13..c5ae5f1 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -105,6 +105,7 @@
#define USB_VENDOR_ID_ASUS 0x0486
#define USB_DEVICE_ID_ASUS_T91MT 0x0185
+#define USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO 0x0186
#define USB_VENDOR_ID_ASUSTEK 0x0b05
#define USB_DEVICE_ID_ASUSTEK_LCM 0x1726
@@ -128,10 +129,12 @@
#define USB_VENDOR_ID_BTC 0x046e
#define USB_DEVICE_ID_BTC_EMPREX_REMOTE 0x5578
+#define USB_DEVICE_ID_BTC_EMPREX_REMOTE_2 0x5577
#define USB_VENDOR_ID_CANDO 0x2087
#define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01
#define USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6 0x0b03
+#define USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6 0x0f01
#define USB_VENDOR_ID_CH 0x068e
#define USB_DEVICE_ID_CH_PRO_PEDALS 0x00f2
@@ -149,6 +152,7 @@
#define USB_VENDOR_ID_CHICONY 0x04f2
#define USB_DEVICE_ID_CHICONY_TACTICAL_PAD 0x0418
+#define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d
#define USB_VENDOR_ID_CIDC 0x1677
@@ -500,6 +504,7 @@
#define USB_VENDOR_ID_TURBOX 0x062a
#define USB_DEVICE_ID_TURBOX_KEYBOARD 0x0201
+#define USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART 0x7100
#define USB_VENDOR_ID_TWINHAN 0x6253
#define USB_DEVICE_ID_TWINHAN_IR_REMOTE 0x0100
@@ -507,6 +512,7 @@
#define USB_VENDOR_ID_UCLOGIC 0x5543
#define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209 0x0042
#define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U 0x0003
+#define USB_DEVICE_ID_UCLOGIC_TABLET_KNA5 0x6001
#define USB_VENDOR_ID_VERNIER 0x08f7
#define USB_DEVICE_ID_VERNIER_LABPRO 0x0001
diff --git a/drivers/hid/hid-mosart.c b/drivers/hid/hid-mosart.c
index e91437c..ac5421d 100644
--- a/drivers/hid/hid-mosart.c
+++ b/drivers/hid/hid-mosart.c
@@ -239,6 +239,7 @@
static const struct hid_device_id mosart_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO) },
{ }
};
MODULE_DEVICE_TABLE(hid, mosart_devices);
diff --git a/drivers/hid/hid-topseed.c b/drivers/hid/hid-topseed.c
index 5771f85..956ed9a 100644
--- a/drivers/hid/hid-topseed.c
+++ b/drivers/hid/hid-topseed.c
@@ -64,6 +64,7 @@
static const struct hid_device_id ts_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
{ }
};
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 47d70c5..a3866b5 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -109,6 +109,12 @@
int ret = 0;
mutex_lock(&minors_lock);
+
+ if (!hidraw_table[minor]) {
+ ret = -ENODEV;
+ goto out;
+ }
+
dev = hidraw_table[minor]->hid;
if (!dev->hid_output_raw_report) {
@@ -244,6 +250,10 @@
mutex_lock(&minors_lock);
dev = hidraw_table[minor];
+ if (!dev) {
+ ret = -ENODEV;
+ goto out;
+ }
switch (cmd) {
case HIDIOCGRDESCSIZE:
@@ -317,6 +327,7 @@
ret = -ENOTTY;
}
+out:
mutex_unlock(&minors_lock);
return ret;
}
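
The two hidraw hunks above close the same race: a device can be disconnected between open() and a later write or ioctl, leaving a NULL slot in hidraw_table. The fix validates the slot while holding minors_lock and routes every exit through a single unlock label. A minimal userspace sketch of that check-under-lock pattern (all names hypothetical):

	#include <errno.h>
	#include <pthread.h>

	struct device;                              /* opaque */
	static pthread_mutex_t minors_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct device *device_table[256];    /* minor -> device, may go NULL */

	static int device_op(unsigned int minor)
	{
		int ret = 0;

		pthread_mutex_lock(&minors_lock);
		if (minor >= 256 || !device_table[minor]) {
			ret = -ENODEV;  /* slot cleared by a concurrent disconnect */
			goto out;
		}
		/* ... dereference device_table[minor] only inside the lock ... */
	out:
		pthread_mutex_unlock(&minors_lock);
		return ret;
	}
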
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index b729c02..599041a 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -828,6 +828,7 @@
}
} else {
int skipped_report_id = 0;
+ int report_id = buf[0];
if (buf[0] == 0x0) {
/* Don't send the Report ID */
buf++;
@@ -837,7 +838,7 @@
ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
HID_REQ_SET_REPORT,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- ((report_type + 1) << 8) | *buf,
+ ((report_type + 1) << 8) | report_id,
interface->desc.bInterfaceNumber, buf, count,
USB_CTRL_SET_TIMEOUT);
/* count also the report id, if this was a numbered report. */
@@ -1445,6 +1446,11 @@
{ }
};
+struct usb_interface *usbhid_find_interface(int minor)
+{
+ return usb_find_interface(&hid_driver, minor);
+}
+
static struct hid_driver hid_usb_driver = {
.name = "generic-usb",
.id_table = hid_usb_table,
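
The SET_REPORT fix above is worth spelling out: the report ID must be captured before the zero-ID byte is stripped, because the control request previously built its value field from *buf after buf++ had already advanced past the ID. A small sketch of the corrected ordering (transfer call elided, names hypothetical):

	#include <stddef.h>

	static size_t send_report(const unsigned char *buf, size_t count)
	{
		int skipped_report_id = 0;
		int report_id = buf[0];     /* capture before the pointer moves */

		if (buf[0] == 0x0) {
			buf++;              /* don't send the report ID itself */
			count--;
			skipped_report_id = 1;
		}
		/* ... issue the transfer using 'report_id', never '*buf' ... */
		return count + skipped_report_id;  /* count the ID if it was numbered */
	}
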
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 2643d31..f0260c6 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -33,8 +33,10 @@
{ USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR, HID_QUIRK_BADPAD },
{ USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD },
{ USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD },
+ { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
@@ -69,6 +71,7 @@
{ USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
{ USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
@@ -77,6 +80,8 @@
{ USB_VENDOR_ID_PI_ENGINEERING, USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL, HID_QUIRK_HIDINPUT_FORCE },
+ { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT },
+
{ 0, 0 }
};
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 0a29c51..681e620 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -270,7 +270,7 @@
struct hiddev *hiddev;
int res;
- intf = usb_find_interface(&hiddev_driver, iminor(inode));
+ intf = usbhid_find_interface(iminor(inode));
if (!intf)
return -ENODEV;
hid = usb_get_intfdata(intf);
diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h
index 693fd3e..89d2e84 100644
--- a/drivers/hid/usbhid/usbhid.h
+++ b/drivers/hid/usbhid/usbhid.h
@@ -42,6 +42,7 @@
(struct hid_device *hid, struct hid_report *report, unsigned char dir);
int usbhid_get_power(struct hid_device *hid);
void usbhid_put_power(struct hid_device *hid);
+struct usb_interface *usbhid_find_interface(int minor);
/* iofl flags */
#define HID_CTRL_RUNNING 1
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 4d4d09b..97499d0 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -409,7 +409,7 @@
config SENSORS_PKGTEMP
tristate "Intel processor package temperature sensor"
- depends on X86 && PCI && EXPERIMENTAL
+ depends on X86 && EXPERIMENTAL
help
If you say yes here you get support for the package level temperature
sensor inside your CPU. Check documentation/driver for details.
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index 15c1a96..0683e6b 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -79,7 +79,7 @@
int chip_type;
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
- unsigned int update_rate; /* In milliseconds */
+ unsigned int update_interval; /* In milliseconds */
/* The chan_select_table contains the possible configurations for
* auto fan control.
*/
@@ -743,23 +743,23 @@
static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 13);
static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14);
-/* Update Rate */
-static const unsigned int update_rates[] = {
+/* Update Interval */
+static const unsigned int update_intervals[] = {
16000, 8000, 4000, 2000, 1000, 500, 250, 125,
};
-static ssize_t show_update_rate(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t show_update_interval(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
struct adm1031_data *data = i2c_get_clientdata(client);
- return sprintf(buf, "%u\n", data->update_rate);
+ return sprintf(buf, "%u\n", data->update_interval);
}
-static ssize_t set_update_rate(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t set_update_interval(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct adm1031_data *data = i2c_get_clientdata(client);
@@ -771,12 +771,15 @@
if (err)
return err;
- /* find the nearest update rate from the table */
- for (i = 0; i < ARRAY_SIZE(update_rates) - 1; i++) {
- if (val >= update_rates[i])
+ /*
+ * Find the nearest update interval from the table.
+ * Use it to determine the matching update rate.
+ */
+ for (i = 0; i < ARRAY_SIZE(update_intervals) - 1; i++) {
+ if (val >= update_intervals[i])
break;
}
- /* if not found, we point to the last entry (lowest update rate) */
+ /* if not found, we point to the last entry (lowest update interval) */
/* set the new update rate while preserving other settings */
reg = adm1031_read_value(client, ADM1031_REG_FAN_FILTER);
@@ -785,14 +788,14 @@
adm1031_write_value(client, ADM1031_REG_FAN_FILTER, reg);
mutex_lock(&data->update_lock);
- data->update_rate = update_rates[i];
+ data->update_interval = update_intervals[i];
mutex_unlock(&data->update_lock);
return count;
}
-static DEVICE_ATTR(update_rate, S_IRUGO | S_IWUSR, show_update_rate,
- set_update_rate);
+static DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR, show_update_interval,
+ set_update_interval);
static struct attribute *adm1031_attributes[] = {
&sensor_dev_attr_fan1_input.dev_attr.attr,
@@ -830,7 +833,7 @@
&sensor_dev_attr_auto_fan1_min_pwm.dev_attr.attr,
- &dev_attr_update_rate.attr,
+ &dev_attr_update_interval.attr,
&dev_attr_alarms.attr,
NULL
@@ -981,7 +984,8 @@
mask = ADM1031_UPDATE_RATE_MASK;
read_val = adm1031_read_value(client, ADM1031_REG_FAN_FILTER);
i = (read_val & mask) >> ADM1031_UPDATE_RATE_SHIFT;
- data->update_rate = update_rates[i];
+ /* Save it as update interval */
+ data->update_interval = update_intervals[i];
}
static struct adm1031_data *adm1031_update_device(struct device *dev)
@@ -993,7 +997,8 @@
mutex_lock(&data->update_lock);
- next_update = data->last_updated + msecs_to_jiffies(data->update_rate);
+ next_update = data->last_updated
+ + msecs_to_jiffies(data->update_interval);
if (time_after(jiffies, next_update) || !data->valid) {
dev_dbg(&client->dev, "Starting adm1031 update\n");
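
Behind the rename, the adm1031 selection logic is a descending-table scan: it picks the largest supported interval not exceeding the requested value, and falls back to the smallest entry otherwise. A standalone sketch of the same lookup (table values copied from the hunk):

	#include <stdio.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	static const unsigned int update_intervals[] = {
		16000, 8000, 4000, 2000, 1000, 500, 250, 125,  /* ms, longest first */
	};

	static unsigned int pick_interval(unsigned long val)
	{
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(update_intervals) - 1; i++)
			if (val >= update_intervals[i])
				break;
		/* if not found, i indexes the last (smallest) entry */
		return i;
	}

	int main(void)
	{
		printf("%u ms\n", update_intervals[pick_interval(3000)]); /* 2000 ms */
		return 0;
	}
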
diff --git a/drivers/hwmon/ads7871.c b/drivers/hwmon/ads7871.c
index b300a20..5231934 100644
--- a/drivers/hwmon/ads7871.c
+++ b/drivers/hwmon/ads7871.c
@@ -160,30 +160,12 @@
static int __devinit ads7871_probe(struct spi_device *spi)
{
- int status, ret, err = 0;
+ int ret, err;
uint8_t val;
struct ads7871_data *pdata;
dev_dbg(&spi->dev, "probe\n");
- pdata = kzalloc(sizeof(struct ads7871_data), GFP_KERNEL);
- if (!pdata) {
- err = -ENOMEM;
- goto exit;
- }
-
- status = sysfs_create_group(&spi->dev.kobj, &ads7871_group);
- if (status < 0)
- goto error_free;
-
- pdata->hwmon_dev = hwmon_device_register(&spi->dev);
- if (IS_ERR(pdata->hwmon_dev)) {
- err = PTR_ERR(pdata->hwmon_dev);
- goto error_remove;
- }
-
- spi_set_drvdata(spi, pdata);
-
/* Configure the SPI bus */
spi->mode = (SPI_MODE_0);
spi->bits_per_word = 8;
@@ -201,6 +183,24 @@
we need to make sure we really have a chip*/
if (val != ret) {
err = -ENODEV;
+ goto exit;
+ }
+
+ pdata = kzalloc(sizeof(struct ads7871_data), GFP_KERNEL);
+ if (!pdata) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ err = sysfs_create_group(&spi->dev.kobj, &ads7871_group);
+ if (err < 0)
+ goto error_free;
+
+ spi_set_drvdata(spi, pdata);
+
+ pdata->hwmon_dev = hwmon_device_register(&spi->dev);
+ if (IS_ERR(pdata->hwmon_dev)) {
+ err = PTR_ERR(pdata->hwmon_dev);
goto error_remove;
}
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index c070c97..a23b17a 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -36,6 +36,7 @@
#include <linux/pci.h>
#include <asm/msr.h>
#include <asm/processor.h>
+#include <asm/smp.h>
#define DRVNAME "coretemp"
@@ -423,9 +424,18 @@
int err;
struct platform_device *pdev;
struct pdev_entry *pdev_entry;
-#ifdef CONFIG_SMP
struct cpuinfo_x86 *c = &cpu_data(cpu);
-#endif
+
+ /*
+ * CPUID.06H.EAX[0] indicates whether the CPU has thermal
+ * sensors. We check this bit only; all the early CPUs

+ * without thermal sensors will be filtered out.
+ */
+ if (!cpu_has(c, X86_FEATURE_DTS)) {
+ printk(KERN_INFO DRVNAME ": CPU (model=0x%x)"
+ " has no thermal sensor.\n", c->x86_model);
+ return 0;
+ }
mutex_lock(&pdev_list_mutex);
@@ -482,14 +492,22 @@
static void coretemp_device_remove(unsigned int cpu)
{
- struct pdev_entry *p, *n;
+ struct pdev_entry *p;
+ unsigned int i;
+
mutex_lock(&pdev_list_mutex);
- list_for_each_entry_safe(p, n, &pdev_list, list) {
- if (p->cpu == cpu) {
- platform_device_unregister(p->pdev);
- list_del(&p->list);
- kfree(p);
- }
+ list_for_each_entry(p, &pdev_list, list) {
+ if (p->cpu != cpu)
+ continue;
+
+ platform_device_unregister(p->pdev);
+ list_del(&p->list);
+ mutex_unlock(&pdev_list_mutex);
+ kfree(p);
+ for_each_cpu(i, cpu_sibling_mask(cpu))
+ if (i != cpu && !coretemp_device_add(i))
+ break;
+ return;
}
mutex_unlock(&pdev_list_mutex);
}
@@ -518,7 +536,6 @@
static int __init coretemp_init(void)
{
int i, err = -ENODEV;
- struct pdev_entry *p, *n;
/* quick check if we run Intel */
if (cpu_data(0).x86_vendor != X86_VENDOR_INTEL)
@@ -528,30 +545,21 @@
if (err)
goto exit;
- for_each_online_cpu(i) {
- struct cpuinfo_x86 *c = &cpu_data(i);
- /*
- * CPUID.06H.EAX[0] indicates whether the CPU has thermal
- * sensors. We check this bit only, all the early CPUs
- * without thermal sensors will be filtered out.
- */
- if (c->cpuid_level >= 6 && (cpuid_eax(0x06) & 0x01))
- coretemp_device_add(i);
- else {
- printk(KERN_INFO DRVNAME ": CPU (model=0x%x)"
- " has no thermal sensor.\n", c->x86_model);
- }
- }
+ for_each_online_cpu(i)
+ coretemp_device_add(i);
+
+#ifndef CONFIG_HOTPLUG_CPU
if (list_empty(&pdev_list)) {
err = -ENODEV;
goto exit_driver_unreg;
}
+#endif
register_hotcpu_notifier(&coretemp_cpu_notifier);
return 0;
-exit_driver_unreg:
#ifndef CONFIG_HOTPLUG_CPU
+exit_driver_unreg:
platform_driver_unregister(&coretemp_driver);
#endif
exit:
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index 5b58b20..8dee3f3 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -308,7 +308,6 @@
res = sysfs_create_group(&client->dev.kobj, &m_thermal_gr);
if (res) {
dev_warn(&client->dev, "create group failed\n");
- hwmon_device_unregister(data->hwmon_dev);
goto thermal_error1;
}
data->hwmon_dev = hwmon_device_register(&client->dev);
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 537841e..75afb3b 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -111,7 +111,7 @@
/* Super-I/O Function prototypes */
static inline int superio_inb(int base, int reg);
static inline int superio_inw(int base, int reg);
-static inline void superio_enter(int base);
+static inline int superio_enter(int base);
static inline void superio_select(int base, int ld);
static inline void superio_exit(int base);
@@ -861,11 +861,20 @@
return val;
}
-static inline void superio_enter(int base)
+static inline int superio_enter(int base)
{
+ /* Don't step on other drivers' I/O space by accident */
+ if (!request_muxed_region(base, 2, DRVNAME)) {
+ printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n",
+ base);
+ return -EBUSY;
+ }
+
/* according to the datasheet the key must be send twice! */
outb(SIO_UNLOCK_KEY, base);
outb(SIO_UNLOCK_KEY, base);
+
+ return 0;
}
static inline void superio_select(int base, int ld)
@@ -877,6 +886,7 @@
static inline void superio_exit(int base)
{
outb(SIO_LOCK_KEY, base);
+ release_region(base, 2);
}
static inline int fan_from_reg(u16 reg)
@@ -2175,21 +2185,15 @@
static int __init f71882fg_find(int sioaddr, unsigned short *address,
struct f71882fg_sio_data *sio_data)
{
- int err = -ENODEV;
u16 devid;
-
- /* Don't step on other drivers' I/O space by accident */
- if (!request_region(sioaddr, 2, DRVNAME)) {
- printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n",
- (int)sioaddr);
- return -EBUSY;
- }
-
- superio_enter(sioaddr);
+ int err = superio_enter(sioaddr);
+ if (err)
+ return err;
devid = superio_inw(sioaddr, SIO_REG_MANID);
if (devid != SIO_FINTEK_ID) {
pr_debug(DRVNAME ": Not a Fintek device\n");
+ err = -ENODEV;
goto exit;
}
@@ -2213,6 +2217,7 @@
default:
printk(KERN_INFO DRVNAME ": Unsupported Fintek device: %04x\n",
(unsigned int)devid);
+ err = -ENODEV;
goto exit;
}
@@ -2223,12 +2228,14 @@
if (!(superio_inb(sioaddr, SIO_REG_ENABLE) & 0x01)) {
printk(KERN_WARNING DRVNAME ": Device not activated\n");
+ err = -ENODEV;
goto exit;
}
*address = superio_inw(sioaddr, SIO_REG_ADDR);
if (*address == 0) {
printk(KERN_WARNING DRVNAME ": Base address not set\n");
+ err = -ENODEV;
goto exit;
}
*address &= ~(REGION_LENGTH - 1); /* Ignore 3 LSB */
@@ -2239,7 +2246,6 @@
(int)superio_inb(sioaddr, SIO_REG_DEVREV));
exit:
superio_exit(sioaddr);
- release_region(sioaddr, 2);
return err;
}
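
The net effect of the f71882fg change is that resource ownership and hardware state now travel together: superio_enter() either acquires the I/O region and unlocks the chip, or fails cleanly, and superio_exit() releases exactly what enter acquired. A sketch of the balanced enter/exit shape, with stand-ins for the kernel's request_muxed_region()/release_region() and key values taken from the driver's SIO_UNLOCK_KEY/SIO_LOCK_KEY constants:

	#include <errno.h>
	#include <stdbool.h>

	/* Stand-ins for the kernel resource API (always succeed here). */
	static bool claim_region(int base, int len) { (void)base; (void)len; return true; }
	static void unclaim_region(int base, int len) { (void)base; (void)len; }
	static void write_key(int base, unsigned char key) { (void)base; (void)key; }

	static int superio_enter(int base)
	{
		if (!claim_region(base, 2))     /* fail before touching the chip */
			return -EBUSY;
		write_key(base, 0x87);          /* unlock key, sent twice per datasheet */
		write_key(base, 0x87);
		return 0;
	}

	static void superio_exit(int base)
	{
		write_key(base, 0xaa);          /* lock the chip again */
		unclaim_region(base, 2);        /* balanced with superio_enter() */
	}
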
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index 0f58ecc..9638d58 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -79,7 +79,7 @@
#define F75375_REG_PWM2_DROP_DUTY 0x6C
#define FAN_CTRL_LINEAR(nr) (4 + nr)
-#define FAN_CTRL_MODE(nr) (5 + ((nr) * 2))
+#define FAN_CTRL_MODE(nr) (4 + ((nr) * 2))
/*
* Data structures and manipulation thereof
@@ -298,7 +298,7 @@
return -EINVAL;
fanmode = f75375_read8(client, F75375_REG_FAN_TIMER);
- fanmode = ~(3 << FAN_CTRL_MODE(nr));
+ fanmode &= ~(3 << FAN_CTRL_MODE(nr));
switch (val) {
case 0: /* Full speed */
@@ -350,7 +350,7 @@
mutex_lock(&data->update_lock);
conf = f75375_read8(client, F75375_REG_CONFIG1);
- conf = ~(1 << FAN_CTRL_LINEAR(nr));
+ conf &= ~(1 << FAN_CTRL_LINEAR(nr));
if (val == 0)
conf |= (1 << FAN_CTRL_LINEAR(nr)) ;
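
Both f75375s hunks fix the same class of bug: a read-modify-write that dropped the read. "reg = ~mask" replaces the whole register with the complement of the mask, while "reg &= ~mask" clears only the masked field. A two-line demonstration:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint8_t reg = 0xa5;                     /* value read from the chip */
		uint8_t mask = 3 << 4;                  /* two-bit field at bits 5:4 */

		uint8_t buggy = (uint8_t)~mask;         /* 0xcf: other bits lost */
		uint8_t fixed = reg & (uint8_t)~mask;   /* 0x85: only the field cleared */

		assert(buggy == 0xcf);
		assert(fixed == 0x85);
		return 0;
	}
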
diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c
index 7580f55..36e9575 100644
--- a/drivers/hwmon/hp_accel.c
+++ b/drivers/hwmon/hp_accel.c
@@ -221,6 +221,8 @@
AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
+ AXIS_DMI_MATCH("HPB532x", "HP ProBook 532", y_inverted),
+ AXIS_DMI_MATCH("Mini5102", "HP Mini 5102", xy_rotated_left_usd),
{ NULL, }
/* Laptop models without axis info (yet):
* "NC6910" "HP Compaq 6910"
diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
index b9bb3e0..39ead2a 100644
--- a/drivers/hwmon/k8temp.c
+++ b/drivers/hwmon/k8temp.c
@@ -143,6 +143,37 @@
MODULE_DEVICE_TABLE(pci, k8temp_ids);
+static int __devinit is_rev_g_desktop(u8 model)
+{
+ u32 brandidx;
+
+ if (model < 0x69)
+ return 0;
+
+ if (model == 0xc1 || model == 0x6c || model == 0x7c)
+ return 0;
+
+ /*
+ * Differentiate between AM2 and ASB1.
+ * See "Constructing the processor Name String" in "Revision
+ * Guide for AMD NPT Family 0Fh Processors" (33610).
+ */
+ brandidx = cpuid_ebx(0x80000001);
+ brandidx = (brandidx >> 9) & 0x1f;
+
+ /* Single core */
+ if ((model == 0x6f || model == 0x7f) &&
+ (brandidx == 0x7 || brandidx == 0x9 || brandidx == 0xc))
+ return 0;
+
+ /* Dual core */
+ if (model == 0x6b &&
+ (brandidx == 0xb || brandidx == 0xc))
+ return 0;
+
+ return 1;
+}
+
static int __devinit k8temp_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -179,9 +210,7 @@
"wrong - check erratum #141\n");
}
- if ((model >= 0x69) &&
- !(model == 0xc1 || model == 0x6c || model == 0x7c ||
- model == 0x6b || model == 0x6f || model == 0x7f)) {
+ if (is_rev_g_desktop(model)) {
/*
* RevG desktop CPUs (i.e. no socket S1G1 or
* ASB1 parts) need additional offset,
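
The new is_rev_g_desktop() helper reads the BrandId field from CPUID leaf 0x80000001, EBX bits 13:9, to tell AM2 parts from ASB1 ones, per the cited revision guide. The same extraction can be reproduced from userspace with the compiler's cpuid.h (x86 only; a sketch, not the driver's code):

	#include <cpuid.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		if (!__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx))
			return 1;                       /* leaf not supported */

		unsigned int brandidx = (ebx >> 9) & 0x1f;  /* BrandId, bits 13:9 */
		printf("brandidx = 0x%x\n", brandidx);
		return 0;
	}
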
diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
index 6138f03..fc591ae 100644
--- a/drivers/hwmon/lis3lv02d.c
+++ b/drivers/hwmon/lis3lv02d.c
@@ -277,7 +277,7 @@
wake_up_interruptible(&lis3_dev.misc_wait);
kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
out:
- if (lis3_dev.whoami == WAI_8B && lis3_dev.idev &&
+ if (lis3_dev.pdata && lis3_dev.whoami == WAI_8B && lis3_dev.idev &&
lis3_dev.idev->input->users)
return IRQ_WAKE_THREAD;
return IRQ_HANDLED;
@@ -718,7 +718,7 @@
* io-apic is not configurable (and generates a warning) but I keep it
* in case of support for other hardware.
*/
- if (dev->whoami == WAI_8B)
+ if (dev->pdata && dev->whoami == WAI_8B)
thread_fn = lis302dl_interrupt_thread1_8b;
else
thread_fn = NULL;
diff --git a/drivers/hwmon/lis3lv02d_i2c.c b/drivers/hwmon/lis3lv02d_i2c.c
index dc1f540..8e5933b 100644
--- a/drivers/hwmon/lis3lv02d_i2c.c
+++ b/drivers/hwmon/lis3lv02d_i2c.c
@@ -121,7 +121,7 @@
{
struct lis3lv02d *lis3 = i2c_get_clientdata(client);
- if (!lis3->pdata->wakeup_flags)
+ if (!lis3->pdata || !lis3->pdata->wakeup_flags)
lis3lv02d_poweroff(lis3);
return 0;
}
@@ -130,7 +130,7 @@
{
struct lis3lv02d *lis3 = i2c_get_clientdata(client);
- if (!lis3->pdata->wakeup_flags)
+ if (!lis3->pdata || !lis3->pdata->wakeup_flags)
lis3lv02d_poweron(lis3);
return 0;
}
diff --git a/drivers/hwmon/lis3lv02d_spi.c b/drivers/hwmon/lis3lv02d_spi.c
index 82b1680..b9be5e3 100644
--- a/drivers/hwmon/lis3lv02d_spi.c
+++ b/drivers/hwmon/lis3lv02d_spi.c
@@ -92,7 +92,7 @@
{
struct lis3lv02d *lis3 = spi_get_drvdata(spi);
- if (!lis3->pdata->wakeup_flags)
+ if (!lis3->pdata || !lis3->pdata->wakeup_flags)
lis3lv02d_poweroff(&lis3_dev);
return 0;
@@ -102,7 +102,7 @@
{
struct lis3lv02d *lis3 = spi_get_drvdata(spi);
- if (!lis3->pdata->wakeup_flags)
+ if (!lis3->pdata || !lis3->pdata->wakeup_flags)
lis3lv02d_poweron(lis3);
return 0;
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 94741d4..464340f 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -91,7 +91,7 @@
struct lm95241_data {
struct device *hwmon_dev;
struct mutex update_lock;
- unsigned long last_updated, rate; /* in jiffies */
+ unsigned long last_updated, interval; /* in jiffies */
char valid; /* zero until following fields are valid */
/* registers values */
u8 local_h, local_l; /* local */
@@ -114,23 +114,23 @@
show_temp(remote1);
show_temp(remote2);
-static ssize_t show_rate(struct device *dev, struct device_attribute *attr,
+static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct lm95241_data *data = lm95241_update_device(dev);
- snprintf(buf, PAGE_SIZE - 1, "%lu\n", 1000 * data->rate / HZ);
+ snprintf(buf, PAGE_SIZE - 1, "%lu\n", 1000 * data->interval / HZ);
return strlen(buf);
}
-static ssize_t set_rate(struct device *dev, struct device_attribute *attr,
+static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct lm95241_data *data = i2c_get_clientdata(client);
- strict_strtol(buf, 10, &data->rate);
- data->rate = data->rate * HZ / 1000;
+ strict_strtol(buf, 10, &data->interval);
+ data->interval = data->interval * HZ / 1000;
return count;
}
@@ -286,7 +286,8 @@
static DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO, show_min2, set_min2);
static DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_max1, set_max1);
static DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_max2, set_max2);
-static DEVICE_ATTR(rate, S_IWUSR | S_IRUGO, show_rate, set_rate);
+static DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO, show_interval,
+ set_interval);
static struct attribute *lm95241_attributes[] = {
&dev_attr_temp1_input.attr,
@@ -298,7 +299,7 @@
&dev_attr_temp3_min.attr,
&dev_attr_temp2_max.attr,
&dev_attr_temp3_max.attr,
- &dev_attr_rate.attr,
+ &dev_attr_update_interval.attr,
NULL
};
@@ -376,7 +377,7 @@
{
struct lm95241_data *data = i2c_get_clientdata(client);
- data->rate = HZ; /* 1 sec default */
+ data->interval = HZ; /* 1 sec default */
data->valid = 0;
data->config = CFG_CR0076;
data->model = 0;
@@ -410,7 +411,7 @@
mutex_lock(&data->update_lock);
- if (time_after(jiffies, data->last_updated + data->rate) ||
+ if (time_after(jiffies, data->last_updated + data->interval) ||
!data->valid) {
dev_dbg(&client->dev, "Updating lm95241 data.\n");
data->local_h =
diff --git a/drivers/hwmon/pkgtemp.c b/drivers/hwmon/pkgtemp.c
index 74157fc..f119039 100644
--- a/drivers/hwmon/pkgtemp.c
+++ b/drivers/hwmon/pkgtemp.c
@@ -33,7 +33,6 @@
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/cpu.h>
-#include <linux/pci.h>
#include <asm/msr.h>
#include <asm/processor.h>
@@ -224,7 +223,7 @@
err = sysfs_create_group(&pdev->dev.kobj, &pkgtemp_group);
if (err)
- goto exit_free;
+ goto exit_dev;
data->hwmon_dev = hwmon_device_register(&pdev->dev);
if (IS_ERR(data->hwmon_dev)) {
@@ -238,6 +237,8 @@
exit_class:
sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group);
+exit_dev:
+ device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr);
exit_free:
kfree(data);
exit:
@@ -250,6 +251,7 @@
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group);
+ device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr);
platform_set_drvdata(pdev, NULL);
kfree(data);
return 0;
@@ -281,9 +283,10 @@
int err;
struct platform_device *pdev;
struct pdev_entry *pdev_entry;
-#ifdef CONFIG_SMP
struct cpuinfo_x86 *c = &cpu_data(cpu);
-#endif
+
+ if (!cpu_has(c, X86_FEATURE_PTS))
+ return 0;
mutex_lock(&pdev_list_mutex);
@@ -339,17 +342,18 @@
#ifdef CONFIG_HOTPLUG_CPU
static void pkgtemp_device_remove(unsigned int cpu)
{
- struct pdev_entry *p, *n;
+ struct pdev_entry *p;
unsigned int i;
int err;
mutex_lock(&pdev_list_mutex);
- list_for_each_entry_safe(p, n, &pdev_list, list) {
+ list_for_each_entry(p, &pdev_list, list) {
if (p->cpu != cpu)
continue;
platform_device_unregister(p->pdev);
list_del(&p->list);
+ mutex_unlock(&pdev_list_mutex);
kfree(p);
for_each_cpu(i, cpu_core_mask(cpu)) {
if (i != cpu) {
@@ -358,7 +362,7 @@
break;
}
}
- break;
+ return;
}
mutex_unlock(&pdev_list_mutex);
}
@@ -399,11 +403,6 @@
goto exit;
for_each_online_cpu(i) {
- struct cpuinfo_x86 *c = &cpu_data(i);
-
- if (!cpu_has(c, X86_FEATURE_PTS))
- continue;
-
err = pkgtemp_device_add(i);
if (err)
goto exit_devices_unreg;
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index e96e69d..072c580 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -127,6 +127,7 @@
static inline void
superio_exit(int ioreg)
{
+ outb(0xaa, ioreg);
outb(0x02, ioreg);
outb(0x02, ioreg + 1);
}
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index f7bd261..f2de3be 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -677,6 +677,11 @@
dev_dbg(&ofdev->dev, "hw routines for %s registered.\n",
cpm->adap.name);
+ /*
+ * register OF I2C devices
+ */
+ of_i2c_register_devices(&cpm->adap);
+
return 0;
out_shut:
cpm_i2c_shutdown(cpm);
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 2222c87..5795c83 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -331,21 +331,16 @@
INIT_COMPLETION(dev->cmd_complete);
dev->cmd_err = 0;
- /* Take I2C out of reset, configure it as master and set the
- * start bit */
- flag = DAVINCI_I2C_MDR_IRS | DAVINCI_I2C_MDR_MST | DAVINCI_I2C_MDR_STT;
+ /* Take I2C out of reset and configure it as master */
+ flag = DAVINCI_I2C_MDR_IRS | DAVINCI_I2C_MDR_MST;
/* if the slave address is ten bit address, enable XA bit */
if (msg->flags & I2C_M_TEN)
flag |= DAVINCI_I2C_MDR_XA;
if (!(msg->flags & I2C_M_RD))
flag |= DAVINCI_I2C_MDR_TRX;
- if (stop)
- flag |= DAVINCI_I2C_MDR_STP;
- if (msg->len == 0) {
+ if (msg->len == 0)
flag |= DAVINCI_I2C_MDR_RM;
- flag &= ~DAVINCI_I2C_MDR_STP;
- }
/* Enable receive or transmit interrupts */
w = davinci_i2c_read_reg(dev, DAVINCI_I2C_IMR_REG);
@@ -357,7 +352,11 @@
dev->terminate = 0;
- /* write the data into mode register */
+ /*
+ * Write mode register first as needed for correct behaviour
+ * on OMAP-L138, but don't set STT yet to avoid a race with XRDY
+ * occuring before we have loaded DXR
+ */
davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);
/*
@@ -365,12 +364,19 @@
* because transmit-data-ready interrupt can come before
* NACK-interrupt during sending of previous message and
* ICDXR may have wrong data
+ * It also saves us one interrupt, making the transfer slightly faster
*/
if ((!(msg->flags & I2C_M_RD)) && dev->buf_len) {
davinci_i2c_write_reg(dev, DAVINCI_I2C_DXR_REG, *dev->buf++);
dev->buf_len--;
}
+ /* Set STT to begin transmit now that DXR is loaded */
+ flag |= DAVINCI_I2C_MDR_STT;
+ if (stop && msg->len != 0)
+ flag |= DAVINCI_I2C_MDR_STP;
+ davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);
+
r = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
dev->adapter.timeout);
if (r == 0) {
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index 43ca32f..89eedf4 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -761,6 +761,9 @@
dev_info(&ofdev->dev, "using %s mode\n",
dev->fast_mode ? "fast (400 kHz)" : "standard (100 kHz)");
+ /* Now register all the child nodes */
+ of_i2c_register_devices(adap);
+
return 0;
error_cleanup:
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index d1ff940..4c2a62b 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -159,15 +159,9 @@
static int i2c_imx_trx_complete(struct imx_i2c_struct *i2c_imx)
{
- int result;
+ wait_event_timeout(i2c_imx->queue, i2c_imx->i2csr & I2SR_IIF, HZ / 10);
- result = wait_event_interruptible_timeout(i2c_imx->queue,
- i2c_imx->i2csr & I2SR_IIF, HZ / 10);
-
- if (unlikely(result < 0)) {
- dev_dbg(&i2c_imx->adapter.dev, "<%s> result < 0\n", __func__);
- return result;
- } else if (unlikely(!(i2c_imx->i2csr & I2SR_IIF))) {
+ if (unlikely(!(i2c_imx->i2csr & I2SR_IIF))) {
dev_dbg(&i2c_imx->adapter.dev, "<%s> Timeout\n", __func__);
return -ETIMEDOUT;
}
@@ -295,7 +289,7 @@
i2c_imx->i2csr = temp;
temp &= ~I2SR_IIF;
writeb(temp, i2c_imx->base + IMX_I2C_I2SR);
- wake_up_interruptible(&i2c_imx->queue);
+ wake_up(&i2c_imx->queue);
return IRQ_HANDLED;
}
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index a1c419a..b74e6dc 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -632,6 +632,7 @@
dev_err(i2c->dev, "failed to add adapter\n");
goto fail_add;
}
+ of_i2c_register_devices(&i2c->adap);
return result;
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c
index 0e9f85d..56dbe54 100644
--- a/drivers/i2c/busses/i2c-octeon.c
+++ b/drivers/i2c/busses/i2c-octeon.c
@@ -218,7 +218,7 @@
return result;
} else if (result == 0) {
dev_dbg(i2c->dev, "%s: timeout\n", __func__);
- result = -ETIMEDOUT;
+ return -ETIMEDOUT;
}
return 0;
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 7674efb..b33c785 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -680,6 +680,8 @@
if (r == 0)
r = num;
+
+ omap_i2c_wait_for_bb(dev);
out:
omap_i2c_idle(dev);
return r;
diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c
index bbd7760..29933f8 100644
--- a/drivers/i2c/busses/i2c-pca-isa.c
+++ b/drivers/i2c/busses/i2c-pca-isa.c
@@ -71,8 +71,8 @@
static int pca_isa_waitforcompletion(void *pd)
{
- long ret = ~0;
unsigned long timeout;
+ long ret;
if (irq > -1) {
ret = wait_event_timeout(pca_wait,
@@ -81,11 +81,15 @@
} else {
/* Do polling */
timeout = jiffies + pca_isa_ops.timeout;
- while (((pca_isa_readbyte(pd, I2C_PCA_CON)
- & I2C_PCA_CON_SI) == 0)
- && (ret = time_before(jiffies, timeout)))
+ do {
+ ret = time_before(jiffies, timeout);
+ if (pca_isa_readbyte(pd, I2C_PCA_CON)
+ & I2C_PCA_CON_SI)
+ break;
udelay(100);
+ } while (ret);
}
+
return ret > 0;
}
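
Both PCA polling loops get the same restructuring: the old while() evaluated the timeout and the status read in one condition, so an expired deadline could mask a completion that had just arrived. The do/while form samples the status at least once per pass and breaks on completion before the timeout is consulted. A portable rendering of the loop, with a hypothetical ready() predicate in place of the SI-bit read:

	#include <stdbool.h>
	#include <time.h>
	#include <unistd.h>

	static bool ready(void) { return true; }    /* stand-in for the SI-bit read */

	static bool wait_polled(long timeout_ms)
	{
		struct timespec t0, now;
		long ret;

		clock_gettime(CLOCK_MONOTONIC, &t0);
		do {
			clock_gettime(CLOCK_MONOTONIC, &now);
			ret = ((now.tv_sec - t0.tv_sec) * 1000 +
			       (now.tv_nsec - t0.tv_nsec) / 1000000) < timeout_ms;
			if (ready())            /* completion checked every pass */
				break;
			usleep(100);
		} while (ret);

		return ret > 0;
	}
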
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index ef5c784..5f6d7f8 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -80,8 +80,8 @@
static int i2c_pca_pf_waitforcompletion(void *pd)
{
struct i2c_pca_pf_data *i2c = pd;
- long ret = ~0;
unsigned long timeout;
+ long ret;
if (i2c->irq) {
ret = wait_event_timeout(i2c->wait,
@@ -90,10 +90,13 @@
} else {
/* Do polling */
timeout = jiffies + i2c->adap.timeout;
- while (((i2c->algo_data.read_byte(i2c, I2C_PCA_CON)
- & I2C_PCA_CON_SI) == 0)
- && (ret = time_before(jiffies, timeout)))
+ do {
+ ret = time_before(jiffies, timeout);
+ if (i2c->algo_data.read_byte(i2c, I2C_PCA_CON)
+ & I2C_PCA_CON_SI)
+ break;
udelay(100);
+ } while (ret);
}
return ret > 0;
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 72902e0..bf831bf 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -662,8 +662,8 @@
unsigned long sda_delay;
if (pdata->sda_delay) {
- sda_delay = (freq / 1000) * pdata->sda_delay;
- sda_delay /= 1000000;
+ sda_delay = clkin * pdata->sda_delay;
+ sda_delay = DIV_ROUND_UP(sda_delay, 1000000);
sda_delay = DIV_ROUND_UP(sda_delay, 5);
if (sda_delay > 3)
sda_delay = 3;
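
The s3c2410 hunk re-derives the SDA hold delay from the controller's input clock (clkin) rather than the bus frequency, and switches to round-up division so small nonzero delays no longer truncate to zero before the final clamp. Roughly, the arithmetic is:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	/* Sketch of the fixed calculation; units follow the driver's
	 * platform data, and the result fills a 2-bit register field. */
	static unsigned long sda_delay_field(unsigned long clkin, unsigned long delay)
	{
		unsigned long long v = (unsigned long long)clkin * delay;

		v = DIV_ROUND_UP(v, 1000000);
		v = DIV_ROUND_UP(v, 5);         /* hardware steps of 5 input clocks */
		return v > 3 ? 3 : (unsigned long)v;
	}

	int main(void)
	{
		printf("%lu\n", sda_delay_field(66000000, 100));  /* -> 3 (clamped) */
		return 0;
	}
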
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 6649176..bea4c50 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -32,7 +32,6 @@
#include <linux/init.h>
#include <linux/idr.h>
#include <linux/mutex.h>
-#include <linux/of_i2c.h>
#include <linux/of_device.h>
#include <linux/completion.h>
#include <linux/hardirq.h>
@@ -197,11 +196,12 @@
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- if (pm_runtime_suspended(dev))
- return 0;
-
- if (pm)
- return pm->suspend ? pm->suspend(dev) : 0;
+ if (pm) {
+ if (pm_runtime_suspended(dev))
+ return 0;
+ else
+ return pm->suspend ? pm->suspend(dev) : 0;
+ }
return i2c_legacy_suspend(dev, PMSG_SUSPEND);
}
@@ -216,12 +216,6 @@
else
ret = i2c_legacy_resume(dev);
- if (!ret) {
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- }
-
return ret;
}
@@ -229,11 +223,12 @@
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- if (pm_runtime_suspended(dev))
- return 0;
-
- if (pm)
- return pm->freeze ? pm->freeze(dev) : 0;
+ if (pm) {
+ if (pm_runtime_suspended(dev))
+ return 0;
+ else
+ return pm->freeze ? pm->freeze(dev) : 0;
+ }
return i2c_legacy_suspend(dev, PMSG_FREEZE);
}
@@ -242,11 +237,12 @@
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- if (pm_runtime_suspended(dev))
- return 0;
-
- if (pm)
- return pm->thaw ? pm->thaw(dev) : 0;
+ if (pm) {
+ if (pm_runtime_suspended(dev))
+ return 0;
+ else
+ return pm->thaw ? pm->thaw(dev) : 0;
+ }
return i2c_legacy_resume(dev);
}
@@ -255,11 +251,12 @@
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- if (pm_runtime_suspended(dev))
- return 0;
-
- if (pm)
- return pm->poweroff ? pm->poweroff(dev) : 0;
+ if (pm) {
+ if (pm_runtime_suspended(dev))
+ return 0;
+ else
+ return pm->poweroff ? pm->poweroff(dev) : 0;
+ }
return i2c_legacy_suspend(dev, PMSG_HIBERNATE);
}
@@ -876,9 +873,6 @@
if (adap->nr < __i2c_first_dynamic_bus_num)
i2c_scan_static_board_info(adap);
- /* Register devices from the device tree */
- of_i2c_register_devices(adap);
-
/* Notify drivers */
mutex_lock(&core_lock);
bus_for_each_drv(&i2c_bus_type, NULL, adap, __process_new_adapter);
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 4c3d1bf..068cef0 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1448,19 +1448,13 @@
if (hwif == NULL)
continue;
- if (hwif->present)
- hwif_register_devices(hwif);
- }
-
- ide_host_for_each_port(i, hwif, host) {
- if (hwif == NULL)
- continue;
-
ide_sysfs_register_port(hwif);
ide_proc_register_port(hwif);
- if (hwif->present)
+ if (hwif->present) {
ide_proc_port_register_devices(hwif);
+ hwif_register_devices(hwif);
+ }
}
return j ? 0 : -1;
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
old mode 100755
new mode 100644
index a10152b..c37ef64
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -83,7 +83,7 @@
/* Reliable LAPIC Timer States, bit 1 for C1 etc. */
static unsigned int lapic_timer_reliable_states;
-static struct cpuidle_device *intel_idle_cpuidle_devices;
+static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
static struct cpuidle_state *cpuidle_state_table;
@@ -108,7 +108,7 @@
.name = "NHM-C3",
.desc = "MWAIT 0x10",
.driver_data = (void *) 0x10,
- .flags = CPUIDLE_FLAG_TIME_VALID,
+ .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 20,
.power_usage = 500,
.target_residency = 80,
@@ -117,7 +117,7 @@
.name = "NHM-C6",
.desc = "MWAIT 0x20",
.driver_data = (void *) 0x20,
- .flags = CPUIDLE_FLAG_TIME_VALID,
+ .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 200,
.power_usage = 350,
.target_residency = 800,
@@ -149,7 +149,7 @@
.name = "ATM-C4",
.desc = "MWAIT 0x30",
.driver_data = (void *) 0x30,
- .flags = CPUIDLE_FLAG_TIME_VALID,
+ .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 100,
.power_usage = 250,
.target_residency = 400,
@@ -157,13 +157,13 @@
{ /* MWAIT C5 */ },
{ /* MWAIT C6 */
.name = "ATM-C6",
- .desc = "MWAIT 0x40",
- .driver_data = (void *) 0x40,
- .flags = CPUIDLE_FLAG_TIME_VALID,
- .exit_latency = 200,
+ .desc = "MWAIT 0x52",
+ .driver_data = (void *) 0x52,
+ .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 140,
.power_usage = 150,
- .target_residency = 800,
- .enter = NULL }, /* disabled */
+ .target_residency = 560,
+ .enter = &intel_idle },
};
/**
@@ -185,6 +185,16 @@
local_irq_disable();
+ /*
+ * If the state flag indicates that the TLB will be flushed or if this
+ * is the deepest c-state supported, do a voluntary leave_mm() to avoid
+ * costly and mostly unnecessary wakeups for flushing the user TLBs
+ * associated with the active mm.
+ */
+ if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED ||
+ (&dev->states[dev->state_count - 1] == state))
+ leave_mm(cpu);
+
if (!(lapic_timer_reliable_states & (1 << (cstate))))
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index d0dc1db..5081502 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -1106,7 +1106,7 @@
if (recv->block_irq_interval * 4 > iso->buf_packets)
recv->block_irq_interval = iso->buf_packets / 4;
if (recv->block_irq_interval < 1)
- recv->block_irq_interval = 1;
+ recv->block_irq_interval = 1;
/* choose a buffer stride */
/* must be a power of 2, and <= PAGE_SIZE */
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index 8f0caf7..78fbe9f 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -53,7 +53,7 @@
#define T3_MAX_PBL_SIZE 256
#define T3_MAX_RQ_SIZE 1024
#define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1)
-#define T3_MAX_CQ_DEPTH 262144
+#define T3_MAX_CQ_DEPTH 65536
#define T3_MAX_NUM_STAG (1<<15)
#define T3_MAX_MR_SIZE 0x100000000ULL
#define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index d88077a..13c8887 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -463,7 +463,8 @@
V_MSS_IDX(mtu_idx) |
V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
- opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
+ opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
+ V_CONG_CONTROL_FLAVOR(cong_flavor);
skb->priority = CPL_PRIORITY_SETUP;
set_arp_failure_handler(skb, act_open_req_arp_failure);
@@ -1280,7 +1281,8 @@
V_MSS_IDX(mtu_idx) |
V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
- opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
+ opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
+ V_CONG_CONTROL_FLAVOR(cong_flavor);
rpl = cplhdr(skb);
rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 443cea5..61e0efd 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -502,7 +502,9 @@
static void nes_retrans_expired(struct nes_cm_node *cm_node)
{
struct iw_cm_id *cm_id = cm_node->cm_id;
- switch (cm_node->state) {
+ enum nes_cm_node_state state = cm_node->state;
+ cm_node->state = NES_CM_STATE_CLOSED;
+ switch (state) {
case NES_CM_STATE_SYN_RCVD:
case NES_CM_STATE_CLOSING:
rem_ref_cm_node(cm_node->cm_core, cm_node);
@@ -511,7 +513,6 @@
case NES_CM_STATE_FIN_WAIT1:
if (cm_node->cm_id)
cm_id->rem_ref(cm_id);
- cm_node->state = NES_CM_STATE_CLOSED;
send_reset(cm_node, NULL);
break;
default:
@@ -1439,9 +1440,6 @@
break;
case NES_CM_STATE_MPAREQ_RCVD:
passive_state = atomic_add_return(1, &cm_node->passive_state);
- if (passive_state == NES_SEND_RESET_EVENT)
- create_event(cm_node, NES_CM_EVENT_RESET);
- cm_node->state = NES_CM_STATE_CLOSED;
dev_kfree_skb_any(skb);
break;
case NES_CM_STATE_ESTABLISHED:
@@ -1456,6 +1454,7 @@
case NES_CM_STATE_CLOSED:
drop_packet(skb);
break;
+ case NES_CM_STATE_FIN_WAIT2:
case NES_CM_STATE_FIN_WAIT1:
case NES_CM_STATE_LAST_ACK:
cm_node->cm_id->rem_ref(cm_node->cm_id);
@@ -2777,6 +2776,12 @@
return -EINVAL;
}
+ passive_state = atomic_add_return(1, &cm_node->passive_state);
+ if (passive_state == NES_SEND_RESET_EVENT) {
+ rem_ref_cm_node(cm_node->cm_core, cm_node);
+ return -ECONNRESET;
+ }
+
/* associate the node with the QP */
nesqp->cm_node = (void *)cm_node;
cm_node->nesqp = nesqp;
@@ -2979,9 +2984,6 @@
printk(KERN_ERR "%s[%u] OFA CM event_handler returned, "
"ret=%d\n", __func__, __LINE__, ret);
- passive_state = atomic_add_return(1, &cm_node->passive_state);
- if (passive_state == NES_SEND_RESET_EVENT)
- create_event(cm_node, NES_CM_EVENT_RESET);
return 0;
}
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index f8233c8..1980a46 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -3468,6 +3468,19 @@
return; /* Ignore it, wait for close complete */
if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
+ if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) &&
+ (nesqp->ibqp_state == IB_QPS_RTS) &&
+ ((nesadapter->eeprom_version >> 16) != NES_A0)) {
+ spin_lock_irqsave(&nesqp->lock, flags);
+ nesqp->hw_iwarp_state = iwarp_state;
+ nesqp->hw_tcp_state = tcp_state;
+ nesqp->last_aeq = async_event_id;
+ next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
+ nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
+ nes_cm_disconn(nesqp);
+ }
nesqp->cm_id->add_ref(nesqp->cm_id);
schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp,
NES_TIMER_TYPE_CLOSE, 1, 0);
@@ -3477,7 +3490,6 @@
nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
async_event_id, nesqp->last_aeq, tcp_state);
}
-
break;
case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
if (nesqp->term_flags) {
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index aa9183d..1204c34 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -45,6 +45,7 @@
#define NES_PHY_TYPE_KR 9
#define NES_MULTICAST_PF_MAX 8
+#define NES_A0 3
enum pci_regs {
NES_INT_STAT = 0x0000,
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 6dfdd49..10560c7 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1446,14 +1446,14 @@
NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
u32temp |= NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
nes_write_indexed(nesdev,
- NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE + (nesdev->mac_index*0x200), u32temp);
+ NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
nesdev->disable_tx_flow_control = 0;
} else if ((et_pauseparam->tx_pause == 0) && (nesdev->disable_tx_flow_control == 0)) {
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
u32temp &= ~NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
nes_write_indexed(nesdev,
- NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE + (nesdev->mac_index*0x200), u32temp);
+ NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
nesdev->disable_tx_flow_control = 1;
}
if ((et_pauseparam->rx_pause == 1) && (nesdev->disable_rx_flow_control == 1)) {
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index c908c5f..af9ee31 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -28,7 +28,7 @@
int minor;
struct input_handle handle;
wait_queue_head_t wait;
- struct evdev_client *grab;
+ struct evdev_client __rcu *grab;
struct list_head client_list;
spinlock_t client_lock; /* protects client_list */
struct mutex mutex;
@@ -669,6 +669,9 @@
if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCGABS(0))) {
+ if (!dev->absinfo)
+ return -EINVAL;
+
t = _IOC_NR(cmd) & ABS_MAX;
abs = dev->absinfo[t];
@@ -680,10 +683,13 @@
}
}
- if (_IOC_DIR(cmd) == _IOC_READ) {
+ if (_IOC_DIR(cmd) == _IOC_WRITE) {
if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCSABS(0))) {
+ if (!dev->absinfo)
+ return -EINVAL;
+
t = _IOC_NR(cmd) & ABS_MAX;
if (copy_from_user(&abs, p, min_t(size_t,
diff --git a/drivers/input/input.c b/drivers/input/input.c
index a9b025f..ab69820 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1599,11 +1599,14 @@
* @dev: input device supporting MT events and finger tracking
* @num_slots: number of slots used by the device
*
- * This function allocates all necessary memory for MT slot handling
- * in the input device, and adds ABS_MT_SLOT to the device capabilities.
+ * This function allocates all necessary memory for MT slot handling in the
+ * input device, and adds ABS_MT_SLOT to the device capabilities. All slots
+ * are initially marked as unused by setting ABS_MT_TRACKING_ID to -1.
*/
int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots)
{
+ int i;
+
if (!num_slots)
return 0;
@@ -1614,6 +1617,10 @@
dev->mtsize = num_slots;
input_set_abs_params(dev, ABS_MT_SLOT, 0, num_slots - 1, 0, 0);
+ /* Mark slots as 'unused' */
+ for (i = 0; i < num_slots; i++)
+ dev->mt[i].abs[ABS_MT_TRACKING_ID - ABS_MT_FIRST] = -1;
+
return 0;
}
EXPORT_SYMBOL(input_mt_create_slots);
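
Two details in the input_mt_create_slots() hunk deserve a note: tracking ID -1 is the MT protocol-B convention for "slot unused", and the per-slot array stores only the MT axis range, which is why the axis code is offset by ABS_MT_FIRST. A sketch of the indexing (constants illustrative; the real codes live in linux/input.h):

	enum { MT_FIRST = 0x30, MT_TRACKING_ID = 0x39, MT_LAST = 0x3a };

	struct mt_slot {
		int abs[MT_LAST - MT_FIRST + 1];    /* MT axes only, hence the offset */
	};

	static void mark_slots_unused(struct mt_slot *mt, int num_slots)
	{
		int i;

		for (i = 0; i < num_slots; i++)
			mt[i].abs[MT_TRACKING_ID - MT_FIRST] = -1;  /* -1 = no contact */
	}
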
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index d85bd8a..22239e9 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -483,6 +483,9 @@
memcpy(joydev->abspam, abspam, len);
+ for (i = 0; i < joydev->nabs; i++)
+ joydev->absmap[joydev->abspam[i]] = i;
+
out:
kfree(abspam);
return retval;
diff --git a/drivers/input/keyboard/hil_kbd.c b/drivers/input/keyboard/hil_kbd.c
index dcc86b9..19fa94a 100644
--- a/drivers/input/keyboard/hil_kbd.c
+++ b/drivers/input/keyboard/hil_kbd.c
@@ -232,13 +232,13 @@
if (absdev) {
val = lo + (hi << 8);
#ifdef TABLET_AUTOADJUST
- if (val < input_abs_min(dev, ABS_X + i))
+ if (val < input_abs_get_min(dev, ABS_X + i))
input_abs_set_min(dev, ABS_X + i, val);
- if (val > input_abs_max(dev, ABS_X + i))
+ if (val > input_abs_get_max(dev, ABS_X + i))
input_abs_set_max(dev, ABS_X + i, val);
#endif
if (i % 3)
- val = input_abs_max(dev, ABS_X + i) - val;
+ val = input_abs_get_max(dev, ABS_X + i) - val;
input_report_abs(dev, ABS_X + i, val);
} else {
val = (int) (((int8_t) lo) | ((int8_t) hi << 8));
@@ -388,11 +388,11 @@
#ifdef TABLET_AUTOADJUST
for (i = 0; i < ABS_MAX; i++) {
- int diff = input_abs_max(input_dev, ABS_X + i) / 10;
+ int diff = input_abs_get_max(input_dev, ABS_X + i) / 10;
input_abs_set_min(input_dev, ABS_X + i,
- input_abs_min(input_dev, ABS_X + i) + diff)
+ input_abs_get_min(input_dev, ABS_X + i) + diff);
input_abs_set_max(input_dev, ABS_X + i,
- input_abs_max(input_dev, ABS_X + i) - diff)
+ input_abs_get_max(input_dev, ABS_X + i) - diff);
}
#endif
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index 0e53b3b..f32404f 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -567,8 +567,6 @@
clk_put(keypad->clk);
input_unregister_device(keypad->input_dev);
- input_free_device(keypad->input_dev);
-
iounmap(keypad->mmio_base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/input/misc/hp_sdc_rtc.c b/drivers/input/misc/hp_sdc_rtc.c
index c190664..7e2c12a 100644
--- a/drivers/input/misc/hp_sdc_rtc.c
+++ b/drivers/input/misc/hp_sdc_rtc.c
@@ -104,7 +104,7 @@
t.endidx = 91;
t.seq = tseq;
t.act.semaphore = &tsem;
- init_MUTEX_LOCKED(&tsem);
+ sema_init(&tsem, 0);
if (hp_sdc_enqueue_transaction(&t)) return -1;
@@ -698,7 +698,7 @@
return -ENODEV;
#endif
- init_MUTEX(&i8042tregs);
+ sema_init(&i8042tregs, 1);
if ((ret = hp_sdc_request_timer_irq(&hp_sdc_rtc_isr)))
return ret;
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index bb53fd33..3606985 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -404,6 +404,13 @@
retval = uinput_validate_absbits(dev);
if (retval < 0)
goto exit;
+ if (test_bit(ABS_MT_SLOT, dev->absbit)) {
+ int nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1;
+ input_mt_create_slots(dev, nslot);
+ input_set_events_per_packet(dev, 6 * nslot);
+ } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
+ input_set_events_per_packet(dev, 60);
+ }
}
udev->state = UIST_SETUP_COMPLETE;
@@ -811,6 +818,8 @@
.minor = UINPUT_MINOR,
.name = UINPUT_NAME,
};
+MODULE_ALIAS_MISCDEV(UINPUT_MINOR);
+MODULE_ALIAS("devname:" UINPUT_NAME);
static int __init uinput_init(void)
{
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index ea67c49..b952317 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -337,10 +337,14 @@
const struct bcm5974_config *cfg,
const struct tp_finger *f)
{
- input_report_abs(input, ABS_MT_TOUCH_MAJOR, raw2int(f->force_major));
- input_report_abs(input, ABS_MT_TOUCH_MINOR, raw2int(f->force_minor));
- input_report_abs(input, ABS_MT_WIDTH_MAJOR, raw2int(f->size_major));
- input_report_abs(input, ABS_MT_WIDTH_MINOR, raw2int(f->size_minor));
+ input_report_abs(input, ABS_MT_TOUCH_MAJOR,
+ raw2int(f->force_major) << 1);
+ input_report_abs(input, ABS_MT_TOUCH_MINOR,
+ raw2int(f->force_minor) << 1);
+ input_report_abs(input, ABS_MT_WIDTH_MAJOR,
+ raw2int(f->size_major) << 1);
+ input_report_abs(input, ABS_MT_WIDTH_MINOR,
+ raw2int(f->size_minor) << 1);
input_report_abs(input, ABS_MT_ORIENTATION,
MAX_FINGER_ORIENTATION - raw2int(f->orientation));
input_report_abs(input, ABS_MT_POSITION_X, raw2int(f->abs_x));
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 83c24cc..d528a2d 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -138,8 +138,8 @@
fx(0) = value;
if (mousedev->touch && mousedev->pkt_count >= 2) {
- size = input_abs_get_min(dev, ABS_X) -
- input_abs_get_max(dev, ABS_X);
+ size = input_abs_get_max(dev, ABS_X) -
+ input_abs_get_min(dev, ABS_X);
if (size == 0)
size = 256 * 2;
@@ -155,8 +155,8 @@
fy(0) = value;
if (mousedev->touch && mousedev->pkt_count >= 2) {
/* use X size for ABS_Y to keep the same scale */
- size = input_abs_get_min(dev, ABS_X) -
- input_abs_get_max(dev, ABS_X);
+ size = input_abs_get_max(dev, ABS_X) -
+ input_abs_get_min(dev, ABS_X);
if (size == 0)
size = 256 * 2;
diff --git a/drivers/input/serio/hil_mlc.c b/drivers/input/serio/hil_mlc.c
index c92f4ed..e5624d8 100644
--- a/drivers/input/serio/hil_mlc.c
+++ b/drivers/input/serio/hil_mlc.c
@@ -915,15 +915,15 @@
mlc->ostarted = 0;
rwlock_init(&mlc->lock);
- init_MUTEX(&mlc->osem);
+ sema_init(&mlc->osem, 1);
- init_MUTEX(&mlc->isem);
+ sema_init(&mlc->isem, 1);
mlc->icount = -1;
mlc->imatch = 0;
mlc->opercnt = 0;
- init_MUTEX_LOCKED(&(mlc->csem));
+ sema_init(&(mlc->csem), 0);
hil_mlc_clear_di_scratch(mlc);
hil_mlc_clear_di_map(mlc, 0);
diff --git a/drivers/input/serio/hp_sdc.c b/drivers/input/serio/hp_sdc.c
index bcc2d30..8c0b51c 100644
--- a/drivers/input/serio/hp_sdc.c
+++ b/drivers/input/serio/hp_sdc.c
@@ -905,7 +905,7 @@
ts_sync[1] = 0x0f;
ts_sync[2] = ts_sync[3] = ts_sync[4] = ts_sync[5] = 0;
t_sync.act.semaphore = &s_sync;
- init_MUTEX_LOCKED(&s_sync);
+ sema_init(&s_sync, 0);
hp_sdc_enqueue_transaction(&t_sync);
down(&s_sync); /* Wait for t_sync to complete */
@@ -1039,7 +1039,7 @@
return hp_sdc.dev_err;
}
- init_MUTEX_LOCKED(&tq_init_sem);
+ sema_init(&tq_init_sem, 0);
tq_init.actidx = 0;
tq_init.idx = 1;
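
The init_MUTEX()/init_MUTEX_LOCKED() conversions in the hunks above (hp_sdc_rtc.c, hil_mlc.c, hp_sdc.c) follow a mechanical rule. A minimal hedged sketch of the equivalence, using a hypothetical done_sem:

#include <linux/semaphore.h>

static struct semaphore done_sem;	/* hypothetical example semaphore */

static void example_init(void)
{
	/* init_MUTEX(&sem)        was sema_init(&sem, 1): starts available */
	/* init_MUTEX_LOCKED(&sem) was sema_init(&sem, 0): starts held     */
	sema_init(&done_sem, 0);
}

static void example_wait(void)
{
	/* The completing side later calls up(&done_sem) */
	down(&done_sem);
}

A count of 0 gives the completion-style usage seen in hp_sdc.c, where down() blocks until the transaction callback raises the semaphore.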
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 46e4ba0..f585131 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1485,8 +1485,8 @@
static void __exit i8042_exit(void)
{
- platform_driver_unregister(&i8042_driver);
platform_device_unregister(i8042_platform_device);
+ platform_driver_unregister(&i8042_driver);
i8042_platform_exit();
panic_blink = NULL;
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index 42ba369..b35876e 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -103,27 +103,26 @@
static int wacom_open(struct input_dev *dev)
{
struct wacom *wacom = input_get_drvdata(dev);
+ int retval = 0;
+
+ if (usb_autopm_get_interface(wacom->intf) < 0)
+ return -EIO;
mutex_lock(&wacom->lock);
- wacom->irq->dev = wacom->usbdev;
-
- if (usb_autopm_get_interface(wacom->intf) < 0) {
- mutex_unlock(&wacom->lock);
- return -EIO;
- }
-
if (usb_submit_urb(wacom->irq, GFP_KERNEL)) {
- usb_autopm_put_interface(wacom->intf);
- mutex_unlock(&wacom->lock);
- return -EIO;
+ retval = -EIO;
+ goto out;
}
wacom->open = true;
wacom->intf->needs_remote_wakeup = 1;
+out:
mutex_unlock(&wacom->lock);
- return 0;
+ if (retval)
+ usb_autopm_put_interface(wacom->intf);
+ return retval;
}
static void wacom_close(struct input_dev *dev)
@@ -135,6 +134,8 @@
wacom->open = false;
wacom->intf->needs_remote_wakeup = 0;
mutex_unlock(&wacom->lock);
+
+ usb_autopm_put_interface(wacom->intf);
}
static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hid_desc,
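
The wacom_open() reordering above keeps the autopm reference balanced without calling usb_autopm_get_interface(), which may resume the device and sleep, under the mutex. A condensed sketch of the resulting shape; my_dev and start_io() are hypothetical stand-ins, not the driver's names:

#include <linux/mutex.h>
#include <linux/usb.h>

struct my_dev {				/* hypothetical device state */
	struct usb_interface *intf;
	struct mutex lock;
};

static int start_io(struct my_dev *dev);	/* hypothetical helper */

static int example_open(struct my_dev *dev)
{
	int retval = 0;

	/* Take the PM reference first, with no lock held: resuming the
	 * device may sleep and must not nest inside dev->lock. */
	if (usb_autopm_get_interface(dev->intf) < 0)
		return -EIO;

	mutex_lock(&dev->lock);
	if (start_io(dev))
		retval = -EIO;
	mutex_unlock(&dev->lock);

	/* Drop the reference outside the lock, and only on failure;
	 * the matching put for the success case lives in close(). */
	if (retval)
		usb_autopm_put_interface(dev->intf);
	return retval;
}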
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 40d77ba..47fd7a0 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -243,10 +243,10 @@
if (features->type == WACOM_G4 ||
features->type == WACOM_MO) {
input_report_abs(input, ABS_DISTANCE, data[6] & 0x3f);
- rw = (signed)(data[7] & 0x04) - (data[7] & 0x03);
+ rw = (data[7] & 0x04) - (data[7] & 0x03);
} else {
input_report_abs(input, ABS_DISTANCE, data[7] & 0x3f);
- rw = -(signed)data[6];
+ rw = -(signed char)data[6];
}
input_report_rel(input, REL_WHEEL, rw);
}
@@ -442,8 +442,10 @@
/* general pen packet */
if ((data[1] & 0xb8) == 0xa0) {
t = (data[6] << 2) | ((data[7] >> 6) & 3);
- if (features->type >= INTUOS4S && features->type <= INTUOS4L)
+ if ((features->type >= INTUOS4S && features->type <= INTUOS4L) ||
+ features->type == WACOM_21UX2) {
t = (t << 1) | (data[1] & 1);
+ }
input_report_abs(input, ABS_PRESSURE, t);
input_report_abs(input, ABS_TILT_X,
((data[7] << 1) & 0x7e) | (data[8] >> 7));
diff --git a/drivers/isdn/hardware/avm/Kconfig b/drivers/isdn/hardware/avm/Kconfig
index 5dbcbe3..b99b906 100644
--- a/drivers/isdn/hardware/avm/Kconfig
+++ b/drivers/isdn/hardware/avm/Kconfig
@@ -36,12 +36,13 @@
config ISDN_DRV_AVMB1_B1PCMCIA
tristate "AVM B1/M1/M2 PCMCIA support"
+ depends on PCMCIA
help
Enable support for the PCMCIA version of the AVM B1 card.
config ISDN_DRV_AVMB1_AVM_CS
tristate "AVM B1/M1/M2 PCMCIA cs module"
- depends on ISDN_DRV_AVMB1_B1PCMCIA && PCMCIA
+ depends on ISDN_DRV_AVMB1_B1PCMCIA
help
Enable the PCMCIA client driver for the AVM B1/M1/M2
PCMCIA cards.
diff --git a/drivers/isdn/sc/interrupt.c b/drivers/isdn/sc/interrupt.c
index 485be8b..f0225bc 100644
--- a/drivers/isdn/sc/interrupt.c
+++ b/drivers/isdn/sc/interrupt.c
@@ -112,11 +112,19 @@
}
else if(callid>=0x0000 && callid<=0x7FFF)
{
+ int len;
+
pr_debug("%s: Got Incoming Call\n",
sc_adapter[card]->devicename);
- strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
- strcpy(setup.eazmsn,
- sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
+ len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
+ sizeof(setup.phone));
+ if (len >= sizeof(setup.phone))
+ continue;
+ len = strlcpy(setup.eazmsn,
+ sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
+ sizeof(setup.eazmsn));
+ if (len >= sizeof(setup.eazmsn))
+ continue;
setup.si1 = 7;
setup.si2 = 0;
setup.plan = 0;
@@ -176,7 +184,9 @@
* Handle a GetMyNumber Rsp
*/
if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
- strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
+ strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
+ rcvmsg.msg_data.byte_array,
+ sizeof(rcvmsg.msg_data.byte_array));
continue;
}
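
The strcpy()-to-strlcpy() changes above rely on strlcpy() returning the length of the source string, so a return value of at least the destination size signals truncation. A hedged sketch of the check; copy_number() and the -EOVERFLOW choice are illustrative, not from the driver:

#include <linux/errno.h>
#include <linux/string.h>

static int copy_number(char *dst, size_t dst_size, const char *src)
{
	/* strlcpy() returns strlen(src); >= dst_size means truncated */
	if (strlcpy(dst, src, dst_size) >= dst_size)
		return -EOVERFLOW;
	return 0;
}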
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index 74dce4b..350eb34 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -81,7 +81,7 @@
int cmd_level;
int slow_level;
- read_lock(&led_dat->rw_lock);
+ read_lock_irq(&led_dat->rw_lock);
cmd_level = gpio_get_value(led_dat->cmd);
slow_level = gpio_get_value(led_dat->slow);
@@ -95,7 +95,7 @@
}
}
- read_unlock(&led_dat->rw_lock);
+ read_unlock_irq(&led_dat->rw_lock);
return ret;
}
@@ -104,8 +104,9 @@
enum ns2_led_modes mode)
{
int i;
+ unsigned long flags;
- write_lock(&led_dat->rw_lock);
+ write_lock_irqsave(&led_dat->rw_lock, flags);
for (i = 0; i < ARRAY_SIZE(ns2_led_modval); i++) {
if (mode == ns2_led_modval[i].mode) {
@@ -116,7 +117,7 @@
}
}
- write_unlock(&led_dat->rw_lock);
+ write_unlock_irqrestore(&led_dat->rw_lock, flags);
}
static void ns2_led_set(struct led_classdev *led_cdev,
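
The leds-ns2 hunks switch from the plain rwlock primitives to the IRQ-aware variants so a lock holder cannot be interrupted by a context that takes the same lock. A generic hedged sketch; example_lock is hypothetical:

#include <linux/spinlock.h>

static DEFINE_RWLOCK(example_lock);

static void reader_in_process_context(void)
{
	read_lock_irq(&example_lock);	/* unconditionally disables IRQs */
	/* ... read the shared state ... */
	read_unlock_irq(&example_lock);
}

static void writer_in_unknown_context(void)
{
	unsigned long flags;

	/* irqsave/irqrestore preserves the caller's IRQ state, so this
	 * is safe whether or not interrupts were already disabled. */
	write_lock_irqsave(&example_lock, flags);
	/* ... modify the shared state ... */
	write_unlock_irqrestore(&example_lock, flags);
}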
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index 1c4ee6e..bf64e49 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -83,7 +83,7 @@
BLOCKING_NOTIFIER_HEAD(adb_client_list);
static int adb_got_sleep;
static int adb_inited;
-static DECLARE_MUTEX(adb_probe_mutex);
+static DEFINE_SEMAPHORE(adb_probe_mutex);
static int sleepy_trackpad;
static int autopoll_devs;
int __adb_probe_sync;
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 35bc273..2d17e76 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -45,6 +45,7 @@
#include <linux/syscalls.h>
#include <linux/suspend.h>
#include <linux/cpu.h>
+#include <linux/compat.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/io.h>
@@ -2349,11 +2350,52 @@
return ret;
}
+#ifdef CONFIG_COMPAT
+#define PMU_IOC_GET_BACKLIGHT32 _IOR('B', 1, compat_size_t)
+#define PMU_IOC_SET_BACKLIGHT32 _IOW('B', 2, compat_size_t)
+#define PMU_IOC_GET_MODEL32 _IOR('B', 3, compat_size_t)
+#define PMU_IOC_HAS_ADB32 _IOR('B', 4, compat_size_t)
+#define PMU_IOC_CAN_SLEEP32 _IOR('B', 5, compat_size_t)
+#define PMU_IOC_GRAB_BACKLIGHT32 _IOR('B', 6, compat_size_t)
+
+static long compat_pmu_ioctl (struct file *filp, u_int cmd, u_long arg)
+{
+ switch (cmd) {
+ case PMU_IOC_SLEEP:
+ break;
+ case PMU_IOC_GET_BACKLIGHT32:
+ cmd = PMU_IOC_GET_BACKLIGHT;
+ break;
+ case PMU_IOC_SET_BACKLIGHT32:
+ cmd = PMU_IOC_SET_BACKLIGHT;
+ break;
+ case PMU_IOC_GET_MODEL32:
+ cmd = PMU_IOC_GET_MODEL;
+ break;
+ case PMU_IOC_HAS_ADB32:
+ cmd = PMU_IOC_HAS_ADB;
+ break;
+ case PMU_IOC_CAN_SLEEP32:
+ cmd = PMU_IOC_CAN_SLEEP;
+ break;
+ case PMU_IOC_GRAB_BACKLIGHT32:
+ cmd = PMU_IOC_GRAB_BACKLIGHT;
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return pmu_unlocked_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
static const struct file_operations pmu_device_fops = {
.read = pmu_read,
.write = pmu_write,
.poll = pmu_fpoll,
.unlocked_ioctl = pmu_unlocked_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = compat_pmu_ioctl,
+#endif
.open = pmu_open,
.release = pmu_release,
};
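
The new compat handler works because an ioctl command number encodes sizeof(argument type): _IOR('B', 1, size_t) yields different numbers for 32-bit and 64-bit userspace, so compat requests must be remapped before forwarding. A single-command hedged sketch; the example_* names are hypothetical:

#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/ioctl.h>

static long example_unlocked_ioctl(struct file *filp, unsigned int cmd,
				   unsigned long arg);	/* hypothetical */

static long example_compat_ioctl(struct file *filp, unsigned int cmd,
				 unsigned long arg)
{
	if (cmd == _IOR('B', 1, compat_size_t))	/* number seen from 32-bit */
		cmd = _IOR('B', 1, size_t);	/* native equivalent */

	/* compat_ptr() widens the 32-bit user pointer correctly */
	return example_unlocked_ioctl(filp, cmd,
				      (unsigned long)compat_ptr(arg));
}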
diff --git a/drivers/md/.gitignore b/drivers/md/.gitignore
deleted file mode 100644
index a7afec6..0000000
--- a/drivers/md/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-mktables
-raid6altivec*.c
-raid6int*.c
-raid6tables.c
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 1ba1e12..e4fb58d 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1000,10 +1000,11 @@
page = bitmap->sb_page;
offset = sizeof(bitmap_super_t);
if (!file)
- read_sb_page(bitmap->mddev,
- bitmap->mddev->bitmap_info.offset,
- page,
- index, count);
+ page = read_sb_page(
+ bitmap->mddev,
+ bitmap->mddev->bitmap_info.offset,
+ page,
+ index, count);
} else if (file) {
page = read_page(file, index, bitmap, count);
offset = 0;
@@ -1542,8 +1543,7 @@
atomic_read(&bitmap->mddev->recovery_active) == 0);
bitmap->mddev->curr_resync_completed = bitmap->mddev->curr_resync;
- if (bitmap->mddev->persistent)
- set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
+ set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
sector &= ~((1ULL << CHUNK_BLOCK_SHIFT(bitmap)) - 1);
s = 0;
while (s < sector && s < bitmap->mddev->resync_max_sectors) {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c148b63..f20d13e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1643,7 +1643,9 @@
bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
if (rdev->sb_size & bmask)
rdev->sb_size = (rdev->sb_size | bmask) + 1;
- }
+ } else
+ max_dev = le32_to_cpu(sb->max_dev);
+
for (i=0; i<max_dev;i++)
sb->dev_roles[i] = cpu_to_le16(0xfffe);
@@ -2167,9 +2169,9 @@
rdev->recovery_offset = mddev->curr_resync_completed;
}
- if (mddev->external || !mddev->persistent) {
- clear_bit(MD_CHANGE_DEVS, &mddev->flags);
+ if (!mddev->persistent) {
clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ clear_bit(MD_CHANGE_DEVS, &mddev->flags);
wake_up(&mddev->sb_wait);
return;
}
@@ -2178,7 +2180,6 @@
mddev->utime = get_seconds();
- set_bit(MD_CHANGE_PENDING, &mddev->flags);
if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
force_change = 1;
if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
@@ -3371,7 +3372,7 @@
case 0:
if (mddev->in_sync)
st = clean;
- else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
+ else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
st = write_pending;
else if (mddev->safemode)
st = active_idle;
@@ -3452,9 +3453,7 @@
mddev->in_sync = 1;
if (mddev->safemode == 1)
mddev->safemode = 0;
- if (mddev->persistent)
- set_bit(MD_CHANGE_CLEAN,
- &mddev->flags);
+ set_bit(MD_CHANGE_CLEAN, &mddev->flags);
}
err = 0;
} else
@@ -3466,8 +3465,7 @@
case active:
if (mddev->pers) {
restart_array(mddev);
- if (mddev->external)
- clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ clear_bit(MD_CHANGE_PENDING, &mddev->flags);
wake_up(&mddev->sb_wait);
err = 0;
} else {
@@ -6572,6 +6570,7 @@
if (mddev->in_sync) {
mddev->in_sync = 0;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ set_bit(MD_CHANGE_PENDING, &mddev->flags);
md_wakeup_thread(mddev->thread);
did_change = 1;
}
@@ -6580,7 +6579,6 @@
if (did_change)
sysfs_notify_dirent_safe(mddev->sysfs_state);
wait_event(mddev->sb_wait,
- !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
!test_bit(MD_CHANGE_PENDING, &mddev->flags));
}
@@ -6616,6 +6614,7 @@
if (mddev->in_sync) {
mddev->in_sync = 0;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ set_bit(MD_CHANGE_PENDING, &mddev->flags);
if (mddev->safemode_delay &&
mddev->safemode == 0)
mddev->safemode = 1;
@@ -6625,7 +6624,7 @@
} else
spin_unlock_irq(&mddev->write_lock);
- if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
+ if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
return -EAGAIN;
else
return 0;
@@ -6823,8 +6822,7 @@
atomic_read(&mddev->recovery_active) == 0);
mddev->curr_resync_completed =
mddev->curr_resync;
- if (mddev->persistent)
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ set_bit(MD_CHANGE_CLEAN, &mddev->flags);
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
@@ -7073,7 +7071,7 @@
if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
return;
if ( ! (
- (mddev->flags && !mddev->external) ||
+ (mddev->flags & ~ (1<<MD_CHANGE_PENDING)) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
(mddev->external == 0 && mddev->safemode == 1) ||
@@ -7103,8 +7101,7 @@
mddev->recovery_cp == MaxSector) {
mddev->in_sync = 1;
did_change = 1;
- if (mddev->persistent)
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ set_bit(MD_CHANGE_CLEAN, &mddev->flags);
}
if (mddev->safemode == 1)
mddev->safemode = 0;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index a953fe2..3931299 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -140,7 +140,7 @@
unsigned long flags;
#define MD_CHANGE_DEVS 0 /* Some device status has changed */
#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */
-#define MD_CHANGE_PENDING 2 /* superblock update in progress */
+#define MD_CHANGE_PENDING 2 /* switch from 'clean' to 'active' in progress */
int suspended;
atomic_t active_io;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index ad83a4d..0b830bb 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1839,7 +1839,9 @@
/* take from bio_init */
bio->bi_next = NULL;
+ bio->bi_flags &= ~(BIO_POOL_MASK-1);
bio->bi_flags |= 1 << BIO_UPTODATE;
+ bio->bi_comp_cpu = -1;
bio->bi_rw = READ;
bio->bi_vcnt = 0;
bio->bi_idx = 0;
@@ -1912,7 +1914,7 @@
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
break;
BUG_ON(sync_blocks < (PAGE_SIZE>>9));
- if (len > (sync_blocks<<9))
+ if ((len >> 9) > sync_blocks)
len = sync_blocks<<9;
}
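
The RAID1 comparison is rewritten to shift the byte count down rather than the sector count up, since sync_blocks << 9 can wrap when sync_blocks is large for its type (for example a 32-bit sector_t), silently skipping the clamp. The overflow-safe idiom as a generic hedged sketch, assuming 512-byte sectors:

static inline unsigned int clamp_to_blocks(unsigned int len_bytes,
					   unsigned long blocks)
{
	/* Only shift down: (len_bytes >> 9) cannot overflow, while
	 * (blocks << 9) can wrap for large block counts. */
	if ((len_bytes >> 9) > blocks)
		len_bytes = blocks << 9;  /* fits: blocks < len_bytes >> 9 */
	return len_bytes;
}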
diff --git a/drivers/media/IR/ir-keytable.c b/drivers/media/IR/ir-keytable.c
index 7e82a9d..7961d59 100644
--- a/drivers/media/IR/ir-keytable.c
+++ b/drivers/media/IR/ir-keytable.c
@@ -319,7 +319,7 @@
* a keyup event might follow immediately after the keydown.
*/
spin_lock_irqsave(&ir->keylock, flags);
- if (time_is_after_eq_jiffies(ir->keyup_jiffies))
+ if (time_is_before_eq_jiffies(ir->keyup_jiffies))
ir_keyup(ir);
spin_unlock_irqrestore(&ir->keylock, flags);
}
@@ -510,6 +510,13 @@
(ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_IR_RAW) ?
" in raw mode" : "");
+ /*
+ * Default delay of 250ms is too short for some protocols, especially
+ * since the timeout is currently set to 250ms. Increase it to 500ms,
+ * to avoid wrong repetition of the keycodes.
+ */
+ input_dev->rep[REP_DELAY] = 500;
+
return 0;
out_event:
diff --git a/drivers/media/IR/ir-lirc-codec.c b/drivers/media/IR/ir-lirc-codec.c
index 77b5946..e63f757 100644
--- a/drivers/media/IR/ir-lirc-codec.c
+++ b/drivers/media/IR/ir-lirc-codec.c
@@ -267,7 +267,7 @@
features |= LIRC_CAN_SET_SEND_CARRIER;
if (ir_dev->props->s_tx_duty_cycle)
- features |= LIRC_CAN_SET_REC_DUTY_CYCLE;
+ features |= LIRC_CAN_SET_SEND_DUTY_CYCLE;
}
if (ir_dev->props->s_rx_carrier_range)
diff --git a/drivers/media/IR/ir-raw-event.c b/drivers/media/IR/ir-raw-event.c
index 43094e7..8e0e1b1 100644
--- a/drivers/media/IR/ir-raw-event.c
+++ b/drivers/media/IR/ir-raw-event.c
@@ -279,9 +279,11 @@
"rc%u", (unsigned int)ir->devno);
if (IS_ERR(ir->raw->thread)) {
+ int ret = PTR_ERR(ir->raw->thread);
+
kfree(ir->raw);
ir->raw = NULL;
- return PTR_ERR(ir->raw->thread);
+ return ret;
}
mutex_lock(&ir_raw_handler_lock);
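
The ir-raw fix above addresses a use-after-free: the old code freed ir->raw and then dereferenced it to compute the return value. The bug class, reduced to a hedged sketch with a hypothetical raw_ctx type:

#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>

struct raw_ctx {			/* hypothetical */
	struct task_struct *thread;
};

static int example_cleanup(struct raw_ctx *ctx)
{
	int ret = PTR_ERR(ctx->thread);	/* capture before freeing */

	kfree(ctx);
	return ret;	/* PTR_ERR(ctx->thread) here would read freed memory */
}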
diff --git a/drivers/media/IR/ir-sysfs.c b/drivers/media/IR/ir-sysfs.c
index 96dafc4..46d4246 100644
--- a/drivers/media/IR/ir-sysfs.c
+++ b/drivers/media/IR/ir-sysfs.c
@@ -67,13 +67,14 @@
char *tmp = buf;
int i;
- if (ir_dev->props->driver_type == RC_DRIVER_SCANCODE) {
+ if (ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_SCANCODE) {
enabled = ir_dev->rc_tab.ir_type;
allowed = ir_dev->props->allowed_protos;
- } else {
+ } else if (ir_dev->raw) {
enabled = ir_dev->raw->enabled_protocols;
allowed = ir_raw_get_allowed_protocols();
- }
+ } else
+ return sprintf(tmp, "[builtin]\n");
IR_dprintk(1, "allowed - 0x%llx, enabled - 0x%llx\n",
(long long)allowed,
@@ -121,10 +122,14 @@
int rc, i, count = 0;
unsigned long flags;
- if (ir_dev->props->driver_type == RC_DRIVER_SCANCODE)
+ if (ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_SCANCODE)
type = ir_dev->rc_tab.ir_type;
- else
+ else if (ir_dev->raw)
type = ir_dev->raw->enabled_protocols;
+ else {
+ IR_dprintk(1, "Protocol switching not supported\n");
+ return -EINVAL;
+ }
while ((tmp = strsep((char **) &data, " \n")) != NULL) {
if (!*tmp)
@@ -185,7 +190,7 @@
}
}
- if (ir_dev->props->driver_type == RC_DRIVER_SCANCODE) {
+ if (ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_SCANCODE) {
spin_lock_irqsave(&ir_dev->rc_tab.lock, flags);
ir_dev->rc_tab.ir_type = type;
spin_unlock_irqrestore(&ir_dev->rc_tab.lock, flags);
diff --git a/drivers/media/IR/keymaps/rc-rc6-mce.c b/drivers/media/IR/keymaps/rc-rc6-mce.c
index 64264f7..39557ad 100644
--- a/drivers/media/IR/keymaps/rc-rc6-mce.c
+++ b/drivers/media/IR/keymaps/rc-rc6-mce.c
@@ -19,6 +19,7 @@
{ 0x800f0416, KEY_PLAY },
{ 0x800f0418, KEY_PAUSE },
+ { 0x800f046e, KEY_PLAYPAUSE },
{ 0x800f0419, KEY_STOP },
{ 0x800f0417, KEY_RECORD },
@@ -37,6 +38,8 @@
{ 0x800f0411, KEY_VOLUMEDOWN },
{ 0x800f0412, KEY_CHANNELUP },
{ 0x800f0413, KEY_CHANNELDOWN },
+ { 0x800f043a, KEY_BRIGHTNESSUP },
+ { 0x800f0480, KEY_BRIGHTNESSDOWN },
{ 0x800f0401, KEY_NUMERIC_1 },
{ 0x800f0402, KEY_NUMERIC_2 },
diff --git a/drivers/media/IR/mceusb.c b/drivers/media/IR/mceusb.c
index ac6bb2c..bc620e1 100644
--- a/drivers/media/IR/mceusb.c
+++ b/drivers/media/IR/mceusb.c
@@ -120,6 +120,10 @@
{ USB_DEVICE(VENDOR_PHILIPS, 0x0613) },
/* Philips eHome Infrared Transceiver */
{ USB_DEVICE(VENDOR_PHILIPS, 0x0815) },
+ /* Philips/Spinel plus IR transceiver for ASUS */
+ { USB_DEVICE(VENDOR_PHILIPS, 0x206c) },
+ /* Philips/Spinel plus IR transceiver for ASUS */
+ { USB_DEVICE(VENDOR_PHILIPS, 0x2088) },
/* Realtek MCE IR Receiver */
{ USB_DEVICE(VENDOR_REALTEK, 0x0161) },
/* SMK/Toshiba G83C0004D410 */
diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
index fe81834..48397f1 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
@@ -673,9 +673,6 @@
else
dev->props.rc.core.bulk_mode = false;
- /* Need a higher delay, to avoid wrong repeat */
- dev->rc_input_dev->rep[REP_DELAY] = 500;
-
dib0700_rc_setup(dev);
return 0;
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index f634d2e..e06acd1 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -940,6 +940,58 @@
return adap->fe == NULL ? -ENODEV : 0;
}
+/* STK7770P */
+static struct dib7000p_config dib7770p_dib7000p_config = {
+ .output_mpeg2_in_188_bytes = 1,
+
+ .agc_config_count = 1,
+ .agc = &dib7070_agc_config,
+ .bw = &dib7070_bw_config_12_mhz,
+ .tuner_is_baseband = 1,
+ .spur_protect = 1,
+
+ .gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS,
+ .gpio_val = DIB7000P_GPIO_DEFAULT_VALUES,
+ .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS,
+
+ .hostbus_diversity = 1,
+ .enable_current_mirror = 1,
+ .disable_sample_and_hold = 0,
+};
+
+static int stk7770p_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ struct usb_device_descriptor *p = &adap->dev->udev->descriptor;
+ if (p->idVendor == cpu_to_le16(USB_VID_PINNACLE) &&
+ p->idProduct == cpu_to_le16(USB_PID_PINNACLE_PCTV72E))
+ dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0);
+ else
+ dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1);
+ msleep(10);
+ dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1);
+ dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1);
+ dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1);
+ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0);
+
+ dib0700_ctrl_clock(adap->dev, 72, 1);
+
+ msleep(10);
+ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1);
+ msleep(10);
+ dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1);
+
+ if (dib7000p_i2c_enumeration(&adap->dev->i2c_adap, 1, 18,
+ &dib7770p_dib7000p_config) != 0) {
+ err("%s: dib7000p_i2c_enumeration failed. Cannot continue\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x80,
+ &dib7770p_dib7000p_config);
+ return adap->fe == NULL ? -ENODEV : 0;
+}
+
/* DIB807x generic */
static struct dibx000_agc_config dib807x_agc_config[2] = {
{
@@ -1781,7 +1833,7 @@
/* 60 */{ USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_T_XXS_2) },
{ USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK807XPVR) },
{ USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK807XP) },
- { USB_DEVICE(USB_VID_PIXELVIEW, USB_PID_PIXELVIEW_SBTVD) },
+ { USB_DEVICE_VER(USB_VID_PIXELVIEW, USB_PID_PIXELVIEW_SBTVD, 0x000, 0x3f00) },
{ USB_DEVICE(USB_VID_EVOLUTEPC, USB_PID_TVWAY_PLUS) },
/* 65 */{ USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV73ESE) },
{ USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV282E) },
@@ -2406,7 +2458,7 @@
.pid_filter_count = 32,
.pid_filter = stk70x0p_pid_filter,
.pid_filter_ctrl = stk70x0p_pid_filter_ctrl,
- .frontend_attach = stk7070p_frontend_attach,
+ .frontend_attach = stk7770p_frontend_attach,
.tuner_attach = dib7770p_tuner_attach,
DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
diff --git a/drivers/media/dvb/dvb-usb/opera1.c b/drivers/media/dvb/dvb-usb/opera1.c
index 6b22ec6..f896337 100644
--- a/drivers/media/dvb/dvb-usb/opera1.c
+++ b/drivers/media/dvb/dvb-usb/opera1.c
@@ -483,9 +483,7 @@
}
}
kfree(p);
- if (fw) {
- release_firmware(fw);
- }
+ release_firmware(fw);
return ret;
}
diff --git a/drivers/media/dvb/frontends/dib7000p.c b/drivers/media/dvb/frontends/dib7000p.c
index 2e28b97..3aed0d4 100644
--- a/drivers/media/dvb/frontends/dib7000p.c
+++ b/drivers/media/dvb/frontends/dib7000p.c
@@ -260,6 +260,9 @@
// dprintk( "908: %x, 909: %x\n", reg_908, reg_909);
+ reg_909 |= (state->cfg.disable_sample_and_hold & 1) << 4;
+ reg_908 |= (state->cfg.enable_current_mirror & 1) << 7;
+
dib7000p_write_word(state, 908, reg_908);
dib7000p_write_word(state, 909, reg_909);
}
@@ -778,7 +781,10 @@
default:
case GUARD_INTERVAL_1_32: value *= 1; break;
}
- state->div_sync_wait = (value * 3) / 2 + 32; // add 50% SFN margin + compensate for one DVSY-fifo TODO
+ if (state->cfg.diversity_delay == 0)
+ state->div_sync_wait = (value * 3) / 2 + 48; // add 50% SFN margin + compensate for one DVSY-fifo
+ else
+ state->div_sync_wait = (value * 3) / 2 + state->cfg.diversity_delay; // add 50% SFN margin + compensate for one DVSY-fifo
/* deactivate the possibility of diversity reception if extended interleaver */
state->div_force_off = !1 && ch->u.ofdm.transmission_mode != TRANSMISSION_MODE_8K;
diff --git a/drivers/media/dvb/frontends/dib7000p.h b/drivers/media/dvb/frontends/dib7000p.h
index 805dd13..da17345 100644
--- a/drivers/media/dvb/frontends/dib7000p.h
+++ b/drivers/media/dvb/frontends/dib7000p.h
@@ -33,6 +33,11 @@
int (*agc_control) (struct dvb_frontend *, u8 before);
u8 output_mode;
+ u8 disable_sample_and_hold : 1;
+
+ u8 enable_current_mirror : 1;
+ u8 diversity_delay;
+
};
#define DEFAULT_DIB7000P_I2C_ADDRESS 18
diff --git a/drivers/media/dvb/mantis/Kconfig b/drivers/media/dvb/mantis/Kconfig
index decdeda..fd0830e 100644
--- a/drivers/media/dvb/mantis/Kconfig
+++ b/drivers/media/dvb/mantis/Kconfig
@@ -1,6 +1,6 @@
config MANTIS_CORE
tristate "Mantis/Hopper PCI bridge based devices"
- depends on PCI && I2C && INPUT
+ depends on PCI && I2C && INPUT && IR_CORE
help
Support for PCI cards based on the Mantis and Hopper PCI bridge.
diff --git a/drivers/media/dvb/siano/smscoreapi.c b/drivers/media/dvb/siano/smscoreapi.c
index d93468c..ff3b0fa 100644
--- a/drivers/media/dvb/siano/smscoreapi.c
+++ b/drivers/media/dvb/siano/smscoreapi.c
@@ -1098,33 +1098,26 @@
*
* @return pointer to descriptor on success, NULL on error.
*/
-struct smscore_buffer_t *smscore_getbuffer(struct smscore_device_t *coredev)
+
+struct smscore_buffer_t *get_entry(struct smscore_device_t *coredev)
{
struct smscore_buffer_t *cb = NULL;
unsigned long flags;
- DEFINE_WAIT(wait);
-
spin_lock_irqsave(&coredev->bufferslock, flags);
-
- /* This function must return a valid buffer, since the buffer list is
- * finite, we check that there is an available buffer, if not, we wait
- * until such buffer become available.
- */
-
- prepare_to_wait(&coredev->buffer_mng_waitq, &wait, TASK_INTERRUPTIBLE);
- if (list_empty(&coredev->buffers)) {
- spin_unlock_irqrestore(&coredev->bufferslock, flags);
- schedule();
- spin_lock_irqsave(&coredev->bufferslock, flags);
+ if (!list_empty(&coredev->buffers)) {
+ cb = (struct smscore_buffer_t *) coredev->buffers.next;
+ list_del(&cb->entry);
}
-
- finish_wait(&coredev->buffer_mng_waitq, &wait);
-
- cb = (struct smscore_buffer_t *) coredev->buffers.next;
- list_del(&cb->entry);
-
spin_unlock_irqrestore(&coredev->bufferslock, flags);
+ return cb;
+}
+
+struct smscore_buffer_t *smscore_getbuffer(struct smscore_device_t *coredev)
+{
+ struct smscore_buffer_t *cb = NULL;
+
+ wait_event(coredev->buffer_mng_waitq, (cb = get_entry(coredev)));
return cb;
}
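
The siano rework replaces an open-coded prepare_to_wait()/schedule() sequence, which could dequeue from an empty list after a spurious wakeup, with wait_event() plus a helper that atomically pops an entry or reports none. The shape of the pattern as a hedged sketch; pop_entry() is illustrative:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

static struct list_head *pop_entry(struct list_head *list, spinlock_t *lock)
{
	struct list_head *entry = NULL;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (!list_empty(list)) {
		entry = list->next;
		list_del(entry);
	}
	spin_unlock_irqrestore(lock, flags);
	return entry;	/* NULL means "nothing yet"; wait_event() re-checks */
}

/* Usage: wait_event(waitq, (cb = pop_entry(&buffers, &bufferslock))); */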
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index 67a4ec8..4ce541a 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -395,7 +395,7 @@
radio->registers[POWERCFG] = POWERCFG_ENABLE;
if (si470x_set_register(radio, POWERCFG) < 0) {
retval = -EIO;
- goto err_all;
+ goto err_video;
}
msleep(110);
diff --git a/drivers/media/video/cx231xx/Makefile b/drivers/media/video/cx231xx/Makefile
index 755dd0c..6f2b573 100644
--- a/drivers/media/video/cx231xx/Makefile
+++ b/drivers/media/video/cx231xx/Makefile
@@ -11,4 +11,5 @@
EXTRA_CFLAGS += -Idrivers/media/common/tuners
EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
+EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-usb
diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c
index 6bdc0ef..f2a4900 100644
--- a/drivers/media/video/cx231xx/cx231xx-cards.c
+++ b/drivers/media/video/cx231xx/cx231xx-cards.c
@@ -32,6 +32,7 @@
#include <media/v4l2-chip-ident.h>
#include <media/cx25840.h>
+#include "dvb-usb-ids.h"
#include "xc5000.h"
#include "cx231xx.h"
@@ -175,6 +176,8 @@
.driver_info = CX231XX_BOARD_CNXT_RDE_250},
{USB_DEVICE(0x0572, 0x58A1),
.driver_info = CX231XX_BOARD_CNXT_RDU_250},
+ {USB_DEVICE_VER(USB_VID_PIXELVIEW, USB_PID_PIXELVIEW_SBTVD, 0x4000, 0x4fff),
+ .driver_info = CX231XX_BOARD_UNKNOWN},
{},
};
@@ -226,14 +229,16 @@
dev->board.name, dev->model);
/* set the direction for GPIO pins */
- cx231xx_set_gpio_direction(dev, dev->board.tuner_gpio->bit, 1);
- cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit, 1);
- cx231xx_set_gpio_direction(dev, dev->board.tuner_sif_gpio, 1);
+ if (dev->board.tuner_gpio) {
+ cx231xx_set_gpio_direction(dev, dev->board.tuner_gpio->bit, 1);
+ cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit, 1);
+ cx231xx_set_gpio_direction(dev, dev->board.tuner_sif_gpio, 1);
- /* request some modules if any required */
+ /* request some modules if any required */
- /* reset the Tuner */
- cx231xx_gpio_set(dev, dev->board.tuner_gpio);
+ /* reset the Tuner */
+ cx231xx_gpio_set(dev, dev->board.tuner_gpio);
+ }
/* set the mode to Analog mode initially */
cx231xx_set_mode(dev, CX231XX_ANALOG_MODE);
diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c
index 86ca8c2..f5a3e74 100644
--- a/drivers/media/video/cx25840/cx25840-core.c
+++ b/drivers/media/video/cx25840/cx25840-core.c
@@ -1996,7 +1996,7 @@
state->volume = v4l2_ctrl_new_std(&state->hdl,
&cx25840_audio_ctrl_ops, V4L2_CID_AUDIO_VOLUME,
- 0, 65335, 65535 / 100, default_volume);
+ 0, 65535, 65535 / 100, default_volume);
state->mute = v4l2_ctrl_new_std(&state->hdl,
&cx25840_audio_ctrl_ops, V4L2_CID_AUDIO_MUTE,
0, 1, 1, 0);
diff --git a/drivers/media/video/cx88/Kconfig b/drivers/media/video/cx88/Kconfig
index 99dbae1..0fa85cb 100644
--- a/drivers/media/video/cx88/Kconfig
+++ b/drivers/media/video/cx88/Kconfig
@@ -17,7 +17,7 @@
config VIDEO_CX88_ALSA
tristate "Conexant 2388x DMA audio support"
- depends on VIDEO_CX88 && SND && EXPERIMENTAL
+ depends on VIDEO_CX88 && SND
select SND_PCM
---help---
This is a video4linux driver for direct (DMA) audio on
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index b984610..78abc1c 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -223,6 +223,7 @@
usb_rcvintpipe(dev, ep->bEndpointAddress),
buffer, buffer_len,
int_irq, (void *)gspca_dev, interval);
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
gspca_dev->int_urb = urb;
ret = usb_submit_urb(urb, GFP_KERNEL);
if (ret < 0) {
diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c
index 83a718f..9052d57 100644
--- a/drivers/media/video/gspca/sn9c20x.c
+++ b/drivers/media/video/gspca/sn9c20x.c
@@ -2357,8 +2357,7 @@
(data[33] << 10);
avg_lum >>= 9;
atomic_set(&sd->avg_lum, avg_lum);
- gspca_frame_add(gspca_dev, LAST_PACKET,
- data, len);
+ gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
return;
}
if (gspca_dev->last_packet_type == LAST_PACKET) {
diff --git a/drivers/media/video/ivtv/ivtvfb.c b/drivers/media/video/ivtv/ivtvfb.c
index be03a71..f0316d0 100644
--- a/drivers/media/video/ivtv/ivtvfb.c
+++ b/drivers/media/video/ivtv/ivtvfb.c
@@ -466,6 +466,8 @@
struct fb_vblank vblank;
u32 trace;
+ memset(&vblank, 0, sizeof(struct fb_vblank));
+
vblank.flags = FB_VBLANK_HAVE_COUNT |FB_VBLANK_HAVE_VCOUNT |
FB_VBLANK_HAVE_VSYNC;
trace = read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16;
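
The added memset() closes a kernel stack infoleak: fb_vblank has fields this path never sets, and copying the struct to userspace would otherwise expose stale stack bytes. The general pattern as a hedged sketch; uptr is a hypothetical user pointer:

#include <linux/fb.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static int example_get_vblank(struct fb_vblank __user *uptr)
{
	struct fb_vblank vblank;

	memset(&vblank, 0, sizeof(vblank));	/* zero padding and unset fields */
	vblank.flags = FB_VBLANK_HAVE_COUNT;	/* then fill only what is known */
	if (copy_to_user(uptr, &vblank, sizeof(vblank)))
		return -EFAULT;
	return 0;
}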
diff --git a/drivers/media/video/mem2mem_testdev.c b/drivers/media/video/mem2mem_testdev.c
index 4525335..a7210d9 100644
--- a/drivers/media/video/mem2mem_testdev.c
+++ b/drivers/media/video/mem2mem_testdev.c
@@ -239,7 +239,7 @@
return -EFAULT;
}
- if (in_buf->vb.size < out_buf->vb.size) {
+ if (in_buf->vb.size > out_buf->vb.size) {
v4l2_err(&dev->v4l2_dev, "Output buffer is too small\n");
return -EINVAL;
}
@@ -1014,6 +1014,7 @@
v4l2_m2m_release(dev->m2m_dev);
del_timer_sync(&dev->timer);
video_unregister_device(dev->vfd);
+ video_device_release(dev->vfd);
v4l2_device_unregister(&dev->v4l2_dev);
kfree(dev);
diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c
index 758a4db..c71af4e 100644
--- a/drivers/media/video/mt9m111.c
+++ b/drivers/media/video/mt9m111.c
@@ -447,6 +447,9 @@
dev_dbg(&client->dev, "%s left=%d, top=%d, width=%d, height=%d\n",
__func__, rect.left, rect.top, rect.width, rect.height);
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
ret = mt9m111_make_rect(client, &rect);
if (!ret)
mt9m111->rect = rect;
@@ -466,12 +469,14 @@
static int mt9m111_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
{
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
a->bounds.left = MT9M111_MIN_DARK_COLS;
a->bounds.top = MT9M111_MIN_DARK_ROWS;
a->bounds.width = MT9M111_MAX_WIDTH;
a->bounds.height = MT9M111_MAX_HEIGHT;
a->defrect = a->bounds;
- a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
a->pixelaspect.numerator = 1;
a->pixelaspect.denominator = 1;
@@ -487,6 +492,7 @@
mf->width = mt9m111->rect.width;
mf->height = mt9m111->rect.height;
mf->code = mt9m111->fmt->code;
+ mf->colorspace = mt9m111->fmt->colorspace;
mf->field = V4L2_FIELD_NONE;
return 0;
diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c
index e7cd23c..b48473c 100644
--- a/drivers/media/video/mt9v022.c
+++ b/drivers/media/video/mt9v022.c
@@ -402,9 +402,6 @@
if (mt9v022->model != V4L2_IDENT_MT9V022IX7ATC)
return -EINVAL;
break;
- case 0:
- /* No format change, only geometry */
- break;
default:
return -EINVAL;
}
diff --git a/drivers/media/video/mx2_camera.c b/drivers/media/video/mx2_camera.c
index 66ff174..b6ea672 100644
--- a/drivers/media/video/mx2_camera.c
+++ b/drivers/media/video/mx2_camera.c
@@ -378,6 +378,9 @@
spin_lock_irqsave(&pcdev->lock, flags);
+ if (*fb_active == NULL)
+ goto out;
+
vb = &(*fb_active)->vb;
dev_dbg(pcdev->dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
vb, vb->baddr, vb->bsize);
@@ -402,6 +405,7 @@
*fb_active = buf;
+out:
spin_unlock_irqrestore(&pcdev->lock, flags);
}
diff --git a/drivers/media/video/pvrusb2/pvrusb2-ctrl.c b/drivers/media/video/pvrusb2/pvrusb2-ctrl.c
index 1b992b8..55ea914 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-ctrl.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-ctrl.c
@@ -513,7 +513,7 @@
if (ret >= 0) {
ret = pvr2_ctrl_range_check(cptr,*valptr);
}
- if (maskptr) *maskptr = ~0;
+ *maskptr = ~0;
} else if (cptr->info->type == pvr2_ctl_bool) {
ret = parse_token(ptr,len,valptr,boolNames,
ARRAY_SIZE(boolNames));
@@ -522,7 +522,7 @@
} else if (ret == 0) {
*valptr = (*valptr & 1) ? !0 : 0;
}
- if (maskptr) *maskptr = 1;
+ *maskptr = 1;
} else if (cptr->info->type == pvr2_ctl_enum) {
ret = parse_token(
ptr,len,valptr,
@@ -531,7 +531,7 @@
if (ret >= 0) {
ret = pvr2_ctrl_range_check(cptr,*valptr);
}
- if (maskptr) *maskptr = ~0;
+ *maskptr = ~0;
} else if (cptr->info->type == pvr2_ctl_bitmask) {
ret = parse_tlist(
ptr,len,maskptr,valptr,
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
index b151c7b..6961c55 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.c
+++ b/drivers/media/video/s5p-fimc/fimc-core.c
@@ -393,6 +393,37 @@
dbg("ctx->out_order_1p= %d", ctx->out_order_1p);
}
+static void fimc_prepare_dma_offset(struct fimc_ctx *ctx, struct fimc_frame *f)
+{
+ struct samsung_fimc_variant *variant = ctx->fimc_dev->variant;
+
+ f->dma_offset.y_h = f->offs_h;
+ if (!variant->pix_hoff)
+ f->dma_offset.y_h *= (f->fmt->depth >> 3);
+
+ f->dma_offset.y_v = f->offs_v;
+
+ f->dma_offset.cb_h = f->offs_h;
+ f->dma_offset.cb_v = f->offs_v;
+
+ f->dma_offset.cr_h = f->offs_h;
+ f->dma_offset.cr_v = f->offs_v;
+
+ if (!variant->pix_hoff) {
+ if (f->fmt->planes_cnt == 3) {
+ f->dma_offset.cb_h >>= 1;
+ f->dma_offset.cr_h >>= 1;
+ }
+ if (f->fmt->color == S5P_FIMC_YCBCR420) {
+ f->dma_offset.cb_v >>= 1;
+ f->dma_offset.cr_v >>= 1;
+ }
+ }
+
+ dbg("in_offset: color= %d, y_h= %d, y_v= %d",
+ f->fmt->color, f->dma_offset.y_h, f->dma_offset.y_v);
+}
+
/**
* fimc_prepare_config - check dimensions, operation and color mode
* and pre-calculate offset and the scaling coefficients.
@@ -406,7 +437,6 @@
{
struct fimc_frame *s_frame, *d_frame;
struct fimc_vid_buffer *buf = NULL;
- struct samsung_fimc_variant *variant = ctx->fimc_dev->variant;
int ret = 0;
s_frame = &ctx->s_frame;
@@ -419,61 +449,16 @@
swap(d_frame->width, d_frame->height);
}
- /* Prepare the output offset ratios for scaler. */
- d_frame->dma_offset.y_h = d_frame->offs_h;
- if (!variant->pix_hoff)
- d_frame->dma_offset.y_h *= (d_frame->fmt->depth >> 3);
+ /* Prepare the DMA offset ratios for scaler. */
+ fimc_prepare_dma_offset(ctx, &ctx->s_frame);
+ fimc_prepare_dma_offset(ctx, &ctx->d_frame);
- d_frame->dma_offset.y_v = d_frame->offs_v;
-
- d_frame->dma_offset.cb_h = d_frame->offs_h;
- d_frame->dma_offset.cb_v = d_frame->offs_v;
-
- d_frame->dma_offset.cr_h = d_frame->offs_h;
- d_frame->dma_offset.cr_v = d_frame->offs_v;
-
- if (!variant->pix_hoff && d_frame->fmt->planes_cnt == 3) {
- d_frame->dma_offset.cb_h >>= 1;
- d_frame->dma_offset.cb_v >>= 1;
- d_frame->dma_offset.cr_h >>= 1;
- d_frame->dma_offset.cr_v >>= 1;
- }
-
- dbg("out offset: color= %d, y_h= %d, y_v= %d",
- d_frame->fmt->color,
- d_frame->dma_offset.y_h, d_frame->dma_offset.y_v);
-
- /* Prepare the input offset ratios for scaler. */
- s_frame->dma_offset.y_h = s_frame->offs_h;
- if (!variant->pix_hoff)
- s_frame->dma_offset.y_h *= (s_frame->fmt->depth >> 3);
- s_frame->dma_offset.y_v = s_frame->offs_v;
-
- s_frame->dma_offset.cb_h = s_frame->offs_h;
- s_frame->dma_offset.cb_v = s_frame->offs_v;
-
- s_frame->dma_offset.cr_h = s_frame->offs_h;
- s_frame->dma_offset.cr_v = s_frame->offs_v;
-
- if (!variant->pix_hoff && s_frame->fmt->planes_cnt == 3) {
- s_frame->dma_offset.cb_h >>= 1;
- s_frame->dma_offset.cb_v >>= 1;
- s_frame->dma_offset.cr_h >>= 1;
- s_frame->dma_offset.cr_v >>= 1;
- }
-
- dbg("in offset: color= %d, y_h= %d, y_v= %d",
- s_frame->fmt->color, s_frame->dma_offset.y_h,
- s_frame->dma_offset.y_v);
-
- fimc_set_yuv_order(ctx);
-
- /* Check against the scaler ratio. */
if (s_frame->height > (SCALER_MAX_VRATIO * d_frame->height) ||
s_frame->width > (SCALER_MAX_HRATIO * d_frame->width)) {
err("out of scaler range");
return -EINVAL;
}
+ fimc_set_yuv_order(ctx);
}
/* Input DMA mode is not allowed when the scaler is disabled. */
@@ -822,7 +807,8 @@
} else {
v4l2_err(&ctx->fimc_dev->m2m.v4l2_dev,
"Wrong buffer/video queue type (%d)\n", f->type);
- return -EINVAL;
+ ret = -EINVAL;
+ goto s_fmt_out;
}
pix = &f->fmt.pix;
@@ -1414,8 +1400,10 @@
}
fimc->work_queue = create_workqueue(dev_name(&fimc->pdev->dev));
- if (!fimc->work_queue)
+ if (!fimc->work_queue) {
+ ret = -ENOMEM;
goto err_irq;
+ }
ret = fimc_register_m2m_device(fimc);
if (ret)
@@ -1492,6 +1480,7 @@
};
static struct samsung_fimc_variant fimc01_variant_s5pv210 = {
+ .pix_hoff = 1,
.has_inp_rot = 1,
.has_out_rot = 1,
.min_inp_pixsize = 16,
@@ -1506,6 +1495,7 @@
};
static struct samsung_fimc_variant fimc2_variant_s5pv210 = {
+ .pix_hoff = 1,
.min_inp_pixsize = 16,
.min_out_pixsize = 32,
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index ec697fc..bb8d83d 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -4323,13 +4323,13 @@
},
[SAA7134_BOARD_BEHOLD_COLUMBUS_TVFM] = {
/* Beholder Intl. Ltd. 2008 */
- /*Dmitry Belimov <d.belimov@gmail.com> */
- .name = "Beholder BeholdTV Columbus TVFM",
+ /* Dmitry Belimov <d.belimov@gmail.com> */
+ .name = "Beholder BeholdTV Columbus TV/FM",
.audio_clock = 0x00187de7,
.tuner_type = TUNER_ALPS_TSBE5_PAL,
- .radio_type = UNSET,
- .tuner_addr = ADDR_UNSET,
- .radio_addr = ADDR_UNSET,
+ .radio_type = TUNER_TEA5767,
+ .tuner_addr = 0xc2 >> 1,
+ .radio_addr = 0xc0 >> 1,
.tda9887_conf = TDA9887_PRESENT,
.gpiomask = 0x000A8004,
.inputs = {{
diff --git a/drivers/media/video/saa7164/saa7164-buffer.c b/drivers/media/video/saa7164/saa7164-buffer.c
index 5713f3a..ddd25d3 100644
--- a/drivers/media/video/saa7164/saa7164-buffer.c
+++ b/drivers/media/video/saa7164/saa7164-buffer.c
@@ -136,10 +136,11 @@
int saa7164_buffer_dealloc(struct saa7164_tsport *port,
struct saa7164_buffer *buf)
{
- struct saa7164_dev *dev = port->dev;
+ struct saa7164_dev *dev;
- if ((buf == 0) || (port == 0))
+ if (!buf || !port)
return SAA_ERR_BAD_PARAMETER;
+ dev = port->dev;
dprintk(DBGLVL_BUF, "%s() deallocating buffer @ 0x%p\n", __func__, buf);
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
index 8bdd940..2ac85d8 100644
--- a/drivers/media/video/uvc/uvc_driver.c
+++ b/drivers/media/video/uvc/uvc_driver.c
@@ -486,6 +486,12 @@
max(frame->dwFrameInterval[0],
frame->dwDefaultFrameInterval));
+ if (dev->quirks & UVC_QUIRK_RESTRICT_FRAME_RATE) {
+ frame->bFrameIntervalType = 1;
+ frame->dwFrameInterval[0] =
+ frame->dwDefaultFrameInterval;
+ }
+
uvc_trace(UVC_TRACE_DESCR, "- %ux%u (%u.%u fps)\n",
frame->wWidth, frame->wHeight,
10000000/frame->dwDefaultFrameInterval,
@@ -2026,6 +2032,15 @@
.bInterfaceClass = USB_CLASS_VENDOR_SPEC,
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0 },
+ /* Chicony CNF7129 (Asus EEE 100HE) */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x04f2,
+ .idProduct = 0xb071,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_QUIRK_RESTRICT_FRAME_RATE },
/* Alcor Micro AU3820 (Future Boy PC USB Webcam) */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
@@ -2091,6 +2106,15 @@
.bInterfaceProtocol = 0,
.driver_info = UVC_QUIRK_PROBE_MINMAX
| UVC_QUIRK_PROBE_DEF },
+ /* IMC Networks (Medion Akoya) */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x13d3,
+ .idProduct = 0x5103,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_QUIRK_STREAM_NO_FID },
/* Syntek (HP Spartan) */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
index bdacf3b..892e0e5 100644
--- a/drivers/media/video/uvc/uvcvideo.h
+++ b/drivers/media/video/uvc/uvcvideo.h
@@ -182,6 +182,7 @@
#define UVC_QUIRK_IGNORE_SELECTOR_UNIT 0x00000020
#define UVC_QUIRK_FIX_BANDWIDTH 0x00000080
#define UVC_QUIRK_PROBE_DEF 0x00000100
+#define UVC_QUIRK_RESTRICT_FRAME_RATE 0x00000200
/* Format flags */
#define UVC_FMT_FLAG_COMPRESSED 0x00000001
diff --git a/drivers/media/video/v4l2-compat-ioctl32.c b/drivers/media/video/v4l2-compat-ioctl32.c
index 073f013..86294ed3 100644
--- a/drivers/media/video/v4l2-compat-ioctl32.c
+++ b/drivers/media/video/v4l2-compat-ioctl32.c
@@ -193,17 +193,24 @@
struct video_code32 {
char loadwhat[16]; /* name or tag of file being passed */
compat_int_t datasize;
- unsigned char *data;
+ compat_uptr_t data;
};
-static int get_microcode32(struct video_code *kp, struct video_code32 __user *up)
+static struct video_code __user *get_microcode32(struct video_code32 *kp)
{
- if (!access_ok(VERIFY_READ, up, sizeof(struct video_code32)) ||
- copy_from_user(kp->loadwhat, up->loadwhat, sizeof(up->loadwhat)) ||
- get_user(kp->datasize, &up->datasize) ||
- copy_from_user(kp->data, up->data, up->datasize))
- return -EFAULT;
- return 0;
+ struct video_code __user *up;
+
+ up = compat_alloc_user_space(sizeof(*up));
+
+ /*
+ * NOTE! We don't actually care if these fail. If the
+ * user address is invalid, the native ioctl will do
+ * the error handling for us
+ */
+ (void) copy_to_user(up->loadwhat, kp->loadwhat, sizeof(up->loadwhat));
+ (void) put_user(kp->datasize, &up->datasize);
+ (void) put_user(compat_ptr(kp->data), &up->data);
+ return up;
}
#define VIDIOCGTUNER32 _IOWR('v', 4, struct video_tuner32)
@@ -739,7 +746,7 @@
struct video_tuner vt;
struct video_buffer vb;
struct video_window vw;
- struct video_code vc;
+ struct video_code32 vc;
struct video_audio va;
#endif
struct v4l2_format v2f;
@@ -818,8 +825,11 @@
break;
case VIDIOCSMICROCODE:
- err = get_microcode32(&karg.vc, up);
- compatible_arg = 0;
+ /* Copy the 32-bit "video_code32" to kernel space */
+ if (copy_from_user(&karg.vc, up, sizeof(karg.vc)))
+ return -EFAULT;
+ /* Convert the 32-bit version to a 64-bit version in user space */
+ up = get_microcode32(&karg.vc);
break;
case VIDIOCSFREQ:
diff --git a/drivers/media/video/videobuf-dma-contig.c b/drivers/media/video/videobuf-dma-contig.c
index 372b87e..6ff9e4b 100644
--- a/drivers/media/video/videobuf-dma-contig.c
+++ b/drivers/media/video/videobuf-dma-contig.c
@@ -393,8 +393,10 @@
}
/* read() method */
- dma_free_coherent(q->dev, mem->size, mem->vaddr, mem->dma_handle);
- mem->vaddr = NULL;
+ if (mem->vaddr) {
+ dma_free_coherent(q->dev, mem->size, mem->vaddr, mem->dma_handle);
+ mem->vaddr = NULL;
+ }
}
EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);
diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
index 06f9a9c..2ad0bc2 100644
--- a/drivers/media/video/videobuf-dma-sg.c
+++ b/drivers/media/video/videobuf-dma-sg.c
@@ -94,7 +94,7 @@
* must free the memory.
*/
static struct scatterlist *videobuf_pages_to_sg(struct page **pages,
- int nr_pages, int offset)
+ int nr_pages, int offset, size_t size)
{
struct scatterlist *sglist;
int i;
@@ -110,12 +110,14 @@
/* DMA to highmem pages might not work */
goto highmem;
sg_set_page(&sglist[0], pages[0], PAGE_SIZE - offset, offset);
+ size -= PAGE_SIZE - offset;
for (i = 1; i < nr_pages; i++) {
if (NULL == pages[i])
goto nopage;
if (PageHighMem(pages[i]))
goto highmem;
- sg_set_page(&sglist[i], pages[i], PAGE_SIZE, 0);
+ sg_set_page(&sglist[i], pages[i], min(PAGE_SIZE, size), 0);
+ size -= min(PAGE_SIZE, size);
}
return sglist;
@@ -170,7 +172,8 @@
first = (data & PAGE_MASK) >> PAGE_SHIFT;
last = ((data+size-1) & PAGE_MASK) >> PAGE_SHIFT;
- dma->offset = data & ~PAGE_MASK;
+ dma->offset = data & ~PAGE_MASK;
+ dma->size = size;
dma->nr_pages = last-first+1;
dma->pages = kmalloc(dma->nr_pages * sizeof(struct page *), GFP_KERNEL);
if (NULL == dma->pages)
@@ -252,7 +255,7 @@
if (dma->pages) {
dma->sglist = videobuf_pages_to_sg(dma->pages, dma->nr_pages,
- dma->offset);
+ dma->offset, dma->size);
}
if (dma->vaddr) {
dma->sglist = videobuf_vmalloc_to_sg(dma->vaddr,
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index 04028a9..428377a 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -429,24 +429,25 @@
irq_tsc = cache_tsc;
for (i = 0; i < ARRAY_SIZE(max8925_irqs); i++) {
irq_data = &max8925_irqs[i];
+ /* 1 -- disable, 0 -- enable */
switch (irq_data->mask_reg) {
case MAX8925_CHG_IRQ1_MASK:
- irq_chg[0] &= irq_data->enable;
+ irq_chg[0] &= ~irq_data->enable;
break;
case MAX8925_CHG_IRQ2_MASK:
- irq_chg[1] &= irq_data->enable;
+ irq_chg[1] &= ~irq_data->enable;
break;
case MAX8925_ON_OFF_IRQ1_MASK:
- irq_on[0] &= irq_data->enable;
+ irq_on[0] &= ~irq_data->enable;
break;
case MAX8925_ON_OFF_IRQ2_MASK:
- irq_on[1] &= irq_data->enable;
+ irq_on[1] &= ~irq_data->enable;
break;
case MAX8925_RTC_IRQ_MASK:
- irq_rtc &= irq_data->enable;
+ irq_rtc &= ~irq_data->enable;
break;
case MAX8925_TSC_IRQ_MASK:
- irq_tsc &= irq_data->enable;
+ irq_tsc &= ~irq_data->enable;
break;
default:
dev_err(chip->dev, "wrong IRQ\n");
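
The max8925 fix hinges on register polarity, as the added comment notes: in these mask registers a set bit disables the interrupt source, so enabling a source means clearing its bit, i.e. reg &= ~bit rather than reg &= bit. A one-line hedged sketch:

#include <linux/types.h>

/* 1 = masked (disabled), 0 = unmasked (enabled) */
static inline u8 unmask_source(u8 mask_reg, u8 bit)
{
	return mask_reg & ~bit;
}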
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index 7dabe4d..294183b 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -394,8 +394,13 @@
irq = irq - wm831x->irq_base;
- if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11)
- return -EINVAL;
+ if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) {
+ /* Ignore internal-only IRQs */
+ if (irq >= 0 && irq < WM831X_NUM_IRQS)
+ return 0;
+ else
+ return -EINVAL;
+ }
switch (type) {
case IRQ_TYPE_EDGE_BOTH:
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 0b591b6..b743312 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -368,7 +368,7 @@
If unsure, say N.
To compile this driver as a module, choose M here: the
- module will be called vmware_balloon.
+ module will be called vmw_balloon.
config ARM_CHARLCD
bool "ARM Ltd. Character LCD Driver"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 255a80d..42eab95 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -33,5 +33,5 @@
obj-$(CONFIG_HMC6352) += hmc6352.o
obj-y += eeprom/
obj-y += cb710/
-obj-$(CONFIG_VMWARE_BALLOON) += vmware_balloon.o
+obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o
obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o
diff --git a/drivers/misc/bh1780gli.c b/drivers/misc/bh1780gli.c
index 714c6b4..d5f3a3f 100644
--- a/drivers/misc/bh1780gli.c
+++ b/drivers/misc/bh1780gli.c
@@ -190,7 +190,6 @@
ddata = i2c_get_clientdata(client);
sysfs_remove_group(&client->dev.kobj, &bh1780_attr_group);
- i2c_set_clientdata(client, NULL);
kfree(ddata);
return 0;
diff --git a/drivers/misc/vmware_balloon.c b/drivers/misc/vmw_balloon.c
similarity index 100%
rename from drivers/misc/vmware_balloon.c
rename to drivers/misc/vmw_balloon.c
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 5db49b1..09eee6d 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1631,6 +1631,19 @@
if (host->bus_ops && !host->bus_dead) {
if (host->bus_ops->suspend)
err = host->bus_ops->suspend(host);
+ if (err == -ENOSYS || !host->bus_ops->resume) {
+ /*
+ * We simply "remove" the card in this case.
+ * It will be redetected on resume.
+ */
+ if (host->bus_ops->remove)
+ host->bus_ops->remove(host);
+ mmc_claim_host(host);
+ mmc_detach_bus(host);
+ mmc_release_host(host);
+ host->pm_flags = 0;
+ err = 0;
+ }
}
mmc_bus_put(host);
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index bd2755e..f332c52 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -362,9 +362,8 @@
goto err;
}
- err = mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid);
-
- if (!err) {
+ if (ocr & R4_MEMORY_PRESENT
+ && mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid) == 0) {
card->type = MMC_TYPE_SD_COMBO;
if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO ||
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index 5f3a599..87226cd 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -66,6 +66,7 @@
#include <linux/clk.h>
#include <linux/atmel_pdc.h>
#include <linux/gfp.h>
+#include <linux/highmem.h>
#include <linux/mmc/host.h>
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
index 9a68ff4..5a950b1 100644
--- a/drivers/mmc/host/imxmmc.c
+++ b/drivers/mmc/host/imxmmc.c
@@ -148,11 +148,12 @@
while (delay--) {
reg = readw(host->base + MMC_REG_STATUS);
- if (reg & STATUS_CARD_BUS_CLK_RUN)
+ if (reg & STATUS_CARD_BUS_CLK_RUN) {
/* Check twice before cut */
reg = readw(host->base + MMC_REG_STATUS);
if (reg & STATUS_CARD_BUS_CLK_RUN)
return 0;
+ }
if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
return 0;
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 4a8776f..4526d27 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -2305,7 +2305,6 @@
int ret = 0;
struct platform_device *pdev = to_platform_device(dev);
struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
- pm_message_t state = PMSG_SUSPEND; /* unused by MMC core */
if (host && host->suspended)
return 0;
@@ -2324,8 +2323,8 @@
}
}
cancel_work_sync(&host->mmc_carddetect_work);
- mmc_host_enable(host->mmc);
ret = mmc_suspend_host(host->mmc);
+ mmc_host_enable(host->mmc);
if (ret == 0) {
omap_hsmmc_disable_irq(host);
OMAP_HSMMC_WRITE(host->base, HCTL,
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 2e16e0a..976330d 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1600,7 +1600,7 @@
host->pio_active = XFER_NONE;
#ifdef CONFIG_MMC_S3C_PIODMA
- host->dodma = host->pdata->dma;
+ host->dodma = host->pdata->use_dma;
#endif
host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 71ad416..aacb862 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -241,8 +241,10 @@
static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
{
struct sdhci_host *host = platform_get_drvdata(dev);
+ unsigned long flags;
+
if (host) {
- spin_lock(&host->lock);
+ spin_lock_irqsave(&host->lock, flags);
if (state) {
dev_dbg(&dev->dev, "card inserted.\n");
host->flags &= ~SDHCI_DEVICE_DEAD;
@@ -253,7 +255,7 @@
host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
}
tasklet_schedule(&host->card_tasklet);
- spin_unlock(&host->lock);
+ spin_unlock_irqrestore(&host->lock, flags);
}
}
@@ -481,8 +483,10 @@
sdhci_remove_host(host, 1);
for (ptr = 0; ptr < 3; ptr++) {
- clk_disable(sc->clk_bus[ptr]);
- clk_put(sc->clk_bus[ptr]);
+ if (sc->clk_bus[ptr]) {
+ clk_disable(sc->clk_bus[ptr]);
+ clk_put(sc->clk_bus[ptr]);
+ }
}
clk_disable(sc->clk_io);
clk_put(sc->clk_io);
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index ee7d0a5..69d98e3 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -164,6 +164,7 @@
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
struct mmc_data *data = host->data;
+ void *sg_virt;
unsigned short *buf;
unsigned int count;
unsigned long flags;
@@ -173,8 +174,8 @@
return;
}
- buf = (unsigned short *)(tmio_mmc_kmap_atomic(host, &flags) +
- host->sg_off);
+ sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
+ buf = (unsigned short *)(sg_virt + host->sg_off);
count = host->sg_ptr->length - host->sg_off;
if (count > data->blksz)
@@ -191,7 +192,7 @@
host->sg_off += count;
- tmio_mmc_kunmap_atomic(host, &flags);
+ tmio_mmc_kunmap_atomic(sg_virt, &flags);
if (host->sg_off == host->sg_ptr->length)
tmio_mmc_next_sg(host);
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 64f7d5d..0fedc78 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -82,10 +82,7 @@
#define ack_mmc_irqs(host, i) \
do { \
- u32 mask;\
- mask = sd_ctrl_read32((host), CTL_STATUS); \
- mask &= ~((i) & TMIO_MASK_IRQ); \
- sd_ctrl_write32((host), CTL_STATUS, mask); \
+ sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
} while (0)
@@ -177,19 +174,17 @@
return --host->sg_len;
}
-static inline char *tmio_mmc_kmap_atomic(struct tmio_mmc_host *host,
+static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
unsigned long *flags)
{
- struct scatterlist *sg = host->sg_ptr;
-
local_irq_save(*flags);
return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
}
-static inline void tmio_mmc_kunmap_atomic(struct tmio_mmc_host *host,
+static inline void tmio_mmc_kunmap_atomic(void *virt,
unsigned long *flags)
{
- kunmap_atomic(sg_page(host->sg_ptr), KM_BIO_SRC_IRQ);
+ kunmap_atomic(virt, KM_BIO_SRC_IRQ);
local_irq_restore(*flags);
}
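
The tmio refactor fixes an asymmetric mapping: kunmap_atomic() must be given the virtual address returned by kmap_atomic(), but the old code passed it a struct page pointer. Passing the scatterlist in and the mapped address back keeps the pair symmetric. A hedged sketch of the corrected helpers, following the era's kmap_atomic(page, type) API:

#include <linux/highmem.h>
#include <linux/scatterlist.h>

static inline void *example_sg_kmap(struct scatterlist *sg,
				    unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
}

static inline void example_sg_kunmap(void *virt, unsigned long *flags)
{
	kunmap_atomic(virt, KM_BIO_SRC_IRQ);	/* the mapped address, not the page */
	local_irq_restore(*flags);
}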
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index a382e3d..6fbeefa 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -682,7 +682,6 @@
static int __devexit bf5xx_nand_remove(struct platform_device *pdev)
{
struct bf5xx_nand_info *info = to_nand_info(pdev);
- struct mtd_info *mtd = NULL;
platform_set_drvdata(pdev, NULL);
@@ -690,11 +689,7 @@
* and their partitions, then go through freeing the
* resources used
*/
- mtd = &info->mtd;
- if (mtd) {
- nand_release(mtd);
- kfree(mtd);
- }
+ nand_release(&info->mtd);
peripheral_free_list(bfin_nfc_pin_req);
bf5xx_nand_dma_remove(info);
@@ -710,7 +705,7 @@
struct nand_chip *chip = mtd->priv;
int ret;
- ret = nand_scan_ident(mtd, 1);
+ ret = nand_scan_ident(mtd, 1, NULL);
if (ret)
return ret;
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index fcf8ceb..214b03a 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -30,6 +30,8 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/completion.h>
#include <asm/mach/flash.h>
#include <mach/mxc_nand.h>
@@ -67,7 +69,9 @@
#define NFC_V1_V2_CONFIG1_BIG (1 << 5)
#define NFC_V1_V2_CONFIG1_RST (1 << 6)
#define NFC_V1_V2_CONFIG1_CE (1 << 7)
-#define NFC_V1_V2_CONFIG1_ONE_CYCLE (1 << 8)
+#define NFC_V2_CONFIG1_ONE_CYCLE (1 << 8)
+#define NFC_V2_CONFIG1_PPB(x) (((x) & 0x3) << 9)
+#define NFC_V2_CONFIG1_FP_INT (1 << 11)
#define NFC_V1_V2_CONFIG2_INT (1 << 15)
@@ -149,7 +153,7 @@
int irq;
int eccsize;
- wait_queue_head_t irq_waitq;
+ struct completion op_completion;
uint8_t *data_buf;
unsigned int buf_start;
@@ -162,6 +166,7 @@
void (*send_read_id)(struct mxc_nand_host *);
uint16_t (*get_dev_status)(struct mxc_nand_host *);
int (*check_int)(struct mxc_nand_host *);
+ void (*irq_control)(struct mxc_nand_host *, int);
};
/* OOB placement block for use with hardware ecc generation */
@@ -214,9 +219,12 @@
{
struct mxc_nand_host *host = dev_id;
- disable_irq_nosync(irq);
+ if (!host->check_int(host))
+ return IRQ_NONE;
- wake_up(&host->irq_waitq);
+ host->irq_control(host, 0);
+
+ complete(&host->op_completion);
return IRQ_HANDLED;
}
@@ -243,11 +251,54 @@
if (!(tmp & NFC_V1_V2_CONFIG2_INT))
return 0;
- writew(tmp & ~NFC_V1_V2_CONFIG2_INT, NFC_V1_V2_CONFIG2);
+ if (!cpu_is_mx21())
+ writew(tmp & ~NFC_V1_V2_CONFIG2_INT, NFC_V1_V2_CONFIG2);
return 1;
}
+/*
+ * It has been observed that the i.MX21 cannot read the CONFIG2:INT bit
+ * if interrupts are masked (CONFIG1:INT_MSK is set). To handle this, the
+ * driver can enable/disable the irq line rather than simply masking the
+ * interrupts.
+ */
+static void irq_control_mx21(struct mxc_nand_host *host, int activate)
+{
+ if (activate)
+ enable_irq(host->irq);
+ else
+ disable_irq_nosync(host->irq);
+}
+
+static void irq_control_v1_v2(struct mxc_nand_host *host, int activate)
+{
+ uint16_t tmp;
+
+ tmp = readw(NFC_V1_V2_CONFIG1);
+
+ if (activate)
+ tmp &= ~NFC_V1_V2_CONFIG1_INT_MSK;
+ else
+ tmp |= NFC_V1_V2_CONFIG1_INT_MSK;
+
+ writew(tmp, NFC_V1_V2_CONFIG1);
+}
+
+static void irq_control_v3(struct mxc_nand_host *host, int activate)
+{
+ uint32_t tmp;
+
+ tmp = readl(NFC_V3_CONFIG2);
+
+ if (activate)
+ tmp &= ~NFC_V3_CONFIG2_INT_MSK;
+ else
+ tmp |= NFC_V3_CONFIG2_INT_MSK;
+
+ writel(tmp, NFC_V3_CONFIG2);
+}
+
/* This function polls the NANDFC to wait for the basic operation to
* complete by checking the INT bit of config2 register.
*/
@@ -257,10 +308,9 @@
if (useirq) {
if (!host->check_int(host)) {
-
- enable_irq(host->irq);
-
- wait_event(host->irq_waitq, host->check_int(host));
+ INIT_COMPLETION(host->op_completion);
+ host->irq_control(host, 1);
+ wait_for_completion(&host->op_completion);
}
} else {
while (max_retries-- > 0) {
@@ -402,16 +452,16 @@
/* Wait for operation to complete */
wait_op_done(host, true);
- if (this->options & NAND_BUSWIDTH_16) {
- void __iomem *main_buf = host->main_area0;
- /* compress the ID info */
- writeb(readb(main_buf + 2), main_buf + 1);
- writeb(readb(main_buf + 4), main_buf + 2);
- writeb(readb(main_buf + 6), main_buf + 3);
- writeb(readb(main_buf + 8), main_buf + 4);
- writeb(readb(main_buf + 10), main_buf + 5);
- }
memcpy(host->data_buf, host->main_area0, 16);
+
+ if (this->options & NAND_BUSWIDTH_16) {
+ /* compress the ID info */
+ host->data_buf[1] = host->data_buf[2];
+ host->data_buf[2] = host->data_buf[4];
+ host->data_buf[3] = host->data_buf[6];
+ host->data_buf[4] = host->data_buf[8];
+ host->data_buf[5] = host->data_buf[10];
+ }
}
static uint16_t get_dev_status_v3(struct mxc_nand_host *host)
@@ -729,27 +779,30 @@
{
struct nand_chip *nand_chip = mtd->priv;
struct mxc_nand_host *host = nand_chip->priv;
- uint16_t tmp;
+ uint16_t config1 = 0;
- /* enable interrupt, disable spare enable */
- tmp = readw(NFC_V1_V2_CONFIG1);
- tmp &= ~NFC_V1_V2_CONFIG1_INT_MSK;
- tmp &= ~NFC_V1_V2_CONFIG1_SP_EN;
- if (nand_chip->ecc.mode == NAND_ECC_HW) {
- tmp |= NFC_V1_V2_CONFIG1_ECC_EN;
- } else {
- tmp &= ~NFC_V1_V2_CONFIG1_ECC_EN;
- }
+ if (nand_chip->ecc.mode == NAND_ECC_HW)
+ config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
+
+ if (nfc_is_v21())
+ config1 |= NFC_V2_CONFIG1_FP_INT;
+
+ if (!cpu_is_mx21())
+ config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
if (nfc_is_v21() && mtd->writesize) {
+ uint16_t pages_per_block = mtd->erasesize / mtd->writesize;
+
host->eccsize = get_eccsize(mtd);
if (host->eccsize == 4)
- tmp |= NFC_V2_CONFIG1_ECC_MODE_4;
+ config1 |= NFC_V2_CONFIG1_ECC_MODE_4;
+
+ config1 |= NFC_V2_CONFIG1_PPB(ffs(pages_per_block) - 6);
} else {
host->eccsize = 1;
}
- writew(tmp, NFC_V1_V2_CONFIG1);
+ writew(config1, NFC_V1_V2_CONFIG1);
/* preset operation */
/* Unlock the internal RAM Buffer */
@@ -794,6 +847,7 @@
NFC_V3_CONFIG2_2CMD_PHASES |
NFC_V3_CONFIG2_SPAS(mtd->oobsize >> 1) |
NFC_V3_CONFIG2_ST_CMD(0x70) |
+ NFC_V3_CONFIG2_INT_MSK |
NFC_V3_CONFIG2_NUM_ADDR_PHASE0;
if (chip->ecc.mode == NAND_ECC_HW)
@@ -1019,6 +1073,10 @@
host->send_read_id = send_read_id_v1_v2;
host->get_dev_status = get_dev_status_v1_v2;
host->check_int = check_int_v1_v2;
+ if (cpu_is_mx21())
+ host->irq_control = irq_control_mx21;
+ else
+ host->irq_control = irq_control_v1_v2;
}
if (nfc_is_v21()) {
@@ -1057,6 +1115,7 @@
host->send_read_id = send_read_id_v3;
host->check_int = check_int_v3;
host->get_dev_status = get_dev_status_v3;
+ host->irq_control = irq_control_v3;
oob_smallpage = &nandv2_hw_eccoob_smallpage;
oob_largepage = &nandv2_hw_eccoob_largepage;
} else
@@ -1088,14 +1147,34 @@
this->options |= NAND_USE_FLASH_BBT;
}
- init_waitqueue_head(&host->irq_waitq);
+ init_completion(&host->op_completion);
host->irq = platform_get_irq(pdev, 0);
+ /*
+ * Mask the interrupt. For i.MX21, explicitly call
+ * irq_control_v1_v2 to use the mask bit. We can't call
+ * disable_irq_nosync() for an interrupt we do not own yet.
+ */
+ if (cpu_is_mx21())
+ irq_control_v1_v2(host, 0);
+ else
+ host->irq_control(host, 0);
+
err = request_irq(host->irq, mxc_nfc_irq, IRQF_DISABLED, DRIVER_NAME, host);
if (err)
goto eirq;
+ host->irq_control(host, 0);
+
+ /*
+ * Now that the interrupt is disabled, make sure the interrupt
+ * mask bit is cleared on i.MX21. Otherwise we can't read
+ * the interrupt status bit on this machine.
+ */
+ if (cpu_is_mx21())
+ irq_control_v1_v2(host, 1);
+
/* first scan to find the device and get the page size */
if (nand_scan_ident(mtd, 1, NULL)) {
err = -ENXIO;
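
The mxc_nand conversion above swaps a bare waitqueue for a completion, the idiomatic primitive for one-shot "operation done" signalling out of an interrupt handler. A minimal sketch of that pattern follows; the ctrl structure and helper names are illustrative only, not taken from the driver:

#include <linux/completion.h>
#include <linux/interrupt.h>

struct ctrl {
        struct completion op_completion;        /* init_completion() at probe */
};

static irqreturn_t ctrl_irq(int irq, void *dev_id)
{
        struct ctrl *c = dev_id;

        /* mask or ack the hardware source first, then wake the waiter */
        complete(&c->op_completion);
        return IRQ_HANDLED;
}

static void ctrl_wait_op_done(struct ctrl *c)
{
        /* re-arm before unmasking so a fast completion is not missed */
        INIT_COMPLETION(c->op_completion);
        /* unmask the interrupt source here */
        wait_for_completion(&c->op_completion);
}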
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 133d515..513e0a7 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -413,7 +413,7 @@
prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT);
} while (prefetch_status);
/* disable and stop the PFPW engine */
- gpmc_prefetch_reset();
+ gpmc_prefetch_reset(info->gpmc_cs);
dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
return 0;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 4d89f37..4d01cda6 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1320,6 +1320,7 @@
goto fail_free_irq;
}
+#ifdef CONFIG_MTD_PARTITIONS
if (mtd_has_cmdlinepart()) {
static const char *probes[] = { "cmdlinepart", NULL };
struct mtd_partition *parts;
@@ -1332,6 +1333,9 @@
}
return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
+#else
+ return 0;
+#endif
fail_free_irq:
free_irq(irq, info);
@@ -1364,7 +1368,9 @@
platform_set_drvdata(pdev, NULL);
del_mtd_device(mtd);
+#ifdef CONFIG_MTD_PARTITIONS
del_mtd_partitions(mtd);
+#endif
irq = platform_get_irq(pdev, 0);
if (irq >= 0)
free_irq(irq, info);
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index cb443af..a460f1b 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -554,14 +554,13 @@
do {
status = readl(base + S5PC110_DMA_TRANS_STATUS);
+ if (status & S5PC110_DMA_TRANS_STATUS_TE) {
+ writel(S5PC110_DMA_TRANS_CMD_TEC,
+ base + S5PC110_DMA_TRANS_CMD);
+ return -EIO;
+ }
} while (!(status & S5PC110_DMA_TRANS_STATUS_TD));
- if (status & S5PC110_DMA_TRANS_STATUS_TE) {
- writel(S5PC110_DMA_TRANS_CMD_TEC, base + S5PC110_DMA_TRANS_CMD);
- writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);
- return -EIO;
- }
-
writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);
return 0;
@@ -571,13 +570,12 @@
unsigned char *buffer, int offset, size_t count)
{
struct onenand_chip *this = mtd->priv;
- void __iomem *bufferram;
void __iomem *p;
void *buf = (void *) buffer;
dma_addr_t dma_src, dma_dst;
int err;
- p = bufferram = this->base + area;
+ p = this->base + area;
if (ONENAND_CURRENT_BUFFERRAM(this)) {
if (area == ONENAND_DATARAM)
p += this->writesize;
@@ -621,7 +619,7 @@
normal:
if (count != mtd->writesize) {
/* Copy the bufferram to memory to prevent unaligned access */
- memcpy(this->page_buf, bufferram, mtd->writesize);
+ memcpy(this->page_buf, p, mtd->writesize);
p = this->page_buf + offset;
}
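
The samsung OneNAND fix above carries a general lesson: test the error bit inside the poll loop, because a failed DMA transfer may never raise the "done" bit, and a post-loop error check then never runs. A generic sketch of the corrected shape, with an assumed register layout rather than the driver's:

#include <linux/errno.h>
#include <linux/io.h>

static int wait_dma_done(void __iomem *status_reg, u32 done_bit, u32 err_bit)
{
        u32 status;

        do {
                status = readl(status_reg);
                if (status & err_bit)
                        return -EIO;    /* an error may arrive without "done" */
        } while (!(status & done_bit));

        return 0;
}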
diff --git a/drivers/mtd/ubi/Kconfig.debug b/drivers/mtd/ubi/Kconfig.debug
index 2246f15..61f6e5e 100644
--- a/drivers/mtd/ubi/Kconfig.debug
+++ b/drivers/mtd/ubi/Kconfig.debug
@@ -6,7 +6,7 @@
depends on SYSFS
depends on MTD_UBI
select DEBUG_FS
- select KALLSYMS_ALL
+ select KALLSYMS_ALL if KALLSYMS && DEBUG_KERNEL
help
This option enables UBI debugging.
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 4dfa6b9..3d2d1a6 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -798,18 +798,18 @@
goto out_free;
}
- re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
- if (!re) {
+ re1 = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
+ if (!re1) {
err = -ENOMEM;
ubi_close_volume(desc);
goto out_free;
}
- re->remove = 1;
- re->desc = desc;
- list_add(&re->list, &rename_list);
+ re1->remove = 1;
+ re1->desc = desc;
+ list_add(&re1->list, &rename_list);
dbg_msg("will remove volume %d, name \"%s\"",
- re->desc->vol->vol_id, re->desc->vol->name);
+ re1->desc->vol->vol_id, re1->desc->vol->name);
}
mutex_lock(&ubi->device_mutex);
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index 372a15a..69b52e9 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -843,7 +843,7 @@
case UBI_COMPAT_DELETE:
ubi_msg("\"delete\" compatible internal volume %d:%d"
" found, will remove it", vol_id, lnum);
- err = add_to_list(si, pnum, ec, &si->corr);
+ err = add_to_list(si, pnum, ec, &si->erase);
if (err)
return err;
return 0;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index ee7b1d8..97a4356 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1212,7 +1212,8 @@
retry:
spin_lock(&ubi->wl_lock);
e = ubi->lookuptbl[pnum];
- if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) {
+ if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
+ in_wl_tree(e, &ubi->erroneous)) {
spin_unlock(&ubi->wl_lock);
return 0;
}
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index 70705d1..eca55c5 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -522,7 +522,7 @@
lp->tx_len = lp->exec_box->data[9]; /* Transmit list count */
lp->rx_len = lp->exec_box->data[11]; /* Receive list count */
- init_MUTEX_LOCKED(&lp->cmd_mutex);
+ sema_init(&lp->cmd_mutex, 0);
init_completion(&lp->execution_cmd);
init_completion(&lp->xceiver_cmd);
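
The 3c527 hunk is part of the tree-wide removal of init_MUTEX_LOCKED(): a semaphore initialized with a count of zero is the exact equivalent, making the first down() block until a matching up(). Sketched under that assumption:

#include <linux/semaphore.h>

static struct semaphore cmd_mutex;

static void example_setup(void)
{
        /* was: init_MUTEX_LOCKED(&cmd_mutex); */
        sema_init(&cmd_mutex, 0);       /* first down() blocks until up() */
}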
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index c754d88..179871d 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -633,7 +633,11 @@
open:1,
medialock:1,
must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */
- large_frames:1; /* accept large frames */
+ large_frames:1, /* accept large frames */
+ handling_irq:1; /* private in_irq indicator */
+ /* {get|set}_wol operations are already serialized by rtnl.
+ * no additional locking is required for enable_wol and acpi_set_WOL().
+ */
int drv_flags;
u16 status_enable;
u16 intr_enable;
@@ -646,7 +650,7 @@
u16 io_size; /* Size of PCI region (for release_region) */
/* Serialises access to hardware other than MII and variables below.
- * The lock hierarchy is rtnl_lock > lock > mii_lock > window_lock. */
+ * The lock hierarchy is rtnl_lock > {lock, mii_lock} > window_lock. */
spinlock_t lock;
spinlock_t mii_lock; /* Serialises access to MII */
@@ -1993,10 +1997,9 @@
}
}
- if (status & RxEarly) { /* Rx early is unused. */
- vortex_rx(dev);
+ if (status & RxEarly) /* Rx early is unused. */
iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD);
- }
+
if (status & StatsFull) { /* Empty statistics. */
static int DoneDidThat;
if (vortex_debug > 4)
@@ -2133,6 +2136,15 @@
dev->name, vp->cur_tx);
}
+ /*
+ * We can't allow a recursion from our interrupt handler back into the
+ * tx routine, as they take the same spin lock, and that causes
+ * deadlock. Just return NETDEV_TX_BUSY and let the stack try again in
+ * a bit.
+ */
+ if (vp->handling_irq)
+ return NETDEV_TX_BUSY;
+
if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
if (vortex_debug > 0)
pr_warning("%s: BUG! Tx Ring full, refusing to send buffer.\n",
@@ -2288,7 +2300,12 @@
if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
if (status == 0xffff)
break;
+ if (status & RxEarly)
+ vortex_rx(dev);
+ spin_unlock(&vp->window_lock);
vortex_error(dev, status);
+ spin_lock(&vp->window_lock);
+ window_set(vp, 7);
}
if (--work_done < 0) {
@@ -2335,11 +2352,13 @@
ioaddr = vp->ioaddr;
+
/*
* It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout
* and boomerang_start_xmit
*/
spin_lock(&vp->lock);
+ vp->handling_irq = 1;
status = ioread16(ioaddr + EL3_STATUS);
@@ -2447,6 +2466,7 @@
pr_debug("%s: exiting interrupt, status %4.4x.\n",
dev->name, status);
handler_exit:
+ vp->handling_irq = 0;
spin_unlock(&vp->lock);
return IRQ_HANDLED;
}
@@ -2922,28 +2942,31 @@
{
struct vortex_private *vp = netdev_priv(dev);
- spin_lock_irq(&vp->lock);
+ if (!VORTEX_PCI(vp))
+ return;
+
wol->supported = WAKE_MAGIC;
wol->wolopts = 0;
if (vp->enable_wol)
wol->wolopts |= WAKE_MAGIC;
- spin_unlock_irq(&vp->lock);
}
static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct vortex_private *vp = netdev_priv(dev);
+
+ if (!VORTEX_PCI(vp))
+ return -EOPNOTSUPP;
+
if (wol->wolopts & ~WAKE_MAGIC)
return -EINVAL;
- spin_lock_irq(&vp->lock);
if (wol->wolopts & WAKE_MAGIC)
vp->enable_wol = 1;
else
vp->enable_wol = 0;
acpi_set_WOL(dev);
- spin_unlock_irq(&vp->lock);
return 0;
}
@@ -2971,7 +2994,6 @@
{
int err;
struct vortex_private *vp = netdev_priv(dev);
- unsigned long flags;
pci_power_t state = 0;
if(VORTEX_PCI(vp))
@@ -2981,9 +3003,7 @@
if(state != 0)
pci_set_power_state(VORTEX_PCI(vp), PCI_D0);
- spin_lock_irqsave(&vp->lock, flags);
err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL);
- spin_unlock_irqrestore(&vp->lock, flags);
if(state != 0)
pci_set_power_state(VORTEX_PCI(vp), state);
@@ -3188,6 +3208,9 @@
return;
}
+ if (VORTEX_PCI(vp)->current_state < PCI_D3hot)
+ return;
+
/* Change the power state to D3; RxEnable doesn't take effect. */
pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot);
}
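
The handling_irq flag added to 3c59x above implements a simple re-entrancy guard: the interrupt handler holds vp->lock, and vortex_error() can re-enter the transmit path, so start_xmit backs off with NETDEV_TX_BUSY instead of deadlocking on the same lock. A hedged sketch of the guard, with illustrative struct and function names:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct example_priv {
        spinlock_t lock;
        unsigned int handling_irq:1;
};

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
                                      struct net_device *dev)
{
        struct example_priv *vp = netdev_priv(dev);

        /* the irq handler owns vp->lock while this flag is set; back
         * off and let the core requeue rather than deadlock */
        if (vp->handling_irq)
                return NETDEV_TX_BUSY;

        spin_lock(&vp->lock);
        /* ... post skb to the tx ring ... */
        spin_unlock(&vp->lock);
        return NETDEV_TX_OK;
}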
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 5a68953..5db667c 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -928,6 +928,16 @@
The module will be called smc91x. If you want to compile it as a
module, say M here and read <file:Documentation/kbuild/modules.txt>.
+config PXA168_ETH
+ tristate "Marvell pxa168 ethernet support"
+ depends on CPU_PXA168
+ select PHYLIB
+ help
+ This driver supports the pxa168 Ethernet ports.
+
+ To compile this driver as a module, choose M here. The module
+ will be called pxa168_eth.
+
config NET_NETX
tristate "NetX Ethernet support"
select MII
@@ -2418,7 +2428,7 @@
config MV643XX_ETH
tristate "Marvell Discovery (643XX) and Orion ethernet support"
- depends on MV64X60 || PPC32 || PLAT_ORION
+ depends on (MV64X60 || PPC32 || PLAT_ORION) && INET
select INET_LRO
select PHYLIB
help
@@ -2793,7 +2803,7 @@
config PASEMI_MAC
tristate "PA Semi 1/10Gbit MAC"
- depends on PPC_PASEMI && PCI
+ depends on PPC_PASEMI && PCI && INET
select PHYLIB
select INET_LRO
help
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 56e8c27..3e8f150 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -244,6 +244,7 @@
obj-$(CONFIG_SMC91X) += smc91x.o
obj-$(CONFIG_SMC911X) += smc911x.o
obj-$(CONFIG_SMSC911X) += smsc911x.o
+obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
obj-$(CONFIG_BFIN_MAC) += bfin_mac.o
obj-$(CONFIG_DM9000) += dm9000.o
obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 63b9ba0..c73be28 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -1251,6 +1251,12 @@
rrd_ring->desc = NULL;
rrd_ring->dma = 0;
+
+ adapter->cmb.dma = 0;
+ adapter->cmb.cmb = NULL;
+
+ adapter->smb.dma = 0;
+ adapter->smb.smb = NULL;
}
static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
@@ -2847,10 +2853,11 @@
pci_enable_wake(pdev, PCI_D3cold, 0);
atl1_reset_hw(&adapter->hw);
- adapter->cmb.cmb->int_stats = 0;
- if (netif_running(netdev))
+ if (netif_running(netdev)) {
+ adapter->cmb.cmb->int_stats = 0;
atl1_up(adapter);
+ }
netif_device_attach(netdev);
return 0;
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 37617ab..efeffdf 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -848,6 +848,15 @@
b44_tx(bp);
/* spin_unlock(&bp->tx_lock); */
}
+ if (bp->istat & ISTAT_RFO) { /* fast recovery, in ~20msec */
+ bp->istat &= ~ISTAT_RFO;
+ b44_disable_ints(bp);
+ ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
+ b44_init_rings(bp);
+ b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
+ netif_wake_queue(bp->dev);
+ }
+
spin_unlock_irqrestore(&bp->lock, flags);
work_done = 0;
@@ -2161,8 +2170,6 @@
dev->irq = sdev->irq;
SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
- netif_carrier_off(dev);
-
err = ssb_bus_powerup(sdev->bus, 0);
if (err) {
dev_err(sdev->dev,
@@ -2204,6 +2211,8 @@
goto err_out_powerdown;
}
+ netif_carrier_off(dev);
+
ssb_set_drvdata(sdev, dev);
/* Chip reset provides power to the b44 MAC & PCI cores, which
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 99197bd..53306bf 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -181,6 +181,7 @@
u64 be_rx_bytes_prev;
u64 be_rx_pkts;
u32 be_rx_rate;
+ u32 be_rx_mcast_pkt;
/* number of non ether type II frames dropped where
* frame len > length field of Mac Hdr */
u32 be_802_3_dropped_frames;
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 3d30549..34abcc9 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -140,10 +140,8 @@
while ((compl = be_mcc_compl_get(adapter))) {
if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
/* Interpret flags as an async trailer */
- BUG_ON(!is_link_state_evt(compl->flags));
-
- /* Interpret compl as a async link evt */
- be_async_link_state_process(adapter,
+ if (is_link_state_evt(compl->flags))
+ be_async_link_state_process(adapter,
(struct be_async_event_link_state *) compl);
} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
*status = be_mcc_compl_process(adapter, compl);
@@ -207,7 +205,7 @@
if (msecs > 4000) {
dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
- be_dump_ue(adapter);
+ be_detect_dump_ue(adapter);
return -1;
}
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index bdc10a2..ad1e6fa 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -992,5 +992,5 @@
extern int be_cmd_get_phy_info(struct be_adapter *adapter,
struct be_dma_mem *cmd);
extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
-extern void be_dump_ue(struct be_adapter *adapter);
+extern void be_detect_dump_ue(struct be_adapter *adapter);
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index cd16243..13f0abb 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -60,6 +60,7 @@
{DRVSTAT_INFO(be_rx_events)},
{DRVSTAT_INFO(be_tx_compl)},
{DRVSTAT_INFO(be_rx_compl)},
+ {DRVSTAT_INFO(be_rx_mcast_pkt)},
{DRVSTAT_INFO(be_ethrx_post_fail)},
{DRVSTAT_INFO(be_802_3_dropped_frames)},
{DRVSTAT_INFO(be_802_3_malformed_frames)},
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index 5d38046..a2ec5df 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -167,8 +167,11 @@
#define FLASH_FCoE_BIOS_START_g3 (13631488)
#define FLASH_REDBOOT_START_g3 (262144)
-
-
+/************* Rx Packet Type Encoding **************/
+#define BE_UNICAST_PACKET 0
+#define BE_MULTICAST_PACKET 1
+#define BE_BROADCAST_PACKET 2
+#define BE_RSVD_PACKET 3
/*
* BE descriptors: host memory data structures whose formats
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 74e146f..6eda7a0 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -247,6 +247,7 @@
dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts;
dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes;
dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes;
+ dev_stats->multicast = drvr_stats(adapter)->be_rx_mcast_pkt;
/* bad pkts received */
dev_stats->rx_errors = port_stats->rx_crc_errors +
@@ -294,7 +295,6 @@
/* no space available in linux */
dev_stats->tx_dropped = 0;
- dev_stats->multicast = port_stats->rx_multicast_frames;
dev_stats->collisions = 0;
/* detailed tx_errors */
@@ -848,7 +848,7 @@
}
static void be_rx_stats_update(struct be_adapter *adapter,
- u32 pktsize, u16 numfrags)
+ u32 pktsize, u16 numfrags, u8 pkt_type)
{
struct be_drvr_stats *stats = drvr_stats(adapter);
@@ -856,6 +856,9 @@
stats->be_rx_frags += numfrags;
stats->be_rx_bytes += pktsize;
stats->be_rx_pkts++;
+
+ if (pkt_type == BE_MULTICAST_PACKET)
+ stats->be_rx_mcast_pkt++;
}
static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
@@ -925,9 +928,11 @@
u16 rxq_idx, i, j;
u32 pktsize, hdr_len, curr_frag_len, size;
u8 *start;
+ u8 pkt_type;
rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
+ pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
page_info = get_rx_page_info(adapter, rxq_idx);
@@ -993,7 +998,7 @@
BUG_ON(j > MAX_SKB_FRAGS);
done:
- be_rx_stats_update(adapter, pktsize, num_rcvd);
+ be_rx_stats_update(adapter, pktsize, num_rcvd, pkt_type);
}
/* Process the RX completion indicated by rxcp when GRO is disabled */
@@ -1060,6 +1065,7 @@
u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
u16 i, rxq_idx = 0, vid, j;
u8 vtm;
+ u8 pkt_type;
num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
/* Is it a flush compl that has no data */
@@ -1070,6 +1076,7 @@
vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
+ pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
/* vlanf could be wrongly set in some cards.
* ignore if vtm is not set */
@@ -1125,7 +1132,7 @@
vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
}
- be_rx_stats_update(adapter, pkt_size, num_rcvd);
+ be_rx_stats_update(adapter, pkt_size, num_rcvd, pkt_type);
}
static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
@@ -1743,26 +1750,7 @@
return 1;
}
-static inline bool be_detect_ue(struct be_adapter *adapter)
-{
- u32 online0 = 0, online1 = 0;
-
- pci_read_config_dword(adapter->pdev, PCICFG_ONLINE0, &online0);
-
- pci_read_config_dword(adapter->pdev, PCICFG_ONLINE1, &online1);
-
- if (!online0 || !online1) {
- adapter->ue_detected = true;
- dev_err(&adapter->pdev->dev,
- "UE Detected!! online0=%d online1=%d\n",
- online0, online1);
- return true;
- }
-
- return false;
-}
-
-void be_dump_ue(struct be_adapter *adapter)
+void be_detect_dump_ue(struct be_adapter *adapter)
{
u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
u32 i;
@@ -1779,6 +1767,11 @@
ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
+ if (ue_status_lo || ue_status_hi) {
+ adapter->ue_detected = true;
+ dev_err(&adapter->pdev->dev, "UE Detected!!\n");
+ }
+
if (ue_status_lo) {
for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
if (ue_status_lo & 1)
@@ -1814,10 +1807,8 @@
adapter->rx_post_starved = false;
be_post_rx_frags(adapter);
}
- if (!adapter->ue_detected) {
- if (be_detect_ue(adapter))
- be_dump_ue(adapter);
- }
+ if (!adapter->ue_detected)
+ be_detect_dump_ue(adapter);
schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 53af9c9..0c2d96e 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -20,8 +20,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.52.53-3"
-#define DRV_MODULE_RELDATE "2010/18/04"
+#define DRV_MODULE_VERSION "1.52.53-4"
+#define DRV_MODULE_RELDATE "2010/16/08"
#define BNX2X_BC_VER 0x040200
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index b4ec2b0..f8c3f08 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -4328,10 +4328,12 @@
val |= aeu_gpio_mask;
REG_WR(bp, offset, val);
}
+ bp->port.need_hw_lock = 1;
break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+ bp->port.need_hw_lock = 1;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
/* add SPIO 5 to group 0 */
{
u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
@@ -4341,7 +4343,10 @@
REG_WR(bp, reg_addr, val);
}
break;
-
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
+ bp->port.need_hw_lock = 1;
+ break;
default:
break;
}
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 822f586..0ddf4c6 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2466,6 +2466,9 @@
if (!(dev->flags & IFF_MASTER))
goto out;
+ if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
+ goto out;
+
read_lock(&bond->lock);
slave = bond_get_slave_by_dev((struct bonding *)netdev_priv(dev),
orig_dev);
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index c746b33..26bb118 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -362,6 +362,9 @@
goto out;
}
+ if (!pskb_may_pull(skb, arp_hdr_len(bond_dev)))
+ goto out;
+
if (skb->len < sizeof(struct arp_pkt)) {
pr_debug("Packet is too small to be an ARP\n");
goto out;
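
Both bonding hunks above add the same hardening: never parse a protocol header until pskb_may_pull() has guaranteed that many bytes are linear, otherwise a short or heavily fragmented frame leads to an out-of-bounds read. A sketch of the idiom; the surrounding function is hypothetical:

#include <linux/if_arp.h>
#include <linux/skbuff.h>

static int example_parse_arp(struct sk_buff *skb, struct net_device *dev)
{
        struct arphdr *arp;

        if (!pskb_may_pull(skb, arp_hdr_len(dev)))
                return -EINVAL;         /* truncated frame: refuse to parse */

        /* pskb_may_pull() may relocate the head, so (re)read skb->data */
        arp = (struct arphdr *)skb->data;
        /* ... header fields are now safe to inspect ... */
        return 0;
}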
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2cc4cfc..e953c6a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2797,9 +2797,15 @@
* so it can wait
*/
bond_for_each_slave(bond, slave, i) {
+ unsigned long trans_start = dev_trans_start(slave->dev);
+
if (slave->link != BOND_LINK_UP) {
- if (time_before_eq(jiffies, dev_trans_start(slave->dev) + delta_in_ticks) &&
- time_before_eq(jiffies, slave->dev->last_rx + delta_in_ticks)) {
+ if (time_in_range(jiffies,
+ trans_start - delta_in_ticks,
+ trans_start + delta_in_ticks) &&
+ time_in_range(jiffies,
+ slave->dev->last_rx - delta_in_ticks,
+ slave->dev->last_rx + delta_in_ticks)) {
slave->link = BOND_LINK_UP;
slave->state = BOND_STATE_ACTIVE;
@@ -2827,8 +2833,12 @@
* when the source ip is 0, so don't take the link down
* if we don't know our ip yet
*/
- if (time_after_eq(jiffies, dev_trans_start(slave->dev) + 2*delta_in_ticks) ||
- (time_after_eq(jiffies, slave->dev->last_rx + 2*delta_in_ticks))) {
+ if (!time_in_range(jiffies,
+ trans_start - delta_in_ticks,
+ trans_start + 2 * delta_in_ticks) ||
+ !time_in_range(jiffies,
+ slave->dev->last_rx - delta_in_ticks,
+ slave->dev->last_rx + 2 * delta_in_ticks)) {
slave->link = BOND_LINK_DOWN;
slave->state = BOND_STATE_BACKUP;
@@ -2883,13 +2893,16 @@
{
struct slave *slave;
int i, commit = 0;
+ unsigned long trans_start;
bond_for_each_slave(bond, slave, i) {
slave->new_link = BOND_LINK_NOCHANGE;
if (slave->link != BOND_LINK_UP) {
- if (time_before_eq(jiffies, slave_last_rx(bond, slave) +
- delta_in_ticks)) {
+ if (time_in_range(jiffies,
+ slave_last_rx(bond, slave) - delta_in_ticks,
+ slave_last_rx(bond, slave) + delta_in_ticks)) {
+
slave->new_link = BOND_LINK_UP;
commit++;
}
@@ -2902,8 +2915,9 @@
* active. This avoids bouncing, as the last receive
* times need a full ARP monitor cycle to be updated.
*/
- if (!time_after_eq(jiffies, slave->jiffies +
- 2 * delta_in_ticks))
+ if (time_in_range(jiffies,
+ slave->jiffies - delta_in_ticks,
+ slave->jiffies + 2 * delta_in_ticks))
continue;
/*
@@ -2921,8 +2935,10 @@
*/
if (slave->state == BOND_STATE_BACKUP &&
!bond->current_arp_slave &&
- time_after(jiffies, slave_last_rx(bond, slave) +
- 3 * delta_in_ticks)) {
+ !time_in_range(jiffies,
+ slave_last_rx(bond, slave) - delta_in_ticks,
+ slave_last_rx(bond, slave) + 3 * delta_in_ticks)) {
+
slave->new_link = BOND_LINK_DOWN;
commit++;
}
@@ -2933,11 +2949,15 @@
* - (more than 2*delta since receive AND
* the bond has an IP address)
*/
+ trans_start = dev_trans_start(slave->dev);
if ((slave->state == BOND_STATE_ACTIVE) &&
- (time_after_eq(jiffies, dev_trans_start(slave->dev) +
- 2 * delta_in_ticks) ||
- (time_after_eq(jiffies, slave_last_rx(bond, slave)
- + 2 * delta_in_ticks)))) {
+ (!time_in_range(jiffies,
+ trans_start - delta_in_ticks,
+ trans_start + 2 * delta_in_ticks) ||
+ !time_in_range(jiffies,
+ slave_last_rx(bond, slave) - delta_in_ticks,
+ slave_last_rx(bond, slave) + 2 * delta_in_ticks))) {
+
slave->new_link = BOND_LINK_DOWN;
commit++;
}
@@ -2956,6 +2976,7 @@
{
struct slave *slave;
int i;
+ unsigned long trans_start;
bond_for_each_slave(bond, slave, i) {
switch (slave->new_link) {
@@ -2963,10 +2984,11 @@
continue;
case BOND_LINK_UP:
+ trans_start = dev_trans_start(slave->dev);
if ((!bond->curr_active_slave &&
- time_before_eq(jiffies,
- dev_trans_start(slave->dev) +
- delta_in_ticks)) ||
+ time_in_range(jiffies,
+ trans_start - delta_in_ticks,
+ trans_start + delta_in_ticks)) ||
bond->curr_active_slave != slave) {
slave->link = BOND_LINK_UP;
bond->current_arp_slave = NULL;
@@ -5142,6 +5164,15 @@
res = dev_alloc_name(bond_dev, "bond%d");
if (res < 0)
goto out;
+ } else {
+ /*
+ * If we're given a name to register,
+ * we need to ensure that it's not already
+ * registered.
+ */
+ res = -EEXIST;
+ if (__dev_get_by_name(net, name) != NULL)
+ goto out;
}
res = register_netdevice(bond_dev);
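
The bond_main.c monitor rewrite above replaces open-ended time_before_eq()/time_after_eq() tests with time_in_range(), so a timestamp sitting slightly in the future relative to jiffies (updated by another CPU, or stale across a wraparound) can no longer satisfy the "seen recently" test indefinitely. Reduced to its core:

#include <linux/jiffies.h>
#include <linux/types.h>

/* true only while "now" lies within +/- delta of the recorded stamp */
static bool seen_recently(unsigned long stamp, unsigned long delta)
{
        return time_in_range(jiffies, stamp - delta, stamp + delta);
}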
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 631a624..75bfc3a 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -15,7 +15,7 @@
config CAIF_SPI_SLAVE
tristate "CAIF SPI transport driver for slave interface"
- depends on CAIF
+ depends on CAIF && HAS_DMA
default n
---help---
The CAIF Link layer SPI Protocol driver for Slave SPI interface.
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index ad19585..f208712 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -2296,6 +2296,8 @@
case CHELSIO_GET_QSET_NUM:{
struct ch_reg edata;
+ memset(&edata, 0, sizeof(struct ch_reg));
+
edata.cmd = CHELSIO_GET_QSET_NUM;
edata.val = pi->nqsets;
if (copy_to_user(useraddr, &edata, sizeof(edata)))
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index a4a0d2b..d3d4a57 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -936,12 +936,14 @@
ew32(IMC, 0xffffffff);
icr = er32(ICR);
- /* Install any alternate MAC address into RAR0 */
- ret_val = e1000_check_alt_mac_addr_generic(hw);
- if (ret_val)
- return ret_val;
+ if (hw->mac.type == e1000_82571) {
+ /* Install any alternate MAC address into RAR0 */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ return ret_val;
- e1000e_set_laa_state_82571(hw, true);
+ e1000e_set_laa_state_82571(hw, true);
+ }
/* Reinitialize the 82571 serdes link state machine */
if (hw->phy.media_type == e1000_media_type_internal_serdes)
@@ -1618,14 +1620,16 @@
{
s32 ret_val = 0;
- /*
- * If there's an alternate MAC address place it in RAR0
- * so that it will override the Si installed default perm
- * address.
- */
- ret_val = e1000_check_alt_mac_addr_generic(hw);
- if (ret_val)
- goto out;
+ if (hw->mac.type == e1000_82571) {
+ /*
+ * If there's an alternate MAC address place it in RAR0
+ * so that it will override the Si installed default perm
+ * address.
+ */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ goto out;
+ }
ret_val = e1000_read_mac_addr_generic(hw);
@@ -1833,6 +1837,7 @@
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
| FLAG_HAS_SWSM_ON_LOAD,
+ .flags2 = FLAG2_DISABLE_ASPM_L1,
.pba = 20,
.max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
.get_variants = e1000_get_variants_82571,
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 307a72f4..93b3bed 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -621,6 +621,7 @@
#define E1000_FLASH_UPDATES 2000
/* NVM Word Offsets */
+#define NVM_COMPAT 0x0003
#define NVM_ID_LED_SETTINGS 0x0004
#define NVM_INIT_CONTROL2_REG 0x000F
#define NVM_INIT_CONTROL3_PORT_B 0x0014
@@ -643,6 +644,9 @@
/* Mask bits for fields in Word 0x1a of the NVM */
#define NVM_WORD1A_ASPM_MASK 0x000C
+/* Mask bits for fields in Word 0x03 of the EEPROM */
+#define NVM_COMPAT_LOM 0x0800
+
/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
#define NVM_SUM 0xBABA
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 66ed08f..ba302a5 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -57,6 +57,7 @@
E1000_SCTL = 0x00024, /* SerDes Control - RW */
E1000_FCAL = 0x00028, /* Flow Control Address Low - RW */
E1000_FCAH = 0x0002C, /* Flow Control Address High -RW */
+ E1000_FEXTNVM4 = 0x00024, /* Future Extended NVM 4 - RW */
E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */
E1000_FCT = 0x00030, /* Flow Control Type - RW */
E1000_VET = 0x00038, /* VLAN Ether Type - RW */
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 63930d1..57b5435 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -105,6 +105,10 @@
#define E1000_FEXTNVM_SW_CONFIG 1
#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */
+#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7
+#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
+#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
+
#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
#define E1000_ICH_RAR_ENTRIES 7
@@ -125,6 +129,7 @@
/* SMBus Address Phy Register */
#define HV_SMB_ADDR PHY_REG(768, 26)
+#define HV_SMB_ADDR_MASK 0x007F
#define HV_SMB_ADDR_PEC_EN 0x0200
#define HV_SMB_ADDR_VALID 0x0080
@@ -237,6 +242,8 @@
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
+static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
+static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
{
@@ -272,7 +279,7 @@
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
- u32 ctrl;
+ u32 ctrl, fwsm;
s32 ret_val = 0;
phy->addr = 1;
@@ -294,7 +301,8 @@
* disabled, then toggle the LANPHYPC Value bit to force
* the interconnect to PCIe mode.
*/
- if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+ fwsm = er32(FWSM);
+ if (!(fwsm & E1000_ICH_FWSM_FW_VALID)) {
ctrl = er32(CTRL);
ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
@@ -303,6 +311,13 @@
ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
ew32(CTRL, ctrl);
msleep(50);
+
+ /*
+ * Gate automatic PHY configuration by hardware on
+ * non-managed 82579
+ */
+ if (hw->mac.type == e1000_pch2lan)
+ e1000_gate_hw_phy_config_ich8lan(hw, true);
}
/*
@@ -315,6 +330,13 @@
if (ret_val)
goto out;
+ /* Ungate automatic PHY configuration on non-managed 82579 */
+ if ((hw->mac.type == e1000_pch2lan) &&
+ !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
+ msleep(10);
+ e1000_gate_hw_phy_config_ich8lan(hw, false);
+ }
+
phy->id = e1000_phy_unknown;
ret_val = e1000e_get_phy_id(hw);
if (ret_val)
@@ -561,13 +583,10 @@
if (mac->type == e1000_ich8lan)
e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
- /* Disable PHY configuration by hardware, config by software */
- if (mac->type == e1000_pch2lan) {
- u32 extcnf_ctrl = er32(EXTCNF_CTRL);
-
- extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
- ew32(EXTCNF_CTRL, extcnf_ctrl);
- }
+ /* Gate automatic PHY configuration by hardware on managed 82579 */
+ if ((mac->type == e1000_pch2lan) &&
+ (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
+ e1000_gate_hw_phy_config_ich8lan(hw, true);
return 0;
}
@@ -652,6 +671,12 @@
goto out;
}
+ if (hw->mac.type == e1000_pch2lan) {
+ ret_val = e1000_k1_workaround_lv(hw);
+ if (ret_val)
+ goto out;
+ }
+
/*
* Check if there was DownShift, must be checked
* immediately after link-up
@@ -895,6 +920,34 @@
}
/**
+ * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
+ * @hw: pointer to the HW structure
+ *
+ * Assumes semaphore already acquired.
+ *
+ **/
+static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
+{
+ u16 phy_data;
+ u32 strap = er32(STRAP);
+ s32 ret_val = 0;
+
+ strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
+
+ ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data &= ~HV_SMB_ADDR_MASK;
+ phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
+ phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
+ ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
+
+out:
+ return ret_val;
+}
+
+/**
* e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
* @hw: pointer to the HW structure
*
@@ -903,7 +956,6 @@
**/
static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
{
- struct e1000_adapter *adapter = hw->adapter;
struct e1000_phy_info *phy = &hw->phy;
u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
s32 ret_val = 0;
@@ -921,7 +973,8 @@
if (phy->type != e1000_phy_igp_3)
return ret_val;
- if (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) {
+ if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) {
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
break;
}
@@ -961,21 +1014,16 @@
cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
- if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
- ((hw->mac.type == e1000_pchlan) ||
- (hw->mac.type == e1000_pch2lan))) {
+ if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
+ (hw->mac.type == e1000_pchlan)) ||
+ (hw->mac.type == e1000_pch2lan)) {
/*
* HW configures the SMBus address and LEDs when the
* OEM and LCD Write Enable bits are set in the NVM.
* When both NVM bits are cleared, SW will configure
* them instead.
*/
- data = er32(STRAP);
- data &= E1000_STRAP_SMBUS_ADDRESS_MASK;
- reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT;
- reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
- ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR,
- reg_data);
+ ret_val = e1000_write_smbus_addr(hw);
if (ret_val)
goto out;
@@ -1440,10 +1488,6 @@
goto out;
/* Enable jumbo frame workaround in the PHY */
- e1e_rphy(hw, PHY_REG(769, 20), &data);
- ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14));
- if (ret_val)
- goto out;
e1e_rphy(hw, PHY_REG(769, 23), &data);
data &= ~(0x7F << 5);
data |= (0x37 << 5);
@@ -1452,7 +1496,6 @@
goto out;
e1e_rphy(hw, PHY_REG(769, 16), &data);
data &= ~(1 << 13);
- data |= (1 << 12);
ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
if (ret_val)
goto out;
@@ -1477,7 +1520,7 @@
mac_reg = er32(RCTL);
mac_reg &= ~E1000_RCTL_SECRC;
- ew32(FFLT_DBG, mac_reg);
+ ew32(RCTL, mac_reg);
ret_val = e1000e_read_kmrn_reg(hw,
E1000_KMRNCTRLSTA_CTRL_OFFSET,
@@ -1503,17 +1546,12 @@
goto out;
/* Write PHY register values back to h/w defaults */
- e1e_rphy(hw, PHY_REG(769, 20), &data);
- ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14));
- if (ret_val)
- goto out;
e1e_rphy(hw, PHY_REG(769, 23), &data);
data &= ~(0x7F << 5);
ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
if (ret_val)
goto out;
e1e_rphy(hw, PHY_REG(769, 16), &data);
- data &= ~(1 << 12);
data |= (1 << 13);
ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
if (ret_val)
@@ -1559,6 +1597,69 @@
}
/**
+ * e1000_k1_workaround_lv - K1 Si workaround
+ * @hw: pointer to the HW structure
+ *
+ * Workaround to set the K1 beacon duration for 82579 parts
+ **/
+static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u16 status_reg = 0;
+ u32 mac_reg;
+
+ if (hw->mac.type != e1000_pch2lan)
+ goto out;
+
+ /* Set K1 beacon duration based on 1Gbps speed or otherwise */
+ ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
+ if (ret_val)
+ goto out;
+
+ if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
+ == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
+ mac_reg = er32(FEXTNVM4);
+ mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
+
+ if (status_reg & HV_M_STATUS_SPEED_1000)
+ mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
+ else
+ mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
+
+ ew32(FEXTNVM4, mac_reg);
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
+ * @hw: pointer to the HW structure
+ * @gate: boolean set to true to gate, false to ungate
+ *
+ * Gate/ungate the automatic PHY configuration via hardware; perform
+ * the configuration via software instead.
+ **/
+static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
+{
+ u32 extcnf_ctrl;
+
+ if (hw->mac.type != e1000_pch2lan)
+ return;
+
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+
+ if (gate)
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+ else
+ extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
+ return;
+}
+
+/**
* e1000_lan_init_done_ich8lan - Check for PHY config completion
* @hw: pointer to the HW structure
*
@@ -1602,6 +1703,9 @@
if (e1000_check_reset_block(hw))
goto out;
+ /* Allow time for h/w to get to quiescent state after reset */
+ msleep(10);
+
/* Perform any necessary post-reset workarounds */
switch (hw->mac.type) {
case e1000_pchlan:
@@ -1630,6 +1734,13 @@
/* Configure the LCD with the OEM bits in NVM */
ret_val = e1000_oem_bits_config_ich8lan(hw, true);
+ /* Ungate automatic PHY configuration on non-managed 82579 */
+ if ((hw->mac.type == e1000_pch2lan) &&
+ !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+ msleep(10);
+ e1000_gate_hw_phy_config_ich8lan(hw, false);
+ }
+
out:
return ret_val;
}
@@ -1646,6 +1757,11 @@
{
s32 ret_val = 0;
+ /* Gate automatic PHY configuration by hardware on non-managed 82579 */
+ if ((hw->mac.type == e1000_pch2lan) &&
+ !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
+ e1000_gate_hw_phy_config_ich8lan(hw, true);
+
ret_val = e1000e_phy_hw_reset_generic(hw);
if (ret_val)
goto out;
@@ -2910,6 +3026,14 @@
* external PHY is reset.
*/
ctrl |= E1000_CTRL_PHY_RST;
+
+ /*
+ * Gate automatic PHY configuration by hardware on
+ * non-managed 82579
+ */
+ if ((hw->mac.type == e1000_pch2lan) &&
+ !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
+ e1000_gate_hw_phy_config_ich8lan(hw, true);
}
ret_val = e1000_acquire_swflag_ich8lan(hw);
e_dbg("Issuing a global reset to ich8lan\n");
@@ -3460,13 +3584,20 @@
void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
{
u32 phy_ctrl;
+ s32 ret_val;
phy_ctrl = er32(PHY_CTRL);
phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE;
ew32(PHY_CTRL, phy_ctrl);
- if (hw->mac.type >= e1000_pchlan)
- e1000_phy_hw_reset_ich8lan(hw);
+ if (hw->mac.type >= e1000_pchlan) {
+ e1000_oem_bits_config_ich8lan(hw, true);
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return;
+ e1000_write_smbus_addr(hw);
+ hw->phy.ops.release(hw);
+ }
}
/**
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index df4a279..0fd4eb5 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -183,6 +183,16 @@
u16 offset, nvm_alt_mac_addr_offset, nvm_data;
u8 alt_mac_addr[ETH_ALEN];
+ ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data);
+ if (ret_val)
+ goto out;
+
+ /* Check for LOM (vs. NIC) or one of two valid mezzanine cards */
+ if (!((nvm_data & NVM_COMPAT_LOM) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD)))
+ goto out;
+
ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
&nvm_alt_mac_addr_offset);
if (ret_val) {
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 2b8ef44..e561d15 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -2704,6 +2704,16 @@
u32 psrctl = 0;
u32 pages = 0;
+ /* Workaround Si errata on 82579 - configure jumbo frame flow */
+ if (hw->mac.type == e1000_pch2lan) {
+ s32 ret_val;
+
+ if (adapter->netdev->mtu > ETH_DATA_LEN)
+ ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
+ else
+ ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
+ }
+
/* Program MC offset vector base */
rctl = er32(RCTL);
rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
@@ -2744,16 +2754,6 @@
e1e_wphy(hw, 22, phy_data);
}
- /* Workaround Si errata on 82579 - configure jumbo frame flow */
- if (hw->mac.type == e1000_pch2lan) {
- s32 ret_val;
-
- if (rctl & E1000_RCTL_LPE)
- ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
- else
- ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
- }
-
/* Setup buffer sizes */
rctl &= ~E1000_RCTL_SZ_4096;
rctl |= E1000_RCTL_BSEX;
@@ -4833,6 +4833,15 @@
return -EINVAL;
}
+ /* Jumbo frame workaround on 82579 requires CRC be stripped */
+ if ((adapter->hw.mac.type == e1000_pch2lan) &&
+ !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
+ (new_mtu > ETH_DATA_LEN)) {
+ e_err("Jumbo Frames not supported on 82579 when CRC "
+ "stripping is disabled.\n");
+ return -EINVAL;
+ }
+
/* 82573 Errata 17 */
if (((adapter->hw.mac.type == e1000_82573) ||
(adapter->hw.mac.type == e1000_82574)) &&
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 99a9299..1846623 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
#include <asm/io.h>
#define DRV_NAME "ehea"
-#define DRV_VERSION "EHEA_0105"
+#define DRV_VERSION "EHEA_0106"
/* eHEA capability flags */
#define DLPAR_PORT_ADD_REM 1
@@ -400,6 +400,7 @@
u32 poll_counter;
struct net_lro_mgr lro_mgr;
struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
+ int sq_restart_flag;
};
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 897719b..6372610 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -533,8 +533,15 @@
int length = cqe->num_bytes_transfered - 4; /*remove CRC */
skb_put(skb, length);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->protocol = eth_type_trans(skb, dev);
+
+ /* The packet was not an IPv4 packet, so a complemented checksum was
+ calculated. The value is found in the Internet Checksum field. */
+ if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = csum_unfold(~cqe->inet_checksum_value);
+ } else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
}
static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
@@ -776,6 +783,53 @@
return processed;
}
+#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull
+
+static void reset_sq_restart_flag(struct ehea_port *port)
+{
+ int i;
+
+ for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ struct ehea_port_res *pr = &port->port_res[i];
+ pr->sq_restart_flag = 0;
+ }
+}
+
+static void check_sqs(struct ehea_port *port)
+{
+ struct ehea_swqe *swqe;
+ int swqe_index;
+ int i, k;
+
+ for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ struct ehea_port_res *pr = &port->port_res[i];
+ k = 0;
+ swqe = ehea_get_swqe(pr->qp, &swqe_index);
+ memset(swqe, 0, SWQE_HEADER_SIZE);
+ atomic_dec(&pr->swqe_avail);
+
+ swqe->tx_control |= EHEA_SWQE_PURGE;
+ swqe->wr_id = SWQE_RESTART_CHECK;
+ swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
+ swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
+ swqe->immediate_data_length = 80;
+
+ ehea_post_swqe(pr->qp, swqe);
+
+ while (pr->sq_restart_flag == 0) {
+ msleep(5);
+ if (++k == 100) {
+ ehea_error("HW/SW queues out of sync");
+ ehea_schedule_port_reset(pr->port);
+ return;
+ }
+ }
+ }
+
+ return;
+}
+
+
static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
struct sk_buff *skb;
@@ -793,6 +847,13 @@
cqe_counter++;
rmb();
+
+ if (cqe->wr_id == SWQE_RESTART_CHECK) {
+ pr->sq_restart_flag = 1;
+ swqe_av++;
+ break;
+ }
+
if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
ehea_error("Bad send completion status=0x%04X",
cqe->status);
@@ -2675,8 +2736,10 @@
int k = 0;
while (atomic_read(&pr->swqe_avail) < swqe_max) {
msleep(5);
- if (++k == 20)
+ if (++k == 20) {
+ ehea_error("WARNING: sq not flushed completely");
break;
+ }
}
}
}
@@ -2917,6 +2980,7 @@
port_napi_disable(port);
mutex_unlock(&port->port_lock);
}
+ reset_sq_restart_flag(port);
}
/* Unregister old memory region */
@@ -2951,6 +3015,7 @@
mutex_lock(&port->port_lock);
port_napi_enable(port);
ret = ehea_restart_qps(dev);
+ check_sqs(port);
if (!ret)
netif_wake_queue(dev);
mutex_unlock(&port->port_lock);
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h
index f608a6c..3810473 100644
--- a/drivers/net/ehea/ehea_qmr.h
+++ b/drivers/net/ehea/ehea_qmr.h
@@ -150,6 +150,7 @@
#define EHEA_CQE_TYPE_RQ 0x60
#define EHEA_CQE_STAT_ERR_MASK 0x700F
#define EHEA_CQE_STAT_FAT_ERR_MASK 0xF
+#define EHEA_CQE_BLIND_CKSUM 0x8000
#define EHEA_CQE_STAT_ERR_TCP 0x4000
#define EHEA_CQE_STAT_ERR_IP 0x2000
#define EHEA_CQE_STAT_ERR_CRC 0x1000
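
The ehea check_sqs()/reset_sq_restart_flag() pair above verifies that hardware and software agree on the send-queue state: a marker WQE carrying the magic SWQE_RESTART_CHECK wr_id is posted, and the completion path sets sq_restart_flag when the marker comes back. The bounded poll at the heart of it, sketched with assumed timing constants:

#include <linux/delay.h>
#include <linux/errno.h>

static int wait_for_marker(int *restart_flag)
{
        int tries = 0;

        while (!*restart_flag) {
                msleep(5);
                if (++tries == 100)     /* ~500 ms, then declare desync */
                        return -ETIMEDOUT;      /* caller resets the port */
        }
        return 0;
}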
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index dda2c79..0cb1cf9 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -555,6 +555,8 @@
equalizer_t *eql;
master_config_t mc;
+ memset(&mc, 0, sizeof(master_config_t));
+
if (eql_is_master(dev)) {
eql = netdev_priv(dev);
mc.max_slaves = eql->max_slaves;
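
The memset() added to eql above (and the matching one in the cxgb3 ioctl earlier) closes an information leak: an on-stack structure copied to userspace carries whatever stack garbage sits in padding or in fields the handler never assigns. The defensive pattern, sketched with a hypothetical ioctl:

#include <linux/string.h>
#include <linux/uaccess.h>

struct example_cfg {
        int max_slaves;
        int min_slaves;
};

static int example_get_cfg(void __user *uaddr)
{
        struct example_cfg cfg;

        memset(&cfg, 0, sizeof(cfg));   /* zero padding and unset fields */
        cfg.max_slaves = 4;

        if (copy_to_user(uaddr, &cfg, sizeof(cfg)))
                return -EFAULT;
        return 0;
}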
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 768b840..cce32d4 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -678,24 +678,37 @@
{
struct fec_enet_private *fep = netdev_priv(dev);
struct phy_device *phy_dev = NULL;
- int ret;
+ char mdio_bus_id[MII_BUS_ID_SIZE];
+ char phy_name[MII_BUS_ID_SIZE + 3];
+ int phy_id;
fep->phy_dev = NULL;
- /* find the first phy */
- phy_dev = phy_find_first(fep->mii_bus);
- if (!phy_dev) {
- printk(KERN_ERR "%s: no PHY found\n", dev->name);
- return -ENODEV;
+ /* check for attached phy */
+ for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
+ if ((fep->mii_bus->phy_mask & (1 << phy_id)))
+ continue;
+ if (fep->mii_bus->phy_map[phy_id] == NULL)
+ continue;
+ if (fep->mii_bus->phy_map[phy_id]->phy_id == 0)
+ continue;
+ strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
+ break;
}
- /* attach the mac to the phy */
- ret = phy_connect_direct(dev, phy_dev,
- &fec_enet_adjust_link, 0,
- PHY_INTERFACE_MODE_MII);
- if (ret) {
- printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
- return ret;
+ if (phy_id >= PHY_MAX_ADDR) {
+ printk(KERN_INFO "%s: no PHY, assuming direct connection "
+ "to switch\n", dev->name);
+ strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE);
+ phy_id = 0;
+ }
+
+ snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
+ phy_dev = phy_connect(dev, phy_name, &fec_enet_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(phy_dev)) {
+ printk(KERN_ERR "%s: could not attach to PHY\n", dev->name);
+ return PTR_ERR(phy_dev);
}
/* mask with MAC supported features */
@@ -738,7 +751,7 @@
fep->mii_bus->read = fec_enet_mdio_read;
fep->mii_bus->write = fec_enet_mdio_write;
fep->mii_bus->reset = fec_enet_mdio_reset;
- snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
+ snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id + 1);
fep->mii_bus->priv = fep;
fep->mii_bus->parent = &pdev->dev;
@@ -1311,6 +1324,9 @@
if (ret)
goto failed_mii_init;
+ /* Carrier starts down, phylib will bring it up */
+ netif_carrier_off(ndev);
+
ret = register_netdev(ndev);
if (ret)
goto failed_register;
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 4b52c76..3e5d0b6 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -608,7 +608,7 @@
spin_lock_init(&sp->lock);
atomic_set(&sp->refcnt, 1);
- init_MUTEX_LOCKED(&sp->dead_sem);
+ sema_init(&sp->dead_sem, 0);
/* !!! length of the buffers. MTU is IP MTU, not PACLEN! */
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 66e88bd..4c62839 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -747,7 +747,7 @@
spin_lock_init(&ax->buflock);
atomic_set(&ax->refcnt, 1);
- init_MUTEX_LOCKED(&ax->dead_sem);
+ sema_init(&ax->dead_sem, 0);
ax->tty = tty;
tty->disc_data = ax;
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 3506fd6..519e19e 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2928,7 +2928,7 @@
if (dev->emac_irq != NO_IRQ)
irq_dispose_mapping(dev->emac_irq);
err_free:
- kfree(ndev);
+ free_netdev(ndev);
err_gone:
/* if we were on the bootlist, remove us as we won't show up and
* wake up all waiters to notify them in case they were waiting
@@ -2971,7 +2971,7 @@
if (dev->emac_irq != NO_IRQ)
irq_dispose_mapping(dev->emac_irq);
- kfree(dev->ndev);
+ free_netdev(dev->ndev);
return 0;
}
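
The ibm_newemac fix above matters because a struct net_device is not a plain kmalloc() object: it comes from alloc_etherdev() and must be released with free_netdev(), which tears down the embedded state that kfree() would leak. The minimal shape of the rule, error path condensed:

#include <linux/etherdevice.h>

static int example_probe(void)
{
        struct net_device *ndev = alloc_etherdev(0);

        if (!ndev)
                return -ENOMEM;

        /* ... if any later probe step fails ... */
        free_netdev(ndev);      /* never plain kfree(ndev) */
        return -ENODEV;
}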
diff --git a/drivers/net/ibm_newemac/debug.c b/drivers/net/ibm_newemac/debug.c
index 3995faf..8c6c1e2 100644
--- a/drivers/net/ibm_newemac/debug.c
+++ b/drivers/net/ibm_newemac/debug.c
@@ -238,7 +238,7 @@
}
#if defined(CONFIG_MAGIC_SYSRQ)
-static void emac_sysrq_handler(int key, struct tty_struct *tty)
+static void emac_sysrq_handler(int key)
{
emac_dbg_dump_all();
}
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 2602852..4734c93 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -1113,7 +1113,8 @@
struct ibmveth_adapter *adapter = netdev_priv(dev);
struct vio_dev *viodev = adapter->vdev;
int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
- int i;
+ int i, rc;
+ int need_restart = 0;
if (new_mtu < IBMVETH_MAX_MTU)
return -EINVAL;
@@ -1127,35 +1128,32 @@
/* Deactivate all the buffer pools so that the next loop can activate
only the buffer pools necessary to hold the new MTU */
- for (i = 0; i < IbmVethNumBufferPools; i++)
- if (adapter->rx_buff_pool[i].active) {
- ibmveth_free_buffer_pool(adapter,
- &adapter->rx_buff_pool[i]);
- adapter->rx_buff_pool[i].active = 0;
- }
+ if (netif_running(adapter->netdev)) {
+ need_restart = 1;
+ adapter->pool_config = 1;
+ ibmveth_close(adapter->netdev);
+ adapter->pool_config = 0;
+ }
/* Look for an active buffer pool that can hold the new MTU */
for(i = 0; i<IbmVethNumBufferPools; i++) {
adapter->rx_buff_pool[i].active = 1;
if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
- if (netif_running(adapter->netdev)) {
- adapter->pool_config = 1;
- ibmveth_close(adapter->netdev);
- adapter->pool_config = 0;
- dev->mtu = new_mtu;
- vio_cmo_set_dev_desired(viodev,
- ibmveth_get_desired_dma
- (viodev));
- return ibmveth_open(adapter->netdev);
- }
dev->mtu = new_mtu;
vio_cmo_set_dev_desired(viodev,
ibmveth_get_desired_dma
(viodev));
+ if (need_restart) {
+ return ibmveth_open(adapter->netdev);
+ }
return 0;
}
}
+
+ if (need_restart && (rc = ibmveth_open(adapter->netdev)))
+ return rc;
+
return -EINVAL;
}
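
The ibmveth rework above restructures MTU changes into a quiesce-then-reconfigure pattern: close the interface once up front if it is running, resize the buffer pools, and reopen exactly once, propagating any reopen failure. Sketched with hypothetical driver-private open/close helpers:

#include <linux/netdevice.h>

static int my_open(struct net_device *dev);     /* assumed private paths */
static int my_close(struct net_device *dev);

static int example_change_mtu(struct net_device *dev, int new_mtu)
{
        int need_restart = 0;
        int rc;

        if (netif_running(dev)) {
                need_restart = 1;
                my_close(dev);          /* quiesce once, up front */
        }

        dev->mtu = new_mtu;
        /* ... activate the buffer pools that fit the new MTU ... */

        if (need_restart && (rc = my_open(dev)))
                return rc;              /* reopen failed: report it */
        return 0;
}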
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index 1b051da..51d7444 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -909,7 +909,7 @@
dev->tx_skb = NULL;
spin_lock_init(&dev->tx_lock);
- init_MUTEX(&dev->fsm.sem);
+ sema_init(&dev->fsm.sem, 1);
dev->drv = drv;
dev->netdev = ndev;
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index b4fb07a..51919fc 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -503,30 +503,33 @@
ks8851_wrreg16(ks, KS_RXQCR,
ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE);
- if (rxlen > 0) {
- skb = netdev_alloc_skb(ks->netdev, rxlen + 2 + 8);
- if (!skb) {
- /* todo - dump frame and move on */
+ if (rxlen > 4) {
+ unsigned int rxalign;
+
+ rxlen -= 4;
+ rxalign = ALIGN(rxlen, 4);
+ skb = netdev_alloc_skb_ip_align(ks->netdev, rxalign);
+ if (skb) {
+
+ /* 4 bytes of status header + 4 bytes of
+ * garbage: we put them before the Ethernet
+ * header, so that they are copied,
+ * but ignored.
+ */
+
+ rxpkt = skb_put(skb, rxlen) - 8;
+
+ ks8851_rdfifo(ks, rxpkt, rxalign + 8);
+
+ if (netif_msg_pktdata(ks))
+ ks8851_dbg_dumpkkt(ks, rxpkt);
+
+ skb->protocol = eth_type_trans(skb, ks->netdev);
+ netif_rx(skb);
+
+ ks->netdev->stats.rx_packets++;
+ ks->netdev->stats.rx_bytes += rxlen;
}
-
- /* two bytes to ensure ip is aligned, and four bytes
- * for the status header and 4 bytes of garbage */
- skb_reserve(skb, 2 + 4 + 4);
-
- rxpkt = skb_put(skb, rxlen - 4) - 8;
-
- /* align the packet length to 4 bytes, and add 4 bytes
- * as we're getting the rx status header as well */
- ks8851_rdfifo(ks, rxpkt, ALIGN(rxlen, 4) + 8);
-
- if (netif_msg_pktdata(ks))
- ks8851_dbg_dumpkkt(ks, rxpkt);
-
- skb->protocol = eth_type_trans(skb, ks->netdev);
- netif_rx(skb);
-
- ks->netdev->stats.rx_packets++;
- ks->netdev->stats.rx_bytes += rxlen - 4;
}
ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
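
The ks8851 receive path above leans on two tricks worth spelling out: the FIFO burst length is rounded up to the controller's 4-byte granularity with ALIGN(), and the 8 bytes of status header plus garbage are read into the skb headroom just ahead of the payload, keeping the payload itself IP-aligned. This sketch assumes netdev_alloc_skb_ip_align() leaves at least 8 bytes of headroom (it reserves NET_SKB_PAD plus NET_IP_ALIGN):

#include <linux/kernel.h>
#include <linux/skbuff.h>

static struct sk_buff *example_rx_alloc(struct net_device *ndev,
                                        unsigned int rxlen)
{
        unsigned int rxalign = ALIGN(rxlen, 4);
        struct sk_buff *skb = netdev_alloc_skb_ip_align(ndev, rxalign);
        u8 *rxpkt;

        if (!skb)
                return NULL;    /* drop the frame on allocation failure */

        /* the 8 prefix bytes land in headroom, before the payload */
        rxpkt = skb_put(skb, rxlen) - 8;
        /* ... burst-read rxalign + 8 bytes from the FIFO into rxpkt ... */
        return skb;
}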
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index c7b6247..87f0a93 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -38,6 +38,7 @@
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
+#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/tcp.h> /* needed for sizeof(tcphdr) */
@@ -902,8 +903,8 @@
disable_irq(lp->tx_irq);
disable_irq(lp->rx_irq);
- ll_temac_rx_irq(lp->tx_irq, lp);
- ll_temac_tx_irq(lp->rx_irq, lp);
+ ll_temac_rx_irq(lp->tx_irq, ndev);
+ ll_temac_tx_irq(lp->rx_irq, ndev);
enable_irq(lp->tx_irq);
enable_irq(lp->rx_irq);
diff --git a/drivers/net/ll_temac_mdio.c b/drivers/net/ll_temac_mdio.c
index 5ae28c9..8cf9d4f 100644
--- a/drivers/net/ll_temac_mdio.c
+++ b/drivers/net/ll_temac_mdio.c
@@ -10,6 +10,7 @@
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/of_mdio.h>
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index ffa1b9c..6dca357 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
#define _NETXEN_NIC_LINUX_MAJOR 4
#define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 73
-#define NETXEN_NIC_LINUX_VERSIONID "4.0.73"
+#define _NETXEN_NIC_LINUX_SUBVERSION 74
+#define NETXEN_NIC_LINUX_VERSIONID "4.0.74"
#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
#define _major(v) (((v) >> 24) & 0xff)
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index c865dda..b075a35 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -1540,7 +1540,6 @@
if (pkt_offset)
skb_pull(skb, pkt_offset);
- skb->truesize = skb->len + sizeof(struct sk_buff);
skb->protocol = eth_type_trans(skb, netdev);
napi_gro_receive(&sds_ring->napi, skb);
@@ -1602,8 +1601,6 @@
skb_put(skb, lro_length + data_offset);
- skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
-
skb_pull(skb, l2_hdr_offset);
skb->protocol = eth_type_trans(skb, netdev);
@@ -1805,8 +1802,6 @@
netxen_ctx_msg msg = 0;
struct list_head *head;
- spin_lock(&rds_ring->lock);
-
producer = rds_ring->producer;
head = &rds_ring->free_list;
@@ -1853,8 +1848,6 @@
NETXEN_RCV_PRODUCER_OFFSET), msg);
}
}
-
- spin_unlock(&rds_ring->lock);
}
static void
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index fd86e18..73d3145 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -2032,8 +2032,6 @@
struct netxen_adapter *adapter = netdev_priv(netdev);
struct net_device_stats *stats = &netdev->stats;
- memset(stats, 0, sizeof(*stats));
-
stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
stats->tx_packets = adapter->stats.xmitfinished;
stats->rx_bytes = adapter->stats.rxbytes;
@@ -2133,9 +2131,16 @@
#ifdef CONFIG_NET_POLL_CONTROLLER
static void netxen_nic_poll_controller(struct net_device *netdev)
{
+ int ring;
+ struct nx_host_sds_ring *sds_ring;
struct netxen_adapter *adapter = netdev_priv(netdev);
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
disable_irq(adapter->irq);
- netxen_intr(adapter->irq, adapter);
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netxen_intr(adapter->irq, sds_ring);
+ }
enable_irq(adapter->irq);
}
#endif
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index bc695d5..fe6983a 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -7269,32 +7269,28 @@
struct niu_parent *parent = np->parent;
struct niu_tcam_entry *tp;
int i, idx, cnt;
- u16 n_entries;
unsigned long flags;
-
+ int ret = 0;
/* put the tcam size here */
nfc->data = tcam_get_size(np);
niu_lock_parent(np, flags);
- n_entries = nfc->rule_cnt;
for (cnt = 0, i = 0; i < nfc->data; i++) {
idx = tcam_get_index(np, i);
tp = &parent->tcam[idx];
if (!tp->valid)
continue;
+ if (cnt == nfc->rule_cnt) {
+ ret = -EMSGSIZE;
+ break;
+ }
rule_locs[cnt] = i;
cnt++;
}
niu_unlock_parent(np, flags);
- if (n_entries != cnt) {
- /* print warning, this should not happen */
- netdev_info(np->dev, "niu%d: In %s(): n_entries[%d] != cnt[%d]!!!\n",
- np->parent->index, __func__, n_entries, cnt);
- }
-
- return 0;
+ return ret;
}
static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
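The niu change swaps a warn-and-return-success path for the standard "tell the caller its buffer was too small" pattern. A minimal standalone sketch of that pattern (hypothetical helper, not driver code):

#include <linux/types.h>
#include <linux/errno.h>

/* Hypothetical helper: copy the indices of valid slots into a
 * caller-sized array, failing instead of silently truncating. */
static int copy_valid_indices(u32 *out, u32 out_max,
			      const bool *valid, u32 nslots)
{
	u32 i, cnt = 0;

	for (i = 0; i < nslots; i++) {
		if (!valid[i])
			continue;
		if (cnt == out_max)
			return -EMSGSIZE;	/* caller buffer too small */
		out[cnt++] = i;
	}
	return cnt;
}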
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index c3edfe4..f9b509a 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -508,7 +508,8 @@
unsigned int vcc,
void *priv_data)
{
- int *has_shmem = priv_data;
+ int *priv = priv_data;
+ int try = (*priv & 0x1);
int i;
cistpl_io_t *io = &cfg->io;
@@ -525,77 +526,103 @@
i = p_dev->resource[1]->end = 0;
}
- *has_shmem = ((cfg->mem.nwin == 1) &&
- (cfg->mem.win[0].len >= 0x4000));
+ *priv &= ((cfg->mem.nwin == 1) &&
+ (cfg->mem.win[0].len >= 0x4000)) ? 0x10 : ~0x10;
+
p_dev->resource[0]->start = io->win[i].base;
p_dev->resource[0]->end = io->win[i].len;
- p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
+ if (!try)
+ p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
+ else
+ p_dev->io_lines = 16;
if (p_dev->resource[0]->end + p_dev->resource[1]->end >= 32)
return try_io_port(p_dev);
- return 0;
+ return -EINVAL;
+}
+
+static hw_info_t *pcnet_try_config(struct pcmcia_device *link,
+ int *has_shmem, int try)
+{
+ struct net_device *dev = link->priv;
+ hw_info_t *local_hw_info;
+ pcnet_dev_t *info = PRIV(dev);
+ int priv = try;
+ int ret;
+
+ ret = pcmcia_loop_config(link, pcnet_confcheck, &priv);
+ if (ret) {
+ dev_warn(&link->dev, "no useable port range found\n");
+ return NULL;
+ }
+ *has_shmem = (priv & 0x10);
+
+ if (!link->irq)
+ return NULL;
+
+ if (resource_size(link->resource[1]) == 8) {
+ link->conf.Attributes |= CONF_ENABLE_SPKR;
+ link->conf.Status = CCSR_AUDIO_ENA;
+ }
+ if ((link->manf_id == MANFID_IBM) &&
+ (link->card_id == PRODID_IBM_HOME_AND_AWAY))
+ link->conf.ConfigIndex |= 0x10;
+
+ ret = pcmcia_request_configuration(link, &link->conf);
+ if (ret)
+ return NULL;
+
+ dev->irq = link->irq;
+ dev->base_addr = link->resource[0]->start;
+
+ if (info->flags & HAS_MISC_REG) {
+ if ((if_port == 1) || (if_port == 2))
+ dev->if_port = if_port;
+ else
+ dev_notice(&link->dev, "invalid if_port requested\n");
+ } else
+ dev->if_port = 0;
+
+ if ((link->conf.ConfigBase == 0x03c0) &&
+ (link->manf_id == 0x149) && (link->card_id == 0xc1ab)) {
+ dev_info(&link->dev,
+ "this is an AX88190 card - use axnet_cs instead.\n");
+ return NULL;
+ }
+
+ local_hw_info = get_hwinfo(link);
+ if (!local_hw_info)
+ local_hw_info = get_prom(link);
+ if (!local_hw_info)
+ local_hw_info = get_dl10019(link);
+ if (!local_hw_info)
+ local_hw_info = get_ax88190(link);
+ if (!local_hw_info)
+ local_hw_info = get_hwired(link);
+
+ return local_hw_info;
}
static int pcnet_config(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
pcnet_dev_t *info = PRIV(dev);
- int ret, start_pg, stop_pg, cm_offset;
+ int start_pg, stop_pg, cm_offset;
int has_shmem = 0;
hw_info_t *local_hw_info;
dev_dbg(&link->dev, "pcnet_config\n");
- ret = pcmcia_loop_config(link, pcnet_confcheck, &has_shmem);
- if (ret)
- goto failed;
-
- if (!link->irq)
- goto failed;
-
- if (resource_size(link->resource[1]) == 8) {
- link->conf.Attributes |= CONF_ENABLE_SPKR;
- link->conf.Status = CCSR_AUDIO_ENA;
- }
- if ((link->manf_id == MANFID_IBM) &&
- (link->card_id == PRODID_IBM_HOME_AND_AWAY))
- link->conf.ConfigIndex |= 0x10;
-
- ret = pcmcia_request_configuration(link, &link->conf);
- if (ret)
- goto failed;
- dev->irq = link->irq;
- dev->base_addr = link->resource[0]->start;
- if (info->flags & HAS_MISC_REG) {
- if ((if_port == 1) || (if_port == 2))
- dev->if_port = if_port;
- else
- printk(KERN_NOTICE "pcnet_cs: invalid if_port requested\n");
- } else {
- dev->if_port = 0;
- }
-
- if ((link->conf.ConfigBase == 0x03c0) &&
- (link->manf_id == 0x149) && (link->card_id == 0xc1ab)) {
- printk(KERN_INFO "pcnet_cs: this is an AX88190 card!\n");
- printk(KERN_INFO "pcnet_cs: use axnet_cs instead.\n");
- goto failed;
- }
-
- local_hw_info = get_hwinfo(link);
- if (local_hw_info == NULL)
- local_hw_info = get_prom(link);
- if (local_hw_info == NULL)
- local_hw_info = get_dl10019(link);
- if (local_hw_info == NULL)
- local_hw_info = get_ax88190(link);
- if (local_hw_info == NULL)
- local_hw_info = get_hwired(link);
-
- if (local_hw_info == NULL) {
- printk(KERN_NOTICE "pcnet_cs: unable to read hardware net"
- " address for io base %#3lx\n", dev->base_addr);
- goto failed;
+ local_hw_info = pcnet_try_config(link, &has_shmem, 0);
+ if (!local_hw_info) {
+ /* check whether forcing io_lines to 16 helps... */
+ pcmcia_disable_device(link);
+ local_hw_info = pcnet_try_config(link, &has_shmem, 1);
+ if (local_hw_info == NULL) {
+ dev_notice(&link->dev, "unable to read hardware net"
+ " address for io base %#3lx\n", dev->base_addr);
+ goto failed;
+ }
}
info->flags = local_hw_info->flags;
@@ -1637,6 +1664,7 @@
PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCETTX", 0x547e66dc, 0x6fc5459b),
PCMCIA_DEVICE_PROD_ID12("iPort", "10/100 Ethernet Card", 0x56c538d2, 0x11b0ffc0),
PCMCIA_DEVICE_PROD_ID12("KANSAI ELECTRIC CO.,LTD", "KLA-PCM/T", 0xb18dc3b4, 0xcc51a956),
+ PCMCIA_DEVICE_PROD_ID12("KENTRONICS", "KEP-230", 0xaf8144c9, 0x868f6616),
PCMCIA_DEVICE_PROD_ID12("KCI", "PE520 PCMCIA Ethernet Adapter", 0xa89b87d3, 0x1eb88e64),
PCMCIA_DEVICE_PROD_ID12("KINGMAX", "EN10T2T", 0x7bcb459a, 0xa5c81fa5),
PCMCIA_DEVICE_PROD_ID12("Kingston", "KNE-PC2", 0x1128e633, 0xce2a89b3),
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 6a6b819..6c58da2 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -308,7 +308,7 @@
* may call phy routines that try to grab the same lock, and that may
* lead to a deadlock.
*/
- if (phydev->attached_dev)
+ if (phydev->attached_dev && phydev->adjust_link)
phy_stop_machine(phydev);
if (!mdio_bus_phy_may_suspend(phydev))
@@ -331,7 +331,7 @@
return ret;
no_resume:
- if (phydev->attached_dev)
+ if (phydev->attached_dev && phydev->adjust_link)
phy_start_machine(phydev, NULL);
return 0;
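Both mdio_bus hunks gate the PHY state machine on phydev->adjust_link; the assumption behind the fix is that adjust_link is only set for PHYs brought up via phy_connect(), so a merely attached PHY has no state machine to stop or restart. The shared predicate, as a sketch:

/* Sketch of the condition now used by both suspend and resume. */
static bool phy_has_state_machine(struct phy_device *phydev)
{
	return phydev->attached_dev && phydev->adjust_link;
}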
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index c076119..16ddc77 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -466,6 +466,8 @@
phydev->interface = interface;
+ phydev->state = PHY_READY;
+
/* Do initial configuration here, now that
* we have certain key parameters
* (dev_flags and interface) */
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index af50a53..78d70a6 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -184,7 +184,7 @@
tasklet_init(&ap->tsk, ppp_async_process, (unsigned long) ap);
atomic_set(&ap->refcnt, 1);
- init_MUTEX_LOCKED(&ap->dead_sem);
+ sema_init(&ap->dead_sem, 0);
ap->chan.private = ap;
ap->chan.ops = &async_ops;
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 6695a51..736b917 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -1314,8 +1314,13 @@
hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
i = 0;
list_for_each_entry(pch, &ppp->channels, clist) {
- navail += pch->avail = (pch->chan != NULL);
- pch->speed = pch->chan->speed;
+ if (pch->chan) {
+ pch->avail = 1;
+ navail++;
+ pch->speed = pch->chan->speed;
+ } else {
+ pch->avail = 0;
+ }
if (pch->avail) {
if (skb_queue_empty(&pch->file.xq) ||
!pch->had_frag) {
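The ppp_generic hunk removes a NULL dereference: a channel can be unregistered (pch->chan cleared) while the list is being walked, so chan->speed must only be sampled inside the NULL check. Distilled into a standalone sketch with stand-in types:

struct demo_chan { int speed; };
struct demo_channel { struct demo_chan *chan; int avail, speed; };

/* Mark a channel available, sampling its speed only while the
 * underlying chan pointer is known to be non-NULL. */
static int demo_mark_avail(struct demo_channel *pch)
{
	if (pch->chan) {
		pch->avail = 1;
		pch->speed = pch->chan->speed;
		return 1;		/* counts toward navail */
	}
	pch->avail = 0;
	return 0;
}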
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c
new file mode 100644
index 0000000..85eddda
--- /dev/null
+++ b/drivers/net/pxa168_eth.c
@@ -0,0 +1,1666 @@
+/*
+ * PXA168 ethernet driver.
+ * Most of the code is derived from mv643xx ethernet driver.
+ *
+ * Copyright (C) 2010 Marvell International Ltd.
+ * Sachin Sanap <ssanap@marvell.com>
+ * Philip Rakity <prakity@marvell.com>
+ * Mark Brown <markb@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/etherdevice.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include <linux/clk.h>
+#include <linux/phy.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+#include <linux/pxa168_eth.h>
+
+#define DRIVER_NAME "pxa168-eth"
+#define DRIVER_VERSION "0.3"
+
+/*
+ * Registers
+ */
+
+#define PHY_ADDRESS 0x0000
+#define SMI 0x0010
+#define PORT_CONFIG 0x0400
+#define PORT_CONFIG_EXT 0x0408
+#define PORT_COMMAND 0x0410
+#define PORT_STATUS 0x0418
+#define HTPR 0x0428
+#define SDMA_CONFIG 0x0440
+#define SDMA_CMD 0x0448
+#define INT_CAUSE 0x0450
+#define INT_W_CLEAR 0x0454
+#define INT_MASK 0x0458
+#define ETH_F_RX_DESC_0 0x0480
+#define ETH_C_RX_DESC_0 0x04A0
+#define ETH_C_TX_DESC_1 0x04E4
+
+/* smi register */
+#define SMI_BUSY (1 << 28) /* 0 - Write, 1 - Read */
+#define SMI_R_VALID (1 << 27) /* 0 - Write, 1 - Read */
+#define SMI_OP_W (0 << 26) /* Write operation */
+#define SMI_OP_R (1 << 26) /* Read operation */
+
+#define PHY_WAIT_ITERATIONS 10
+
+#define PXA168_ETH_PHY_ADDR_DEFAULT 0
+/* RX & TX descriptor command */
+#define BUF_OWNED_BY_DMA (1 << 31)
+
+/* RX descriptor status */
+#define RX_EN_INT (1 << 23)
+#define RX_FIRST_DESC (1 << 17)
+#define RX_LAST_DESC (1 << 16)
+#define RX_ERROR (1 << 15)
+
+/* TX descriptor command */
+#define TX_EN_INT (1 << 23)
+#define TX_GEN_CRC (1 << 22)
+#define TX_ZERO_PADDING (1 << 18)
+#define TX_FIRST_DESC (1 << 17)
+#define TX_LAST_DESC (1 << 16)
+#define TX_ERROR (1 << 15)
+
+/* SDMA_CMD */
+#define SDMA_CMD_AT (1 << 31)
+#define SDMA_CMD_TXDL (1 << 24)
+#define SDMA_CMD_TXDH (1 << 23)
+#define SDMA_CMD_AR (1 << 15)
+#define SDMA_CMD_ERD (1 << 7)
+
+/* Bit definitions of the Port Config Reg */
+#define PCR_HS (1 << 12)
+#define PCR_EN (1 << 7)
+#define PCR_PM (1 << 0)
+
+/* Bit definitions of the Port Config Extend Reg */
+#define PCXR_2BSM (1 << 28)
+#define PCXR_DSCP_EN (1 << 21)
+#define PCXR_MFL_1518 (0 << 14)
+#define PCXR_MFL_1536 (1 << 14)
+#define PCXR_MFL_2048 (2 << 14)
+#define PCXR_MFL_64K (3 << 14)
+#define PCXR_FLP (1 << 11)
+#define PCXR_PRIO_TX_OFF 3
+#define PCXR_TX_HIGH_PRI (7 << PCXR_PRIO_TX_OFF)
+
+/* Bit definitions of the SDMA Config Reg */
+#define SDCR_BSZ_OFF 12
+#define SDCR_BSZ8 (3 << SDCR_BSZ_OFF)
+#define SDCR_BSZ4 (2 << SDCR_BSZ_OFF)
+#define SDCR_BSZ2 (1 << SDCR_BSZ_OFF)
+#define SDCR_BSZ1 (0 << SDCR_BSZ_OFF)
+#define SDCR_BLMR (1 << 6)
+#define SDCR_BLMT (1 << 7)
+#define SDCR_RIFB (1 << 9)
+#define SDCR_RC_OFF 2
+#define SDCR_RC_MAX_RETRANS (0xf << SDCR_RC_OFF)
+
+/*
+ * Bit definitions of the Interrupt Cause Reg
+ * and Interrupt MASK Reg is the same
+ */
+#define ICR_RXBUF (1 << 0)
+#define ICR_TXBUF_H (1 << 2)
+#define ICR_TXBUF_L (1 << 3)
+#define ICR_TXEND_H (1 << 6)
+#define ICR_TXEND_L (1 << 7)
+#define ICR_RXERR (1 << 8)
+#define ICR_TXERR_H (1 << 10)
+#define ICR_TXERR_L (1 << 11)
+#define ICR_TX_UDR (1 << 13)
+#define ICR_MII_CH (1 << 28)
+
+#define ALL_INTS (ICR_TXBUF_H | ICR_TXBUF_L | ICR_TX_UDR |\
+ ICR_TXERR_H | ICR_TXERR_L |\
+ ICR_TXEND_H | ICR_TXEND_L |\
+ ICR_RXBUF | ICR_RXERR | ICR_MII_CH)
+
+#define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */
+
+#define NUM_RX_DESCS 64
+#define NUM_TX_DESCS 64
+
+#define HASH_ADD 0
+#define HASH_DELETE 1
+#define HASH_ADDR_TABLE_SIZE 0x4000 /* 16K (1/2K address - PCR_HS == 1) */
+#define HOP_NUMBER 12
+
+/* Bit definitions for Port status */
+#define PORT_SPEED_100 (1 << 0)
+#define FULL_DUPLEX (1 << 1)
+#define FLOW_CONTROL_ENABLED (1 << 2)
+#define LINK_UP (1 << 3)
+
+/* Bit definitions for work to be done */
+#define WORK_LINK (1 << 0)
+#define WORK_TX_DONE (1 << 1)
+
+/*
+ * Misc definitions.
+ */
+#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
+
+struct rx_desc {
+ u32 cmd_sts; /* Descriptor command status */
+ u16 byte_cnt; /* Descriptor buffer byte count */
+ u16 buf_size; /* Buffer size */
+ u32 buf_ptr; /* Descriptor buffer pointer */
+ u32 next_desc_ptr; /* Next descriptor pointer */
+};
+
+struct tx_desc {
+ u32 cmd_sts; /* Command/status field */
+ u16 reserved;
+ u16 byte_cnt; /* buffer byte count */
+ u32 buf_ptr; /* pointer to buffer for this descriptor */
+ u32 next_desc_ptr; /* Pointer to next descriptor */
+};
+
+struct pxa168_eth_private {
+ int port_num; /* User Ethernet port number */
+
+ int rx_resource_err; /* Rx ring resource error flag */
+
+ /* Next available and first returning Rx resource */
+ int rx_curr_desc_q, rx_used_desc_q;
+
+ /* Next available and first returning Tx resource */
+ int tx_curr_desc_q, tx_used_desc_q;
+
+ struct rx_desc *p_rx_desc_area;
+ dma_addr_t rx_desc_dma;
+ int rx_desc_area_size;
+ struct sk_buff **rx_skb;
+
+ struct tx_desc *p_tx_desc_area;
+ dma_addr_t tx_desc_dma;
+ int tx_desc_area_size;
+ struct sk_buff **tx_skb;
+
+ struct work_struct tx_timeout_task;
+
+ struct net_device *dev;
+ struct napi_struct napi;
+ u8 work_todo;
+ int skb_size;
+
+ struct net_device_stats stats;
+ /* Size of Tx Ring per queue */
+ int tx_ring_size;
+ /* Number of tx descriptors in use */
+ int tx_desc_count;
+ /* Size of Rx Ring per queue */
+ int rx_ring_size;
+ /* Number of rx descriptors in use */
+ int rx_desc_count;
+
+ /*
+	 * Used in case the RX ring is empty, which can occur when the
+	 * system does not have resources (skb's)
+ */
+ struct timer_list timeout;
+ struct mii_bus *smi_bus;
+ struct phy_device *phy;
+
+ /* clock */
+ struct clk *clk;
+ struct pxa168_eth_platform_data *pd;
+ /*
+ * Ethernet controller base address.
+ */
+ void __iomem *base;
+
+ /* Pointer to the hardware address filter table */
+ void *htpr;
+ dma_addr_t htpr_dma;
+};
+
+struct addr_table_entry {
+ __le32 lo;
+ __le32 hi;
+};
+
+/* Bit fields of a Hash Table Entry */
+enum hash_table_entry {
+ HASH_ENTRY_VALID = 1,
+ SKIP = 2,
+ HASH_ENTRY_RECEIVE_DISCARD = 4,
+ HASH_ENTRY_RECEIVE_DISCARD_BIT = 2
+};
+
+static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
+static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd);
+static int pxa168_init_hw(struct pxa168_eth_private *pep);
+static void eth_port_reset(struct net_device *dev);
+static void eth_port_start(struct net_device *dev);
+static int pxa168_eth_open(struct net_device *dev);
+static int pxa168_eth_stop(struct net_device *dev);
+static int ethernet_phy_setup(struct net_device *dev);
+
+static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
+{
+ return readl(pep->base + offset);
+}
+
+static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data)
+{
+ writel(data, pep->base + offset);
+}
+
+static void abort_dma(struct pxa168_eth_private *pep)
+{
+ int delay;
+ int max_retries = 40;
+
+ do {
+ wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT);
+ udelay(100);
+
+ delay = 10;
+ while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT))
+ && delay-- > 0) {
+ udelay(10);
+ }
+ } while (max_retries-- > 0 && delay <= 0);
+
+ if (max_retries <= 0)
+ printk(KERN_ERR "%s : DMA Stuck\n", __func__);
+}
+
+static int ethernet_phy_get(struct pxa168_eth_private *pep)
+{
+ unsigned int reg_data;
+
+ reg_data = rdl(pep, PHY_ADDRESS);
+
+ return (reg_data >> (5 * pep->port_num)) & 0x1f;
+}
+
+static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr)
+{
+ u32 reg_data;
+ int addr_shift = 5 * pep->port_num;
+
+ reg_data = rdl(pep, PHY_ADDRESS);
+ reg_data &= ~(0x1f << addr_shift);
+ reg_data |= (phy_addr & 0x1f) << addr_shift;
+ wrl(pep, PHY_ADDRESS, reg_data);
+}
+
+static void ethernet_phy_reset(struct pxa168_eth_private *pep)
+{
+ int data;
+
+ data = phy_read(pep->phy, MII_BMCR);
+ if (data < 0)
+ return;
+
+ data |= BMCR_RESET;
+ if (phy_write(pep->phy, MII_BMCR, data) < 0)
+ return;
+
+ do {
+ data = phy_read(pep->phy, MII_BMCR);
+ } while (data >= 0 && data & BMCR_RESET);
+}
+
+static void rxq_refill(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct sk_buff *skb;
+ struct rx_desc *p_used_rx_desc;
+ int used_rx_desc;
+
+ while (pep->rx_desc_count < pep->rx_ring_size) {
+ int size;
+
+ skb = dev_alloc_skb(pep->skb_size);
+ if (!skb)
+ break;
+ if (SKB_DMA_REALIGN)
+ skb_reserve(skb, SKB_DMA_REALIGN);
+ pep->rx_desc_count++;
+ /* Get 'used' Rx descriptor */
+ used_rx_desc = pep->rx_used_desc_q;
+ p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc];
+ size = skb->end - skb->data;
+ p_used_rx_desc->buf_ptr = dma_map_single(NULL,
+ skb->data,
+ size,
+ DMA_FROM_DEVICE);
+ p_used_rx_desc->buf_size = size;
+ pep->rx_skb[used_rx_desc] = skb;
+
+ /* Return the descriptor to DMA ownership */
+ wmb();
+ p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT;
+ wmb();
+
+ /* Move the used descriptor pointer to the next descriptor */
+ pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;
+
+ /* Any Rx return cancels the Rx resource error status */
+ pep->rx_resource_err = 0;
+
+ skb_reserve(skb, ETH_HW_IP_ALIGN);
+ }
+
+ /*
+	 * If no skb's could be allocated for the RX ring, set a timer
+	 * to try allocating again at a later time.
+ */
+ if (pep->rx_desc_count == 0) {
+ pep->timeout.expires = jiffies + (HZ / 10);
+ add_timer(&pep->timeout);
+ }
+}
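rxq_refill() hands each descriptor to the DMA engine with two write barriers. A sketch factoring out that hand-off, using this driver's own field and flag names:

/* Publish the buffer fields before flipping ownership, and make the
 * ownership write visible before the ring is poked (the two wmb()
 * calls in rxq_refill() above). */
static void rx_desc_give_to_dma(struct rx_desc *desc, u32 mapping,
				u16 size)
{
	desc->buf_ptr = mapping;
	desc->buf_size = size;
	wmb();			/* device must see the fields first... */
	desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT;
	wmb();			/* ...then the ownership bit */
}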
+
+static inline void rxq_refill_timer_wrapper(unsigned long data)
+{
+ struct pxa168_eth_private *pep = (void *)data;
+ napi_schedule(&pep->napi);
+}
+
+static inline u8 flip_8_bits(u8 x)
+{
+ return (((x) & 0x01) << 3) | (((x) & 0x02) << 1)
+ | (((x) & 0x04) >> 1) | (((x) & 0x08) >> 3)
+ | (((x) & 0x10) << 3) | (((x) & 0x20) << 1)
+ | (((x) & 0x40) >> 1) | (((x) & 0x80) >> 3);
+}
+
+static void nibble_swap_every_byte(unsigned char *mac_addr)
+{
+ int i;
+ for (i = 0; i < ETH_ALEN; i++) {
+ mac_addr[i] = ((mac_addr[i] & 0x0f) << 4) |
+ ((mac_addr[i] & 0xf0) >> 4);
+ }
+}
+
+static void inverse_every_nibble(unsigned char *mac_addr)
+{
+ int i;
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_addr[i] = flip_8_bits(mac_addr[i]);
+}
+
+/*
+ * ----------------------------------------------------------------------------
 * This function will calculate the hash table entry for the address.
 * Inputs
 * mac_addr_orig - MAC address.
 * Outputs
 * return the calculated entry.
+ */
+static u32 hash_function(unsigned char *mac_addr_orig)
+{
+ u32 hash_result;
+ u32 addr0;
+ u32 addr1;
+ u32 addr2;
+ u32 addr3;
+ unsigned char mac_addr[ETH_ALEN];
+
+	/* Make a copy of the MAC address since we are going to perform bit
+	 * operations on it
+ */
+ memcpy(mac_addr, mac_addr_orig, ETH_ALEN);
+
+ nibble_swap_every_byte(mac_addr);
+ inverse_every_nibble(mac_addr);
+
+ addr0 = (mac_addr[5] >> 2) & 0x3f;
+ addr1 = (mac_addr[5] & 0x03) | (((mac_addr[4] & 0x7f)) << 2);
+ addr2 = ((mac_addr[4] & 0x80) >> 7) | mac_addr[3] << 1;
+ addr3 = (mac_addr[2] & 0xff) | ((mac_addr[1] & 1) << 8);
+
+ hash_result = (addr0 << 9) | (addr1 ^ addr2 ^ addr3);
+ hash_result = hash_result & 0x07ff;
+ return hash_result;
+}
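For reference, the full derivation (nibble swap, per-nibble bit reversal, then the fold in hash_function()) collapses into a single routine. A sketch intended to be equivalent to the three helpers above:

static u32 pxa168_hash_sketch(const unsigned char *mac)
{
	unsigned char m[ETH_ALEN];
	u32 a0, a1, a2, a3;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		/* swap nibbles, then reverse the bits in each nibble */
		unsigned char b = (mac[i] << 4) | (mac[i] >> 4);

		m[i] = ((b & 0x11) << 3) | ((b & 0x22) << 1) |
		       ((b & 0x44) >> 1) | ((b & 0x88) >> 3);
	}
	a0 = (m[5] >> 2) & 0x3f;
	a1 = (m[5] & 0x03) | ((m[4] & 0x7f) << 2);
	a2 = ((m[4] & 0x80) >> 7) | (m[3] << 1);
	a3 = (m[2] & 0xff) | ((m[1] & 1) << 8);

	return ((a0 << 9) | (a1 ^ a2 ^ a3)) & 0x07ff;
}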
+
+/*
+ * ----------------------------------------------------------------------------
 * This function will add/delete an entry in the address table.
 * Inputs
 * pep - ethernet private data.
 * mac_addr - MAC address.
 * skip - if 1, skip this address. Used when deleting an entry which is
 *	  part of a chain in the hash table. We can't just delete the
 *	  entry, since that would break the chain; instead the tables need
 *	  to be defragmented from time to time.
+ * rd - 0 Discard packet upon match.
+ * - 1 Receive packet upon match.
+ * Outputs
+ * address table entry is added/deleted.
+ * 0 if success.
+ * -ENOSPC if table full
+ */
+static int add_del_hash_entry(struct pxa168_eth_private *pep,
+ unsigned char *mac_addr,
+ u32 rd, u32 skip, int del)
+{
+ struct addr_table_entry *entry, *start;
+ u32 new_high;
+ u32 new_low;
+ u32 i;
+
+ new_low = (((mac_addr[1] >> 4) & 0xf) << 15)
+ | (((mac_addr[1] >> 0) & 0xf) << 11)
+ | (((mac_addr[0] >> 4) & 0xf) << 7)
+ | (((mac_addr[0] >> 0) & 0xf) << 3)
+ | (((mac_addr[3] >> 4) & 0x1) << 31)
+ | (((mac_addr[3] >> 0) & 0xf) << 27)
+ | (((mac_addr[2] >> 4) & 0xf) << 23)
+ | (((mac_addr[2] >> 0) & 0xf) << 19)
+ | (skip << SKIP) | (rd << HASH_ENTRY_RECEIVE_DISCARD_BIT)
+ | HASH_ENTRY_VALID;
+
+ new_high = (((mac_addr[5] >> 4) & 0xf) << 15)
+ | (((mac_addr[5] >> 0) & 0xf) << 11)
+ | (((mac_addr[4] >> 4) & 0xf) << 7)
+ | (((mac_addr[4] >> 0) & 0xf) << 3)
+ | (((mac_addr[3] >> 5) & 0x7) << 0);
+
+ /*
+ * Pick the appropriate table, start scanning for free/reusable
+ * entries at the index obtained by hashing the specified MAC address
+ */
+ start = (struct addr_table_entry *)(pep->htpr);
+ entry = start + hash_function(mac_addr);
+ for (i = 0; i < HOP_NUMBER; i++) {
+ if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) {
+ break;
+ } else {
+ /* if same address put in same position */
+ if (((le32_to_cpu(entry->lo) & 0xfffffff8) ==
+ (new_low & 0xfffffff8)) &&
+ (le32_to_cpu(entry->hi) == new_high)) {
+ break;
+ }
+ }
+ if (entry == start + 0x7ff)
+ entry = start;
+ else
+ entry++;
+ }
+
+ if (((le32_to_cpu(entry->lo) & 0xfffffff8) != (new_low & 0xfffffff8)) &&
+ (le32_to_cpu(entry->hi) != new_high) && del)
+ return 0;
+
+ if (i == HOP_NUMBER) {
+ if (!del) {
+ printk(KERN_INFO "%s: table section is full, need to "
+ "move to 16kB implementation?\n",
+ __FILE__);
+ return -ENOSPC;
+ } else
+ return 0;
+ }
+
+ /*
+ * Update the selected entry
+ */
+ if (del) {
+ entry->hi = 0;
+ entry->lo = 0;
+ } else {
+ entry->hi = cpu_to_le32(new_high);
+ entry->lo = cpu_to_le32(new_low);
+ }
+
+ return 0;
+}
+
+/*
+ * ----------------------------------------------------------------------------
 * Create an address table entry from the MAC address info
 * found in the specified net_device struct
+ *
+ * Input : pointer to ethernet interface network device structure
+ * Output : N/A
+ */
+static void update_hash_table_mac_address(struct pxa168_eth_private *pep,
+ unsigned char *oaddr,
+ unsigned char *addr)
+{
+ /* Delete old entry */
+ if (oaddr)
+ add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE);
+ /* Add new entry */
+ add_del_hash_entry(pep, addr, 1, 0, HASH_ADD);
+}
+
+static int init_hash_table(struct pxa168_eth_private *pep)
+{
+ /*
+	 * The hardware expects the CPU to build a hash table based on a
+	 * predefined hash function and to populate it based on the
+	 * hardware address. The location of the hash table is identified
+	 * by a 32-bit pointer stored in the HTPR internal register. Two
+	 * possible sizes exist for the hash table: 8kB (256kB of DRAM
+	 * required (4 x 64 kB banks)) and 1/2kB (16kB of DRAM required
+	 * (4 x 4 kB banks)). We currently only support 1/2kB.
+	 */
+	/* TODO: Add support for the 8kB hash table and the alternative
+	 * hash function. The driver could dynamically switch to them if
+	 * the 1/2kB hash table is full.
+ */
+ if (pep->htpr == NULL) {
+ pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
+ HASH_ADDR_TABLE_SIZE,
+ &pep->htpr_dma, GFP_KERNEL);
+ if (pep->htpr == NULL)
+ return -ENOMEM;
+ }
+ memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
+ wrl(pep, HTPR, pep->htpr_dma);
+ return 0;
+}
+
+static void pxa168_eth_set_rx_mode(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+ u32 val;
+
+ val = rdl(pep, PORT_CONFIG);
+ if (dev->flags & IFF_PROMISC)
+ val |= PCR_PM;
+ else
+ val &= ~PCR_PM;
+ wrl(pep, PORT_CONFIG, val);
+
+ /*
+	 * Remove the old list of MAC addresses and add dev->dev_addr
+	 * and the multicast addresses.
+ */
+ memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
+ update_hash_table_mac_address(pep, NULL, dev->dev_addr);
+
+ netdev_for_each_mc_addr(ha, dev)
+ update_hash_table_mac_address(pep, NULL, ha->addr);
+}
+
+static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *sa = addr;
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ unsigned char oldMac[ETH_ALEN];
+
+ if (!is_valid_ether_addr(sa->sa_data))
+ return -EINVAL;
+ memcpy(oldMac, dev->dev_addr, ETH_ALEN);
+ memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
+ netif_addr_lock_bh(dev);
+ update_hash_table_mac_address(pep, oldMac, dev->dev_addr);
+ netif_addr_unlock_bh(dev);
+ return 0;
+}
+
+static void eth_port_start(struct net_device *dev)
+{
+ unsigned int val = 0;
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ int tx_curr_desc, rx_curr_desc;
+
+ /* Perform PHY reset, if there is a PHY. */
+ if (pep->phy != NULL) {
+ struct ethtool_cmd cmd;
+
+ pxa168_get_settings(pep->dev, &cmd);
+ ethernet_phy_reset(pep);
+ pxa168_set_settings(pep->dev, &cmd);
+ }
+
+ /* Assignment of Tx CTRP of given queue */
+ tx_curr_desc = pep->tx_curr_desc_q;
+ wrl(pep, ETH_C_TX_DESC_1,
+ (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc)));
+
+ /* Assignment of Rx CRDP of given queue */
+ rx_curr_desc = pep->rx_curr_desc_q;
+ wrl(pep, ETH_C_RX_DESC_0,
+ (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
+
+ wrl(pep, ETH_F_RX_DESC_0,
+ (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
+
+ /* Clear all interrupts */
+ wrl(pep, INT_CAUSE, 0);
+
+ /* Enable all interrupts for receive, transmit and error. */
+ wrl(pep, INT_MASK, ALL_INTS);
+
+ val = rdl(pep, PORT_CONFIG);
+ val |= PCR_EN;
+ wrl(pep, PORT_CONFIG, val);
+
+ /* Start RX DMA engine */
+ val = rdl(pep, SDMA_CMD);
+ val |= SDMA_CMD_ERD;
+ wrl(pep, SDMA_CMD, val);
+}
+
+static void eth_port_reset(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ unsigned int val = 0;
+
+ /* Stop all interrupts for receive, transmit and error. */
+ wrl(pep, INT_MASK, 0);
+
+ /* Clear all interrupts */
+ wrl(pep, INT_CAUSE, 0);
+
+ /* Stop RX DMA */
+ val = rdl(pep, SDMA_CMD);
+ val &= ~SDMA_CMD_ERD; /* abort dma command */
+
+ /* Abort any transmit and receive operations and put DMA
+ * in idle state.
+ */
+ abort_dma(pep);
+
+ /* Disable port */
+ val = rdl(pep, PORT_CONFIG);
+ val &= ~PCR_EN;
+ wrl(pep, PORT_CONFIG, val);
+}
+
+/*
+ * txq_reclaim - Free the tx desc data for completed descriptors
+ * If force is non-zero, frees uncompleted descriptors as well
+ */
+static int txq_reclaim(struct net_device *dev, int force)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct tx_desc *desc;
+ u32 cmd_sts;
+ struct sk_buff *skb;
+ int tx_index;
+ dma_addr_t addr;
+ int count;
+ int released = 0;
+
+ netif_tx_lock(dev);
+
+ pep->work_todo &= ~WORK_TX_DONE;
+ while (pep->tx_desc_count > 0) {
+ tx_index = pep->tx_used_desc_q;
+ desc = &pep->p_tx_desc_area[tx_index];
+ cmd_sts = desc->cmd_sts;
+ if (!force && (cmd_sts & BUF_OWNED_BY_DMA)) {
+ if (released > 0) {
+ goto txq_reclaim_end;
+ } else {
+ released = -1;
+ goto txq_reclaim_end;
+ }
+ }
+ pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size;
+ pep->tx_desc_count--;
+ addr = desc->buf_ptr;
+ count = desc->byte_cnt;
+ skb = pep->tx_skb[tx_index];
+ if (skb)
+ pep->tx_skb[tx_index] = NULL;
+
+ if (cmd_sts & TX_ERROR) {
+ if (net_ratelimit())
+ printk(KERN_ERR "%s: Error in TX\n", dev->name);
+ dev->stats.tx_errors++;
+ }
+ dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
+ if (skb)
+ dev_kfree_skb_irq(skb);
+ released++;
+ }
+txq_reclaim_end:
+ netif_tx_unlock(dev);
+ return released;
+}
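The early-exit block inside the loop above reduces to "stop at the first descriptor still owned by DMA, and report -1 if nothing at all was reclaimed"; flattened for clarity:

/*
 * Equivalent, flattened form of the early exit in txq_reclaim():
 *
 *	if (!force && (cmd_sts & BUF_OWNED_BY_DMA)) {
 *		if (released == 0)
 *			released = -1;	// nothing reclaimed at all
 *		goto txq_reclaim_end;
 *	}
 */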
+
+static void pxa168_eth_tx_timeout(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ printk(KERN_INFO "%s: TX timeout desc_count %d\n",
+ dev->name, pep->tx_desc_count);
+
+ schedule_work(&pep->tx_timeout_task);
+}
+
+static void pxa168_eth_tx_timeout_task(struct work_struct *work)
+{
+ struct pxa168_eth_private *pep = container_of(work,
+ struct pxa168_eth_private,
+ tx_timeout_task);
+ struct net_device *dev = pep->dev;
+ pxa168_eth_stop(dev);
+ pxa168_eth_open(dev);
+}
+
+static int rxq_process(struct net_device *dev, int budget)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ unsigned int received_packets = 0;
+ struct sk_buff *skb;
+
+ while (budget-- > 0) {
+ int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
+ struct rx_desc *rx_desc;
+ unsigned int cmd_sts;
+
+ /* Do not process Rx ring in case of Rx ring resource error */
+ if (pep->rx_resource_err)
+ break;
+ rx_curr_desc = pep->rx_curr_desc_q;
+ rx_used_desc = pep->rx_used_desc_q;
+ rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
+ cmd_sts = rx_desc->cmd_sts;
+ rmb();
+ if (cmd_sts & (BUF_OWNED_BY_DMA))
+ break;
+ skb = pep->rx_skb[rx_curr_desc];
+ pep->rx_skb[rx_curr_desc] = NULL;
+
+ rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size;
+ pep->rx_curr_desc_q = rx_next_curr_desc;
+
+ /* Rx descriptors exhausted. */
+ /* Set the Rx ring resource error flag */
+ if (rx_next_curr_desc == rx_used_desc)
+ pep->rx_resource_err = 1;
+ pep->rx_desc_count--;
+ dma_unmap_single(NULL, rx_desc->buf_ptr,
+ rx_desc->buf_size,
+ DMA_FROM_DEVICE);
+ received_packets++;
+ /*
+ * Update statistics.
+ * Note byte count includes 4 byte CRC count
+ */
+ stats->rx_packets++;
+ stats->rx_bytes += rx_desc->byte_cnt;
+ /*
+		 * If a packet was received without the first/last bits set, or
+		 * with the error summary bit set, the packet needs to be dropped.
+ */
+ if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
+ (RX_FIRST_DESC | RX_LAST_DESC))
+ || (cmd_sts & RX_ERROR)) {
+
+ stats->rx_dropped++;
+ if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
+ (RX_FIRST_DESC | RX_LAST_DESC)) {
+ if (net_ratelimit())
+ printk(KERN_ERR
+ "%s: Rx pkt on multiple desc\n",
+ dev->name);
+ }
+ if (cmd_sts & RX_ERROR)
+ stats->rx_errors++;
+ dev_kfree_skb_irq(skb);
+ } else {
+ /*
+ * The -4 is for the CRC in the trailer of the
+ * received packet
+ */
+ skb_put(skb, rx_desc->byte_cnt - 4);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_receive_skb(skb);
+ }
+ dev->last_rx = jiffies;
+ }
+ /* Fill RX ring with skb's */
+ rxq_refill(dev);
+ return received_packets;
+}
+
+static int pxa168_eth_collect_events(struct pxa168_eth_private *pep,
+ struct net_device *dev)
+{
+ u32 icr;
+ int ret = 0;
+
+ icr = rdl(pep, INT_CAUSE);
+ if (icr == 0)
+ return IRQ_NONE;
+
+ wrl(pep, INT_CAUSE, ~icr);
+ if (icr & (ICR_TXBUF_H | ICR_TXBUF_L)) {
+ pep->work_todo |= WORK_TX_DONE;
+ ret = 1;
+ }
+ if (icr & ICR_RXBUF)
+ ret = 1;
+ if (icr & ICR_MII_CH) {
+ pep->work_todo |= WORK_LINK;
+ ret = 1;
+ }
+ return ret;
+}
+
+static void handle_link_event(struct pxa168_eth_private *pep)
+{
+ struct net_device *dev = pep->dev;
+ u32 port_status;
+ int speed;
+ int duplex;
+ int fc;
+
+ port_status = rdl(pep, PORT_STATUS);
+ if (!(port_status & LINK_UP)) {
+ if (netif_carrier_ok(dev)) {
+ printk(KERN_INFO "%s: link down\n", dev->name);
+ netif_carrier_off(dev);
+ txq_reclaim(dev, 1);
+ }
+ return;
+ }
+ if (port_status & PORT_SPEED_100)
+ speed = 100;
+ else
+ speed = 10;
+
+ duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
+ fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
+ printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
+ "flow control %sabled\n", dev->name,
+ speed, duplex ? "full" : "half", fc ? "en" : "dis");
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+}
+
+static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ if (unlikely(!pxa168_eth_collect_events(pep, dev)))
+ return IRQ_NONE;
+ /* Disable interrupts */
+ wrl(pep, INT_MASK, 0);
+ napi_schedule(&pep->napi);
+ return IRQ_HANDLED;
+}
+
+static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep)
+{
+ int skb_size;
+
+ /*
+ * Reserve 2+14 bytes for an ethernet header (the hardware
+ * automatically prepends 2 bytes of dummy data to each
+ * received packet), 16 bytes for up to four VLAN tags, and
+ * 4 bytes for the trailing FCS -- 36 bytes total.
+ */
+ skb_size = pep->dev->mtu + 36;
+
+ /*
+ * Make sure that the skb size is a multiple of 8 bytes, as
+ * the lower three bits of the receive descriptor's buffer
+ * size field are ignored by the hardware.
+ */
+ pep->skb_size = (skb_size + 7) & ~7;
+
+ /*
+ * If NET_SKB_PAD is smaller than a cache line,
+ * netdev_alloc_skb() will cause skb->data to be misaligned
+ * to a cache line boundary. If this is the case, include
+ * some extra space to allow re-aligning the data area.
+ */
+ pep->skb_size += SKB_DMA_REALIGN;
+
+}
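A worked example of the sizing above for the default 1500-byte MTU, assuming a configuration in which SKB_DMA_REALIGN evaluates to 0 (PAGE_SIZE minus NET_SKB_PAD an exact multiple of SMP_CACHE_BYTES):

/*
 * mtu = 1500:
 *	1500 + 36	= 1536	(2 + 14 header, 16 VLAN, 4 FCS)
 *	(1536 + 7) & ~7	= 1536	(already a multiple of 8)
 *	1536 + 0	= 1536	(no realignment slack assumed)
 * so set_port_config_ext() below selects PCXR_MFL_1536 as the
 * maximum frame length for the default MTU.
 */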
+
+static int set_port_config_ext(struct pxa168_eth_private *pep)
+{
+ int skb_size;
+
+ pxa168_eth_recalc_skb_size(pep);
+ if (pep->skb_size <= 1518)
+ skb_size = PCXR_MFL_1518;
+ else if (pep->skb_size <= 1536)
+ skb_size = PCXR_MFL_1536;
+ else if (pep->skb_size <= 2048)
+ skb_size = PCXR_MFL_2048;
+ else
+ skb_size = PCXR_MFL_64K;
+
+ /* Extended Port Configuration */
+ wrl(pep,
+ PORT_CONFIG_EXT, PCXR_2BSM | /* Two byte prefix aligns IP hdr */
+ PCXR_DSCP_EN | /* Enable DSCP in IP */
+ skb_size | PCXR_FLP | /* do not force link pass */
+ PCXR_TX_HIGH_PRI); /* Transmit - high priority queue */
+
+ return 0;
+}
+
+static int pxa168_init_hw(struct pxa168_eth_private *pep)
+{
+ int err = 0;
+
+ /* Disable interrupts */
+ wrl(pep, INT_MASK, 0);
+ wrl(pep, INT_CAUSE, 0);
+ /* Write to ICR to clear interrupts. */
+ wrl(pep, INT_W_CLEAR, 0);
+ /* Abort any transmit and receive operations and put DMA
+ * in idle state.
+ */
+ abort_dma(pep);
+ /* Initialize address hash table */
+ err = init_hash_table(pep);
+ if (err)
+ return err;
+ /* SDMA configuration */
+ wrl(pep, SDMA_CONFIG, SDCR_BSZ8 | /* Burst size = 32 bytes */
+ SDCR_RIFB | /* Rx interrupt on frame */
+ SDCR_BLMT | /* Little endian transmit */
+ SDCR_BLMR | /* Little endian receive */
+ SDCR_RC_MAX_RETRANS); /* Max retransmit count */
+ /* Port Configuration */
+ wrl(pep, PORT_CONFIG, PCR_HS); /* Hash size is 1/2kb */
+ set_port_config_ext(pep);
+
+ return err;
+}
+
+static int rxq_init(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct rx_desc *p_rx_desc;
+ int size = 0, i = 0;
+ int rx_desc_num = pep->rx_ring_size;
+
+ /* Allocate RX skb rings */
+ pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
+ GFP_KERNEL);
+ if (!pep->rx_skb) {
+ printk(KERN_ERR "%s: Cannot alloc RX skb ring\n", dev->name);
+ return -ENOMEM;
+ }
+ /* Allocate RX ring */
+ pep->rx_desc_count = 0;
+ size = pep->rx_ring_size * sizeof(struct rx_desc);
+ pep->rx_desc_area_size = size;
+ pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
+ &pep->rx_desc_dma, GFP_KERNEL);
+ if (!pep->p_rx_desc_area) {
+ printk(KERN_ERR "%s: Cannot alloc RX ring (size %d bytes)\n",
+ dev->name, size);
+ goto out;
+ }
+ memset((void *)pep->p_rx_desc_area, 0, size);
+ /* initialize the next_desc_ptr links in the Rx descriptors ring */
+ p_rx_desc = (struct rx_desc *)pep->p_rx_desc_area;
+ for (i = 0; i < rx_desc_num; i++) {
+ p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
+ ((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
+ }
+ /* Save Rx desc pointer to driver struct. */
+ pep->rx_curr_desc_q = 0;
+ pep->rx_used_desc_q = 0;
+ pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
+ return 0;
+out:
+ kfree(pep->rx_skb);
+ return -ENOMEM;
+}
+
+static void rxq_deinit(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ int curr;
+
+ /* Free preallocated skb's on RX rings */
+ for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) {
+ if (pep->rx_skb[curr]) {
+ dev_kfree_skb(pep->rx_skb[curr]);
+ pep->rx_desc_count--;
+ }
+ }
+ if (pep->rx_desc_count)
+ printk(KERN_ERR
+ "Error in freeing Rx Ring. %d skb's still\n",
+ pep->rx_desc_count);
+ /* Free RX ring */
+ if (pep->p_rx_desc_area)
+ dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size,
+ pep->p_rx_desc_area, pep->rx_desc_dma);
+ kfree(pep->rx_skb);
+}
+
+static int txq_init(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct tx_desc *p_tx_desc;
+ int size = 0, i = 0;
+ int tx_desc_num = pep->tx_ring_size;
+
+ pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
+ GFP_KERNEL);
+ if (!pep->tx_skb) {
+ printk(KERN_ERR "%s: Cannot alloc TX skb ring\n", dev->name);
+ return -ENOMEM;
+ }
+ /* Allocate TX ring */
+ pep->tx_desc_count = 0;
+ size = pep->tx_ring_size * sizeof(struct tx_desc);
+ pep->tx_desc_area_size = size;
+ pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
+ &pep->tx_desc_dma, GFP_KERNEL);
+ if (!pep->p_tx_desc_area) {
+ printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
+ dev->name, size);
+ goto out;
+ }
+ memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size);
+ /* Initialize the next_desc_ptr links in the Tx descriptors ring */
+ p_tx_desc = (struct tx_desc *)pep->p_tx_desc_area;
+ for (i = 0; i < tx_desc_num; i++) {
+ p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +
+ ((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
+ }
+ pep->tx_curr_desc_q = 0;
+ pep->tx_used_desc_q = 0;
+ pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc);
+ return 0;
+out:
+ kfree(pep->tx_skb);
+ return -ENOMEM;
+}
+
+static void txq_deinit(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ /* Free outstanding skb's on TX ring */
+ txq_reclaim(dev, 1);
+ BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q);
+ /* Free TX ring */
+ if (pep->p_tx_desc_area)
+ dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size,
+ pep->p_tx_desc_area, pep->tx_desc_dma);
+ kfree(pep->tx_skb);
+}
+
+static int pxa168_eth_open(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ int err;
+
+ err = request_irq(dev->irq, pxa168_eth_int_handler,
+ IRQF_DISABLED, dev->name, dev);
+ if (err) {
+ dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
+ return -EAGAIN;
+ }
+ pep->rx_resource_err = 0;
+ err = rxq_init(dev);
+ if (err != 0)
+ goto out_free_irq;
+ err = txq_init(dev);
+ if (err != 0)
+ goto out_free_rx_skb;
+ pep->rx_used_desc_q = 0;
+ pep->rx_curr_desc_q = 0;
+
+ /* Fill RX ring with skb's */
+ rxq_refill(dev);
+ pep->rx_used_desc_q = 0;
+ pep->rx_curr_desc_q = 0;
+ netif_carrier_off(dev);
+ eth_port_start(dev);
+ napi_enable(&pep->napi);
+ return 0;
+out_free_rx_skb:
+ rxq_deinit(dev);
+out_free_irq:
+ free_irq(dev->irq, dev);
+ return err;
+}
+
+static int pxa168_eth_stop(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ eth_port_reset(dev);
+
+ /* Disable interrupts */
+ wrl(pep, INT_MASK, 0);
+ wrl(pep, INT_CAUSE, 0);
+ /* Write to ICR to clear interrupts. */
+ wrl(pep, INT_W_CLEAR, 0);
+ napi_disable(&pep->napi);
+ del_timer_sync(&pep->timeout);
+ netif_carrier_off(dev);
+ free_irq(dev->irq, dev);
+ rxq_deinit(dev);
+ txq_deinit(dev);
+
+ return 0;
+}
+
+static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
+{
+ int retval;
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ if ((mtu > 9500) || (mtu < 68))
+ return -EINVAL;
+
+ dev->mtu = mtu;
+ retval = set_port_config_ext(pep);
+
+ if (!netif_running(dev))
+ return 0;
+
+ /*
+ * Stop and then re-open the interface. This will allocate RX
+ * skbs of the new MTU.
+ * There is a possible danger that the open will not succeed,
+ * due to memory being full.
+ */
+ pxa168_eth_stop(dev);
+ if (pxa168_eth_open(dev)) {
+ dev_printk(KERN_ERR, &dev->dev,
+ "fatal error on re-opening device after "
+ "MTU change\n");
+ }
+
+ return 0;
+}
+
+static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep)
+{
+ int tx_desc_curr;
+
+ tx_desc_curr = pep->tx_curr_desc_q;
+ pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size;
+ BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q);
+ pep->tx_desc_count++;
+
+ return tx_desc_curr;
+}
+
+static int pxa168_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct pxa168_eth_private *pep =
+ container_of(napi, struct pxa168_eth_private, napi);
+ struct net_device *dev = pep->dev;
+ int work_done = 0;
+
+ if (unlikely(pep->work_todo & WORK_LINK)) {
+ pep->work_todo &= ~(WORK_LINK);
+ handle_link_event(pep);
+ }
+ /*
+	 * We call txq_reclaim on every poll, since interrupts are disabled
+	 * in NAPI and we may therefore miss the TX_DONE interrupt, which is
+	 * not latched in the interrupt status register.
+ */
+ txq_reclaim(dev, 0);
+ if (netif_queue_stopped(dev)
+ && pep->tx_ring_size - pep->tx_desc_count > 1) {
+ netif_wake_queue(dev);
+ }
+ work_done = rxq_process(dev, budget);
+ if (work_done < budget) {
+ napi_complete(napi);
+ wrl(pep, INT_MASK, ALL_INTS);
+ }
+
+ return work_done;
+}
+
+static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct tx_desc *desc;
+ int tx_index;
+ int length;
+
+ tx_index = eth_alloc_tx_desc_index(pep);
+ desc = &pep->p_tx_desc_area[tx_index];
+ length = skb->len;
+ pep->tx_skb[tx_index] = skb;
+ desc->byte_cnt = length;
+ desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
+ wmb();
+ desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |
+ TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;
+ wmb();
+ wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);
+
+ stats->tx_bytes += skb->len;
+ stats->tx_packets++;
+ dev->trans_start = jiffies;
+ if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
+ /* We handled the current skb, but now we are out of space.*/
+ netif_stop_queue(dev);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static int smi_wait_ready(struct pxa168_eth_private *pep)
+{
+ int i = 0;
+
+ /* wait for the SMI register to become available */
+ for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) {
+ if (i == PHY_WAIT_ITERATIONS)
+ return -ETIMEDOUT;
+ msleep(10);
+ }
+
+ return 0;
+}
+
+static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum)
+{
+ struct pxa168_eth_private *pep = bus->priv;
+ int i = 0;
+ int val;
+
+ if (smi_wait_ready(pep)) {
+ printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");
+ return -ETIMEDOUT;
+ }
+ wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R);
+ /* now wait for the data to be valid */
+ for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) {
+ if (i == PHY_WAIT_ITERATIONS) {
+ printk(KERN_WARNING
+ "pxa168_eth: SMI bus read not valid\n");
+ return -ENODEV;
+ }
+ msleep(10);
+ }
+
+ return val & 0xffff;
+}
+
+static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum,
+ u16 value)
+{
+ struct pxa168_eth_private *pep = bus->priv;
+
+ if (smi_wait_ready(pep)) {
+ printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) |
+ SMI_OP_W | (value & 0xffff));
+
+ if (smi_wait_ready(pep)) {
+ printk(KERN_ERR "pxa168_eth: SMI bus busy timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
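Both SMI helpers above compose the same command word. The register layout they assume, folded into a small sketch:

/*
 * SMI command word (assumed layout):
 *	bit  28		SMI_BUSY	(status)
 *	bit  27		SMI_R_VALID	(read data valid)
 *	bit  26		SMI_OP_R / SMI_OP_W
 *	bits 25..21	PHY register number
 *	bits 20..16	PHY address
 *	bits 15..0	data (write out, or read back)
 */
static u32 smi_cmd(int phy_addr, int regnum, int read, u16 data)
{
	u32 cmd = (phy_addr << 16) | (regnum << 21);

	return cmd | (read ? SMI_OP_R : (SMI_OP_W | data));
}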
+
+static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
+ int cmd)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ if (pep->phy != NULL)
+ return phy_mii_ioctl(pep->phy, ifr, cmd);
+
+ return -EOPNOTSUPP;
+}
+
+static struct phy_device *phy_scan(struct pxa168_eth_private *pep, int phy_addr)
+{
+ struct mii_bus *bus = pep->smi_bus;
+ struct phy_device *phydev;
+ int start;
+ int num;
+ int i;
+
+ if (phy_addr == PXA168_ETH_PHY_ADDR_DEFAULT) {
+ /* Scan entire range */
+ start = ethernet_phy_get(pep);
+ num = 32;
+ } else {
+ /* Use phy addr specific to platform */
+ start = phy_addr & 0x1f;
+ num = 1;
+ }
+ phydev = NULL;
+ for (i = 0; i < num; i++) {
+ int addr = (start + i) & 0x1f;
+ if (bus->phy_map[addr] == NULL)
+ mdiobus_scan(bus, addr);
+
+ if (phydev == NULL) {
+ phydev = bus->phy_map[addr];
+ if (phydev != NULL)
+ ethernet_phy_set_addr(pep, addr);
+ }
+ }
+
+ return phydev;
+}
+
+static void phy_init(struct pxa168_eth_private *pep, int speed, int duplex)
+{
+ struct phy_device *phy = pep->phy;
+ ethernet_phy_reset(pep);
+
+ phy_attach(pep->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_MII);
+
+ if (speed == 0) {
+ phy->autoneg = AUTONEG_ENABLE;
+ phy->speed = 0;
+ phy->duplex = 0;
+ phy->supported &= PHY_BASIC_FEATURES;
+ phy->advertising = phy->supported | ADVERTISED_Autoneg;
+ } else {
+ phy->autoneg = AUTONEG_DISABLE;
+ phy->advertising = 0;
+ phy->speed = speed;
+ phy->duplex = duplex;
+ }
+ phy_start_aneg(phy);
+}
+
+static int ethernet_phy_setup(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ if (pep->pd->init)
+ pep->pd->init();
+ pep->phy = phy_scan(pep, pep->pd->phy_addr & 0x1f);
+ if (pep->phy != NULL)
+ phy_init(pep, pep->pd->speed, pep->pd->duplex);
+ update_hash_table_mac_address(pep, NULL, dev->dev_addr);
+
+ return 0;
+}
+
+static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ int err;
+
+ err = phy_read_status(pep->phy);
+ if (err == 0)
+ err = phy_ethtool_gset(pep->phy, cmd);
+
+ return err;
+}
+
+static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ return phy_ethtool_sset(pep->phy, cmd);
+}
+
+static void pxa168_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strncpy(info->driver, DRIVER_NAME, 32);
+ strncpy(info->version, DRIVER_VERSION, 32);
+ strncpy(info->fw_version, "N/A", 32);
+ strncpy(info->bus_info, "N/A", 32);
+}
+
+static u32 pxa168_get_link(struct net_device *dev)
+{
+ return !!netif_carrier_ok(dev);
+}
+
+static const struct ethtool_ops pxa168_ethtool_ops = {
+ .get_settings = pxa168_get_settings,
+ .set_settings = pxa168_set_settings,
+ .get_drvinfo = pxa168_get_drvinfo,
+ .get_link = pxa168_get_link,
+};
+
+static const struct net_device_ops pxa168_eth_netdev_ops = {
+ .ndo_open = pxa168_eth_open,
+ .ndo_stop = pxa168_eth_stop,
+ .ndo_start_xmit = pxa168_eth_start_xmit,
+ .ndo_set_rx_mode = pxa168_eth_set_rx_mode,
+ .ndo_set_mac_address = pxa168_eth_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = pxa168_eth_do_ioctl,
+ .ndo_change_mtu = pxa168_eth_change_mtu,
+ .ndo_tx_timeout = pxa168_eth_tx_timeout,
+};
+
+static int pxa168_eth_probe(struct platform_device *pdev)
+{
+ struct pxa168_eth_private *pep = NULL;
+ struct net_device *dev = NULL;
+ struct resource *res;
+ struct clk *clk;
+ int err;
+
+ printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n");
+
+ clk = clk_get(&pdev->dev, "MFUCLK");
+ if (IS_ERR(clk)) {
+ printk(KERN_ERR "%s: Fast Ethernet failed to get clock\n",
+ DRIVER_NAME);
+ return -ENODEV;
+ }
+ clk_enable(clk);
+
+ dev = alloc_etherdev(sizeof(struct pxa168_eth_private));
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_clk;
+ }
+
+ platform_set_drvdata(pdev, dev);
+ pep = netdev_priv(dev);
+ pep->dev = dev;
+ pep->clk = clk;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ err = -ENODEV;
+ goto err_netdev;
+ }
+ pep->base = ioremap(res->start, res->end - res->start + 1);
+ if (pep->base == NULL) {
+ err = -ENOMEM;
+ goto err_netdev;
+ }
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ BUG_ON(!res);
+ dev->irq = res->start;
+ dev->netdev_ops = &pxa168_eth_netdev_ops;
+ dev->watchdog_timeo = 2 * HZ;
+ dev->base_addr = 0;
+ SET_ETHTOOL_OPS(dev, &pxa168_ethtool_ops);
+
+ INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);
+
+	printk(KERN_INFO "%s: Using random MAC address\n", DRIVER_NAME);
+ random_ether_addr(dev->dev_addr);
+
+ pep->pd = pdev->dev.platform_data;
+ pep->rx_ring_size = NUM_RX_DESCS;
+ if (pep->pd->rx_queue_size)
+ pep->rx_ring_size = pep->pd->rx_queue_size;
+
+ pep->tx_ring_size = NUM_TX_DESCS;
+ if (pep->pd->tx_queue_size)
+ pep->tx_ring_size = pep->pd->tx_queue_size;
+
+ pep->port_num = pep->pd->port_number;
+ /* Hardware supports only 3 ports */
+ BUG_ON(pep->port_num > 2);
+ netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size);
+
+ memset(&pep->timeout, 0, sizeof(struct timer_list));
+ init_timer(&pep->timeout);
+ pep->timeout.function = rxq_refill_timer_wrapper;
+ pep->timeout.data = (unsigned long)pep;
+
+ pep->smi_bus = mdiobus_alloc();
+ if (pep->smi_bus == NULL) {
+ err = -ENOMEM;
+ goto err_base;
+ }
+ pep->smi_bus->priv = pep;
+ pep->smi_bus->name = "pxa168_eth smi";
+ pep->smi_bus->read = pxa168_smi_read;
+ pep->smi_bus->write = pxa168_smi_write;
+ snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
+ pep->smi_bus->parent = &pdev->dev;
+ pep->smi_bus->phy_mask = 0xffffffff;
+ err = mdiobus_register(pep->smi_bus);
+ if (err)
+ goto err_free_mdio;
+
+ pxa168_init_hw(pep);
+ err = ethernet_phy_setup(dev);
+ if (err)
+ goto err_mdiobus;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ err = register_netdev(dev);
+ if (err)
+ goto err_mdiobus;
+ return 0;
+
+err_mdiobus:
+ mdiobus_unregister(pep->smi_bus);
+err_free_mdio:
+ mdiobus_free(pep->smi_bus);
+err_base:
+ iounmap(pep->base);
+err_netdev:
+ free_netdev(dev);
+err_clk:
+ clk_disable(clk);
+ clk_put(clk);
+ return err;
+}
+
+static int pxa168_eth_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ if (pep->htpr) {
+ dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE,
+ pep->htpr, pep->htpr_dma);
+ pep->htpr = NULL;
+ }
+ if (pep->clk) {
+ clk_disable(pep->clk);
+ clk_put(pep->clk);
+ pep->clk = NULL;
+ }
+ if (pep->phy != NULL)
+ phy_detach(pep->phy);
+
+ iounmap(pep->base);
+ pep->base = NULL;
+ mdiobus_unregister(pep->smi_bus);
+ mdiobus_free(pep->smi_bus);
+ unregister_netdev(dev);
+ flush_scheduled_work();
+ free_netdev(dev);
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static void pxa168_eth_shutdown(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ eth_port_reset(dev);
+}
+
+#ifdef CONFIG_PM
+static int pxa168_eth_resume(struct platform_device *pdev)
+{
+ return -ENOSYS;
+}
+
+static int pxa168_eth_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ return -ENOSYS;
+}
+
+#else
+#define pxa168_eth_resume NULL
+#define pxa168_eth_suspend NULL
+#endif
+
+static struct platform_driver pxa168_eth_driver = {
+ .probe = pxa168_eth_probe,
+ .remove = pxa168_eth_remove,
+ .shutdown = pxa168_eth_shutdown,
+ .resume = pxa168_eth_resume,
+ .suspend = pxa168_eth_suspend,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init pxa168_init_module(void)
+{
+ return platform_driver_register(&pxa168_eth_driver);
+}
+
+static void __exit pxa168_cleanup_module(void)
+{
+ platform_driver_unregister(&pxa168_eth_driver);
+}
+
+module_init(pxa168_init_module);
+module_exit(pxa168_cleanup_module);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168");
+MODULE_ALIAS("platform:pxa168_eth");
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 75ba744..2c7cf0b 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -1316,7 +1316,7 @@
return -ENOMEM;
}
- skb_reserve(skb, 2);
+ skb_reserve(skb, NET_IP_ALIGN);
dma = pci_map_single(pdev, skb->data,
rds_ring->dma_size, PCI_DMA_FROMDEVICE);
@@ -1404,7 +1404,6 @@
if (pkt_offset)
skb_pull(skb, pkt_offset);
- skb->truesize = skb->len + sizeof(struct sk_buff);
skb->protocol = eth_type_trans(skb, netdev);
napi_gro_receive(&sds_ring->napi, skb);
@@ -1466,8 +1465,6 @@
skb_put(skb, lro_length + data_offset);
- skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
-
skb_pull(skb, l2_hdr_offset);
skb->protocol = eth_type_trans(skb, netdev);
@@ -1700,8 +1697,6 @@
if (pkt_offset)
skb_pull(skb, pkt_offset);
- skb->truesize = skb->len + sizeof(struct sk_buff);
-
if (!qlcnic_check_loopback_buff(skb->data))
adapter->diag_cnt++;
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index bf6d87a..66eea59 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -1983,8 +1983,6 @@
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct net_device_stats *stats = &netdev->stats;
- memset(stats, 0, sizeof(*stats));
-
stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
stats->tx_packets = adapter->stats.xmitfinished;
stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
@@ -2190,9 +2188,16 @@
#ifdef CONFIG_NET_POLL_CONTROLLER
static void qlcnic_poll_controller(struct net_device *netdev)
{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
disable_irq(adapter->irq);
- qlcnic_intr(adapter->irq, adapter);
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ qlcnic_intr(adapter->irq, sds_ring);
+ }
enable_irq(adapter->irq);
}
#endif
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 8d63f69..5f89e83 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -3919,12 +3919,12 @@
for (i = 0; i < qdev->rss_ring_count; i++)
netif_napi_del(&qdev->rx_ring[i].napi);
- ql_free_rx_buffers(qdev);
-
status = ql_adapter_reset(qdev);
if (status)
netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
qdev->func);
+ ql_free_rx_buffers(qdev);
+
return status;
}
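The qlge hunk is purely an ordering fix; the rule it enforces, sketched:

/*
 * Quiesce the adapter before freeing RX buffers it may still be
 * DMA-ing into:
 *
 *	status = ql_adapter_reset(qdev);	// stop the device first
 *	ql_free_rx_buffers(qdev);		// then free RX memory
 */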
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 078bbf4..992db2f 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1212,7 +1212,8 @@
if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
return;
- counters = pci_alloc_consistent(tp->pci_dev, sizeof(*counters), &paddr);
+ counters = dma_alloc_coherent(&tp->pci_dev->dev, sizeof(*counters),
+ &paddr, GFP_KERNEL);
if (!counters)
return;
@@ -1233,7 +1234,8 @@
RTL_W32(CounterAddrLow, 0);
RTL_W32(CounterAddrHigh, 0);
- pci_free_consistent(tp->pci_dev, sizeof(*counters), counters, paddr);
+ dma_free_coherent(&tp->pci_dev->dev, sizeof(*counters), counters,
+ paddr);
}
static void rtl8169_get_ethtool_stats(struct net_device *dev,
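The r8169 hunks in this file apply the same mechanical conversion from the legacy PCI DMA wrappers to the generic DMA API, making the allocation flags explicit: pci_alloc_consistent() always implied GFP_ATOMIC, while these call sites can sleep and so pass GFP_KERNEL. The equivalence, as a hypothetical helper:

static void *counters_alloc(struct pci_dev *pdev, size_t size,
			    dma_addr_t *paddr)
{
	/* was: return pci_alloc_consistent(pdev, size, paddr);
	 * i.e.  dma_alloc_coherent(&pdev->dev, size, paddr, GFP_ATOMIC) */
	return dma_alloc_coherent(&pdev->dev, size, paddr, GFP_KERNEL);
}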
@@ -2934,7 +2936,7 @@
.hw_start = rtl_hw_start_8168,
.region = 2,
.align = 8,
- .intr_event = SYSErr | LinkChg | RxOverflow |
+ .intr_event = SYSErr | RxFIFOOver | LinkChg | RxOverflow |
TxErr | TxOK | RxOK | RxErr,
.napi_event = TxErr | TxOK | RxOK | RxOverflow,
.features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
@@ -3292,15 +3294,15 @@
/*
* Rx and Tx desscriptors needs 256 bytes alignment.
- * pci_alloc_consistent provides more.
+ * dma_alloc_coherent provides more.
*/
- tp->TxDescArray = pci_alloc_consistent(pdev, R8169_TX_RING_BYTES,
- &tp->TxPhyAddr);
+ tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
+ &tp->TxPhyAddr, GFP_KERNEL);
if (!tp->TxDescArray)
goto err_pm_runtime_put;
- tp->RxDescArray = pci_alloc_consistent(pdev, R8169_RX_RING_BYTES,
- &tp->RxPhyAddr);
+ tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
+ &tp->RxPhyAddr, GFP_KERNEL);
if (!tp->RxDescArray)
goto err_free_tx_0;
@@ -3334,12 +3336,12 @@
err_release_ring_2:
rtl8169_rx_clear(tp);
err_free_rx_1:
- pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
- tp->RxPhyAddr);
+ dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
+ tp->RxPhyAddr);
tp->RxDescArray = NULL;
err_free_tx_0:
- pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
- tp->TxPhyAddr);
+ dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
+ tp->TxPhyAddr);
tp->TxDescArray = NULL;
err_pm_runtime_put:
pm_runtime_put_noidle(&pdev->dev);
@@ -3975,7 +3977,7 @@
{
struct pci_dev *pdev = tp->pci_dev;
- pci_unmap_single(pdev, le64_to_cpu(desc->addr), tp->rx_buf_sz,
+ dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), tp->rx_buf_sz,
PCI_DMA_FROMDEVICE);
dev_kfree_skb(*sk_buff);
*sk_buff = NULL;
@@ -4000,7 +4002,7 @@
static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev,
struct net_device *dev,
struct RxDesc *desc, int rx_buf_sz,
- unsigned int align)
+ unsigned int align, gfp_t gfp)
{
struct sk_buff *skb;
dma_addr_t mapping;
@@ -4008,13 +4010,13 @@
pad = align ? align : NET_IP_ALIGN;
- skb = netdev_alloc_skb(dev, rx_buf_sz + pad);
+ skb = __netdev_alloc_skb(dev, rx_buf_sz + pad, gfp);
if (!skb)
goto err_out;
skb_reserve(skb, align ? ((pad - 1) & (unsigned long)skb->data) : pad);
- mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
+ mapping = dma_map_single(&pdev->dev, skb->data, rx_buf_sz,
PCI_DMA_FROMDEVICE);
rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
@@ -4039,7 +4041,7 @@
}
static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
- u32 start, u32 end)
+ u32 start, u32 end, gfp_t gfp)
{
u32 cur;
@@ -4054,7 +4056,7 @@
skb = rtl8169_alloc_rx_skb(tp->pci_dev, dev,
tp->RxDescArray + i,
- tp->rx_buf_sz, tp->align);
+ tp->rx_buf_sz, tp->align, gfp);
if (!skb)
break;
@@ -4082,7 +4084,7 @@
memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
- if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
+ if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC, GFP_KERNEL) != NUM_RX_DESC)
goto err_out;
rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
@@ -4099,7 +4101,8 @@
{
unsigned int len = tx_skb->len;
- pci_unmap_single(pdev, le64_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), len,
+ PCI_DMA_TODEVICE);
desc->opts1 = 0x00;
desc->opts2 = 0x00;
desc->addr = 0x00;
@@ -4243,7 +4246,8 @@
txd = tp->TxDescArray + entry;
len = frag->size;
addr = ((void *) page_address(frag->page)) + frag->page_offset;
- mapping = pci_map_single(tp->pci_dev, addr, len, PCI_DMA_TODEVICE);
+ mapping = dma_map_single(&tp->pci_dev->dev, addr, len,
+ PCI_DMA_TODEVICE);
/* anti gcc 2.95.3 bugware (sic) */
status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
@@ -4313,7 +4317,8 @@
tp->tx_skb[entry].skb = skb;
}
- mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
+ mapping = dma_map_single(&tp->pci_dev->dev, skb->data, len,
+ PCI_DMA_TODEVICE);
tp->tx_skb[entry].len = len;
txd->addr = cpu_to_le64(mapping);
@@ -4477,8 +4482,8 @@
if (!skb)
goto out;
- pci_dma_sync_single_for_cpu(tp->pci_dev, addr, pkt_size,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&tp->pci_dev->dev, addr, pkt_size,
+ PCI_DMA_FROMDEVICE);
skb_copy_from_linear_data(*sk_buff, skb->data, pkt_size);
*sk_buff = skb;
done = true;
@@ -4549,11 +4554,11 @@
rtl8169_rx_csum(skb, desc);
if (rtl8169_try_rx_copy(&skb, tp, pkt_size, addr)) {
- pci_dma_sync_single_for_device(pdev, addr,
+ dma_sync_single_for_device(&pdev->dev, addr,
pkt_size, PCI_DMA_FROMDEVICE);
rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
} else {
- pci_unmap_single(pdev, addr, tp->rx_buf_sz,
+ dma_unmap_single(&pdev->dev, addr, tp->rx_buf_sz,
PCI_DMA_FROMDEVICE);
tp->Rx_skbuff[entry] = NULL;
}
@@ -4583,7 +4588,7 @@
count = cur_rx - tp->cur_rx;
tp->cur_rx = cur_rx;
- delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
+ delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx, GFP_ATOMIC);
if (!delta && count)
netif_info(tp, intr, dev, "no Rx buffer allocated\n");
tp->dirty_rx += delta;
@@ -4625,8 +4630,7 @@
}
/* Work around for rx fifo overflow */
- if (unlikely(status & RxFIFOOver) &&
- (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
+ if (unlikely(status & RxFIFOOver)) {
netif_stop_queue(dev);
rtl8169_tx_timeout(dev);
break;
@@ -4770,10 +4774,10 @@
free_irq(dev->irq, dev);
- pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
- tp->RxPhyAddr);
- pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
- tp->TxPhyAddr);
+ dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
+ tp->RxPhyAddr);
+ dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
+ tp->TxPhyAddr);
tp->TxDescArray = NULL;
tp->RxDescArray = NULL;
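
The r8169 conversion above replaces the PCI-specific DMA wrappers with the generic DMA API, which operates on a struct device and takes an explicit gfp_t (so atomic refill paths can pass GFP_ATOMIC, as the rx_fill change does). A minimal before/after sketch with hypothetical names:

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	static void *ring;
	static dma_addr_t ring_dma;

	static int example_alloc_ring(struct pci_dev *pdev, size_t len)
	{
		/* Old: ring = pci_alloc_consistent(pdev, len, &ring_dma); */
		ring = dma_alloc_coherent(&pdev->dev, len, &ring_dma,
					  GFP_KERNEL);
		return ring ? 0 : -ENOMEM;
	}

	static void example_free_ring(struct pci_dev *pdev, size_t len)
	{
		/* Old: pci_free_consistent(pdev, len, ring, ring_dma); */
		dma_free_coherent(&pdev->dev, len, ring, ring_dma);
	}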
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 07eb884..44150f2 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -384,7 +384,7 @@
free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ?
__ilog2(sizeof(void *)) + 4 : 0);
unregister_netdev(ndev);
- kfree(ndev);
+ free_netdev(ndev);
list_for_each_entry_safe(peer, tmp, &rionet_peers, node) {
list_del(&peer->node);
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index cc4bd8c..9265315 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -804,7 +804,7 @@
err_out_free_page:
free_page((unsigned long) sp->srings);
err_out_free_dev:
- kfree(dev);
+ free_netdev(dev);
err_out:
return err;
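
Both the rionet and sgiseeq fixes above swap kfree() for free_netdev() on teardown paths. A net_device from alloc_netdev()/alloc_etherdev() carries embedded per-device state, so only free_netdev() releases it correctly; a sketch of the pairing with a hypothetical driver:

	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>

	struct example_priv { int dummy; };

	static int example_probe(void)
	{
		struct net_device *ndev;

		ndev = alloc_etherdev(sizeof(struct example_priv));
		if (!ndev)
			return -ENOMEM;

		if (register_netdev(ndev)) {
			/* Never kfree() a net_device. */
			free_netdev(ndev);
			return -ENODEV;
		}
		return 0;	/* later: unregister_netdev() + free_netdev() */
	}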
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index f5a9eb1..79fd02b 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -1437,7 +1437,7 @@
static int sh_eth_drv_probe(struct platform_device *pdev)
{
- int ret, i, devno = 0;
+ int ret, devno = 0;
struct resource *res;
struct net_device *ndev = NULL;
struct sh_eth_private *mdp;
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 40e5c46..465ae7e 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -43,6 +43,7 @@
#include <linux/seq_file.h>
#include <linux/mii.h>
#include <linux/slab.h>
+#include <linux/dmi.h>
#include <asm/irq.h>
#include "skge.h"
@@ -3868,6 +3869,8 @@
netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr);
}
+static int only_32bit_dma;
+
static int __devinit skge_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -3889,7 +3892,7 @@
pci_set_master(pdev);
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (!only_32bit_dma && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
using_dac = 1;
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
} else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
@@ -4147,8 +4150,21 @@
.shutdown = skge_shutdown,
};
+static struct dmi_system_id skge_32bit_dma_boards[] = {
+ {
+ .ident = "Gigabyte nForce boards",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co"),
+ DMI_MATCH(DMI_BOARD_NAME, "nForce"),
+ },
+ },
+ {}
+};
+
static int __init skge_init_module(void)
{
+ if (dmi_check_system(skge_32bit_dma_boards))
+ only_32bit_dma = 1;
skge_debug_init();
return pci_register_driver(&skge_driver);
}
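
The quirk above caps skge DMA at 32 bits on boards identified via DMI. dmi_check_system() returns the number of matching entries and also supports a per-entry callback; a hypothetical sketch:

	#include <linux/kernel.h>
	#include <linux/dmi.h>

	static int example_dmi_hit(const struct dmi_system_id *d)
	{
		pr_info("applying quirk for %s\n", d->ident);
		return 1;	/* non-zero stops scanning further entries */
	}

	static const struct dmi_system_id example_quirks[] = {
		{
			.callback = example_dmi_hit,
			.ident = "Hypothetical board",
			.matches = {
				DMI_MATCH(DMI_BOARD_VENDOR, "Example Vendor"),
			},
		},
		{ }	/* terminator */
	};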
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 0909ae9..8150ba1 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -58,6 +58,7 @@
MODULE_LICENSE("GPL");
MODULE_VERSION(SMSC_DRV_VERSION);
+MODULE_ALIAS("platform:smsc911x");
#if USE_DEBUG > 0
static int debug = 16;
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index bbb7951..ea0461e 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -1865,15 +1865,15 @@
if (!netif_running(dev))
return 0;
- spin_lock(&priv->lock);
-
if (priv->shutdown) {
/* Re-open the interface and re-init the MAC/DMA
- and the rings. */
+ and the rings (i.e. when resuming from hibernation) */
stmmac_open(dev);
- goto out_resume;
+ return 0;
}
+ spin_lock(&priv->lock);
+
/* Power Down bit, into the PM register, is cleared
* automatically as soon as a magic packet or a Wake-up frame
* is received. Anyway, it's better to manually clear
@@ -1901,7 +1901,6 @@
netif_start_queue(dev);
-out_resume:
spin_unlock(&priv->lock);
return 0;
}
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index bc3af78..1ec4b9e 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -4666,7 +4666,7 @@
desc_idx, *post_ptr);
drop_it_no_recycle:
/* Other statistics kept track of by card. */
- tp->net_stats.rx_dropped++;
+ tp->rx_dropped++;
goto next_pkt;
}
@@ -4726,7 +4726,7 @@
if (len > (tp->dev->mtu + ETH_HLEN) &&
skb->protocol != htons(ETH_P_8021Q)) {
dev_kfree_skb(skb);
- goto next_pkt;
+ goto drop_it_no_recycle;
}
if (desc->type_flags & RXD_FLAG_VLAN &&
@@ -9240,6 +9240,8 @@
stats->rx_missed_errors = old_stats->rx_missed_errors +
get_stat64(&hw_stats->rx_discards);
+ stats->rx_dropped = tp->rx_dropped;
+
return stats;
}
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 4937bd1..be7ff13 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2759,7 +2759,7 @@
/* begin "everything else" cacheline(s) section */
- struct rtnl_link_stats64 net_stats;
+ unsigned long rx_dropped;
struct rtnl_link_stats64 net_stats_prev;
struct tg3_ethtool_stats estats;
struct tg3_ethtool_stats estats_prev;
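
The tg3 hunks above retire the driver-private rtnl_link_stats64 copy in favour of a single rx_dropped counter that is folded into the stats handed back to the core, so software drops (including the oversized-frame case now routed to drop_it_no_recycle) remain visible. The shape of that pattern, hypothetically:

	#include <linux/netdevice.h>

	struct example_priv {
		unsigned long rx_dropped;	/* software drops */
	};

	static void example_fill_stats(struct example_priv *p,
				       struct rtnl_link_stats64 *stats)
	{
		/* ... hardware counters would be read here ... */
		stats->rx_dropped = p->rx_dropped;
	}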
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 5efa577..6888e3d 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -243,6 +243,7 @@
NWayState = (1 << 14) | (1 << 13) | (1 << 12),
NWayRestart = (1 << 12),
NonselPortActive = (1 << 9),
+ SelPortActive = (1 << 8),
LinkFailStatus = (1 << 2),
NetCxnErr = (1 << 1),
};
@@ -363,7 +364,9 @@
/* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/
static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
-static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, };
+static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
+/* If on-chip autonegotiation is broken, use half-duplex (FF3F) instead */
+static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
@@ -1064,6 +1067,9 @@
unsigned int carrier;
unsigned long flags;
+ /* clear port active bits */
+ dw32(SIAStatus, NonselPortActive | SelPortActive);
+
carrier = (status & NetCxnErr) ? 0 : 1;
if (carrier) {
@@ -1158,14 +1164,29 @@
static void de_media_interrupt (struct de_private *de, u32 status)
{
if (status & LinkPass) {
+ /* Ignore if current media is AUI or BNC and we can't use TP */
+ if ((de->media_type == DE_MEDIA_AUI ||
+ de->media_type == DE_MEDIA_BNC) &&
+ (de->media_lock ||
+ !de_ok_to_advertise(de, DE_MEDIA_TP_AUTO)))
+ return;
+ /* If current media is not TP, change it to TP */
+ if ((de->media_type == DE_MEDIA_AUI ||
+ de->media_type == DE_MEDIA_BNC)) {
+ de->media_type = DE_MEDIA_TP_AUTO;
+ de_stop_rxtx(de);
+ de_set_media(de);
+ de_start_rxtx(de);
+ }
de_link_up(de);
mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
return;
}
BUG_ON(!(status & LinkFail));
-
- if (netif_carrier_ok(de->dev)) {
+ /* Mark the link as down only if current media is TP */
+ if (netif_carrier_ok(de->dev) && de->media_type != DE_MEDIA_AUI &&
+ de->media_type != DE_MEDIA_BNC) {
de_link_down(de);
mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
}
@@ -1229,6 +1250,7 @@
if (de->de21040)
return;
+ dw32(CSR13, 0); /* Reset phy */
pci_read_config_dword(de->pdev, PCIPM, &pmctl);
pmctl |= PM_Sleep;
pci_write_config_dword(de->pdev, PCIPM, pmctl);
@@ -1574,12 +1596,15 @@
return 0; /* nothing to change */
de_link_down(de);
+ mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
de_stop_rxtx(de);
de->media_type = new_media;
de->media_lock = media_lock;
de->media_advertise = ecmd->advertising;
de_set_media(de);
+ if (netif_running(de->dev))
+ de_start_rxtx(de);
return 0;
}
@@ -1911,8 +1936,14 @@
for (i = 0; i < DE_MAX_MEDIA; i++) {
if (de->media[i].csr13 == 0xffff)
de->media[i].csr13 = t21041_csr13[i];
- if (de->media[i].csr14 == 0xffff)
- de->media[i].csr14 = t21041_csr14[i];
+ if (de->media[i].csr14 == 0xffff) {
+ /* autonegotiation is broken at least on some chip
+ revisions - rev. 0x21 works, 0x11 does not */
+ if (de->pdev->revision < 0x20)
+ de->media[i].csr14 = t21041_csr14_brk[i];
+ else
+ de->media[i].csr14 = t21041_csr14[i];
+ }
if (de->media[i].csr15 == 0xffff)
de->media[i].csr15 = t21041_csr15[i];
}
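
The 21041 setup above now chooses its CSR14 defaults by PCI revision, falling back to the half-duplex table on pre-0x20 silicon where on-chip autonegotiation is broken. A hypothetical sketch of per-revision table selection:

	#include <linux/pci.h>

	static u16 example_csr14_for(struct pci_dev *pdev, int media)
	{
		static const u16 good[]   = { 0xFFFF, 0xF7FD };
		static const u16 broken[] = { 0xFF3F, 0xF7FD };

		/* media is assumed to index a known transceiver table */
		return (pdev->revision < 0x20) ? broken[media] : good[media];
	}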
@@ -2158,6 +2189,8 @@
dev_err(&dev->dev, "pci_enable_device failed in resume\n");
goto out;
}
+ pci_set_master(pdev);
+ de_init_rings(de);
de_init_hw(de);
out_attach:
netif_device_attach(dev);
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 6efca66..1cd752f 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1652,6 +1652,8 @@
struct uart_icount cnow;
struct hso_tiocmget *tiocmget = serial->tiocmget;
+ memset(&icount, 0, sizeof(struct serial_icounter_struct));
+
if (!tiocmget)
return -ENOENT;
spin_lock_irq(&serial->serial_lock);
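
The memset added above matters because the serial_icounter_struct is later copied to userspace: any field (or padding) left unset would leak kernel stack contents. The general pattern with a hypothetical struct:

	#include <linux/string.h>
	#include <linux/uaccess.h>

	struct example_counts { int rx; int tx; int reserved[4]; };

	static int example_get_counts(void __user *argp, int rx, int tx)
	{
		struct example_counts c;

		memset(&c, 0, sizeof(c));	/* zero everything first */
		c.rx = rx;
		c.tx = tx;
		return copy_to_user(argp, &c, sizeof(c)) ? -EFAULT : 0;
	}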
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 08e7b6a..b2bcf99 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -58,6 +58,7 @@
#define USB_PRODUCT_IPHONE 0x1290
#define USB_PRODUCT_IPHONE_3G 0x1292
#define USB_PRODUCT_IPHONE_3GS 0x1294
+#define USB_PRODUCT_IPHONE_4 0x1297
#define IPHETH_USBINTF_CLASS 255
#define IPHETH_USBINTF_SUBCLASS 253
@@ -92,6 +93,10 @@
USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3GS,
IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
IPHETH_USBINTF_PROTO) },
+ { USB_DEVICE_AND_INTERFACE_INFO(
+ USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4,
+ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+ IPHETH_USBINTF_PROTO) },
{ }
};
MODULE_DEVICE_TABLE(usb, ipheth_table);
@@ -424,10 +429,6 @@
.ndo_get_stats = &ipheth_stats,
};
-static struct device_type ipheth_type = {
- .name = "wwan",
-};
-
static int ipheth_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -445,7 +446,7 @@
netdev->netdev_ops = &ipheth_netdev_ops;
netdev->watchdog_timeo = IPHETH_TX_TIMEOUT;
- strcpy(netdev->name, "wwan%d");
+ strcpy(netdev->name, "eth%d");
dev = netdev_priv(netdev);
dev->udev = udev;
@@ -495,7 +496,6 @@
SET_NETDEV_DEV(netdev, &intf->dev);
SET_ETHTOOL_OPS(netdev, &ops);
- SET_NETDEV_DEVTYPE(netdev, &ipheth_type);
retval = register_netdev(netdev);
if (retval) {
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index fd69095..f534123 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -2824,7 +2824,7 @@
netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
- NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM | NETIF_F_SG;
+ NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM;
ret = register_netdev(dev);
if (ret < 0)
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 04c6cd4..10bafd5 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -575,7 +575,7 @@
/* Initialize the chardev data structures */
mutex_init(&chan->rlock);
- init_MUTEX(&chan->wsem);
+ sema_init(&chan->wsem, 1);
/* Register the network interface */
if (!(chan->netdev = alloc_hdlcdev(chan))) {
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index 8cc9e31..1737d14 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -1244,16 +1244,16 @@
int i, result;
struct device *dev = i2400m_dev(i2400m);
const struct i2400m_msg_hdr *msg_hdr;
- size_t pl_itr, pl_size, skb_len;
+ size_t pl_itr, pl_size;
unsigned long flags;
- unsigned num_pls, single_last;
+ unsigned num_pls, single_last, skb_len;
skb_len = skb->len;
- d_fnstart(4, dev, "(i2400m %p skb %p [size %zu])\n",
+ d_fnstart(4, dev, "(i2400m %p skb %p [size %u])\n",
i2400m, skb, skb_len);
result = -EIO;
msg_hdr = (void *) skb->data;
- result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb->len);
+ result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb_len);
if (result < 0)
goto error_msg_hdr_check;
result = -EIO;
@@ -1261,10 +1261,10 @@
pl_itr = sizeof(*msg_hdr) + /* Check payload descriptor(s) */
num_pls * sizeof(msg_hdr->pld[0]);
pl_itr = ALIGN(pl_itr, I2400M_PL_ALIGN);
- if (pl_itr > skb->len) { /* got all the payload descriptors? */
+ if (pl_itr > skb_len) { /* got all the payload descriptors? */
dev_err(dev, "RX: HW BUG? message too short (%u bytes) for "
"%u payload descriptors (%zu each, total %zu)\n",
- skb->len, num_pls, sizeof(msg_hdr->pld[0]), pl_itr);
+ skb_len, num_pls, sizeof(msg_hdr->pld[0]), pl_itr);
goto error_pl_descr_short;
}
/* Walk each payload--check we really got it */
@@ -1272,7 +1272,7 @@
/* work around old gcc warnings */
pl_size = i2400m_pld_size(&msg_hdr->pld[i]);
result = i2400m_rx_pl_descr_check(i2400m, &msg_hdr->pld[i],
- pl_itr, skb->len);
+ pl_itr, skb_len);
if (result < 0)
goto error_pl_descr_check;
single_last = num_pls == 1 || i == num_pls - 1;
@@ -1290,16 +1290,16 @@
if (i < i2400m->rx_pl_min)
i2400m->rx_pl_min = i;
i2400m->rx_num++;
- i2400m->rx_size_acc += skb->len;
- if (skb->len < i2400m->rx_size_min)
- i2400m->rx_size_min = skb->len;
- if (skb->len > i2400m->rx_size_max)
- i2400m->rx_size_max = skb->len;
+ i2400m->rx_size_acc += skb_len;
+ if (skb_len < i2400m->rx_size_min)
+ i2400m->rx_size_min = skb_len;
+ if (skb_len > i2400m->rx_size_max)
+ i2400m->rx_size_max = skb_len;
spin_unlock_irqrestore(&i2400m->rx_lock, flags);
error_pl_descr_check:
error_pl_descr_short:
error_msg_hdr_check:
- d_fnend(4, dev, "(i2400m %p skb %p [size %zu]) = %d\n",
+ d_fnend(4, dev, "(i2400m %p skb %p [size %u]) = %d\n",
i2400m, skb, skb_len, result);
return result;
}
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index a105087..f9aa1bc 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -732,7 +732,7 @@
/* Nothing to do for ADMtek BBP */
} else if (priv->bbp_type != ADM8211_TYPE_ADMTEK)
- wiphy_debug(dev->wiphy, "unsupported bbp type %d\n",
+ wiphy_debug(dev->wiphy, "unsupported BBP type %d\n",
priv->bbp_type);
ADM8211_RESTORE();
@@ -1032,7 +1032,7 @@
break;
}
} else
- wiphy_debug(dev->wiphy, "unsupported bbp %d\n", priv->bbp_type);
+ wiphy_debug(dev->wiphy, "unsupported BBP %d\n", priv->bbp_type);
ADM8211_CSR_WRITE(SYNRF, 0);
@@ -1525,7 +1525,7 @@
retval = request_irq(priv->pdev->irq, adm8211_interrupt,
IRQF_SHARED, "adm8211", dev);
if (retval) {
- wiphy_err(dev->wiphy, "failed to register irq handler\n");
+ wiphy_err(dev->wiphy, "failed to register IRQ handler\n");
goto fail;
}
@@ -1902,7 +1902,7 @@
goto err_free_eeprom;
}
- wiphy_info(dev->wiphy, "hwaddr %pm, rev 0x%02x\n",
+ wiphy_info(dev->wiphy, "hwaddr %pM, Rev 0x%02x\n",
dev->wiphy->perm_addr, pdev->revision);
return 0;
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index d5140a8..1128fa8 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -655,7 +655,7 @@
exit:
kfree(hwcfg);
if (ret < 0)
- wiphy_err(priv->hw->wiphy, "cannot get hw config (error %d)\n",
+ wiphy_err(priv->hw->wiphy, "cannot get HW Config (error %d)\n",
ret);
return ret;
@@ -960,7 +960,7 @@
sizeof(struct mib_mac_addr));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
- "at76_get_mib (mac_addr) failed: %d\n", ret);
+ "at76_get_mib (MAC_ADDR) failed: %d\n", ret);
goto exit;
}
@@ -989,7 +989,7 @@
sizeof(struct mib_mac_wep));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
- "at76_get_mib (mac_wep) failed: %d\n", ret);
+ "at76_get_mib (MAC_WEP) failed: %d\n", ret);
goto exit;
}
@@ -1026,7 +1026,7 @@
sizeof(struct mib_mac_mgmt));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
- "at76_get_mib (mac_mgmt) failed: %d\n", ret);
+ "at76_get_mib (MAC_MGMT) failed: %d\n", ret);
goto exit;
}
@@ -1062,7 +1062,7 @@
ret = at76_get_mib(priv->udev, MIB_MAC, m, sizeof(struct mib_mac));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
- "at76_get_mib (mac) failed: %d\n", ret);
+ "at76_get_mib (MAC) failed: %d\n", ret);
goto exit;
}
@@ -1099,7 +1099,7 @@
ret = at76_get_mib(priv->udev, MIB_PHY, m, sizeof(struct mib_phy));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
- "at76_get_mib (phy) failed: %d\n", ret);
+ "at76_get_mib (PHY) failed: %d\n", ret);
goto exit;
}
@@ -1132,7 +1132,7 @@
ret = at76_get_mib(priv->udev, MIB_LOCAL, m, sizeof(struct mib_local));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
- "at76_get_mib (local) failed: %d\n", ret);
+ "at76_get_mib (LOCAL) failed: %d\n", ret);
goto exit;
}
@@ -1158,7 +1158,7 @@
sizeof(struct mib_mdomain));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
- "at76_get_mib (mdomain) failed: %d\n", ret);
+ "at76_get_mib (MDOMAIN) failed: %d\n", ret);
goto exit;
}
@@ -1229,7 +1229,7 @@
struct sk_buff *skb = priv->rx_skb;
if (!priv->rx_urb) {
- wiphy_err(priv->hw->wiphy, "%s: priv->rx_urb is null\n",
+ wiphy_err(priv->hw->wiphy, "%s: priv->rx_urb is NULL\n",
__func__);
return -EFAULT;
}
@@ -1792,7 +1792,7 @@
wiphy_err(priv->hw->wiphy, "error in tx submit urb: %d\n", ret);
if (ret == -EINVAL)
wiphy_err(priv->hw->wiphy,
- "-einval: tx urb %p hcpriv %p complete %p\n",
+ "-EINVAL: tx urb %p hcpriv %p complete %p\n",
priv->tx_urb,
priv->tx_urb->hcpriv, priv->tx_urb->complete);
}
@@ -2310,7 +2310,7 @@
priv->mac80211_registered = 1;
- wiphy_info(priv->hw->wiphy, "usb %s, mac %pm, firmware %d.%d.%d-%d\n",
+ wiphy_info(priv->hw->wiphy, "USB %s, MAC %pM, firmware %d.%d.%d-%d\n",
dev_name(&interface->dev), priv->mac_addr,
priv->fw_version.major, priv->fw_version.minor,
priv->fw_version.patch, priv->fw_version.build);
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index c67b05f..debfb0f 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -245,7 +245,7 @@
{
int i;
- wiphy_debug(ar->hw->wiphy, "qos queue stats\n");
+ wiphy_debug(ar->hw->wiphy, "QoS queue stats\n");
for (i = 0; i < __AR9170_NUM_TXQ; i++)
wiphy_debug(ar->hw->wiphy,
@@ -387,7 +387,7 @@
if (mac && compare_ether_addr(ieee80211_get_DA(hdr), mac)) {
#ifdef AR9170_QUEUE_DEBUG
wiphy_debug(ar->hw->wiphy,
- "skip frame => da %pm != %pm\n",
+ "skip frame => DA %pM != %pM\n",
mac, ieee80211_get_DA(hdr));
ar9170_print_txheader(ar, skb);
#endif /* AR9170_QUEUE_DEBUG */
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 373dcfe..d77ce99 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1327,6 +1327,10 @@
PCI_DMA_TODEVICE);
rate = ieee80211_get_tx_rate(sc->hw, info);
+ if (!rate) {
+ ret = -EINVAL;
+ goto err_unmap;
+ }
if (info->flags & IEEE80211_TX_CTL_NO_ACK)
flags |= AR5K_TXDESC_NOACK;
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index cc648b6..a3d95cc 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -543,7 +543,7 @@
if (conf_is_ht40(conf))
return clockrate * 2;
- return clockrate * 2;
+ return clockrate;
}
static int32_t ath9k_hw_ani_get_listen_time(struct ath_hw *ah)
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index b883b17..057fb69 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -797,7 +797,7 @@
length = block[it+1];
length &= 0xff;
- if (length > 0 && spot >= 0 && spot+length < mdataSize) {
+ if (length > 0 && spot >= 0 && spot+length <= mdataSize) {
ath_print(common, ATH_DBG_EEPROM,
"Restore at %d: spot=%d "
"offset=%d length=%d\n",
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 7f48df1..0b09db0 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -62,7 +62,7 @@
#define SD_NO_CTL 0xE0
#define NO_CTL 0xff
-#define CTL_MODE_M 7
+#define CTL_MODE_M 0xf
#define CTL_11A 0
#define CTL_11B 1
#define CTL_11G 2
diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h
index a1c3952..345dd97 100644
--- a/drivers/net/wireless/ath/regd.h
+++ b/drivers/net/wireless/ath/regd.h
@@ -31,7 +31,6 @@
#define NO_CTL 0xff
#define SD_NO_CTL 0xE0
#define NO_CTL 0xff
-#define CTL_MODE_M 7
#define CTL_11A 0
#define CTL_11B 1
#define CTL_11G 2
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 1189dbb..996e9d7 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -2723,14 +2723,6 @@
packet = &priv->rx_buffers[i];
- /* Sync the DMA for the STATUS buffer so CPU is sure to get
- * the correct values */
- pci_dma_sync_single_for_cpu(priv->pci_dev,
- sq->nic +
- sizeof(struct ipw2100_status) * i,
- sizeof(struct ipw2100_status),
- PCI_DMA_FROMDEVICE);
-
/* Sync the DMA for the RX buffer so CPU is sure to get
* the correct values */
pci_dma_sync_single_for_cpu(priv->pci_dev, packet->dma_addr,
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index fec0262..0b779a4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -265,7 +265,7 @@
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
.max_event_log_size = 128,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
@@ -297,7 +297,7 @@
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
.max_event_log_size = 128,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 6950a78..8ccfcd0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -2731,7 +2731,7 @@
.led_compensation = 64,
.broken_powersave = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
- .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
.max_event_log_size = 512,
.tx_power_by_driver = true,
};
@@ -2752,7 +2752,7 @@
.led_compensation = 64,
.broken_powersave = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
- .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
.max_event_log_size = 512,
.tx_power_by_driver = true,
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index d6da356..d92b729 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -2322,7 +2322,7 @@
.led_compensation = 61,
.chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
- .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
.temperature_kelvin = true,
.max_event_log_size = 512,
.tx_power_by_driver = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index aacf377..48bdcd8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -510,7 +510,7 @@
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
.max_event_log_size = 512,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
@@ -541,7 +541,7 @@
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
.max_event_log_size = 512,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
@@ -570,7 +570,7 @@
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
.max_event_log_size = 512,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
@@ -601,7 +601,7 @@
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
.max_event_log_size = 512,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
@@ -632,7 +632,7 @@
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
.max_event_log_size = 512,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
@@ -663,7 +663,7 @@
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
.max_event_log_size = 512,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
@@ -693,7 +693,7 @@
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
.max_event_log_size = 512,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index af4fd50..cee06b9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -388,7 +388,7 @@
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
.max_event_log_size = 512,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
@@ -424,7 +424,7 @@
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
.max_event_log_size = 512,
.sensitivity_calib_by_driver = true,
.chain_noise_calib_by_driver = true,
@@ -459,7 +459,7 @@
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
.max_event_log_size = 512,
.sensitivity_calib_by_driver = true,
.chain_noise_calib_by_driver = true,
@@ -496,7 +496,7 @@
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
.max_event_log_size = 512,
.sensitivity_calib_by_driver = true,
.chain_noise_calib_by_driver = true,
@@ -532,7 +532,7 @@
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
.max_event_log_size = 512,
.sensitivity_calib_by_driver = true,
.chain_noise_calib_by_driver = true,
@@ -570,7 +570,7 @@
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
.max_event_log_size = 512,
.sensitivity_calib_by_driver = true,
.chain_noise_calib_by_driver = true,
@@ -606,7 +606,7 @@
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
.max_event_log_size = 512,
.sensitivity_calib_by_driver = true,
.chain_noise_calib_by_driver = true,
@@ -644,7 +644,7 @@
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
.max_event_log_size = 512,
.sensitivity_calib_by_driver = true,
.chain_noise_calib_by_driver = true,
@@ -680,7 +680,7 @@
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
.max_event_log_size = 512,
.sensitivity_calib_by_driver = true,
.chain_noise_calib_by_driver = true,
@@ -721,7 +721,7 @@
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
.max_event_log_size = 1024,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
@@ -756,7 +756,7 @@
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
.max_event_log_size = 1024,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
@@ -791,7 +791,7 @@
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
.max_event_log_size = 1024,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
@@ -828,7 +828,7 @@
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1500,
- .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
.max_event_log_size = 1024,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
@@ -866,7 +866,7 @@
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1500,
- .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
.max_event_log_size = 1024,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
@@ -902,7 +902,7 @@
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1500,
- .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
.max_event_log_size = 1024,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
@@ -940,7 +940,7 @@
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
.max_event_log_size = 1024,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 9dd9e64..8fd00a6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -1411,7 +1411,7 @@
clear_bit(STATUS_SCAN_HW, &priv->status);
clear_bit(STATUS_SCANNING, &priv->status);
/* inform mac80211 scan aborted */
- queue_work(priv->workqueue, &priv->scan_completed);
+ queue_work(priv->workqueue, &priv->abort_scan);
}
int iwlagn_manage_ibss_station(struct iwl_priv *priv,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index c1882fd..10d7b9b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -3667,6 +3667,49 @@
IWL_DEBUG_MAC80211(priv, "leave\n");
}
+static void iwlagn_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct iwl_priv *priv = hw->priv;
+ __le32 filter_or = 0, filter_nand = 0;
+
+#define CHK(test, flag) do { \
+ if (*total_flags & (test)) \
+ filter_or |= (flag); \
+ else \
+ filter_nand |= (flag); \
+ } while (0)
+
+ IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
+ changed_flags, *total_flags);
+
+ CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+ CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
+ CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
+
+#undef CHK
+
+ mutex_lock(&priv->mutex);
+
+ priv->staging_rxon.filter_flags &= ~filter_nand;
+ priv->staging_rxon.filter_flags |= filter_or;
+
+ iwlcore_commit_rxon(priv);
+
+ mutex_unlock(&priv->mutex);
+
+ /*
+ * Receiving all multicast frames is always enabled by the
+ * default flags setup in iwl_connection_init_rx_config()
+ * since we currently do not support programming multicast
+ * filters into the device.
+ */
+ *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+ FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+}
+
static void iwl_mac_flush(struct ieee80211_hw *hw, bool drop)
{
struct iwl_priv *priv = hw->priv;
@@ -3867,7 +3910,7 @@
.add_interface = iwl_mac_add_interface,
.remove_interface = iwl_mac_remove_interface,
.config = iwl_mac_config,
- .configure_filter = iwl_configure_filter,
+ .configure_filter = iwlagn_configure_filter,
.set_key = iwl_mac_set_key,
.update_tkip_key = iwl_mac_update_tkip_key,
.conf_tx = iwl_mac_conf_tx,
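
iwlagn_configure_filter() above (duplicated for 3945 further down before iwl_configure_filter() is removed from core) collects bits to set and bits to clear in one pass with the CHK macro. The idiom in isolation, with hypothetical flag values:

	#include <linux/types.h>

	static void example_update_flags(u32 *hw_flags, unsigned int requested)
	{
		u32 set = 0, clear = 0;

	#define CHK(test, flag) do {		\
		if (requested & (test))		\
			set |= (flag);		\
		else				\
			clear |= (flag);	\
	} while (0)

		CHK(0x1, 0x100);	/* hypothetical request bit -> hw bit */
		CHK(0x2, 0x200);
	#undef CHK

		*hw_flags = (*hw_flags & ~clear) | set;
	}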
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 2c03c6e..e23c406 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -1328,51 +1328,6 @@
EXPORT_SYMBOL(iwl_apm_init);
-
-void iwl_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags,
- u64 multicast)
-{
- struct iwl_priv *priv = hw->priv;
- __le32 filter_or = 0, filter_nand = 0;
-
-#define CHK(test, flag) do { \
- if (*total_flags & (test)) \
- filter_or |= (flag); \
- else \
- filter_nand |= (flag); \
- } while (0)
-
- IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
- changed_flags, *total_flags);
-
- CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
- CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
- CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
-
-#undef CHK
-
- mutex_lock(&priv->mutex);
-
- priv->staging_rxon.filter_flags &= ~filter_nand;
- priv->staging_rxon.filter_flags |= filter_or;
-
- iwlcore_commit_rxon(priv);
-
- mutex_unlock(&priv->mutex);
-
- /*
- * Receiving all multicast frames is always enabled by the
- * default flags setup in iwl_connection_init_rx_config()
- * since we currently do not support programming multicast
- * filters into the device.
- */
- *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
- FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
-}
-EXPORT_SYMBOL(iwl_configure_filter);
-
int iwl_set_hw_params(struct iwl_priv *priv)
{
priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
@@ -2658,6 +2613,11 @@
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return -EINVAL;
+ if (test_bit(STATUS_SCANNING, &priv->status)) {
+ IWL_DEBUG_INFO(priv, "scan in progress.\n");
+ return -EINVAL;
+ }
+
if (mode >= IWL_MAX_FORCE_RESET) {
IWL_DEBUG_INFO(priv, "invalid reset request.\n");
return -EINVAL;
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 4a71dfb..5e6ee3d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -372,9 +372,6 @@
u32 decrypt_res,
struct ieee80211_rx_status *stats);
void iwl_irq_handle_error(struct iwl_priv *priv);
-void iwl_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags, u64 multicast);
int iwl_set_hw_params(struct iwl_priv *priv);
void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif);
void iwl_bss_info_changed(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index f35bcad..2e97cd2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -1049,7 +1049,8 @@
#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
/* timer constants used to monitor and recover stuck tx queues, in msecs */
-#define IWL_MONITORING_PERIOD (1000)
+#define IWL_DEF_MONITORING_PERIOD (1000)
+#define IWL_LONG_MONITORING_PERIOD (5000)
#define IWL_ONE_HUNDRED_MSECS (100)
#define IWL_SIXTY_SECS (60000)
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 70c4b8f..d31661c 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -3018,7 +3018,7 @@
clear_bit(STATUS_SCANNING, &priv->status);
/* inform mac80211 scan aborted */
- queue_work(priv->workqueue, &priv->scan_completed);
+ queue_work(priv->workqueue, &priv->abort_scan);
}
static void iwl3945_bg_restart(struct work_struct *data)
@@ -3391,6 +3391,55 @@
return 0;
}
+
+static void iwl3945_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct iwl_priv *priv = hw->priv;
+ __le32 filter_or = 0, filter_nand = 0;
+
+#define CHK(test, flag) do { \
+ if (*total_flags & (test)) \
+ filter_or |= (flag); \
+ else \
+ filter_nand |= (flag); \
+ } while (0)
+
+ IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
+ changed_flags, *total_flags);
+
+ CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+ CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
+ CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
+
+#undef CHK
+
+ mutex_lock(&priv->mutex);
+
+ priv->staging_rxon.filter_flags &= ~filter_nand;
+ priv->staging_rxon.filter_flags |= filter_or;
+
+ /*
+ * Committing directly here breaks for some reason,
+ * but we'll eventually commit the filter flags
+ * change anyway.
+ */
+
+ mutex_unlock(&priv->mutex);
+
+ /*
+ * Receiving all multicast frames is always enabled by the
+ * default flags setup in iwl_connection_init_rx_config()
+ * since we currently do not support programming multicast
+ * filters into the device.
+ */
+ *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+ FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+}
+
+
/*****************************************************************************
*
* sysfs attributes
@@ -3796,7 +3845,7 @@
.add_interface = iwl_mac_add_interface,
.remove_interface = iwl_mac_remove_interface,
.config = iwl_mac_config,
- .configure_filter = iwl_configure_filter,
+ .configure_filter = iwl3945_configure_filter,
.set_key = iwl3945_mac_set_key,
.conf_tx = iwl_mac_conf_tx,
.reset_tsf = iwl_mac_reset_tsf,
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index ba854c7..87b6349 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -128,7 +128,7 @@
bool helper_allocated;
bool firmware_allocated;
- u8 buffer[65536];
+ u8 buffer[65536] __attribute__((aligned(4)));
spinlock_t lock;
struct if_sdio_packet *packets;
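
The alignment attribute added above matters because the 64 KiB bounce buffer sits mid-struct after two bools: a u8 array member is otherwise only byte-aligned, and SDIO hosts doing 32-bit transfers to it can fault or corrupt data. In isolation:

	#include <linux/types.h>

	struct example_card {
		bool helper_allocated;
		bool firmware_allocated;
		/* force 4-byte alignment for 32-bit host accesses */
		u8 buffer[65536] __attribute__((aligned(4)));
	};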
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 01ad7f7..86fa8ab 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -486,7 +486,7 @@
struct ieee80211_rx_status rx_status;
if (data->idle) {
- wiphy_debug(hw->wiphy, "trying to tx when idle - reject\n");
+ wiphy_debug(hw->wiphy, "Trying to TX when idle - reject\n");
return false;
}
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index d761ed2..f152a25 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -910,14 +910,14 @@
rxq->rxd = pci_alloc_consistent(priv->pdev, size, &rxq->rxd_dma);
if (rxq->rxd == NULL) {
- wiphy_err(hw->wiphy, "failed to alloc rx descriptors\n");
+ wiphy_err(hw->wiphy, "failed to alloc RX descriptors\n");
return -ENOMEM;
}
memset(rxq->rxd, 0, size);
rxq->buf = kmalloc(MWL8K_RX_DESCS * sizeof(*rxq->buf), GFP_KERNEL);
if (rxq->buf == NULL) {
- wiphy_err(hw->wiphy, "failed to alloc rx skbuff list\n");
+ wiphy_err(hw->wiphy, "failed to alloc RX skbuff list\n");
pci_free_consistent(priv->pdev, size, rxq->rxd, rxq->rxd_dma);
return -ENOMEM;
}
@@ -1145,14 +1145,14 @@
txq->txd = pci_alloc_consistent(priv->pdev, size, &txq->txd_dma);
if (txq->txd == NULL) {
- wiphy_err(hw->wiphy, "failed to alloc tx descriptors\n");
+ wiphy_err(hw->wiphy, "failed to alloc TX descriptors\n");
return -ENOMEM;
}
memset(txq->txd, 0, size);
txq->skb = kmalloc(MWL8K_TX_DESCS * sizeof(*txq->skb), GFP_KERNEL);
if (txq->skb == NULL) {
- wiphy_err(hw->wiphy, "failed to alloc tx skbuff list\n");
+ wiphy_err(hw->wiphy, "failed to alloc TX skbuff list\n");
pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma);
return -ENOMEM;
}
@@ -1573,7 +1573,7 @@
PCI_DMA_BIDIRECTIONAL);
if (!timeout) {
- wiphy_err(hw->wiphy, "command %s timeout after %u ms\n",
+ wiphy_err(hw->wiphy, "Command %s timeout after %u ms\n",
mwl8k_cmd_name(cmd->code, buf, sizeof(buf)),
MWL8K_CMD_TIMEOUT_MS);
rc = -ETIMEDOUT;
@@ -1584,11 +1584,11 @@
rc = cmd->result ? -EINVAL : 0;
if (rc)
- wiphy_err(hw->wiphy, "command %s error 0x%x\n",
+ wiphy_err(hw->wiphy, "Command %s error 0x%x\n",
mwl8k_cmd_name(cmd->code, buf, sizeof(buf)),
le16_to_cpu(cmd->result));
else if (ms > 2000)
- wiphy_notice(hw->wiphy, "command %s took %d ms\n",
+ wiphy_notice(hw->wiphy, "Command %s took %d ms\n",
mwl8k_cmd_name(cmd->code,
buf, sizeof(buf)),
ms);
@@ -3210,7 +3210,7 @@
rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
IRQF_SHARED, MWL8K_NAME, hw);
if (rc) {
- wiphy_err(hw->wiphy, "failed to register irq handler\n");
+ wiphy_err(hw->wiphy, "failed to register IRQ handler\n");
return -EIO;
}
@@ -3926,7 +3926,7 @@
priv->sram = pci_iomap(pdev, 0, 0x10000);
if (priv->sram == NULL) {
- wiphy_err(hw->wiphy, "cannot map device sram\n");
+ wiphy_err(hw->wiphy, "Cannot map device SRAM\n");
goto err_iounmap;
}
@@ -3938,7 +3938,7 @@
if (priv->regs == NULL) {
priv->regs = pci_iomap(pdev, 2, 0x10000);
if (priv->regs == NULL) {
- wiphy_err(hw->wiphy, "cannot map device registers\n");
+ wiphy_err(hw->wiphy, "Cannot map device registers\n");
goto err_iounmap;
}
}
@@ -3950,14 +3950,14 @@
/* Ask userland hotplug daemon for the device firmware */
rc = mwl8k_request_firmware(priv);
if (rc) {
- wiphy_err(hw->wiphy, "firmware files not found\n");
+ wiphy_err(hw->wiphy, "Firmware files not found\n");
goto err_stop_firmware;
}
/* Load firmware into hardware */
rc = mwl8k_load_firmware(hw);
if (rc) {
- wiphy_err(hw->wiphy, "cannot start firmware\n");
+ wiphy_err(hw->wiphy, "Cannot start firmware\n");
goto err_stop_firmware;
}
@@ -4047,7 +4047,7 @@
rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
IRQF_SHARED, MWL8K_NAME, hw);
if (rc) {
- wiphy_err(hw->wiphy, "failed to register irq handler\n");
+ wiphy_err(hw->wiphy, "failed to register IRQ handler\n");
goto err_free_queues;
}
@@ -4067,7 +4067,7 @@
rc = mwl8k_cmd_get_hw_spec_sta(hw);
}
if (rc) {
- wiphy_err(hw->wiphy, "cannot initialise firmware\n");
+ wiphy_err(hw->wiphy, "Cannot initialise firmware\n");
goto err_free_irq;
}
@@ -4081,14 +4081,14 @@
/* Turn radio off */
rc = mwl8k_cmd_radio_disable(hw);
if (rc) {
- wiphy_err(hw->wiphy, "cannot disable\n");
+ wiphy_err(hw->wiphy, "Cannot disable\n");
goto err_free_irq;
}
/* Clear MAC address */
rc = mwl8k_cmd_set_mac_addr(hw, NULL, "\x00\x00\x00\x00\x00\x00");
if (rc) {
- wiphy_err(hw->wiphy, "cannot clear mac address\n");
+ wiphy_err(hw->wiphy, "Cannot clear MAC address\n");
goto err_free_irq;
}
@@ -4098,7 +4098,7 @@
rc = ieee80211_register_hw(hw);
if (rc) {
- wiphy_err(hw->wiphy, "cannot register device\n");
+ wiphy_err(hw->wiphy, "Cannot register device\n");
goto err_free_queues;
}
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index d687cb7..78347041 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -167,7 +167,7 @@
}
if (j == 0) {
- wiphy_err(dev->wiphy, "disabling totally damaged %d GHz band\n",
+ wiphy_err(dev->wiphy, "Disabling totally damaged %d GHz band\n",
(band == IEEE80211_BAND_2GHZ) ? 2 : 5);
ret = -ENODATA;
@@ -695,12 +695,12 @@
u8 perm_addr[ETH_ALEN];
wiphy_warn(dev->wiphy,
- "invalid hwaddr! using randomly generated mac addr\n");
+ "Invalid hwaddr! Using randomly generated MAC addr\n");
random_ether_addr(perm_addr);
SET_IEEE80211_PERM_ADDR(dev, perm_addr);
}
- wiphy_info(dev->wiphy, "hwaddr %pm, mac:isl38%02x rf:%s\n",
+ wiphy_info(dev->wiphy, "hwaddr %pM, MAC:isl38%02x RF:%s\n",
dev->wiphy->perm_addr, priv->version,
p54_rf_chips[priv->rxhw]);
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index 47006bc..15b20c2 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -125,7 +125,7 @@
if (fw_version)
wiphy_info(priv->hw->wiphy,
- "fw rev %s - softmac protocol %x.%x\n",
+ "FW rev %s - Softmac protocol %x.%x\n",
fw_version, priv->fw_var >> 8, priv->fw_var & 0xff);
if (priv->fw_var < 0x500)
diff --git a/drivers/net/wireless/p54/led.c b/drivers/net/wireless/p54/led.c
index ea91f5c..3837e1e 100644
--- a/drivers/net/wireless/p54/led.c
+++ b/drivers/net/wireless/p54/led.c
@@ -58,7 +58,7 @@
err = p54_set_leds(priv);
if (err && net_ratelimit())
wiphy_err(priv->hw->wiphy,
- "failed to update leds (%d).\n", err);
+ "failed to update LEDs (%d).\n", err);
if (rerun)
ieee80211_queue_delayed_work(priv->hw, &priv->led_work,
@@ -103,7 +103,7 @@
err = led_classdev_register(wiphy_dev(priv->hw->wiphy), &led->led_dev);
if (err)
wiphy_err(priv->hw->wiphy,
- "failed to register %s led.\n", name);
+ "Failed to register %s LED.\n", name);
else
led->registered = 1;
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 822f8dc..1eacba4 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -466,7 +466,7 @@
P54P_READ(dev_int);
if (!wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ)) {
- wiphy_err(dev->wiphy, "cannot boot firmware!\n");
+ wiphy_err(dev->wiphy, "Cannot boot firmware!\n");
p54p_stop(dev);
return -ETIMEDOUT;
}
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 427b46f..0e937dc 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -446,7 +446,7 @@
}
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
- (!payload->status))
+ !(payload->status & P54_TX_FAILED))
info->flags |= IEEE80211_TX_STAT_ACK;
if (payload->status & P54_TX_PSM_CANCELLED)
info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
@@ -540,7 +540,7 @@
case P54_TRAP_BEACON_TX:
break;
case P54_TRAP_RADAR:
- wiphy_info(priv->hw->wiphy, "radar (freq:%d mhz)\n", freq);
+ wiphy_info(priv->hw->wiphy, "radar (freq:%d MHz)\n", freq);
break;
case P54_TRAP_NO_BEACON:
if (priv->vif)
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index b50c39a..30107ce 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -445,7 +445,7 @@
&priv->rx_ring_dma);
if (!priv->rx_ring || (unsigned long)priv->rx_ring & 0xFF) {
- wiphy_err(dev->wiphy, "cannot allocate rx ring\n");
+ wiphy_err(dev->wiphy, "Cannot allocate RX ring\n");
return -ENOMEM;
}
@@ -502,7 +502,7 @@
ring = pci_alloc_consistent(priv->pdev, sizeof(*ring) * entries, &dma);
if (!ring || (unsigned long)ring & 0xFF) {
- wiphy_err(dev->wiphy, "cannot allocate tx ring (prio = %d)\n",
+ wiphy_err(dev->wiphy, "Cannot allocate TX ring (prio = %d)\n",
prio);
return -ENOMEM;
}
@@ -568,7 +568,7 @@
ret = request_irq(priv->pdev->irq, rtl8180_interrupt,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (ret) {
- wiphy_err(dev->wiphy, "failed to register irq handler\n");
+ wiphy_err(dev->wiphy, "failed to register IRQ handler\n");
goto err_free_rings;
}
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index 5738a55..98e0351 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -573,7 +573,7 @@
} while (--i);
if (!i) {
- wiphy_err(dev->wiphy, "reset timeout!\n");
+ wiphy_err(dev->wiphy, "Reset timeout!\n");
return -ETIMEDOUT;
}
@@ -1526,7 +1526,7 @@
mutex_init(&priv->conf_mutex);
skb_queue_head_init(&priv->b_tx_status.queue);
- wiphy_info(dev->wiphy, "hwaddr %pm, %s v%d + %s, rfkill mask %d\n",
+ wiphy_info(dev->wiphy, "hwaddr %pM, %s V%d + %s, rfkill mask %d\n",
mac_addr, chip_name, priv->asic_rev, priv->rf->name,
priv->rfkill_mask);
diff --git a/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c b/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c
index fd96f91..97eebdc 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c
@@ -366,7 +366,7 @@
rtl8225_write(dev, 0x02, 0x044d);
msleep(100);
if (!(rtl8225_read(dev, 6) & (1 << 7)))
- wiphy_warn(dev->wiphy, "rf calibration failed! %x\n",
+ wiphy_warn(dev->wiphy, "RF Calibration Failed! %x\n",
rtl8225_read(dev, 6));
}
@@ -735,7 +735,7 @@
rtl8225_write(dev, 0x02, 0x044D);
msleep(100);
if (!(rtl8225_read(dev, 6) & (1 << 7)))
- wiphy_warn(dev->wiphy, "rf calibration failed! %x\n",
+ wiphy_warn(dev->wiphy, "RF Calibration Failed! %x\n",
rtl8225_read(dev, 6));
}
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index a9352b2..b7e755f 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -141,16 +141,6 @@
.notifier_call = module_load_notify,
};
-
-static void end_sync(void)
-{
- end_cpu_work();
- /* make sure we don't leak task structs */
- process_task_mortuary();
- process_task_mortuary();
-}
-
-
int sync_start(void)
{
int err;
@@ -158,7 +148,7 @@
if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
return -ENOMEM;
- start_cpu_work();
+ mutex_lock(&buffer_mutex);
err = task_handoff_register(&task_free_nb);
if (err)
@@ -173,7 +163,10 @@
if (err)
goto out4;
+ start_cpu_work();
+
out:
+ mutex_unlock(&buffer_mutex);
return err;
out4:
profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
@@ -182,7 +175,6 @@
out2:
task_handoff_unregister(&task_free_nb);
out1:
- end_sync();
free_cpumask_var(marked_cpus);
goto out;
}
@@ -190,11 +182,20 @@
void sync_stop(void)
{
+ /* flush buffers */
+ mutex_lock(&buffer_mutex);
+ end_cpu_work();
unregister_module_notifier(&module_load_nb);
profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
task_handoff_unregister(&task_free_nb);
- end_sync();
+ mutex_unlock(&buffer_mutex);
+ flush_scheduled_work();
+
+ /* make sure we don't leak task structs */
+ process_task_mortuary();
+ process_task_mortuary();
+
free_cpumask_var(marked_cpus);
}
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 219f79e..f179ac2 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -120,8 +120,6 @@
cancel_delayed_work(&b->work);
}
-
- flush_scheduled_work();
}
/*
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index b336cd9..f9bda64 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -225,26 +225,17 @@
mutex_unlock(&start_mutex);
}
-int oprofile_set_backtrace(unsigned long val)
+int oprofile_set_ulong(unsigned long *addr, unsigned long val)
{
- int err = 0;
+ int err = -EBUSY;
mutex_lock(&start_mutex);
-
- if (oprofile_started) {
- err = -EBUSY;
- goto out;
+ if (!oprofile_started) {
+ *addr = val;
+ err = 0;
}
-
- if (!oprofile_ops.backtrace) {
- err = -EINVAL;
- goto out;
- }
-
- oprofile_backtrace_depth = val;
-
-out:
mutex_unlock(&start_mutex);
+
return err;
}
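
oprofile_set_backtrace() is generalized above into oprofile_set_ulong(), a locked store that returns -EBUSY while profiling is running. A hypothetical caller, assuming the oprof.h declaration shown below:

	/* Hypothetical oprofilefs-style setter built on the new helper. */
	static unsigned long example_param;

	static int example_store(unsigned long val)
	{
		/* fails with -EBUSY if the profiler has been started */
		return oprofile_set_ulong(&example_param, val);
	}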
@@ -257,16 +248,9 @@
printk(KERN_INFO "oprofile: using timer interrupt.\n");
err = oprofile_timer_init(&oprofile_ops);
if (err)
- goto out_arch;
+ return err;
}
- err = oprofilefs_register();
- if (err)
- goto out_arch;
- return 0;
-
-out_arch:
- oprofile_arch_exit();
- return err;
+ return oprofilefs_register();
}
diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h
index 47e12cb..177b73d 100644
--- a/drivers/oprofile/oprof.h
+++ b/drivers/oprofile/oprof.h
@@ -37,7 +37,7 @@
int oprofile_timer_init(struct oprofile_operations *ops);
void oprofile_timer_exit(void);
-int oprofile_set_backtrace(unsigned long depth);
+int oprofile_set_ulong(unsigned long *addr, unsigned long val);
int oprofile_set_timeout(unsigned long time);
#endif /* OPROF_H */
diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
index bbd7516..ccf099e 100644
--- a/drivers/oprofile/oprofile_files.c
+++ b/drivers/oprofile/oprofile_files.c
@@ -79,14 +79,17 @@
if (*offset)
return -EINVAL;
+ if (!oprofile_ops.backtrace)
+ return -EINVAL;
+
retval = oprofilefs_ulong_from_user(&val, buf, count);
if (retval)
return retval;
- retval = oprofile_set_backtrace(val);
-
+ retval = oprofile_set_ulong(&oprofile_backtrace_depth, val);
if (retval)
return retval;
+
return count;
}
diff --git a/drivers/oprofile/oprofile_perf.c b/drivers/oprofile/oprofile_perf.c
new file mode 100644
index 0000000..9046f7b
--- /dev/null
+++ b/drivers/oprofile/oprofile_perf.c
@@ -0,0 +1,328 @@
+/*
+ * Copyright 2010 ARM Ltd.
+ *
+ * Perf-events backend for OProfile.
+ */
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/oprofile.h>
+#include <linux/slab.h>
+
+/*
+ * Per performance monitor configuration as set via oprofilefs.
+ */
+struct op_counter_config {
+ unsigned long count;
+ unsigned long enabled;
+ unsigned long event;
+ unsigned long unit_mask;
+ unsigned long kernel;
+ unsigned long user;
+ struct perf_event_attr attr;
+};
+
+static int oprofile_perf_enabled;
+static DEFINE_MUTEX(oprofile_perf_mutex);
+
+static struct op_counter_config *counter_config;
+static struct perf_event **perf_events[nr_cpumask_bits];
+static int num_counters;
+
+/*
+ * Overflow callback for oprofile.
+ */
+static void op_overflow_handler(struct perf_event *event, int unused,
+ struct perf_sample_data *data, struct pt_regs *regs)
+{
+ int id;
+ u32 cpu = smp_processor_id();
+
+ for (id = 0; id < num_counters; ++id)
+ if (perf_events[cpu][id] == event)
+ break;
+
+ if (id != num_counters)
+ oprofile_add_sample(regs, id);
+ else
+ pr_warning("oprofile: ignoring spurious overflow "
+ "on cpu %u\n", cpu);
+}
+
+/*
+ * Called by oprofile_perf_setup to create perf attributes to mirror the oprofile
+ * settings in counter_config. Attributes are created as `pinned' events and
+ * so are permanently scheduled on the PMU.
+ */
+static void op_perf_setup(void)
+{
+ int i;
+ u32 size = sizeof(struct perf_event_attr);
+ struct perf_event_attr *attr;
+
+ for (i = 0; i < num_counters; ++i) {
+ attr = &counter_config[i].attr;
+ memset(attr, 0, size);
+ attr->type = PERF_TYPE_RAW;
+ attr->size = size;
+ attr->config = counter_config[i].event;
+ attr->sample_period = counter_config[i].count;
+ attr->pinned = 1;
+ }
+}
+
+static int op_create_counter(int cpu, int event)
+{
+ struct perf_event *pevent;
+
+ if (!counter_config[event].enabled || perf_events[cpu][event])
+ return 0;
+
+ pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
+ cpu, NULL,
+ op_overflow_handler);
+
+ if (IS_ERR(pevent))
+ return PTR_ERR(pevent);
+
+ if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
+ perf_event_release_kernel(pevent);
+ pr_warning("oprofile: failed to enable event %d "
+ "on CPU %d\n", event, cpu);
+ return -EBUSY;
+ }
+
+ perf_events[cpu][event] = pevent;
+
+ return 0;
+}
+
+static void op_destroy_counter(int cpu, int event)
+{
+ struct perf_event *pevent = perf_events[cpu][event];
+
+ if (pevent) {
+ perf_event_release_kernel(pevent);
+ perf_events[cpu][event] = NULL;
+ }
+}
+
+/*
+ * Called by oprofile_perf_start to create active perf events based on the
+ * previously configured attributes.
+ */
+static int op_perf_start(void)
+{
+ int cpu, event, ret = 0;
+
+ for_each_online_cpu(cpu) {
+ for (event = 0; event < num_counters; ++event) {
+ ret = op_create_counter(cpu, event);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Called by oprofile_perf_stop at the end of a profiling run.
+ */
+static void op_perf_stop(void)
+{
+ int cpu, event;
+
+ for_each_online_cpu(cpu)
+ for (event = 0; event < num_counters; ++event)
+ op_destroy_counter(cpu, event);
+}
+
+static int oprofile_perf_create_files(struct super_block *sb, struct dentry *root)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_counters; i++) {
+ struct dentry *dir;
+ char buf[4];
+
+ snprintf(buf, sizeof buf, "%d", i);
+ dir = oprofilefs_mkdir(sb, root, buf);
+ oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
+ oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
+ oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
+ oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
+ oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
+ oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
+ }
+
+ return 0;
+}
+
+static int oprofile_perf_setup(void)
+{
+ spin_lock(&oprofilefs_lock);
+ op_perf_setup();
+ spin_unlock(&oprofilefs_lock);
+ return 0;
+}
+
+static int oprofile_perf_start(void)
+{
+ int ret = -EBUSY;
+
+ mutex_lock(&oprofile_perf_mutex);
+ if (!oprofile_perf_enabled) {
+ ret = 0;
+ op_perf_start();
+ oprofile_perf_enabled = 1;
+ }
+ mutex_unlock(&oprofile_perf_mutex);
+ return ret;
+}
+
+static void oprofile_perf_stop(void)
+{
+ mutex_lock(&oprofile_perf_mutex);
+ if (oprofile_perf_enabled)
+ op_perf_stop();
+ oprofile_perf_enabled = 0;
+ mutex_unlock(&oprofile_perf_mutex);
+}
+
+#ifdef CONFIG_PM
+
+static int oprofile_perf_suspend(struct platform_device *dev, pm_message_t state)
+{
+ mutex_lock(&oprofile_perf_mutex);
+ if (oprofile_perf_enabled)
+ op_perf_stop();
+ mutex_unlock(&oprofile_perf_mutex);
+ return 0;
+}
+
+static int oprofile_perf_resume(struct platform_device *dev)
+{
+ mutex_lock(&oprofile_perf_mutex);
+ if (oprofile_perf_enabled && op_perf_start())
+ oprofile_perf_enabled = 0;
+ mutex_unlock(&oprofile_perf_mutex);
+ return 0;
+}
+
+static struct platform_driver oprofile_driver = {
+ .driver = {
+ .name = "oprofile-perf",
+ },
+ .resume = oprofile_perf_resume,
+ .suspend = oprofile_perf_suspend,
+};
+
+static struct platform_device *oprofile_pdev;
+
+static int __init init_driverfs(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&oprofile_driver);
+ if (ret)
+ return ret;
+
+ oprofile_pdev = platform_device_register_simple(
+ oprofile_driver.driver.name, 0, NULL, 0);
+ if (IS_ERR(oprofile_pdev)) {
+ ret = PTR_ERR(oprofile_pdev);
+ platform_driver_unregister(&oprofile_driver);
+ }
+
+ return ret;
+}
+
+static void exit_driverfs(void)
+{
+ platform_device_unregister(oprofile_pdev);
+ platform_driver_unregister(&oprofile_driver);
+}
+
+#else
+
+static inline int init_driverfs(void) { return 0; }
+static inline void exit_driverfs(void) { }
+
+#endif /* CONFIG_PM */
+
+void oprofile_perf_exit(void)
+{
+ int cpu, id;
+ struct perf_event *event;
+
+ for_each_possible_cpu(cpu) {
+ for (id = 0; id < num_counters; ++id) {
+ event = perf_events[cpu][id];
+ if (event)
+ perf_event_release_kernel(event);
+ }
+
+ kfree(perf_events[cpu]);
+ }
+
+ kfree(counter_config);
+ exit_driverfs();
+}
+
+int __init oprofile_perf_init(struct oprofile_operations *ops)
+{
+ int cpu, ret = 0;
+
+ ret = init_driverfs();
+ if (ret)
+ return ret;
+
+ memset(&perf_events, 0, sizeof(perf_events));
+
+ num_counters = perf_num_counters();
+ if (num_counters <= 0) {
+ pr_info("oprofile: no performance counters\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ counter_config = kcalloc(num_counters,
+ sizeof(struct op_counter_config), GFP_KERNEL);
+
+ if (!counter_config) {
+ pr_info("oprofile: failed to allocate %d "
+ "counters\n", num_counters);
+ ret = -ENOMEM;
+ num_counters = 0;
+ goto out;
+ }
+
+ for_each_possible_cpu(cpu) {
+ perf_events[cpu] = kcalloc(num_counters,
+ sizeof(struct perf_event *), GFP_KERNEL);
+ if (!perf_events[cpu]) {
+ pr_info("oprofile: failed to allocate %d perf events "
+ "for cpu %d\n", num_counters, cpu);
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+
+ ops->create_files = oprofile_perf_create_files;
+ ops->setup = oprofile_perf_setup;
+ ops->start = oprofile_perf_start;
+ ops->stop = oprofile_perf_stop;
+ ops->shutdown = oprofile_perf_stop;
+ ops->cpu_type = op_name_from_perf_id();
+
+ if (!ops->cpu_type)
+ ret = -ENODEV;
+ else
+ pr_info("oprofile: using %s\n", ops->cpu_type);
+
+out:
+ if (ret)
+ oprofile_perf_exit();
+
+ return ret;
+}
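Usage sketch (illustrative; assumes the ARM-style wiring this backend was
factored out of): an architecture supplies perf_num_counters() and
op_name_from_perf_id(), then hooks the backend up from its own init code:

	int __init oprofile_arch_init(struct oprofile_operations *ops)
	{
		return oprofile_perf_init(ops);
	}

	void oprofile_arch_exit(void)
	{
		oprofile_perf_exit();
	}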
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index 2766a6d..1944621 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -91,16 +91,20 @@
static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset)
{
- unsigned long *value = file->private_data;
+ unsigned long value;
int retval;
if (*offset)
return -EINVAL;
- retval = oprofilefs_ulong_from_user(value, buf, count);
-
+ retval = oprofilefs_ulong_from_user(&value, buf, count);
if (retval)
return retval;
+
+ retval = oprofile_set_ulong(file->private_data, value);
+ if (retval)
+ return retval;
+
return count;
}
@@ -126,50 +130,41 @@
};
-static struct dentry *__oprofilefs_create_file(struct super_block *sb,
+static int __oprofilefs_create_file(struct super_block *sb,
struct dentry *root, char const *name, const struct file_operations *fops,
- int perm)
+ int perm, void *priv)
{
struct dentry *dentry;
struct inode *inode;
dentry = d_alloc_name(root, name);
if (!dentry)
- return NULL;
+ return -ENOMEM;
inode = oprofilefs_get_inode(sb, S_IFREG | perm);
if (!inode) {
dput(dentry);
- return NULL;
+ return -ENOMEM;
}
inode->i_fop = fops;
d_add(dentry, inode);
- return dentry;
+ dentry->d_inode->i_private = priv;
+ return 0;
}
int oprofilefs_create_ulong(struct super_block *sb, struct dentry *root,
char const *name, unsigned long *val)
{
- struct dentry *d = __oprofilefs_create_file(sb, root, name,
- &ulong_fops, 0644);
- if (!d)
- return -EFAULT;
-
- d->d_inode->i_private = val;
- return 0;
+ return __oprofilefs_create_file(sb, root, name,
+ &ulong_fops, 0644, val);
}
int oprofilefs_create_ro_ulong(struct super_block *sb, struct dentry *root,
char const *name, unsigned long *val)
{
- struct dentry *d = __oprofilefs_create_file(sb, root, name,
- &ulong_ro_fops, 0444);
- if (!d)
- return -EFAULT;
-
- d->d_inode->i_private = val;
- return 0;
+ return __oprofilefs_create_file(sb, root, name,
+ &ulong_ro_fops, 0444, val);
}
@@ -189,31 +184,22 @@
int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
char const *name, atomic_t *val)
{
- struct dentry *d = __oprofilefs_create_file(sb, root, name,
- &atomic_ro_fops, 0444);
- if (!d)
- return -EFAULT;
-
- d->d_inode->i_private = val;
- return 0;
+ return __oprofilefs_create_file(sb, root, name,
+ &atomic_ro_fops, 0444, val);
}
int oprofilefs_create_file(struct super_block *sb, struct dentry *root,
char const *name, const struct file_operations *fops)
{
- if (!__oprofilefs_create_file(sb, root, name, fops, 0644))
- return -EFAULT;
- return 0;
+ return __oprofilefs_create_file(sb, root, name, fops, 0644, NULL);
}
int oprofilefs_create_file_perm(struct super_block *sb, struct dentry *root,
char const *name, const struct file_operations *fops, int perm)
{
- if (!__oprofilefs_create_file(sb, root, name, fops, perm))
- return -EFAULT;
- return 0;
+ return __oprofilefs_create_file(sb, root, name, fops, perm, NULL);
}
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index dffa5d4..a2d9d1e 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -306,7 +306,7 @@
spin_lock_init(&tmp->pardevice_lock);
tmp->ieee1284.mode = IEEE1284_MODE_COMPAT;
tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
- init_MUTEX_LOCKED (&tmp->ieee1284.irq); /* actually a semaphore at 0 */
+ sema_init(&tmp->ieee1284.irq, 0);
tmp->spintime = parport_default_spintime;
atomic_set (&tmp->ref_count, 1);
INIT_LIST_HEAD(&tmp->full_list);
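Annotation: init_MUTEX_LOCKED() was being removed from the tree, and
sema_init(sem, 0) is its direct equivalent: a semaphore created with a
count of zero starts out held, so the first down() blocks until the
interrupt path calls up().

	sema_init(&sem, 0);	/* same as the old init_MUTEX_LOCKED(&sem) */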
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 45fcc1e..3bc72d1 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -338,9 +338,7 @@
acpi_handle chandle, handle;
struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
- flags &= (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
- OSC_SHPC_NATIVE_HP_CONTROL |
- OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
+ flags &= OSC_SHPC_NATIVE_HP_CONTROL;
if (!flags) {
err("Invalid flags %u specified!\n", flags);
return -EINVAL;
@@ -360,7 +358,7 @@
acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
dbg("Trying to get hotplug control for %s\n",
(char *)string.pointer);
- status = acpi_pci_osc_control_set(handle, flags);
+ status = acpi_pci_osc_control_set(handle, &flags, flags);
if (ACPI_SUCCESS(status))
goto got_one;
if (status == AE_SUPPORT)
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 4ed76b4..73d5139 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -176,19 +176,11 @@
{
pciehp_acpi_slot_detection_init();
}
-
-static inline int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev)
-{
- int retval;
- u32 flags = (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
- OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
- retval = acpi_get_hp_hw_control_from_firmware(dev, flags);
- if (retval)
- return retval;
- return pciehp_acpi_slot_detection_check(dev);
-}
#else
#define pciehp_firmware_init() do {} while (0)
-#define pciehp_get_hp_hw_control_from_firmware(dev) 0
+static inline int pciehp_acpi_slot_detection_check(struct pci_dev *dev)
+{
+ return 0;
+}
#endif /* CONFIG_ACPI */
#endif /* _PCIEHP_H */
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c
index 1f4000a..2574700 100644
--- a/drivers/pci/hotplug/pciehp_acpi.c
+++ b/drivers/pci/hotplug/pciehp_acpi.c
@@ -85,9 +85,7 @@
acpi_handle handle;
struct dummy_slot *slot, *tmp;
struct pci_dev *pdev = dev->port;
- /* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */
- if (pciehp_get_hp_hw_control_from_firmware(pdev))
- return -ENODEV;
+
pos = pci_pcie_cap(pdev);
if (!pos)
return -ENODEV;
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 3588ea6..aa5f3ff 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -59,7 +59,7 @@
MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not");
MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not");
MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds");
-MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if _OSC and OSHP are missing");
+MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if OSHP is missing");
#define PCIE_MODULE_NAME "pciehp"
@@ -235,7 +235,7 @@
dev_info(&dev->device,
"Bypassing BIOS check for pciehp use on %s\n",
pci_name(dev->port));
- else if (pciehp_get_hp_hw_control_from_firmware(dev->port))
+ else if (pciehp_acpi_slot_detection_check(dev->port))
goto err_out_none;
ctrl = pcie_init(dev);
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index c3ceebb..4789f8e 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -71,6 +71,49 @@
#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
+/* page table handling */
+#define LEVEL_STRIDE (9)
+#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
+
+static inline int agaw_to_level(int agaw)
+{
+ return agaw + 2;
+}
+
+static inline int agaw_to_width(int agaw)
+{
+ return 30 + agaw * LEVEL_STRIDE;
+}
+
+static inline int width_to_agaw(int width)
+{
+ return (width - 30) / LEVEL_STRIDE;
+}
+
+static inline unsigned int level_to_offset_bits(int level)
+{
+ return (level - 1) * LEVEL_STRIDE;
+}
+
+static inline int pfn_level_offset(unsigned long pfn, int level)
+{
+ return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
+}
+
+static inline unsigned long level_mask(int level)
+{
+ return -1UL << level_to_offset_bits(level);
+}
+
+static inline unsigned long level_size(int level)
+{
+ return 1UL << level_to_offset_bits(level);
+}
+
+static inline unsigned long align_to_level(unsigned long pfn, int level)
+{
+ return (pfn + level_size(level) - 1) & level_mask(level);
+}
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
are never going to work. */
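Worked example for the relocated helpers (annotation, not part of the
patch): with LEVEL_STRIDE == 9, width_to_agaw(48) == 2 and
agaw_to_level(2) == 4, so a 48-bit address width uses a four-level table
and each level consumes 9 bits of the pfn:

	/* for pfn == 0x12345 (illustrative values): */
	pfn_level_offset(0x12345, 1);	/* 0x12345 & 0x1ff        == 0x145 */
	pfn_level_offset(0x12345, 2);	/* (0x12345 >> 9) & 0x1ff == 0x91  */
	level_size(2);			/* 1UL << 9               == 512   */
	align_to_level(0x12345, 2);	/* rounds up to 0x12400            */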
@@ -434,8 +477,6 @@
}
-static inline int width_to_agaw(int width);
-
static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
unsigned long sagaw;
@@ -646,51 +687,6 @@
spin_unlock_irqrestore(&iommu->lock, flags);
}
-/* page table handling */
-#define LEVEL_STRIDE (9)
-#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
-
-static inline int agaw_to_level(int agaw)
-{
- return agaw + 2;
-}
-
-static inline int agaw_to_width(int agaw)
-{
- return 30 + agaw * LEVEL_STRIDE;
-
-}
-
-static inline int width_to_agaw(int width)
-{
- return (width - 30) / LEVEL_STRIDE;
-}
-
-static inline unsigned int level_to_offset_bits(int level)
-{
- return (level - 1) * LEVEL_STRIDE;
-}
-
-static inline int pfn_level_offset(unsigned long pfn, int level)
-{
- return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
-}
-
-static inline unsigned long level_mask(int level)
-{
- return -1UL << level_to_offset_bits(level);
-}
-
-static inline unsigned long level_size(int level)
-{
- return 1UL << level_to_offset_bits(level);
-}
-
-static inline unsigned long align_to_level(unsigned long pfn, int level)
-{
- return (pfn + level_size(level) - 1) & level_mask(level);
-}
-
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
unsigned long pfn)
{
@@ -3761,6 +3757,33 @@
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
+#define GGC 0x52
+#define GGC_MEMORY_SIZE_MASK (0xf << 8)
+#define GGC_MEMORY_SIZE_NONE (0x0 << 8)
+#define GGC_MEMORY_SIZE_1M (0x1 << 8)
+#define GGC_MEMORY_SIZE_2M (0x3 << 8)
+#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
+#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
+#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
+#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
+
+static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
+{
+ unsigned short ggc;
+
+ if (pci_read_config_word(dev, GGC, &ggc))
+ return;
+
+ if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
+ printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
+ dmar_map_gfx = 0;
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
+
/* On Tylersburg chipsets, some BIOSes have been known to enable the
ISOCH DMAR unit for the Azalia sound device, but not give it any
TLB entries, which causes it to deadlock. Check for that. We do
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index ce6a366..553d8ee 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -608,7 +608,7 @@
* the VF BAR size multiplied by the number of VFs. The alignment
* is just the VF BAR size.
*/
-int pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
+resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
{
struct resource tmp;
enum pci_bar_type type;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 679c39d..6beb11b 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -140,8 +140,10 @@
#ifdef CONFIG_PCIEAER
void pci_no_aer(void);
+bool pci_aer_available(void);
#else
static inline void pci_no_aer(void) { }
+static inline bool pci_aer_available(void) { return false; }
#endif
static inline int pci_no_d1d2(struct pci_dev *dev)
@@ -262,7 +264,8 @@
extern void pci_iov_release(struct pci_dev *dev);
extern int pci_iov_resource_bar(struct pci_dev *dev, int resno,
enum pci_bar_type *type);
-extern int pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
+extern resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev,
+ int resno);
extern void pci_restore_iov_state(struct pci_dev *dev);
extern int pci_iov_bus_range(struct pci_bus *bus);
@@ -318,7 +321,7 @@
}
#endif /* CONFIG_PCI_IOV */
-static inline int pci_resource_alignment(struct pci_dev *dev,
+static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
struct resource *res)
{
#ifdef CONFIG_PCI_IOV
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index ea65454..00c62df 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -6,10 +6,11 @@
obj-$(CONFIG_PCIEASPM) += aspm.o
pcieportdrv-y := portdrv_core.o portdrv_pci.o portdrv_bus.o
+pcieportdrv-$(CONFIG_ACPI) += portdrv_acpi.o
obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o
# Build PCI Express AER if needed
obj-$(CONFIG_PCIEAER) += aer/
-obj-$(CONFIG_PCIE_PME) += pme/
+obj-$(CONFIG_PCIE_PME) += pme.o
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 484cc55..f409948 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -72,6 +72,11 @@
pcie_aer_disable = 1; /* has priority over 'forceload' */
}
+bool pci_aer_available(void)
+{
+ return !pcie_aer_disable && pci_msi_enabled();
+}
+
static int set_device_error_reporting(struct pci_dev *dev, void *data)
{
bool enable = *((bool *)data);
@@ -411,9 +416,7 @@
*/
static int __init aer_service_init(void)
{
- if (pcie_aer_disable)
- return -ENXIO;
- if (!pci_msi_enabled())
+ if (!pci_aer_available())
return -ENXIO;
return pcie_port_service_register(&aerdriver);
}
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index f278d7b..2bb9b89 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -19,42 +19,6 @@
#include <acpi/apei.h>
#include "aerdrv.h"
-/**
- * aer_osc_setup - run ACPI _OSC method
- * @pciedev: pcie_device which AER is being enabled on
- *
- * @return: Zero on success. Nonzero otherwise.
- *
- * Invoked when PCIe bus loads AER service driver. To avoid conflict with
- * BIOS AER support requires BIOS to yield AER control to OS native driver.
- **/
-int aer_osc_setup(struct pcie_device *pciedev)
-{
- acpi_status status = AE_NOT_FOUND;
- struct pci_dev *pdev = pciedev->port;
- acpi_handle handle = NULL;
-
- if (acpi_pci_disabled)
- return -1;
-
- handle = acpi_find_root_bridge_handle(pdev);
- if (handle) {
- status = acpi_pci_osc_control_set(handle,
- OSC_PCI_EXPRESS_AER_CONTROL |
- OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
- }
-
- if (ACPI_FAILURE(status)) {
- dev_printk(KERN_DEBUG, &pciedev->device, "AER service couldn't "
- "init device: %s\n",
- (status == AE_SUPPORT || status == AE_NOT_FOUND) ?
- "no _OSC support" : "_OSC failed");
- return -1;
- }
-
- return 0;
-}
-
#ifdef CONFIG_ACPI_APEI
static inline int hest_match_pci(struct acpi_hest_aer_common *p,
struct pci_dev *pci)
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index fc0b5a9..29e268f 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -772,22 +772,10 @@
*/
int aer_init(struct pcie_device *dev)
{
- if (pcie_aer_get_firmware_first(dev->port)) {
- dev_printk(KERN_DEBUG, &dev->device,
- "PCIe errors handled by platform firmware.\n");
- goto out;
- }
-
- if (aer_osc_setup(dev))
- goto out;
-
- return 0;
-out:
if (forceload) {
dev_printk(KERN_DEBUG, &dev->device,
"aerdrv forceload requested.\n");
pcie_aer_force_firmware_first(dev->port, 0);
- return 0;
}
- return -ENXIO;
+ return 0;
}
diff --git a/drivers/pci/pcie/pme/pcie_pme.c b/drivers/pci/pcie/pme.c
similarity index 82%
rename from drivers/pci/pcie/pme/pcie_pme.c
rename to drivers/pci/pcie/pme.c
index bbdea18..2f3c904 100644
--- a/drivers/pci/pcie/pme/pcie_pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -23,38 +23,13 @@
#include <linux/pci-acpi.h>
#include <linux/pm_runtime.h>
-#include "../../pci.h"
-#include "pcie_pme.h"
+#include "../pci.h"
+#include "portdrv.h"
#define PCI_EXP_RTSTA_PME 0x10000 /* PME status */
#define PCI_EXP_RTSTA_PENDING 0x20000 /* PME pending */
/*
- * If set, this switch will prevent the PCIe root port PME service driver from
- * being registered. Consequently, the interrupt-based PCIe PME signaling will
- * not be used by any PCIe root ports in that case.
- */
-static bool pcie_pme_disabled = true;
-
-/*
- * The PCI Express Base Specification 2.0, Section 6.1.8, states the following:
- * "In order to maintain compatibility with non-PCI Express-aware system
- * software, system power management logic must be configured by firmware to use
- * the legacy mechanism of signaling PME by default. PCI Express-aware system
- * software must notify the firmware prior to enabling native, interrupt-based
- * PME signaling." However, if the platform doesn't provide us with a suitable
- * notification mechanism or the notification fails, it is not clear whether or
- * not we are supposed to use the interrupt-based PCIe PME signaling. The
- * switch below can be used to indicate the desired behaviour. When set, it
- * will make the kernel use the interrupt-based PCIe PME signaling regardless of
- * the platform notification status, although the kernel will attempt to notify
- * the platform anyway. When unset, it will prevent the kernel from using the
- * the interrupt-based PCIe PME signaling if the platform notification fails,
- * which is the default.
- */
-static bool pcie_pme_force_enable;
-
-/*
* If this switch is set, MSI will not be used for PCIe PME signaling. This
* causes the PCIe port driver to use INTx interrupts only, but it turns out
* that using MSI for PCIe PME signaling doesn't play well with PCIe PME-based
@@ -64,38 +39,13 @@
static int __init pcie_pme_setup(char *str)
{
- if (!strncmp(str, "auto", 4))
- pcie_pme_disabled = false;
- else if (!strncmp(str, "force", 5))
- pcie_pme_force_enable = true;
-
- str = strchr(str, ',');
- if (str) {
- str++;
- str += strspn(str, " \t");
- if (*str && !strcmp(str, "nomsi"))
- pcie_pme_msi_disabled = true;
- }
+ if (!strncmp(str, "nomsi", 5))
+ pcie_pme_msi_disabled = true;
return 1;
}
__setup("pcie_pme=", pcie_pme_setup);
-/**
- * pcie_pme_platform_setup - Ensure that the kernel controls the PCIe PME.
- * @srv: PCIe PME root port service to use for carrying out the check.
- *
- * Notify the platform that the native PCIe PME is going to be used and return
- * 'true' if the control of the PCIe PME registers has been acquired from the
- * platform.
- */
-static bool pcie_pme_platform_setup(struct pcie_device *srv)
-{
- if (!pcie_pme_platform_notify(srv))
- return true;
- return pcie_pme_force_enable;
-}
-
struct pcie_pme_service_data {
spinlock_t lock;
struct pcie_device *srv;
@@ -108,7 +58,7 @@
* @dev: PCIe root port or event collector.
* @enable: Enable or disable the interrupt.
*/
-static void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable)
+void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable)
{
int rtctl_pos;
u16 rtctl;
@@ -417,9 +367,6 @@
struct pcie_pme_service_data *data;
int ret;
- if (!pcie_pme_platform_setup(srv))
- return -EACCES;
-
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -509,8 +456,7 @@
*/
static int __init pcie_pme_service_init(void)
{
- return pcie_pme_disabled ?
- -ENODEV : pcie_port_service_register(&pcie_pme_driver);
+ return pcie_port_service_register(&pcie_pme_driver);
}
module_init(pcie_pme_service_init);
diff --git a/drivers/pci/pcie/pme/Makefile b/drivers/pci/pcie/pme/Makefile
deleted file mode 100644
index 8b92380..0000000
--- a/drivers/pci/pcie/pme/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-#
-# Makefile for PCI-Express Root Port PME signaling driver
-#
-
-obj-$(CONFIG_PCIE_PME) += pmedriver.o
-
-pmedriver-objs := pcie_pme.o
-pmedriver-$(CONFIG_ACPI) += pcie_pme_acpi.o
diff --git a/drivers/pci/pcie/pme/pcie_pme.h b/drivers/pci/pcie/pme/pcie_pme.h
deleted file mode 100644
index b30d2b7..0000000
--- a/drivers/pci/pcie/pme/pcie_pme.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * drivers/pci/pcie/pme/pcie_pme.h
- *
- * PCI Express Root Port PME signaling support
- *
- * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
- */
-
-#ifndef _PCIE_PME_H_
-#define _PCIE_PME_H_
-
-struct pcie_device;
-
-#ifdef CONFIG_ACPI
-extern int pcie_pme_acpi_setup(struct pcie_device *srv);
-
-static inline int pcie_pme_platform_notify(struct pcie_device *srv)
-{
- return pcie_pme_acpi_setup(srv);
-}
-#else /* !CONFIG_ACPI */
-static inline int pcie_pme_platform_notify(struct pcie_device *srv)
-{
- return 0;
-}
-#endif /* !CONFIG_ACPI */
-
-#endif
diff --git a/drivers/pci/pcie/pme/pcie_pme_acpi.c b/drivers/pci/pcie/pme/pcie_pme_acpi.c
deleted file mode 100644
index 83ab228..0000000
--- a/drivers/pci/pcie/pme/pcie_pme_acpi.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * PCIe Native PME support, ACPI-related part
- *
- * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License V2. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/acpi.h>
-#include <linux/pci-acpi.h>
-#include <linux/pcieport_if.h>
-
-/**
- * pcie_pme_acpi_setup - Request the ACPI BIOS to release control over PCIe PME.
- * @srv - PCIe PME service for a root port or event collector.
- *
- * Invoked when the PCIe bus type loads PCIe PME service driver. To avoid
- * conflict with the BIOS PCIe support requires the BIOS to yield PCIe PME
- * control to the kernel.
- */
-int pcie_pme_acpi_setup(struct pcie_device *srv)
-{
- acpi_status status = AE_NOT_FOUND;
- struct pci_dev *port = srv->port;
- acpi_handle handle;
- int error = 0;
-
- if (acpi_pci_disabled)
- return -ENOSYS;
-
- dev_info(&port->dev, "Requesting control of PCIe PME from ACPI BIOS\n");
-
- handle = acpi_find_root_bridge_handle(port);
- if (!handle)
- return -EINVAL;
-
- status = acpi_pci_osc_control_set(handle,
- OSC_PCI_EXPRESS_PME_CONTROL |
- OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
- if (ACPI_FAILURE(status)) {
- dev_info(&port->dev,
- "Failed to receive control of PCIe PME service: %s\n",
- (status == AE_SUPPORT || status == AE_NOT_FOUND) ?
- "no _OSC support" : "ACPI _OSC failed");
- error = -ENODEV;
- }
-
- return error;
-}
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 813a5c3..7b5aba0 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -20,6 +20,9 @@
#define get_descriptor_id(type, service) (((type - 4) << 4) | service)
+extern bool pcie_ports_disabled;
+extern bool pcie_ports_auto;
+
extern struct bus_type pcie_port_bus_type;
extern int pcie_port_device_register(struct pci_dev *dev);
#ifdef CONFIG_PM
@@ -30,6 +33,8 @@
extern int __must_check pcie_port_bus_register(void);
extern void pcie_port_bus_unregister(void);
+struct pci_dev;
+
#ifdef CONFIG_PCIE_PME
extern bool pcie_pme_msi_disabled;
@@ -42,9 +47,26 @@
{
return pcie_pme_msi_disabled;
}
+
+extern void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable);
#else /* !CONFIG_PCIE_PME */
static inline void pcie_pme_disable_msi(void) {}
static inline bool pcie_pme_no_msi(void) { return false; }
+static inline void pcie_pme_interrupt_enable(struct pci_dev *dev, bool en) {}
#endif /* !CONFIG_PCIE_PME */
+#ifdef CONFIG_ACPI
+extern int pcie_port_acpi_setup(struct pci_dev *port, int *mask);
+
+static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask)
+{
+ return pcie_port_acpi_setup(port, mask);
+}
+#else /* !CONFIG_ACPI */
+static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask)
+{
+ return 0;
+}
+#endif /* !CONFIG_ACPI */
+
#endif /* _PORTDRV_H_ */
diff --git a/drivers/pci/pcie/portdrv_acpi.c b/drivers/pci/pcie/portdrv_acpi.c
new file mode 100644
index 0000000..b7c4cb1
--- /dev/null
+++ b/drivers/pci/pcie/portdrv_acpi.c
@@ -0,0 +1,77 @@
+/*
+ * PCIe Port Native Services Support, ACPI-Related Part
+ *
+ * Copyright (C) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License V2. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/acpi.h>
+#include <linux/pci-acpi.h>
+#include <linux/pcieport_if.h>
+
+#include "aer/aerdrv.h"
+#include "../pci.h"
+
+/**
+ * pcie_port_acpi_setup - Request the BIOS to release control of PCIe services.
+ * @port: PCIe Port service for a root port or event collector.
+ * @srv_mask: Bit mask of services that can be enabled for @port.
+ *
+ * Invoked when @port is identified as a PCIe port device. To avoid conflicts
+ * with the BIOS, PCIe port native services support requires the BIOS to yield
+ * control of these services to the kernel. The mask of services that the BIOS
+ * allows to be enabled for @port is written to @srv_mask.
+ *
+ * NOTE: It turns out that we cannot do that for individual port services
+ * separately, because that would make some systems work incorrectly.
+ */
+int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
+{
+ acpi_status status;
+ acpi_handle handle;
+ u32 flags;
+
+ if (acpi_pci_disabled)
+ return 0;
+
+ handle = acpi_find_root_bridge_handle(port);
+ if (!handle)
+ return -EINVAL;
+
+ flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL
+ | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
+ | OSC_PCI_EXPRESS_PME_CONTROL;
+
+ if (pci_aer_available()) {
+ if (pcie_aer_get_firmware_first(port))
+ dev_dbg(&port->dev, "PCIe errors handled by BIOS.\n");
+ else
+ flags |= OSC_PCI_EXPRESS_AER_CONTROL;
+ }
+
+ status = acpi_pci_osc_control_set(handle, &flags,
+ OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
+ if (ACPI_FAILURE(status)) {
+ dev_dbg(&port->dev, "ACPI _OSC request failed (code %d)\n",
+ status);
+ return -ENODEV;
+ }
+
+ dev_info(&port->dev, "ACPI _OSC control granted for 0x%02x\n", flags);
+
+ *srv_mask = PCIE_PORT_SERVICE_VC;
+ if (flags & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
+ *srv_mask |= PCIE_PORT_SERVICE_HP;
+ if (flags & OSC_PCI_EXPRESS_PME_CONTROL)
+ *srv_mask |= PCIE_PORT_SERVICE_PME;
+ if (flags & OSC_PCI_EXPRESS_AER_CONTROL)
+ *srv_mask |= PCIE_PORT_SERVICE_AER;
+
+ return 0;
+}
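Worked example (annotation): if the _OSC request is granted with only the
hot-plug and PME bits set, i.e. flags comes back as
OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
OSC_PCI_EXPRESS_PME_CONTROL, then *srv_mask ends up as

	PCIE_PORT_SERVICE_VC | PCIE_PORT_SERVICE_HP | PCIE_PORT_SERVICE_PME

and the AER service driver is never bound to the port.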
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index e73effb..a9c222d 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -14,6 +14,8 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/pcieport_if.h>
+#include <linux/aer.h>
+#include <linux/pci-aspm.h>
#include "../pci.h"
#include "portdrv.h"
@@ -236,24 +238,64 @@
int services = 0, pos;
u16 reg16;
u32 reg32;
+ int cap_mask;
+ int err;
+
+ err = pcie_port_platform_notify(dev, &cap_mask);
+ if (pcie_ports_auto) {
+ if (err) {
+ pcie_no_aspm();
+ return 0;
+ }
+ } else {
+ cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP
+ | PCIE_PORT_SERVICE_VC;
+ if (pci_aer_available())
+ cap_mask |= PCIE_PORT_SERVICE_AER;
+ }
pos = pci_pcie_cap(dev);
pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);
/* Hot-Plug Capable */
- if (reg16 & PCI_EXP_FLAGS_SLOT) {
+ if ((cap_mask & PCIE_PORT_SERVICE_HP) && (reg16 & PCI_EXP_FLAGS_SLOT)) {
pci_read_config_dword(dev, pos + PCI_EXP_SLTCAP, &reg32);
- if (reg32 & PCI_EXP_SLTCAP_HPC)
+ if (reg32 & PCI_EXP_SLTCAP_HPC) {
services |= PCIE_PORT_SERVICE_HP;
+ /*
+ * Disable hot-plug interrupts in case they have been
+ * enabled by the BIOS and the hot-plug service driver
+ * is not loaded.
+ */
+ pos += PCI_EXP_SLTCTL;
+ pci_read_config_word(dev, pos, &reg16);
+ reg16 &= ~(PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE);
+ pci_write_config_word(dev, pos, reg16);
+ }
}
/* AER capable */
- if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR))
+ if ((cap_mask & PCIE_PORT_SERVICE_AER)
+ && pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)) {
services |= PCIE_PORT_SERVICE_AER;
+ /*
+ * Disable AER on this port in case it's been enabled by the
+ * BIOS (the AER service driver will enable it when necessary).
+ */
+ pci_disable_pcie_error_reporting(dev);
+ }
/* VC support */
if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC))
services |= PCIE_PORT_SERVICE_VC;
/* Root ports are capable of generating PME too */
- if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
+ if ((cap_mask & PCIE_PORT_SERVICE_PME)
+ && dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
services |= PCIE_PORT_SERVICE_PME;
+ /*
+ * Disable PME interrupt on this port in case it's been enabled
+ * by the BIOS (the PME service driver will enable it when
+ * necessary).
+ */
+ pcie_pme_interrupt_enable(dev, false);
+ }
return services;
}
@@ -494,6 +536,9 @@
*/
int pcie_port_service_register(struct pcie_port_service_driver *new)
{
+ if (pcie_ports_disabled)
+ return -ENODEV;
+
new->driver.name = (char *)new->name;
new->driver.bus = &pcie_port_bus_type;
new->driver.probe = pcie_port_probe_service;
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 3debed2..f9033e1 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -15,6 +15,7 @@
#include <linux/pcieport_if.h>
#include <linux/aer.h>
#include <linux/dmi.h>
+#include <linux/pci-aspm.h>
#include "portdrv.h"
#include "aer/aerdrv.h"
@@ -29,6 +30,31 @@
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
+/* If this switch is set, PCIe port native services should not be enabled. */
+bool pcie_ports_disabled;
+
+/*
+ * If this switch is set, ACPI _OSC will be used to determine whether or not to
+ * enable PCIe port native services.
+ */
+bool pcie_ports_auto = true;
+
+static int __init pcie_port_setup(char *str)
+{
+ if (!strncmp(str, "compat", 6)) {
+ pcie_ports_disabled = true;
+ } else if (!strncmp(str, "native", 6)) {
+ pcie_ports_disabled = false;
+ pcie_ports_auto = false;
+ } else if (!strncmp(str, "auto", 4)) {
+ pcie_ports_disabled = false;
+ pcie_ports_auto = true;
+ }
+
+ return 1;
+}
+__setup("pcie_ports=", pcie_port_setup);
+
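For reference (annotation), the accepted values on the kernel command
line are:

	pcie_ports=compat	# disable the native port services entirely
	pcie_ports=native	# enable them without consulting ACPI _OSC
	pcie_ports=auto		# default: let the _OSC negotiation decide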
/* global data */
static int pcie_portdrv_restore_config(struct pci_dev *dev)
@@ -301,6 +327,11 @@
{
int retval;
+ if (pcie_ports_disabled) {
+ pcie_no_aspm();
+ return -EACCES;
+ }
+
dmi_check_system(pcie_portdrv_dmi_table);
retval = pcie_port_bus_register();
@@ -315,11 +346,4 @@
return retval;
}
-static void __exit pcie_portdrv_exit(void)
-{
- pci_unregister_driver(&pcie_portdriver);
- pcie_port_bus_unregister();
-}
-
module_init(pcie_portdrv_init);
-module_exit(pcie_portdrv_exit);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 89ed181..857ae01 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -163,6 +163,26 @@
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs);
/*
+ * Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear
+ * for some HT machines to use C4 w/o hanging.
+ */
+static void __devinit quirk_tigerpoint_bm_sts(struct pci_dev *dev)
+{
+ u32 pmbase;
+ u16 pm1a;
+
+ pci_read_config_dword(dev, 0x40, &pmbase);
+ pmbase = pmbase & 0xff80;
+ pm1a = inw(pmbase);
+
+ if (pm1a & 0x10) {
+ dev_info(&dev->dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n");
+ outw(0x10, pmbase);
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts);
+
+/*
* Chipsets where PCI->PCI transfers vanish or hang
*/
static void __devinit quirk_nopcipci(struct pci_dev *dev)
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 659eaa0..968cfea 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -49,7 +49,7 @@
}
/* these strings match up with the values in pci_bus_speed */
-static char *pci_bus_speed_strings[] = {
+static const char *pci_bus_speed_strings[] = {
"33 MHz PCI", /* 0x00 */
"66 MHz PCI", /* 0x01 */
"66 MHz PCI-X", /* 0x02 */
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index 54aa1c2..9ba4dad 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -163,7 +163,7 @@
c = p_dev->function_config;
if (!(c->state & CONFIG_LOCKED)) {
- dev_dbg(&s->dev, "Configuration isnt't locked\n");
+ dev_dbg(&p_dev->dev, "Configuration isnt't locked\n");
mutex_unlock(&s->ops_mutex);
return -EACCES;
}
@@ -220,7 +220,7 @@
s->win[w].card_start = offset;
ret = s->ops->set_mem_map(s, &s->win[w]);
if (ret)
- dev_warn(&s->dev, "failed to set_mem_map\n");
+ dev_warn(&p_dev->dev, "failed to set_mem_map\n");
mutex_unlock(&s->ops_mutex);
return ret;
} /* pcmcia_map_mem_page */
@@ -244,18 +244,18 @@
c = p_dev->function_config;
if (!(s->state & SOCKET_PRESENT)) {
- dev_dbg(&s->dev, "No card present\n");
+ dev_dbg(&p_dev->dev, "No card present\n");
ret = -ENODEV;
goto unlock;
}
if (!(c->state & CONFIG_LOCKED)) {
- dev_dbg(&s->dev, "Configuration isnt't locked\n");
+ dev_dbg(&p_dev->dev, "Configuration isnt't locked\n");
ret = -EACCES;
goto unlock;
}
if (mod->Attributes & (CONF_IRQ_CHANGE_VALID | CONF_VCC_CHANGE_VALID)) {
- dev_dbg(&s->dev,
+ dev_dbg(&p_dev->dev,
"changing Vcc or IRQ is not allowed at this time\n");
ret = -EINVAL;
goto unlock;
@@ -265,20 +265,22 @@
if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) &&
(mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
if (mod->Vpp1 != mod->Vpp2) {
- dev_dbg(&s->dev, "Vpp1 and Vpp2 must be the same\n");
+ dev_dbg(&p_dev->dev,
+ "Vpp1 and Vpp2 must be the same\n");
ret = -EINVAL;
goto unlock;
}
s->socket.Vpp = mod->Vpp1;
if (s->ops->set_socket(s, &s->socket)) {
- dev_printk(KERN_WARNING, &s->dev,
+ dev_printk(KERN_WARNING, &p_dev->dev,
"Unable to set VPP\n");
ret = -EIO;
goto unlock;
}
} else if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) ||
(mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
- dev_dbg(&s->dev, "changing Vcc is not allowed at this time\n");
+ dev_dbg(&p_dev->dev,
+ "changing Vcc is not allowed at this time\n");
ret = -EINVAL;
goto unlock;
}
@@ -401,7 +403,7 @@
win = &s->win[w];
if (!(p_dev->_win & CLIENT_WIN_REQ(w))) {
- dev_dbg(&s->dev, "not releasing unknown window\n");
+ dev_dbg(&p_dev->dev, "not releasing unknown window\n");
mutex_unlock(&s->ops_mutex);
return -EINVAL;
}
@@ -439,7 +441,7 @@
return -ENODEV;
if (req->IntType & INT_CARDBUS) {
- dev_dbg(&s->dev, "IntType may not be INT_CARDBUS\n");
+ dev_dbg(&p_dev->dev, "IntType may not be INT_CARDBUS\n");
return -EINVAL;
}
@@ -447,7 +449,7 @@
c = p_dev->function_config;
if (c->state & CONFIG_LOCKED) {
mutex_unlock(&s->ops_mutex);
- dev_dbg(&s->dev, "Configuration is locked\n");
+ dev_dbg(&p_dev->dev, "Configuration is locked\n");
return -EACCES;
}
@@ -455,7 +457,7 @@
s->socket.Vpp = req->Vpp;
if (s->ops->set_socket(s, &s->socket)) {
mutex_unlock(&s->ops_mutex);
- dev_printk(KERN_WARNING, &s->dev,
+ dev_printk(KERN_WARNING, &p_dev->dev,
"Unable to set socket state\n");
return -EINVAL;
}
@@ -569,19 +571,20 @@
int ret = -EINVAL;
mutex_lock(&s->ops_mutex);
- dev_dbg(&s->dev, "pcmcia_request_io: %pR , %pR", &c->io[0], &c->io[1]);
+ dev_dbg(&p_dev->dev, "pcmcia_request_io: %pR , %pR",
+ &c->io[0], &c->io[1]);
if (!(s->state & SOCKET_PRESENT)) {
- dev_dbg(&s->dev, "pcmcia_request_io: No card present\n");
+ dev_dbg(&p_dev->dev, "pcmcia_request_io: No card present\n");
goto out;
}
if (c->state & CONFIG_LOCKED) {
- dev_dbg(&s->dev, "Configuration is locked\n");
+ dev_dbg(&p_dev->dev, "Configuration is locked\n");
goto out;
}
if (c->state & CONFIG_IO_REQ) {
- dev_dbg(&s->dev, "IO already configured\n");
+ dev_dbg(&p_dev->dev, "IO already configured\n");
goto out;
}
@@ -592,7 +595,13 @@
if (c->io[1].end) {
ret = alloc_io_space(s, &c->io[1], p_dev->io_lines);
if (ret) {
+ struct resource tmp = c->io[0];
+ /* release the previously allocated resource */
release_io_space(s, &c->io[0]);
+ /* but preserve the settings, for they worked... */
+ c->io[0].end = resource_size(&tmp);
+ c->io[0].start = tmp.start;
+ c->io[0].flags = tmp.flags;
goto out;
}
} else
@@ -601,7 +610,7 @@
c->state |= CONFIG_IO_REQ;
p_dev->_io = 1;
- dev_dbg(&s->dev, "pcmcia_request_io succeeded: %pR , %pR",
+ dev_dbg(&p_dev->dev, "pcmcia_request_io succeeded: %pR , %pR",
&c->io[0], &c->io[1]);
out:
mutex_unlock(&s->ops_mutex);
@@ -800,7 +809,7 @@
int w;
if (!(s->state & SOCKET_PRESENT)) {
- dev_dbg(&s->dev, "No card present\n");
+ dev_dbg(&p_dev->dev, "No card present\n");
return -ENODEV;
}
@@ -809,12 +818,12 @@
req->Size = s->map_size;
align = (s->features & SS_CAP_MEM_ALIGN) ? req->Size : s->map_size;
if (req->Size & (s->map_size-1)) {
- dev_dbg(&s->dev, "invalid map size\n");
+ dev_dbg(&p_dev->dev, "invalid map size\n");
return -EINVAL;
}
if ((req->Base && (s->features & SS_CAP_STATIC_MAP)) ||
(req->Base & (align-1))) {
- dev_dbg(&s->dev, "invalid base address\n");
+ dev_dbg(&p_dev->dev, "invalid base address\n");
return -EINVAL;
}
if (req->Base)
@@ -826,7 +835,7 @@
if (!(s->state & SOCKET_WIN_REQ(w)))
break;
if (w == MAX_WIN) {
- dev_dbg(&s->dev, "all windows are used already\n");
+ dev_dbg(&p_dev->dev, "all windows are used already\n");
mutex_unlock(&s->ops_mutex);
return -EINVAL;
}
@@ -837,7 +846,7 @@
win->res = pcmcia_find_mem_region(req->Base, req->Size, align,
0, s);
if (!win->res) {
- dev_dbg(&s->dev, "allocating mem region failed\n");
+ dev_dbg(&p_dev->dev, "allocating mem region failed\n");
mutex_unlock(&s->ops_mutex);
return -EINVAL;
}
@@ -851,7 +860,7 @@
win->card_start = 0;
if (s->ops->set_mem_map(s, win) != 0) {
- dev_dbg(&s->dev, "failed to set memory mapping\n");
+ dev_dbg(&p_dev->dev, "failed to set memory mapping\n");
mutex_unlock(&s->ops_mutex);
return -EIO;
}
@@ -874,7 +883,7 @@
if (win->res)
request_resource(&iomem_resource, res);
- dev_dbg(&s->dev, "request_window results in %pR\n", res);
+ dev_dbg(&p_dev->dev, "request_window results in %pR\n", res);
mutex_unlock(&s->ops_mutex);
*wh = res;
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index b8a869a..deef665 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -646,7 +646,7 @@
if (!pci_resource_start(dev, 0)) {
dev_warn(&dev->dev, "refusing to load the driver as the "
"io_base is NULL.\n");
- goto err_out_free_mem;
+ goto err_out_disable;
}
dev_info(&dev->dev, "Cirrus PD6729 PCI to PCMCIA Bridge at 0x%llx "
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 044f430..cff7cc2 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -486,10 +486,12 @@
config ACPI_TOSHIBA
tristate "Toshiba Laptop Extras"
depends on ACPI
+ depends on LEDS_CLASS
+ depends on NEW_LEDS
+ depends on BACKLIGHT_CLASS_DEVICE
depends on INPUT
depends on RFKILL || RFKILL = n
select INPUT_POLLDEV
- select BACKLIGHT_CLASS_DEVICE
---help---
This driver adds support for access to certain system settings
on "legacy free" Toshiba laptops. These laptops can be recognized by
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index f155163..c174114 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -79,12 +79,13 @@
u32 command;
u32 commandtype;
u32 datasize;
- char *data;
+ u32 data;
};
struct bios_return {
u32 sigpass;
u32 return_code;
+ u32 value;
};
struct key_entry {
@@ -148,7 +149,7 @@
* buffer = kzalloc(128, GFP_KERNEL);
* ret = hp_wmi_perform_query(0x7, 0, buffer, 128)
*/
-static int hp_wmi_perform_query(int query, int write, char *buffer,
+static int hp_wmi_perform_query(int query, int write, u32 *buffer,
int buffersize)
{
struct bios_return bios_return;
@@ -159,7 +160,7 @@
.command = write ? 0x2 : 0x1,
.commandtype = query,
.datasize = buffersize,
- .data = buffer,
+ .data = *buffer,
};
struct acpi_buffer input = { sizeof(struct bios_args), &args };
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -177,29 +178,14 @@
bios_return = *((struct bios_return *)obj->buffer.pointer);
- if (bios_return.return_code) {
- printk(KERN_WARNING PREFIX "Query %d returned %d\n", query,
- bios_return.return_code);
- kfree(obj);
- return bios_return.return_code;
- }
- if (obj->buffer.length - sizeof(bios_return) > buffersize) {
- kfree(obj);
- return -EINVAL;
- }
-
- memset(buffer, 0, buffersize);
- memcpy(buffer,
- ((char *)obj->buffer.pointer) + sizeof(struct bios_return),
- obj->buffer.length - sizeof(bios_return));
- kfree(obj);
+ memcpy(buffer, &bios_return.value, sizeof(bios_return.value));
return 0;
}
static int hp_wmi_display_state(void)
{
- int state;
- int ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, (char *)&state,
+ int state = 0;
+ int ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, &state,
sizeof(state));
if (ret)
return -EINVAL;
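Annotation: struct bios_args is handed to the firmware by value inside
the ACPI buffer, so the old "char *data" member presumably leaked a
kernel pointer into a slot the BIOS interprets as a 32-bit argument.
Callers now pass the value itself, roughly:

	u32 state = 0;
	ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, &state,
				   sizeof(state));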
@@ -208,8 +194,8 @@
static int hp_wmi_hddtemp_state(void)
{
- int state;
- int ret = hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, (char *)&state,
+ int state = 0;
+ int ret = hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, &state,
sizeof(state));
if (ret)
return -EINVAL;
@@ -218,8 +204,8 @@
static int hp_wmi_als_state(void)
{
- int state;
- int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, (char *)&state,
+ int state = 0;
+ int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, &state,
sizeof(state));
if (ret)
return -EINVAL;
@@ -228,8 +214,8 @@
static int hp_wmi_dock_state(void)
{
- int state;
- int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, (char *)&state,
+ int state = 0;
+ int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state,
sizeof(state));
if (ret)
@@ -240,8 +226,8 @@
static int hp_wmi_tablet_state(void)
{
- int state;
- int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, (char *)&state,
+ int state = 0;
+ int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state,
sizeof(state));
if (ret)
return ret;
@@ -256,7 +242,7 @@
int ret;
ret = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1,
- (char *)&query, sizeof(query));
+ &query, sizeof(query));
if (ret)
return -EINVAL;
return 0;
@@ -268,10 +254,10 @@
static bool hp_wmi_get_sw_state(enum hp_wmi_radio r)
{
- int wireless;
+ int wireless = 0;
int mask;
hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0,
- (char *)&wireless, sizeof(wireless));
+ &wireless, sizeof(wireless));
/* TBD: Pass error */
mask = 0x200 << (r * 8);
@@ -284,10 +270,10 @@
static bool hp_wmi_get_hw_state(enum hp_wmi_radio r)
{
- int wireless;
+ int wireless = 0;
int mask;
hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0,
- (char *)&wireless, sizeof(wireless));
+ &wireless, sizeof(wireless));
/* TBD: Pass error */
mask = 0x800 << (r * 8);
@@ -347,7 +333,7 @@
const char *buf, size_t count)
{
u32 tmp = simple_strtoul(buf, NULL, 10);
- int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, (char *)&tmp,
+ int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, &tmp,
sizeof(tmp));
if (ret)
return -EINVAL;
@@ -421,7 +407,7 @@
static struct key_entry *key;
union acpi_object *obj;
u32 event_id, event_data;
- int key_code, ret;
+ int key_code = 0, ret;
u32 *location;
acpi_status status;
@@ -475,7 +461,7 @@
break;
case HPWMI_BEZEL_BUTTON:
ret = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0,
- (char *)&key_code,
+ &key_code,
sizeof(key_code));
if (ret)
break;
@@ -578,9 +564,9 @@
static int __devinit hp_wmi_bios_setup(struct platform_device *device)
{
int err;
- int wireless;
+ int wireless = 0;
- err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, (char *)&wireless,
+ err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, &wireless,
sizeof(wireless));
if (err)
return err;
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index 9024480..c44a5e8 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -51,7 +51,6 @@
* TODO:
* - handle CPU hotplug
* - provide turbo enable/disable api
- * - make sure we can write turbo enable/disable reg based on MISC_EN
*
* Related documents:
* - CDI 403777, 403778 - Auburndale EDS vol 1 & 2
@@ -230,7 +229,7 @@
#define THM_TC2 0xac
#define THM_DTV 0xb0
#define THM_ITV 0xd8
-#define ITV_ME_SEQNO_MASK 0x000f0000 /* ME should update every ~200ms */
+#define ITV_ME_SEQNO_MASK 0x00ff0000 /* ME should update every ~200ms */
#define ITV_ME_SEQNO_SHIFT (16)
#define ITV_MCH_TEMP_MASK 0x0000ff00
#define ITV_MCH_TEMP_SHIFT (8)
@@ -325,6 +324,7 @@
bool gpu_preferred;
bool poll_turbo_status;
bool second_cpu;
+ bool turbo_toggle_allowed;
struct ips_mcp_limits *limits;
/* Optional MCH interfaces for if i915 is in use */
@@ -415,7 +415,7 @@
new_limit = cur_limit - 8; /* 1W decrease */
/* Clamp to SKU TDP limit */
- if (((new_limit * 10) / 8) < (ips->orig_turbo_limit & TURBO_TDP_MASK))
+ if (new_limit < (ips->orig_turbo_limit & TURBO_TDP_MASK))
new_limit = ips->orig_turbo_limit & TURBO_TDP_MASK;
thm_writew(THM_MPCPC, (new_limit * 10) / 8);
@@ -461,7 +461,8 @@
if (ips->__cpu_turbo_on)
return;
- on_each_cpu(do_enable_cpu_turbo, ips, 1);
+ if (ips->turbo_toggle_allowed)
+ on_each_cpu(do_enable_cpu_turbo, ips, 1);
ips->__cpu_turbo_on = true;
}
@@ -498,7 +499,8 @@
if (!ips->__cpu_turbo_on)
return;
- on_each_cpu(do_disable_cpu_turbo, ips, 1);
+ if (ips->turbo_toggle_allowed)
+ on_each_cpu(do_disable_cpu_turbo, ips, 1);
ips->__cpu_turbo_on = false;
}
@@ -598,17 +600,29 @@
{
unsigned long flags;
bool ret = false;
+ u32 temp_limit;
+ u32 avg_power;
+ const char *msg = "MCP limit exceeded: ";
spin_lock_irqsave(&ips->turbo_status_lock, flags);
- if (ips->mcp_avg_temp > (ips->mcp_temp_limit * 100))
- ret = true;
- if (ips->cpu_avg_power + ips->mch_avg_power > ips->mcp_power_limit)
- ret = true;
- spin_unlock_irqrestore(&ips->turbo_status_lock, flags);
- if (ret)
+ temp_limit = ips->mcp_temp_limit * 100;
+ if (ips->mcp_avg_temp > temp_limit) {
dev_info(&ips->dev->dev,
- "MCP power or thermal limit exceeded\n");
+ "%sAvg temp %u, limit %u\n", msg, ips->mcp_avg_temp,
+ temp_limit);
+ ret = true;
+ }
+
+ avg_power = ips->cpu_avg_power + ips->mch_avg_power;
+ if (avg_power > ips->mcp_power_limit) {
+ dev_info(&ips->dev->dev,
+ "%sAvg power %u, limit %u\n", msg, avg_power,
+ ips->mcp_power_limit);
+ ret = true;
+ }
+
+ spin_unlock_irqrestore(&ips->turbo_status_lock, flags);
return ret;
}
@@ -663,6 +677,27 @@
}
/**
+ * verify_limits - verify BIOS provided limits
+ * @ips: IPS structure
+ *
+ * BIOS can optionally provide non-default limits for power and temp. Check
+ * them here and use the defaults if the BIOS values are not provided or
+ * are otherwise unusable.
+ */
+static void verify_limits(struct ips_driver *ips)
+{
+ if (ips->mcp_power_limit < ips->limits->mcp_power_limit ||
+ ips->mcp_power_limit > 35000)
+ ips->mcp_power_limit = ips->limits->mcp_power_limit;
+
+ if (ips->mcp_temp_limit < ips->limits->core_temp_limit ||
+ ips->mcp_temp_limit < ips->limits->mch_temp_limit ||
+ ips->mcp_temp_limit > 150)
+ ips->mcp_temp_limit = min(ips->limits->core_temp_limit,
+ ips->limits->mch_temp_limit);
+}
+
+/**
* update_turbo_limits - get various limits & settings from regs
* @ips: IPS driver struct
*
@@ -680,12 +715,21 @@
u32 hts = thm_readl(THM_HTS);
ips->cpu_turbo_enabled = !(hts & HTS_PCTD_DIS);
- ips->gpu_turbo_enabled = !(hts & HTS_GTD_DIS);
+ /*
+ * Disable turbo for now, until we can figure out why the power figures
+ * are wrong
+ */
+ ips->cpu_turbo_enabled = false;
+
+ if (ips->gpu_busy)
+ ips->gpu_turbo_enabled = !(hts & HTS_GTD_DIS);
+
ips->core_power_limit = thm_readw(THM_MPCPC);
ips->mch_power_limit = thm_readw(THM_MMGPC);
ips->mcp_temp_limit = thm_readw(THM_PTL);
ips->mcp_power_limit = thm_readw(THM_MPPC);
+ verify_limits(ips);
/* Ignore BIOS CPU vs GPU pref */
}
@@ -858,7 +902,7 @@
ret = (ret * 1000) / 65535;
*last = val;
- return ret;
+ return 0;
}
static const u16 temp_decay_factor = 2;
@@ -940,7 +984,6 @@
kfree(mch_samples);
kfree(cpu_samples);
kfree(mchp_samples);
- kthread_stop(ips->adjust);
return -ENOMEM;
}
@@ -948,7 +991,7 @@
ITV_ME_SEQNO_SHIFT;
seqno_timestamp = get_jiffies_64();
- old_cpu_power = thm_readl(THM_CEC) / 65535;
+ old_cpu_power = thm_readl(THM_CEC);
schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
/* Collect an initial average */
@@ -1150,11 +1193,18 @@
STS_GPL_SHIFT;
/* ignore EC CPU vs GPU pref */
ips->cpu_turbo_enabled = !(sts & STS_PCTD_DIS);
- ips->gpu_turbo_enabled = !(sts & STS_GTD_DIS);
+ /*
+ * Disable turbo for now, until we can figure
+ * out why the power figures are wrong
+ */
+ ips->cpu_turbo_enabled = false;
+ if (ips->gpu_busy)
+ ips->gpu_turbo_enabled = !(sts & STS_GTD_DIS);
ips->mcp_temp_limit = (sts & STS_PTL_MASK) >>
STS_PTL_SHIFT;
ips->mcp_power_limit = (tc1 & STS_PPL_MASK) >>
STS_PPL_SHIFT;
+ verify_limits(ips);
spin_unlock(&ips->turbo_status_lock);
thm_writeb(THM_SEC, SEC_ACK);
@@ -1333,8 +1383,10 @@
* turbo manually or we'll get an illegal MSR access, even though
* turbo will still be available.
*/
- if (!(misc_en & IA32_MISC_TURBO_EN))
- ; /* add turbo MSR write allowed flag if necessary */
+ if (misc_en & IA32_MISC_TURBO_EN)
+ ips->turbo_toggle_allowed = true;
+ else
+ ips->turbo_toggle_allowed = false;
if (strstr(boot_cpu_data.x86_model_id, "CPU M"))
limits = &ips_sv_limits;
@@ -1351,9 +1403,10 @@
tdp = turbo_power & TURBO_TDP_MASK;
/* Sanity check TDP against CPU */
- if (limits->mcp_power_limit != (tdp / 8) * 1000) {
- dev_warn(&ips->dev->dev, "Warning: CPU TDP doesn't match expected value (found %d, expected %d)\n",
- tdp / 8, limits->mcp_power_limit / 1000);
+ if (limits->core_power_limit != (tdp / 8) * 1000) {
+ dev_info(&ips->dev->dev, "CPU TDP doesn't match expected value (found %d, expected %d)\n",
+ tdp / 8, limits->core_power_limit / 1000);
+ limits->core_power_limit = (tdp / 8) * 1000;
}
out:
@@ -1390,7 +1443,7 @@
return true;
out_put_busy:
- symbol_put(i915_gpu_turbo_disable);
+ symbol_put(i915_gpu_busy);
out_put_lower:
symbol_put(i915_gpu_lower);
out_put_raise:
@@ -1532,23 +1585,28 @@
/* Save turbo limits & ratios */
rdmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit);
- ips_enable_cpu_turbo(ips);
- ips->cpu_turbo_enabled = true;
+ ips_disable_cpu_turbo(ips);
+ ips->cpu_turbo_enabled = false;
- /* Set up the work queue and monitor/adjust threads */
- ips->monitor = kthread_run(ips_monitor, ips, "ips-monitor");
- if (IS_ERR(ips->monitor)) {
- dev_err(&dev->dev,
- "failed to create thermal monitor thread, aborting\n");
- ret = -ENOMEM;
- goto error_free_irq;
- }
-
+ /* Create thermal adjust thread */
ips->adjust = kthread_create(ips_adjust, ips, "ips-adjust");
if (IS_ERR(ips->adjust)) {
dev_err(&dev->dev,
"failed to create thermal adjust thread, aborting\n");
ret = -ENOMEM;
+ goto error_free_irq;
+
+ }
+
+ /*
+ * Set up the work queue and monitor thread. The monitor thread
+ * will wake up the ips_adjust thread.
+ */
+ ips->monitor = kthread_run(ips_monitor, ips, "ips-monitor");
+ if (IS_ERR(ips->monitor)) {
+ dev_err(&dev->dev,
+ "failed to create thermal monitor thread, aborting\n");
+ ret = -ENOMEM;
goto error_thread_cleanup;
}
@@ -1566,7 +1624,7 @@
return ret;
error_thread_cleanup:
- kthread_stop(ips->monitor);
+ kthread_stop(ips->adjust);
error_free_irq:
free_irq(ips->dev->irq, ips);
error_unmap:
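
The verify_limits() addition above clamps BIOS-supplied power and thermal limits back to the driver defaults whenever they fall outside a plausible window (over 35 W or 150 degrees C). A minimal userspace sketch of the same clamp-or-default idiom; the function and values here are illustrative, not taken from the driver:

    #include <stdio.h>

    /* Fall back to a default when a firmware-supplied value is out of range. */
    static unsigned int clamp_or_default(unsigned int val, unsigned int lo,
                                         unsigned int hi, unsigned int def)
    {
            return (val < lo || val > hi) ? def : val;
    }

    int main(void)
    {
            /* e.g. a bogus 40 W limit (in mW) capped against a 35 W ceiling */
            unsigned int mcp = clamp_or_default(40000, 25000, 35000, 28000);

            printf("effective limit: %u mW\n", mcp); /* prints 28000 */
            return 0;
    }
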
diff --git a/drivers/platform/x86/intel_rar_register.c b/drivers/platform/x86/intel_rar_register.c
index 73f8e6d..2b11a333 100644
--- a/drivers/platform/x86/intel_rar_register.c
+++ b/drivers/platform/x86/intel_rar_register.c
@@ -145,7 +145,7 @@
*/
static struct rar_device *_rar_to_device(int rar, int *off)
{
- if (rar >= 0 && rar <= 3) {
+ if (rar >= 0 && rar < MRST_NUM_RAR) {
*off = rar;
return &my_rar_device;
}
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 943f908..6abe18e 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -487,7 +487,7 @@
mdelay(1);
*data = readl(ipcdev.i2c_base + I2C_DATA_ADDR);
} else if (cmd == IPC_I2C_WRITE) {
- writel(addr, ipcdev.i2c_base + I2C_DATA_ADDR);
+ writel(*data, ipcdev.i2c_base + I2C_DATA_ADDR);
mdelay(1);
writel(addr, ipcdev.i2c_base + IPC_I2C_CNTRL_ADDR);
} else {
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index e35ed12..2d61186 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -3093,7 +3093,8 @@
TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */
};
-typedef u16 tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN];
+typedef u16 tpacpi_keymap_entry_t;
+typedef tpacpi_keymap_entry_t tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN];
static int __init hotkey_init(struct ibm_init_struct *iibm)
{
@@ -3230,7 +3231,7 @@
};
#define TPACPI_HOTKEY_MAP_SIZE sizeof(tpacpi_keymap_t)
-#define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(tpacpi_keymap_t[0])
+#define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(tpacpi_keymap_entry_t)
int res, i;
int status;
diff --git a/drivers/power/apm_power.c b/drivers/power/apm_power.c
index 936bae5..dc628cb 100644
--- a/drivers/power/apm_power.c
+++ b/drivers/power/apm_power.c
@@ -233,6 +233,7 @@
empty_design_prop = POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN;
now_prop = POWER_SUPPLY_PROP_ENERGY_NOW;
avg_prop = POWER_SUPPLY_PROP_ENERGY_AVG;
+ break;
case SOURCE_VOLTAGE:
full_prop = POWER_SUPPLY_PROP_VOLTAGE_MAX;
empty_prop = POWER_SUPPLY_PROP_VOLTAGE_MIN;
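
The apm_power hunk above adds the break that was missing after the energy-property assignments; without it, control fell straight through into the SOURCE_VOLTAGE case and overwrote the values just set. A standalone illustration of the fall-through hazard (the enum and strings are hypothetical):

    #include <stdio.h>

    enum source { SOURCE_ENERGY, SOURCE_VOLTAGE };

    int main(void)
    {
            enum source s = SOURCE_ENERGY;
            const char *prop = "unset";

            switch (s) {
            case SOURCE_ENERGY:
                    prop = "energy";
                    break;  /* without this, the next case wins */
            case SOURCE_VOLTAGE:
                    prop = "voltage";
                    break;
            }
            printf("%s\n", prop);   /* prints "energy" */
            return 0;
    }
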
diff --git a/drivers/power/intel_mid_battery.c b/drivers/power/intel_mid_battery.c
index c61ffec..2a10cd3 100644
--- a/drivers/power/intel_mid_battery.c
+++ b/drivers/power/intel_mid_battery.c
@@ -185,8 +185,8 @@
{
u32 data[3];
u8 *p = (u8 *)&data[1];
- int err = intel_scu_ipc_command(IPC_CMD_BATTERY_PROPERTY,
- IPCMSG_BATTERY, NULL, 0, data, 3);
+ int err = intel_scu_ipc_command(IPCMSG_BATTERY,
+ IPC_CMD_BATTERY_PROPERTY, NULL, 0, data, 3);
prop->capacity = data[0];
prop->crnt = *p++;
@@ -207,7 +207,7 @@
static int pmic_scu_ipc_set_charger(int charger)
{
- return intel_scu_ipc_simple_command(charger, IPCMSG_BATTERY);
+ return intel_scu_ipc_simple_command(IPCMSG_BATTERY, charger);
}
/**
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index 7d149a8..2ce2eb7 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -215,7 +215,7 @@
struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
int ret = -EINVAL;
- if (info->vol_table && (index < (2 << info->vol_nbits))) {
+ if (info->vol_table && (index < (1 << info->vol_nbits))) {
ret = info->vol_table[index];
if (info->slope_double)
ret <<= 1;
@@ -233,7 +233,7 @@
max_uV = max_uV >> 1;
}
if (info->vol_table) {
- for (i = 0; i < (2 << info->vol_nbits); i++) {
+ for (i = 0; i < (1 << info->vol_nbits); i++) {
if (!info->vol_table[i])
break;
if ((min_uV <= info->vol_table[i])
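
Both 88pm8607 hunks correct the same off-by-factor-of-two: a field of vol_nbits bits can select 1 << vol_nbits table entries, while 2 << vol_nbits would index twice as many and run past the end of vol_table. A quick check of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
            int vol_nbits = 3;      /* a 3-bit selector field */

            printf("%d\n", 1 << vol_nbits);  /* 8: correct entry count */
            printf("%d\n", 2 << vol_nbits);  /* 16: indexes past an 8-entry table */
            return 0;
    }
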
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 1179099..b349266 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -634,12 +634,9 @@
"%s: failed to register regulator %s err %d\n",
__func__, ab3100_regulator_desc[i].name,
err);
- i--;
/* remove the already registered regulators */
- while (i > 0) {
+ while (--i >= 0)
regulator_unregister(ab3100_regulators[i].rdev);
- i--;
- }
return err;
}
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index dc3f1a4..28c7ae6 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -157,7 +157,7 @@
if (info->fixed_uV)
return info->fixed_uV;
- if (selector > info->voltages_len)
+ if (selector >= info->voltages_len)
return -EINVAL;
return info->supported_voltages[selector];
@@ -344,13 +344,14 @@
static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
{
struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
- struct ab8500_platform_data *pdata = dev_get_platdata(ab8500->dev);
+ struct ab8500_platform_data *pdata;
int i, err;
if (!ab8500) {
dev_err(&pdev->dev, "null mfd parent\n");
return -EINVAL;
}
+ pdata = dev_get_platdata(ab8500->dev);
/* register all regulators */
for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
@@ -368,11 +369,9 @@
dev_err(&pdev->dev, "failed to register regulator %s\n",
info->desc.name);
/* when we fail, un-register all earlier regulators */
- i--;
- while (i > 0) {
+ while (--i >= 0) {
info = &ab8500_regulator_info[i];
regulator_unregister(info->regulator);
- i--;
}
return err;
}
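
The ab3100 and ab8500 hunks above replace a buggy "i--; while (i > 0)" unwind, which skipped index 0, with "while (--i >= 0)", which unregisters every earlier entry and never touches the one that just failed. A minimal sketch of the idiom, with stand-in register/unregister helpers:

    #include <stdio.h>

    static int register_one(int i) { return i == 3 ? -1 : 0; } /* fail at 3 */
    static void unregister_one(int i) { printf("unregister %d\n", i); }

    int main(void)
    {
            int i;

            for (i = 0; i < 5; i++) {
                    if (register_one(i) < 0) {
                            /* unwind everything registered so far, down to 0 */
                            while (--i >= 0)
                                    unregister_one(i);
                            return 1;
                    }
            }
            return 0;
    }
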
diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c
index d59d2f2..a4be416 100644
--- a/drivers/regulator/ad5398.c
+++ b/drivers/regulator/ad5398.c
@@ -25,7 +25,7 @@
unsigned int current_level;
unsigned int current_mask;
unsigned int current_offset;
- struct regulator_dev rdev;
+ struct regulator_dev *rdev;
};
static int ad5398_calc_current(struct ad5398_chip_info *chip,
@@ -211,7 +211,6 @@
static int __devinit ad5398_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct regulator_dev *rdev;
struct regulator_init_data *init_data = client->dev.platform_data;
struct ad5398_chip_info *chip;
const struct ad5398_current_data_format *df =
@@ -233,9 +232,10 @@
chip->current_offset = df->current_offset;
chip->current_mask = (chip->current_level - 1) << chip->current_offset;
- rdev = regulator_register(&ad5398_reg, &client->dev, init_data, chip);
- if (IS_ERR(rdev)) {
- ret = PTR_ERR(rdev);
+ chip->rdev = regulator_register(&ad5398_reg, &client->dev,
+ init_data, chip);
+ if (IS_ERR(chip->rdev)) {
+ ret = PTR_ERR(chip->rdev);
dev_err(&client->dev, "failed to register %s %s\n",
id->name, ad5398_reg.name);
goto err;
@@ -254,9 +254,8 @@
{
struct ad5398_chip_info *chip = i2c_get_clientdata(client);
- regulator_unregister(&chip->rdev);
+ regulator_unregister(chip->rdev);
kfree(chip);
- i2c_set_clientdata(client, NULL);
return 0;
}
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 422a709..cc8b337 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -700,7 +700,7 @@
constraints->min_uA != constraints->max_uA) {
ret = _regulator_get_current_limit(rdev);
if (ret > 0)
- count += sprintf(buf + count, "at %d uA ", ret / 1000);
+ count += sprintf(buf + count, "at %d mA ", ret / 1000);
}
if (constraints->valid_modes_mask & REGULATOR_MODE_FAST)
@@ -2302,8 +2302,10 @@
dev_set_name(&rdev->dev, "regulator.%d",
atomic_inc_return(&regulator_no) - 1);
ret = device_register(&rdev->dev);
- if (ret != 0)
+ if (ret != 0) {
+ put_device(&rdev->dev);
goto clean;
+ }
dev_set_drvdata(&rdev->dev, rdev);
diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c
index e49d2bd..b8cc638 100644
--- a/drivers/regulator/isl6271a-regulator.c
+++ b/drivers/regulator/isl6271a-regulator.c
@@ -165,7 +165,7 @@
mutex_init(&pmic->mtx);
for (i = 0; i < 3; i++) {
- pmic->rdev[i] = regulator_register(&isl_rd[0], &i2c->dev,
+ pmic->rdev[i] = regulator_register(&isl_rd[i], &i2c->dev,
init_data, pmic);
if (IS_ERR(pmic->rdev[i])) {
dev_err(&i2c->dev, "failed to register %s\n", id->name);
@@ -191,8 +191,6 @@
struct isl_pmic *pmic = i2c_get_clientdata(i2c);
int i;
- i2c_set_clientdata(i2c, NULL);
-
for (i = 0; i < 3; i++)
regulator_unregister(pmic->rdev[i]);
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index 8867c27..559cfa2 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -121,14 +121,14 @@
if (max_uV < MAX1586_V6_MIN_UV || max_uV > MAX1586_V6_MAX_UV)
return -EINVAL;
- if (min_uV >= 3000000)
- selector = 3;
- if (min_uV < 3000000)
- selector = 2;
- if (min_uV < 2500000)
- selector = 1;
if (min_uV < 1800000)
selector = 0;
+ else if (min_uV < 2500000)
+ selector = 1;
+ else if (min_uV < 3000000)
+ selector = 2;
+ else if (min_uV >= 3000000)
+ selector = 3;
if (max1586_v6_calc_voltage(selector) > max_uV)
return -EINVAL;
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index 4520ace..6b60a9c 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -330,7 +330,7 @@
/* set external clock frequency */
info->extclk_freq = pdata->extclk_freq;
max8649_set_bits(info->i2c, MAX8649_SYNC, MAX8649_EXT_MASK,
- info->extclk_freq);
+ info->extclk_freq << 6);
}
if (pdata->ramp_timing) {
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index ab67298..a1baf1f 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -549,7 +549,7 @@
if (!max8998)
return -ENOMEM;
- size = sizeof(struct regulator_dev *) * (pdata->num_regulators + 1);
+ size = sizeof(struct regulator_dev *) * pdata->num_regulators;
max8998->rdev = kzalloc(size, GFP_KERNEL);
if (!max8998->rdev) {
kfree(max8998);
@@ -557,7 +557,9 @@
}
rdev = max8998->rdev;
+ max8998->dev = &pdev->dev;
max8998->iodev = iodev;
+ max8998->num_regulators = pdata->num_regulators;
platform_set_drvdata(pdev, max8998);
for (i = 0; i < pdata->num_regulators; i++) {
@@ -583,7 +585,7 @@
return 0;
err:
- for (i = 0; i <= max8998->num_regulators; i++)
+ for (i = 0; i < max8998->num_regulators; i++)
if (rdev[i])
regulator_unregister(rdev[i]);
@@ -599,7 +601,7 @@
struct regulator_dev **rdev = max8998->rdev;
int i;
- for (i = 0; i <= max8998->num_regulators; i++)
+ for (i = 0; i < max8998->num_regulators; i++)
if (rdev[i])
regulator_unregister(rdev[i]);
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index c239f42..020f587 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -626,12 +626,6 @@
return error;
}
-/**
- * tps6507x_remove - TPS6507x driver i2c remove handler
- * @client: i2c driver client device structure
- *
- * Unregister TPS driver as an i2c client device driver
- */
static int __devexit tps6507x_pmic_remove(struct platform_device *pdev)
{
struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev);
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index 8cff141..51237fb 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -133,7 +133,7 @@
mask = ((1 << ri->volt_nbits) - 1) << ri->volt_shift;
val = (val & mask) >> ri->volt_shift;
- if (val > ri->desc.n_voltages)
+ if (val >= ri->desc.n_voltages)
BUG();
return ri->voltages[val] * 1000;
@@ -150,7 +150,7 @@
if (ret)
return ret;
- return tps6586x_set_bits(parent, ri->go_reg, ri->go_bit);
+ return tps6586x_set_bits(parent, ri->go_reg, 1 << ri->go_bit);
}
static int tps6586x_regulator_enable(struct regulator_dev *rdev)
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index e686cdb..9edf8f6 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -215,8 +215,7 @@
case REGULATOR_MODE_IDLE:
ret = wm831x_set_bits(wm831x, ctrl_reg,
- WM831X_LDO1_LP_MODE,
- WM831X_LDO1_LP_MODE);
+ WM831X_LDO1_LP_MODE, 0);
if (ret < 0)
return ret;
@@ -225,10 +224,12 @@
WM831X_LDO1_ON_MODE);
if (ret < 0)
return ret;
+ break;
case REGULATOR_MODE_STANDBY:
ret = wm831x_set_bits(wm831x, ctrl_reg,
- WM831X_LDO1_LP_MODE, 0);
+ WM831X_LDO1_LP_MODE,
+ WM831X_LDO1_LP_MODE);
if (ret < 0)
return ret;
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 0e6ed7d..fe4b8a8 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -1129,7 +1129,7 @@
mode = REGULATOR_MODE_NORMAL;
} else if (!active && !sleep)
mode = REGULATOR_MODE_IDLE;
- else if (!sleep)
+ else if (sleep)
mode = REGULATOR_MODE_STANDBY;
return mode;
diff --git a/drivers/rtc/rtc-ab3100.c b/drivers/rtc/rtc-ab3100.c
index d26780e..261a07e 100644
--- a/drivers/rtc/rtc-ab3100.c
+++ b/drivers/rtc/rtc-ab3100.c
@@ -235,6 +235,7 @@
err = PTR_ERR(rtc);
return err;
}
+ platform_set_drvdata(pdev, rtc);
return 0;
}
@@ -244,6 +245,7 @@
struct rtc_device *rtc = platform_get_drvdata(pdev);
rtc_device_unregister(rtc);
+ platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index 72b2bcc..d4fb82d 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -426,7 +426,7 @@
enable_irq_wake(IRQ_RTC);
bfin_rtc_sync_pending(&pdev->dev);
} else
- bfin_rtc_int_clear(-1);
+ bfin_rtc_int_clear(0);
return 0;
}
@@ -435,8 +435,17 @@
{
if (device_may_wakeup(&pdev->dev))
disable_irq_wake(IRQ_RTC);
- else
- bfin_write_RTC_ISTAT(-1);
+
+ /*
+ * Since only some of the RTC bits are maintained externally in the
+ * Vbat domain, we need to wait for the RTC MMRs to be synced into
+ * the core after waking up. This happens on every RTC 1Hz tick. Once that
+ * has happened, we can go ahead and re-enable the important write
+ * complete interrupt event.
+ */
+ while (!(bfin_read_RTC_ISTAT() & RTC_ISTAT_SEC))
+ continue;
+ bfin_rtc_int_set(RTC_ISTAT_WRITE_COMPLETE);
return 0;
}
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 9daed8d..9de8516 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -268,7 +268,6 @@
free_irq(client->irq, client);
out_free:
- i2c_set_clientdata(client, NULL);
kfree(ds3232);
return ret;
}
@@ -287,7 +286,6 @@
}
rtc_device_unregister(ds3232->rtc);
- i2c_set_clientdata(client, NULL);
kfree(ds3232);
return 0;
}
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 66377f3..d60557c 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -364,7 +364,7 @@
t->time.tm_isdst = -1;
t->enabled = !!(reg[M41T80_REG_ALARM_MON] & M41T80_ALMON_AFE);
t->pending = !!(reg[M41T80_REG_FLAGS] & M41T80_FLAGS_AF);
- return rtc_valid_tm(t);
+ return 0;
}
static struct rtc_class_ops m41t80_rtc_ops = {
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index 6c418fe..b7a6690 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -403,7 +403,7 @@
}
if (request_irq(adev->irq[0], pl031_interrupt,
- IRQF_DISABLED | IRQF_SHARED, "rtc-pl031", ldata)) {
+ IRQF_DISABLED, "rtc-pl031", ldata)) {
ret = -EIO;
goto out_no_irq;
}
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index a0d3ec8..f57a87f 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -310,11 +310,6 @@
s3c_rtc_setaie(alrm->enabled);
- if (alrm->enabled)
- enable_irq_wake(s3c_rtc_alarmno);
- else
- disable_irq_wake(s3c_rtc_alarmno);
-
return 0;
}
@@ -587,6 +582,10 @@
ticnt_en_save &= S3C64XX_RTCCON_TICEN;
}
s3c_rtc_enable(pdev, 0);
+
+ if (device_may_wakeup(&pdev->dev))
+ enable_irq_wake(s3c_rtc_alarmno);
+
return 0;
}
@@ -600,6 +599,10 @@
tmp = readb(s3c_rtc_base + S3C2410_RTCCON);
writeb(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON);
}
+
+ if (device_may_wakeup(&pdev->dev))
+ disable_irq_wake(s3c_rtc_alarmno);
+
return 0;
}
#else
diff --git a/drivers/s390/char/ctrlchar.c b/drivers/s390/char/ctrlchar.c
index c6cbcb3..0e9a309 100644
--- a/drivers/s390/char/ctrlchar.c
+++ b/drivers/s390/char/ctrlchar.c
@@ -16,12 +16,11 @@
#ifdef CONFIG_MAGIC_SYSRQ
static int ctrlchar_sysrq_key;
-static struct tty_struct *sysrq_tty;
static void
ctrlchar_handle_sysrq(struct work_struct *work)
{
- handle_sysrq(ctrlchar_sysrq_key, sysrq_tty);
+ handle_sysrq(ctrlchar_sysrq_key);
}
static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq);
@@ -54,7 +53,6 @@
/* racy */
if (len == 3 && buf[1] == '-') {
ctrlchar_sysrq_key = buf[2];
- sysrq_tty = tty;
schedule_work(&ctrlchar_work);
return CTRLCHAR_SYSRQ;
}
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index 18d9a49..8cd58e4 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -305,7 +305,7 @@
if (kbd->sysrq) {
if (kbd->sysrq == K(KT_LATIN, '-')) {
kbd->sysrq = 0;
- handle_sysrq(value, kbd->tty);
+ handle_sysrq(value);
return;
}
if (value == '-') {
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index b7de025..85cf607 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -217,8 +217,7 @@
if (!blkdat->request_queue)
return -ENOMEM;
- elevator_exit(blkdat->request_queue->elevator);
- rc = elevator_init(blkdat->request_queue, "noop");
+ rc = elevator_change(blkdat->request_queue, "noop");
if (rc)
goto cleanup_queue;
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 6edf20b..2c7d2d9 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1154,7 +1154,7 @@
dev_fsm, dev_fsm_len, GFP_KERNEL);
if (priv->fsm == NULL) {
CTCMY_DBF_DEV(SETUP, dev, "init_fsm error");
- kfree(dev);
+ free_netdev(dev);
return NULL;
}
fsm_newstate(priv->fsm, DEV_STATE_STOPPED);
@@ -1165,7 +1165,7 @@
grp = ctcmpc_init_mpc_group(priv);
if (grp == NULL) {
MPC_DBF_DEV(SETUP, dev, "init_mpc_group error");
- kfree(dev);
+ free_netdev(dev);
return NULL;
}
tasklet_init(&grp->mpc_tasklet2,
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 7d4d227..7f11f3e 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -300,8 +300,7 @@
enum iscsi_host_param param, char *buf)
{
struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost);
- int len = 0;
- int status;
+ int status = 0;
SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param);
switch (param) {
@@ -315,7 +314,7 @@
default:
return iscsi_host_get_param(shost, param, buf);
}
- return len;
+ return status;
}
int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba)
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 26350e4..877324f 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -368,7 +368,7 @@
memset(req, 0, sizeof(*req));
wrb->tag0 |= tag;
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 1);
+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD,
sizeof(*req));
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index cd05e04..d0c8234 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -1404,13 +1404,13 @@
{
struct scsi_sense_hdr sshdr;
- scmd_printk(KERN_INFO, cmd, "");
+ scmd_printk(KERN_INFO, cmd, " ");
scsi_decode_sense_buffer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
&sshdr);
scsi_show_sense_hdr(&sshdr);
scsi_decode_sense_extras(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
&sshdr);
- scmd_printk(KERN_INFO, cmd, "");
+ scmd_printk(KERN_INFO, cmd, " ");
scsi_show_extd_sense(sshdr.asc, sshdr.ascq);
}
EXPORT_SYMBOL(scsi_print_sense);
@@ -1453,7 +1453,7 @@
void scsi_print_result(struct scsi_cmnd *cmd)
{
- scmd_printk(KERN_INFO, cmd, "");
+ scmd_printk(KERN_INFO, cmd, " ");
scsi_show_result(cmd->result);
}
EXPORT_SYMBOL(scsi_print_result);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 4f5551b..c5d0606 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -3231,6 +3231,12 @@
misc_fw_support = readl(&cfgtable->misc_fw_support);
use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
+ /* The doorbell reset seems to cause lockups on some Smart
+ * Arrays (e.g. P410, P410i, maybe others). Until this is
+ * fixed or at least isolated, avoid the doorbell reset.
+ */
+ use_doorbell = 0;
+
rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
if (rc)
goto unmap_cfgtable;
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index fda4de3..e88bbdd 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -865,7 +865,7 @@
{
_osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len);
WARN_ON(or->in.bio || or->in.total_bytes);
- WARN_ON(1 == (bio->bi_rw & REQ_WRITE));
+ WARN_ON(bio->bi_rw & REQ_WRITE);
or->in.bio = bio;
or->in.total_bytes = len;
}
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 420238c..114bc5a 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1838,26 +1838,33 @@
qla24xx_disable_vp(vha);
+ vha->flags.delete_progress = 1;
+
fc_remove_host(vha->host);
scsi_remove_host(vha->host);
- qla2x00_free_fcports(vha);
+ if (vha->timer_active) {
+ qla2x00_vp_stop_timer(vha);
+ DEBUG15(printk(KERN_INFO "scsi(%ld): timer for the vport[%d]"
+ " = %p has stopped\n", vha->host_no, vha->vp_idx, vha));
+ }
qla24xx_deallocate_vp_id(vha);
+ /* No pending activity should remain on the vha at this point */
+ DEBUG(msleep(random32()%10)); /* Just to see if something falls on
+ * the net we have placed below */
+
+ BUG_ON(atomic_read(&vha->vref_count));
+
+ qla2x00_free_fcports(vha);
+
mutex_lock(&ha->vport_lock);
ha->cur_vport_count--;
clear_bit(vha->vp_idx, ha->vp_idx_map);
mutex_unlock(&ha->vport_lock);
- if (vha->timer_active) {
- qla2x00_vp_stop_timer(vha);
- DEBUG15(printk ("scsi(%ld): timer for the vport[%d] = %p "
- "has stopped\n",
- vha->host_no, vha->vp_idx, vha));
- }
-
if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
qla_printk(KERN_WARNING, ha,
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 6cfc28a..b74e6b5 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -29,8 +29,6 @@
/* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */
/* #define QL_DEBUG_LEVEL_18 */ /* Output T10 CRC trace messages */
-/* #define QL_PRINTK_BUF */ /* Captures printk to buffer */
-
/*
* Macros use for debugging the driver.
*/
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 3a432ea..d2a4e15 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2641,6 +2641,7 @@
#define MBX_UPDATE_FLASH_ACTIVE 3
struct mutex vport_lock; /* Virtual port synchronization */
+ spinlock_t vport_slock; /* order is hardware_lock, then vport_slock */
struct completion mbx_cmd_comp; /* Serialize mbx access */
struct completion mbx_intr_comp; /* Used for completion notification */
struct completion dcbx_comp; /* For set port config notification */
@@ -2828,6 +2829,7 @@
uint32_t management_server_logged_in :1;
uint32_t process_response_queue :1;
uint32_t difdix_supported:1;
+ uint32_t delete_progress:1;
} flags;
atomic_t loop_state;
@@ -2922,6 +2924,8 @@
struct req_que *req;
int fw_heartbeat_counter;
int seconds_since_last_heartbeat;
+
+ atomic_t vref_count;
} scsi_qla_host_t;
/*
@@ -2932,6 +2936,22 @@
test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \
atomic_read(&ha->loop_state) == LOOP_DOWN)
+#define QLA_VHA_MARK_BUSY(__vha, __bail) do { \
+ atomic_inc(&__vha->vref_count); \
+ mb(); \
+ if (__vha->flags.delete_progress) { \
+ atomic_dec(&__vha->vref_count); \
+ __bail = 1; \
+ } else { \
+ __bail = 0; \
+ } \
+} while (0)
+
+#define QLA_VHA_MARK_NOT_BUSY(__vha) do { \
+ atomic_dec(&__vha->vref_count); \
+} while (0)
+
+
#define qla_printk(level, ha, format, arg...) \
dev_printk(level , &((ha)->pdev->dev) , format , ## arg)
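
The two macros added to qla_def.h implement a mark-busy-or-bail handshake against vport deletion: the reference count is raised first, a memory barrier is issued, and delete_progress is then re-checked so a racing delete either sees the raised count or forces the caller to back off. A hedged sketch of the expected caller pattern (kernel context assumed; the function name and -ENODEV return are illustrative):

    /* Sketch only: relies on the qla2xxx definitions in the hunk above. */
    static int example_vha_op(scsi_qla_host_t *vha)
    {
            uint8_t bail;

            QLA_VHA_MARK_BUSY(vha, bail);   /* inc vref_count, re-check flag */
            if (bail)
                    return -ENODEV;         /* vport delete already underway */

            /* ... work that must not race with vport deletion ... */

            QLA_VHA_MARK_NOT_BUSY(vha);     /* drop the reference */
            return 0;
    }
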
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index d863ed2..9c383ba 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -69,21 +69,29 @@
{
struct srb_ctx *ctx = sp->ctx;
struct srb_iocb *iocb = ctx->u.iocb_cmd;
+ struct scsi_qla_host *vha = sp->fcport->vha;
del_timer_sync(&iocb->timer);
kfree(iocb);
kfree(ctx);
mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
+
+ QLA_VHA_MARK_NOT_BUSY(vha);
}
inline srb_t *
qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
unsigned long tmo)
{
- srb_t *sp;
+ srb_t *sp = NULL;
struct qla_hw_data *ha = vha->hw;
struct srb_ctx *ctx;
struct srb_iocb *iocb;
+ uint8_t bail;
+
+ QLA_VHA_MARK_BUSY(vha, bail);
+ if (bail)
+ return NULL;
sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
if (!sp)
@@ -116,6 +124,8 @@
iocb->timer.function = qla2x00_ctx_sp_timeout;
add_timer(&iocb->timer);
done:
+ if (!sp)
+ QLA_VHA_MARK_NOT_BUSY(vha);
return sp;
}
@@ -1777,11 +1787,15 @@
qla2x00_init_response_q_entries(rsp);
}
+ spin_lock_irqsave(&ha->vport_slock, flags);
/* Clear RSCN queue. */
list_for_each_entry(vp, &ha->vp_list, list) {
vp->rscn_in_ptr = 0;
vp->rscn_out_ptr = 0;
}
+
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
ha->isp_ops->config_rings(vha);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3218,12 +3232,17 @@
/* Bypass virtual ports of the same host. */
found = 0;
if (ha->num_vhosts) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
if (new_fcport->d_id.b24 == vp->d_id.b24) {
found = 1;
break;
}
}
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
if (found)
continue;
}
@@ -3343,6 +3362,7 @@
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *vp;
struct scsi_qla_host *tvp;
+ unsigned long flags = 0;
rval = QLA_SUCCESS;
@@ -3367,6 +3387,8 @@
/* Check for loop ID being already in use. */
found = 0;
fcport = NULL;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
list_for_each_entry(fcport, &vp->vp_fcports, list) {
if (fcport->loop_id == dev->loop_id &&
@@ -3379,6 +3401,7 @@
if (found)
break;
}
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
/* If not in use then it is free to use. */
if (!found) {
@@ -3791,14 +3814,27 @@
qla2x00_update_fcports(scsi_qla_host_t *base_vha)
{
fc_port_t *fcport;
- struct scsi_qla_host *tvp, *vha;
+ struct scsi_qla_host *vha;
+ struct qla_hw_data *ha = base_vha->hw;
+ unsigned long flags;
+ spin_lock_irqsave(&ha->vport_slock, flags);
/* Go with deferred removal of rport references. */
- list_for_each_entry_safe(vha, tvp, &base_vha->hw->vp_list, list)
- list_for_each_entry(fcport, &vha->vp_fcports, list)
+ list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
+ atomic_inc(&vha->vref_count);
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (fcport && fcport->drport &&
- atomic_read(&fcport->state) != FCS_UNCONFIGURED)
+ atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
qla2x00_rport_del(fcport);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ }
+ }
+ atomic_dec(&vha->vref_count);
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
}
void
@@ -3806,7 +3842,7 @@
{
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
- struct scsi_qla_host *tvp;
+ unsigned long flags;
vha->flags.online = 0;
ha->flags.chip_reset_done = 0;
@@ -3824,8 +3860,18 @@
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
qla2x00_mark_all_devices_lost(vha, 0);
- list_for_each_entry_safe(vp, tvp, &base_vha->hw->vp_list, list)
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &base_vha->hw->vp_list, list) {
+ atomic_inc(&vp->vref_count);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
qla2x00_mark_all_devices_lost(vp, 0);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ atomic_dec(&vp->vref_count);
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
} else {
if (!atomic_read(&vha->loop_down_timer))
atomic_set(&vha->loop_down_timer,
@@ -3862,8 +3908,8 @@
uint8_t status = 0;
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *vp;
- struct scsi_qla_host *tvp;
struct req_que *req = ha->req_q_map[0];
+ unsigned long flags;
if (vha->flags.online) {
qla2x00_abort_isp_cleanup(vha);
@@ -3970,10 +4016,21 @@
DEBUG(printk(KERN_INFO
"qla2x00_abort_isp(%ld): succeeded.\n",
vha->host_no));
- list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
- if (vp->vp_idx)
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ if (vp->vp_idx) {
+ atomic_inc(&vp->vref_count);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
qla2x00_vp_abort_isp(vp);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ atomic_dec(&vp->vref_count);
+ }
}
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
} else {
qla_printk(KERN_INFO, ha,
"qla2x00_abort_isp: **** FAILED ****\n");
@@ -5185,7 +5242,7 @@
struct req_que *req = ha->req_q_map[0];
struct rsp_que *rsp = ha->rsp_q_map[0];
struct scsi_qla_host *vp;
- struct scsi_qla_host *tvp;
+ unsigned long flags;
status = qla2x00_init_rings(vha);
if (!status) {
@@ -5272,10 +5329,21 @@
DEBUG(printk(KERN_INFO
"qla82xx_restart_isp(%ld): succeeded.\n",
vha->host_no));
- list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
- if (vp->vp_idx)
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ if (vp->vp_idx) {
+ atomic_inc(&vp->vref_count);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
qla2x00_vp_abort_isp(vp);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ atomic_dec(&vp->vref_count);
+ }
}
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
} else {
qla_printk(KERN_INFO, ha,
"qla82xx_restart_isp: **** FAILED ****\n");
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 6982ba7..28f65be 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1706,19 +1706,20 @@
cp->result = DID_ERROR << 16;
break;
}
- } else if (!lscsi_status) {
+ } else {
DEBUG2(qla_printk(KERN_INFO, ha,
"scsi(%ld:%d:%d) Dropped frame(s) detected (0x%x "
"of 0x%x bytes).\n", vha->host_no, cp->device->id,
cp->device->lun, resid, scsi_bufflen(cp)));
- cp->result = DID_ERROR << 16;
- break;
+ cp->result = DID_ERROR << 16 | lscsi_status;
+ goto check_scsi_status;
}
cp->result = DID_OK << 16 | lscsi_status;
logit = 0;
+check_scsi_status:
/*
* Check to see if SCSI Status is non zero. If so report SCSI
* Status.
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 6009b0c..a595ec8 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -2913,7 +2913,7 @@
uint16_t stat = le16_to_cpu(rptid_entry->vp_idx);
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *vp;
- scsi_qla_host_t *tvp;
+ unsigned long flags;
if (rptid_entry->entry_status != 0)
return;
@@ -2945,9 +2945,12 @@
return;
}
- list_for_each_entry_safe(vp, tvp, &ha->vp_list, list)
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list)
if (vp_idx == vp->vp_idx)
break;
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
if (!vp)
return;
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 987c5b0..2b69392 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -30,6 +30,7 @@
{
uint32_t vp_id;
struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
/* Find an empty slot and assign an vp_id */
mutex_lock(&ha->vport_lock);
@@ -44,7 +45,11 @@
set_bit(vp_id, ha->vp_idx_map);
ha->num_vhosts++;
vha->vp_idx = vp_id;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
list_add_tail(&vha->list, &ha->vp_list);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
mutex_unlock(&ha->vport_lock);
return vp_id;
}
@@ -54,12 +59,31 @@
{
uint16_t vp_id;
struct qla_hw_data *ha = vha->hw;
+ unsigned long flags = 0;
mutex_lock(&ha->vport_lock);
+ /*
+ * Wait for all pending activities to finish before removing vport from
+ * the list.
+ * Lock needs to be held for safe removal from the list (it
+ * ensures no active vp_list traversal while the vport is removed
+ * from the queue)
+ */
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ while (atomic_read(&vha->vref_count)) {
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ msleep(500);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ }
+ list_del(&vha->list);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
vp_id = vha->vp_idx;
ha->num_vhosts--;
clear_bit(vp_id, ha->vp_idx_map);
- list_del(&vha->list);
+
mutex_unlock(&ha->vport_lock);
}
@@ -68,12 +92,17 @@
{
scsi_qla_host_t *vha;
struct scsi_qla_host *tvha;
+ unsigned long flags;
+ spin_lock_irqsave(&ha->vport_slock, flags);
/* Locate matching device in database. */
list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
- if (!memcmp(port_name, vha->port_name, WWN_SIZE))
+ if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
return vha;
+ }
}
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
return NULL;
}
@@ -93,6 +122,12 @@
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
+ /*
+ * !!! NOTE !!!
+ * If this function is called from contexts other than vp create,
+ * disable, or delete, make sure it is synchronized with the vport
+ * delete thread.
+ */
fc_port_t *fcport;
list_for_each_entry(fcport, &vha->vp_fcports, list) {
@@ -100,7 +135,6 @@
"loop_id=0x%04x :%x\n",
vha->host_no, fcport->loop_id, fcport->vp_idx));
- atomic_set(&fcport->state, FCS_DEVICE_DEAD);
qla2x00_mark_device_lost(vha, fcport, 0, 0);
atomic_set(&fcport->state, FCS_UNCONFIGURED);
}
@@ -194,12 +228,17 @@
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
- scsi_qla_host_t *vha, *tvha;
+ scsi_qla_host_t *vha;
struct qla_hw_data *ha = rsp->hw;
int i = 0;
+ unsigned long flags;
- list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vha, &ha->vp_list, list) {
if (vha->vp_idx) {
+ atomic_inc(&vha->vref_count);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
switch (mb[0]) {
case MBA_LIP_OCCURRED:
case MBA_LOOP_UP:
@@ -215,9 +254,13 @@
qla2x00_async_event(vha, rsp, mb);
break;
}
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ atomic_dec(&vha->vref_count);
}
i++;
}
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
}
int
@@ -297,7 +340,7 @@
int ret;
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *vp;
- struct scsi_qla_host *tvp;
+ unsigned long flags = 0;
if (vha->vp_idx)
return;
@@ -309,10 +352,19 @@
if (!(ha->current_topology & ISP_CFG_F))
return;
- list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
- if (vp->vp_idx)
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ if (vp->vp_idx) {
+ atomic_inc(&vp->vref_count);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
ret = qla2x00_do_dpc_vp(vp);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ atomic_dec(&vp->vref_count);
+ }
}
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
}
int
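
The qla_mid.c and qla_init.c hunks all share one traversal shape: take vport_slock, pin the current vport with vref_count, drop the lock for work that may sleep, then re-take the lock and unpin. qla24xx_deallocate_vp_id() is the other half of the contract, spinning until vref_count drains before unlinking the vport. A condensed sketch of the traversal side (kernel context assumed; do_work_that_may_sleep() is a hypothetical stand-in):

    /* Sketch only: mirrors the hunks above; types come from qla_def.h. */
    static void for_each_pinned_vp(struct qla_hw_data *ha)
    {
            struct scsi_qla_host *vp;
            unsigned long flags;

            spin_lock_irqsave(&ha->vport_slock, flags);
            list_for_each_entry(vp, &ha->vp_list, list) {
                    atomic_inc(&vp->vref_count);    /* pin vp across unlock */
                    spin_unlock_irqrestore(&ha->vport_slock, flags);

                    do_work_that_may_sleep(vp);     /* hypothetical helper */

                    spin_lock_irqsave(&ha->vport_slock, flags);
                    atomic_dec(&vp->vref_count);    /* deleter may proceed */
            }
            spin_unlock_irqrestore(&ha->vport_slock, flags);
    }
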
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 915b77a..0a71cc7 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -2672,6 +2672,19 @@
sufficient_dsds:
req_cnt = 1;
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = (uint16_t)RD_REG_DWORD_RELAXED(
&reg->req_q_out[0]);
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ }
+
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+
ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
if (!sp->ctx) {
DEBUG(printk(KERN_INFO
@@ -3307,16 +3320,19 @@
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
}
qla2xxx_wake_dpc(vha);
+ ha->flags.fw_hung = 1;
if (ha->flags.mbox_busy) {
- ha->flags.fw_hung = 1;
ha->flags.mbox_int = 1;
DEBUG2(qla_printk(KERN_ERR, ha,
- "Due to fw hung, doing premature "
- "completion of mbx command\n"));
- complete(&ha->mbx_intr_comp);
+ "Due to fw hung, doing premature "
+ "completion of mbx command\n"));
+ if (test_bit(MBX_INTR_WAIT,
+ &ha->mbx_cmd_flags))
+ complete(&ha->mbx_intr_comp);
}
}
- }
+ } else
+ vha->seconds_since_last_heartbeat = 0;
vha->fw_heartbeat_counter = fw_heartbeat_counter;
}
@@ -3418,13 +3434,15 @@
"%s(): Adapter reset needed!\n", __func__);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
+ ha->flags.fw_hung = 1;
if (ha->flags.mbox_busy) {
- ha->flags.fw_hung = 1;
ha->flags.mbox_int = 1;
DEBUG2(qla_printk(KERN_ERR, ha,
- "Need reset, doing premature "
- "completion of mbx command\n"));
- complete(&ha->mbx_intr_comp);
+ "Need reset, doing premature "
+ "completion of mbx command\n"));
+ if (test_bit(MBX_INTR_WAIT,
+ &ha->mbx_cmd_flags))
+ complete(&ha->mbx_intr_comp);
}
} else {
qla82xx_check_fw_alive(vha);
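
Before allocating a context, the qla_nx.c hunk recomputes the free space on the request ring: when the consumer index (cnt) is ahead of the producer index (ring_index) the free span is simply the gap between them; otherwise the span wraps past the end of the ring. The same arithmetic as a standalone check, with names mirroring the hunk:

    #include <stdio.h>

    static unsigned int ring_free(unsigned int length, unsigned int ring_index,
                                  unsigned int cnt)
    {
            if (ring_index < cnt)
                    return cnt - ring_index;        /* consumer ahead */
            return length - (ring_index - cnt);     /* wrapped case   */
    }

    int main(void)
    {
            printf("%u\n", ring_free(128, 10, 40));   /* 30 free entries */
            printf("%u\n", ring_free(128, 100, 20));  /* 48, wraps around */
            return 0;
    }
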
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8c80b49..1e4bff6 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -2341,16 +2341,28 @@
static void
qla2x00_remove_one(struct pci_dev *pdev)
{
- scsi_qla_host_t *base_vha, *vha, *temp;
+ scsi_qla_host_t *base_vha, *vha;
struct qla_hw_data *ha;
+ unsigned long flags;
base_vha = pci_get_drvdata(pdev);
ha = base_vha->hw;
- list_for_each_entry_safe(vha, temp, &ha->vp_list, list) {
- if (vha && vha->fc_vport)
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vha, &ha->vp_list, list) {
+ atomic_inc(&vha->vref_count);
+
+ if (vha && vha->fc_vport) {
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
fc_vport_terminate(vha->fc_vport);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ }
+
+ atomic_dec(&vha->vref_count);
}
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
set_bit(UNLOADING, &base_vha->dpc_flags);
@@ -2975,10 +2987,17 @@
qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
{
struct qla_work_evt *e;
+ uint8_t bail;
+
+ QLA_VHA_MARK_BUSY(vha, bail);
+ if (bail)
+ return NULL;
e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
- if (!e)
+ if (!e) {
+ QLA_VHA_MARK_NOT_BUSY(vha);
return NULL;
+ }
INIT_LIST_HEAD(&e->list);
e->type = type;
@@ -3135,6 +3154,9 @@
}
if (e->flags & QLA_EVT_FLAG_FREE)
kfree(e);
+
+ /* For each completed work item, decrement the vha ref count */
+ QLA_VHA_MARK_NOT_BUSY(vha);
}
}
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index e75ccb9..8edbccb 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.03.03-k0"
+#define QLA2XXX_VERSION "8.03.04-k0"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 3
-#define QLA_DRIVER_PATCH_VER 3
+#define QLA_DRIVER_PATCH_VER 4
#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index ad0ed21..348fba0 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1046,13 +1046,13 @@
/* If the user actually wanted this page, we can skip the rest */
if (page == 0)
- return -EINVAL;
+ return 0;
for (i = 0; i < min((int)buf[3], buf_len - 4); i++)
if (buf[i + 4] == page)
goto found;
- if (i < buf[3] && i > buf_len)
+ if (i < buf[3] && i >= buf_len - 4)
/* ran off the end of the buffer, give us benefit of doubt */
goto found;
/* The device claims it doesn't support the requested page */
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9ade720..ee02d38 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1011,8 +1011,8 @@
err_exit:
scsi_release_buffers(cmd);
- scsi_put_command(cmd);
cmd->request->special = NULL;
+ scsi_put_command(cmd);
return error;
}
EXPORT_SYMBOL(scsi_init_io);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 2714bec..ffa0689 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -870,7 +870,7 @@
SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n"));
- if (atomic_dec_return(&sdkp->openers) && sdev->removable) {
+ if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) {
if (scsi_block_when_processing_errors(sdev))
scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
}
@@ -2625,15 +2625,15 @@
static void sd_print_sense_hdr(struct scsi_disk *sdkp,
struct scsi_sense_hdr *sshdr)
{
- sd_printk(KERN_INFO, sdkp, "");
+ sd_printk(KERN_INFO, sdkp, " ");
scsi_show_sense_hdr(sshdr);
- sd_printk(KERN_INFO, sdkp, "");
+ sd_printk(KERN_INFO, sdkp, " ");
scsi_show_extd_sense(sshdr->asc, sshdr->ascq);
}
static void sd_print_result(struct scsi_disk *sdkp, int result)
{
- sd_printk(KERN_INFO, sdkp, "");
+ sd_printk(KERN_INFO, sdkp, " ");
scsi_show_result(result);
}
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index a7bc8b7..2c3e89d 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -72,10 +72,7 @@
static void sym_print_msg(struct sym_ccb *cp, char *label, u_char *msg)
{
- if (label)
- sym_print_addr(cp->cmd, "%s: ", label);
- else
- sym_print_addr(cp->cmd, "");
+ sym_print_addr(cp->cmd, "%s: ", label);
spi_print_msg(msg);
printf("\n");
@@ -4558,7 +4555,8 @@
switch (np->msgin [2]) {
case M_X_MODIFY_DP:
if (DEBUG_FLAGS & DEBUG_POINTER)
- sym_print_msg(cp, NULL, np->msgin);
+ sym_print_msg(cp, "extended msg ",
+ np->msgin);
tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) +
(np->msgin[5]<<8) + (np->msgin[6]);
sym_modify_dp(np, tp, cp, tmp);
@@ -4585,7 +4583,7 @@
*/
case M_IGN_RESIDUE:
if (DEBUG_FLAGS & DEBUG_POINTER)
- sym_print_msg(cp, NULL, np->msgin);
+ sym_print_msg(cp, "1 or 2 byte ", np->msgin);
if (cp->host_flags & HF_SENSE)
OUTL_DSP(np, SCRIPTA_BA(np, clrack));
else
diff --git a/drivers/serial/68328serial.c b/drivers/serial/68328serial.c
index 7356a56..be0ebce 100644
--- a/drivers/serial/68328serial.c
+++ b/drivers/serial/68328serial.c
@@ -869,7 +869,9 @@
tmp.close_delay = info->close_delay;
tmp.closing_wait = info->closing_wait;
tmp.custom_divisor = info->custom_divisor;
- copy_to_user(retinfo,&tmp,sizeof(*retinfo));
+ if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
+ return -EFAULT;
+
return 0;
}
@@ -882,7 +884,8 @@
if (!new_info)
return -EFAULT;
- copy_from_user(&new_serial,new_info,sizeof(new_serial));
+ if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
+ return -EFAULT;
old_info = *info;
if (!capable(CAP_SYS_ADMIN)) {
@@ -943,8 +946,7 @@
status = 0;
#endif
local_irq_restore(flags);
- put_user(status,value);
- return 0;
+ return put_user(status, value);
}
/*
@@ -999,27 +1001,18 @@
send_break(info, arg ? arg*(100) : 250);
return 0;
case TIOCGSERIAL:
- if (access_ok(VERIFY_WRITE, (void *) arg,
- sizeof(struct serial_struct)))
- return get_serial_info(info,
- (struct serial_struct *) arg);
- return -EFAULT;
+ return get_serial_info(info,
+ (struct serial_struct *) arg);
case TIOCSSERIAL:
return set_serial_info(info,
(struct serial_struct *) arg);
case TIOCSERGETLSR: /* Get line status register */
- if (access_ok(VERIFY_WRITE, (void *) arg,
- sizeof(unsigned int)))
- return get_lsr_info(info, (unsigned int *) arg);
- return -EFAULT;
+ return get_lsr_info(info, (unsigned int *) arg);
case TIOCSERGSTRUCT:
- if (!access_ok(VERIFY_WRITE, (void *) arg,
- sizeof(struct m68k_serial)))
+ if (copy_to_user((struct m68k_serial *) arg,
+ info, sizeof(struct m68k_serial)))
return -EFAULT;
- copy_to_user((struct m68k_serial *) arg,
- info, sizeof(struct m68k_serial));
return 0;
-
default:
return -ENOIOCTLCMD;
}
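
The 68328serial hunks turn unchecked copy_to_user()/copy_from_user()/put_user() calls into real failure paths: the copy helpers return the number of bytes left uncopied, so any non-zero result must surface as -EFAULT rather than a fake success. The resulting shape, as a hedged kernel-style sketch (the function name is a stand-in):

    /* Sketch only: assumes <linux/uaccess.h> and <linux/serial.h>. */
    static int get_info(struct serial_struct __user *retinfo,
                        const struct serial_struct *tmp)
    {
            if (copy_to_user(retinfo, tmp, sizeof(*retinfo)))
                    return -EFAULT;         /* some bytes were not copied */
            return 0;
    }
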
diff --git a/drivers/serial/8250_early.c b/drivers/serial/8250_early.c
index b745792..eaafb98 100644
--- a/drivers/serial/8250_early.c
+++ b/drivers/serial/8250_early.c
@@ -203,13 +203,13 @@
if (mmio || mmio32)
printk(KERN_INFO
- "Early serial console at MMIO%s 0x%llu (options '%s')\n",
+ "Early serial console at MMIO%s 0x%llx (options '%s')\n",
mmio32 ? "32" : "",
(unsigned long long)port->mapbase,
device->options);
else
printk(KERN_INFO
- "Early serial console at I/O port 0x%lu (options '%s')\n",
+ "Early serial console at I/O port 0x%lx (options '%s')\n",
port->iobase,
device->options);
diff --git a/drivers/serial/amba-pl010.c b/drivers/serial/amba-pl010.c
index 50441ff..2904aa0 100644
--- a/drivers/serial/amba-pl010.c
+++ b/drivers/serial/amba-pl010.c
@@ -472,14 +472,9 @@
spin_unlock_irqrestore(&uap->port.lock, flags);
}
-static void pl010_set_ldisc(struct uart_port *port)
+static void pl010_set_ldisc(struct uart_port *port, int new)
{
- int line = port->line;
-
- if (line >= port->state->port.tty->driver->num)
- return;
-
- if (port->state->port.tty->ldisc->ops->num == N_PPS) {
+ if (new == N_PPS) {
port->flags |= UPF_HARDPPS_CD;
pl010_enable_ms(port);
} else
diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/serial/bfin_sport_uart.c
index e57fb3d..5318dd3 100644
--- a/drivers/serial/bfin_sport_uart.c
+++ b/drivers/serial/bfin_sport_uart.c
@@ -121,7 +121,7 @@
unsigned int sclk = get_sclk();
/* Set TCR1 and TCR2, TFSR is not enabled for uart */
- SPORT_PUT_TCR1(up, (ITFS | TLSBIT | ITCLK));
+ SPORT_PUT_TCR1(up, (LATFS | ITFS | TFSR | TLSBIT | ITCLK));
SPORT_PUT_TCR2(up, size + 1);
pr_debug("%s TCR1:%x, TCR2:%x\n", __func__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up));
diff --git a/drivers/serial/ioc3_serial.c b/drivers/serial/ioc3_serial.c
index 93de907..800c546 100644
--- a/drivers/serial/ioc3_serial.c
+++ b/drivers/serial/ioc3_serial.c
@@ -2044,6 +2044,7 @@
if (!port) {
printk(KERN_WARNING
"IOC3 serial memory not available for port\n");
+ ret = -ENOMEM;
goto out4;
}
spin_lock_init(&port->ip_lock);
diff --git a/drivers/serial/mfd.c b/drivers/serial/mfd.c
index bc9af50..5dff45c 100644
--- a/drivers/serial/mfd.c
+++ b/drivers/serial/mfd.c
@@ -27,6 +27,7 @@
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
+#include <linux/slab.h>
#include <linux/serial_reg.h>
#include <linux/circ_buf.h>
#include <linux/delay.h>
@@ -1423,7 +1424,6 @@
}
phsu = hsu;
-
hsu_debugfs_init(hsu);
return;
@@ -1435,18 +1435,20 @@
static void serial_hsu_remove(struct pci_dev *pdev)
{
- struct hsu_port *hsu;
- int i;
+ void *priv = pci_get_drvdata(pdev);
+ struct uart_hsu_port *up;
- hsu = pci_get_drvdata(pdev);
- if (!hsu)
+ if (!priv)
return;
- for (i = 0; i < 3; i++)
- uart_remove_one_port(&serial_hsu_reg, &hsu->port[i].port);
+ /* For port 0/1/2, priv is the address of uart_hsu_port */
+ if (pdev->device != 0x081E) {
+ up = priv;
+ uart_remove_one_port(&serial_hsu_reg, &up->port);
+ }
pci_set_drvdata(pdev, NULL);
- free_irq(hsu->irq, hsu);
+ free_irq(pdev->irq, priv);
pci_disable_device(pdev);
}
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index 8dedb26..c4399e2 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -500,6 +500,7 @@
psc_fifoc = of_iomap(np, 0);
if (!psc_fifoc) {
pr_err("%s: Can't map FIFOC\n", __func__);
+ of_node_put(np);
return -ENODEV;
}
diff --git a/drivers/serial/mrst_max3110.c b/drivers/serial/mrst_max3110.c
index f6ad1ec..51c15f5 100644
--- a/drivers/serial/mrst_max3110.c
+++ b/drivers/serial/mrst_max3110.c
@@ -29,6 +29,7 @@
#include <linux/module.h>
#include <linux/ioport.h>
+#include <linux/irq.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index 141c695..7d475b2 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -335,8 +335,6 @@
info->p_dev = link;
link->priv = info;
- link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
- link->resource[0]->end = 8;
link->conf.Attributes = CONF_ENABLE_IRQ;
if (do_sound) {
link->conf.Attributes |= CONF_ENABLE_SPKR;
@@ -411,6 +409,27 @@
/*====================================================================*/
+static int pfc_config(struct pcmcia_device *p_dev)
+{
+ unsigned int port = 0;
+ struct serial_info *info = p_dev->priv;
+
+ if ((p_dev->resource[1]->end != 0) &&
+ (resource_size(p_dev->resource[1]) == 8)) {
+ port = p_dev->resource[1]->start;
+ info->slave = 1;
+ } else if ((info->manfid == MANFID_OSITECH) &&
+ (resource_size(p_dev->resource[0]) == 0x40)) {
+ port = p_dev->resource[0]->start + 0x28;
+ info->slave = 1;
+ }
+ if (info->slave)
+ return setup_serial(p_dev, info, port, p_dev->irq);
+
+ dev_warn(&p_dev->dev, "no usable port range found, giving up\n");
+ return -ENODEV;
+}
+
static int simple_config_check(struct pcmcia_device *p_dev,
cistpl_cftable_entry_t *cf,
cistpl_cftable_entry_t *dflt,
@@ -461,23 +480,8 @@
struct serial_info *info = link->priv;
int i = -ENODEV, try;
- /* If the card is already configured, look up the port and irq */
- if (link->function_config) {
- unsigned int port = 0;
- if ((link->resource[1]->end != 0) &&
- (resource_size(link->resource[1]) == 8)) {
- port = link->resource[1]->end;
- info->slave = 1;
- } else if ((info->manfid == MANFID_OSITECH) &&
- (resource_size(link->resource[0]) == 0x40)) {
- port = link->resource[0]->start + 0x28;
- info->slave = 1;
- }
- if (info->slave) {
- return setup_serial(link, info, port,
- link->irq);
- }
- }
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+ link->resource[0]->end = 8;
/* First pass: look for a config entry that looks normal.
* Two tries: without IO aliases, then with aliases */
@@ -491,8 +495,7 @@
if (!pcmcia_loop_config(link, simple_config_check_notpicky, NULL))
goto found_port;
- printk(KERN_NOTICE
- "serial_cs: no usable port range found, giving up\n");
+ dev_warn(&link->dev, "no usable port range found, giving up\n");
return -1;
found_port:
@@ -558,6 +561,7 @@
int i, base2 = 0;
/* First, look for a generic full-sized window */
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
link->resource[0]->end = info->multi * 8;
if (pcmcia_loop_config(link, multi_config_check, &base2)) {
/* If that didn't work, look for two windows */
@@ -565,15 +569,14 @@
info->multi = 2;
if (pcmcia_loop_config(link, multi_config_check_notpicky,
&base2)) {
- printk(KERN_NOTICE "serial_cs: no usable port range"
+ dev_warn(&link->dev, "no usable port range "
"found, giving up\n");
return -ENODEV;
}
}
if (!link->irq)
- dev_warn(&link->dev,
- "serial_cs: no usable IRQ found, continuing...\n");
+ dev_warn(&link->dev, "no usable IRQ found, continuing...\n");
/*
* Apply any configuration quirks.
@@ -675,6 +678,7 @@
multifunction cards that ask for appropriate IO port ranges */
if ((info->multi == 0) &&
(link->has_func_id) &&
+ (link->socket->pcmcia_pfc == 0) &&
((link->func_id == CISTPL_FUNCID_MULTI) ||
(link->func_id == CISTPL_FUNCID_SERIAL)))
pcmcia_loop_config(link, serial_check_for_multi, info);
@@ -685,7 +689,13 @@
if (info->quirk && info->quirk->multi != -1)
info->multi = info->quirk->multi;
- if (info->multi > 1)
+ dev_info(&link->dev,
+ "trying to set up [0x%04x:0x%04x] (pfc: %d, multi: %d, quirk: %p)\n",
+ link->manf_id, link->card_id,
+ link->socket->pcmcia_pfc, info->multi, info->quirk);
+ if (link->socket->pcmcia_pfc)
+ i = pfc_config(link);
+ else if (info->multi > 1)
i = multi_config(link);
else
i = simple_config(link);
@@ -704,7 +714,7 @@
return 0;
failed:
- dev_warn(&link->dev, "serial_cs: failed to initialize\n");
+ dev_warn(&link->dev, "failed to initialize\n");
serial_remove(link);
return -ENODEV;
}
diff --git a/drivers/serial/sn_console.c b/drivers/serial/sn_console.c
index 7e5e5ef..cff9a30 100644
--- a/drivers/serial/sn_console.c
+++ b/drivers/serial/sn_console.c
@@ -492,7 +492,7 @@
sysrq_requested = 0;
if (ch && time_before(jiffies, sysrq_timeout)) {
spin_unlock_irqrestore(&port->sc_port.lock, flags);
- handle_sysrq(ch, NULL);
+ handle_sysrq(ch);
spin_lock_irqsave(&port->sc_port.lock, flags);
/* ignore actual sysrq command char */
continue;
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index acd35d1..4c37c4e2 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -503,8 +503,9 @@
msg->state = NULL;
if (msg->complete)
msg->complete(msg->context);
- /* This message is completed, so let's turn off the clock! */
+ /* This message is completed, so let's turn off the clocks! */
clk_disable(pl022->clk);
+ amba_pclk_disable(pl022->adev);
}
/**
@@ -1139,9 +1140,10 @@
/* Setup the SPI using the per chip configuration */
pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi);
/*
- * We enable the clock here, then the clock will be disabled when
+ * We enable the clocks here, then the clocks will be disabled when
* giveback() is called in each method (poll/interrupt/DMA)
*/
+ amba_pclk_enable(pl022->adev);
clk_enable(pl022->clk);
restore_state(pl022);
flush(pl022);
@@ -1786,11 +1788,9 @@
}
/* Disable SSP */
- clk_enable(pl022->clk);
writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
SSP_CR1(pl022->virtbase));
load_ssp_default_config(pl022);
- clk_disable(pl022->clk);
status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022",
pl022);
@@ -1818,6 +1818,8 @@
goto err_spi_register;
}
dev_dbg(dev, "probe succeded\n");
+ /* Disable the silicon block pclk and clock it when needed */
+ amba_pclk_disable(adev);
return 0;
err_spi_register:
@@ -1879,9 +1881,9 @@
return status;
}
- clk_enable(pl022->clk);
+ amba_pclk_enable(adev);
load_ssp_default_config(pl022);
- clk_disable(pl022->clk);
+ amba_pclk_disable(adev);
dev_dbg(&adev->dev, "suspended\n");
return 0;
}
@@ -1981,7 +1983,7 @@
return amba_driver_register(&pl022_driver);
}
-module_init(pl022_init);
+subsys_initcall(pl022_init);
static void __exit pl022_exit(void)
{
diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c
index d256cb0..5624785 100644
--- a/drivers/spi/dw_spi.c
+++ b/drivers/spi/dw_spi.c
@@ -181,10 +181,6 @@
wait_till_not_busy(dws);
}
-static void null_cs_control(u32 command)
-{
-}
-
static int null_writer(struct dw_spi *dws)
{
u8 n_bytes = dws->n_bytes;
@@ -322,7 +318,7 @@
struct spi_transfer,
transfer_list);
- if (!last_transfer->cs_change)
+ if (!last_transfer->cs_change && dws->cs_control)
dws->cs_control(MRST_SPI_DEASSERT);
msg->state = NULL;
@@ -396,6 +392,11 @@
static irqreturn_t dw_spi_irq(int irq, void *dev_id)
{
struct dw_spi *dws = dev_id;
+ u16 irq_status, irq_mask = 0x3f;
+
+ irq_status = dw_readw(dws, isr) & irq_mask;
+ if (!irq_status)
+ return IRQ_NONE;
if (!dws->cur_msg) {
spi_mask_intr(dws, SPI_INT_TXEI);
@@ -544,13 +545,13 @@
*/
if (dws->cs_control) {
if (dws->rx && dws->tx)
- chip->tmode = 0x00;
+ chip->tmode = SPI_TMOD_TR;
else if (dws->rx)
- chip->tmode = 0x02;
+ chip->tmode = SPI_TMOD_RO;
else
- chip->tmode = 0x01;
+ chip->tmode = SPI_TMOD_TO;
- cr0 &= ~(0x3 << SPI_MODE_OFFSET);
+ cr0 &= ~SPI_TMOD_MASK;
cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
}
@@ -699,9 +700,6 @@
chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
if (!chip)
return -ENOMEM;
-
- chip->cs_control = null_cs_control;
- chip->enable_dma = 0;
}
/*
@@ -883,7 +881,7 @@
dws->dma_inited = 0;
dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60);
- ret = request_irq(dws->irq, dw_spi_irq, 0,
+ ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED,
"dw_spi", dws);
if (ret < 0) {
dev_err(&master->dev, "can not get IRQ\n");
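
Registering with IRQF_SHARED obliges the dw_spi handler to decide whether its own device actually raised the line: it reads and masks the interrupt status first and returns IRQ_NONE when nothing is pending, so the kernel can pass the interrupt to the other sharers and still detect stuck lines. A pared-down sketch of that contract (kernel context assumed; the mask value matches the hunk above):

    /* Sketch only: assumes <linux/interrupt.h> and the dw_readw() accessor. */
    static irqreturn_t shared_irq_handler(int irq, void *dev_id)
    {
            struct dw_spi *dws = dev_id;
            u16 irq_status = dw_readw(dws, isr) & 0x3f;  /* mask to our bits */

            if (!irq_status)
                    return IRQ_NONE;        /* not ours; let sharers run */

            /* ... handle and acknowledge the interrupt ... */
            return IRQ_HANDLED;
    }
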
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index a9e5c79..b5a78a1 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -23,6 +23,7 @@
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/mutex.h>
+#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
@@ -86,6 +87,10 @@
const struct spi_device *spi = to_spi_device(dev);
const struct spi_driver *sdrv = to_spi_driver(drv);
+ /* Attempt an OF style match */
+ if (of_driver_match_device(dev, drv))
+ return 1;
+
if (sdrv->id_table)
return !!spi_match_id(sdrv->id_table, spi);
@@ -554,11 +559,9 @@
EXPORT_SYMBOL_GPL(spi_register_master);
-static int __unregister(struct device *dev, void *master_dev)
+static int __unregister(struct device *dev, void *null)
{
- /* note: before about 2.6.14-rc1 this would corrupt memory: */
- if (dev != master_dev)
- spi_unregister_device(to_spi_device(dev));
+ spi_unregister_device(to_spi_device(dev));
return 0;
}
@@ -576,8 +579,7 @@
{
int dummy;
- dummy = device_for_each_child(master->dev.parent, &master->dev,
- __unregister);
+ dummy = device_for_each_child(&master->dev, NULL, __unregister);
device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_master);
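Two independent cleanups here: spi_match_device now tries an OF (device tree) match before falling back to the legacy id table, and unregistering a master walks the master's own children instead of filtering the parent's child list, since every child of the master device is known to be an spi_device. A rough sketch of the match ordering, assuming of_driver_match_device() from <linux/of_device.h>:

    /* Sketch of a bus match callback that prefers device-tree bindings. */
    static int my_bus_match(struct device *dev, struct device_driver *drv)
    {
            if (of_driver_match_device(dev, drv))   /* compatible-string match */
                    return 1;

            /* ... fall back to id-table / name matching ... */
            return 0;
    }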
diff --git a/drivers/spi/spi_gpio.c b/drivers/spi/spi_gpio.c
index e24a634..63e51b0 100644
--- a/drivers/spi/spi_gpio.c
+++ b/drivers/spi/spi_gpio.c
@@ -350,7 +350,7 @@
spi_gpio->bitbang.master = spi_master_get(master);
spi_gpio->bitbang.chipselect = spi_gpio_chipselect;
- if ((master_flags & (SPI_MASTER_NO_RX | SPI_MASTER_NO_RX)) == 0) {
+ if ((master_flags & (SPI_MASTER_NO_TX | SPI_MASTER_NO_RX)) == 0) {
spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0;
spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1;
spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2;
diff --git a/drivers/spi/spi_mpc8xxx.c b/drivers/spi/spi_mpc8xxx.c
index d31b57f..1dd86b8 100644
--- a/drivers/spi/spi_mpc8xxx.c
+++ b/drivers/spi/spi_mpc8xxx.c
@@ -408,11 +408,17 @@
xfer_ofs = mspi->xfer_in_progress->len - mspi->count;
- out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
+ if (mspi->rx_dma == mspi->dma_dummy_rx)
+ out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma);
+ else
+ out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
out_be16(&rx_bd->cbd_datlen, 0);
out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP);
- out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
+ if (mspi->tx_dma == mspi->dma_dummy_tx)
+ out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma);
+ else
+ out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
out_be16(&tx_bd->cbd_datlen, xfer_len);
out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP |
BD_SC_LAST);
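The mpc8xxx hunk stops adding the transfer offset when a buffer descriptor points at the driver's dummy DMA buffer: the dummy page is a fixed scratch area reused for every chunk of a transfer that has no real rx or tx data, so only real buffers should be walked. In sketch form, with names taken from the hunk:

    /* Advance into real buffers only; the dummy page is never offset. */
    dma_addr_t rx_addr = mspi->rx_dma;
    if (rx_addr != mspi->dma_dummy_rx)
            rx_addr += xfer_ofs;            /* step through the user buffer */
    out_be32(&rx_bd->cbd_bufaddr, rx_addr);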
diff --git a/drivers/spi/spi_s3c64xx.c b/drivers/spi/spi_s3c64xx.c
index 9736581..c3038da 100644
--- a/drivers/spi/spi_s3c64xx.c
+++ b/drivers/spi/spi_s3c64xx.c
@@ -200,6 +200,9 @@
val = readl(regs + S3C64XX_SPI_STATUS);
} while (TX_FIFO_LVL(val, sci) && loops--);
+ if (loops == 0)
+ dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");
+
/* Flush RxFIFO*/
loops = msecs_to_loops(1);
do {
@@ -210,6 +213,9 @@
break;
} while (loops--);
+ if (loops == 0)
+ dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");
+
val = readl(regs + S3C64XX_SPI_CH_CFG);
val &= ~S3C64XX_SPI_CH_SW_RST;
writel(val, regs + S3C64XX_SPI_CH_CFG);
@@ -320,16 +326,17 @@
/* millisecs to xfer 'len' bytes @ 'cur_speed' */
ms = xfer->len * 8 * 1000 / sdd->cur_speed;
- ms += 5; /* some tolerance */
+ ms += 10; /* some tolerance */
if (dma_mode) {
val = msecs_to_jiffies(ms) + 10;
val = wait_for_completion_timeout(&sdd->xfer_completion, val);
} else {
+ u32 status;
val = msecs_to_loops(ms);
do {
- val = readl(regs + S3C64XX_SPI_STATUS);
- } while (RX_FIFO_LVL(val, sci) < xfer->len && --val);
+ status = readl(regs + S3C64XX_SPI_STATUS);
+ } while (RX_FIFO_LVL(status, sci) < xfer->len && --val);
}
if (!val)
@@ -447,8 +454,8 @@
writel(val, regs + S3C64XX_SPI_CLK_CFG);
}
-void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
- int size, enum s3c2410_dma_buffresult res)
+static void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
+ int size, enum s3c2410_dma_buffresult res)
{
struct s3c64xx_spi_driver_data *sdd = buf_id;
unsigned long flags;
@@ -467,8 +474,8 @@
spin_unlock_irqrestore(&sdd->lock, flags);
}
-void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id,
- int size, enum s3c2410_dma_buffresult res)
+static void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id,
+ int size, enum s3c2410_dma_buffresult res)
{
struct s3c64xx_spi_driver_data *sdd = buf_id;
unsigned long flags;
@@ -508,8 +515,9 @@
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
if (xfer->tx_buf != NULL) {
- xfer->tx_dma = dma_map_single(dev, xfer->tx_buf,
- xfer->len, DMA_TO_DEVICE);
+ xfer->tx_dma = dma_map_single(dev,
+ (void *)xfer->tx_buf, xfer->len,
+ DMA_TO_DEVICE);
if (dma_mapping_error(dev, xfer->tx_dma)) {
dev_err(dev, "dma_map_single Tx failed\n");
xfer->tx_dma = XFER_DMAADDR_INVALID;
@@ -919,6 +927,13 @@
return -ENODEV;
}
+ sci = pdev->dev.platform_data;
+ if (!sci->src_clk_name) {
+ dev_err(&pdev->dev,
+ "Board init must call s3c64xx_spi_set_info()\n");
+ return -EINVAL;
+ }
+
/* Check for availability of necessary resource */
dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
@@ -946,8 +961,6 @@
return -ENOMEM;
}
- sci = pdev->dev.platform_data;
-
platform_set_drvdata(pdev, master);
sdd = spi_master_get_devdata(master);
@@ -1170,7 +1183,7 @@
{
return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe);
}
-module_init(s3c64xx_spi_init);
+subsys_initcall(s3c64xx_spi_init);
static void __exit s3c64xx_spi_exit(void)
{
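The s3c64xx hunks fix several small problems at once: the FIFO flush loops now warn instead of timing out silently, the polled wait no longer clobbers its own loop counter by reusing val for the status read, the DMA callbacks become static, and the platform data is validated before its first dereference. The corrected polling shape, as a sketch (msecs_to_loops() is the driver's own helper; the -EIO return is a hypothetical way to surface the timeout):

    unsigned long val = msecs_to_loops(ms);         /* polling budget */
    u32 status;

    do {
            status = readl(regs + S3C64XX_SPI_STATUS);
    } while (RX_FIFO_LVL(status, sci) < xfer->len && --val);

    if (!val)
            return -EIO;                            /* budget exhausted */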
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 4a7a7a7..335311a 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -113,8 +113,6 @@
source "drivers/staging/memrar/Kconfig"
-source "drivers/staging/sep/Kconfig"
-
source "drivers/staging/iio/Kconfig"
source "drivers/staging/zram/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index ca5c03e..e3f1e1b 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -38,7 +38,6 @@
obj-$(CONFIG_HYPERV) += hv/
obj-$(CONFIG_VME_BUS) += vme/
obj-$(CONFIG_MRST_RAR_HANDLER) += memrar/
-obj-$(CONFIG_DX_SEP) += sep/
obj-$(CONFIG_IIO) += iio/
obj-$(CONFIG_ZRAM) += zram/
obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/
diff --git a/drivers/staging/batman-adv/bat_sysfs.c b/drivers/staging/batman-adv/bat_sysfs.c
index b4a8d5e..05ca15a 100644
--- a/drivers/staging/batman-adv/bat_sysfs.c
+++ b/drivers/staging/batman-adv/bat_sysfs.c
@@ -267,6 +267,10 @@
if (atomic_read(&bat_priv->log_level) == log_level_tmp)
return count;
+ bat_info(net_dev, "Changing log level from: %i to: %li\n",
+ atomic_read(&bat_priv->log_level),
+ log_level_tmp);
+
atomic_set(&bat_priv->log_level, (unsigned)log_level_tmp);
return count;
}
diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c
index 92c216a..6e973a7 100644
--- a/drivers/staging/batman-adv/hard-interface.c
+++ b/drivers/staging/batman-adv/hard-interface.c
@@ -30,7 +30,6 @@
#include "hash.h"
#include <linux/if_arp.h>
-#include <linux/netfilter_bridge.h>
#define MIN(x, y) ((x) < (y) ? (x) : (y))
@@ -129,6 +128,9 @@
static void update_mac_addresses(struct batman_if *batman_if)
{
+ if (!batman_if || !batman_if->packet_buff)
+ return;
+
addr_to_string(batman_if->addr_str, batman_if->net_dev->dev_addr);
memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig,
@@ -194,8 +196,6 @@
if (batman_if->if_status != IF_INACTIVE)
return;
- dev_hold(batman_if->net_dev);
-
update_mac_addresses(batman_if);
batman_if->if_status = IF_TO_BE_ACTIVATED;
@@ -222,8 +222,6 @@
(batman_if->if_status != IF_TO_BE_ACTIVATED))
return;
- dev_put(batman_if->net_dev);
-
batman_if->if_status = IF_INACTIVE;
bat_info(net_dev, "Interface deactivated: %s\n", batman_if->dev);
@@ -318,11 +316,13 @@
if (ret != 1)
goto out;
+ dev_hold(net_dev);
+
batman_if = kmalloc(sizeof(struct batman_if), GFP_ATOMIC);
if (!batman_if) {
pr_err("Can't add interface (%s): out of memory\n",
net_dev->name);
- goto out;
+ goto release_dev;
}
batman_if->dev = kstrdup(net_dev->name, GFP_ATOMIC);
@@ -336,6 +336,7 @@
batman_if->if_num = -1;
batman_if->net_dev = net_dev;
batman_if->if_status = IF_NOT_IN_USE;
+ batman_if->packet_buff = NULL;
INIT_LIST_HEAD(&batman_if->list);
check_known_mac_addr(batman_if->net_dev->dev_addr);
@@ -346,6 +347,8 @@
kfree(batman_if->dev);
free_if:
kfree(batman_if);
+release_dev:
+ dev_put(net_dev);
out:
return NULL;
}
@@ -374,6 +377,7 @@
batman_if->if_status = IF_TO_BE_REMOVED;
list_del_rcu(&batman_if->list);
sysfs_del_hardif(&batman_if->hardif_obj);
+ dev_put(batman_if->net_dev);
call_rcu(&batman_if->rcu, hardif_free_interface);
}
@@ -393,15 +397,13 @@
/* FIXME: each batman_if will be attached to a softif */
struct bat_priv *bat_priv = netdev_priv(soft_device);
- if (!batman_if)
- batman_if = hardif_add_interface(net_dev);
+ if (!batman_if && event == NETDEV_REGISTER)
+ batman_if = hardif_add_interface(net_dev);
if (!batman_if)
goto out;
switch (event) {
- case NETDEV_REGISTER:
- break;
case NETDEV_UP:
hardif_activate_interface(soft_device, bat_priv, batman_if);
break;
@@ -428,11 +430,6 @@
return NOTIFY_DONE;
}
-static int batman_skb_recv_finish(struct sk_buff *skb)
-{
- return NF_ACCEPT;
-}
-
/* receive a packet with the batman ethertype coming on a hard
* interface */
int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
@@ -442,8 +439,6 @@
struct bat_priv *bat_priv = netdev_priv(soft_device);
struct batman_packet *batman_packet;
struct batman_if *batman_if;
- struct net_device_stats *stats;
- struct rtnl_link_stats64 temp;
int ret;
skb = skb_share_check(skb, GFP_ATOMIC);
@@ -455,13 +450,6 @@
if (atomic_read(&module_state) != MODULE_ACTIVE)
goto err_free;
- /* if netfilter/ebtables wants to block incoming batman
- * packets then give them a chance to do so here */
- ret = NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, dev, NULL,
- batman_skb_recv_finish);
- if (ret != 1)
- goto err_out;
-
/* packet should hold at least type and version */
if (unlikely(skb_headlen(skb) < 2))
goto err_free;
@@ -479,12 +467,6 @@
if (batman_if->if_status != IF_ACTIVE)
goto err_free;
- stats = (struct net_device_stats *)dev_get_stats(skb->dev, &temp);
- if (stats) {
- stats->rx_packets++;
- stats->rx_bytes += skb->len;
- }
-
batman_packet = (struct batman_packet *)skb->data;
if (batman_packet->version != COMPAT_VERSION) {
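The hard-interface hunks rework the net_device reference counting: the dev_hold() is taken once when the batman_if is created and released either on the allocation error path or when the interface is finally removed, rather than being taken and dropped on every activate/deactivate cycle. The hold-early, unwind-on-error shape in miniature (hypothetical names):

    struct my_if *add_interface(struct net_device *net_dev)
    {
            struct my_if *p;

            dev_hold(net_dev);              /* pin the device for our lifetime */
            p = kmalloc(sizeof(*p), GFP_ATOMIC);
            if (!p)
                    goto release_dev;       /* undo the hold on failure */

            p->net_dev = net_dev;
            return p;

    release_dev:
            dev_put(net_dev);
            return NULL;
    }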
diff --git a/drivers/staging/batman-adv/icmp_socket.c b/drivers/staging/batman-adv/icmp_socket.c
index fc3d32c..3ae7dd2 100644
--- a/drivers/staging/batman-adv/icmp_socket.c
+++ b/drivers/staging/batman-adv/icmp_socket.c
@@ -67,6 +67,7 @@
INIT_LIST_HEAD(&socket_client->queue_list);
socket_client->queue_len = 0;
socket_client->index = i;
+ socket_client->bat_priv = inode->i_private;
spin_lock_init(&socket_client->lock);
init_waitqueue_head(&socket_client->queue_wait);
@@ -151,9 +152,8 @@
static ssize_t bat_socket_write(struct file *file, const char __user *buff,
size_t len, loff_t *off)
{
- /* FIXME: each orig_node->batman_if will be attached to a softif */
- struct bat_priv *bat_priv = netdev_priv(soft_device);
struct socket_client *socket_client = file->private_data;
+ struct bat_priv *bat_priv = socket_client->bat_priv;
struct icmp_packet_rr icmp_packet;
struct orig_node *orig_node;
struct batman_if *batman_if;
@@ -168,6 +168,9 @@
return -EINVAL;
}
+ if (!bat_priv->primary_if)
+ return -EFAULT;
+
if (len >= sizeof(struct icmp_packet_rr))
packet_len = sizeof(struct icmp_packet_rr);
@@ -223,7 +226,8 @@
if (batman_if->if_status != IF_ACTIVE)
goto dst_unreach;
- memcpy(icmp_packet.orig, batman_if->net_dev->dev_addr, ETH_ALEN);
+ memcpy(icmp_packet.orig,
+ bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
if (packet_len == sizeof(struct icmp_packet_rr))
memcpy(icmp_packet.rr, batman_if->net_dev->dev_addr, ETH_ALEN);
@@ -271,7 +275,7 @@
goto err;
d = debugfs_create_file(ICMP_SOCKET, S_IFREG | S_IWUSR | S_IRUSR,
- bat_priv->debug_dir, NULL, &fops);
+ bat_priv->debug_dir, bat_priv, &fops);
if (d)
goto err;
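Rather than reaching for the global soft_device, the socket code now stores the per-mesh bat_priv in the debugfs file (the fifth argument to debugfs_create_file()) and recovers it in the open handler through inode->i_private. The plumbing, sketched against the generic debugfs API with hypothetical names:

    /* Setup: hand the private pointer to debugfs along with the fops. */
    debugfs_create_file("socket", S_IFREG | S_IWUSR | S_IRUSR,
                        parent_dir, priv, &my_fops);

    /* Open handler: recover the pointer from the inode. */
    static int my_open(struct inode *inode, struct file *file)
    {
            struct my_priv *priv = inode->i_private;

            /* ... stash priv in the per-client state ... */
            return 0;
    }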
diff --git a/drivers/staging/batman-adv/main.c b/drivers/staging/batman-adv/main.c
index 2686019..ef7c20a 100644
--- a/drivers/staging/batman-adv/main.c
+++ b/drivers/staging/batman-adv/main.c
@@ -250,10 +250,13 @@
int is_my_mac(uint8_t *addr)
{
struct batman_if *batman_if;
+
rcu_read_lock();
list_for_each_entry_rcu(batman_if, &if_list, list) {
- if ((batman_if->net_dev) &&
- (compare_orig(batman_if->net_dev->dev_addr, addr))) {
+ if (batman_if->if_status != IF_ACTIVE)
+ continue;
+
+ if (compare_orig(batman_if->net_dev->dev_addr, addr)) {
rcu_read_unlock();
return 1;
}
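is_my_mac() now skips interfaces that are not active, which also guarantees net_dev is valid before the address compare. The surrounding locking is the standard RCU read-side list walk; in sketch form:

    rcu_read_lock();                        /* read-side critical section */
    list_for_each_entry_rcu(p, &my_list, list) {
            if (matches(p, addr)) {
                    rcu_read_unlock();      /* drop before returning */
                    return 1;
            }
    }
    rcu_read_unlock();
    return 0;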
diff --git a/drivers/staging/batman-adv/originator.c b/drivers/staging/batman-adv/originator.c
index 28bb627..de5a8c1 100644
--- a/drivers/staging/batman-adv/originator.c
+++ b/drivers/staging/batman-adv/originator.c
@@ -391,11 +391,12 @@
int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
{
struct orig_node *orig_node;
+ unsigned long flags;
HASHIT(hashit);
/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
* if_num */
- spin_lock(&orig_hash_lock);
+ spin_lock_irqsave(&orig_hash_lock, flags);
while (hash_iterate(orig_hash, &hashit)) {
orig_node = hashit.bucket->data;
@@ -404,11 +405,11 @@
goto err;
}
- spin_unlock(&orig_hash_lock);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
return 0;
err:
- spin_unlock(&orig_hash_lock);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
return -ENOMEM;
}
@@ -468,12 +469,13 @@
{
struct batman_if *batman_if_tmp;
struct orig_node *orig_node;
+ unsigned long flags;
HASHIT(hashit);
int ret;
/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
* if_num */
- spin_lock(&orig_hash_lock);
+ spin_lock_irqsave(&orig_hash_lock, flags);
while (hash_iterate(orig_hash, &hashit)) {
orig_node = hashit.bucket->data;
@@ -500,10 +502,10 @@
rcu_read_unlock();
batman_if->if_num = -1;
- spin_unlock(&orig_hash_lock);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
return 0;
err:
- spin_unlock(&orig_hash_lock);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
return -ENOMEM;
}
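orig_hash_lock is also taken from contexts with interrupts disabled, so the plain spin_lock()/spin_unlock() pairs in these resize paths risked a deadlock if an interrupt came in while the lock was held; the hunks convert them to the irqsave variants. The general rule, sketched:

    unsigned long flags;

    spin_lock_irqsave(&my_lock, flags);     /* also disables local IRQs */
    /* ... touch data shared with IRQ context ... */
    spin_unlock_irqrestore(&my_lock, flags);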
diff --git a/drivers/staging/batman-adv/routing.c b/drivers/staging/batman-adv/routing.c
index 066cc91..032195e 100644
--- a/drivers/staging/batman-adv/routing.c
+++ b/drivers/staging/batman-adv/routing.c
@@ -783,6 +783,8 @@
static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len)
{
+ /* FIXME: each batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
struct orig_node *orig_node;
struct icmp_packet_rr *icmp_packet;
struct ethhdr *ethhdr;
@@ -801,6 +803,9 @@
return NET_RX_DROP;
}
+ if (!bat_priv->primary_if)
+ return NET_RX_DROP;
+
/* answer echo request (ping) */
/* get routing information */
spin_lock_irqsave(&orig_hash_lock, flags);
@@ -830,7 +835,8 @@
}
memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
- memcpy(icmp_packet->orig, ethhdr->h_dest, ETH_ALEN);
+ memcpy(icmp_packet->orig,
+ bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
icmp_packet->msg_type = ECHO_REPLY;
icmp_packet->ttl = TTL;
@@ -845,6 +851,8 @@
static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len)
{
+ /* FIXME: each batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
struct orig_node *orig_node;
struct icmp_packet *icmp_packet;
struct ethhdr *ethhdr;
@@ -865,6 +873,9 @@
return NET_RX_DROP;
}
+ if (!bat_priv->primary_if)
+ return NET_RX_DROP;
+
/* get routing information */
spin_lock_irqsave(&orig_hash_lock, flags);
orig_node = ((struct orig_node *)
@@ -892,7 +903,8 @@
}
memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
- memcpy(icmp_packet->orig, ethhdr->h_dest, ETH_ALEN);
+ memcpy(icmp_packet->orig,
+ bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
icmp_packet->msg_type = TTL_EXCEEDED;
icmp_packet->ttl = TTL;
diff --git a/drivers/staging/batman-adv/send.c b/drivers/staging/batman-adv/send.c
index 055edee..da3c82e4 100644
--- a/drivers/staging/batman-adv/send.c
+++ b/drivers/staging/batman-adv/send.c
@@ -29,7 +29,6 @@
#include "vis.h"
#include "aggregation.h"
-#include <linux/netfilter_bridge.h>
static void send_outstanding_bcast_packet(struct work_struct *work);
@@ -92,12 +91,9 @@
/* dev_queue_xmit() returns a negative result on error. However on
* congestion and traffic shaping, it drops and returns NET_XMIT_DROP
- * (which is > 0). This will not be treated as an error.
- * Also, if netfilter/ebtables wants to block outgoing batman
- * packets then giving them a chance to do so here */
+ * (which is > 0). This will not be treated as an error. */
- return NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
- dev_queue_xmit);
+ return dev_queue_xmit(skb);
send_skb_err:
kfree_skb(skb);
return NET_XMIT_DROP;
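With the bridge netfilter hook gone, frames go straight to dev_queue_xmit(). The retained comment documents its return convention: negative values are real errors, while positive codes such as NET_XMIT_DROP signal congestion or traffic-shaping drops and are deliberately not treated as failures. A sketch of a caller honouring that convention:

    int ret = dev_queue_xmit(skb);          /* consumes skb either way */

    if (ret < 0)
            pr_debug("xmit error %d\n", ret);       /* genuine error */
    /* ret == NET_XMIT_DROP (> 0) is congestion, not failure */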
diff --git a/drivers/staging/batman-adv/types.h b/drivers/staging/batman-adv/types.h
index 21d0717..9aa9d36 100644
--- a/drivers/staging/batman-adv/types.h
+++ b/drivers/staging/batman-adv/types.h
@@ -126,6 +126,7 @@
unsigned char index;
spinlock_t lock;
wait_queue_head_t queue_wait;
+ struct bat_priv *bat_priv;
};
struct socket_packet {
diff --git a/drivers/staging/comedi/drivers/das08_cs.c b/drivers/staging/comedi/drivers/das08_cs.c
index c6aa52f..48d9fb1 100644
--- a/drivers/staging/comedi/drivers/das08_cs.c
+++ b/drivers/staging/comedi/drivers/das08_cs.c
@@ -222,7 +222,6 @@
p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
p_dev->resource[0]->flags |=
pcmcia_io_cfg_data_width(io->flags);
- p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
p_dev->resource[0]->start = io->win[0].base;
p_dev->resource[0]->end = io->win[0].len;
if (io->nwin > 1) {
diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c
index 56e1157..64a0114 100644
--- a/drivers/staging/hv/netvsc_drv.c
+++ b/drivers/staging/hv/netvsc_drv.c
@@ -327,6 +327,9 @@
.ndo_stop = netvsc_close,
.ndo_start_xmit = netvsc_start_xmit,
.ndo_set_multicast_list = netvsc_set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
};
static int netvsc_probe(struct device *device)
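Wiring the generic ethernet helpers into net_device_ops gives the synthetic NIC sane MTU-change, address-validation and MAC-set behaviour without driver-specific code. The shape, with hypothetical driver callbacks:

    static const struct net_device_ops my_netdev_ops = {
            .ndo_open               = my_open,
            .ndo_stop               = my_close,
            .ndo_start_xmit         = my_start_xmit,
            .ndo_change_mtu         = eth_change_mtu,       /* generic helpers */
            .ndo_validate_addr      = eth_validate_addr,
            .ndo_set_mac_address    = eth_mac_addr,
    };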
diff --git a/drivers/staging/hv/ring_buffer.c b/drivers/staging/hv/ring_buffer.c
index 17bc762..d78c569 100644
--- a/drivers/staging/hv/ring_buffer.c
+++ b/drivers/staging/hv/ring_buffer.c
@@ -193,8 +193,7 @@
static inline u64
GetRingBufferIndices(struct hv_ring_buffer_info *RingInfo)
{
- return ((u64)RingInfo->RingBuffer->WriteIndex << 32)
- || RingInfo->RingBuffer->ReadIndex;
+ return (u64)RingInfo->RingBuffer->WriteIndex << 32;
}
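The old GetRingBufferIndices used logical OR (||) where bitwise OR (|) was intended, so the whole expression collapsed to 0 or 1 instead of packing both indices into one u64; the fix here simply returns the shifted write index, which is all the caller needs. Had both indices been wanted, the bitwise form would look like this sketch:

    /* Pack two 32-bit indices into one u64 (what '||' failed to do). */
    static inline u64 pack_indices(u32 write_index, u32 read_index)
    {
            return ((u64)write_index << 32) | read_index;
    }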
diff --git a/drivers/staging/hv/storvsc_api.h b/drivers/staging/hv/storvsc_api.h
index 0063bde..8505a1c 100644
--- a/drivers/staging/hv/storvsc_api.h
+++ b/drivers/staging/hv/storvsc_api.h
@@ -28,10 +28,10 @@
#include "vmbus_api.h"
/* Defines */
-#define STORVSC_RING_BUFFER_SIZE (10*PAGE_SIZE)
+#define STORVSC_RING_BUFFER_SIZE (20*PAGE_SIZE)
#define BLKVSC_RING_BUFFER_SIZE (20*PAGE_SIZE)
-#define STORVSC_MAX_IO_REQUESTS 64
+#define STORVSC_MAX_IO_REQUESTS 128
/*
* In Hyper-V, each port/path/target maps to 1 scsi host adapter. In
diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/staging/hv/storvsc_drv.c
index 075b61b..62882a4 100644
--- a/drivers/staging/hv/storvsc_drv.c
+++ b/drivers/staging/hv/storvsc_drv.c
@@ -495,7 +495,7 @@
/* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */
- if (j == 0)
+ if (bounce_addr == 0)
bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
while (srclen) {
@@ -556,7 +556,7 @@
destlen = orig_sgl[i].length;
/* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */
- if (j == 0)
+ if (bounce_addr == 0)
bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
while (destlen) {
@@ -615,6 +615,7 @@
unsigned int request_size = 0;
int i;
struct scatterlist *sgl;
+ unsigned int sg_count = 0;
DPRINT_DBG(STORVSC_DRV, "scmnd %p dir %d, use_sg %d buf %p len %d "
"queue depth %d tagged %d", scmnd, scmnd->sc_data_direction,
@@ -697,6 +698,7 @@
request->DataBuffer.Length = scsi_bufflen(scmnd);
if (scsi_sg_count(scmnd)) {
sgl = (struct scatterlist *)scsi_sglist(scmnd);
+ sg_count = scsi_sg_count(scmnd);
/* check if we need to bounce the sgl */
if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
@@ -731,15 +733,16 @@
scsi_sg_count(scmnd));
sgl = cmd_request->bounce_sgl;
+ sg_count = cmd_request->bounce_sgl_count;
}
request->DataBuffer.Offset = sgl[0].offset;
- for (i = 0; i < scsi_sg_count(scmnd); i++) {
+ for (i = 0; i < sg_count; i++) {
DPRINT_DBG(STORVSC_DRV, "sgl[%d] len %d offset %d\n",
i, sgl[i].length, sgl[i].offset);
request->DataBuffer.PfnArray[i] =
- page_to_pfn(sg_page((&sgl[i])));
+ page_to_pfn(sg_page((&sgl[i])));
}
} else if (scsi_sglist(scmnd)) {
/* ASSERT(scsi_bufflen(scmnd) <= PAGE_SIZE); */
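Two related bounce-buffer fixes: keying the first kmap_atomic() on j == 0 broke as soon as copying resumed mid-array, so the loops now test bounce_addr == 0 directly, and the PFN array is built from whichever scatterlist is actually in use, bounce or original. The element-count selection, sketched with a hypothetical needs_bounce flag:

    sgl = (struct scatterlist *)scsi_sglist(scmnd);
    sg_count = scsi_sg_count(scmnd);
    if (needs_bounce) {                     /* bounced: use the bounce list */
            sgl = cmd_request->bounce_sgl;
            sg_count = cmd_request->bounce_sgl_count;
    }
    for (i = 0; i < sg_count; i++)
            request->DataBuffer.PfnArray[i] = page_to_pfn(sg_page(&sgl[i]));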
diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig
index 638ad6b..9493128 100644
--- a/drivers/staging/octeon/Kconfig
+++ b/drivers/staging/octeon/Kconfig
@@ -1,6 +1,6 @@
config OCTEON_ETHERNET
tristate "Cavium Networks Octeon Ethernet support"
- depends on CPU_CAVIUM_OCTEON
+ depends on CPU_CAVIUM_OCTEON && NETDEVICES
select PHYLIB
select MDIO_OCTEON
help
diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c
index a0fe31d..ebf9074 100644
--- a/drivers/staging/rt2860/usb_main_dev.c
+++ b/drivers/staging/rt2860/usb_main_dev.c
@@ -44,6 +44,7 @@
{USB_DEVICE(0x07B8, 0x2870)}, /* AboCom */
{USB_DEVICE(0x07B8, 0x2770)}, /* AboCom */
{USB_DEVICE(0x0DF6, 0x0039)}, /* Sitecom 2770 */
+ {USB_DEVICE(0x0DF6, 0x003F)}, /* Sitecom 2770 */
{USB_DEVICE(0x083A, 0x7512)}, /* Arcadyan 2770 */
{USB_DEVICE(0x0789, 0x0162)}, /* Logitec 2870 */
{USB_DEVICE(0x0789, 0x0163)}, /* Logitec 2870 */
@@ -95,7 +96,8 @@
{USB_DEVICE(0x050d, 0x815c)},
{USB_DEVICE(0x1482, 0x3C09)}, /* Abocom */
{USB_DEVICE(0x14B2, 0x3C09)}, /* Alpha */
- {USB_DEVICE(0x04E8, 0x2018)}, /* samsung */
+ {USB_DEVICE(0x04E8, 0x2018)}, /* samsung linkstick2 */
+ {USB_DEVICE(0x1690, 0x0740)}, /* Askey */
{USB_DEVICE(0x5A57, 0x0280)}, /* Zinwell */
{USB_DEVICE(0x5A57, 0x0282)}, /* Zinwell */
{USB_DEVICE(0x7392, 0x7718)},
@@ -105,21 +107,34 @@
{USB_DEVICE(0x1737, 0x0071)}, /* Linksys WUSB600N */
{USB_DEVICE(0x0411, 0x00e8)}, /* Buffalo WLI-UC-G300N */
{USB_DEVICE(0x050d, 0x815c)}, /* Belkin F5D8053 */
+ {USB_DEVICE(0x100D, 0x9031)}, /* Motorola 2770 */
#endif /* RT2870 // */
#ifdef RT3070
{USB_DEVICE(0x148F, 0x3070)}, /* Ralink 3070 */
{USB_DEVICE(0x148F, 0x3071)}, /* Ralink 3071 */
{USB_DEVICE(0x148F, 0x3072)}, /* Ralink 3072 */
{USB_DEVICE(0x0DB0, 0x3820)}, /* Ralink 3070 */
+ {USB_DEVICE(0x0DB0, 0x871C)}, /* Ralink 3070 */
+ {USB_DEVICE(0x0DB0, 0x822C)}, /* Ralink 3070 */
+ {USB_DEVICE(0x0DB0, 0x871B)}, /* Ralink 3070 */
+ {USB_DEVICE(0x0DB0, 0x822B)}, /* Ralink 3070 */
{USB_DEVICE(0x0DF6, 0x003E)}, /* Sitecom 3070 */
{USB_DEVICE(0x0DF6, 0x0042)}, /* Sitecom 3072 */
+ {USB_DEVICE(0x0DF6, 0x0048)}, /* Sitecom 3070 */
+ {USB_DEVICE(0x0DF6, 0x0047)}, /* Sitecom 3071 */
{USB_DEVICE(0x14B2, 0x3C12)}, /* AL 3070 */
{USB_DEVICE(0x18C5, 0x0012)}, /* Corega 3070 */
{USB_DEVICE(0x083A, 0x7511)}, /* Arcadyan 3070 */
+ {USB_DEVICE(0x083A, 0xA701)}, /* SMC 3070 */
+ {USB_DEVICE(0x083A, 0xA702)}, /* SMC 3072 */
{USB_DEVICE(0x1740, 0x9703)}, /* EnGenius 3070 */
{USB_DEVICE(0x1740, 0x9705)}, /* EnGenius 3071 */
{USB_DEVICE(0x1740, 0x9706)}, /* EnGenius 3072 */
+ {USB_DEVICE(0x1740, 0x9707)}, /* EnGenius 3070 */
+ {USB_DEVICE(0x1740, 0x9708)}, /* EnGenius 3071 */
+ {USB_DEVICE(0x1740, 0x9709)}, /* EnGenius 3072 */
{USB_DEVICE(0x13D3, 0x3273)}, /* AzureWave 3070 */
+ {USB_DEVICE(0x13D3, 0x3305)}, /* AzureWave 3070*/
{USB_DEVICE(0x1044, 0x800D)}, /* Gigabyte GN-WB32L 3070 */
{USB_DEVICE(0x2019, 0xAB25)}, /* Planex Communications, Inc. RT3070 */
{USB_DEVICE(0x07B8, 0x3070)}, /* AboCom 3070 */
@@ -132,14 +147,36 @@
{USB_DEVICE(0x07D1, 0x3C0D)}, /* D-Link 3070 */
{USB_DEVICE(0x07D1, 0x3C0E)}, /* D-Link 3070 */
{USB_DEVICE(0x07D1, 0x3C0F)}, /* D-Link 3070 */
+ {USB_DEVICE(0x07D1, 0x3C16)}, /* D-Link 3070 */
+ {USB_DEVICE(0x07D1, 0x3C17)}, /* D-Link 8070 */
{USB_DEVICE(0x1D4D, 0x000C)}, /* Pegatron Corporation 3070 */
{USB_DEVICE(0x1D4D, 0x000E)}, /* Pegatron Corporation 3070 */
{USB_DEVICE(0x5A57, 0x5257)}, /* Zinwell 3070 */
{USB_DEVICE(0x5A57, 0x0283)}, /* Zinwell 3072 */
{USB_DEVICE(0x04BB, 0x0945)}, /* I-O DATA 3072 */
+ {USB_DEVICE(0x04BB, 0x0947)}, /* I-O DATA 3070 */
+ {USB_DEVICE(0x04BB, 0x0948)}, /* I-O DATA 3072 */
{USB_DEVICE(0x203D, 0x1480)}, /* Encore 3070 */
+ {USB_DEVICE(0x20B8, 0x8888)}, /* PARA INDUSTRIAL 3070 */
+ {USB_DEVICE(0x0B05, 0x1784)}, /* Asus 3072 */
+ {USB_DEVICE(0x203D, 0x14A9)}, /* Encore 3070*/
+ {USB_DEVICE(0x0DB0, 0x899A)}, /* MSI 3070*/
+ {USB_DEVICE(0x0DB0, 0x3870)}, /* MSI 3070*/
+ {USB_DEVICE(0x0DB0, 0x870A)}, /* MSI 3070*/
+ {USB_DEVICE(0x0DB0, 0x6899)}, /* MSI 3070 */
+ {USB_DEVICE(0x0DB0, 0x3822)}, /* MSI 3070 */
+ {USB_DEVICE(0x0DB0, 0x3871)}, /* MSI 3070 */
+ {USB_DEVICE(0x0DB0, 0x871A)}, /* MSI 3070 */
+ {USB_DEVICE(0x0DB0, 0x822A)}, /* MSI 3070 */
+ {USB_DEVICE(0x0DB0, 0x3821)}, /* Ralink 3070 */
+ {USB_DEVICE(0x0DB0, 0x821A)}, /* Ralink 3070 */
+ {USB_DEVICE(0x083A, 0xA703)}, /* IO-MAGIC */
+ {USB_DEVICE(0x13D3, 0x3307)}, /* Azurewave */
+ {USB_DEVICE(0x13D3, 0x3321)}, /* Azurewave */
+ {USB_DEVICE(0x07FA, 0x7712)}, /* Edimax */
+ {USB_DEVICE(0x0789, 0x0166)}, /* Edimax */
+ {USB_DEVICE(0x148F, 0x2070)}, /* Edimax */
#endif /* RT3070 // */
- {USB_DEVICE(0x0DF6, 0x003F)}, /* Sitecom WL-608 */
{USB_DEVICE(0x1737, 0x0077)}, /* Linksys WUSB54GC-EU v3 */
{USB_DEVICE(0x2001, 0x3C09)}, /* D-Link */
{USB_DEVICE(0x2001, 0x3C0A)}, /* D-Link 3072 */
diff --git a/drivers/staging/sep/Kconfig b/drivers/staging/sep/Kconfig
deleted file mode 100644
index 0a9c39c..0000000
--- a/drivers/staging/sep/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-config DX_SEP
- tristate "Discretix SEP driver"
-# depends on MRST
- depends on RAR_REGISTER && PCI
- default y
- help
- Discretix SEP driver
-
- If unsure say M. The compiled module will be
- called sep_driver.ko
diff --git a/drivers/staging/sep/Makefile b/drivers/staging/sep/Makefile
deleted file mode 100644
index 628d5f9..0000000
--- a/drivers/staging/sep/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-obj-$(CONFIG_DX_SEP) := sep_driver.o
-
diff --git a/drivers/staging/sep/TODO b/drivers/staging/sep/TODO
deleted file mode 100644
index ff0e931..0000000
--- a/drivers/staging/sep/TODO
+++ /dev/null
@@ -1,8 +0,0 @@
-Todo's so far (from Alan Cox)
-- Fix firmware loading
-- Get firmware into firmware git tree
-- Review and tidy each algorithm function
-- Check whether it can be plugged into any of the kernel crypto API
- interfaces
-- Do something about the magic shared memory interface and replace it
- with something saner (in Linux terms)
diff --git a/drivers/staging/sep/sep_dev.h b/drivers/staging/sep/sep_dev.h
deleted file mode 100644
index 9200524..0000000
--- a/drivers/staging/sep/sep_dev.h
+++ /dev/null
@@ -1,110 +0,0 @@
-#ifndef __SEP_DEV_H__
-#define __SEP_DEV_H__
-
-/*
- *
- * sep_dev.h - Security Processor Device Structures
- *
- * Copyright(c) 2009 Intel Corporation. All rights reserved.
- * Copyright(c) 2009 Discretix. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * CONTACTS:
- *
- * Alan Cox alan@linux.intel.com
- *
- */
-
-struct sep_device {
- /* pointer to pci dev */
- struct pci_dev *pdev;
-
- unsigned long in_use;
-
- /* address of the shared memory allocated during init for SEP driver
- (coherent alloc) */
- void *shared_addr;
- /* the physical address of the shared area */
- dma_addr_t shared_bus;
-
- /* restricted access region (coherent alloc) */
- dma_addr_t rar_bus;
- void *rar_addr;
- /* firmware regions: cache is at rar_addr */
- unsigned long cache_size;
-
- /* follows the cache */
- dma_addr_t resident_bus;
- unsigned long resident_size;
- void *resident_addr;
-
- /* start address of the access to the SEP registers from driver */
- void __iomem *reg_addr;
- /* transaction counter that coordinates the transactions between SEP and HOST */
- unsigned long send_ct;
- /* counter for the messages from sep */
- unsigned long reply_ct;
- /* counter for the number of bytes allocated in the pool for the current
- transaction */
- unsigned long data_pool_bytes_allocated;
-
- /* array of pointers to the pages that represent input data for the synchronic
- DMA action */
- struct page **in_page_array;
-
- /* array of pointers to the pages that represent out data for the synchronic
- DMA action */
- struct page **out_page_array;
-
- /* number of pages in the sep_in_page_array */
- unsigned long in_num_pages;
-
- /* number of pages in the sep_out_page_array */
- unsigned long out_num_pages;
-
- /* global data for every flow */
- struct sep_flow_context_t flows[SEP_DRIVER_NUM_FLOWS];
-
- /* pointer to the workqueue that handles the flow done interrupts */
- struct workqueue_struct *flow_wq;
-
-};
-
-static struct sep_device *sep_dev;
-
-static inline void sep_write_reg(struct sep_device *dev, int reg, u32 value)
-{
- void __iomem *addr = dev->reg_addr + reg;
- writel(value, addr);
-}
-
-static inline u32 sep_read_reg(struct sep_device *dev, int reg)
-{
- void __iomem *addr = dev->reg_addr + reg;
- return readl(addr);
-}
-
-/* wait for SRAM write complete(indirect write */
-static inline void sep_wait_sram_write(struct sep_device *dev)
-{
- u32 reg_val;
- do
- reg_val = sep_read_reg(dev, HW_SRAM_DATA_READY_REG_ADDR);
- while (!(reg_val & 1));
-}
-
-
-#endif
diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
deleted file mode 100644
index ecbde34..0000000
--- a/drivers/staging/sep/sep_driver.c
+++ /dev/null
@@ -1,2742 +0,0 @@
-/*
- *
- * sep_driver.c - Security Processor Driver main group of functions
- *
- * Copyright(c) 2009 Intel Corporation. All rights reserved.
- * Copyright(c) 2009 Discretix. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * CONTACTS:
- *
- * Mark Allyn mark.a.allyn@intel.com
- *
- * CHANGES:
- *
- * 2009.06.26 Initial publish
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/cdev.h>
-#include <linux/kdev_t.h>
-#include <linux/mutex.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/poll.h>
-#include <linux/wait.h>
-#include <linux/pci.h>
-#include <linux/firmware.h>
-#include <linux/slab.h>
-#include <asm/ioctl.h>
-#include <linux/ioport.h>
-#include <asm/io.h>
-#include <linux/interrupt.h>
-#include <linux/pagemap.h>
-#include <asm/cacheflush.h>
-#include "sep_driver_hw_defs.h"
-#include "sep_driver_config.h"
-#include "sep_driver_api.h"
-#include "sep_dev.h"
-
-#if SEP_DRIVER_ARM_DEBUG_MODE
-
-#define CRYS_SEP_ROM_length 0x4000
-#define CRYS_SEP_ROM_start_address 0x8000C000UL
-#define CRYS_SEP_ROM_start_address_offset 0xC000UL
-#define SEP_ROM_BANK_register 0x80008420UL
-#define SEP_ROM_BANK_register_offset 0x8420UL
-#define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0x82000000
-
-/*
- * THESE 2 definitions are specific to the board - must be
- * defined during integration
- */
-#define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0xFF0D0000
-
-/* 2M size */
-
-static void sep_load_rom_code(struct sep_device *sep)
-{
- /* Index variables */
- unsigned long i, k, j;
- u32 reg;
- u32 error;
- u32 warning;
-
- /* Loading ROM from SEP_ROM_image.h file */
- k = sizeof(CRYS_SEP_ROM);
-
- edbg("SEP Driver: DX_CC_TST_SepRomLoader start\n");
-
- edbg("SEP Driver: k is %lu\n", k);
- edbg("SEP Driver: sep->reg_addr is %p\n", sep->reg_addr);
- edbg("SEP Driver: CRYS_SEP_ROM_start_address_offset is %p\n", CRYS_SEP_ROM_start_address_offset);
-
- for (i = 0; i < 4; i++) {
- /* write bank */
- sep_write_reg(sep, SEP_ROM_BANK_register_offset, i);
-
- for (j = 0; j < CRYS_SEP_ROM_length / 4; j++) {
- sep_write_reg(sep, CRYS_SEP_ROM_start_address_offset + 4 * j, CRYS_SEP_ROM[i * 0x1000 + j]);
-
- k = k - 4;
-
- if (k == 0) {
- j = CRYS_SEP_ROM_length;
- i = 4;
- }
- }
- }
-
- /* reset the SEP */
- sep_write_reg(sep, HW_HOST_SEP_SW_RST_REG_ADDR, 0x1);
-
- /* poll for SEP ROM boot finish */
- do
- reg = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
- while (!reg);
-
- edbg("SEP Driver: ROM polling ended\n");
-
- switch (reg) {
- case 0x1:
- /* fatal error - read erro status from GPRO */
- error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
- edbg("SEP Driver: ROM polling case 1\n");
- break;
- case 0x4:
- /* Cold boot ended successfully */
- case 0x8:
- /* Warmboot ended successfully */
- case 0x10:
- /* ColdWarm boot ended successfully */
- error = 0;
- case 0x2:
- /* Boot First Phase ended */
- warning = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
- case 0x20:
- edbg("SEP Driver: ROM polling case %d\n", reg);
- break;
- }
-
-}
-
-#else
-static void sep_load_rom_code(struct sep_device *sep) { }
-#endif /* SEP_DRIVER_ARM_DEBUG_MODE */
-
-
-
-/*----------------------------------------
- DEFINES
------------------------------------------*/
-
-#define BASE_ADDRESS_FOR_SYSTEM 0xfffc0000
-#define SEP_RAR_IO_MEM_REGION_SIZE 0x40000
-
-/*--------------------------------------------
- GLOBAL variables
---------------------------------------------*/
-
-/* debug messages level */
-static int debug;
-module_param(debug, int , 0);
-MODULE_PARM_DESC(debug, "Flag to enable SEP debug messages");
-
-/* Keep this a single static object for now to keep the conversion easy */
-
-static struct sep_device sep_instance;
-static struct sep_device *sep_dev = &sep_instance;
-
-/*
- mutex for the access to the internals of the sep driver
-*/
-static DEFINE_MUTEX(sep_mutex);
-
-
-/* wait queue head (event) of the driver */
-static DECLARE_WAIT_QUEUE_HEAD(sep_event);
-
-/**
- * sep_load_firmware - copy firmware cache/resident
- * @sep: device we are loading
- *
- * This functions copies the cache and resident from their source
- * location into destination shared memory.
- */
-
-static int sep_load_firmware(struct sep_device *sep)
-{
- const struct firmware *fw;
- char *cache_name = "sep/cache.image.bin";
- char *res_name = "sep/resident.image.bin";
- int error;
-
- edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
- edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
-
- /* load cache */
- error = request_firmware(&fw, cache_name, &sep->pdev->dev);
- if (error) {
- edbg("SEP Driver:cant request cache fw\n");
- return error;
- }
- edbg("SEP Driver:cache %08Zx@%p\n", fw->size, (void *) fw->data);
-
- memcpy(sep->rar_addr, (void *)fw->data, fw->size);
- sep->cache_size = fw->size;
- release_firmware(fw);
-
- sep->resident_bus = sep->rar_bus + sep->cache_size;
- sep->resident_addr = sep->rar_addr + sep->cache_size;
-
- /* load resident */
- error = request_firmware(&fw, res_name, &sep->pdev->dev);
- if (error) {
- edbg("SEP Driver:cant request res fw\n");
- return error;
- }
- edbg("sep: res %08Zx@%p\n", fw->size, (void *)fw->data);
-
- memcpy(sep->resident_addr, (void *) fw->data, fw->size);
- sep->resident_size = fw->size;
- release_firmware(fw);
-
- edbg("sep: resident v %p b %08llx cache v %p b %08llx\n",
- sep->resident_addr, (unsigned long long)sep->resident_bus,
- sep->rar_addr, (unsigned long long)sep->rar_bus);
- return 0;
-}
-
-MODULE_FIRMWARE("sep/cache.image.bin");
-MODULE_FIRMWARE("sep/resident.image.bin");
-
-/**
- * sep_map_and_alloc_shared_area - allocate shared block
- * @sep: security processor
- * @size: size of shared area
- *
- * Allocate a shared buffer in host memory that can be used by both the
- * kernel and also the hardware interface via DMA.
- */
-
-static int sep_map_and_alloc_shared_area(struct sep_device *sep,
- unsigned long size)
-{
- /* shared_addr = ioremap_nocache(0xda00000,shared_area_size); */
- sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev, size,
- &sep->shared_bus, GFP_KERNEL);
-
- if (!sep->shared_addr) {
- edbg("sep_driver :shared memory dma_alloc_coherent failed\n");
- return -ENOMEM;
- }
- /* set the bus address of the shared area */
- edbg("sep: shared_addr %ld bytes @%p (bus %08llx)\n",
- size, sep->shared_addr, (unsigned long long)sep->shared_bus);
- return 0;
-}
-
-/**
- * sep_unmap_and_free_shared_area - free shared block
- * @sep: security processor
- *
- * Free the shared area allocated to the security processor. The
- * processor must have finished with this and any final posted
- * writes cleared before we do so.
- */
-static void sep_unmap_and_free_shared_area(struct sep_device *sep, int size)
-{
- dma_free_coherent(&sep->pdev->dev, size,
- sep->shared_addr, sep->shared_bus);
-}
-
-/**
- * sep_shared_virt_to_bus - convert bus/virt addresses
- *
- * Returns the bus address inside the shared area according
- * to the virtual address.
- */
-
-static dma_addr_t sep_shared_virt_to_bus(struct sep_device *sep,
- void *virt_address)
-{
- dma_addr_t pa = sep->shared_bus + (virt_address - sep->shared_addr);
- edbg("sep: virt to bus b %08llx v %p\n", (unsigned long long) pa,
- virt_address);
- return pa;
-}
-
-/**
- * sep_shared_bus_to_virt - convert bus/virt addresses
- *
- * Returns virtual address inside the shared area according
- * to the bus address.
- */
-
-static void *sep_shared_bus_to_virt(struct sep_device *sep,
- dma_addr_t bus_address)
-{
- return sep->shared_addr + (bus_address - sep->shared_bus);
-}
-
-
-/**
- * sep_try_open - attempt to open a SEP device
- * @sep: device to attempt to open
- *
- * Atomically attempt to get ownership of a SEP device.
- * Returns 1 if the device was opened, 0 on failure.
- */
-
-static int sep_try_open(struct sep_device *sep)
-{
- if (!test_and_set_bit(0, &sep->in_use))
- return 1;
- return 0;
-}
-
-/**
- * sep_open - device open method
- * @inode: inode of sep device
- * @filp: file handle to sep device
- *
- * Open method for the SEP device. Called when userspace opens
- * the SEP device node. Must also release the memory data pool
- * allocations.
- *
- * Returns zero on success otherwise an error code.
- */
-
-static int sep_open(struct inode *inode, struct file *filp)
-{
- if (sep_dev == NULL)
- return -ENODEV;
-
- /* check the blocking mode */
- if (filp->f_flags & O_NDELAY) {
- if (sep_try_open(sep_dev) == 0)
- return -EAGAIN;
- } else
- if (wait_event_interruptible(sep_event, sep_try_open(sep_dev)) < 0)
- return -EINTR;
-
- /* Bind to the device, we only have one which makes it easy */
- filp->private_data = sep_dev;
- /* release data pool allocations */
- sep_dev->data_pool_bytes_allocated = 0;
- return 0;
-}
-
-
-/**
- * sep_release - close a SEP device
- * @inode: inode of SEP device
- * @filp: file handle being closed
- *
- * Called on the final close of a SEP device. As the open protects against
- * multiple simultaenous opens that means this method is called when the
- * final reference to the open handle is dropped.
- */
-
-static int sep_release(struct inode *inode, struct file *filp)
-{
- struct sep_device *sep = filp->private_data;
-#if 0 /*!SEP_DRIVER_POLLING_MODE */
- /* close IMR */
- sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
- /* release IRQ line */
- free_irq(SEP_DIRVER_IRQ_NUM, sep);
-
-#endif
- /* Ensure any blocked open progresses */
- clear_bit(0, &sep->in_use);
- wake_up(&sep_event);
- return 0;
-}
-
-/*---------------------------------------------------------------
- map function - this functions maps the message shared area
------------------------------------------------------------------*/
-static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- dma_addr_t bus_addr;
- struct sep_device *sep = filp->private_data;
-
- dbg("-------->SEP Driver: mmap start\n");
-
- /* check that the size of the mapped range is as the size of the message
- shared area */
- if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
- edbg("SEP Driver mmap requested size is more than allowed\n");
- printk(KERN_WARNING "SEP Driver mmap requested size is more than allowed\n");
- printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_end);
- printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_start);
- return -EAGAIN;
- }
-
- edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr);
-
- /* get bus address */
- bus_addr = sep->shared_bus;
-
- edbg("SEP Driver: phys_addr is %08llx\n", (unsigned long long)bus_addr);
-
- if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
- edbg("SEP Driver remap_page_range failed\n");
- printk(KERN_WARNING "SEP Driver remap_page_range failed\n");
- return -EAGAIN;
- }
-
- dbg("SEP Driver:<-------- mmap end\n");
-
- return 0;
-}
-
-
-/*-----------------------------------------------
- poll function
-*----------------------------------------------*/
-static unsigned int sep_poll(struct file *filp, poll_table * wait)
-{
- unsigned long count;
- unsigned int mask = 0;
- unsigned long retval = 0; /* flow id */
- struct sep_device *sep = filp->private_data;
-
- dbg("---------->SEP Driver poll: start\n");
-
-
-#if SEP_DRIVER_POLLING_MODE
-
- while (sep->send_ct != (retval & 0x7FFFFFFF)) {
- retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
-
- for (count = 0; count < 10 * 4; count += 4)
- edbg("Poll Debug Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + count)));
- }
-
- sep->reply_ct++;
-#else
- /* add the event to the polling wait table */
- poll_wait(filp, &sep_event, wait);
-
-#endif
-
- edbg("sep->send_ct is %lu\n", sep->send_ct);
- edbg("sep->reply_ct is %lu\n", sep->reply_ct);
-
- /* check if the data is ready */
- if (sep->send_ct == sep->reply_ct) {
- for (count = 0; count < 12 * 4; count += 4)
- edbg("Sep Mesg Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + count)));
-
- for (count = 0; count < 10 * 4; count += 4)
- edbg("Debug Data Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + 0x1800 + count)));
-
- retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
- edbg("retval is %lu\n", retval);
- /* check if the this is sep reply or request */
- if (retval >> 31) {
- edbg("SEP Driver: sep request in\n");
- /* request */
- mask |= POLLOUT | POLLWRNORM;
- } else {
- edbg("SEP Driver: sep reply in\n");
- mask |= POLLIN | POLLRDNORM;
- }
- }
- dbg("SEP Driver:<-------- poll exit\n");
- return mask;
-}
-
-/**
- * sep_time_address - address in SEP memory of time
- * @sep: SEP device we want the address from
- *
- * Return the address of the two dwords in memory used for time
- * setting.
- */
-
-static u32 *sep_time_address(struct sep_device *sep)
-{
- return sep->shared_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
-}
-
-/**
- * sep_set_time - set the SEP time
- * @sep: the SEP we are setting the time for
- *
- * Calculates time and sets it at the predefined address.
- * Called with the sep mutex held.
- */
-static unsigned long sep_set_time(struct sep_device *sep)
-{
- struct timeval time;
- u32 *time_addr; /* address of time as seen by the kernel */
-
-
- dbg("sep:sep_set_time start\n");
-
- do_gettimeofday(&time);
-
- /* set value in the SYSTEM MEMORY offset */
- time_addr = sep_time_address(sep);
-
- time_addr[0] = SEP_TIME_VAL_TOKEN;
- time_addr[1] = time.tv_sec;
-
- edbg("SEP Driver:time.tv_sec is %lu\n", time.tv_sec);
- edbg("SEP Driver:time_addr is %p\n", time_addr);
- edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr);
-
- return time.tv_sec;
-}
-
-/**
- * sep_dump_message - dump the message that is pending
- * @sep: sep device
- *
- * Dump out the message pending in the shared message area
- */
-
-static void sep_dump_message(struct sep_device *sep)
-{
- int count;
- for (count = 0; count < 12 * 4; count += 4)
- edbg("Word %d of the message is %u\n", count, *((u32 *) (sep->shared_addr + count)));
-}
-
-/**
- * sep_send_command_handler - kick off a command
- * @sep: sep being signalled
- *
- * This function raises interrupt to SEP that signals that is has a new
- * command from the host
- */
-
-static void sep_send_command_handler(struct sep_device *sep)
-{
- dbg("sep:sep_send_command_handler start\n");
-
- mutex_lock(&sep_mutex);
- sep_set_time(sep);
-
- /* FIXME: flush cache */
- flush_cache_all();
-
- sep_dump_message(sep);
- /* update counter */
- sep->send_ct++;
- /* send interrupt to SEP */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
- dbg("SEP Driver:<-------- sep_send_command_handler end\n");
- mutex_unlock(&sep_mutex);
- return;
-}
-
-/**
- * sep_send_reply_command_handler - kick off a command reply
- * @sep: sep being signalled
- *
- * This function raises interrupt to SEP that signals that is has a new
- * command from the host
- */
-
-static void sep_send_reply_command_handler(struct sep_device *sep)
-{
- dbg("sep:sep_send_reply_command_handler start\n");
-
- /* flash cache */
- flush_cache_all();
-
- sep_dump_message(sep);
-
- mutex_lock(&sep_mutex);
- sep->send_ct++; /* update counter */
- /* send the interrupt to SEP */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct);
- /* update both counters */
- sep->send_ct++;
- sep->reply_ct++;
- mutex_unlock(&sep_mutex);
- dbg("sep: sep_send_reply_command_handler end\n");
-}
-
-/*
- This function handles the allocate data pool memory request
- This function returns calculates the bus address of the
- allocated memory, and the offset of this area from the mapped address.
- Therefore, the FVOs in user space can calculate the exact virtual
- address of this allocated memory
-*/
-static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
- unsigned long arg)
-{
- int error;
- struct sep_driver_alloc_t command_args;
-
- dbg("SEP Driver:--------> sep_allocate_data_pool_memory_handler start\n");
-
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_alloc_t));
- if (error) {
- error = -EFAULT;
- goto end_function;
- }
-
- /* allocate memory */
- if ((sep->data_pool_bytes_allocated + command_args.num_bytes) > SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
- error = -ENOMEM;
- goto end_function;
- }
-
- /* set the virtual and bus address */
- command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
- command_args.phys_address = sep->shared_bus + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
-
- /* write the memory back to the user space */
- error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_alloc_t));
- if (error) {
- error = -EFAULT;
- goto end_function;
- }
-
- /* set the allocation */
- sep->data_pool_bytes_allocated += command_args.num_bytes;
-
-end_function:
- dbg("SEP Driver:<-------- sep_allocate_data_pool_memory_handler end\n");
- return error;
-}
-
-/*
- This function handles write into allocated data pool command
-*/
-static int sep_write_into_data_pool_handler(struct sep_device *sep, unsigned long arg)
-{
- int error;
- void *virt_address;
- unsigned long va;
- unsigned long app_in_address;
- unsigned long num_bytes;
- void *data_pool_area_addr;
-
- dbg("SEP Driver:--------> sep_write_into_data_pool_handler start\n");
-
- /* get the application address */
- error = get_user(app_in_address, &(((struct sep_driver_write_t *) arg)->app_address));
- if (error)
- goto end_function;
-
- /* get the virtual kernel address address */
- error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
- if (error)
- goto end_function;
- virt_address = (void *)va;
-
- /* get the number of bytes */
- error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
- if (error)
- goto end_function;
-
- /* calculate the start of the data pool */
- data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
-
-
- /* check that the range of the virtual kernel address is correct */
- if (virt_address < data_pool_area_addr || virt_address > (data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)) {
- error = -EINVAL;
- goto end_function;
- }
- /* copy the application data */
- error = copy_from_user(virt_address, (void *) app_in_address, num_bytes);
- if (error)
- error = -EFAULT;
-end_function:
- dbg("SEP Driver:<-------- sep_write_into_data_pool_handler end\n");
- return error;
-}
-
-/*
- this function handles the read from data pool command
-*/
-static int sep_read_from_data_pool_handler(struct sep_device *sep, unsigned long arg)
-{
- int error;
- /* virtual address of dest application buffer */
- unsigned long app_out_address;
- /* virtual address of the data pool */
- unsigned long va;
- void *virt_address;
- unsigned long num_bytes;
- void *data_pool_area_addr;
-
- dbg("SEP Driver:--------> sep_read_from_data_pool_handler start\n");
-
- /* get the application address */
- error = get_user(app_out_address, &(((struct sep_driver_write_t *) arg)->app_address));
- if (error)
- goto end_function;
-
- /* get the virtual kernel address address */
- error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
- if (error)
- goto end_function;
- virt_address = (void *)va;
-
- /* get the number of bytes */
- error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
- if (error)
- goto end_function;
-
- /* calculate the start of the data pool */
- data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
-
- /* FIXME: These are incomplete all over the driver: what about + len
- and when doing that also overflows */
- /* check that the range of the virtual kernel address is correct */
- if (virt_address < data_pool_area_addr || virt_address > data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
- error = -EINVAL;
- goto end_function;
- }
-
- /* copy the application data */
- error = copy_to_user((void *) app_out_address, virt_address, num_bytes);
- if (error)
- error = -EFAULT;
-end_function:
- dbg("SEP Driver:<-------- sep_read_from_data_pool_handler end\n");
- return error;
-}
-
-/*
- This function releases all the application virtual buffer physical pages,
- that were previously locked
-*/
-static int sep_free_dma_pages(struct page **page_array_ptr, unsigned long num_pages, unsigned long dirtyFlag)
-{
- unsigned long count;
-
- if (dirtyFlag) {
- for (count = 0; count < num_pages; count++) {
- /* the out array was written, therefore the data was changed */
- if (!PageReserved(page_array_ptr[count]))
- SetPageDirty(page_array_ptr[count]);
- page_cache_release(page_array_ptr[count]);
- }
- } else {
- /* free in pages - the data was only read, therefore no update was done
- on those pages */
- for (count = 0; count < num_pages; count++)
- page_cache_release(page_array_ptr[count]);
- }
-
- if (page_array_ptr)
- /* free the array */
- kfree(page_array_ptr);
-
- return 0;
-}
-
-/*
- This function locks all the physical pages of the kernel virtual buffer
- and construct a basic lli array, where each entry holds the physical
- page address and the size that application data holds in this physical pages
-*/
-static int sep_lock_kernel_pages(struct sep_device *sep,
- unsigned long kernel_virt_addr,
- unsigned long data_size,
- unsigned long *num_pages_ptr,
- struct sep_lli_entry_t **lli_array_ptr,
- struct page ***page_array_ptr)
-{
- int error = 0;
- /* the the page of the end address of the user space buffer */
- unsigned long end_page;
- /* the page of the start address of the user space buffer */
- unsigned long start_page;
- /* the range in pages */
- unsigned long num_pages;
- struct sep_lli_entry_t *lli_array;
- /* next kernel address to map */
- unsigned long next_kernel_address;
- unsigned long count;
-
- dbg("SEP Driver:--------> sep_lock_kernel_pages start\n");
-
- /* set start and end pages and num pages */
- end_page = (kernel_virt_addr + data_size - 1) >> PAGE_SHIFT;
- start_page = kernel_virt_addr >> PAGE_SHIFT;
- num_pages = end_page - start_page + 1;
-
- edbg("SEP Driver: kernel_virt_addr is %08lx\n", kernel_virt_addr);
- edbg("SEP Driver: data_size is %lu\n", data_size);
- edbg("SEP Driver: start_page is %lx\n", start_page);
- edbg("SEP Driver: end_page is %lx\n", end_page);
- edbg("SEP Driver: num_pages is %lu\n", num_pages);
-
- lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
- if (!lli_array) {
- edbg("SEP Driver: kmalloc for lli_array failed\n");
- error = -ENOMEM;
- goto end_function;
- }
-
- /* set the start address of the first page - app data may start not at
- the beginning of the page */
- lli_array[0].physical_address = (unsigned long) virt_to_phys((unsigned long *) kernel_virt_addr);
-
- /* check that not all the data is in the first page only */
- if ((PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK))) >= data_size)
- lli_array[0].block_size = data_size;
- else
- lli_array[0].block_size = PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK));
-
- /* debug print */
- dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
-
- /* advance the address to the start of the next page */
- next_kernel_address = (kernel_virt_addr & PAGE_MASK) + PAGE_SIZE;
-
- /* go from the second page to the prev before last */
- for (count = 1; count < (num_pages - 1); count++) {
- lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
- lli_array[count].block_size = PAGE_SIZE;
-
- edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
- next_kernel_address += PAGE_SIZE;
- }
-
- /* if more then 1 pages locked - then update for the last page size needed */
- if (num_pages > 1) {
- /* update the address of the last page */
- lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
-
- /* set the size of the last page */
- lli_array[count].block_size = (kernel_virt_addr + data_size) & (~PAGE_MASK);
-
- if (lli_array[count].block_size == 0) {
- dbg("app_virt_addr is %08lx\n", kernel_virt_addr);
- dbg("data_size is %lu\n", data_size);
- while (1);
- }
-
- edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
- }
- /* set output params */
- *lli_array_ptr = lli_array;
- *num_pages_ptr = num_pages;
- *page_array_ptr = 0;
-end_function:
- dbg("SEP Driver:<-------- sep_lock_kernel_pages end\n");
- return 0;
-}
-
-/*
- This function locks all the physical pages of the application virtual buffer
- and construct a basic lli array, where each entry holds the physical page
- address and the size that application data holds in this physical pages
-*/
-static int sep_lock_user_pages(struct sep_device *sep,
- unsigned long app_virt_addr,
- unsigned long data_size,
- unsigned long *num_pages_ptr,
- struct sep_lli_entry_t **lli_array_ptr,
- struct page ***page_array_ptr)
-{
- int error = 0;
- /* the the page of the end address of the user space buffer */
- unsigned long end_page;
- /* the page of the start address of the user space buffer */
- unsigned long start_page;
- /* the range in pages */
- unsigned long num_pages;
- struct page **page_array;
- struct sep_lli_entry_t *lli_array;
- unsigned long count;
- int result;
-
- dbg("SEP Driver:--------> sep_lock_user_pages start\n");
-
- /* set start and end pages and num pages */
- end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
- start_page = app_virt_addr >> PAGE_SHIFT;
- num_pages = end_page - start_page + 1;
-
- edbg("SEP Driver: app_virt_addr is %08lx\n", app_virt_addr);
- edbg("SEP Driver: data_size is %lu\n", data_size);
- edbg("SEP Driver: start_page is %lu\n", start_page);
- edbg("SEP Driver: end_page is %lu\n", end_page);
- edbg("SEP Driver: num_pages is %lu\n", num_pages);
-
- /* allocate array of pages structure pointers */
- page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
- if (!page_array) {
- edbg("SEP Driver: kmalloc for page_array failed\n");
-
- error = -ENOMEM;
- goto end_function;
- }
-
- lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
- if (!lli_array) {
- edbg("SEP Driver: kmalloc for lli_array failed\n");
-
- error = -ENOMEM;
- goto end_function_with_error1;
- }
-
- /* convert the application virtual address into a set of physical */
- down_read(&current->mm->mmap_sem);
- result = get_user_pages(current, current->mm, app_virt_addr, num_pages, 1, 0, page_array, 0);
- up_read(&current->mm->mmap_sem);
-
- /* check the number of pages locked - if not all then exit with error */
- if (result != num_pages) {
- dbg("SEP Driver: not all pages locked by get_user_pages\n");
-
- error = -ENOMEM;
- goto end_function_with_error2;
- }
-
- /* flush the cache */
- for (count = 0; count < num_pages; count++)
- flush_dcache_page(page_array[count]);
-
- /* set the start address of the first page - app data may not start at
- the beginning of the page */
- lli_array[0].physical_address = ((unsigned long) page_to_phys(page_array[0])) + (app_virt_addr & (~PAGE_MASK));
-
- /* check that not all the data is in the first page only */
- if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
- lli_array[0].block_size = data_size;
- else
- lli_array[0].block_size = PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
-
- /* debug print */
- dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
-
- /* go from the second page to the one before the last */
- for (count = 1; count < (num_pages - 1); count++) {
- lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
- lli_array[count].block_size = PAGE_SIZE;
-
- edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
- }
-
- /* if more than 1 page locked - then update for the last page size needed */
- if (num_pages > 1) {
- /* update the address of the last page */
- lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
-
- /* set the size of the last page */
- lli_array[count].block_size = (app_virt_addr + data_size) & (~PAGE_MASK);
-
- if (lli_array[count].block_size == 0) {
- dbg("app_virt_addr is %08lx\n", app_virt_addr);
- dbg("data_size is %lu\n", data_size);
- while (1);
- }
- edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n",
- count, lli_array[count].physical_address,
- count, lli_array[count].block_size);
- }
-
- /* set output params */
- *lli_array_ptr = lli_array;
- *num_pages_ptr = num_pages;
- *page_array_ptr = page_array;
- goto end_function;
-
-end_function_with_error2:
- /* release the pages */
- for (count = 0; count < num_pages; count++)
- page_cache_release(page_array[count]);
- kfree(lli_array);
-end_function_with_error1:
- kfree(page_array);
-end_function:
- dbg("SEP Driver:<-------- sep_lock_user_pages end\n");
- return error;
-}
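
For reference, the first/last block-size arithmetic used by both locking functions above is easy to misread, so here is a minimal standalone sketch of it (hypothetical helpers, not part of the driver; assumes 4 KiB pages):

#define SKETCH_PAGE_SIZE 4096UL

/* Bytes of the buffer that land in its first page; mirrors the
 * lli_array[0].block_size computation above. */
static unsigned long first_block_size(unsigned long addr, unsigned long size)
{
	unsigned long room = SKETCH_PAGE_SIZE - (addr & (SKETCH_PAGE_SIZE - 1));
	return size < room ? size : room;
}

/* Bytes of the buffer that land in its last page; note the driver
 * above treats a result of 0 (end exactly on a page boundary) as a
 * fatal condition and spins. */
static unsigned long last_block_size(unsigned long addr, unsigned long size)
{
	return (addr + size) & (SKETCH_PAGE_SIZE - 1);
}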
-
-
-/*
- this function calculates the size of data that can be inserted into the lli
- table from this array; the condition is that either the table is full
- (all entries are entered), or there are no more entries in the lli array
-*/
-static unsigned long sep_calculate_lli_table_max_size(struct sep_lli_entry_t *lli_in_array_ptr, unsigned long num_array_entries)
-{
- unsigned long table_data_size = 0;
- unsigned long counter;
-
- /* calculate the data in the out lli table until we fill the whole
- table or until the data has ended */
- for (counter = 0; (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) && (counter < num_array_entries); counter++)
- table_data_size += lli_in_array_ptr[counter].block_size;
- return table_data_size;
-}
-
-/*
- this function builds one lli table from the lli_array according to
- the given size of data
-*/
-static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr, struct sep_lli_entry_t *lli_table_ptr, unsigned long *num_processed_entries_ptr, unsigned long *num_table_entries_ptr, unsigned long table_data_size)
-{
- unsigned long curr_table_data_size;
- /* counter of lli array entry */
- unsigned long array_counter;
-
- dbg("SEP Driver:--------> sep_build_lli_table start\n");
-
- /* init current table data size and lli array entry counter */
- curr_table_data_size = 0;
- array_counter = 0;
- *num_table_entries_ptr = 1;
-
- edbg("SEP Driver:table_data_size is %lu\n", table_data_size);
-
- /* fill the table till table size reaches the needed amount */
- while (curr_table_data_size < table_data_size) {
- /* update the number of entries in table */
- (*num_table_entries_ptr)++;
-
- lli_table_ptr->physical_address = lli_array_ptr[array_counter].physical_address;
- lli_table_ptr->block_size = lli_array_ptr[array_counter].block_size;
- curr_table_data_size += lli_table_ptr->block_size;
-
- edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
- edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
- edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
-
- /* check for overflow of the table data */
- if (curr_table_data_size > table_data_size) {
- edbg("SEP Driver:curr_table_data_size > table_data_size\n");
-
- /* update the size of block in the table */
- lli_table_ptr->block_size -= (curr_table_data_size - table_data_size);
-
- /* update the physical address in the lli array */
- lli_array_ptr[array_counter].physical_address += lli_table_ptr->block_size;
-
- /* update the block size left in the lli array */
- lli_array_ptr[array_counter].block_size = (curr_table_data_size - table_data_size);
- } else
- /* advance to the next entry in the lli_array */
- array_counter++;
-
- edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
- edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
-
- /* move to the next entry in table */
- lli_table_ptr++;
- }
-
- /* set the info entry to default */
- lli_table_ptr->physical_address = 0xffffffff;
- lli_table_ptr->block_size = 0;
-
- edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
- edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
- edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
-
- /* set the output parameter */
- *num_processed_entries_ptr += array_counter;
-
- edbg("SEP Driver:*num_processed_entries_ptr is %lu\n", *num_processed_entries_ptr);
- dbg("SEP Driver:<-------- sep_build_lli_table end\n");
- return;
-}
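
The info entry written here is later decoded by sep_debug_print_lli_tables() and the flow code. A hedged sketch of the encoding as used in this file (hypothetical helpers; assumes the 8-bit entry-count / 24-bit data-size split seen in the shifts below):

/* Pack an info entry's block_size: bits 31..24 carry the number of
 * entries in the next table, bits 23..0 its total data size. */
static unsigned long sep_sketch_pack_info(unsigned long num_entries,
					  unsigned long data_size)
{
	return (num_entries << 24) | (data_size & 0xffffff);
}

/* Inverse of the packing above. */
static void sep_sketch_unpack_info(unsigned long packed,
				   unsigned long *num_entries,
				   unsigned long *data_size)
{
	*num_entries = (packed >> 24) & 0xff;
	*data_size = packed & 0xffffff;
}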
-
-/*
- this function goes over the list of the created tables and
- prints all the data
-*/
-static void sep_debug_print_lli_tables(struct sep_device *sep, struct sep_lli_entry_t *lli_table_ptr, unsigned long num_table_entries, unsigned long table_data_size)
-{
- unsigned long table_count;
- unsigned long entries_count;
-
- dbg("SEP Driver:--------> sep_debug_print_lli_tables start\n");
-
- table_count = 1;
- while ((unsigned long) lli_table_ptr != 0xffffffff) {
- edbg("SEP Driver: lli table %08lx, table_data_size is %lu\n", table_count, table_data_size);
- edbg("SEP Driver: num_table_entries is %lu\n", num_table_entries);
-
- /* print entries of the table (without info entry) */
- for (entries_count = 0; entries_count < num_table_entries; entries_count++, lli_table_ptr++) {
- edbg("SEP Driver:lli_table_ptr address is %08lx\n", (unsigned long) lli_table_ptr);
- edbg("SEP Driver:phys address is %08lx block size is %lu\n", lli_table_ptr->physical_address, lli_table_ptr->block_size);
- }
-
- /* point to the info entry */
- lli_table_ptr--;
-
- edbg("SEP Driver:phys lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
- edbg("SEP Driver:phys lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
-
-
- table_data_size = lli_table_ptr->block_size & 0xffffff;
- num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
- lli_table_ptr = (struct sep_lli_entry_t *)
- (lli_table_ptr->physical_address);
-
- edbg("SEP Driver:phys table_data_size is %lu num_table_entries is %lu lli_table_ptr is%lu\n", table_data_size, num_table_entries, (unsigned long) lli_table_ptr);
-
- if ((unsigned long) lli_table_ptr != 0xffffffff)
- lli_table_ptr = (struct sep_lli_entry_t *) sep_shared_bus_to_virt(sep, (unsigned long) lli_table_ptr);
-
- table_count++;
- }
- dbg("SEP Driver:<-------- sep_debug_print_lli_tables end\n");
-}
-
-
-/*
- This function prepares only the input DMA table for synchronous symmetric
- operations (HASH)
-*/
-static int sep_prepare_input_dma_table(struct sep_device *sep,
- unsigned long app_virt_addr,
- unsigned long data_size,
- unsigned long block_size,
- unsigned long *lli_table_ptr,
- unsigned long *num_entries_ptr,
- unsigned long *table_data_size_ptr,
- bool isKernelVirtualAddress)
-{
- /* pointer to the info entry of the table - the last entry */
- struct sep_lli_entry_t *info_entry_ptr;
- /* array of pointers to pages */
- struct sep_lli_entry_t *lli_array_ptr;
- /* points to the first entry to be processed in the lli_in_array */
- unsigned long current_entry;
- /* num entries in the virtual buffer */
- unsigned long sep_lli_entries;
- /* lli table pointer */
- struct sep_lli_entry_t *in_lli_table_ptr;
- /* the total data in one table */
- unsigned long table_data_size;
- /* number of entries in lli table */
- unsigned long num_entries_in_table;
- /* next table address */
- void *lli_table_alloc_addr;
- unsigned long result;
-
- dbg("SEP Driver:--------> sep_prepare_input_dma_table start\n");
-
- edbg("SEP Driver:data_size is %lu\n", data_size);
- edbg("SEP Driver:block_size is %lu\n", block_size);
-
- /* initialize the pages pointers */
- sep->in_page_array = 0;
- sep->in_num_pages = 0;
-
- if (data_size == 0) {
- /* special case - created 2 entries table with zero data */
- in_lli_table_ptr = (struct sep_lli_entry_t *) (sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES);
- /* FIXME: Should the entry below not be for _bus */
- in_lli_table_ptr->physical_address = (unsigned long)sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
- in_lli_table_ptr->block_size = 0;
-
- in_lli_table_ptr++;
- in_lli_table_ptr->physical_address = 0xFFFFFFFF;
- in_lli_table_ptr->block_size = 0;
-
- *lli_table_ptr = sep->shared_bus + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
- *num_entries_ptr = 2;
- *table_data_size_ptr = 0;
-
- goto end_function;
- }
-
- /* check if the pages are in Kernel Virtual Address layout */
- if (isKernelVirtualAddress == true)
- /* lock the pages of the kernel buffer and translate them to pages */
- result = sep_lock_kernel_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
- else
- /* lock the pages of the user buffer and translate them to pages */
- result = sep_lock_user_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
-
- if (result)
- return result;
-
- edbg("SEP Driver:output sep->in_num_pages is %lu\n", sep->in_num_pages);
-
- current_entry = 0;
- info_entry_ptr = 0;
- sep_lli_entries = sep->in_num_pages;
-
- /* initialize to point after the message area */
- lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
-
- /* loop until all the entries in the in array are processed */
- while (current_entry < sep_lli_entries) {
- /* set the new input and output tables */
- in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
-
- lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
-
- /* calculate the maximum size of data for input table */
- table_data_size = sep_calculate_lli_table_max_size(&lli_array_ptr[current_entry], (sep_lli_entries - current_entry));
-
- /* now calculate the table size so that it will be a multiple of the block size */
- table_data_size = (table_data_size / block_size) * block_size;
-
- edbg("SEP Driver:output table_data_size is %lu\n", table_data_size);
-
- /* construct input lli table */
- sep_build_lli_table(&lli_array_ptr[current_entry], in_lli_table_ptr, ¤t_entry, &num_entries_in_table, table_data_size);
-
- if (info_entry_ptr == 0) {
- /* set the output parameters to physical addresses */
- *lli_table_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
- *num_entries_ptr = num_entries_in_table;
- *table_data_size_ptr = table_data_size;
-
- edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_ptr);
- } else {
- /* update the info entry of the previous in table */
- info_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
- info_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
- }
-
- /* save the pointer to the info entry of the current tables */
- info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
- }
-
- /* print input tables */
- sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
- sep_shared_bus_to_virt(sep, *lli_table_ptr), *num_entries_ptr, *table_data_size_ptr);
-
- /* free the array of the lli entries */
- kfree(lli_array_ptr);
-end_function:
- dbg("SEP Driver:<-------- sep_prepare_input_dma_table end\n");
- return 0;
-
-}
-
-/*
- This function creates the input and output dma tables for
- symmetric operations (AES/DES) according to the block size from LLI arrays
-*/
-static int sep_construct_dma_tables_from_lli(struct sep_device *sep,
- struct sep_lli_entry_t *lli_in_array,
- unsigned long sep_in_lli_entries,
- struct sep_lli_entry_t *lli_out_array,
- unsigned long sep_out_lli_entries,
- unsigned long block_size, unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr)
-{
- /* points to the area where next lli table can be allocated: keep void *
- as there is pointer scaling to fix otherwise */
- void *lli_table_alloc_addr;
- /* input lli table */
- struct sep_lli_entry_t *in_lli_table_ptr;
- /* output lli table */
- struct sep_lli_entry_t *out_lli_table_ptr;
- /* pointer to the info entry of the table - the last entry */
- struct sep_lli_entry_t *info_in_entry_ptr;
- /* pointer to the info entry of the table - the last entry */
- struct sep_lli_entry_t *info_out_entry_ptr;
- /* points to the first entry to be processed in the lli_in_array */
- unsigned long current_in_entry;
- /* points to the first entry to be processed in the lli_out_array */
- unsigned long current_out_entry;
- /* max size of the input table */
- unsigned long in_table_data_size;
- /* max size of the output table */
- unsigned long out_table_data_size;
- /* flag that signifies if this is the first table built from the arrays */
- unsigned long first_table_flag;
- /* the data size that should be in table */
- unsigned long table_data_size;
- /* number of entries in the input table */
- unsigned long num_entries_in_table;
- /* number of entries in the output table */
- unsigned long num_entries_out_table;
-
- dbg("SEP Driver:--------> sep_construct_dma_tables_from_lli start\n");
-
- /* initialize to point after the message area */
- lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
-
- current_in_entry = 0;
- current_out_entry = 0;
- first_table_flag = 1;
- info_in_entry_ptr = 0;
- info_out_entry_ptr = 0;
-
- /* loop until all the entries in the in array are processed */
- while (current_in_entry < sep_in_lli_entries) {
- /* set the new input and output tables */
- in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
-
- lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
-
- /* set the first output tables */
- out_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
-
- lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
-
- /* calculate the maximum size of data for input table */
- in_table_data_size = sep_calculate_lli_table_max_size(&lli_in_array[current_in_entry], (sep_in_lli_entries - current_in_entry));
-
- /* calculate the maximum size of data for output table */
- out_table_data_size = sep_calculate_lli_table_max_size(&lli_out_array[current_out_entry], (sep_out_lli_entries - current_out_entry));
-
- edbg("SEP Driver:in_table_data_size is %lu\n", in_table_data_size);
- edbg("SEP Driver:out_table_data_size is %lu\n", out_table_data_size);
-
- /* take the smaller of the two data sizes */
- table_data_size = in_table_data_size;
- if (table_data_size > out_table_data_size)
- table_data_size = out_table_data_size;
-
- /* now calculate the table size so that it will be a multiple of the block size */
- table_data_size = (table_data_size / block_size) * block_size;
-
- dbg("SEP Driver:table_data_size is %lu\n", table_data_size);
-
- /* construct input lli table */
- sep_build_lli_table(&lli_in_array[current_in_entry], in_lli_table_ptr, ¤t_in_entry, &num_entries_in_table, table_data_size);
-
- /* construct output lli table */
- sep_build_lli_table(&lli_out_array[current_out_entry], out_lli_table_ptr, ¤t_out_entry, &num_entries_out_table, table_data_size);
-
- /* if info entry is null - this is the first table built */
- if (info_in_entry_ptr == 0) {
- /* set the output parameters to physical addresses */
- *lli_table_in_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
- *in_num_entries_ptr = num_entries_in_table;
- *lli_table_out_ptr = sep_shared_virt_to_bus(sep, out_lli_table_ptr);
- *out_num_entries_ptr = num_entries_out_table;
- *table_data_size_ptr = table_data_size;
-
- edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_in_ptr);
- edbg("SEP Driver:output lli_table_out_ptr is %08lx\n", *lli_table_out_ptr);
- } else {
- /* update the info entry of the previous in table */
- info_in_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
- info_in_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
-
- /* update the info entry of the previous out table */
- info_out_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, out_lli_table_ptr);
- info_out_entry_ptr->block_size = ((num_entries_out_table) << 24) | (table_data_size);
- }
-
- /* save the pointer to the info entry of the current tables */
- info_in_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
- info_out_entry_ptr = out_lli_table_ptr + num_entries_out_table - 1;
-
- edbg("SEP Driver:output num_entries_out_table is %lu\n", (unsigned long) num_entries_out_table);
- edbg("SEP Driver:output info_in_entry_ptr is %lu\n", (unsigned long) info_in_entry_ptr);
- edbg("SEP Driver:output info_out_entry_ptr is %lu\n", (unsigned long) info_out_entry_ptr);
- }
-
- /* print input tables */
- sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
- sep_shared_bus_to_virt(sep, *lli_table_in_ptr), *in_num_entries_ptr, *table_data_size_ptr);
- /* print output tables */
- sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
- sep_shared_bus_to_virt(sep, *lli_table_out_ptr), *out_num_entries_ptr, *table_data_size_ptr);
- dbg("SEP Driver:<-------- sep_construct_dma_tables_from_lli end\n");
- return 0;
-}
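
The per-table size choice above reduces to "the smaller of the two capacities, rounded down to a whole number of cipher blocks". A standalone sketch of that calculation (hypothetical helper, not driver API; block_size must be non-zero):

static unsigned long usable_table_size(unsigned long in_capacity,
				       unsigned long out_capacity,
				       unsigned long block_size)
{
	unsigned long size = in_capacity < out_capacity ? in_capacity
							: out_capacity;

	/* round down so AES/DES always sees whole blocks */
	return (size / block_size) * block_size;
}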
-
-
-/*
- This function builds input and output DMA tables for synchronous
- symmetric operations (AES, DES). It also checks that each table's
- data size is a multiple of the block size
-*/
-static int sep_prepare_input_output_dma_table(struct sep_device *sep,
- unsigned long app_virt_in_addr,
- unsigned long app_virt_out_addr,
- unsigned long data_size,
- unsigned long block_size,
- unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr, bool isKernelVirtualAddress)
-{
- /* array of pointers of page */
- struct sep_lli_entry_t *lli_in_array;
- /* array of pointers of page */
- struct sep_lli_entry_t *lli_out_array;
- int result = 0;
-
- dbg("SEP Driver:--------> sep_prepare_input_output_dma_table start\n");
-
- /* initialize the pages pointers */
- sep->in_page_array = 0;
- sep->out_page_array = 0;
-
- /* check if the pages are in Kernel Virtual Address layout */
- if (isKernelVirtualAddress == true) {
- /* lock the pages of the kernel buffer and translate them to pages */
- result = sep_lock_kernel_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
- if (result) {
- edbg("SEP Driver: sep_lock_kernel_pages for input virtual buffer failed\n");
- goto end_function;
- }
- } else {
- /* lock the pages of the user buffer and translate them to pages */
- result = sep_lock_user_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
- if (result) {
- edbg("SEP Driver: sep_lock_user_pages for input virtual buffer failed\n");
- goto end_function;
- }
- }
-
- if (isKernelVirtualAddress == true) {
- result = sep_lock_kernel_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
- if (result) {
- edbg("SEP Driver: sep_lock_kernel_pages for output virtual buffer failed\n");
- goto end_function_with_error1;
- }
- } else {
- result = sep_lock_user_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
- if (result) {
- edbg("SEP Driver: sep_lock_user_pages for output virtual buffer failed\n");
- goto end_function_with_error1;
- }
- }
- edbg("sep->in_num_pages is %lu\n", sep->in_num_pages);
- edbg("sep->out_num_pages is %lu\n", sep->out_num_pages);
- edbg("SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n", SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
-
-
- /* call the function that creates the tables from the lli arrays */
- result = sep_construct_dma_tables_from_lli(sep, lli_in_array, sep->in_num_pages, lli_out_array, sep->out_num_pages, block_size, lli_table_in_ptr, lli_table_out_ptr, in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
- if (result) {
- edbg("SEP Driver: sep_construct_dma_tables_from_lli failed\n");
- goto end_function_with_error2;
- }
-
- /* fall through - free the lli entry arrays */
- dbg("in_num_entries_ptr is %08lx\n", *in_num_entries_ptr);
- dbg("out_num_entries_ptr is %08lx\n", *out_num_entries_ptr);
- dbg("table_data_size_ptr is %08lx\n", *table_data_size_ptr);
-end_function_with_error2:
- kfree(lli_out_array);
-end_function_with_error1:
- kfree(lli_in_array);
-end_function:
- dbg("SEP Driver:<-------- sep_prepare_input_output_dma_table end result = %d\n", (int) result);
- return result;
-
-}
-
-/*
- this function handles the request for creation of the DMA tables
- for the synchronous symmetric operations (AES, DES)
-*/
-static int sep_create_sync_dma_tables_handler(struct sep_device *sep,
- unsigned long arg)
-{
- int error;
- /* command arguments */
- struct sep_driver_build_sync_table_t command_args;
-
- dbg("SEP Driver:--------> sep_create_sync_dma_tables_handler start\n");
-
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_sync_table_t));
- if (error) {
- error = -EFAULT;
- goto end_function;
- }
-
- edbg("app_in_address is %08lx\n", command_args.app_in_address);
- edbg("app_out_address is %08lx\n", command_args.app_out_address);
- edbg("data_size is %lu\n", command_args.data_in_size);
- edbg("block_size is %lu\n", command_args.block_size);
-
- /* check if we need to build only input table or input/output */
- if (command_args.app_out_address)
- /* prepare input and output tables */
- error = sep_prepare_input_output_dma_table(sep,
- command_args.app_in_address,
- command_args.app_out_address,
- command_args.data_in_size,
- command_args.block_size,
- &command_args.in_table_address,
- &command_args.out_table_address, &command_args.in_table_num_entries, &command_args.out_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
- else
- /* prepare input tables */
- error = sep_prepare_input_dma_table(sep,
- command_args.app_in_address,
- command_args.data_in_size, command_args.block_size, &command_args.in_table_address, &command_args.in_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
-
- if (error)
- goto end_function;
- /* copy to user */
- if (copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_build_sync_table_t)))
- error = -EFAULT;
-end_function:
- dbg("SEP Driver:<-------- sep_create_sync_dma_tables_handler end\n");
- return error;
-}
-
-/*
- this function handles the request for freeing the dma tables for synchronous actions
-*/
-static int sep_free_dma_table_data_handler(struct sep_device *sep)
-{
- dbg("SEP Driver:--------> sep_free_dma_table_data_handler start\n");
-
- /* free input pages array */
- sep_free_dma_pages(sep->in_page_array, sep->in_num_pages, 0);
-
- /* free output pages array if needed */
- if (sep->out_page_array)
- sep_free_dma_pages(sep->out_page_array, sep->out_num_pages, 1);
-
- /* reset all the values */
- sep->in_page_array = 0;
- sep->out_page_array = 0;
- sep->in_num_pages = 0;
- sep->out_num_pages = 0;
- dbg("SEP Driver:<-------- sep_free_dma_table_data_handler end\n");
- return 0;
-}
-
-/*
- this function finds space for the new flow dma table
-*/
-static int sep_find_free_flow_dma_table_space(struct sep_device *sep,
- unsigned long **table_address_ptr)
-{
- int error = 0;
- /* pointer to the id field of the flow dma table */
- unsigned long *start_table_ptr;
- /* Do not make start_addr unsigned long * unless fixing the offset
- computations ! */
- void *flow_dma_area_start_addr;
- unsigned long *flow_dma_area_end_addr;
- /* maximum table size in words */
- unsigned long table_size_in_words;
-
- /* find the start address of the flow DMA table area */
- flow_dma_area_start_addr = sep->shared_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES;
-
- /* set end address of the flow table area */
- flow_dma_area_end_addr = flow_dma_area_start_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES;
-
- /* set table size in words */
- table_size_in_words = SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE * (sizeof(struct sep_lli_entry_t) / sizeof(long)) + 2;
-
- /* set the pointer to the start address of DMA area */
- start_table_ptr = flow_dma_area_start_addr;
-
- /* find the space for the next table */
- while (((*start_table_ptr & 0x7FFFFFFF) != 0) && start_table_ptr < flow_dma_area_end_addr)
- start_table_ptr += table_size_in_words;
-
- /* check if we reached the end of the flow tables area */
- if (start_table_ptr >= flow_dma_area_end_addr)
- error = -1;
- else
- *table_address_ptr = start_table_ptr;
-
- return error;
-}
-
-/*
- This function creates one DMA table for a flow and returns its data,
- and a pointer to its info entry
-*/
-static int sep_prepare_one_flow_dma_table(struct sep_device *sep,
- unsigned long virt_buff_addr,
- unsigned long virt_buff_size,
- struct sep_lli_entry_t *table_data,
- struct sep_lli_entry_t **info_entry_ptr,
- struct sep_flow_context_t *flow_data_ptr,
- bool isKernelVirtualAddress)
-{
- int error;
- /* the range in pages */
- unsigned long lli_array_size;
- struct sep_lli_entry_t *lli_array;
- struct sep_lli_entry_t *flow_dma_table_entry_ptr;
- unsigned long *start_dma_table_ptr;
- /* total table data counter */
- unsigned long dma_table_data_count;
- /* pointer that will keep the pointer to the pages of the virtual buffer */
- struct page **page_array_ptr;
- unsigned long entry_count;
-
- /* find the space for the new table */
- error = sep_find_free_flow_dma_table_space(sep, &start_dma_table_ptr);
- if (error)
- goto end_function;
-
- /* check if the pages are in Kernel Virtual Address layout */
- if (isKernelVirtualAddress == true)
- /* lock kernel buffer in the memory */
- error = sep_lock_kernel_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
- else
- /* lock user buffer in the memory */
- error = sep_lock_user_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
-
- if (error)
- goto end_function;
-
- /* store the lli array size at the beginning of the table - this table is
- now considered taken */
- *start_dma_table_ptr = lli_array_size;
-
- /* point to the place of the pages pointer of the table */
- start_dma_table_ptr++;
-
- /* set the pages pointer */
- *start_dma_table_ptr = (unsigned long) page_array_ptr;
-
- /* set the pointer to the first entry */
- flow_dma_table_entry_ptr = (struct sep_lli_entry_t *) (++start_dma_table_ptr);
-
- /* now create the entries for table */
- for (dma_table_data_count = entry_count = 0; entry_count < lli_array_size; entry_count++) {
- flow_dma_table_entry_ptr->physical_address = lli_array[entry_count].physical_address;
-
- flow_dma_table_entry_ptr->block_size = lli_array[entry_count].block_size;
-
- /* set the total data of a table */
- dma_table_data_count += lli_array[entry_count].block_size;
-
- flow_dma_table_entry_ptr++;
- }
-
- /* set the physical address */
- table_data->physical_address = virt_to_phys(start_dma_table_ptr);
-
- /* set the num_entries and total data size */
- table_data->block_size = ((lli_array_size + 1) << SEP_NUM_ENTRIES_OFFSET_IN_BITS) | (dma_table_data_count);
-
- /* set the info entry */
- flow_dma_table_entry_ptr->physical_address = 0xffffffff;
- flow_dma_table_entry_ptr->block_size = 0;
-
- /* set the pointer to info entry */
- *info_entry_ptr = flow_dma_table_entry_ptr;
-
- /* the array of the lli entries */
- kfree(lli_array);
-end_function:
- return error;
-}
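
The word arithmetic above is easier to follow as a structure. A hypothetical view of one slot in the flow DMA table area as this function lays it out (commentary only; the driver itself uses raw word pointers, and the entry struct mirrors the two-word sep_lli_entry_t):

struct page; /* opaque here; the kernel's struct page */

struct sep_sketch_lli_entry {
	unsigned long physical_address;
	unsigned long block_size;
};

/* A non-zero num_pages word marks the slot as taken (this is what
 * sep_find_free_flow_dma_table_space() scans for); the final entry
 * is the 0xffffffff info entry set at the end of the function. */
struct sep_sketch_flow_slot {
	unsigned long num_pages;
	struct page **page_array;
	struct sep_sketch_lli_entry entry[];
};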
-
-
-
-/*
- This function creates a list of tables for flow and returns the data for
- the first and last tables of the list
-*/
-static int sep_prepare_flow_dma_tables(struct sep_device *sep,
- unsigned long num_virtual_buffers,
- unsigned long first_buff_addr, struct sep_flow_context_t *flow_data_ptr, struct sep_lli_entry_t *first_table_data_ptr, struct sep_lli_entry_t *last_table_data_ptr, bool isKernelVirtualAddress)
-{
- int error;
- unsigned long virt_buff_addr;
- unsigned long virt_buff_size;
- struct sep_lli_entry_t table_data;
- struct sep_lli_entry_t *info_entry_ptr;
- struct sep_lli_entry_t *prev_info_entry_ptr;
- unsigned long i;
-
- /* init vars */
- error = 0;
- prev_info_entry_ptr = 0;
-
- /* init the first table to default */
- table_data.physical_address = 0xffffffff;
- first_table_data_ptr->physical_address = 0xffffffff;
- table_data.block_size = 0;
-
- for (i = 0; i < num_virtual_buffers; i++) {
- /* get the virtual buffer address */
- error = get_user(virt_buff_addr, &first_buff_addr);
- if (error)
- goto end_function;
-
- /* get the virtual buffer size */
- first_buff_addr++;
- error = get_user(virt_buff_size, &first_buff_addr);
- if (error)
- goto end_function;
-
- /* advance the address to point to the next pair of address|size */
- first_buff_addr++;
-
- /* now prepare the one flow LLI table from the data */
- error = sep_prepare_one_flow_dma_table(sep, virt_buff_addr, virt_buff_size, &table_data, &info_entry_ptr, flow_data_ptr, isKernelVirtualAddress);
- if (error)
- goto end_function;
-
- if (i == 0) {
- /* if this is the first table - save it to return to the user
- application */
- *first_table_data_ptr = table_data;
-
- /* set the pointer to info entry */
- prev_info_entry_ptr = info_entry_ptr;
- } else {
- /* not first table - the previous table info entry should
- be updated */
- prev_info_entry_ptr->block_size = (0x1 << SEP_INT_FLAG_OFFSET_IN_BITS) | (table_data.block_size);
-
- /* set the pointer to info entry */
- prev_info_entry_ptr = info_entry_ptr;
- }
- }
-
- /* set the last table data */
- *last_table_data_ptr = table_data;
-end_function:
- return error;
-}
-
-/*
- this function goes over all the flow tables connected to the given
- table and deallocates them
-*/
-static void sep_deallocated_flow_tables(struct sep_lli_entry_t *first_table_ptr)
-{
- /* id pointer */
- unsigned long *table_ptr;
- /* number of entries in the table */
- unsigned long num_entries;
- unsigned long num_pages;
- struct page **pages_ptr;
- /* pointer to the info entry */
- struct sep_lli_entry_t *info_entry_ptr;
-
- /* set the pointer to the first table */
- table_ptr = (unsigned long *) first_table_ptr->physical_address;
-
- /* set the num of entries */
- num_entries = (first_table_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS)
- & SEP_NUM_ENTRIES_MASK;
-
- /* go over all the connected tables */
- while (*table_ptr != 0xffffffff) {
- /* get number of pages */
- num_pages = *(table_ptr - 2);
-
- /* get the pointer to the pages */
- pages_ptr = (struct page **) (*(table_ptr - 1));
-
- /* free the pages */
- sep_free_dma_pages(pages_ptr, num_pages, 1);
-
- /* go to the info entry */
- info_entry_ptr = ((struct sep_lli_entry_t *) table_ptr) + (num_entries - 1);
-
- table_ptr = (unsigned long *) info_entry_ptr->physical_address;
- num_entries = (info_entry_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
- }
-
- return;
-}
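
The traversal above depends on every info entry pointing at the next table and on 0xffffffff terminating the chain. A compressed sketch of just that walk (hypothetical; repeats the two-word entry layout and assumes SEP_NUM_ENTRIES_OFFSET_IN_BITS is the 24-bit split used by the debug printer earlier):

struct sketch_entry {
	unsigned long physical_address;
	unsigned long block_size;
};

static void sketch_walk_tables(struct sketch_entry *table,
			       unsigned long num_entries)
{
	while ((unsigned long)table != 0xffffffff) {
		struct sketch_entry *info = table + num_entries - 1;

		/* ... act on table[0 .. num_entries - 2] here ... */
		num_entries = (info->block_size >> 24) & 0xff;
		table = (struct sketch_entry *)info->physical_address;
	}
}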
-
-/**
- * sep_find_flow_context - find a flow
- * @sep: the SEP we are working with
- * @flow_id: flow identifier
- *
- * Returns a pointer the matching flow, or NULL if the flow does not
- * exist.
- */
-
-static struct sep_flow_context_t *sep_find_flow_context(struct sep_device *sep,
- unsigned long flow_id)
-{
- int count;
- /*
- * always search for the flow with the default id first - since we
- * have already started working on the flow there can be no situation
- * in which 2 flows have the default flag
- */
- for (count = 0; count < SEP_DRIVER_NUM_FLOWS; count++) {
- if (sep->flows[count].flow_id == flow_id)
- return &sep->flows[count];
- }
- return NULL;
-}
-
-
-/*
- this function handles the request to create the DMA tables for flow
-*/
-static int sep_create_flow_dma_tables_handler(struct sep_device *sep,
- unsigned long arg)
-{
- int error = -ENOENT;
- struct sep_driver_build_flow_table_t command_args;
- /* first table - output */
- struct sep_lli_entry_t first_table_data;
- /* dma table data */
- struct sep_lli_entry_t last_table_data;
- /* pointer to the info entry of the previous DMA table */
- struct sep_lli_entry_t *prev_info_entry_ptr;
- /* pointer to the flow data structure */
- struct sep_flow_context_t *flow_context_ptr;
-
- dbg("SEP Driver:--------> sep_create_flow_dma_tables_handler start\n");
-
- /* init variables */
- prev_info_entry_ptr = 0;
- first_table_data.physical_address = 0xffffffff;
-
- /* find the free structure for flow data */
- error = -EINVAL;
- flow_context_ptr = sep_find_flow_context(sep, SEP_FREE_FLOW_ID);
- if (flow_context_ptr == NULL)
- goto end_function;
-
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_flow_table_t));
- if (error) {
- error = -EFAULT;
- goto end_function;
- }
-
- /* create flow tables */
- error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
- if (error)
- goto end_function_with_error;
-
- /* check if flow is static */
- if (!command_args.flow_type)
- /* point the info entry of the last to the info entry of the first */
- last_table_data = first_table_data;
-
- /* set output params */
- command_args.first_table_addr = first_table_data.physical_address;
- command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
- command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
-
- /* send the parameters to user application */
- error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_build_flow_table_t));
- if (error) {
- error = -EFAULT;
- goto end_function_with_error;
- }
-
- /* the whole flow was created - update the flow entry with the temp id */
- flow_context_ptr->flow_id = SEP_TEMP_FLOW_ID;
-
- /* set the processing tables data in the context */
- if (command_args.input_output_flag == SEP_DRIVER_IN_FLAG)
- flow_context_ptr->input_tables_in_process = first_table_data;
- else
- flow_context_ptr->output_tables_in_process = first_table_data;
-
- goto end_function;
-
-end_function_with_error:
- /* free the allocated tables */
- sep_deallocated_flow_tables(&first_table_data);
-end_function:
- dbg("SEP Driver:<-------- sep_create_flow_dma_tables_handler end\n");
- return error;
-}
-
-/*
- this function handles the request to add tables to a flow
-*/
-static int sep_add_flow_tables_handler(struct sep_device *sep, unsigned long arg)
-{
- int error;
- unsigned long num_entries;
- struct sep_driver_add_flow_table_t command_args;
- struct sep_flow_context_t *flow_context_ptr;
- /* first dma table data */
- struct sep_lli_entry_t first_table_data;
- /* last dma table data */
- struct sep_lli_entry_t last_table_data;
- /* pointer to the info entry of the current DMA table */
- struct sep_lli_entry_t *info_entry_ptr;
-
- dbg("SEP Driver:--------> sep_add_flow_tables_handler start\n");
-
- /* get input parameters */
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_flow_table_t));
- if (error) {
- error = -EFAULT;
- goto end_function;
- }
-
- /* find the flow structure for the flow id */
- flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
- if (flow_context_ptr == NULL)
- goto end_function;
-
- /* prepare the flow dma tables */
- error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
- if (error)
- goto end_function_with_error;
-
- /* now check if there is already an existing add table for this flow */
- if (command_args.inputOutputFlag == SEP_DRIVER_IN_FLAG) {
- /* this buffer was for input buffers */
- if (flow_context_ptr->input_tables_flag) {
- /* add table already exists - add the new tables to the end
- of the previous */
- num_entries = (flow_context_ptr->last_input_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
-
- info_entry_ptr = (struct sep_lli_entry_t *)
- (flow_context_ptr->last_input_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
-
- /* connect to list of tables */
- *info_entry_ptr = first_table_data;
-
- /* set the first table data */
- first_table_data = flow_context_ptr->first_input_table;
- } else {
- /* set the input flag */
- flow_context_ptr->input_tables_flag = 1;
-
- /* set the first table data */
- flow_context_ptr->first_input_table = first_table_data;
- }
- /* set the last table data */
- flow_context_ptr->last_input_table = last_table_data;
- } else { /* this is output tables */
-
- /* this buffer was for output buffers */
- if (flow_context_ptr->output_tables_flag) {
- /* add table already exists - add the new tables to
- the end of the previous */
- num_entries = (flow_context_ptr->last_output_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
-
- info_entry_ptr = (struct sep_lli_entry_t *)
- (flow_context_ptr->last_output_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
-
- /* connect to list of tables */
- *info_entry_ptr = first_table_data;
-
- /* set the first table data */
- first_table_data = flow_context_ptr->first_output_table;
- } else {
- /* set the output flag */
- flow_context_ptr->output_tables_flag = 1;
-
- /* set the first table data */
- flow_context_ptr->first_output_table = first_table_data;
- }
- /* set the last table data */
- flow_context_ptr->last_output_table = last_table_data;
- }
-
- /* set output params */
- command_args.first_table_addr = first_table_data.physical_address;
- command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
- command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
-
- /* send the parameters to user application */
- error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_add_flow_table_t));
- if (error)
- error = -EFAULT;
-end_function_with_error:
- /* free the allocated tables */
- sep_deallocated_flow_tables(&first_table_data);
-end_function:
- dbg("SEP Driver:<-------- sep_add_flow_tables_handler end\n");
- return error;
-}
-
-/*
- this function adds the flow add message to the specific flow
-*/
-static int sep_add_flow_tables_message_handler(struct sep_device *sep, unsigned long arg)
-{
- int error;
- struct sep_driver_add_message_t command_args;
- struct sep_flow_context_t *flow_context_ptr;
-
- dbg("SEP Driver:--------> sep_add_flow_tables_message_handler start\n");
-
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_message_t));
- if (error) {
- error = -EFAULT;
- goto end_function;
- }
-
- /* check input */
- if (command_args.message_size_in_bytes > SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES) {
- error = -ENOMEM;
- goto end_function;
- }
-
- /* find the flow context */
- flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
- if (flow_context_ptr == NULL)
- goto end_function;
-
- /* copy the message into context */
- flow_context_ptr->message_size_in_bytes = command_args.message_size_in_bytes;
- error = copy_from_user(flow_context_ptr->message, (void *) command_args.message_address, command_args.message_size_in_bytes);
- if (error)
- error = -EFAULT;
-end_function:
- dbg("SEP Driver:<-------- sep_add_flow_tables_message_handler end\n");
- return error;
-}
-
-
-/*
- this function returns the bus and virtual addresses of the static pool
-*/
-static int sep_get_static_pool_addr_handler(struct sep_device *sep, unsigned long arg)
-{
- int error;
- struct sep_driver_static_pool_addr_t command_args;
-
- dbg("SEP Driver:--------> sep_get_static_pool_addr_handler start\n");
-
- /* prepare the output parameters in the struct */
- command_args.physical_static_address = sep->shared_bus + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
- command_args.virtual_static_address = (unsigned long)sep->shared_addr + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
-
- edbg("SEP Driver:bus_static_address is %08lx, virtual_static_address %08lx\n", command_args.physical_static_address, command_args.virtual_static_address);
-
- /* send the parameters to user application */
- error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_static_pool_addr_t));
- if (error)
- error = -EFAULT;
- dbg("SEP Driver:<-------- sep_get_static_pool_addr_handler end\n");
- return error;
-}
-
-/*
- this function gets the offset of the physical address from the start
- of the mapped area
-*/
-static int sep_get_physical_mapped_offset_handler(struct sep_device *sep, unsigned long arg)
-{
- int error;
- struct sep_driver_get_mapped_offset_t command_args;
-
- dbg("SEP Driver:--------> sep_get_physical_mapped_offset_handler start\n");
-
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_get_mapped_offset_t));
- if (error) {
- error = -EFAULT;
- goto end_function;
- }
-
- if (command_args.physical_address < sep->shared_bus) {
- error = -EINVAL;
- goto end_function;
- }
-
- /* prepare the output parameters in the struct */
- command_args.offset = command_args.physical_address - sep->shared_bus;
-
- edbg("SEP Driver:bus_address is %08lx, offset is %lu\n", command_args.physical_address, command_args.offset);
-
- /* send the parameters to user application */
- error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_get_mapped_offset_t));
- if (error)
- error = -EFAULT;
-end_function:
- dbg("SEP Driver:<-------- sep_get_physical_mapped_offset_handler end\n");
- return error;
-}
-
-
-/*
- Wait (by polling GPR3) for a start message from the SEP; on fatal error
- return the status read from GPR0
-*/
-static int sep_start_handler(struct sep_device *sep)
-{
- unsigned long reg_val;
- unsigned long error = 0;
-
- dbg("SEP Driver:--------> sep_start_handler start\n");
-
- /* wait in polling for message from SEP */
- do
- reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
- while (!reg_val);
-
- /* check the value */
- if (reg_val == 0x1)
- /* fatal error - read error status from GPR0 */
- error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
- dbg("SEP Driver:<-------- sep_start_handler end\n");
- return error;
-}
-
-/*
- this function handles the request for SEP initialization
-*/
-static int sep_init_handler(struct sep_device *sep, unsigned long arg)
-{
- unsigned long message_word;
- unsigned long *message_ptr;
- struct sep_driver_init_t command_args;
- unsigned long counter;
- unsigned long error;
- unsigned long reg_val;
-
- dbg("SEP Driver:--------> sep_init_handler start\n");
- error = 0;
-
- error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_init_t));
- if (error) {
- error = -EFAULT;
- goto end_function;
- }
- dbg("SEP Driver:--------> sep_init_handler - finished copy_from_user\n");
-
- /* PATCH - configure the DMA to single-burst instead of multi-burst */
- /*sep_configure_dma_burst(); */
-
- dbg("SEP Driver:--------> sep_init_handler - finished sep_configure_dma_burst \n");
-
- message_ptr = (unsigned long *) command_args.message_addr;
-
- /* set the base address of the SRAM */
- sep_write_reg(sep, HW_SRAM_ADDR_REG_ADDR, HW_CC_SRAM_BASE_ADDRESS);
-
- for (counter = 0; counter < command_args.message_size_in_words; counter++, message_ptr++) {
- get_user(message_word, message_ptr);
- /* write data to SRAM */
- sep_write_reg(sep, HW_SRAM_DATA_REG_ADDR, message_word);
- edbg("SEP Driver:message_word is %lu\n", message_word);
- /* wait for write complete */
- sep_wait_sram_write(sep);
- }
- dbg("SEP Driver:--------> sep_init_handler - finished getting messages from user space\n");
- /* signal SEP */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1);
-
- do
- reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
- while (!(reg_val & 0xFFFFFFFD));
-
- dbg("SEP Driver:--------> sep_init_handler - finished waiting for reg_val & 0xFFFFFFFD \n");
-
- /* check the value */
- if (reg_val == 0x1) {
- edbg("SEP Driver:init failed\n");
-
- error = sep_read_reg(sep, 0x8060);
- edbg("SEP Driver:sw monitor is %lu\n", error);
-
- /* fatal error - read error status from GPR0 */
- error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
- edbg("SEP Driver:error is %lu\n", error);
- }
-end_function:
- dbg("SEP Driver:<-------- sep_init_handler end\n");
- return error;
-
-}
-
-/*
- this function handles the request for cache and resident reallocation
-*/
-static int sep_realloc_cache_resident_handler(struct sep_device *sep,
- unsigned long arg)
-{
- struct sep_driver_realloc_cache_resident_t command_args;
- int error;
-
- /* copy cache and resident to their intended locations */
- error = sep_load_firmware(sep);
- if (error)
- return error;
-
- command_args.new_base_addr = sep->shared_bus;
-
- /* find the new base address according to the lowest address between
- cache, resident and shared area */
- if (sep->resident_bus < command_args.new_base_addr)
- command_args.new_base_addr = sep->resident_bus;
- if (sep->rar_bus < command_args.new_base_addr)
- command_args.new_base_addr = sep->rar_bus;
-
- /* set the return parameters */
- command_args.new_cache_addr = sep->rar_bus;
- command_args.new_resident_addr = sep->resident_bus;
-
- /* set the new shared area */
- command_args.new_shared_area_addr = sep->shared_bus;
-
- edbg("SEP Driver:command_args.new_shared_addr is %08llx\n", command_args.new_shared_area_addr);
- edbg("SEP Driver:command_args.new_base_addr is %08llx\n", command_args.new_base_addr);
- edbg("SEP Driver:command_args.new_resident_addr is %08llx\n", command_args.new_resident_addr);
- edbg("SEP Driver:command_args.new_rar_addr is %08llx\n", command_args.new_cache_addr);
-
- /* return to user */
- if (copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_realloc_cache_resident_t)))
- return -EFAULT;
- return 0;
-}
-
-/**
- * sep_get_time_handler - time request from user space
- * @sep: sep we are to set the time for
- * @arg: pointer to user space arg buffer
- *
- * This function reports back the time and the address in the SEP
- * shared buffer at which it has been placed. (Do we really need this!!!)
- */
-
-static int sep_get_time_handler(struct sep_device *sep, unsigned long arg)
-{
- struct sep_driver_get_time_t command_args;
-
- mutex_lock(&sep_mutex);
- command_args.time_value = sep_set_time(sep);
- command_args.time_physical_address = (unsigned long)sep_time_address(sep);
- mutex_unlock(&sep_mutex);
- if (copy_to_user((void __user *)arg,
- &command_args, sizeof(struct sep_driver_get_time_t)))
- return -EFAULT;
- return 0;
-
-}
-
-/*
- This API handles the end transaction request
-*/
-static int sep_end_transaction_handler(struct sep_device *sep, unsigned long arg)
-{
- dbg("SEP Driver:--------> sep_end_transaction_handler start\n");
-
-#if 0 /*!SEP_DRIVER_POLLING_MODE */
- /* close IMR */
- sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
-
- /* release IRQ line */
- free_irq(SEP_DIRVER_IRQ_NUM, sep);
-
- /* unlock the sep mutex */
- mutex_unlock(&sep_mutex);
-#endif
-
- dbg("SEP Driver:<-------- sep_end_transaction_handler end\n");
-
- return 0;
-}
-
-
-/**
- * sep_set_flow_id_handler - handle flow setting
- * @sep: the SEP we are configuring
- * @flow_id: the flow we are setting
- *
- * This function handles the set flow id command
- */
-static int sep_set_flow_id_handler(struct sep_device *sep,
- unsigned long flow_id)
-{
- int error = 0;
- struct sep_flow_context_t *flow_data_ptr;
-
- /* find the flow data structure that was just used for creating new flow
- - its id should be default */
-
- mutex_lock(&sep_mutex);
- flow_data_ptr = sep_find_flow_context(sep, SEP_TEMP_FLOW_ID);
- if (flow_data_ptr)
- flow_data_ptr->flow_id = flow_id; /* set flow id */
- else
- error = -EINVAL;
- mutex_unlock(&sep_mutex);
- return error;
-}
-
-static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- int error = 0;
- struct sep_device *sep = filp->private_data;
-
- dbg("------------>SEP Driver: ioctl start\n");
-
- edbg("SEP Driver: cmd is %x\n", cmd);
-
- switch (cmd) {
- case SEP_IOCSENDSEPCOMMAND:
- /* send command to SEP */
- sep_send_command_handler(sep);
- edbg("SEP Driver: after sep_send_command_handler\n");
- break;
- case SEP_IOCSENDSEPRPLYCOMMAND:
- /* send reply command to SEP */
- sep_send_reply_command_handler(sep);
- break;
- case SEP_IOCALLOCDATAPOLL:
- /* allocate data pool */
- error = sep_allocate_data_pool_memory_handler(sep, arg);
- break;
- case SEP_IOCWRITEDATAPOLL:
- /* write data into memory pool */
- error = sep_write_into_data_pool_handler(sep, arg);
- break;
- case SEP_IOCREADDATAPOLL:
- /* read data from data pool into application memory */
- error = sep_read_from_data_pool_handler(sep, arg);
- break;
- case SEP_IOCCREATESYMDMATABLE:
- /* create dma table for synchronous operation */
- error = sep_create_sync_dma_tables_handler(sep, arg);
- break;
- case SEP_IOCCREATEFLOWDMATABLE:
- /* create flow dma tables */
- error = sep_create_flow_dma_tables_handler(sep, arg);
- break;
- case SEP_IOCFREEDMATABLEDATA:
- /* free the pages */
- error = sep_free_dma_table_data_handler(sep);
- break;
- case SEP_IOCSETFLOWID:
- /* set flow id */
- error = sep_set_flow_id_handler(sep, (unsigned long)arg);
- break;
- case SEP_IOCADDFLOWTABLE:
- /* add tables to the dynamic flow */
- error = sep_add_flow_tables_handler(sep, arg);
- break;
- case SEP_IOCADDFLOWMESSAGE:
- /* add message of add tables to flow */
- error = sep_add_flow_tables_message_handler(sep, arg);
- break;
- case SEP_IOCSEPSTART:
- /* start command to sep */
- error = sep_start_handler(sep);
- break;
- case SEP_IOCSEPINIT:
- /* init command to sep */
- error = sep_init_handler(sep, arg);
- break;
- case SEP_IOCGETSTATICPOOLADDR:
- /* get the physical and virtual addresses of the static pool */
- error = sep_get_static_pool_addr_handler(sep, arg);
- break;
- case SEP_IOCENDTRANSACTION:
- error = sep_end_transaction_handler(sep, arg);
- break;
- case SEP_IOCREALLOCCACHERES:
- error = sep_realloc_cache_resident_handler(sep, arg);
- break;
- case SEP_IOCGETMAPPEDADDROFFSET:
- error = sep_get_physical_mapped_offset_handler(sep, arg);
- break;
- case SEP_IOCGETIME:
- error = sep_get_time_handler(sep, arg);
- break;
- default:
- error = -ENOTTY;
- break;
- }
- dbg("SEP Driver:<-------- ioctl end\n");
- return error;
-}
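
For orientation, a hedged sketch of how a user-space client would drive the sync-DMA-table path through this ioctl. The device node name is an assumption, and the header include is the presumed location of SEP_IOCCREATESYMDMATABLE and the command struct:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "sep_driver_api.h" /* assumed header for SEP_IOC* and structs */

static int sep_build_tables_example(unsigned long in, unsigned long out,
				    unsigned long size, unsigned long block)
{
	struct sep_driver_build_sync_table_t args = {0};
	int fd = open("/dev/sep", O_RDWR); /* hypothetical node name */
	int ret;

	if (fd < 0)
		return -1;
	args.app_in_address = in;
	args.app_out_address = out; /* 0 selects the input-only path */
	args.data_in_size = size;
	args.block_size = block;
	args.isKernelVirtualAddress = 0;
	ret = ioctl(fd, SEP_IOCCREATESYMDMATABLE, &args);
	/* on success the driver fills in args.in_table_address etc. */
	close(fd);
	return ret < 0 ? -1 : 0;
}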
-
-
-
-#if !SEP_DRIVER_POLLING_MODE
-
-/* handler for flow done interrupt */
-
-static void sep_flow_done_handler(struct work_struct *work)
-{
- struct sep_flow_context_t *flow_data_ptr;
-
- /* obtain the mutex */
- mutex_lock(&sep_mutex);
-
- /* get the pointer to context */
- flow_data_ptr = (struct sep_flow_context_t *) work;
-
- /* free all the current input tables in sep */
- sep_deallocated_flow_tables(&flow_data_ptr->input_tables_in_process);
-
- /* free all the current output tables in SEP (if needed) */
- if (flow_data_ptr->output_tables_in_process.physical_address != 0xffffffff)
- sep_deallocated_flow_tables(&flow_data_ptr->output_tables_in_process);
-
- /* check if we have additional tables to be sent to SEP; only the
- input flag need be checked */
- if (flow_data_ptr->input_tables_flag) {
- /* copy the message to the shared RAM and signal SEP */
- memcpy((void *) flow_data_ptr->message, (void *) sep->shared_addr, flow_data_ptr->message_size_in_bytes);
-
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, 0x2);
- }
- mutex_unlock(&sep_mutex);
-}
-/*
- interrupt handler function
-*/
-static irqreturn_t sep_inthandler(int irq, void *dev_id)
-{
- irqreturn_t int_error;
- unsigned long reg_val;
- unsigned long flow_id;
- struct sep_flow_context_t *flow_context_ptr;
- struct sep_device *sep = dev_id;
-
- int_error = IRQ_HANDLED;
-
- /* read the IRR register to check if this is SEP interrupt */
- reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
- edbg("SEP Interrupt - reg is %08lx\n", reg_val);
-
- /* check if this is the flow interrupt */
- if (0 /*reg_val & (0x1 << 11) */ ) {
- /* read GPR0 to find out which flow is done */
- flow_id = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
-
- /* find the context of the flow */
- flow_context_ptr = sep_find_flow_context(sep, flow_id >> 28);
- if (flow_context_ptr == NULL)
- goto end_function_with_error;
-
- /* queue the work */
- INIT_WORK(&flow_context_ptr->flow_wq, sep_flow_done_handler);
- queue_work(sep->flow_wq, &flow_context_ptr->flow_wq);
-
- } else {
- /* check if this is reply interrupt from SEP */
- if (reg_val & (0x1 << 13)) {
- /* update the counter of reply messages */
- sep->reply_ct++;
- /* wake up the waiting process */
- wake_up(&sep_event);
- } else {
- int_error = IRQ_NONE;
- goto end_function;
- }
- }
-end_function_with_error:
- /* clear the interrupt */
- sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
-end_function:
- return int_error;
-}
-
-#endif
-
-
-
-#if 0
-
-static void sep_wait_busy(struct sep_device *sep)
-{
- u32 reg;
-
- do {
- reg = sep_read_reg(sep, HW_HOST_SEP_BUSY_REG_ADDR);
- } while (reg);
-}
-
-/*
- PATCH for configuring the DMA to single burst instead of multi-burst
-*/
-static void sep_configure_dma_burst(struct sep_device *sep)
-{
-#define HW_AHB_RD_WR_BURSTS_REG_ADDR 0x0E10UL
-
- dbg("SEP Driver:<-------- sep_configure_dma_burst start \n");
-
- /* request access to registers from SEP */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
-
- dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (write reg) \n");
-
- sep_wait_busy(sep);
-
- dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (while(revVal) wait loop) \n");
-
- /* set the DMA burst register to single burst */
- sep_write_reg(sep, HW_AHB_RD_WR_BURSTS_REG_ADDR, 0x0UL);
-
- /* release the sep busy */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x0UL);
- sep_wait_busy(sep);
-
- dbg("SEP Driver:<-------- sep_configure_dma_burst done \n");
-
-}
-
-#endif
-
-/*
- Function that is activated on the successful probe of the SEP device
-*/
-static int __devinit sep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int error = 0;
- struct sep_device *sep;
- int counter;
- int size; /* size of memory for allocation */
-
- edbg("Sep pci probe starting\n");
- if (sep_dev != NULL) {
- dev_warn(&pdev->dev, "only one SEP supported.\n");
- return -EBUSY;
- }
-
- /* enable the device */
- error = pci_enable_device(pdev);
- if (error) {
- edbg("error enabling pci device\n");
- goto end_function;
- }
-
- /* set the pci dev pointer */
- sep_dev = &sep_instance;
- sep = &sep_instance;
-
- edbg("sep->shared_addr = %p\n", sep->shared_addr);
- /* transaction counter that coordinates the transactions between SEP
- and HOST */
- sep->send_ct = 0;
- /* counter for the messages from sep */
- sep->reply_ct = 0;
- /* counter for the number of bytes allocated in the pool
- for the current transaction */
- sep->data_pool_bytes_allocated = 0;
-
- /* calculate the total size for allocation */
- size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
- SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
-
- /* allocate the shared area */
- if (sep_map_and_alloc_shared_area(sep, size)) {
- error = -ENOMEM;
- /* allocation failed */
- goto end_function_error;
- }
- /* now set the memory regions */
-#if (SEP_DRIVER_RECONFIG_MESSAGE_AREA == 1)
- /* Note: this test section will need moving before it could ever
- work, as the registers are not yet mapped! */
- /* send the new SHARED MESSAGE AREA to the SEP */
- sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
-
- /* poll for SEP response */
- retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
- while (retval != 0xffffffff && retval != sep->shared_bus)
- retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
-
- /* check the return value (register) */
- if (retval != sep->shared_bus) {
- error = -ENOMEM;
- goto end_function_deallocate_sep_shared_area;
- }
-#endif
- /* init the flow contexts */
- for (counter = 0; counter < SEP_DRIVER_NUM_FLOWS; counter++)
- sep->flows[counter].flow_id = SEP_FREE_FLOW_ID;
-
- sep->flow_wq = create_singlethread_workqueue("sepflowwq");
- if (sep->flow_wq == NULL) {
- error = -ENOMEM;
- edbg("sep_driver:flow queue creation failed\n");
- goto end_function_deallocate_sep_shared_area;
- }
- edbg("SEP Driver: create flow workqueue \n");
- sep->pdev = pci_dev_get(pdev);
-
- sep->reg_addr = pci_ioremap_bar(pdev, 0);
- if (!sep->reg_addr) {
- edbg("sep: ioremap of registers failed.\n");
- goto end_function_deallocate_sep_shared_area;
- }
- edbg("SEP Driver:reg_addr is %p\n", sep->reg_addr);
-
- /* load the rom code */
- sep_load_rom_code(sep);
-
- /* set up system base address and shared memory location */
- sep->rar_addr = dma_alloc_coherent(&sep->pdev->dev,
- 2 * SEP_RAR_IO_MEM_REGION_SIZE,
- &sep->rar_bus, GFP_KERNEL);
-
- if (!sep->rar_addr) {
- edbg("SEP Driver:can't allocate rar\n");
- goto end_function_uniomap;
- }
-
-
- edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
- edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
-
-#if !SEP_DRIVER_POLLING_MODE
-
- edbg("SEP Driver: about to write IMR and ICR REG_ADDR\n");
-
- /* clear ICR register */
- sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
-
- /* set the IMR register - open only GPR 2 */
- sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
-
- edbg("SEP Driver: about to call request_irq\n");
- /* get the interrupt line */
- error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED, "sep_driver", sep);
- if (error)
- goto end_function_free_res;
- return 0;
- edbg("SEP Driver: about to write IMR REG_ADDR");
-
- /* set the IMR register - open only GPR 2 */
- sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
-
-end_function_free_res:
- dma_free_coherent(&sep->pdev->dev, 2 * SEP_RAR_IO_MEM_REGION_SIZE,
- sep->rar_addr, sep->rar_bus);
-#endif /* SEP_DRIVER_POLLING_MODE */
-end_function_uniomap:
- iounmap(sep->reg_addr);
-end_function_deallocate_sep_shared_area:
- /* de-allocate shared area */
- sep_unmap_and_free_shared_area(sep, size);
-end_function_error:
- sep_dev = NULL;
-end_function:
- return error;
-}
-
-static const struct pci_device_id sep_pci_id_tbl[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080c)},
- {0}
-};
-
-MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
-
-/* structure for registering the driver with the PCI core */
-static struct pci_driver sep_pci_driver = {
- .name = "sep_sec_driver",
- .id_table = sep_pci_id_tbl,
- .probe = sep_probe
- /* FIXME: remove handler */
-};
-
-/* major and minor device numbers */
-static dev_t sep_devno;
-
-/* the file operations structure of the driver */
-static struct file_operations sep_file_operations = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = sep_ioctl,
- .poll = sep_poll,
- .open = sep_open,
- .release = sep_release,
- .mmap = sep_mmap,
-};
-
-
-/* cdev struct of the driver */
-static struct cdev sep_cdev;
-
-/*
- this function registers the driver with the file system
-*/
-static int sep_register_driver_to_fs(void)
-{
- int ret_val = alloc_chrdev_region(&sep_devno, 0, 1, "sep_sec_driver");
- if (ret_val) {
- edbg("sep: major number allocation failed, retval is %d\n",
- ret_val);
- return ret_val;
- }
- /* init cdev */
- cdev_init(&sep_cdev, &sep_file_operations);
- sep_cdev.owner = THIS_MODULE;
-
- /* register the driver with the kernel */
- ret_val = cdev_add(&sep_cdev, sep_devno, 1);
- if (ret_val) {
- edbg("sep_driver:cdev_add failed, retval is %d\n", ret_val);
- /* unregister dev numbers */
- unregister_chrdev_region(sep_devno, 1);
- }
- return ret_val;
-}
-
-
-/*--------------------------------------------------------------
- init function
-----------------------------------------------------------------*/
-static int __init sep_init(void)
-{
- int ret_val = 0;
- dbg("SEP Driver:-------->Init start\n");
- /* FIXME: Probe can occur before we are ready to survive a probe */
- ret_val = pci_register_driver(&sep_pci_driver);
- if (ret_val) {
- edbg("sep_driver:sep_driver_to_device failed, ret_val is %d\n", ret_val);
- goto end_function_unregister_from_fs;
- }
- /* register driver to fs */
- ret_val = sep_register_driver_to_fs();
- if (ret_val)
- goto end_function_unregister_pci;
- goto end_function;
-end_function_unregister_pci:
- pci_unregister_driver(&sep_pci_driver);
-end_function_unregister_from_fs:
- /* unregister from fs */
- cdev_del(&sep_cdev);
- /* unregister dev numbers */
- unregister_chrdev_region(sep_devno, 1);
-end_function:
- dbg("SEP Driver:<-------- Init end\n");
- return ret_val;
-}
-
-
-/*-------------------------------------------------------------
- exit function
---------------------------------------------------------------*/
-static void __exit sep_exit(void)
-{
- int size;
-
- dbg("SEP Driver:--------> Exit start\n");
-
- /* unregister from fs */
- cdev_del(&sep_cdev);
- /* unregister dev numbers */
- unregister_chrdev_region(sep_devno, 1);
- /* calculate the total size for de-allocation */
- size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
- SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES +
- SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
- SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES +
- SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
- SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
- /* FIXME: We need to do this in the unload for the device */
- /* free shared area */
- if (sep_dev) {
- sep_unmap_and_free_shared_area(sep_dev, size);
- edbg("SEP Driver: free pages SEP SHARED AREA \n");
- iounmap((void *) sep_dev->reg_addr);
- edbg("SEP Driver: iounmap \n");
- }
- edbg("SEP Driver: release_mem_region \n");
- dbg("SEP Driver:<-------- Exit end\n");
-}
-
-
-module_init(sep_init);
-module_exit(sep_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/sep/sep_driver_api.h b/drivers/staging/sep/sep_driver_api.h
deleted file mode 100644
index 7ef16da..0000000
--- a/drivers/staging/sep/sep_driver_api.h
+++ /dev/null
@@ -1,425 +0,0 @@
-/*
- *
- * sep_driver_api.h - Security Processor Driver api definitions
- *
- * Copyright(c) 2009 Intel Corporation. All rights reserved.
- * Copyright(c) 2009 Discretix. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * CONTACTS:
- *
- * Mark Allyn mark.a.allyn@intel.com
- *
- * CHANGES:
- *
- * 2009.06.26 Initial publish
- *
- */
-
-#ifndef __SEP_DRIVER_API_H__
-#define __SEP_DRIVER_API_H__
-
-
-
-/*----------------------------------------------------------------
- IOCTL command defines
- -----------------------------------------------------------------*/
-
-/* magic number 1 of the sep IOCTL command */
-#define SEP_IOC_MAGIC_NUMBER 's'
-
-/* sends interrupt to sep that message is ready */
-#define SEP_IOCSENDSEPCOMMAND _IO(SEP_IOC_MAGIC_NUMBER , 0)
-
-/* sends interrupt to sep that the reply message is ready */
-#define SEP_IOCSENDSEPRPLYCOMMAND _IO(SEP_IOC_MAGIC_NUMBER , 1)
-
-/* allocate memory in data pool */
-#define SEP_IOCALLOCDATAPOLL _IO(SEP_IOC_MAGIC_NUMBER , 2)
-
-/* write to pre-allocated memory in data pool */
-#define SEP_IOCWRITEDATAPOLL _IO(SEP_IOC_MAGIC_NUMBER , 3)
-
-/* read from pre-allocated memory in data pool */
-#define SEP_IOCREADDATAPOLL _IO(SEP_IOC_MAGIC_NUMBER , 4)
-
-/* create sym dma lli tables */
-#define SEP_IOCCREATESYMDMATABLE _IO(SEP_IOC_MAGIC_NUMBER , 5)
-
-/* create flow dma lli tables */
-#define SEP_IOCCREATEFLOWDMATABLE _IO(SEP_IOC_MAGIC_NUMBER , 6)
-
-/* free dynamic data allocated during table creation */
-#define SEP_IOCFREEDMATABLEDATA _IO(SEP_IOC_MAGIC_NUMBER , 7)
-
-/* get the static pool area addresses (physical and virtual) */
-#define SEP_IOCGETSTATICPOOLADDR _IO(SEP_IOC_MAGIC_NUMBER , 8)
-
-/* set flow id command */
-#define SEP_IOCSETFLOWID _IO(SEP_IOC_MAGIC_NUMBER , 9)
-
-/* add tables to the dynamic flow */
-#define SEP_IOCADDFLOWTABLE _IO(SEP_IOC_MAGIC_NUMBER , 10)
-
-/* add flow add tables message */
-#define SEP_IOCADDFLOWMESSAGE _IO(SEP_IOC_MAGIC_NUMBER , 11)
-
-/* start sep command */
-#define SEP_IOCSEPSTART _IO(SEP_IOC_MAGIC_NUMBER , 12)
-
-/* init sep command */
-#define SEP_IOCSEPINIT _IO(SEP_IOC_MAGIC_NUMBER , 13)
-
-/* end transaction command */
-#define SEP_IOCENDTRANSACTION _IO(SEP_IOC_MAGIC_NUMBER , 15)
-
-/* reallocate cache and resident */
-#define SEP_IOCREALLOCCACHERES _IO(SEP_IOC_MAGIC_NUMBER , 16)
-
-/* get the offset of the address starting from the beginning of the map area */
-#define SEP_IOCGETMAPPEDADDROFFSET _IO(SEP_IOC_MAGIC_NUMBER , 17)
-
-/* get time address and value */
-#define SEP_IOCGETIME _IO(SEP_IOC_MAGIC_NUMBER , 19)
-
-/*-------------------------------------------
- TYPEDEFS
-----------------------------------------------*/
-
-/*
- init command struct
-*/
-struct sep_driver_init_t {
- /* start of the 1G region of host memory that SEP can access */
- unsigned long message_addr;
-
- /* size of the message in words */
- unsigned long message_size_in_words;
-
-};
-
-
-/*
- realloc cache resident command
-*/
-struct sep_driver_realloc_cache_resident_t {
- /* new cache address */
- u64 new_cache_addr;
- /* new resident address */
- u64 new_resident_addr;
- /* new shared area address */
- u64 new_shared_area_addr;
- /* new base address */
- u64 new_base_addr;
-};
-
-struct sep_driver_alloc_t {
- /* virtual address of allocated space */
- unsigned long offset;
-
- /* physical address of allocated space */
- unsigned long phys_address;
-
- /* number of bytes to allocate */
- unsigned long num_bytes;
-};
-
-/*
- command struct for writing data to the data pool
-*/
-struct sep_driver_write_t {
- /* application space address */
- unsigned long app_address;
-
- /* address of the data pool */
- unsigned long datapool_address;
-
- /* number of bytes to write */
- unsigned long num_bytes;
-};
-
-/*
- command struct for reading data from the data pool
-*/
-struct sep_driver_read_t {
- /* application space address */
- unsigned long app_address;
-
- /* address of the data pool */
- unsigned long datapool_address;
-
- /* number of bytes to read */
- unsigned long num_bytes;
-};
-
-/*
- command struct for building the synchronic DMA tables
-*/
-struct sep_driver_build_sync_table_t {
- /* address value of the data in */
- unsigned long app_in_address;
-
- /* size of data in */
- unsigned long data_in_size;
-
- /* address of the data out */
- unsigned long app_out_address;
-
- /* the size of the block of the operation - if needed,
- every table will be modulo this parameter */
- unsigned long block_size;
-
- /* the physical address of the first input DMA table */
- unsigned long in_table_address;
-
- /* number of entries in the first input DMA table */
- unsigned long in_table_num_entries;
-
- /* the physical address of the first output DMA table */
- unsigned long out_table_address;
-
- /* number of entries in the first output DMA table */
- unsigned long out_table_num_entries;
-
- /* data in the first input table */
- unsigned long table_data_size;
-
- /* true if the buffer address is a kernel virtual address */
- bool isKernelVirtualAddress;
-
-};
-
-/*
- command struct for building the flow DMA tables
-*/
-struct sep_driver_build_flow_table_t {
- /* flow type */
- unsigned long flow_type;
-
- /* flag for input output */
- unsigned long input_output_flag;
-
- /* address value of the data in */
- unsigned long virt_buff_data_addr;
-
- /* size of data in */
- unsigned long num_virtual_buffers;
-
- /* the physical address of the first input DMA table */
- unsigned long first_table_addr;
-
- /* number of entries in the first input DMA table */
- unsigned long first_table_num_entries;
-
- /* data in the first input table */
- unsigned long first_table_data_size;
-
- /* true if the buffer address is a kernel virtual address */
- bool isKernelVirtualAddress;
-};
-
-
-struct sep_driver_add_flow_table_t {
- /* flow id */
- unsigned long flow_id;
-
- /* flag for input output */
- unsigned long inputOutputFlag;
-
- /* address value of the data in */
- unsigned long virt_buff_data_addr;
-
- /* size of data in */
- unsigned long num_virtual_buffers;
-
- /* address of the first table */
- unsigned long first_table_addr;
-
- /* number of entries in the first table */
- unsigned long first_table_num_entries;
-
- /* data size of the first table */
- unsigned long first_table_data_size;
-
- /* true if the buffer address is a kernel virtual address */
- bool isKernelVirtualAddress;
-
-};
-
-/*
- command struct for set flow id
-*/
-struct sep_driver_set_flow_id_t {
- /* flow id to set */
- unsigned long flow_id;
-};
-
-
-/* command struct for add tables message */
-struct sep_driver_add_message_t {
- /* flow id the message belongs to */
- unsigned long flow_id;
-
- /* message size in bytes */
- unsigned long message_size_in_bytes;
-
- /* address of the message */
- unsigned long message_address;
-};
-
-/* command struct for static pool addresses */
-struct sep_driver_static_pool_addr_t {
- /* physical address of the static pool */
- unsigned long physical_static_address;
-
- /* virtual address of the static pool */
- unsigned long virtual_static_address;
-};
-
-/* command struct for getting the offset of the physical address from
- the start of the mapped area */
-struct sep_driver_get_mapped_offset_t {
- /* physical address to translate */
- unsigned long physical_address;
-
- /* offset of that address from the start of the mapped area */
- unsigned long offset;
-};
-
-/* command struct for getting time value and address */
-struct sep_driver_get_time_t {
- /* physical address of stored time */
- unsigned long time_physical_address;
-
- /* value of the stored time */
- unsigned long time_value;
-};
-
-
-/*
- structure that represents one entry in the DMA LLI table
-*/
-struct sep_lli_entry_t {
- /* physical address */
- unsigned long physical_address;
-
- /* block size */
- unsigned long block_size;
-};
-
-/*
- structure that represents data needed for lli table construction
-*/
-struct sep_lli_prepare_table_data_t {
- /* pointer to the memory where the first lli entry is to be built */
- struct sep_lli_entry_t *lli_entry_ptr;
-
- /* pointer to the array of lli entries from which the table is to be built */
- struct sep_lli_entry_t *lli_array_ptr;
-
- /* number of elements in lli array */
- int lli_array_size;
-
- /* number of entries in the created table */
- int num_table_entries;
-
- /* number of array entries processed during table creation */
- int num_array_entries_processed;
-
- /* the total data size in the created table */
- int lli_table_total_data_size;
-};
-
-/*
- structure that represents one table - it is not used in the code,
- just to show what a table looks like
-*/
-struct sep_lli_table_t {
- /* number of pages mapped in this table. If 0, it means that the
- table is not defined (used as a valid flag) */
- unsigned long num_pages;
- /*
- pointer to array of page pointers that represent the mapping of the
- virtual buffer defined by the table to the physical memory. If this
- pointer is NULL, it means that the table is not defined
- (used as a valid flag)
- */
- struct page **table_page_array_ptr;
-
- /* maximum flow entries in table */
- struct sep_lli_entry_t lli_entries[SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE];
-};
-
-
-/*
- structure for keeping the mapping of the virtual buffer into physical pages
-*/
-struct sep_flow_buffer_data {
- /* pointer to the array of page struct pointers for the pages of
- the virtual buffer */
- struct page **page_array_ptr;
-
- /* number of pages taken by the virtual buffer */
- unsigned long num_pages;
-
- /* this flag signals if this page_array is the last one among many that were
- sent in one setting to SEP */
- unsigned long last_page_array_flag;
-};
-
-/*
- struct that keeps all the data for one flow
-*/
-struct sep_flow_context_t {
- /*
- work struct for handling the flow done interrupt in the workqueue;
- this member must be placed first, since it is used for casting back
- to the containing flow context
- */
- struct work_struct flow_wq;
-
- /* flow id */
- unsigned long flow_id;
-
- /* additional input tables exist */
- unsigned long input_tables_flag;
-
- /* additional output tables exist */
- unsigned long output_tables_flag;
-
- /* data of the first input table */
- struct sep_lli_entry_t first_input_table;
-
- /* data of the first output table */
- struct sep_lli_entry_t first_output_table;
-
- /* last input table data */
- struct sep_lli_entry_t last_input_table;
-
- /* last output table data */
- struct sep_lli_entry_t last_output_table;
-
- /* input tables in process (in sep) */
- struct sep_lli_entry_t input_tables_in_process;
-
- /* output table in process (in sep) */
- struct sep_lli_entry_t output_tables_in_process;
-
- /* size of messages in bytes */
- unsigned long message_size_in_bytes;
-
- /* message */
- unsigned char message[SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES];
-};
-
-
-#endif
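
For reference, this is how user space would have issued one of the commands defined above. The /dev/sep node name is an assumption for illustration; the header only defines the command numbers:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define SEP_IOC_MAGIC_NUMBER	's'
#define SEP_IOCSENDSEPCOMMAND	_IO(SEP_IOC_MAGIC_NUMBER, 0)

int main(void)
{
	int fd = open("/dev/sep", O_RDWR);	/* assumed node name */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* tell SEP that the message in the shared area is ready */
	if (ioctl(fd, SEP_IOCSENDSEPCOMMAND) < 0)
		perror("ioctl");
	close(fd);
	return 0;
}
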
diff --git a/drivers/staging/sep/sep_driver_config.h b/drivers/staging/sep/sep_driver_config.h
deleted file mode 100644
index 6008fe5..0000000
--- a/drivers/staging/sep/sep_driver_config.h
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- *
- * sep_driver_config.h - Security Processor Driver configuration
- *
- * Copyright(c) 2009 Intel Corporation. All rights reserved.
- * Copyright(c) 2009 Discretix. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * CONTACTS:
- *
- * Mark Allyn mark.a.allyn@intel.com
- *
- * CHANGES:
- *
- * 2009.06.26 Initial publish
- *
- */
-
-#ifndef __SEP_DRIVER_CONFIG_H__
-#define __SEP_DRIVER_CONFIG_H__
-
-
-/*--------------------------------------
- DRIVER CONFIGURATION FLAGS
- -------------------------------------*/
-
-/* if this flag is set, the driver runs in polling rather
- than interrupt mode */
-#define SEP_DRIVER_POLLING_MODE 1
-
-/* flag which defines if the shared area address should be
- reconfigured (sent to SEP anew) during init of the driver */
-#define SEP_DRIVER_RECONFIG_MESSAGE_AREA 0
-
-/* the mode for running on the ARM1172 Evaluation platform (flag is 1) */
-#define SEP_DRIVER_ARM_DEBUG_MODE 0
-
-/*-------------------------------------------
- INTERNAL DATA CONFIGURATION
- -------------------------------------------*/
-
-/* flag for the input array */
-#define SEP_DRIVER_IN_FLAG 0
-
-/* flag for output array */
-#define SEP_DRIVER_OUT_FLAG 1
-
-/* maximum number of entries in one LLI table */
-#define SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP 8
-
-
-/*--------------------------------------------------------
- SHARED AREA memory total size is 36K
- it is divided as follows:
-
- SHARED_MESSAGE_AREA 8K }
- }
- STATIC_POOL_AREA 4K } MAPPED AREA ( 24 K)
- }
- DATA_POOL_AREA 12K }
-
- SYNCHRONIC_DMA_TABLES_AREA 5K
-
- FLOW_DMA_TABLES_AREA 4K
-
- SYSTEM_MEMORY_AREA 3k
-
- SYSTEM_MEMORY total size is 3k
- it is divided as follows:
-
- TIME_MEMORY_AREA 8B
------------------------------------------------------------*/
-
-
-
-/*
- the maximum length of the message - the rest of the message shared
- area will be dedicated to the dma lli tables
-*/
-#define SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES (8 * 1024)
-
-/* the size of the message shared area in bytes */
-#define SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES (8 * 1024)
-
-/* the size of the data pool static area in bytes */
-#define SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES (4 * 1024)
-
-/* the size of the data pool shared area in bytes */
-#define SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES (12 * 1024)
-
-/* the size of the synchronic DMA tables area in bytes */
-#define SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES (1024 * 5)
-
-
-/* the size of the flow DMA tables area in bytes */
-#define SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES (1024 * 4)
-
-/* system data (time, caller id, etc.) pool */
-#define SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES 100
-
-
-/* area size that is mapped - we map the MESSAGE AREA, STATIC POOL and
- DATA POOL areas. the area must be a multiple of 4K */
-#define SEP_DRIVER_MMMAP_AREA_SIZE (1024 * 24)
-
-
-/*-----------------------------------------------
- offsets of the areas starting from the shared area start address
-*/
-
-/* message area offset */
-#define SEP_DRIVER_MESSAGE_AREA_OFFSET_IN_BYTES 0
-
-/* static pool area offset */
-#define SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES \
- (SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES)
-
-/* data pool area offset */
-#define SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES \
- (SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES + \
- SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES)
-
-/* synchronic dma tables area offset */
-#define SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES \
- (SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + \
- SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)
-
-/* sep driver flow dma tables area offset */
-#define SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES \
- (SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES + \
- SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES)
-
-/* system memory offset in bytes */
-#define SEP_DRIVER_SYSTEM_DATA_MEMORY_OFFSET_IN_BYTES \
- (SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES + \
- SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES)
-
-/* offset of the time area */
-#define SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES \
- (SEP_DRIVER_SYSTEM_DATA_MEMORY_OFFSET_IN_BYTES)
-
-
-
-/* start physical address of the SEP registers memory in HOST */
-#define SEP_IO_MEM_REGION_START_ADDRESS 0x80000000
-
-/* size of the SEP registers memory region in HOST (for now 100 registers) */
-#define SEP_IO_MEM_REGION_SIZE (2 * 0x100000)
-
-/* define the number of IRQ for SEP interrupts */
-#define SEP_DIRVER_IRQ_NUM 1
-
-/* maximum number of add buffers */
-#define SEP_MAX_NUM_ADD_BUFFERS 100
-
-/* number of flows */
-#define SEP_DRIVER_NUM_FLOWS 4
-
-/* maximum number of entries in flow table */
-#define SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE 25
-
-/* offset of the num entries in the block length entry of the LLI */
-#define SEP_NUM_ENTRIES_OFFSET_IN_BITS 24
-
-/* offset of the interrupt flag in the block length entry of the LLI */
-#define SEP_INT_FLAG_OFFSET_IN_BITS 31
-
-/* mask for extracting data size from LLI */
-#define SEP_TABLE_DATA_SIZE_MASK 0xFFFFFF
-
-/* mask for entries after being shifted left */
-#define SEP_NUM_ENTRIES_MASK 0x7F
-
-/* default flow id */
-#define SEP_FREE_FLOW_ID 0xFFFFFFFF
-
-/* temp flow id used during creation of a new flow, until the
- real flow id is received from sep */
-#define SEP_TEMP_FLOW_ID (SEP_DRIVER_NUM_FLOWS + 1)
-
-/* maximum add buffers message length in bytes */
-#define SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES (7 * 4)
-
-/* maximum number of concurrent virtual buffers */
-#define SEP_MAX_VIRT_BUFFERS_CONCURRENT 100
-
-/* the token that defines the start of time address */
-#define SEP_TIME_VAL_TOKEN 0x12345678
-
-/* DEBUG LEVEL MASKS */
-#define SEP_DEBUG_LEVEL_BASIC 0x1
-
-#define SEP_DEBUG_LEVEL_EXTENDED 0x4
-
-
-/* Debug helpers */
-
-#define dbg(fmt, args...) \
-do {\
- if (debug & SEP_DEBUG_LEVEL_BASIC) \
- printk(KERN_DEBUG fmt, ##args); \
-} while (0)
-
-#define edbg(fmt, args...) \
-do { \
- if (debug & SEP_DEBUG_LEVEL_EXTENDED) \
- printk(KERN_DEBUG fmt, ##args); \
-} while (0)
-
-
-
-#endif
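
The layout comment and the *_SIZE_IN_BYTES defines above have to agree by hand. A sketch of how the 24K mapped sub-region could have been pinned down at compile time; this check is not in the original driver:

#include <linux/kernel.h>

static inline void sep_check_mapped_layout(void)
{
	/* message (8K) + static pool (4K) + data pool (12K) == 24K map */
	BUILD_BUG_ON(SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
		     SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
		     SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES !=
		     SEP_DRIVER_MMMAP_AREA_SIZE);
}
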
diff --git a/drivers/staging/sep/sep_driver_hw_defs.h b/drivers/staging/sep/sep_driver_hw_defs.h
deleted file mode 100644
index ea6abd8..0000000
--- a/drivers/staging/sep/sep_driver_hw_defs.h
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- *
- * sep_driver_hw_defs.h - Security Processor Driver hardware definitions
- *
- * Copyright(c) 2009 Intel Corporation. All rights reserved.
- * Copyright(c) 2009 Discretix. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * CONTACTS:
- *
- * Mark Allyn mark.a.allyn@intel.com
- *
- * CHANGES:
- *
- * 2009.06.26 Initial publish
- *
- */
-
-#ifndef SEP_DRIVER_HW_DEFS__H
-#define SEP_DRIVER_HW_DEFS__H
-
-/*--------------------------------------------------------------------------*/
-/* Abstract: HW Registers Defines. */
-/* */
-/* Note: This file was automatically created !!! */
-/* DO NOT EDIT THIS FILE !!! */
-/*--------------------------------------------------------------------------*/
-
-
-/* cf registers */
-#define HW_R0B_ADDR_0_REG_ADDR 0x0000UL
-#define HW_R0B_ADDR_1_REG_ADDR 0x0004UL
-#define HW_R0B_ADDR_2_REG_ADDR 0x0008UL
-#define HW_R0B_ADDR_3_REG_ADDR 0x000cUL
-#define HW_R0B_ADDR_4_REG_ADDR 0x0010UL
-#define HW_R0B_ADDR_5_REG_ADDR 0x0014UL
-#define HW_R0B_ADDR_6_REG_ADDR 0x0018UL
-#define HW_R0B_ADDR_7_REG_ADDR 0x001cUL
-#define HW_R0B_ADDR_8_REG_ADDR 0x0020UL
-#define HW_R2B_ADDR_0_REG_ADDR 0x0080UL
-#define HW_R2B_ADDR_1_REG_ADDR 0x0084UL
-#define HW_R2B_ADDR_2_REG_ADDR 0x0088UL
-#define HW_R2B_ADDR_3_REG_ADDR 0x008cUL
-#define HW_R2B_ADDR_4_REG_ADDR 0x0090UL
-#define HW_R2B_ADDR_5_REG_ADDR 0x0094UL
-#define HW_R2B_ADDR_6_REG_ADDR 0x0098UL
-#define HW_R2B_ADDR_7_REG_ADDR 0x009cUL
-#define HW_R2B_ADDR_8_REG_ADDR 0x00a0UL
-#define HW_R3B_REG_ADDR 0x00C0UL
-#define HW_R4B_REG_ADDR 0x0100UL
-#define HW_CSA_ADDR_0_REG_ADDR 0x0140UL
-#define HW_CSA_ADDR_1_REG_ADDR 0x0144UL
-#define HW_CSA_ADDR_2_REG_ADDR 0x0148UL
-#define HW_CSA_ADDR_3_REG_ADDR 0x014cUL
-#define HW_CSA_ADDR_4_REG_ADDR 0x0150UL
-#define HW_CSA_ADDR_5_REG_ADDR 0x0154UL
-#define HW_CSA_ADDR_6_REG_ADDR 0x0158UL
-#define HW_CSA_ADDR_7_REG_ADDR 0x015cUL
-#define HW_CSA_ADDR_8_REG_ADDR 0x0160UL
-#define HW_CSA_REG_ADDR 0x0140UL
-#define HW_SINB_REG_ADDR 0x0180UL
-#define HW_SOUTB_REG_ADDR 0x0184UL
-#define HW_PKI_CONTROL_REG_ADDR 0x01C0UL
-#define HW_PKI_STATUS_REG_ADDR 0x01C4UL
-#define HW_PKI_BUSY_REG_ADDR 0x01C8UL
-#define HW_PKI_A_1025_REG_ADDR 0x01CCUL
-#define HW_PKI_SDMA_CTL_REG_ADDR 0x01D0UL
-#define HW_PKI_SDMA_OFFSET_REG_ADDR 0x01D4UL
-#define HW_PKI_SDMA_POINTERS_REG_ADDR 0x01D8UL
-#define HW_PKI_SDMA_DLENG_REG_ADDR 0x01DCUL
-#define HW_PKI_SDMA_EXP_POINTERS_REG_ADDR 0x01E0UL
-#define HW_PKI_SDMA_RES_POINTERS_REG_ADDR 0x01E4UL
-#define HW_PKI_CLR_REG_ADDR 0x01E8UL
-#define HW_PKI_SDMA_BUSY_REG_ADDR 0x01E8UL
-#define HW_PKI_SDMA_FIRST_EXP_N_REG_ADDR 0x01ECUL
-#define HW_PKI_SDMA_MUL_BY1_REG_ADDR 0x01F0UL
-#define HW_PKI_SDMA_RMUL_SEL_REG_ADDR 0x01F4UL
-#define HW_DES_KEY_0_REG_ADDR 0x0208UL
-#define HW_DES_KEY_1_REG_ADDR 0x020CUL
-#define HW_DES_KEY_2_REG_ADDR 0x0210UL
-#define HW_DES_KEY_3_REG_ADDR 0x0214UL
-#define HW_DES_KEY_4_REG_ADDR 0x0218UL
-#define HW_DES_KEY_5_REG_ADDR 0x021CUL
-#define HW_DES_CONTROL_0_REG_ADDR 0x0220UL
-#define HW_DES_CONTROL_1_REG_ADDR 0x0224UL
-#define HW_DES_IV_0_REG_ADDR 0x0228UL
-#define HW_DES_IV_1_REG_ADDR 0x022CUL
-#define HW_AES_KEY_0_ADDR_0_REG_ADDR 0x0400UL
-#define HW_AES_KEY_0_ADDR_1_REG_ADDR 0x0404UL
-#define HW_AES_KEY_0_ADDR_2_REG_ADDR 0x0408UL
-#define HW_AES_KEY_0_ADDR_3_REG_ADDR 0x040cUL
-#define HW_AES_KEY_0_ADDR_4_REG_ADDR 0x0410UL
-#define HW_AES_KEY_0_ADDR_5_REG_ADDR 0x0414UL
-#define HW_AES_KEY_0_ADDR_6_REG_ADDR 0x0418UL
-#define HW_AES_KEY_0_ADDR_7_REG_ADDR 0x041cUL
-#define HW_AES_KEY_0_REG_ADDR 0x0400UL
-#define HW_AES_IV_0_ADDR_0_REG_ADDR 0x0440UL
-#define HW_AES_IV_0_ADDR_1_REG_ADDR 0x0444UL
-#define HW_AES_IV_0_ADDR_2_REG_ADDR 0x0448UL
-#define HW_AES_IV_0_ADDR_3_REG_ADDR 0x044cUL
-#define HW_AES_IV_0_REG_ADDR 0x0440UL
-#define HW_AES_CTR1_ADDR_0_REG_ADDR 0x0460UL
-#define HW_AES_CTR1_ADDR_1_REG_ADDR 0x0464UL
-#define HW_AES_CTR1_ADDR_2_REG_ADDR 0x0468UL
-#define HW_AES_CTR1_ADDR_3_REG_ADDR 0x046cUL
-#define HW_AES_CTR1_REG_ADDR 0x0460UL
-#define HW_AES_SK_REG_ADDR 0x0478UL
-#define HW_AES_MAC_OK_REG_ADDR 0x0480UL
-#define HW_AES_PREV_IV_0_ADDR_0_REG_ADDR 0x0490UL
-#define HW_AES_PREV_IV_0_ADDR_1_REG_ADDR 0x0494UL
-#define HW_AES_PREV_IV_0_ADDR_2_REG_ADDR 0x0498UL
-#define HW_AES_PREV_IV_0_ADDR_3_REG_ADDR 0x049cUL
-#define HW_AES_PREV_IV_0_REG_ADDR 0x0490UL
-#define HW_AES_CONTROL_REG_ADDR 0x04C0UL
-#define HW_HASH_H0_REG_ADDR 0x0640UL
-#define HW_HASH_H1_REG_ADDR 0x0644UL
-#define HW_HASH_H2_REG_ADDR 0x0648UL
-#define HW_HASH_H3_REG_ADDR 0x064CUL
-#define HW_HASH_H4_REG_ADDR 0x0650UL
-#define HW_HASH_H5_REG_ADDR 0x0654UL
-#define HW_HASH_H6_REG_ADDR 0x0658UL
-#define HW_HASH_H7_REG_ADDR 0x065CUL
-#define HW_HASH_H8_REG_ADDR 0x0660UL
-#define HW_HASH_H9_REG_ADDR 0x0664UL
-#define HW_HASH_H10_REG_ADDR 0x0668UL
-#define HW_HASH_H11_REG_ADDR 0x066CUL
-#define HW_HASH_H12_REG_ADDR 0x0670UL
-#define HW_HASH_H13_REG_ADDR 0x0674UL
-#define HW_HASH_H14_REG_ADDR 0x0678UL
-#define HW_HASH_H15_REG_ADDR 0x067CUL
-#define HW_HASH_CONTROL_REG_ADDR 0x07C0UL
-#define HW_HASH_PAD_EN_REG_ADDR 0x07C4UL
-#define HW_HASH_PAD_CFG_REG_ADDR 0x07C8UL
-#define HW_HASH_CUR_LEN_0_REG_ADDR 0x07CCUL
-#define HW_HASH_CUR_LEN_1_REG_ADDR 0x07D0UL
-#define HW_HASH_CUR_LEN_2_REG_ADDR 0x07D4UL
-#define HW_HASH_CUR_LEN_3_REG_ADDR 0x07D8UL
-#define HW_HASH_PARAM_REG_ADDR 0x07DCUL
-#define HW_HASH_INT_BUSY_REG_ADDR 0x07E0UL
-#define HW_HASH_SW_RESET_REG_ADDR 0x07E4UL
-#define HW_HASH_ENDIANESS_REG_ADDR 0x07E8UL
-#define HW_HASH_DATA_REG_ADDR 0x07ECUL
-#define HW_DRNG_CONTROL_REG_ADDR 0x0800UL
-#define HW_DRNG_VALID_REG_ADDR 0x0804UL
-#define HW_DRNG_DATA_REG_ADDR 0x0808UL
-#define HW_RND_SRC_EN_REG_ADDR 0x080CUL
-#define HW_AES_CLK_ENABLE_REG_ADDR 0x0810UL
-#define HW_DES_CLK_ENABLE_REG_ADDR 0x0814UL
-#define HW_HASH_CLK_ENABLE_REG_ADDR 0x0818UL
-#define HW_PKI_CLK_ENABLE_REG_ADDR 0x081CUL
-#define HW_CLK_STATUS_REG_ADDR 0x0824UL
-#define HW_CLK_ENABLE_REG_ADDR 0x0828UL
-#define HW_DRNG_SAMPLE_REG_ADDR 0x0850UL
-#define HW_RND_SRC_CTL_REG_ADDR 0x0858UL
-#define HW_CRYPTO_CTL_REG_ADDR 0x0900UL
-#define HW_CRYPTO_STATUS_REG_ADDR 0x090CUL
-#define HW_CRYPTO_BUSY_REG_ADDR 0x0910UL
-#define HW_AES_BUSY_REG_ADDR 0x0914UL
-#define HW_DES_BUSY_REG_ADDR 0x0918UL
-#define HW_HASH_BUSY_REG_ADDR 0x091CUL
-#define HW_CONTENT_REG_ADDR 0x0924UL
-#define HW_VERSION_REG_ADDR 0x0928UL
-#define HW_CONTEXT_ID_REG_ADDR 0x0930UL
-#define HW_DIN_BUFFER_REG_ADDR 0x0C00UL
-#define HW_DIN_MEM_DMA_BUSY_REG_ADDR 0x0c20UL
-#define HW_SRC_LLI_MEM_ADDR_REG_ADDR 0x0c24UL
-#define HW_SRC_LLI_WORD0_REG_ADDR 0x0C28UL
-#define HW_SRC_LLI_WORD1_REG_ADDR 0x0C2CUL
-#define HW_SRAM_SRC_ADDR_REG_ADDR 0x0c30UL
-#define HW_DIN_SRAM_BYTES_LEN_REG_ADDR 0x0c34UL
-#define HW_DIN_SRAM_DMA_BUSY_REG_ADDR 0x0C38UL
-#define HW_WRITE_ALIGN_REG_ADDR 0x0C3CUL
-#define HW_OLD_DATA_REG_ADDR 0x0C48UL
-#define HW_WRITE_ALIGN_LAST_REG_ADDR 0x0C4CUL
-#define HW_DOUT_BUFFER_REG_ADDR 0x0C00UL
-#define HW_DST_LLI_WORD0_REG_ADDR 0x0D28UL
-#define HW_DST_LLI_WORD1_REG_ADDR 0x0D2CUL
-#define HW_DST_LLI_MEM_ADDR_REG_ADDR 0x0D24UL
-#define HW_DOUT_MEM_DMA_BUSY_REG_ADDR 0x0D20UL
-#define HW_SRAM_DEST_ADDR_REG_ADDR 0x0D30UL
-#define HW_DOUT_SRAM_BYTES_LEN_REG_ADDR 0x0D34UL
-#define HW_DOUT_SRAM_DMA_BUSY_REG_ADDR 0x0D38UL
-#define HW_READ_ALIGN_REG_ADDR 0x0D3CUL
-#define HW_READ_LAST_DATA_REG_ADDR 0x0D44UL
-#define HW_RC4_THRU_CPU_REG_ADDR 0x0D4CUL
-#define HW_AHB_SINGLE_REG_ADDR 0x0E00UL
-#define HW_SRAM_DATA_REG_ADDR 0x0F00UL
-#define HW_SRAM_ADDR_REG_ADDR 0x0F04UL
-#define HW_SRAM_DATA_READY_REG_ADDR 0x0F08UL
-#define HW_HOST_IRR_REG_ADDR 0x0A00UL
-#define HW_HOST_IMR_REG_ADDR 0x0A04UL
-#define HW_HOST_ICR_REG_ADDR 0x0A08UL
-#define HW_HOST_SEP_SRAM_THRESHOLD_REG_ADDR 0x0A10UL
-#define HW_HOST_SEP_BUSY_REG_ADDR 0x0A14UL
-#define HW_HOST_SEP_LCS_REG_ADDR 0x0A18UL
-#define HW_HOST_CC_SW_RST_REG_ADDR 0x0A40UL
-#define HW_HOST_SEP_SW_RST_REG_ADDR 0x0A44UL
-#define HW_HOST_FLOW_DMA_SW_INT0_REG_ADDR 0x0A80UL
-#define HW_HOST_FLOW_DMA_SW_INT1_REG_ADDR 0x0A84UL
-#define HW_HOST_FLOW_DMA_SW_INT2_REG_ADDR 0x0A88UL
-#define HW_HOST_FLOW_DMA_SW_INT3_REG_ADDR 0x0A8cUL
-#define HW_HOST_FLOW_DMA_SW_INT4_REG_ADDR 0x0A90UL
-#define HW_HOST_FLOW_DMA_SW_INT5_REG_ADDR 0x0A94UL
-#define HW_HOST_FLOW_DMA_SW_INT6_REG_ADDR 0x0A98UL
-#define HW_HOST_FLOW_DMA_SW_INT7_REG_ADDR 0x0A9cUL
-#define HW_HOST_SEP_HOST_GPR0_REG_ADDR 0x0B00UL
-#define HW_HOST_SEP_HOST_GPR1_REG_ADDR 0x0B04UL
-#define HW_HOST_SEP_HOST_GPR2_REG_ADDR 0x0B08UL
-#define HW_HOST_SEP_HOST_GPR3_REG_ADDR 0x0B0CUL
-#define HW_HOST_HOST_SEP_GPR0_REG_ADDR 0x0B80UL
-#define HW_HOST_HOST_SEP_GPR1_REG_ADDR 0x0B84UL
-#define HW_HOST_HOST_SEP_GPR2_REG_ADDR 0x0B88UL
-#define HW_HOST_HOST_SEP_GPR3_REG_ADDR 0x0B8CUL
-#define HW_HOST_HOST_ENDIAN_REG_ADDR 0x0B90UL
-#define HW_HOST_HOST_COMM_CLK_EN_REG_ADDR 0x0B94UL
-#define HW_CLR_SRAM_BUSY_REG_REG_ADDR 0x0F0CUL
-#define HW_CC_SRAM_BASE_ADDRESS 0x5800UL
-
-#endif /* SEP_DRIVER_HW_DEFS__H */
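
The deleted driver accessed every offset above through a sep_read_reg()/sep_write_reg() pair. A sketch of how such accessors are typically written over the ioremapped BAR; the struct here is a stand-in for the real struct sep_device:

#include <linux/io.h>

struct sep_device_example {
	void __iomem *reg_addr;		/* from pci_ioremap_bar(pdev, 0) */
};

static inline u32 sep_read_reg_example(struct sep_device_example *sep, int reg)
{
	return readl(sep->reg_addr + reg);
}

static inline void sep_write_reg_example(struct sep_device_example *sep,
					 int reg, u32 value)
{
	writel(value, sep->reg_addr + reg);
}
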
diff --git a/drivers/staging/spectra/Kconfig b/drivers/staging/spectra/Kconfig
index 5e2ffef..d231ae2 100644
--- a/drivers/staging/spectra/Kconfig
+++ b/drivers/staging/spectra/Kconfig
@@ -2,6 +2,7 @@
menuconfig SPECTRA
tristate "Denali Spectra Flash Translation Layer"
depends on BLOCK
+ depends on X86_MRST
default n
---help---
Enable the FTL pseudo-filesystem used with the NAND Flash
diff --git a/drivers/staging/spectra/ffsport.c b/drivers/staging/spectra/ffsport.c
index d0c5c97..fa21a0f 100644
--- a/drivers/staging/spectra/ffsport.c
+++ b/drivers/staging/spectra/ffsport.c
@@ -27,6 +27,8 @@
#include <linux/kthread.h>
#include <linux/log2.h>
#include <linux/init.h>
+#include <linux/smp_lock.h>
+#include <linux/slab.h>
/**** Helper functions used for Div, Remainder operation on u64 ****/
@@ -113,7 +115,6 @@
#define GLOB_SBD_NAME "nd"
#define GLOB_SBD_IRQ_NUM (29)
-#define GLOB_VERSION "driver version 20091110"
#define GLOB_SBD_IOCTL_GC (0x7701)
#define GLOB_SBD_IOCTL_WL (0x7702)
@@ -272,13 +273,6 @@
return res_blks;
}
-static void SBD_prepare_flush(struct request_queue *q, struct request *rq)
-{
- rq->cmd_type = REQ_TYPE_LINUX_BLOCK;
- /* rq->timeout = 5 * HZ; */
- rq->cmd[0] = REQ_LB_OP_FLUSH;
-}
-
/* Transfer a full request. */
static int do_transfer(struct spectra_nand_dev *tr, struct request *req)
{
@@ -296,8 +290,7 @@
IdentifyDeviceData.PagesPerBlock *
res_blks_os;
- if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
- req->cmd[0] == REQ_LB_OP_FLUSH) {
+ if (req->cmd_type & REQ_FLUSH) {
if (force_flush_cache()) /* Fail to flush cache */
return -EIO;
else
@@ -597,11 +590,23 @@
return -ENOTTY;
}
+int GLOB_SBD_unlocked_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = GLOB_SBD_ioctl(bdev, mode, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
static struct block_device_operations GLOB_SBD_ops = {
.owner = THIS_MODULE,
.open = GLOB_SBD_open,
.release = GLOB_SBD_release,
- .locked_ioctl = GLOB_SBD_ioctl,
+ .ioctl = GLOB_SBD_unlocked_ioctl,
.getgeo = GLOB_SBD_getgeo,
};
@@ -650,8 +655,7 @@
/* Here we force report 512 byte hardware sector size to Kernel */
blk_queue_logical_block_size(dev->queue, 512);
- blk_queue_ordered(dev->queue, QUEUE_ORDERED_DRAIN_FLUSH,
- SBD_prepare_flush);
+ blk_queue_ordered(dev->queue, QUEUE_ORDERED_DRAIN_FLUSH);
dev->thread = kthread_run(spectra_trans_thread, dev, "nand_thd");
if (IS_ERR(dev->thread)) {
diff --git a/drivers/staging/spectra/flash.c b/drivers/staging/spectra/flash.c
index 134aa51..9b5218b 100644
--- a/drivers/staging/spectra/flash.c
+++ b/drivers/staging/spectra/flash.c
@@ -61,7 +61,6 @@
static void FTL_Cache_Write_Page(u8 *pData, u64 dwPageAddr,
u8 cache_blk, u16 flag);
static int FTL_Cache_Write(void);
-static int FTL_Cache_Write_Back(u8 *pData, u64 blk_addr);
static void FTL_Calculate_LRU(void);
static u32 FTL_Get_Block_Index(u32 wBlockNum);
@@ -86,8 +85,6 @@
static int FTL_Replace_Block(u64 blk_addr);
static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX);
-static int FTL_Flash_Error_Handle(u8 *pData, u64 old_page_addr, u64 blk_addr);
-
struct device_info_tag DeviceInfo;
struct flash_cache_tag Cache;
static struct spectra_l2_cache_info cache_l2;
@@ -775,7 +772,7 @@
{
struct list_head *p;
struct spectra_l2_cache_list *pnd;
- int n, i;
+ int n;
n = 0;
list_for_each(p, &cache_l2.table.list) {
@@ -1538,79 +1535,6 @@
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Cache_Update_Block
-* Inputs: pointer to buffer,page address,block address
-* Outputs: PASS=0 / FAIL=1
-* Description: It updates the cache
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Cache_Update_Block(u8 *pData,
- u64 old_page_addr, u64 blk_addr)
-{
- int i, j;
- u8 *buf = pData;
- int wResult = PASS;
- int wFoundInCache;
- u64 page_addr;
- u64 addr;
- u64 old_blk_addr;
- u16 page_offset;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- old_blk_addr = (u64)(old_page_addr >>
- DeviceInfo.nBitsInBlockDataSize) * DeviceInfo.wBlockDataSize;
- page_offset = (u16)(GLOB_u64_Remainder(old_page_addr, 2) >>
- DeviceInfo.nBitsInPageDataSize);
-
- for (i = 0; i < DeviceInfo.wPagesPerBlock; i += Cache.pages_per_item) {
- page_addr = old_blk_addr + i * DeviceInfo.wPageDataSize;
- if (i != page_offset) {
- wFoundInCache = FAIL;
- for (j = 0; j < CACHE_ITEM_NUM; j++) {
- addr = Cache.array[j].address;
- addr = FTL_Get_Physical_Block_Addr(addr) +
- GLOB_u64_Remainder(addr, 2);
- if ((addr >= page_addr) && addr <
- (page_addr + Cache.cache_item_size)) {
- wFoundInCache = PASS;
- buf = Cache.array[j].buf;
- Cache.array[j].changed = SET;
-#if CMD_DMA
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- int_cache[ftl_cmd_cnt].item = j;
- int_cache[ftl_cmd_cnt].cache.address =
- Cache.array[j].address;
- int_cache[ftl_cmd_cnt].cache.changed =
- Cache.array[j].changed;
-#endif
-#endif
- break;
- }
- }
- if (FAIL == wFoundInCache) {
- if (ERR == FTL_Cache_Read_All(g_pTempBuf,
- page_addr)) {
- wResult = FAIL;
- break;
- }
- buf = g_pTempBuf;
- }
- } else {
- buf = pData;
- }
-
- if (FAIL == FTL_Cache_Write_All(buf,
- blk_addr + (page_addr - old_blk_addr))) {
- wResult = FAIL;
- break;
- }
- }
-
- return wResult;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function: FTL_Copy_Block
* Inputs: source block address
* Destination block address
@@ -1698,7 +1622,7 @@
static int erase_l2_cache_blocks(void)
{
int i, ret = PASS;
- u32 pblk, lblk;
+ u32 pblk, lblk = BAD_BLOCK;
u64 addr;
u32 *pbt = (u32 *)g_pBlockTable;
@@ -2004,87 +1928,6 @@
return ret;
}
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Cache_Write_Back
-* Inputs: pointer to data cached in sys memory
-* address of free block in flash
-* Outputs: PASS=0 / FAIL=1
-* Description: writes all the pages of Cache Block to flash
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Cache_Write_Back(u8 *pData, u64 blk_addr)
-{
- int i, j, iErase;
- u64 old_page_addr, addr, phy_addr;
- u32 *pbt = (u32 *)g_pBlockTable;
- u32 lba;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- old_page_addr = FTL_Get_Physical_Block_Addr(blk_addr) +
- GLOB_u64_Remainder(blk_addr, 2);
-
- iErase = (FAIL == FTL_Replace_Block(blk_addr)) ? PASS : FAIL;
-
- pbt[BLK_FROM_ADDR(blk_addr)] &= (~SPARE_BLOCK);
-
-#if CMD_DMA
- p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index = (u32)(blk_addr >>
- DeviceInfo.nBitsInBlockDataSize);
- p_BTableChangesDelta->BT_Entry_Value =
- pbt[(u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize)];
- p_BTableChangesDelta->ValidFields = 0x0C;
-#endif
-
- if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
- g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
- FTL_Write_IN_Progress_Block_Table_Page();
- }
-
- for (i = 0; i < RETRY_TIMES; i++) {
- if (PASS == iErase) {
- phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);
- if (FAIL == GLOB_FTL_Block_Erase(phy_addr)) {
- lba = BLK_FROM_ADDR(blk_addr);
- MARK_BLOCK_AS_BAD(pbt[lba]);
- i = RETRY_TIMES;
- break;
- }
- }
-
- for (j = 0; j < CACHE_ITEM_NUM; j++) {
- addr = Cache.array[j].address;
- if ((addr <= blk_addr) &&
- ((addr + Cache.cache_item_size) > blk_addr))
- cache_block_to_write = j;
- }
-
- phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);
- if (PASS == FTL_Cache_Update_Block(pData,
- old_page_addr, phy_addr)) {
- cache_block_to_write = UNHIT_CACHE_ITEM;
- break;
- } else {
- iErase = PASS;
- }
- }
-
- if (i >= RETRY_TIMES) {
- if (ERR == FTL_Flash_Error_Handle(pData,
- old_page_addr, blk_addr))
- return ERR;
- else
- return FAIL;
- }
-
- return PASS;
-}
-
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function: FTL_Cache_Write_Page
* Inputs: Pointer to buffer, page address, cache block number
@@ -2370,159 +2213,6 @@
return 1;
}
-/******************************************************************
-* Function: FTL_Format_Flash
-* Inputs: none
-* Outputs: PASS
-* Description: The block table stores bad block info, including MDF+
-* blocks gone bad over the ages. Therefore, if we have a
-* block table in place, then use it to scan for bad blocks
-* If not, then scan for MDF.
-* Now, a block table will only be found if spectra was already
-* being used. For a fresh flash, we'll go thru scanning for
-* MDF. If spectra was being used, then there is a chance that
-* the MDF has been corrupted. Spectra avoids writing to the
-* first 2 bytes of the spare area to all pages in a block. This
-* covers all known flash devices. However, since flash
-* manufacturers have no standard of where the MDF is stored,
-* this cannot guarantee that the MDF is protected for future
-* devices too. The initial scanning for the block table assures
-* this. It is ok even if the block table is outdated, as all
-* we're looking for are bad block markers.
-* Use this when mounting a file system or starting a
-* new flash.
-*
-*********************************************************************/
-static int FTL_Format_Flash(u8 valid_block_table)
-{
- u32 i, j;
- u32 *pbt = (u32 *)g_pBlockTable;
- u32 tempNode;
- int ret;
-
-#if CMD_DMA
- u32 *pbtStartingCopy = (u32 *)g_pBTStartingCopy;
- if (ftl_cmd_cnt)
- return FAIL;
-#endif
-
- if (FAIL == FTL_Check_Block_Table(FAIL))
- valid_block_table = 0;
-
- if (valid_block_table) {
- u8 switched = 1;
- u32 block, k;
-
- k = DeviceInfo.wSpectraStartBlock;
- while (switched && (k < DeviceInfo.wSpectraEndBlock)) {
- switched = 0;
- k++;
- for (j = DeviceInfo.wSpectraStartBlock, i = 0;
- j <= DeviceInfo.wSpectraEndBlock;
- j++, i++) {
- block = (pbt[i] & ~BAD_BLOCK) -
- DeviceInfo.wSpectraStartBlock;
- if (block != i) {
- switched = 1;
- tempNode = pbt[i];
- pbt[i] = pbt[block];
- pbt[block] = tempNode;
- }
- }
- }
- if ((k == DeviceInfo.wSpectraEndBlock) && switched)
- valid_block_table = 0;
- }
-
- if (!valid_block_table) {
- memset(g_pBlockTable, 0,
- DeviceInfo.wDataBlockNum * sizeof(u32));
- memset(g_pWearCounter, 0,
- DeviceInfo.wDataBlockNum * sizeof(u8));
- if (DeviceInfo.MLCDevice)
- memset(g_pReadCounter, 0,
- DeviceInfo.wDataBlockNum * sizeof(u16));
-#if CMD_DMA
- memset(g_pBTStartingCopy, 0,
- DeviceInfo.wDataBlockNum * sizeof(u32));
- memset(g_pWearCounterCopy, 0,
- DeviceInfo.wDataBlockNum * sizeof(u8));
- if (DeviceInfo.MLCDevice)
- memset(g_pReadCounterCopy, 0,
- DeviceInfo.wDataBlockNum * sizeof(u16));
-#endif
- for (j = DeviceInfo.wSpectraStartBlock, i = 0;
- j <= DeviceInfo.wSpectraEndBlock;
- j++, i++) {
- if (GLOB_LLD_Get_Bad_Block((u32)j))
- pbt[i] = (u32)(BAD_BLOCK | j);
- }
- }
-
- nand_dbg_print(NAND_DBG_WARN, "Erasing all blocks in the NAND\n");
-
- for (j = DeviceInfo.wSpectraStartBlock, i = 0;
- j <= DeviceInfo.wSpectraEndBlock;
- j++, i++) {
- if ((pbt[i] & BAD_BLOCK) != BAD_BLOCK) {
- ret = GLOB_LLD_Erase_Block(j);
- if (FAIL == ret) {
- pbt[i] = (u32)(j);
- MARK_BLOCK_AS_BAD(pbt[i]);
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, "
- "Function: %s, new Bad Block %d generated!\n",
- __FILE__, __LINE__, __func__, (int)j);
- } else {
- pbt[i] = (u32)(SPARE_BLOCK | j);
- }
- }
-#if CMD_DMA
- pbtStartingCopy[i] = pbt[i];
-#endif
- }
-
- g_wBlockTableOffset = 0;
- for (i = 0; (i <= (DeviceInfo.wSpectraEndBlock -
- DeviceInfo.wSpectraStartBlock))
- && ((pbt[i] & BAD_BLOCK) == BAD_BLOCK); i++)
- ;
- if (i > (DeviceInfo.wSpectraEndBlock - DeviceInfo.wSpectraStartBlock)) {
- printk(KERN_ERR "All blocks bad!\n");
- return FAIL;
- } else {
- g_wBlockTableIndex = pbt[i] & ~BAD_BLOCK;
- if (i != BLOCK_TABLE_INDEX) {
- tempNode = pbt[i];
- pbt[i] = pbt[BLOCK_TABLE_INDEX];
- pbt[BLOCK_TABLE_INDEX] = tempNode;
- }
- }
- pbt[BLOCK_TABLE_INDEX] &= (~SPARE_BLOCK);
-
-#if CMD_DMA
- pbtStartingCopy[BLOCK_TABLE_INDEX] &= (~SPARE_BLOCK);
-#endif
-
- g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
- memset(g_pBTBlocks, 0xFF,
- (1 + LAST_BT_ID - FIRST_BT_ID) * sizeof(u32));
- g_pBTBlocks[FIRST_BT_ID-FIRST_BT_ID] = g_wBlockTableIndex;
- FTL_Write_Block_Table(FAIL);
-
- for (i = 0; i < CACHE_ITEM_NUM; i++) {
- Cache.array[i].address = NAND_CACHE_INIT_ADDR;
- Cache.array[i].use_cnt = 0;
- Cache.array[i].changed = CLEAR;
- }
-
-#if (RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE && CMD_DMA)
- memcpy((void *)&cache_start_copy, (void *)&Cache,
- sizeof(struct flash_cache_tag));
-#endif
- return PASS;
-}
-
static int force_format_nand(void)
{
u32 i;
@@ -3031,112 +2721,6 @@
return wResult;
}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Flash_Error_Handle
-* Inputs: Pointer to data
-* Page address
-* Block address
-* Outputs: PASS=0 / FAIL=1
-* Description: It handles any error that occurred during a Spectra operation
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Flash_Error_Handle(u8 *pData, u64 old_page_addr,
- u64 blk_addr)
-{
- u32 i;
- int j;
- u32 tmp_node, blk_node = BLK_FROM_ADDR(blk_addr);
- u64 phy_addr;
- int wErase = FAIL;
- int wResult = FAIL;
- u32 *pbt = (u32 *)g_pBlockTable;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (ERR == GLOB_FTL_Garbage_Collection())
- return ERR;
-
- do {
- for (i = DeviceInfo.wSpectraEndBlock -
- DeviceInfo.wSpectraStartBlock;
- i > 0; i--) {
- if (IS_SPARE_BLOCK(i)) {
- tmp_node = (u32)(BAD_BLOCK |
- pbt[blk_node]);
- pbt[blk_node] = (u32)(pbt[i] &
- (~SPARE_BLOCK));
- pbt[i] = tmp_node;
-#if CMD_DMA
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)
- g_pBTDelta_Free;
- g_pBTDelta_Free +=
- sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index =
- blk_node;
- p_BTableChangesDelta->BT_Entry_Value =
- pbt[blk_node];
- p_BTableChangesDelta->ValidFields = 0x0C;
-
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)
- g_pBTDelta_Free;
- g_pBTDelta_Free +=
- sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index = i;
- p_BTableChangesDelta->BT_Entry_Value = pbt[i];
- p_BTableChangesDelta->ValidFields = 0x0C;
-#endif
- wResult = PASS;
- break;
- }
- }
-
- if (FAIL == wResult) {
- if (FAIL == GLOB_FTL_Garbage_Collection())
- break;
- else
- continue;
- }
-
- if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
- g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
- FTL_Write_IN_Progress_Block_Table_Page();
- }
-
- phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);
-
- for (j = 0; j < RETRY_TIMES; j++) {
- if (PASS == wErase) {
- if (FAIL == GLOB_FTL_Block_Erase(phy_addr)) {
- MARK_BLOCK_AS_BAD(pbt[blk_node]);
- break;
- }
- }
- if (PASS == FTL_Cache_Update_Block(pData,
- old_page_addr,
- phy_addr)) {
- wResult = PASS;
- break;
- } else {
- wResult = FAIL;
- wErase = PASS;
- }
- }
- } while (FAIL == wResult);
-
- FTL_Write_Block_Table(FAIL);
-
- return wResult;
-}
-
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function: FTL_Get_Page_Num
* Inputs: Size in bytes
diff --git a/drivers/staging/ti-st/st.h b/drivers/staging/ti-st/st.h
index 9952579..1b3060e 100644
--- a/drivers/staging/ti-st/st.h
+++ b/drivers/staging/ti-st/st.h
@@ -80,5 +80,4 @@
extern long st_register(struct st_proto_s *);
extern long st_unregister(enum proto_type);
-extern struct platform_device *st_get_plat_device(void);
#endif /* ST_H */
diff --git a/drivers/staging/ti-st/st_core.c b/drivers/staging/ti-st/st_core.c
index 063c9b1..b85d8bf 100644
--- a/drivers/staging/ti-st/st_core.c
+++ b/drivers/staging/ti-st/st_core.c
@@ -38,7 +38,6 @@
#include "st_ll.h"
#include "st.h"
-#define VERBOSE
/* strings to be used for rfkill entries and by
* ST Core to be used for sysfs debug entry
*/
@@ -581,7 +580,7 @@
long err = 0;
unsigned long flags = 0;
- st_kim_ref(&st_gdata);
+ st_kim_ref(&st_gdata, 0);
pr_info("%s(%d) ", __func__, new_proto->type);
if (st_gdata == NULL || new_proto == NULL || new_proto->recv == NULL
|| new_proto->reg_complete_cb == NULL) {
@@ -713,7 +712,7 @@
pr_debug("%s: %d ", __func__, type);
- st_kim_ref(&st_gdata);
+ st_kim_ref(&st_gdata, 0);
if (type < ST_BT || type >= ST_MAX) {
pr_err(" protocol %d not supported", type);
return -EPROTONOSUPPORT;
@@ -767,7 +766,7 @@
#endif
long len;
- st_kim_ref(&st_gdata);
+ st_kim_ref(&st_gdata, 0);
if (unlikely(skb == NULL || st_gdata == NULL
|| st_gdata->tty == NULL)) {
pr_err("data/tty unavailable to perform write");
@@ -818,7 +817,7 @@
struct st_data_s *st_gdata;
pr_info("%s ", __func__);
- st_kim_ref(&st_gdata);
+ st_kim_ref(&st_gdata, 0);
st_gdata->tty = tty;
tty->disc_data = st_gdata;
diff --git a/drivers/staging/ti-st/st_core.h b/drivers/staging/ti-st/st_core.h
index e0c32d1..8601320 100644
--- a/drivers/staging/ti-st/st_core.h
+++ b/drivers/staging/ti-st/st_core.h
@@ -117,7 +117,7 @@
void st_core_exit(struct st_data_s *);
/* ask for reference from KIM */
-void st_kim_ref(struct st_data_s **);
+void st_kim_ref(struct st_data_s **, int);
#define GPS_STUB_TEST
#ifdef GPS_STUB_TEST
diff --git a/drivers/staging/ti-st/st_kim.c b/drivers/staging/ti-st/st_kim.c
index b4a6c7f..9e99463 100644
--- a/drivers/staging/ti-st/st_kim.c
+++ b/drivers/staging/ti-st/st_kim.c
@@ -72,11 +72,26 @@
PROTO_ENTRY(ST_GPS, "GPS"),
};
+#define MAX_ST_DEVICES 3 /* Imagine 1 on each UART for now */
+struct platform_device *st_kim_devices[MAX_ST_DEVICES];
/**********************************************************************/
/* internal functions */
/**
+ * st_get_plat_device -
+ * function which returns the reference to the platform device
+ * requested by id. As of now only 1 such device exists (id=0).
+ * The context requesting the reference can obtain the id from
+ * either (a) the protocol driver which is registering, or
+ * (b) the tty device which is opened.
+ */
+static struct platform_device *st_get_plat_device(int id)
+{
+ return st_kim_devices[id];
+}
+
+/**
* validate_firmware_response -
* function to return whether the firmware response was proper
* in case of error don't complete so that waiting for proper
@@ -353,7 +368,7 @@
struct kim_data_s *kim_gdata;
pr_info(" %s ", __func__);
- kim_pdev = st_get_plat_device();
+ kim_pdev = st_get_plat_device(0);
kim_gdata = dev_get_drvdata(&kim_pdev->dev);
if (kim_gdata->gpios[type] == -1) {
@@ -574,12 +589,12 @@
* This would enable multiple such platform devices to exist
* on a given platform
*/
-void st_kim_ref(struct st_data_s **core_data)
+void st_kim_ref(struct st_data_s **core_data, int id)
{
struct platform_device *pdev;
struct kim_data_s *kim_gdata;
/* get kim_gdata reference from platform device */
- pdev = st_get_plat_device();
+ pdev = st_get_plat_device(id);
kim_gdata = dev_get_drvdata(&pdev->dev);
*core_data = kim_gdata->core_data;
}
@@ -623,6 +638,7 @@
long *gpios = pdev->dev.platform_data;
struct kim_data_s *kim_gdata;
+ st_kim_devices[pdev->id] = pdev;
kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_ATOMIC);
if (!kim_gdata) {
pr_err("no mem to allocate");
diff --git a/drivers/staging/tm6000/Kconfig b/drivers/staging/tm6000/Kconfig
index c725356..de7ebb9 100644
--- a/drivers/staging/tm6000/Kconfig
+++ b/drivers/staging/tm6000/Kconfig
@@ -1,6 +1,6 @@
config VIDEO_TM6000
tristate "TV Master TM5600/6000/6010 driver"
- depends on VIDEO_DEV && I2C && INPUT && USB && EXPERIMENTAL
+ depends on VIDEO_DEV && I2C && INPUT && IR_CORE && USB && EXPERIMENTAL
select VIDEO_TUNER
select MEDIA_TUNER_XC2028
select MEDIA_TUNER_XC5000
diff --git a/drivers/staging/tm6000/tm6000-input.c b/drivers/staging/tm6000/tm6000-input.c
index 32f7a0a..54f7667 100644
--- a/drivers/staging/tm6000/tm6000-input.c
+++ b/drivers/staging/tm6000/tm6000-input.c
@@ -46,7 +46,7 @@
}
struct tm6000_ir_poll_result {
- u8 rc_data[4];
+ u16 rc_data;
};
struct tm6000_IR {
@@ -60,9 +60,9 @@
int polling;
struct delayed_work work;
u8 wait:1;
+ u8 key:1;
struct urb *int_urb;
u8 *urb_data;
- u8 key:1;
int (*get_key) (struct tm6000_IR *, struct tm6000_ir_poll_result *);
@@ -122,13 +122,14 @@
if (urb->status != 0)
printk(KERN_INFO "not ready\n");
- else if (urb->actual_length > 0)
+ else if (urb->actual_length > 0) {
memcpy(ir->urb_data, urb->transfer_buffer, urb->actual_length);
- dprintk("data %02x %02x %02x %02x\n", ir->urb_data[0],
- ir->urb_data[1], ir->urb_data[2], ir->urb_data[3]);
+ dprintk("data %02x %02x %02x %02x\n", ir->urb_data[0],
+ ir->urb_data[1], ir->urb_data[2], ir->urb_data[3]);
- ir->key = 1;
+ ir->key = 1;
+ }
rc = usb_submit_urb(urb, GFP_ATOMIC);
}
@@ -140,30 +141,47 @@
int rc;
u8 buf[2];
- if (ir->wait && !&dev->int_in) {
- poll_result->rc_data[0] = 0xff;
+ if (ir->wait && !&dev->int_in)
return 0;
- }
if (&dev->int_in) {
- poll_result->rc_data[0] = ir->urb_data[0];
- poll_result->rc_data[1] = ir->urb_data[1];
+ if (ir->ir.ir_type == IR_TYPE_RC5)
+ poll_result->rc_data = ir->urb_data[0];
+ else
+ poll_result->rc_data = ir->urb_data[0] | ir->urb_data[1] << 8;
} else {
tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT, 2, 0);
msleep(10);
tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT, 2, 1);
msleep(10);
- rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR |
- USB_RECIP_DEVICE, REQ_02_GET_IR_CODE, 0, 0, buf, 1);
+ if (ir->ir.ir_type == IR_TYPE_RC5) {
+ rc = tm6000_read_write_usb(dev, USB_DIR_IN |
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ REQ_02_GET_IR_CODE, 0, 0, buf, 1);
- msleep(10);
+ msleep(10);
- dprintk("read data=%02x\n", buf[0]);
- if (rc < 0)
- return rc;
+ dprintk("read data=%02x\n", buf[0]);
+ if (rc < 0)
+ return rc;
- poll_result->rc_data[0] = buf[0];
+ poll_result->rc_data = buf[0];
+ } else {
+ rc = tm6000_read_write_usb(dev, USB_DIR_IN |
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ REQ_02_GET_IR_CODE, 0, 0, buf, 2);
+
+ msleep(10);
+
+ dprintk("read data=%04x\n", buf[0] | buf[1] << 8);
+ if (rc < 0)
+ return rc;
+
+ poll_result->rc_data = buf[0] | buf[1] << 8;
+ }
+ if ((poll_result->rc_data & 0x00ff) != 0xff)
+ ir->key = 1;
}
return 0;
}
@@ -180,12 +198,11 @@
return;
}
- dprintk("ir->get_key result data=%02x %02x\n",
- poll_result.rc_data[0], poll_result.rc_data[1]);
+ dprintk("ir->get_key result data=%04x\n", poll_result.rc_data);
- if (poll_result.rc_data[0] != 0xff && ir->key == 1) {
+ if (ir->key) {
ir_input_keydown(ir->input->input_dev, &ir->ir,
- poll_result.rc_data[0] | poll_result.rc_data[1] << 8);
+ (u32)poll_result.rc_data);
ir_input_nokey(ir->input->input_dev, &ir->ir);
ir->key = 0;
diff --git a/drivers/staging/vt6655/wpactl.c b/drivers/staging/vt6655/wpactl.c
index 0142338b..4bdb836 100644
--- a/drivers/staging/vt6655/wpactl.c
+++ b/drivers/staging/vt6655/wpactl.c
@@ -766,9 +766,14 @@
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ie_len = %d\n", param->u.wpa_associate.wpa_ie_len);
- if (param->u.wpa_associate.wpa_ie &&
- copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie, param->u.wpa_associate.wpa_ie_len))
- return -EINVAL;
+ if (param->u.wpa_associate.wpa_ie_len) {
+ if (!param->u.wpa_associate.wpa_ie)
+ return -EINVAL;
+ if (param->u.wpa_associate.wpa_ie_len > sizeof(abyWPAIE))
+ return -EINVAL;
+ if (copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie, param->u.wpa_associate.wpa_ie_len))
+ return -EFAULT;
+ }
if (param->u.wpa_associate.mode == 1)
pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 368c30a..4af83d5 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -219,6 +219,7 @@
return -ENOENT;
params.key_len = len;
params.key = wlandev->wep_keys[key_index];
+ params.seq_len = 0;
callback(cookie, &params);
@@ -735,6 +736,8 @@
priv->band.n_channels = ARRAY_SIZE(prism2_channels);
priv->band.bitrates = priv->rates;
priv->band.n_bitrates = ARRAY_SIZE(prism2_rates);
+ priv->band.band = IEEE80211_BAND_2GHZ;
+ priv->band.ht_cap.ht_supported = false;
wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
set_wiphy_dev(wiphy, dev);
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 77d4d71..722c840 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -769,6 +769,7 @@
free_devices:
while (dev_id)
destroy_device(&devices[--dev_id]);
+ kfree(devices);
unregister:
unregister_blkdev(zram_major, "zram");
out:
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 593fc5e..5af23cc 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -1127,6 +1127,7 @@
{
struct cxacru_data *instance;
struct usb_device *usb_dev = interface_to_usbdev(intf);
+ struct usb_host_endpoint *cmd_ep = usb_dev->ep_in[CXACRU_EP_CMD];
int ret;
/* instance init */
@@ -1171,15 +1172,34 @@
goto fail;
}
- usb_fill_int_urb(instance->rcv_urb,
+ if (!cmd_ep) {
+ dbg("cxacru_bind: no command endpoint");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ if ((cmd_ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+ == USB_ENDPOINT_XFER_INT) {
+ usb_fill_int_urb(instance->rcv_urb,
usb_dev, usb_rcvintpipe(usb_dev, CXACRU_EP_CMD),
instance->rcv_buf, PAGE_SIZE,
cxacru_blocking_completion, &instance->rcv_done, 1);
- usb_fill_int_urb(instance->snd_urb,
+ usb_fill_int_urb(instance->snd_urb,
usb_dev, usb_sndintpipe(usb_dev, CXACRU_EP_CMD),
instance->snd_buf, PAGE_SIZE,
cxacru_blocking_completion, &instance->snd_done, 4);
+ } else {
+ usb_fill_bulk_urb(instance->rcv_urb,
+ usb_dev, usb_rcvbulkpipe(usb_dev, CXACRU_EP_CMD),
+ instance->rcv_buf, PAGE_SIZE,
+ cxacru_blocking_completion, &instance->rcv_done);
+
+ usb_fill_bulk_urb(instance->snd_urb,
+ usb_dev, usb_sndbulkpipe(usb_dev, CXACRU_EP_CMD),
+ instance->snd_buf, PAGE_SIZE,
+ cxacru_blocking_completion, &instance->snd_done);
+ }
mutex_init(&instance->cm_serialize);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 1833b3a..bc62fae 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -965,7 +965,8 @@
}
if (!buflen) {
- if (intf->cur_altsetting->endpoint->extralen &&
+ if (intf->cur_altsetting->endpoint &&
+ intf->cur_altsetting->endpoint->extralen &&
intf->cur_altsetting->endpoint->extra) {
dev_dbg(&intf->dev,
"Seeking extra descriptors on endpoint\n");
@@ -1481,6 +1482,11 @@
USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \
USB_CDC_ACM_PROTO_VENDOR)
+#define SAMSUNG_PCSUITE_ACM_INFO(x) \
+ USB_DEVICE_AND_INTERFACE_INFO(0x04e7, x, \
+ USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \
+ USB_CDC_ACM_PROTO_VENDOR)
+
/*
* USB driver structure.
*/
@@ -1591,6 +1597,17 @@
{ NOKIA_PCSUITE_ACM_INFO(0x0108), }, /* Nokia 5320 XpressMusic 2G */
{ NOKIA_PCSUITE_ACM_INFO(0x01f5), }, /* Nokia N97, RM-505 */
{ NOKIA_PCSUITE_ACM_INFO(0x02e3), }, /* Nokia 5230, RM-588 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0178), }, /* Nokia E63 */
+ { NOKIA_PCSUITE_ACM_INFO(0x010e), }, /* Nokia E75 */
+ { NOKIA_PCSUITE_ACM_INFO(0x02d9), }, /* Nokia 6760 Slide */
+ { NOKIA_PCSUITE_ACM_INFO(0x01d0), }, /* Nokia E52 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0223), }, /* Nokia E72 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0275), }, /* Nokia X6 */
+ { NOKIA_PCSUITE_ACM_INFO(0x026c), }, /* Nokia N97 Mini */
+ { NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */
+ { NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */
+ { NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */
+ { SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */
/* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
@@ -1599,6 +1616,10 @@
.driver_info = NOT_A_MODEM,
},
+ /* control interfaces without any protocol set */
+ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
+ USB_CDC_PROTO_NONE) },
+
/* control interfaces with various AT-command sets */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_ACM_PROTO_AT_V25TER) },
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index 7e594449..9eed5b5 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -91,12 +91,12 @@
If you are unsure about this, say N here.
config USB_SUSPEND
- bool "USB runtime power management (suspend/resume and wakeup)"
+ bool "USB runtime power management (autosuspend) and wakeup"
depends on USB && PM_RUNTIME
help
If you say Y here, you can use driver calls or the sysfs
- "power/level" file to suspend or resume individual USB
- peripherals and to enable or disable autosuspend (see
+ "power/control" file to enable or disable autosuspend for
+ individual USB peripherals (see
Documentation/usb/power-management.txt for more details).
Also, USB "remote wakeup" signaling is supported, whereby some
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index f06f5db..1e6ccef 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -159,9 +159,9 @@
int usb_register_dev(struct usb_interface *intf,
struct usb_class_driver *class_driver)
{
- int retval = -EINVAL;
+ int retval;
int minor_base = class_driver->minor_base;
- int minor = 0;
+ int minor;
char name[20];
char *temp;
@@ -173,12 +173,17 @@
*/
minor_base = 0;
#endif
- intf->minor = -1;
-
- dbg ("looking for a minor, starting at %d", minor_base);
if (class_driver->fops == NULL)
- goto exit;
+ return -EINVAL;
+ if (intf->minor >= 0)
+ return -EADDRINUSE;
+
+ retval = init_usb_class();
+ if (retval)
+ return retval;
+
+ dev_dbg(&intf->dev, "looking for a minor, starting at %d", minor_base);
down_write(&minor_rwsem);
for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) {
@@ -186,20 +191,12 @@
continue;
usb_minors[minor] = class_driver->fops;
-
- retval = 0;
+ intf->minor = minor;
break;
}
up_write(&minor_rwsem);
-
- if (retval)
- goto exit;
-
- retval = init_usb_class();
- if (retval)
- goto exit;
-
- intf->minor = minor;
+ if (intf->minor < 0)
+ return -EXFULL;
/* create a usb class device for this usb interface */
snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
@@ -213,11 +210,11 @@
"%s", temp);
if (IS_ERR(intf->usb_dev)) {
down_write(&minor_rwsem);
- usb_minors[intf->minor] = NULL;
+ usb_minors[minor] = NULL;
+ intf->minor = -1;
up_write(&minor_rwsem);
retval = PTR_ERR(intf->usb_dev);
}
-exit:
return retval;
}
EXPORT_SYMBOL_GPL(usb_register_dev);
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index fd4c36e..9f0ce7d 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1724,6 +1724,15 @@
if (ret)
goto free_interfaces;
+ /* if it's already configured, clear out old state first.
+ * getting rid of old interfaces means unbinding their drivers.
+ */
+ if (dev->state != USB_STATE_ADDRESS)
+ usb_disable_device(dev, 1); /* Skip ep0 */
+
+ /* Get rid of pending async Set-Config requests for this device */
+ cancel_async_set_config(dev);
+
/* Make sure we have bandwidth (and available HCD resources) for this
* configuration. Remove endpoints from the schedule if we're dropping
* this configuration to set configuration 0. After this point, the
@@ -1733,20 +1742,11 @@
mutex_lock(&hcd->bandwidth_mutex);
ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL);
if (ret < 0) {
- usb_autosuspend_device(dev);
mutex_unlock(&hcd->bandwidth_mutex);
+ usb_autosuspend_device(dev);
goto free_interfaces;
}
- /* if it's already configured, clear out old state first.
- * getting rid of old interfaces means unbinding their drivers.
- */
- if (dev->state != USB_STATE_ADDRESS)
- usb_disable_device(dev, 1); /* Skip ep0 */
-
- /* Get rid of pending async Set-Config requests for this device */
- cancel_async_set_config(dev);
-
ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
USB_REQ_SET_CONFIGURATION, 0, configuration, 0,
NULL, 0, USB_CTRL_SET_TIMEOUT);
@@ -1761,8 +1761,8 @@
if (!cp) {
usb_set_device_state(dev, USB_STATE_ADDRESS);
usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
- usb_autosuspend_device(dev);
mutex_unlock(&hcd->bandwidth_mutex);
+ usb_autosuspend_device(dev);
goto free_interfaces;
}
mutex_unlock(&hcd->bandwidth_mutex);
@@ -1802,6 +1802,7 @@
intf->dev.groups = usb_interface_groups;
intf->dev.dma_mask = dev->dev.dma_mask;
INIT_WORK(&intf->reset_ws, __usb_queue_reset_device);
+ intf->minor = -1;
device_initialize(&intf->dev);
dev_set_name(&intf->dev, "%d-%s:%d.%d",
dev->bus->busnum, dev->devpath,
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index e483f80..1160c55 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -723,12 +723,12 @@
/**
* usb_string_ids_n() - allocate unused string IDs in batch
- * @cdev: the device whose string descriptor IDs are being allocated
+ * @c: the device whose string descriptor IDs are being allocated
* @n: number of string IDs to allocate
* Context: single threaded during gadget setup
*
* Returns the first requested ID. This ID and next @n-1 IDs are now
- * valid IDs. At least providind that @n is non zore because if it
+ * valid IDs. At least provided that @n is non-zero because if it
* is, returns last requested ID which is now very useful information.
*
* @usb_string_ids_n() is called from bind() callbacks to allocate
diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c
index 166bf71..e03058f 100644
--- a/drivers/usb/gadget/m66592-udc.c
+++ b/drivers/usb/gadget/m66592-udc.c
@@ -1609,6 +1609,7 @@
/* initialize ucd */
m66592 = kzalloc(sizeof(struct m66592), GFP_KERNEL);
if (m66592 == NULL) {
+ ret = -ENOMEM;
pr_err("kzalloc error\n");
goto clean_up;
}
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index 70a8178..2456ccd 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -1557,6 +1557,7 @@
/* initialize ucd */
r8a66597 = kzalloc(sizeof(struct r8a66597), GFP_KERNEL);
if (r8a66597 == NULL) {
+ ret = -ENOMEM;
printk(KERN_ERR "kzalloc error\n");
goto clean_up;
}
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index 020fa5a..972d5dd 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -293,9 +293,13 @@
/* mandatory */
case OID_GEN_VENDOR_DESCRIPTION:
pr_debug("%s: OID_GEN_VENDOR_DESCRIPTION\n", __func__);
- length = strlen (rndis_per_dev_params [configNr].vendorDescr);
- memcpy (outbuf,
- rndis_per_dev_params [configNr].vendorDescr, length);
+ if ( rndis_per_dev_params [configNr].vendorDescr ) {
+ length = strlen (rndis_per_dev_params [configNr].vendorDescr);
+ memcpy (outbuf,
+ rndis_per_dev_params [configNr].vendorDescr, length);
+ } else {
+ outbuf[0] = 0;
+ }
retval = 0;
break;
@@ -1148,7 +1152,7 @@
#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
-int __init rndis_init (void)
+int rndis_init(void)
{
u8 i;
diff --git a/drivers/usb/gadget/rndis.h b/drivers/usb/gadget/rndis.h
index c236aaa..907c330 100644
--- a/drivers/usb/gadget/rndis.h
+++ b/drivers/usb/gadget/rndis.h
@@ -262,7 +262,7 @@
int rndis_state (int configNr);
extern void rndis_set_host_mac (int configNr, const u8 *addr);
-int __devinit rndis_init (void);
+int rndis_init(void);
void rndis_exit (void);
#endif /* _LINUX_RNDIS_H */
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index 521ebed..a229744 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -12,8 +12,6 @@
* published by the Free Software Foundation.
*/
-#define DEBUG
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
diff --git a/drivers/usb/gadget/uvc_v4l2.c b/drivers/usb/gadget/uvc_v4l2.c
index 2dcffda..5e807f0 100644
--- a/drivers/usb/gadget/uvc_v4l2.c
+++ b/drivers/usb/gadget/uvc_v4l2.c
@@ -94,7 +94,7 @@
break;
}
- if (format == NULL || format->fcc != fmt->fmt.pix.pixelformat) {
+ if (i == ARRAY_SIZE(uvc_formats)) {
printk(KERN_INFO "Unsupported format 0x%08x.\n",
fmt->fmt.pix.pixelformat);
return -EINVAL;
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 58b72d7..a1e8d27 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -119,6 +119,11 @@
ehci->broken_periodic = 1;
ehci_info(ehci, "using broken periodic workaround\n");
}
+ if (pdev->device == 0x0806 || pdev->device == 0x0811
+ || pdev->device == 0x0829) {
+ ehci_info(ehci, "disable lpm for langwell/penwell\n");
+ ehci->has_lpm = 0;
+ }
break;
case PCI_VENDOR_ID_TDI:
if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index 335ee69..ba52be4 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -192,17 +192,19 @@
}
rv = usb_add_hcd(hcd, irq, 0);
- if (rv == 0)
- return 0;
+ if (rv)
+ goto err_ehci;
+ return 0;
+
+err_ehci:
+ if (ehci->has_amcc_usb23)
+ iounmap(ehci->ohci_hcctrl_reg);
iounmap(hcd->regs);
err_ioremap:
irq_dispose_mapping(irq);
err_irq:
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-
- if (ehci->has_amcc_usb23)
- iounmap(ehci->ohci_hcctrl_reg);
err_rmr:
usb_put_hcd(hcd);
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index d1a3dfc..bdba8c5 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -829,6 +829,7 @@
* almost immediately. With ISP1761, this register requires a delay of
* 195ns between a write and subsequent read (see section 15.1.1.3).
*/
+ mmiowb();
ndelay(195);
skip_map = isp1760_readl(hcd->regs + HC_ATL_PTD_SKIPMAP_REG);
@@ -870,6 +871,7 @@
* almost immediately. With ISP1761, this register requires a delay of
* 195ns between a write and subsequent read (see section 15.1.1.3).
*/
+ mmiowb();
ndelay(195);
skip_map = isp1760_readl(hcd->regs + HC_INT_PTD_SKIPMAP_REG);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index bc3f4f4..48e60d1 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -131,7 +131,7 @@
*seg = (*seg)->next;
*trb = ((*seg)->trbs);
} else {
- *trb = (*trb)++;
+ (*trb)++;
}
}
@@ -1551,6 +1551,10 @@
/* calc actual length */
if (ep->skip) {
td->urb->iso_frame_desc[idx].actual_length = 0;
+ /* Update ring dequeue pointer */
+ while (ep_ring->dequeue != td->last_trb)
+ inc_deq(xhci, ep_ring, false);
+ inc_deq(xhci, ep_ring, false);
return finish_td(xhci, td, event_trb, event, ep, status, true);
}
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index d240de0..801324a 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -439,7 +439,7 @@
/* drain secondary buffer */
int amount = bytes_to_read < data_in_secondary ? bytes_to_read : data_in_secondary;
i = copy_to_user(buffer, dev->read_buffer_secondary+dev->secondary_head, amount);
- if (i < 0) {
+ if (i) {
retval = -EFAULT;
goto exit;
}
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 2de49c8..bc88c79 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -542,7 +542,7 @@
retval = io_res;
else {
io_res = copy_to_user(user_buffer, buffer, dev->report_size);
- if (io_res < 0)
+ if (io_res)
retval = -EFAULT;
}
break;
@@ -574,7 +574,7 @@
}
io_res = copy_to_user((struct iowarrior_info __user *)arg, &info,
sizeof(struct iowarrior_info));
- if (io_res < 0)
+ if (io_res)
retval = -EFAULT;
break;
}
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index 59dc3d3..5ab5bb8 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -322,6 +322,7 @@
index, transmit ? 'T' : 'R', cppi_ch);
cppi_ch->hw_ep = ep;
cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
+ cppi_ch->channel.max_len = 0x7fffffff;
DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
return &cppi_ch->channel;
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index c79a5e3..9e8639d 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -195,15 +195,14 @@
static int musb_test_mode_open(struct inode *inode, struct file *file)
{
- file->private_data = inode->i_private;
-
return single_open(file, musb_test_mode_show, inode->i_private);
}
static ssize_t musb_test_mode_write(struct file *file,
const char __user *ubuf, size_t count, loff_t *ppos)
{
- struct musb *musb = file->private_data;
+ struct seq_file *s = file->private_data;
+ struct musb *musb = s->private;
u8 test = 0;
char buf[18];
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 6fca870..d065e23 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -300,6 +300,11 @@
#ifndef CONFIG_MUSB_PIO_ONLY
if (is_dma_capable() && musb_ep->dma) {
struct dma_controller *c = musb->dma_controller;
+ size_t request_size;
+
+ /* setup DMA, then program endpoint CSR */
+ request_size = min_t(size_t, request->length - request->actual,
+ musb_ep->dma->max_len);
use_dma = (request->dma != DMA_ADDR_INVALID);
@@ -307,11 +312,6 @@
#ifdef CONFIG_USB_INVENTRA_DMA
{
- size_t request_size;
-
- /* setup DMA, then program endpoint CSR */
- request_size = min_t(size_t, request->length,
- musb_ep->dma->max_len);
if (request_size < musb_ep->packet_sz)
musb_ep->dma->desired_mode = 0;
else
@@ -373,8 +373,8 @@
use_dma = use_dma && c->channel_program(
musb_ep->dma, musb_ep->packet_sz,
0,
- request->dma,
- request->length);
+ request->dma + request->actual,
+ request_size);
if (!use_dma) {
c->channel_release(musb_ep->dma);
musb_ep->dma = NULL;
@@ -386,8 +386,8 @@
use_dma = use_dma && c->channel_program(
musb_ep->dma, musb_ep->packet_sz,
request->zero,
- request->dma,
- request->length);
+ request->dma + request->actual,
+ request_size);
#endif
}
#endif
@@ -501,26 +501,14 @@
request->zero = 0;
}
- /* ... or if not, then complete it. */
- musb_g_giveback(musb_ep, request, 0);
-
- /*
- * Kickstart next transfer if appropriate;
- * the packet that just completed might not
- * be transmitted for hours or days.
- * REVISIT for double buffering...
- * FIXME revisit for stalls too...
- */
- musb_ep_select(mbase, epnum);
- csr = musb_readw(epio, MUSB_TXCSR);
- if (csr & MUSB_TXCSR_FIFONOTEMPTY)
- return;
-
- request = musb_ep->desc ? next_request(musb_ep) : NULL;
- if (!request) {
- DBG(4, "%s idle now\n",
- musb_ep->end_point.name);
- return;
+ if (request->actual == request->length) {
+ musb_g_giveback(musb_ep, request, 0);
+ request = musb_ep->desc ? next_request(musb_ep) : NULL;
+ if (!request) {
+ DBG(4, "%s idle now\n",
+ musb_ep->end_point.name);
+ return;
+ }
}
}
@@ -568,11 +556,19 @@
{
const u8 epnum = req->epnum;
struct usb_request *request = &req->request;
- struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out;
+ struct musb_ep *musb_ep;
void __iomem *epio = musb->endpoints[epnum].regs;
unsigned fifo_count = 0;
- u16 len = musb_ep->packet_sz;
+ u16 len;
u16 csr = musb_readw(epio, MUSB_RXCSR);
+ struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
+
+ if (hw_ep->is_shared_fifo)
+ musb_ep = &hw_ep->ep_in;
+ else
+ musb_ep = &hw_ep->ep_out;
+
+ len = musb_ep->packet_sz;
/* We shouldn't get here while DMA is active, but we do... */
if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
@@ -647,8 +643,8 @@
*/
csr |= MUSB_RXCSR_DMAENAB;
-#ifdef USE_MODE1
csr |= MUSB_RXCSR_AUTOCLEAR;
+#ifdef USE_MODE1
/* csr |= MUSB_RXCSR_DMAMODE; */
/* this special sequence (enabling and then
@@ -663,10 +659,11 @@
if (request->actual < request->length) {
int transfer_size = 0;
#ifdef USE_MODE1
- transfer_size = min(request->length,
+ transfer_size = min(request->length - request->actual,
channel->max_len);
#else
- transfer_size = len;
+ transfer_size = min(request->length - request->actual,
+ (unsigned)len);
#endif
if (transfer_size <= musb_ep->packet_sz)
musb_ep->dma->desired_mode = 0;
@@ -740,9 +737,15 @@
u16 csr;
struct usb_request *request;
void __iomem *mbase = musb->mregs;
- struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out;
+ struct musb_ep *musb_ep;
void __iomem *epio = musb->endpoints[epnum].regs;
struct dma_channel *dma;
+ struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
+
+ if (hw_ep->is_shared_fifo)
+ musb_ep = &hw_ep->ep_in;
+ else
+ musb_ep = &hw_ep->ep_out;
musb_ep_select(mbase, epnum);
@@ -1081,7 +1084,7 @@
/*
* Context: controller locked, IRQs blocked.
*/
-static void musb_ep_restart(struct musb *musb, struct musb_request *req)
+void musb_ep_restart(struct musb *musb, struct musb_request *req)
{
DBG(3, "<== %s request %p len %u on hw_ep%d\n",
req->tx ? "TX/IN" : "RX/OUT",
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
index c8b1403..572b1da 100644
--- a/drivers/usb/musb/musb_gadget.h
+++ b/drivers/usb/musb/musb_gadget.h
@@ -105,4 +105,6 @@
extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int);
+extern void musb_ep_restart(struct musb *, struct musb_request *);
+
#endif /* __MUSB_GADGET_H */
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 59bef8f..6dd03f4 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -261,6 +261,7 @@
ctrlrequest->wIndex & 0x0f;
struct musb_ep *musb_ep;
struct musb_hw_ep *ep;
+ struct musb_request *request;
void __iomem *regs;
int is_in;
u16 csr;
@@ -302,6 +303,14 @@
musb_writew(regs, MUSB_RXCSR, csr);
}
+ /* Maybe start the first request in the queue */
+ request = to_musb_request(
+ next_request(musb_ep));
+ if (!musb_ep->busy && request) {
+ DBG(3, "restarting the request\n");
+ musb_ep_restart(musb, request);
+ }
+
/* select ep0 again */
musb_ep_select(mbase, 0);
} break;
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 877d20b..9e65c47 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -660,6 +660,12 @@
qh->segsize = length;
+ /*
+ * Ensure the data reaches to main memory before starting
+ * DMA transfer
+ */
+ wmb();
+
if (!dma->channel_program(channel, pkt_size, mode,
urb->transfer_dma + offset, length)) {
dma->channel_release(channel);
diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c
index 0e88885..0bc9769 100644
--- a/drivers/usb/otg/twl4030-usb.c
+++ b/drivers/usb/otg/twl4030-usb.c
@@ -347,11 +347,20 @@
}
}
+static void __twl4030_phy_power(struct twl4030_usb *twl, int on)
+{
+ u8 pwr = twl4030_usb_read(twl, PHY_PWR_CTRL);
+
+ if (on)
+ pwr &= ~PHY_PWR_PHYPWD;
+ else
+ pwr |= PHY_PWR_PHYPWD;
+
+ WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
+}
+
static void twl4030_phy_power(struct twl4030_usb *twl, int on)
{
- u8 pwr;
-
- pwr = twl4030_usb_read(twl, PHY_PWR_CTRL);
if (on) {
regulator_enable(twl->usb3v1);
regulator_enable(twl->usb1v8);
@@ -365,15 +374,13 @@
twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0,
VUSB_DEDICATED2);
regulator_enable(twl->usb1v5);
- pwr &= ~PHY_PWR_PHYPWD;
- WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
+ __twl4030_phy_power(twl, 1);
twl4030_usb_write(twl, PHY_CLK_CTRL,
twl4030_usb_read(twl, PHY_CLK_CTRL) |
(PHY_CLK_CTRL_CLOCKGATING_EN |
PHY_CLK_CTRL_CLK32K_EN));
- } else {
- pwr |= PHY_PWR_PHYPWD;
- WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
+ } else {
+ __twl4030_phy_power(twl, 0);
regulator_disable(twl->usb1v5);
regulator_disable(twl->usb1v8);
regulator_disable(twl->usb3v1);
@@ -387,19 +394,25 @@
twl4030_phy_power(twl, 0);
twl->asleep = 1;
+ dev_dbg(twl->dev, "%s\n", __func__);
+}
+
+static void __twl4030_phy_resume(struct twl4030_usb *twl)
+{
+ twl4030_phy_power(twl, 1);
+ twl4030_i2c_access(twl, 1);
+ twl4030_usb_set_mode(twl, twl->usb_mode);
+ if (twl->usb_mode == T2_USB_MODE_ULPI)
+ twl4030_i2c_access(twl, 0);
}
static void twl4030_phy_resume(struct twl4030_usb *twl)
{
if (!twl->asleep)
return;
-
- twl4030_phy_power(twl, 1);
- twl4030_i2c_access(twl, 1);
- twl4030_usb_set_mode(twl, twl->usb_mode);
- if (twl->usb_mode == T2_USB_MODE_ULPI)
- twl4030_i2c_access(twl, 0);
+ __twl4030_phy_resume(twl);
twl->asleep = 0;
+ dev_dbg(twl->dev, "%s\n", __func__);
}
static int twl4030_usb_ldo_init(struct twl4030_usb *twl)
@@ -408,8 +421,8 @@
twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0xC0, PROTECT_KEY);
twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0x0C, PROTECT_KEY);
- /* put VUSB3V1 LDO in active state */
- twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB_DEDICATED2);
+ /* Keep VUSB3V1 LDO in sleep state until VBUS/ID change detected*/
+ /*twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB_DEDICATED2);*/
/* input to VUSB3V1 LDO is from VBAT, not VBUS */
twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0x14, VUSB_DEDICATED1);
@@ -502,6 +515,26 @@
return IRQ_HANDLED;
}
+static void twl4030_usb_phy_init(struct twl4030_usb *twl)
+{
+ int status;
+
+ status = twl4030_usb_linkstat(twl);
+ if (status >= 0) {
+ if (status == USB_EVENT_NONE) {
+ __twl4030_phy_power(twl, 0);
+ twl->asleep = 1;
+ } else {
+ __twl4030_phy_resume(twl);
+ twl->asleep = 0;
+ }
+
+ blocking_notifier_call_chain(&twl->otg.notifier, status,
+ twl->otg.gadget);
+ }
+ sysfs_notify(&twl->dev->kobj, NULL, "vbus");
+}
+
static int twl4030_set_suspend(struct otg_transceiver *x, int suspend)
{
struct twl4030_usb *twl = xceiv_to_twl(x);
@@ -568,7 +601,7 @@
twl->otg.set_peripheral = twl4030_set_peripheral;
twl->otg.set_suspend = twl4030_set_suspend;
twl->usb_mode = pdata->usb_mode;
- twl->asleep = 1;
+ twl->asleep = 1;
/* init spinlock for workqueue */
spin_lock_init(&twl->lock);
@@ -606,15 +639,10 @@
return status;
}
- /* The IRQ handler just handles changes from the previous states
- * of the ID and VBUS pins ... in probe() we must initialize that
- * previous state. The easy way: fake an IRQ.
- *
- * REVISIT: a real IRQ might have happened already, if PREEMPT is
- * enabled. Else the IRQ may not yet be configured or enabled,
- * because of scheduling delays.
+ /* Power down phy or make it work according to
+ * current link state.
*/
- twl4030_usb_irq(twl->irq, twl);
+ twl4030_usb_phy_init(twl);
dev_info(&pdev->dev, "Initialized TWL4030 USB module\n");
return 0;
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 2bef441..4f1744c 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -56,6 +56,7 @@
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
{ USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
+ { USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
{ USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
@@ -88,6 +89,7 @@
{ USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */
{ USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
{ USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
+ { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
{ USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
{ USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
{ USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
@@ -109,6 +111,7 @@
{ USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
{ USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
+ { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
@@ -122,14 +125,14 @@
{ USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
{ USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
{ USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
- { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
- { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
- { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
- { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
{ USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */
{ USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
{ USB_DEVICE(0x16DC, 0x0012) }, /* W-IE-NE-R Plein & Baus GmbH MPOD Multi Channel Power Supply */
{ USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */
+ { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
+ { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
+ { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+ { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
{ } /* Terminating Entry */
};
@@ -222,8 +225,8 @@
#define BITS_STOP_2 0x0002
/* CP210X_SET_BREAK */
-#define BREAK_ON 0x0000
-#define BREAK_OFF 0x0001
+#define BREAK_ON 0x0001
+#define BREAK_OFF 0x0000
/* CP210X_(SET_MHS|GET_MDMSTS) */
#define CONTROL_DTR 0x0001
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index eb12d9b..97cc87d 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -180,6 +180,7 @@
{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
+ { USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_547_PID) },
@@ -750,6 +751,16 @@
{ USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH4_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) },
+ { USB_DEVICE(IONICS_VID, IONICS_PLUGCOMPUTER_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_24_MASTER_WING_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_PC_WING_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_USB_DMX_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MIDI_TIMECODE_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MINI_WING_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MAXI_WING_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MEDIA_WING_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_WING_PID) },
{ }, /* Optional parameter entry */
{ } /* Terminating entry */
};
@@ -1376,7 +1387,7 @@
}
/* set max packet size based on descriptor */
- priv->max_packet_size = ep_desc->wMaxPacketSize;
+ priv->max_packet_size = le16_to_cpu(ep_desc->wMaxPacketSize);
dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size);
}
@@ -1831,7 +1842,7 @@
if (port->port.console && port->sysrq) {
for (i = 0; i < len; i++, ch++) {
- if (!usb_serial_handle_sysrq_char(tty, port, *ch))
+ if (!usb_serial_handle_sysrq_char(port, *ch))
tty_insert_flip_char(tty, *ch, flag);
}
} else {
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 6e612c5..15a4583 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -110,6 +110,9 @@
/* Propox devices */
#define FTDI_PROPOX_JTAGCABLEII_PID 0xD738
+/* Lenz LI-USB Computer Interface. */
+#define FTDI_LENZ_LIUSB_PID 0xD780
+
/*
* Xsens Technologies BV products (http://www.xsens.com).
*/
@@ -132,6 +135,18 @@
#define FTDI_NDI_AURORA_SCU_PID 0xDA74 /* NDI Aurora SCU */
/*
+ * ChamSys Limited (www.chamsys.co.uk) USB wing/interface product IDs
+ */
+#define FTDI_CHAMSYS_24_MASTER_WING_PID 0xDAF8
+#define FTDI_CHAMSYS_PC_WING_PID 0xDAF9
+#define FTDI_CHAMSYS_USB_DMX_PID 0xDAFA
+#define FTDI_CHAMSYS_MIDI_TIMECODE_PID 0xDAFB
+#define FTDI_CHAMSYS_MINI_WING_PID 0xDAFC
+#define FTDI_CHAMSYS_MAXI_WING_PID 0xDAFD
+#define FTDI_CHAMSYS_MEDIA_WING_PID 0xDAFE
+#define FTDI_CHAMSYS_WING_PID 0xDAFF
+
+/*
* Westrex International devices submitted by Cory Lee
*/
#define FTDI_WESTREX_MODEL_777_PID 0xDC00 /* Model 777 */
@@ -989,6 +1004,12 @@
#define ALTI2_N3_PID 0x6001 /* Neptune 3 */
/*
+ * Ionics PlugComputer
+ */
+#define IONICS_VID 0x1c0c
+#define IONICS_PLUGCOMPUTER_PID 0x0102
+
+/*
* Dresden Elektronik Sensor Terminal Board
*/
#define DE_VID 0x1cf1 /* Vendor ID */
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index ca92f67..e6833e2 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -343,7 +343,7 @@
tty_insert_flip_string(tty, ch, urb->actual_length);
else {
for (i = 0; i < urb->actual_length; i++, ch++) {
- if (!usb_serial_handle_sysrq_char(tty, port, *ch))
+ if (!usb_serial_handle_sysrq_char(port, *ch))
tty_insert_flip_char(tty, *ch, TTY_NORMAL);
}
}
@@ -448,12 +448,11 @@
EXPORT_SYMBOL_GPL(usb_serial_generic_unthrottle);
#ifdef CONFIG_MAGIC_SYSRQ
-int usb_serial_handle_sysrq_char(struct tty_struct *tty,
- struct usb_serial_port *port, unsigned int ch)
+int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch)
{
if (port->sysrq && port->port.console) {
if (ch && time_before(jiffies, port->sysrq)) {
- handle_sysrq(ch, tty);
+ handle_sysrq(ch);
port->sysrq = 0;
return 1;
}
@@ -462,8 +461,7 @@
return 0;
}
#else
-int usb_serial_handle_sysrq_char(struct tty_struct *tty,
- struct usb_serial_port *port, unsigned int ch)
+int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch)
{
return 0;
}
@@ -518,6 +516,7 @@
for (i = 0; i < serial->num_ports; ++i)
generic_cleanup(serial->port[i]);
}
+EXPORT_SYMBOL_GPL(usb_serial_generic_disconnect);
void usb_serial_generic_release(struct usb_serial *serial)
{
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index dc47f98..a7cfc59 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -1151,7 +1151,7 @@
/* Check if we have an old version in the I2C and
update if necessary */
- if (download_cur_ver != download_new_ver) {
+ if (download_cur_ver < download_new_ver) {
dbg("%s - Update I2C dld from %d.%d to %d.%d",
__func__,
firmware_version->Ver_Major,
@@ -1284,7 +1284,7 @@
kfree(header);
kfree(rom_desc);
kfree(ti_manuf_desc);
- return status;
+ return -EINVAL;
}
/* Update I2C with type 0xf2 record with correct
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 30922a7..aa66581 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -2024,6 +2024,9 @@
case TIOCGICOUNT:
cnow = mos7720_port->icount;
+
+ memset(&icount, 0, sizeof(struct serial_icounter_struct));
+
icount.cts = cnow.cts;
icount.dsr = cnow.dsr;
icount.rng = cnow.rng;
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 585b7e6..1a42bc2 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -119,16 +119,20 @@
* by making a change here, in moschip_port_id_table, and in
* moschip_id_table_combined
*/
-#define USB_VENDOR_ID_BANDB 0x0856
-#define BANDB_DEVICE_ID_USO9ML2_2 0xAC22
-#define BANDB_DEVICE_ID_USO9ML2_4 0xAC24
-#define BANDB_DEVICE_ID_US9ML2_2 0xAC29
-#define BANDB_DEVICE_ID_US9ML2_4 0xAC30
-#define BANDB_DEVICE_ID_USPTL4_2 0xAC31
-#define BANDB_DEVICE_ID_USPTL4_4 0xAC32
-#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42
-#define BANDB_DEVICE_ID_USOPTL4_4 0xAC44
-#define BANDB_DEVICE_ID_USOPTL2_4 0xAC24
+#define USB_VENDOR_ID_BANDB 0x0856
+#define BANDB_DEVICE_ID_USO9ML2_2 0xAC22
+#define BANDB_DEVICE_ID_USO9ML2_2P 0xBC00
+#define BANDB_DEVICE_ID_USO9ML2_4 0xAC24
+#define BANDB_DEVICE_ID_USO9ML2_4P 0xBC01
+#define BANDB_DEVICE_ID_US9ML2_2 0xAC29
+#define BANDB_DEVICE_ID_US9ML2_4 0xAC30
+#define BANDB_DEVICE_ID_USPTL4_2 0xAC31
+#define BANDB_DEVICE_ID_USPTL4_4 0xAC32
+#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42
+#define BANDB_DEVICE_ID_USOPTL4_2P 0xBC02
+#define BANDB_DEVICE_ID_USOPTL4_4 0xAC44
+#define BANDB_DEVICE_ID_USOPTL4_4P 0xBC03
+#define BANDB_DEVICE_ID_USOPTL2_4 0xAC24
/* This driver also supports
* ATEN UC2324 device using Moschip MCS7840
@@ -184,13 +188,17 @@
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
@@ -201,13 +209,17 @@
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
@@ -2273,6 +2285,9 @@
case TIOCGICOUNT:
cnow = mos7840_port->icount;
smp_rmb();
+
+ memset(&icount, 0, sizeof(struct serial_icounter_struct));
+
icount.cts = cnow.cts;
icount.dsr = cnow.dsr;
icount.rng = cnow.rng;
diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c
index a6b207c..1f00f24 100644
--- a/drivers/usb/serial/navman.c
+++ b/drivers/usb/serial/navman.c
@@ -25,6 +25,7 @@
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0a99, 0x0001) }, /* Talon Technology device */
+ { USB_DEVICE(0x0df7, 0x0900) }, /* Mobile Action i-gotU */
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 9fc6ea2..c46911a 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -164,6 +164,14 @@
#define YISO_VENDOR_ID 0x0EAB
#define YISO_PRODUCT_U893 0xC893
+/*
+ * NOVATEL WIRELESS PRODUCTS
+ *
+ * Note from Novatel Wireless:
+ * If your Novatel modem does not work on linux, don't
+ * change the option module, but check our website. If
+ * that does not help, contact ddeschepper@nvtl.com
+*/
/* MERLIN EVDO PRODUCTS */
#define NOVATELWIRELESS_PRODUCT_V640 0x1100
#define NOVATELWIRELESS_PRODUCT_V620 0x1110
@@ -185,24 +193,39 @@
#define NOVATELWIRELESS_PRODUCT_EU730 0x2400
#define NOVATELWIRELESS_PRODUCT_EU740 0x2410
#define NOVATELWIRELESS_PRODUCT_EU870D 0x2420
-
/* OVATION PRODUCTS */
#define NOVATELWIRELESS_PRODUCT_MC727 0x4100
#define NOVATELWIRELESS_PRODUCT_MC950D 0x4400
-#define NOVATELWIRELESS_PRODUCT_U727 0x5010
-#define NOVATELWIRELESS_PRODUCT_MC727_NEW 0x5100
-#define NOVATELWIRELESS_PRODUCT_MC760 0x6000
+/*
+ * Note from Novatel Wireless:
+ * All PID in the 5xxx range are currently reserved for
+ * auto-install CDROMs, and should not be added to this
+ * module.
+ *
+ * #define NOVATELWIRELESS_PRODUCT_U727 0x5010
+ * #define NOVATELWIRELESS_PRODUCT_MC727_NEW 0x5100
+*/
#define NOVATELWIRELESS_PRODUCT_OVMC760 0x6002
-
-/* FUTURE NOVATEL PRODUCTS */
-#define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED 0X6001
-#define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED 0X7000
-#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED 0X7001
-#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED 0X8000
-#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0X8001
-#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0X9000
-#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0X9001
-#define NOVATELWIRELESS_PRODUCT_GLOBAL 0XA001
+#define NOVATELWIRELESS_PRODUCT_MC780 0x6010
+#define NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED 0x6000
+#define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED 0x6001
+#define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED 0x7000
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED 0x7001
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED3 0x7003
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED4 0x7004
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED5 0x7005
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED6 0x7006
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED7 0x7007
+#define NOVATELWIRELESS_PRODUCT_MC996D 0x7030
+#define NOVATELWIRELESS_PRODUCT_MF3470 0x7041
+#define NOVATELWIRELESS_PRODUCT_MC547 0x7042
+#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED 0x8000
+#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0x8001
+#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0x9000
+#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001
+#define NOVATELWIRELESS_PRODUCT_G1 0xA001
+#define NOVATELWIRELESS_PRODUCT_G1_M 0xA002
+#define NOVATELWIRELESS_PRODUCT_G2 0xA010
/* AMOI PRODUCTS */
#define AMOI_VENDOR_ID 0x1614
@@ -365,6 +388,10 @@
#define OLIVETTI_VENDOR_ID 0x0b3c
#define OLIVETTI_PRODUCT_OLICARD100 0xc000
+/* Celot products */
+#define CELOT_VENDOR_ID 0x211f
+#define CELOT_PRODUCT_CT680M 0x6801
+
/* some devices interfaces need special handling due to a number of reasons */
enum option_blacklist_reason {
OPTION_BLACKLIST_NONE = 0,
@@ -486,36 +513,44 @@
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
{ USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC) },
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, /* Novatel Merlin V640/XV620 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, /* Novatel Merlin V620/S620 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, /* Novatel Merlin EX720/V740/X720 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V720) }, /* Novatel Merlin V720/S720/PC720 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U730) }, /* Novatel U730/U740 (VF version) */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U740) }, /* Novatel U740 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U870) }, /* Novatel U870 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_XU870) }, /* Novatel Merlin XU870 HSDPA/3G */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_X950D) }, /* Novatel X950D */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EV620) }, /* Novatel EV620/ES620 CDMA/EV-DO */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES720) }, /* Novatel ES620/ES720/U720/USB720 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E725) }, /* Novatel E725/E726 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES620) }, /* Novatel Merlin ES620 SM Bus */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU730) }, /* Novatel EU730 and Vodafone EU740 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU740) }, /* Novatel non-Vodafone EU740 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727_NEW) }, /* Novatel MC727/U727/USB727 refresh */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel MC727/U727/USB727 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC760) }, /* Novatel MC760/U760/USB760 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) }, /* Novatel Ovation MC760 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) }, /* Novatel HSPA product */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) }, /* Novatel EVDO Embedded product */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) }, /* Novatel HSPA Embedded product */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) }, /* Novatel EVDO product */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED) }, /* Novatel HSPA product */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) }, /* Novatel EVDO Embedded product */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) }, /* Novatel HSPA Embedded product */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_GLOBAL) }, /* Novatel Global product */
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V720) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U730) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U740) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U870) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_XU870) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_X950D) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EV620) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES720) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E725) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES620) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU730) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU740) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC780) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED3) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED4) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED5) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED6) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED7) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC996D) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MF3470) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC547) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1_M) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) },
{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
@@ -887,10 +922,9 @@
{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) },
{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)},
{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)},
-
{ USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) },
-
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
+ { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 6b60018..8ae4c6c 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -86,6 +86,7 @@
{ USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
{ USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
+ { USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
{ USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
{ USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
@@ -788,7 +789,7 @@
if (port->port.console && port->sysrq) {
for (i = 0; i < urb->actual_length; ++i)
- if (!usb_serial_handle_sysrq_char(tty, port, data[i]))
+ if (!usb_serial_handle_sysrq_char(port, data[i]))
tty_insert_flip_char(tty, data[i], tty_flag);
} else {
tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index a871645..43eb9bd 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -128,6 +128,10 @@
#define CRESSI_VENDOR_ID 0x04b8
#define CRESSI_EDY_PRODUCT_ID 0x0521
+/* Zeagle dive computer interface */
+#define ZEAGLE_VENDOR_ID 0x04b8
+#define ZEAGLE_N2ITION3_PRODUCT_ID 0x0522
+
/* Sony, USB data cable for CMD-Jxx mobile phones */
#define SONY_VENDOR_ID 0x054c
#define SONY_QN3USB_PRODUCT_ID 0x0437
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
index 6e82d4f..e986002 100644
--- a/drivers/usb/serial/ssu100.c
+++ b/drivers/usb/serial/ssu100.c
@@ -15,6 +15,7 @@
#include <linux/serial.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
+#include <linux/serial_reg.h>
#include <linux/uaccess.h>
#define QT_OPEN_CLOSE_CHANNEL 0xca
@@ -27,36 +28,11 @@
#define QT_HW_FLOW_CONTROL_MASK 0xc5
#define QT_SW_FLOW_CONTROL_MASK 0xc6
-#define MODEM_CTL_REGISTER 0x04
-#define MODEM_STATUS_REGISTER 0x06
-
-
-#define SERIAL_LSR_OE 0x02
-#define SERIAL_LSR_PE 0x04
-#define SERIAL_LSR_FE 0x08
-#define SERIAL_LSR_BI 0x10
-
-#define SERIAL_LSR_TEMT 0x40
-
-#define SERIAL_MCR_DTR 0x01
-#define SERIAL_MCR_RTS 0x02
-#define SERIAL_MCR_LOOP 0x10
-
-#define SERIAL_MSR_CTS 0x10
-#define SERIAL_MSR_CD 0x80
-#define SERIAL_MSR_RI 0x40
-#define SERIAL_MSR_DSR 0x20
#define SERIAL_MSR_MASK 0xf0
-#define SERIAL_CRTSCTS ((SERIAL_MCR_RTS << 8) | SERIAL_MSR_CTS)
+#define SERIAL_CRTSCTS ((UART_MCR_RTS << 8) | UART_MSR_CTS)
-#define SERIAL_8_DATA 0x03
-#define SERIAL_7_DATA 0x02
-#define SERIAL_6_DATA 0x01
-#define SERIAL_5_DATA 0x00
-
-#define SERIAL_ODD_PARITY 0X08
-#define SERIAL_EVEN_PARITY 0X18
+#define SERIAL_EVEN_PARITY (UART_LCR_PARITY | UART_LCR_EPAR)
#define MAX_BAUD_RATE 460800
@@ -70,7 +46,7 @@
#define FULLPWRBIT 0x00000080
#define NEXT_BOARD_POWER_BIT 0x00000004
-static int debug = 1;
+static int debug;
/* Version Information */
#define DRIVER_VERSION "v0.1"
@@ -99,10 +75,12 @@
};
struct ssu100_port_private {
+ spinlock_t status_lock;
u8 shadowLSR;
u8 shadowMSR;
wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
unsigned short max_packet_size;
+ struct async_icount icount;
};
static void ssu100_release(struct usb_serial *serial)
@@ -150,9 +128,10 @@
static inline int ssu100_setregister(struct usb_device *dev,
unsigned short uart,
+ unsigned short reg,
u16 data)
{
- u16 value = (data << 8) | MODEM_CTL_REGISTER;
+ u16 value = (data << 8) | reg;
return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
QT_SET_GET_REGISTER, 0x40, value, uart,
@@ -178,11 +157,11 @@
clear &= ~set; /* 'set' takes precedence over 'clear' */
urb_value = 0;
if (set & TIOCM_DTR)
- urb_value |= SERIAL_MCR_DTR;
+ urb_value |= UART_MCR_DTR;
if (set & TIOCM_RTS)
- urb_value |= SERIAL_MCR_RTS;
+ urb_value |= UART_MCR_RTS;
- result = ssu100_setregister(dev, 0, urb_value);
+ result = ssu100_setregister(dev, 0, UART_MCR, urb_value);
if (result < 0)
dbg("%s Error from MODEM_CTRL urb", __func__);
@@ -264,24 +243,24 @@
if (cflag & PARENB) {
if (cflag & PARODD)
- urb_value |= SERIAL_ODD_PARITY;
+ urb_value |= UART_LCR_PARITY;
else
urb_value |= SERIAL_EVEN_PARITY;
}
switch (cflag & CSIZE) {
case CS5:
- urb_value |= SERIAL_5_DATA;
+ urb_value |= UART_LCR_WLEN5;
break;
case CS6:
- urb_value |= SERIAL_6_DATA;
+ urb_value |= UART_LCR_WLEN6;
break;
case CS7:
- urb_value |= SERIAL_7_DATA;
+ urb_value |= UART_LCR_WLEN7;
break;
default:
case CS8:
- urb_value |= SERIAL_8_DATA;
+ urb_value |= UART_LCR_WLEN8;
break;
}
@@ -333,6 +312,7 @@
struct ssu100_port_private *priv = usb_get_serial_port_data(port);
u8 *data;
int result;
+ unsigned long flags;
dbg("%s - port %d", __func__, port->number);
@@ -350,11 +330,10 @@
return result;
}
- priv->shadowLSR = data[0] & (SERIAL_LSR_OE | SERIAL_LSR_PE |
- SERIAL_LSR_FE | SERIAL_LSR_BI);
-
- priv->shadowMSR = data[1] & (SERIAL_MSR_CTS | SERIAL_MSR_DSR |
- SERIAL_MSR_RI | SERIAL_MSR_CD);
+ spin_lock_irqsave(&priv->status_lock, flags);
+ priv->shadowLSR = data[0];
+ priv->shadowMSR = data[1];
+ spin_unlock_irqrestore(&priv->status_lock, flags);
kfree(data);
@@ -398,11 +377,51 @@
return 0;
}
+static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
+{
+ struct ssu100_port_private *priv = usb_get_serial_port_data(port);
+ struct async_icount prev, cur;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->status_lock, flags);
+ prev = priv->icount;
+ spin_unlock_irqrestore(&priv->status_lock, flags);
+
+ while (1) {
+ wait_event_interruptible(priv->delta_msr_wait,
+ ((priv->icount.rng != prev.rng) ||
+ (priv->icount.dsr != prev.dsr) ||
+ (priv->icount.dcd != prev.dcd) ||
+ (priv->icount.cts != prev.cts)));
+
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+
+ spin_lock_irqsave(&priv->status_lock, flags);
+ cur = priv->icount;
+ spin_unlock_irqrestore(&priv->status_lock, flags);
+
+ if ((prev.rng == cur.rng) &&
+ (prev.dsr == cur.dsr) &&
+ (prev.dcd == cur.dcd) &&
+ (prev.cts == cur.cts))
+ return -EIO;
+
+ if ((arg & TIOCM_RNG && (prev.rng != cur.rng)) ||
+ (arg & TIOCM_DSR && (prev.dsr != cur.dsr)) ||
+ (arg & TIOCM_CD && (prev.dcd != cur.dcd)) ||
+ (arg & TIOCM_CTS && (prev.cts != cur.cts)))
+ return 0;
+ }
+ return 0;
+}
+
static int ssu100_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg)
{
struct usb_serial_port *port = tty->driver_data;
struct ssu100_port_private *priv = usb_get_serial_port_data(port);
+ void __user *user_arg = (void __user *)arg;
dbg("%s cmd 0x%04x", __func__, cmd);
@@ -412,28 +431,28 @@
(struct serial_struct __user *) arg);
case TIOCMIWAIT:
- while (priv != NULL) {
- u8 prevMSR = priv->shadowMSR & SERIAL_MSR_MASK;
- interruptible_sleep_on(&priv->delta_msr_wait);
- /* see if a signal did it */
- if (signal_pending(current))
- return -ERESTARTSYS;
- else {
- u8 diff = (priv->shadowMSR & SERIAL_MSR_MASK) ^ prevMSR;
- if (!diff)
- return -EIO; /* no change => error */
+ return wait_modem_info(port, arg);
- /* Return 0 if caller wanted to know about
- these bits */
-
- if (((arg & TIOCM_RNG) && (diff & SERIAL_MSR_RI)) ||
- ((arg & TIOCM_DSR) && (diff & SERIAL_MSR_DSR)) ||
- ((arg & TIOCM_CD) && (diff & SERIAL_MSR_CD)) ||
- ((arg & TIOCM_CTS) && (diff & SERIAL_MSR_CTS)))
- return 0;
- }
- }
+ case TIOCGICOUNT:
+ {
+ struct serial_icounter_struct icount;
+ struct async_icount cnow = priv->icount;
+ memset(&icount, 0, sizeof(icount));
+ icount.cts = cnow.cts;
+ icount.dsr = cnow.dsr;
+ icount.rng = cnow.rng;
+ icount.dcd = cnow.dcd;
+ icount.rx = cnow.rx;
+ icount.tx = cnow.tx;
+ icount.frame = cnow.frame;
+ icount.overrun = cnow.overrun;
+ icount.parity = cnow.parity;
+ icount.brk = cnow.brk;
+ icount.buf_overrun = cnow.buf_overrun;
+ if (copy_to_user(user_arg, &icount, sizeof(icount)))
+ return -EFAULT;
return 0;
+ }
default:
break;
@@ -455,6 +474,7 @@
unsigned num_endpoints;
int i;
+ unsigned long flags;
num_endpoints = interface->cur_altsetting->desc.bNumEndpoints;
dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints);
@@ -466,7 +486,9 @@
}
/* set max packet size based on descriptor */
+ spin_lock_irqsave(&priv->status_lock, flags);
priv->max_packet_size = ep_desc->wMaxPacketSize;
+ spin_unlock_irqrestore(&priv->status_lock, flags);
dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size);
}
@@ -485,9 +507,9 @@
return -ENOMEM;
}
+ spin_lock_init(&priv->status_lock);
init_waitqueue_head(&priv->delta_msr_wait);
usb_set_serial_port_data(port, priv);
-
ssu100_set_max_packet_size(port);
return ssu100_initdevice(serial->dev);
@@ -506,20 +528,20 @@
if (!d)
return -ENOMEM;
- r = ssu100_getregister(dev, 0, MODEM_CTL_REGISTER, d);
+ r = ssu100_getregister(dev, 0, UART_MCR, d);
if (r < 0)
goto mget_out;
- r = ssu100_getregister(dev, 0, MODEM_STATUS_REGISTER, d+1);
+ r = ssu100_getregister(dev, 0, UART_MSR, d+1);
if (r < 0)
goto mget_out;
- r = (d[0] & SERIAL_MCR_DTR ? TIOCM_DTR : 0) |
- (d[0] & SERIAL_MCR_RTS ? TIOCM_RTS : 0) |
- (d[1] & SERIAL_MSR_CTS ? TIOCM_CTS : 0) |
- (d[1] & SERIAL_MSR_CD ? TIOCM_CAR : 0) |
- (d[1] & SERIAL_MSR_RI ? TIOCM_RI : 0) |
- (d[1] & SERIAL_MSR_DSR ? TIOCM_DSR : 0);
+ r = (d[0] & UART_MCR_DTR ? TIOCM_DTR : 0) |
+ (d[0] & UART_MCR_RTS ? TIOCM_RTS : 0) |
+ (d[1] & UART_MSR_CTS ? TIOCM_CTS : 0) |
+ (d[1] & UART_MSR_DCD ? TIOCM_CAR : 0) |
+ (d[1] & UART_MSR_RI ? TIOCM_RI : 0) |
+ (d[1] & UART_MSR_DSR ? TIOCM_DSR : 0);
mget_out:
kfree(d);
@@ -546,7 +568,7 @@
if (!port->serial->disconnected) {
/* Disable flow control */
if (!on &&
- ssu100_setregister(dev, 0, 0) < 0)
+ ssu100_setregister(dev, 0, UART_MCR, 0) < 0)
dev_err(&port->dev, "error from flowcontrol urb\n");
/* drop RTS and DTR */
if (on)
@@ -557,34 +579,88 @@
mutex_unlock(&port->serial->disc_mutex);
}
+static void ssu100_update_msr(struct usb_serial_port *port, u8 msr)
+{
+ struct ssu100_port_private *priv = usb_get_serial_port_data(port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->status_lock, flags);
+ priv->shadowMSR = msr;
+ spin_unlock_irqrestore(&priv->status_lock, flags);
+
+ if (msr & UART_MSR_ANY_DELTA) {
+ /* update input line counters */
+ if (msr & UART_MSR_DCTS)
+ priv->icount.cts++;
+ if (msr & UART_MSR_DDSR)
+ priv->icount.dsr++;
+ if (msr & UART_MSR_DDCD)
+ priv->icount.dcd++;
+ if (msr & UART_MSR_TERI)
+ priv->icount.rng++;
+ wake_up_interruptible(&priv->delta_msr_wait);
+ }
+}
+
+static void ssu100_update_lsr(struct usb_serial_port *port, u8 lsr,
+ char *tty_flag)
+{
+ struct ssu100_port_private *priv = usb_get_serial_port_data(port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->status_lock, flags);
+ priv->shadowLSR = lsr;
+ spin_unlock_irqrestore(&priv->status_lock, flags);
+
+ *tty_flag = TTY_NORMAL;
+ if (lsr & UART_LSR_BRK_ERROR_BITS) {
+ /* we always want to update icount, but we only want to
+ * update tty_flag for one case */
+ if (lsr & UART_LSR_BI) {
+ priv->icount.brk++;
+ *tty_flag = TTY_BREAK;
+ usb_serial_handle_break(port);
+ }
+ if (lsr & UART_LSR_PE) {
+ priv->icount.parity++;
+ if (*tty_flag == TTY_NORMAL)
+ *tty_flag = TTY_PARITY;
+ }
+ if (lsr & UART_LSR_FE) {
+ priv->icount.frame++;
+ if (*tty_flag == TTY_NORMAL)
+ *tty_flag = TTY_FRAME;
+ }
+ if (lsr & UART_LSR_OE) {
+ priv->icount.overrun++;
+ if (*tty_flag == TTY_NORMAL)
+ *tty_flag = TTY_OVERRUN;
+ }
+ }
+
+}
+
static int ssu100_process_packet(struct tty_struct *tty,
struct usb_serial_port *port,
struct ssu100_port_private *priv,
char *packet, int len)
{
int i;
- char flag;
+ char flag = TTY_NORMAL;
char *ch;
dbg("%s - port %d", __func__, port->number);
- if (len < 4) {
- dbg("%s - malformed packet", __func__);
- return 0;
- }
-
- if ((packet[0] == 0x1b) && (packet[1] == 0x1b) &&
+ if ((len >= 4) &&
+ (packet[0] == 0x1b) && (packet[1] == 0x1b) &&
((packet[2] == 0x00) || (packet[2] == 0x01))) {
- if (packet[2] == 0x00)
- priv->shadowLSR = packet[3] & (SERIAL_LSR_OE |
- SERIAL_LSR_PE |
- SERIAL_LSR_FE |
- SERIAL_LSR_BI);
-
- if (packet[2] == 0x01) {
- priv->shadowMSR = packet[3];
- wake_up_interruptible(&priv->delta_msr_wait);
+ if (packet[2] == 0x00) {
+ ssu100_update_lsr(port, packet[3], &flag);
+ if (flag == TTY_OVERRUN)
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
}
+ if (packet[2] == 0x01)
+ ssu100_update_msr(port, packet[3]);
len -= 4;
ch = packet + 4;
@@ -596,7 +672,7 @@
if (port->port.console && port->sysrq) {
for (i = 0; i < len; i++, ch++) {
- if (!usb_serial_handle_sysrq_char(tty, port, *ch))
+ if (!usb_serial_handle_sysrq_char(port, *ch))
tty_insert_flip_char(tty, *ch, flag);
}
} else
@@ -631,7 +707,6 @@
tty_kref_put(tty);
}
-
static struct usb_serial_driver ssu100_device = {
.driver = {
.owner = THIS_MODULE,
@@ -653,6 +728,7 @@
.tiocmset = ssu100_tiocmset,
.ioctl = ssu100_ioctl,
.set_termios = ssu100_set_termios,
+ .disconnect = usb_serial_generic_disconnect,
};
static int __init ssu100_init(void)
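For reference, a minimal user-space sketch exercising the two ioctls reworked above. The device node /dev/ttyUSB0 is an assumption, and error handling is reduced to bare exits; TIOCMIWAIT blocks until one of the requested modem lines changes, after which TIOCGICOUNT reads the counters that ssu100_update_msr()/ssu100_update_lsr() accumulate:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

int main(void)
{
        struct serial_icounter_struct ic;
        int fd = open("/dev/ttyUSB0", O_RDONLY | O_NOCTTY);

        if (fd < 0)
                return 1;
        if (ioctl(fd, TIOCMIWAIT, TIOCM_CTS | TIOCM_CD) < 0)
                return 1;               /* interrupted, or counters unchanged */
        if (ioctl(fd, TIOCGICOUNT, &ic) < 0)
                return 1;
        printf("cts=%d dsr=%d rng=%d dcd=%d\n", ic.cts, ic.dsr, ic.rng, ic.dcd);
        return 0;
}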
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 2a982e6..7a2177c 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -736,6 +736,7 @@
serial = create_serial(dev, interface, type);
if (!serial) {
+ module_put(type->driver.owner);
dev_err(&interface->dev, "%s - out of memory\n", __func__);
return -ENOMEM;
}
@@ -746,11 +747,11 @@
id = get_iface_id(type, interface);
retval = type->probe(serial, id);
- module_put(type->driver.owner);
if (retval) {
dbg("sub driver rejected device");
kfree(serial);
+ module_put(type->driver.owner);
return retval;
}
}
@@ -822,6 +823,7 @@
if (num_bulk_in == 0 || num_bulk_out == 0) {
dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n");
kfree(serial);
+ module_put(type->driver.owner);
return -ENODEV;
}
}
@@ -835,22 +837,15 @@
dev_err(&interface->dev,
"Generic device with no bulk out, not allowed.\n");
kfree(serial);
+ module_put(type->driver.owner);
return -EIO;
}
}
#endif
if (!num_ports) {
/* if this device type has a calc_num_ports function, call it */
- if (type->calc_num_ports) {
- if (!try_module_get(type->driver.owner)) {
- dev_err(&interface->dev,
- "module get failed, exiting\n");
- kfree(serial);
- return -EIO;
- }
+ if (type->calc_num_ports)
num_ports = type->calc_num_ports(serial);
- module_put(type->driver.owner);
- }
if (!num_ports)
num_ports = type->num_ports;
}
@@ -1039,13 +1034,7 @@
/* if this device type has an attach function, call it */
if (type->attach) {
- if (!try_module_get(type->driver.owner)) {
- dev_err(&interface->dev,
- "module get failed, exiting\n");
- goto probe_error;
- }
retval = type->attach(serial);
- module_put(type->driver.owner);
if (retval < 0)
goto probe_error;
serial->attached = 1;
@@ -1088,10 +1077,12 @@
exit:
/* success */
usb_set_intfdata(interface, serial);
+ module_put(type->driver.owner);
return 0;
probe_error:
usb_serial_put(serial);
+ module_put(type->driver.owner);
return -EIO;
}
EXPORT_SYMBOL_GPL(usb_serial_probe);
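The usb-serial change is easier to see in isolation: the sub-driver module is now pinned once, before any of its callbacks can run, and released exactly once on every exit path instead of being taken and dropped around each callback. A hedged sketch of that invariant, where do_subdriver_probe() is a hypothetical stand-in for the probe/calc_num_ports/attach sequence in the hunks above:

#include <linux/module.h>
#include <linux/usb/serial.h>

/* do_subdriver_probe() is a hypothetical stand-in, not a real API. */
static int do_subdriver_probe(struct usb_serial_driver *type);

static int probe_with_subdriver(struct usb_serial_driver *type)
{
        int retval;

        if (!try_module_get(type->driver.owner))
                return -EIO;            /* sub-driver is unloading */

        retval = do_subdriver_probe(type);

        module_put(type->driver.owner); /* exactly once, on every path */
        return retval;
}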
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 29e850a..17927b1 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -127,7 +127,10 @@
size_t len, total_len = 0;
int err, wmem;
size_t hdr_size;
- struct socket *sock = rcu_dereference(vq->private_data);
+ struct socket *sock;
+
+ sock = rcu_dereference_check(vq->private_data,
+ lockdep_is_held(&vq->mutex));
if (!sock)
return;
@@ -243,7 +246,7 @@
int r, nlogs = 0;
while (datalen > 0) {
- if (unlikely(headcount >= VHOST_NET_MAX_SG)) {
+ if (unlikely(seg >= VHOST_NET_MAX_SG)) {
r = -ENOBUFS;
goto err;
}
@@ -582,7 +585,10 @@
static void vhost_net_enable_vq(struct vhost_net *n,
struct vhost_virtqueue *vq)
{
- struct socket *sock = vq->private_data;
+ struct socket *sock;
+
+ sock = rcu_dereference_protected(vq->private_data,
+ lockdep_is_held(&vq->mutex));
if (!sock)
return;
if (vq == n->vqs + VHOST_NET_VQ_TX) {
@@ -598,7 +604,8 @@
struct socket *sock;
mutex_lock(&vq->mutex);
- sock = vq->private_data;
+ sock = rcu_dereference_protected(vq->private_data,
+ lockdep_is_held(&vq->mutex));
vhost_net_disable_vq(n, vq);
rcu_assign_pointer(vq->private_data, NULL);
mutex_unlock(&vq->mutex);
@@ -736,7 +743,8 @@
}
/* start polling new socket */
- oldsock = vq->private_data;
+ oldsock = rcu_dereference_protected(vq->private_data,
+ lockdep_is_held(&vq->mutex));
if (sock != oldsock) {
vhost_net_disable_vq(n, vq);
rcu_assign_pointer(vq->private_data, sock);
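The vhost-net hunks are an instance of the lockdep-checked RCU idiom: writers hold the virtqueue mutex and read their own pointer with rcu_dereference_protected(), which asserts the lock under CONFIG_PROVE_RCU instead of adding read-side barriers. A condensed, self-contained sketch of the same pattern:

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct cfg {
        int value;
};

struct owner {
        struct mutex mutex;
        struct cfg __rcu *cfg;
};

static void owner_replace(struct owner *o, struct cfg *new)
{
        struct cfg *old;

        mutex_lock(&o->mutex);
        old = rcu_dereference_protected(o->cfg,
                                        lockdep_is_held(&o->mutex));
        rcu_assign_pointer(o->cfg, new);
        mutex_unlock(&o->mutex);

        synchronize_rcu();      /* wait out pre-existing readers */
        kfree(old);
}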
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index e05557d..8b5a1b3 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -60,17 +60,8 @@
return 0;
}
-/* Init poll structure */
-void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
- unsigned long mask, struct vhost_dev *dev)
+static void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
- struct vhost_work *work = &poll->work;
-
- init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
- init_poll_funcptr(&poll->table, vhost_poll_func);
- poll->mask = mask;
- poll->dev = dev;
-
INIT_LIST_HEAD(&work->node);
work->fn = fn;
init_waitqueue_head(&work->done);
@@ -78,6 +69,18 @@
work->queue_seq = work->done_seq = 0;
}
+/* Init poll structure */
+void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
+ unsigned long mask, struct vhost_dev *dev)
+{
+ init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
+ init_poll_funcptr(&poll->table, vhost_poll_func);
+ poll->mask = mask;
+ poll->dev = dev;
+
+ vhost_work_init(&poll->work, fn);
+}
+
/* Start polling a file. We add ourselves to file's wait queue. The caller must
* keep a reference to a file until after vhost_poll_stop is called. */
void vhost_poll_start(struct vhost_poll *poll, struct file *file)
@@ -95,35 +98,38 @@
remove_wait_queue(poll->wqh, &poll->wait);
}
-/* Flush any work that has been scheduled. When calling this, don't hold any
- * locks that are also used by the callback. */
-void vhost_poll_flush(struct vhost_poll *poll)
+static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
- struct vhost_work *work = &poll->work;
unsigned seq;
int left;
int flushing;
- spin_lock_irq(&poll->dev->work_lock);
+ spin_lock_irq(&dev->work_lock);
seq = work->queue_seq;
work->flushing++;
- spin_unlock_irq(&poll->dev->work_lock);
+ spin_unlock_irq(&dev->work_lock);
wait_event(work->done, ({
- spin_lock_irq(&poll->dev->work_lock);
+ spin_lock_irq(&dev->work_lock);
left = seq - work->done_seq <= 0;
- spin_unlock_irq(&poll->dev->work_lock);
+ spin_unlock_irq(&dev->work_lock);
left;
}));
- spin_lock_irq(&poll->dev->work_lock);
+ spin_lock_irq(&dev->work_lock);
flushing = --work->flushing;
- spin_unlock_irq(&poll->dev->work_lock);
+ spin_unlock_irq(&dev->work_lock);
BUG_ON(flushing < 0);
}
-void vhost_poll_queue(struct vhost_poll *poll)
+/* Flush any work that has been scheduled. When calling this, don't hold any
+ * locks that are also used by the callback. */
+void vhost_poll_flush(struct vhost_poll *poll)
{
- struct vhost_dev *dev = poll->dev;
- struct vhost_work *work = &poll->work;
+ vhost_work_flush(poll->dev, &poll->work);
+}
+
+static inline void vhost_work_queue(struct vhost_dev *dev,
+ struct vhost_work *work)
+{
unsigned long flags;
spin_lock_irqsave(&dev->work_lock, flags);
@@ -135,6 +141,11 @@
spin_unlock_irqrestore(&dev->work_lock, flags);
}
+void vhost_poll_queue(struct vhost_poll *poll)
+{
+ vhost_work_queue(poll->dev, &poll->work);
+}
+
static void vhost_vq_reset(struct vhost_dev *dev,
struct vhost_virtqueue *vq)
{
@@ -236,6 +247,29 @@
return dev->mm == current->mm ? 0 : -EPERM;
}
+struct vhost_attach_cgroups_struct {
+ struct vhost_work work;
+ struct task_struct *owner;
+ int ret;
+};
+
+static void vhost_attach_cgroups_work(struct vhost_work *work)
+{
+ struct vhost_attach_cgroups_struct *s;
+ s = container_of(work, struct vhost_attach_cgroups_struct, work);
+ s->ret = cgroup_attach_task_all(s->owner, current);
+}
+
+static int vhost_attach_cgroups(struct vhost_dev *dev)
+{
+ struct vhost_attach_cgroups_struct attach;
+ attach.owner = current;
+ vhost_work_init(&attach.work, vhost_attach_cgroups_work);
+ vhost_work_queue(dev, &attach.work);
+ vhost_work_flush(dev, &attach.work);
+ return attach.ret;
+}
+
/* Caller should have device mutex */
static long vhost_dev_set_owner(struct vhost_dev *dev)
{
@@ -255,14 +289,16 @@
}
dev->worker = worker;
- err = cgroup_attach_task_current_cg(worker);
+ wake_up_process(worker); /* avoid contributing to loadavg */
+
+ err = vhost_attach_cgroups(dev);
if (err)
goto err_cgroup;
- wake_up_process(worker); /* avoid contributing to loadavg */
return 0;
err_cgroup:
kthread_stop(worker);
+ dev->worker = NULL;
err_worker:
if (dev->mm)
mmput(dev->mm);
@@ -284,7 +320,7 @@
vhost_dev_cleanup(dev);
memory->nregions = 0;
- dev->memory = memory;
+ RCU_INIT_POINTER(dev->memory, memory);
return 0;
}
@@ -316,14 +352,18 @@
fput(dev->log_file);
dev->log_file = NULL;
/* No one will access memory at this point */
- kfree(dev->memory);
- dev->memory = NULL;
+ kfree(rcu_dereference_protected(dev->memory,
+ lockdep_is_held(&dev->mutex)));
+ RCU_INIT_POINTER(dev->memory, NULL);
if (dev->mm)
mmput(dev->mm);
dev->mm = NULL;
WARN_ON(!list_empty(&dev->work_list));
- kthread_stop(dev->worker);
+ if (dev->worker) {
+ kthread_stop(dev->worker);
+ dev->worker = NULL;
+ }
}
static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
@@ -401,14 +441,22 @@
/* Caller should have device mutex but not vq mutex */
int vhost_log_access_ok(struct vhost_dev *dev)
{
- return memory_access_ok(dev, dev->memory, 1);
+ struct vhost_memory *mp;
+
+ mp = rcu_dereference_protected(dev->memory,
+ lockdep_is_held(&dev->mutex));
+ return memory_access_ok(dev, mp, 1);
}
/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
static int vq_log_access_ok(struct vhost_virtqueue *vq, void __user *log_base)
{
- return vq_memory_access_ok(log_base, vq->dev->memory,
+ struct vhost_memory *mp;
+
+ mp = rcu_dereference_protected(vq->dev->memory,
+ lockdep_is_held(&vq->mutex));
+ return vq_memory_access_ok(log_base, mp,
vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
(!vq->log_used || log_access_ok(log_base, vq->log_addr,
sizeof *vq->used +
@@ -448,7 +496,8 @@
kfree(newmem);
return -EFAULT;
}
- oldmem = d->memory;
+ oldmem = rcu_dereference_protected(d->memory,
+ lockdep_is_held(&d->mutex));
rcu_assign_pointer(d->memory, newmem);
synchronize_rcu();
kfree(oldmem);
@@ -819,11 +868,12 @@
if (r < 0)
return r;
len -= l;
- if (!len)
+ if (!len) {
+ if (vq->log_ctx)
+ eventfd_signal(vq->log_ctx, 1);
return 0;
+ }
}
- if (vq->log_ctx)
- eventfd_signal(vq->log_ctx, 1);
/* Length written exceeds what we have stored. This is a bug. */
BUG();
return 0;
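vhost_attach_cgroups() above is a special case of a more general trick: run a function synchronously in the worker kthread's context by queueing a work item and flushing it, with arguments and result carried in an on-stack struct. A hedged generalization (it would have to live inside vhost.c, where the static helpers are visible):

struct run_on_worker {
        struct vhost_work work;
        int (*fn)(void *);
        void *arg;
        int ret;
};

static void run_on_worker_fn(struct vhost_work *work)
{
        struct run_on_worker *r =
                container_of(work, struct run_on_worker, work);

        r->ret = r->fn(r->arg);         /* executes in the worker kthread */
}

static int run_on_worker(struct vhost_dev *dev, int (*fn)(void *), void *arg)
{
        struct run_on_worker r = { .fn = fn, .arg = arg };

        vhost_work_init(&r.work, run_on_worker_fn);
        vhost_work_queue(dev, &r.work);
        vhost_work_flush(dev, &r.work); /* safe: 'r' lives on our stack */
        return r.ret;
}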
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index afd7729..af3c11d 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -106,7 +106,7 @@
* vhost_work execution acts instead of rcu_read_lock() and the end of
* vhost_work execution acts instead of rcu_read_unlock().
* Writers use virtqueue mutex. */
- void *private_data;
+ void __rcu *private_data;
/* Log write descriptors */
void __user *log_base;
struct vhost_log log[VHOST_NET_MAX_SG];
@@ -116,7 +116,7 @@
/* Readers use RCU to access memory table pointer
* log base pointer and features.
* Writers use mutex below.*/
- struct vhost_memory *memory;
+ struct vhost_memory __rcu *memory;
struct mm_struct *mm;
struct mutex mutex;
unsigned acked_features;
@@ -173,7 +173,11 @@
static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
{
- unsigned acked_features = rcu_dereference(dev->acked_features);
+ unsigned acked_features;
+
+ acked_features =
+ rcu_dereference_index_check(dev->acked_features,
+ lockdep_is_held(&dev->mutex));
return acked_features & (1 << bit);
}
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 84f8423..7ccc967 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -3508,7 +3508,7 @@
softback_buf = 0UL;
for (i = 0; i < FB_MAX; i++) {
- int pending;
+ int pending = 0;
mapped = 0;
info = registered_fb[i];
@@ -3516,7 +3516,8 @@
if (info == NULL)
continue;
- pending = cancel_work_sync(&info->queue);
+ if (info->queue.func)
+ pending = cancel_work_sync(&info->queue);
DPRINTK("fbcon: %s pending work\n", (pending ? "canceled" :
"no"));
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index 815f84b..70477c2 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -13,7 +13,7 @@
#include <linux/platform_device.h>
#include <linux/screen_info.h>
#include <linux/dmi.h>
-
+#include <linux/pci.h>
#include <video/vga.h>
static struct fb_var_screeninfo efifb_defined __devinitdata = {
@@ -39,17 +39,31 @@
M_I20, /* 20-Inch iMac */
M_I20_SR, /* 20-Inch iMac (Santa Rosa) */
M_I24, /* 24-Inch iMac */
+ M_I24_8_1, /* 24-Inch iMac, 8,1th gen */
+ M_I24_10_1, /* 24-Inch iMac, 10,1th gen */
+ M_I27_11_1, /* 27-Inch iMac, 11,1th gen */
M_MINI, /* Mac Mini */
+ M_MINI_3_1, /* Mac Mini, 3,1th gen */
+ M_MINI_4_1, /* Mac Mini, 4,1th gen */
M_MB, /* MacBook */
M_MB_2, /* MacBook, 2nd rev. */
M_MB_3, /* MacBook, 3rd rev. */
+ M_MB_5_1, /* MacBook, 5th rev. */
+ M_MB_6_1, /* MacBook, 6th rev. */
+ M_MB_7_1, /* MacBook, 7th rev. */
M_MB_SR, /* MacBook, 2nd gen, (Santa Rosa) */
M_MBA, /* MacBook Air */
M_MBP, /* MacBook Pro */
M_MBP_2, /* MacBook Pro 2nd gen */
+ M_MBP_2_2, /* MacBook Pro 2,2nd gen */
M_MBP_SR, /* MacBook Pro (Santa Rosa) */
M_MBP_4, /* MacBook Pro, 4th gen */
M_MBP_5_1, /* MacBook Pro, 5,1th gen */
+ M_MBP_5_2, /* MacBook Pro, 5,2th gen */
+ M_MBP_5_3, /* MacBook Pro, 5,3rd gen */
+ M_MBP_6_1, /* MacBook Pro, 6,1th gen */
+ M_MBP_6_2, /* MacBook Pro, 6,2th gen */
+ M_MBP_7_1, /* MacBook Pro, 7,1th gen */
M_UNKNOWN /* placeholder */
};
@@ -64,14 +78,28 @@
[M_I20] = { "i20", 0x80010000, 1728 * 4, 1680, 1050 }, /* guess */
[M_I20_SR] = { "imac7", 0x40010000, 1728 * 4, 1680, 1050 },
[M_I24] = { "i24", 0x80010000, 2048 * 4, 1920, 1200 }, /* guess */
+ [M_I24_8_1] = { "imac8", 0xc0060000, 2048 * 4, 1920, 1200 },
+ [M_I24_10_1] = { "imac10", 0xc0010000, 2048 * 4, 1920, 1080 },
+ [M_I27_11_1] = { "imac11", 0xc0010000, 2560 * 4, 2560, 1440 },
[M_MINI]= { "mini", 0x80000000, 2048 * 4, 1024, 768 },
+ [M_MINI_3_1] = { "mini31", 0x40010000, 1024 * 4, 1024, 768 },
+ [M_MINI_4_1] = { "mini41", 0xc0010000, 2048 * 4, 1920, 1200 },
[M_MB] = { "macbook", 0x80000000, 2048 * 4, 1280, 800 },
+ [M_MB_5_1] = { "macbook51", 0x80010000, 2048 * 4, 1280, 800 },
+ [M_MB_6_1] = { "macbook61", 0x80010000, 2048 * 4, 1280, 800 },
+ [M_MB_7_1] = { "macbook71", 0x80010000, 2048 * 4, 1280, 800 },
[M_MBA] = { "mba", 0x80000000, 2048 * 4, 1280, 800 },
[M_MBP] = { "mbp", 0x80010000, 1472 * 4, 1440, 900 },
[M_MBP_2] = { "mbp2", 0, 0, 0, 0 }, /* placeholder */
+ [M_MBP_2_2] = { "mbp22", 0x80010000, 1472 * 4, 1440, 900 },
[M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 4, 1440, 900 },
[M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200 },
[M_MBP_5_1] = { "mbp51", 0xc0010000, 2048 * 4, 1440, 900 },
+ [M_MBP_5_2] = { "mbp52", 0xc0010000, 2048 * 4, 1920, 1200 },
+ [M_MBP_5_3] = { "mbp53", 0xd0010000, 2048 * 4, 1440, 900 },
+ [M_MBP_6_1] = { "mbp61", 0x90030000, 2048 * 4, 1920, 1200 },
+ [M_MBP_6_2] = { "mbp62", 0x90030000, 2048 * 4, 1680, 1050 },
+ [M_MBP_7_1] = { "mbp71", 0xc0010000, 2048 * 4, 1280, 800 },
[M_UNKNOWN] = { NULL, 0, 0, 0, 0 }
};
@@ -92,7 +120,12 @@
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac6,1", M_I24),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac6,1", M_I24),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac7,1", M_I20_SR),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac8,1", M_I24_8_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac10,1", M_I24_10_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac11,1", M_I27_11_1),
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "Macmini1,1", M_MINI),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini3,1", M_MINI_3_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini4,1", M_MINI_4_1),
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook1,1", M_MB),
/* At least one of these two will be right; maybe both? */
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook2,1", M_MB),
@@ -101,14 +134,23 @@
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook3,1", M_MB),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook3,1", M_MB),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook4,1", M_MB),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook5,1", M_MB_5_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook6,1", M_MB_6_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook7,1", M_MB_7_1),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookAir1,1", M_MBA),
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro1,1", M_MBP),
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,1", M_MBP_2),
+ EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,2", M_MBP_2_2),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro2,1", M_MBP_2),
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,1", M_MBP_5_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,2", M_MBP_5_2),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,3", M_MBP_5_3),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,1", M_MBP_6_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,2", M_MBP_6_2),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro7,1", M_MBP_7_1),
{},
};
@@ -116,7 +158,7 @@
{
struct efifb_dmi_info *info = id->driver_data;
if (info->base == 0)
- return -ENODEV;
+ return 0;
printk(KERN_INFO "efifb: dmi detected %s - framebuffer at %p "
"(%dx%d, stride %d)\n", id->ident,
@@ -124,18 +166,55 @@
info->stride);
/* Trust the bootloader over the DMI tables */
- if (screen_info.lfb_base == 0)
+ if (screen_info.lfb_base == 0) {
+#if defined(CONFIG_PCI)
+ struct pci_dev *dev = NULL;
+ int found_bar = 0;
+#endif
screen_info.lfb_base = info->base;
- if (screen_info.lfb_linelength == 0)
- screen_info.lfb_linelength = info->stride;
- if (screen_info.lfb_width == 0)
- screen_info.lfb_width = info->width;
- if (screen_info.lfb_height == 0)
- screen_info.lfb_height = info->height;
- if (screen_info.orig_video_isVGA == 0)
- screen_info.orig_video_isVGA = VIDEO_TYPE_EFI;
- return 0;
+#if defined(CONFIG_PCI)
+ /* make sure that the address in the table is actually on a
+ * VGA device's PCI BAR */
+
+ for_each_pci_dev(dev) {
+ int i;
+ if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
+ continue;
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ resource_size_t start, end;
+
+ start = pci_resource_start(dev, i);
+ if (start == 0)
+ break;
+ end = pci_resource_end(dev, i);
+ if (screen_info.lfb_base >= start &&
+ screen_info.lfb_base < end) {
+ found_bar = 1;
+ }
+ }
+ }
+ if (!found_bar)
+ screen_info.lfb_base = 0;
+#endif
+ }
+ if (screen_info.lfb_base) {
+ if (screen_info.lfb_linelength == 0)
+ screen_info.lfb_linelength = info->stride;
+ if (screen_info.lfb_width == 0)
+ screen_info.lfb_width = info->width;
+ if (screen_info.lfb_height == 0)
+ screen_info.lfb_height = info->height;
+ if (screen_info.orig_video_isVGA == 0)
+ screen_info.orig_video_isVGA = VIDEO_TYPE_EFI;
+ } else {
+ screen_info.lfb_linelength = 0;
+ screen_info.lfb_width = 0;
+ screen_info.lfb_height = 0;
+ screen_info.orig_video_isVGA = 0;
+ return 0;
+ }
+ return 1;
}
static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
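The efifb logic above boils down to one check: only trust a DMI-supplied framebuffer address if it lies inside a resource of some VGA-class PCI device. A stand-alone sketch of that check (it skips empty BARs rather than breaking out of the loop, which the driver does as a shortcut):

#include <linux/pci.h>

static bool addr_on_vga_bar(resource_size_t addr)
{
        struct pci_dev *dev = NULL;
        int i;

        for_each_pci_dev(dev) {
                if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
                        continue;
                for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
                        resource_size_t start = pci_resource_start(dev, i);
                        resource_size_t end = pci_resource_end(dev, i);

                        if (start && addr >= start && addr < end)
                                return true;
                }
        }
        return false;
}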
diff --git a/drivers/video/pxa168fb.c b/drivers/video/pxa168fb.c
index c91a7f7..a31a77f 100644
--- a/drivers/video/pxa168fb.c
+++ b/drivers/video/pxa168fb.c
@@ -298,8 +298,8 @@
* Set bit to enable graphics DMA.
*/
x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0);
- x |= fbi->active ? 0x00000100 : 0;
- fbi->active = 0;
+ x &= ~CFG_GRA_ENA_MASK;
+ x |= fbi->active ? CFG_GRA_ENA(1) : CFG_GRA_ENA(0);
/*
* If we are in a pseudo-color mode, we need to enable
@@ -559,7 +559,7 @@
.fb_imageblit = cfb_imageblit,
};
-static int __init pxa168fb_init_mode(struct fb_info *info,
+static int __devinit pxa168fb_init_mode(struct fb_info *info,
struct pxa168fb_mach_info *mi)
{
struct pxa168fb_info *fbi = info->par;
@@ -599,7 +599,7 @@
return ret;
}
-static int __init pxa168fb_probe(struct platform_device *pdev)
+static int __devinit pxa168fb_probe(struct platform_device *pdev)
{
struct pxa168fb_mach_info *mi;
struct fb_info *info = 0;
@@ -792,7 +792,7 @@
.probe = pxa168fb_probe,
};
-static int __devinit pxa168fb_init(void)
+static int __init pxa168fb_init(void)
{
return platform_driver_register(&pxa168fb_driver);
}
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 559bf17..b52f8e4 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -1701,6 +1701,9 @@
break;
case FBIOGET_VBLANK:
+
+ memset(&sisvbblank, 0, sizeof(struct fb_vblank));
+
sisvbblank.count = 0;
sisvbblank.flags = sisfb_setupvbblankflags(ivideo, &sisvbblank.vcount, &sisvbblank.hcount);
diff --git a/drivers/video/via/ioctl.c b/drivers/video/via/ioctl.c
index da03c07..4d553d0 100644
--- a/drivers/video/via/ioctl.c
+++ b/drivers/video/via/ioctl.c
@@ -25,6 +25,8 @@
{
struct viafb_ioctl_info viainfo;
+ memset(&viainfo, 0, sizeof(struct viafb_ioctl_info));
+
viainfo.viafb_id = VIAID;
viainfo.vendor_id = PCI_VIA_VENDOR_ID;
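The sisfb and viafb hunks close the same class of bug: a struct copied to user space must be zeroed first, or padding bytes and never-written members leak old kernel stack contents. The general shape, as a minimal sketch:

#include <linux/fb.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static long example_vblank_ioctl(void __user *argp)
{
        struct fb_vblank vblank;

        memset(&vblank, 0, sizeof(vblank));     /* clears padding too */
        vblank.flags = FB_VBLANK_HAVE_COUNT;    /* then fill real fields */
        if (copy_to_user(argp, &vblank, sizeof(vblank)))
                return -EFAULT;
        return 0;
}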
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index b036677..24efd8e 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -213,11 +213,11 @@
here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog timer.
config PNX4008_WATCHDOG
- tristate "PNX4008 Watchdog"
- depends on ARCH_PNX4008
+ tristate "PNX4008 and LPC32XX Watchdog"
+ depends on ARCH_PNX4008 || ARCH_LPC32XX
help
Say Y here to include support for the watchdog timer
- in the PNX4008 processor.
+ in the PNX4008 or LPC32XX processor.
This driver can be built as a module by choosing M. The module
will be called pnx4008_wdt.
diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c
index 88c83aa..f31493e 100644
--- a/drivers/watchdog/sb_wdog.c
+++ b/drivers/watchdog/sb_wdog.c
@@ -305,7 +305,7 @@
if (ret) {
printk(KERN_ERR "%s: failed to request irq 1 - %d\n",
ident.identity, ret);
- return ret;
+ goto out;
}
ret = misc_register(&sbwdog_miscdev);
@@ -313,14 +313,20 @@
printk(KERN_INFO "%s: timeout is %ld.%ld secs\n",
ident.identity,
timeout / 1000000, (timeout / 100000) % 10);
- } else
- free_irq(1, (void *)user_dog);
+ return 0;
+ }
+ free_irq(1, (void *)user_dog);
+out:
+ unregister_reboot_notifier(&sbwdog_notifier);
+
return ret;
}
static void __exit sbwdog_exit(void)
{
misc_deregister(&sbwdog_miscdev);
+ free_irq(1, (void *)user_dog);
+ unregister_reboot_notifier(&sbwdog_notifier);
}
module_init(sbwdog_init);
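The sb_wdog init path now follows the canonical unwind ladder: undo completed steps in reverse order, one label per step, so every failure leaves nothing registered. A sketch with illustrative example_* objects (IRQ 1 mirrors the driver's hard-coded line):

#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/reboot.h>

/* example_* objects are illustrative stand-ins, not the driver's. */
static struct notifier_block example_notifier;
static struct miscdevice example_miscdev;
static irqreturn_t example_irq(int irq, void *dev_id) { return IRQ_HANDLED; }

static int __init example_init(void)
{
        int ret;

        ret = register_reboot_notifier(&example_notifier);
        if (ret)
                return ret;

        ret = request_irq(1, example_irq, 0, "example", NULL);
        if (ret)
                goto out_notifier;

        ret = misc_register(&example_miscdev);
        if (ret)
                goto out_irq;

        return 0;

out_irq:
        free_irq(1, NULL);
out_notifier:
        unregister_reboot_notifier(&example_notifier);
        return ret;
}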
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
index 458c499..18cdeb4 100644
--- a/drivers/watchdog/ts72xx_wdt.c
+++ b/drivers/watchdog/ts72xx_wdt.c
@@ -449,6 +449,9 @@
wdt->pdev = pdev;
mutex_init(&wdt->lock);
+ /* make sure that the watchdog is disabled */
+ ts72xx_wdt_stop(wdt);
+
error = misc_register(&ts72xx_wdt_miscdev);
if (error) {
dev_err(&pdev->dev, "failed to register miscdev\n");
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 72f91bf..13365ba 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -112,6 +112,7 @@
#define VALID_EVTCHN(chn) ((chn) != 0)
static struct irq_chip xen_dynamic_chip;
+static struct irq_chip xen_percpu_chip;
/* Constructor for packed IRQ information. */
static struct irq_info mk_unbound_info(void)
@@ -377,7 +378,7 @@
irq = find_unbound_irq();
set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
- handle_level_irq, "event");
+ handle_edge_irq, "event");
evtchn_to_irq[evtchn] = irq;
irq_info[irq] = mk_evtchn_info(evtchn);
@@ -403,8 +404,8 @@
if (irq < 0)
goto out;
- set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
- handle_level_irq, "ipi");
+ set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
+ handle_percpu_irq, "ipi");
bind_ipi.vcpu = cpu;
if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
@@ -444,8 +445,8 @@
irq = find_unbound_irq();
- set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
- handle_level_irq, "virq");
+ set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
+ handle_percpu_irq, "virq");
evtchn_to_irq[evtchn] = irq;
irq_info[irq] = mk_virq_info(evtchn, virq);
@@ -964,6 +965,16 @@
.retrigger = retrigger_dynirq,
};
+static struct irq_chip xen_percpu_chip __read_mostly = {
+ .name = "xen-percpu",
+
+ .disable = disable_dynirq,
+ .mask = disable_dynirq,
+ .unmask = enable_dynirq,
+
+ .ack = ack_dynirq,
+};
+
int xen_set_callback_via(uint64_t via)
{
struct xen_hvm_param a;
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 1799bd8..ef9c7db 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -237,7 +237,7 @@
goto again;
if (sysrq_key != '\0')
- handle_sysrq(sysrq_key, NULL);
+ handle_sysrq(sysrq_key);
}
static struct xenbus_watch sysrq_watch = {
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 29bac51..d409495 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -755,7 +755,10 @@
{
int ret = 0;
- blocking_notifier_chain_register(&xenstore_chain, nb);
+ if (xenstored_ready > 0)
+ ret = nb->notifier_call(nb, 0, NULL);
+ else
+ blocking_notifier_chain_register(&xenstore_chain, nb);
return ret;
}
@@ -769,7 +772,7 @@
void xenbus_probe(struct work_struct *unused)
{
- BUG_ON((xenstored_ready <= 0));
+ xenstored_ready = 1;
/* Enumerate devices in xenstore and watch for changes. */
xenbus_probe_devices(&xenbus_frontend);
@@ -835,8 +838,8 @@
xen_store_evtchn = xen_start_info->store_evtchn;
xen_store_mfn = xen_start_info->store_mfn;
xen_store_interface = mfn_to_virt(xen_store_mfn);
+ xenstored_ready = 1;
}
- xenstored_ready = 1;
}
/* Initialize the interface to xenstore. */
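The xenbus_probe.c notifier change is the classic missed-event fix: if the condition already holds when a listener registers, invoke its callback immediately instead of parking it on a chain that has already fired. The generic shape, with ready and chain as illustrative names:

#include <linux/notifier.h>

static int ready;                       /* illustrative readiness flag */
static BLOCKING_NOTIFIER_HEAD(chain);   /* illustrative chain */

int register_or_call(struct notifier_block *nb)
{
        if (ready > 0)          /* event already happened: call directly */
                return nb->notifier_call(nb, 0, NULL);

        blocking_notifier_chain_register(&chain, nb);
        return 0;
}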
diff --git a/firmware/Makefile b/firmware/Makefile
index b27f09f..9c2d194 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -142,7 +142,7 @@
fw-shipped-all := $(fw-shipped-y) $(fw-shipped-m) $(fw-shipped-)
# Directories which we _might_ need to create, so we have a rule for them.
-firmware-dirs := $(sort $(patsubst %,$(objtree)/$(obj)/%/,$(dir $(fw-external-y) $(fw-shipped-all))))
+firmware-dirs := $(sort $(addprefix $(objtree)/$(obj)/,$(dir $(fw-external-y) $(fw-shipped-all))))
quiet_cmd_mkdir = MKDIR $(patsubst $(objtree)/%,%,$@)
cmd_mkdir = mkdir -p $@
diff --git a/fs/9p/fid.c b/fs/9p/fid.c
index 3585636..6406f89 100644
--- a/fs/9p/fid.c
+++ b/fs/9p/fid.c
@@ -242,7 +242,8 @@
}
kfree(wnames);
fid_out:
- v9fs_fid_add(dentry, fid);
+ if (!IS_ERR(fid))
+ v9fs_fid_add(dentry, fid);
err_out:
up_read(&v9ses->rename_sem);
return fid;
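The fid.c hunk is standard ERR_PTR discipline: a pointer return may encode an errno, so it must pass IS_ERR() before being cached or dereferenced. A minimal sketch, with walk_to_fid() as a hypothetical stand-in for the walk logic above:

#include <linux/err.h>

/* walk_to_fid() is a hypothetical stand-in, not a real 9p helper. */
static struct p9_fid *walk_to_fid(struct v9fs_session_info *v9ses,
                                  struct dentry *dentry);

static struct p9_fid *lookup_fid(struct v9fs_session_info *v9ses,
                                 struct dentry *dentry)
{
        struct p9_fid *fid = walk_to_fid(v9ses, dentry);

        if (IS_ERR(fid))
                return fid;             /* propagate ERR_PTR(-errno) */

        v9fs_fid_add(dentry, fid);      /* cache only real fids */
        return fid;
}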
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
index 16c8a2a..899f168 100644
--- a/fs/9p/vfs_dir.c
+++ b/fs/9p/vfs_dir.c
@@ -292,9 +292,11 @@
fid = filp->private_data;
P9_DPRINTK(P9_DEBUG_VFS,
- "inode: %p filp: %p fid: %d\n", inode, filp, fid->fid);
+ "v9fs_dir_release: inode: %p filp: %p fid: %d\n",
+ inode, filp, fid ? fid->fid : -1);
filemap_write_and_wait(inode->i_mapping);
- p9_client_clunk(fid);
+ if (fid)
+ p9_client_clunk(fid);
return 0;
}
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index c7c23ea..9e670d5 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -730,7 +730,10 @@
P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
goto error;
}
- dentry->d_op = &v9fs_cached_dentry_operations;
+ if (v9ses->cache)
+ dentry->d_op = &v9fs_cached_dentry_operations;
+ else
+ dentry->d_op = &v9fs_dentry_operations;
d_instantiate(dentry, inode);
err = v9fs_fid_add(dentry, fid);
if (err < 0)
@@ -1128,6 +1131,7 @@
v9fs_stat2inode(st, dentry->d_inode, dentry->d_inode->i_sb);
generic_fillattr(dentry->d_inode, stat);
+ p9stat_free(st);
kfree(st);
return 0;
}
@@ -1489,6 +1493,7 @@
retval = strnlen(buffer, buflen);
done:
+ p9stat_free(st);
kfree(st);
return retval;
}
@@ -1942,7 +1947,7 @@
.unlink = v9fs_vfs_unlink,
.mkdir = v9fs_vfs_mkdir,
.rmdir = v9fs_vfs_rmdir,
- .mknod = v9fs_vfs_mknod_dotl,
+ .mknod = v9fs_vfs_mknod,
.rename = v9fs_vfs_rename,
.getattr = v9fs_vfs_getattr,
.setattr = v9fs_vfs_setattr,
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index f931107..1d12ba0 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -122,6 +122,10 @@
fid = v9fs_session_init(v9ses, dev_name, data);
if (IS_ERR(fid)) {
retval = PTR_ERR(fid);
+ /*
+ * we need to call session_close to tear down some
+ * of the data structures set up by session_init
+ */
goto close_session;
}
@@ -144,7 +148,6 @@
retval = -ENOMEM;
goto release_sb;
}
-
sb->s_root = root;
if (v9fs_proto_dotl(v9ses)) {
@@ -152,7 +155,7 @@
st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
if (IS_ERR(st)) {
retval = PTR_ERR(st);
- goto clunk_fid;
+ goto release_sb;
}
v9fs_stat2inode_dotl(st, root->d_inode);
@@ -162,7 +165,7 @@
st = p9_client_stat(fid);
if (IS_ERR(st)) {
retval = PTR_ERR(st);
- goto clunk_fid;
+ goto release_sb;
}
root->d_inode->i_ino = v9fs_qid2ino(&st->qid);
@@ -174,19 +177,24 @@
v9fs_fid_add(root, fid);
-P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n");
+ P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n");
simple_set_mnt(mnt, sb);
return 0;
clunk_fid:
p9_client_clunk(fid);
-
close_session:
v9fs_session_close(v9ses);
kfree(v9ses);
return retval;
-
release_sb:
+ /*
+ * we will do the session_close and root dentry release
+ * in the call below. But we need to clunk the fid, because we
+ * haven't attached it to the dentry, so it won't get clunked
+ * automatically.
+ */
+ p9_client_clunk(fid);
deactivate_locked_super(sb);
return retval;
}
diff --git a/fs/affs/super.c b/fs/affs/super.c
index 33c4e7e..9581ea9 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -109,8 +109,8 @@
{
struct affs_inode_info *ei = (struct affs_inode_info *) foo;
- init_MUTEX(&ei->i_link_lock);
- init_MUTEX(&ei->i_ext_lock);
+ sema_init(&ei->i_link_lock, 1);
+ sema_init(&ei->i_ext_lock, 1);
inode_init_once(&ei->vfs_inode);
}
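The affs hunk tracks the removal of init_MUTEX() from the tree; a semaphore used as a mutex is now initialized explicitly with a count of 1. In miniature:

#include <linux/semaphore.h>

static struct semaphore example_lock;

static void example_setup(void)
{
        sema_init(&example_lock, 1);    /* count 1 == old init_MUTEX() */
}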
diff --git a/fs/aio.c b/fs/aio.c
index 3006b5b..250b0a7 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -712,8 +712,16 @@
*/
ret = retry(iocb);
- if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED)
+ if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
+ /*
+ * There's no easy way to restart the syscall since other AIO's
+ * may be already running. Just fail this IO with EINTR.
+ */
+ if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
+ ret == -ERESTARTNOHAND || ret == -ERESTART_RESTARTBLOCK))
+ ret = -EINTR;
aio_complete(iocb, ret, 0);
+ }
out:
spin_lock_irq(&ctx->ctx_lock);
@@ -1659,6 +1667,9 @@
if (unlikely(nr < 0))
return -EINVAL;
+ if (unlikely(nr > LONG_MAX/sizeof(*iocbpp)))
+ nr = LONG_MAX/sizeof(*iocbpp);
+
if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
return -EFAULT;
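The first aio hunk maps restart errnos to -EINTR because an IO that has already reached aio_complete() cannot be restarted; the second clamps the iocb count before it is multiplied by a pointer size. The clamp in isolation (assuming nr was already checked to be non-negative, as in the hunk):

#include <linux/aio_abi.h>
#include <linux/kernel.h>

static long clamp_iocb_count(long nr)
{
        if (nr > LONG_MAX / sizeof(struct iocb *))
                nr = LONG_MAX / sizeof(struct iocb *);
        return nr;      /* nr * sizeof(struct iocb *) now fits in a long */
}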
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
index f96eff0..a6395bd 100644
--- a/fs/binfmt_aout.c
+++ b/fs/binfmt_aout.c
@@ -134,10 +134,6 @@
if (!dump_write(file, dump_start, dump_size))
goto end_coredump;
}
-/* Finally dump the task struct. Not be used by gdb, but could be useful */
- set_fs(KERNEL_DS);
- if (!dump_write(file, current, sizeof(*current)))
- goto end_coredump;
end_coredump:
set_fs(fs);
return has_dumped;
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index a7528b9..fd0cc0b 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -724,7 +724,7 @@
{
int err = register_filesystem(&bm_fs_type);
if (!err) {
- err = register_binfmt(&misc_format);
+ err = insert_binfmt(&misc_format);
if (err)
unregister_filesystem(&bm_fs_type);
}
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 612a5c3..4d0ff5e 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -413,10 +413,10 @@
/* Allocate kernel buffer for protection data */
len = sectors * blk_integrity_tuple_size(bi);
- buf = kmalloc(len, GFP_NOIO | __GFP_NOFAIL | q->bounce_gfp);
+ buf = kmalloc(len, GFP_NOIO | q->bounce_gfp);
if (unlikely(buf == NULL)) {
printk(KERN_ERR "could not allocate integrity buffer\n");
- return -EIO;
+ return -ENOMEM;
}
end = (((unsigned long) buf) + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
diff --git a/fs/ceph/Kconfig b/fs/ceph/Kconfig
index bc87b9c..9eb134e 100644
--- a/fs/ceph/Kconfig
+++ b/fs/ceph/Kconfig
@@ -1,8 +1,11 @@
config CEPH_FS
tristate "Ceph distributed file system (EXPERIMENTAL)"
depends on INET && EXPERIMENTAL
+ select CEPH_LIB
select LIBCRC32C
select CRYPTO_AES
+ select CRYPTO
+ default n
help
Choose Y or M here to include support for mounting the
experimental Ceph distributed file system. Ceph is an extremely
@@ -13,15 +16,3 @@
If unsure, say N.
-config CEPH_FS_PRETTYDEBUG
- bool "Include file:line in ceph debug output"
- depends on CEPH_FS
- default n
- help
- If you say Y here, debug output will include a filename and
- line to aid debugging. This increases kernel size and slows
- execution slightly when debug call sites are enabled (e.g.,
- via CONFIG_DYNAMIC_DEBUG).
-
- If unsure, say N.
-
diff --git a/fs/ceph/Makefile b/fs/ceph/Makefile
index 278e117..9e6c4f2 100644
--- a/fs/ceph/Makefile
+++ b/fs/ceph/Makefile
@@ -8,15 +8,8 @@
ceph-objs := super.o inode.o dir.o file.o locks.o addr.o ioctl.o \
export.o caps.o snap.o xattr.o \
- messenger.o msgpool.o buffer.o pagelist.o \
- mds_client.o mdsmap.o \
- mon_client.o \
- osd_client.o osdmap.o crush/crush.o crush/mapper.o crush/hash.o \
- debugfs.o \
- auth.o auth_none.o \
- crypto.o armor.o \
- auth_x.o \
- ceph_fs.o ceph_strings.o ceph_hash.o ceph_frag.o
+ mds_client.o mdsmap.o strings.o ceph_frag.o \
+ debugfs.o
else
#Otherwise we were called directly from the command
diff --git a/fs/ceph/README b/fs/ceph/README
deleted file mode 100644
index 18352fa..0000000
--- a/fs/ceph/README
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# The following files are shared by (and manually synchronized
-# between) the Ceph userland and kernel client.
-#
-# userland kernel
-src/include/ceph_fs.h fs/ceph/ceph_fs.h
-src/include/ceph_fs.cc fs/ceph/ceph_fs.c
-src/include/msgr.h fs/ceph/msgr.h
-src/include/rados.h fs/ceph/rados.h
-src/include/ceph_strings.cc fs/ceph/ceph_strings.c
-src/include/ceph_frag.h fs/ceph/ceph_frag.h
-src/include/ceph_frag.cc fs/ceph/ceph_frag.c
-src/include/ceph_hash.h fs/ceph/ceph_hash.h
-src/include/ceph_hash.cc fs/ceph/ceph_hash.c
-src/crush/crush.c fs/ceph/crush/crush.c
-src/crush/crush.h fs/ceph/crush/crush.h
-src/crush/mapper.c fs/ceph/crush/mapper.c
-src/crush/mapper.h fs/ceph/crush/mapper.h
-src/crush/hash.h fs/ceph/crush/hash.h
-src/crush/hash.c fs/ceph/crush/hash.c
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 5598a0d..51bcc5c 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1,4 +1,4 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
@@ -10,7 +10,8 @@
#include <linux/task_io_accounting_ops.h>
#include "super.h"
-#include "osd_client.h"
+#include "mds_client.h"
+#include <linux/ceph/osd_client.h>
/*
* Ceph address space ops.
@@ -87,7 +88,7 @@
/* dirty the head */
spin_lock(&inode->i_lock);
- if (ci->i_wrbuffer_ref_head == 0)
+ if (ci->i_head_snapc == NULL)
ci->i_head_snapc = ceph_get_snap_context(snapc);
++ci->i_wrbuffer_ref_head;
if (ci->i_wrbuffer_ref == 0)
@@ -105,13 +106,7 @@
spin_lock_irq(&mapping->tree_lock);
if (page->mapping) { /* Race with truncate? */
WARN_ON_ONCE(!PageUptodate(page));
-
- if (mapping_cap_account_dirty(mapping)) {
- __inc_zone_page_state(page, NR_FILE_DIRTY);
- __inc_bdi_stat(mapping->backing_dev_info,
- BDI_RECLAIMABLE);
- task_io_account_write(PAGE_CACHE_SIZE);
- }
+ account_page_dirtied(page, page->mapping);
radix_tree_tag_set(&mapping->page_tree,
page_index(page), PAGECACHE_TAG_DIRTY);
@@ -199,7 +194,8 @@
{
struct inode *inode = filp->f_dentry->d_inode;
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_osd_client *osdc = &ceph_inode_to_client(inode)->osdc;
+ struct ceph_osd_client *osdc =
+ &ceph_inode_to_client(inode)->client->osdc;
int err = 0;
u64 len = PAGE_CACHE_SIZE;
@@ -271,7 +267,8 @@
{
struct inode *inode = file->f_dentry->d_inode;
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_osd_client *osdc = &ceph_inode_to_client(inode)->osdc;
+ struct ceph_osd_client *osdc =
+ &ceph_inode_to_client(inode)->client->osdc;
int rc = 0;
struct page **pages;
loff_t offset;
@@ -352,7 +349,7 @@
break;
}
}
- if (!snapc && ci->i_head_snapc) {
+ if (!snapc && ci->i_wrbuffer_ref_head) {
snapc = ceph_get_snap_context(ci->i_head_snapc);
dout(" head snapc %p has %d dirty pages\n",
snapc, ci->i_wrbuffer_ref_head);
@@ -371,7 +368,7 @@
{
struct inode *inode;
struct ceph_inode_info *ci;
- struct ceph_client *client;
+ struct ceph_fs_client *fsc;
struct ceph_osd_client *osdc;
loff_t page_off = page->index << PAGE_CACHE_SHIFT;
int len = PAGE_CACHE_SIZE;
@@ -389,8 +386,8 @@
}
inode = page->mapping->host;
ci = ceph_inode(inode);
- client = ceph_inode_to_client(inode);
- osdc = &client->osdc;
+ fsc = ceph_inode_to_client(inode);
+ osdc = &fsc->client->osdc;
/* verify this is a writeable snap context */
snapc = (void *)page->private;
@@ -417,13 +414,13 @@
if (i_size < page_off + len)
len = i_size - page_off;
- dout("writepage %p page %p index %lu on %llu~%u\n",
- inode, page, page->index, page_off, len);
+ dout("writepage %p page %p index %lu on %llu~%u snapc %p\n",
+ inode, page, page->index, page_off, len, snapc);
- writeback_stat = atomic_long_inc_return(&client->writeback_count);
+ writeback_stat = atomic_long_inc_return(&fsc->writeback_count);
if (writeback_stat >
- CONGESTION_ON_THRESH(client->mount_args->congestion_kb))
- set_bdi_congested(&client->backing_dev_info, BLK_RW_ASYNC);
+ CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
+ set_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC);
set_page_writeback(page);
err = ceph_osdc_writepages(osdc, ceph_vino(inode),
@@ -502,7 +499,7 @@
struct address_space *mapping = inode->i_mapping;
__s32 rc = -EIO;
u64 bytes = 0;
- struct ceph_client *client = ceph_inode_to_client(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
long writeback_stat;
unsigned issued = ceph_caps_issued(ci);
@@ -535,10 +532,10 @@
WARN_ON(!PageUptodate(page));
writeback_stat =
- atomic_long_dec_return(&client->writeback_count);
+ atomic_long_dec_return(&fsc->writeback_count);
if (writeback_stat <
- CONGESTION_OFF_THRESH(client->mount_args->congestion_kb))
- clear_bdi_congested(&client->backing_dev_info,
+ CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb))
+ clear_bdi_congested(&fsc->backing_dev_info,
BLK_RW_ASYNC);
ceph_put_snap_context((void *)page->private);
@@ -575,13 +572,13 @@
* mempool. we avoid the mempool if we can because req->r_num_pages
* may be less than the maximum write size.
*/
-static void alloc_page_vec(struct ceph_client *client,
+static void alloc_page_vec(struct ceph_fs_client *fsc,
struct ceph_osd_request *req)
{
req->r_pages = kmalloc(sizeof(struct page *) * req->r_num_pages,
GFP_NOFS);
if (!req->r_pages) {
- req->r_pages = mempool_alloc(client->wb_pagevec_pool, GFP_NOFS);
+ req->r_pages = mempool_alloc(fsc->wb_pagevec_pool, GFP_NOFS);
req->r_pages_from_pool = 1;
WARN_ON(!req->r_pages);
}
@@ -596,7 +593,7 @@
struct inode *inode = mapping->host;
struct backing_dev_info *bdi = mapping->backing_dev_info;
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_client *client;
+ struct ceph_fs_client *fsc;
pgoff_t index, start, end;
int range_whole = 0;
int should_loop = 1;
@@ -623,13 +620,13 @@
wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
(wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));
- client = ceph_inode_to_client(inode);
- if (client->mount_state == CEPH_MOUNT_SHUTDOWN) {
+ fsc = ceph_inode_to_client(inode);
+ if (fsc->mount_state == CEPH_MOUNT_SHUTDOWN) {
pr_warning("writepage_start %p on forced umount\n", inode);
return -EIO; /* we're in a forced umount, don't write! */
}
- if (client->mount_args->wsize && client->mount_args->wsize < wsize)
- wsize = client->mount_args->wsize;
+ if (fsc->mount_options->wsize && fsc->mount_options->wsize < wsize)
+ wsize = fsc->mount_options->wsize;
if (wsize < PAGE_CACHE_SIZE)
wsize = PAGE_CACHE_SIZE;
max_pages_ever = wsize >> PAGE_CACHE_SHIFT;
@@ -772,9 +769,10 @@
/* ok */
if (locked_pages == 0) {
/* prepare async write request */
- offset = page->index << PAGE_CACHE_SHIFT;
+ offset = (unsigned long long)page->index
+ << PAGE_CACHE_SHIFT;
len = wsize;
- req = ceph_osdc_new_request(&client->osdc,
+ req = ceph_osdc_new_request(&fsc->client->osdc,
&ci->i_layout,
ceph_vino(inode),
offset, &len,
@@ -787,7 +785,7 @@
&inode->i_mtime, true, 1);
max_pages = req->r_num_pages;
- alloc_page_vec(client, req);
+ alloc_page_vec(fsc, req);
req->r_callback = writepages_finish;
req->r_inode = inode;
}
@@ -799,10 +797,10 @@
inode, page, page->index);
writeback_stat =
- atomic_long_inc_return(&client->writeback_count);
+ atomic_long_inc_return(&fsc->writeback_count);
if (writeback_stat > CONGESTION_ON_THRESH(
- client->mount_args->congestion_kb)) {
- set_bdi_congested(&client->backing_dev_info,
+ fsc->mount_options->congestion_kb)) {
+ set_bdi_congested(&fsc->backing_dev_info,
BLK_RW_ASYNC);
}
@@ -851,7 +849,7 @@
op->payload_len = cpu_to_le32(len);
req->r_request->hdr.data_len = cpu_to_le32(len);
- ceph_osdc_start_request(&client->osdc, req, true);
+ ceph_osdc_start_request(&fsc->client->osdc, req, true);
req = NULL;
/* continue? */
@@ -920,7 +918,7 @@
{
struct inode *inode = file->f_dentry->d_inode;
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
loff_t page_off = pos & PAGE_CACHE_MASK;
int pos_in_page = pos & ~PAGE_CACHE_MASK;
int end_in_page = pos_in_page + len;
@@ -1058,8 +1056,8 @@
struct page *page, void *fsdata)
{
struct inode *inode = file->f_dentry->d_inode;
- struct ceph_client *client = ceph_inode_to_client(inode);
- struct ceph_mds_client *mdsc = &client->mdsc;
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
unsigned from = pos & (PAGE_CACHE_SIZE - 1);
int check_cap = 0;
@@ -1128,7 +1126,7 @@
{
struct inode *inode = vma->vm_file->f_dentry->d_inode;
struct page *page = vmf->page;
- struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
loff_t off = page->index << PAGE_CACHE_SHIFT;
loff_t size, len;
int ret;
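The one-line writepages change above fixes a 32-bit truncation: pgoff_t is unsigned long, so the page index must be widened before the shift, not after, or byte offsets past 4GB wrap. In miniature:

#include <linux/pagemap.h>
#include <linux/types.h>

static u64 page_byte_offset(pgoff_t index)
{
        return (u64)index << PAGE_CACHE_SHIFT;  /* widen, then shift */
}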
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 7bf182b..98ab13e 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1,4 +1,4 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
#include <linux/fs.h>
#include <linux/kernel.h>
@@ -9,8 +9,9 @@
#include <linux/writeback.h>
#include "super.h"
-#include "decode.h"
-#include "messenger.h"
+#include "mds_client.h"
+#include <linux/ceph/decode.h>
+#include <linux/ceph/messenger.h>
/*
* Capability management
@@ -287,11 +288,11 @@
spin_unlock(&mdsc->caps_list_lock);
}
-void ceph_reservation_status(struct ceph_client *client,
+void ceph_reservation_status(struct ceph_fs_client *fsc,
int *total, int *avail, int *used, int *reserved,
int *min)
{
- struct ceph_mds_client *mdsc = &client->mdsc;
+ struct ceph_mds_client *mdsc = fsc->mdsc;
if (total)
*total = mdsc->caps_total_count;
@@ -399,7 +400,7 @@
static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
struct ceph_inode_info *ci)
{
- struct ceph_mount_args *ma = mdsc->client->mount_args;
+ struct ceph_mount_options *ma = mdsc->fsc->mount_options;
ci->i_hold_caps_min = round_jiffies(jiffies +
ma->caps_wanted_delay_min * HZ);
@@ -515,7 +516,7 @@
unsigned seq, unsigned mseq, u64 realmino, int flags,
struct ceph_cap_reservation *caps_reservation)
{
- struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_cap *new_cap = NULL;
struct ceph_cap *cap;
@@ -814,7 +815,7 @@
used |= CEPH_CAP_PIN;
if (ci->i_rd_ref)
used |= CEPH_CAP_FILE_RD;
- if (ci->i_rdcache_ref || ci->i_rdcache_gen)
+ if (ci->i_rdcache_ref || ci->vfs_inode.i_data.nrpages)
used |= CEPH_CAP_FILE_CACHE;
if (ci->i_wr_ref)
used |= CEPH_CAP_FILE_WR;
@@ -873,7 +874,7 @@
struct ceph_mds_session *session = cap->session;
struct ceph_inode_info *ci = cap->ci;
struct ceph_mds_client *mdsc =
- &ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
+ ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
int removed = 0;
dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);
@@ -1082,6 +1083,7 @@
gid_t gid;
struct ceph_mds_session *session;
u64 xattr_version = 0;
+ struct ceph_buffer *xattr_blob = NULL;
int delayed = 0;
u64 flush_tid = 0;
int i;
@@ -1142,6 +1144,10 @@
for (i = 0; i < CEPH_CAP_BITS; i++)
if (flushing & (1 << i))
ci->i_cap_flush_tid[i] = flush_tid;
+
+ follows = ci->i_head_snapc->seq;
+ } else {
+ follows = 0;
}
keep = cap->implemented;
@@ -1155,14 +1161,14 @@
mtime = inode->i_mtime;
atime = inode->i_atime;
time_warp_seq = ci->i_time_warp_seq;
- follows = ci->i_snap_realm->cached_context->seq;
uid = inode->i_uid;
gid = inode->i_gid;
mode = inode->i_mode;
- if (dropping & CEPH_CAP_XATTR_EXCL) {
+ if (flushing & CEPH_CAP_XATTR_EXCL) {
__ceph_build_xattrs_blob(ci);
- xattr_version = ci->i_xattrs.version + 1;
+ xattr_blob = ci->i_xattrs.blob;
+ xattr_version = ci->i_xattrs.version;
}
spin_unlock(&inode->i_lock);
@@ -1170,9 +1176,7 @@
ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
op, keep, want, flushing, seq, flush_tid, issue_seq, mseq,
size, max_size, &mtime, &atime, time_warp_seq,
- uid, gid, mode,
- xattr_version,
- (flushing & CEPH_CAP_XATTR_EXCL) ? ci->i_xattrs.blob : NULL,
+ uid, gid, mode, xattr_version, xattr_blob,
follows);
if (ret < 0) {
dout("error sending cap msg, must requeue %p\n", inode);
@@ -1192,10 +1196,14 @@
* asynchronously back to the MDS once sync writes complete and dirty
* data is written out.
*
+ * Unless @again is true, skip cap_snaps that were already sent to
+ * the MDS (i.e., during this session).
+ *
* Called under i_lock. Takes s_mutex as needed.
*/
void __ceph_flush_snaps(struct ceph_inode_info *ci,
- struct ceph_mds_session **psession)
+ struct ceph_mds_session **psession,
+ int again)
__releases(ci->vfs_inode->i_lock)
__acquires(ci->vfs_inode->i_lock)
{
@@ -1203,7 +1211,7 @@
int mds;
struct ceph_cap_snap *capsnap;
u32 mseq;
- struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
struct ceph_mds_session *session = NULL; /* if session != NULL, we hold
session->s_mutex */
u64 next_follows = 0; /* keep track of how far we've gotten through the
@@ -1224,7 +1232,7 @@
* pages to be written out.
*/
if (capsnap->dirty_pages || capsnap->writing)
- continue;
+ break;
/*
* if cap writeback already occurred, we should have dropped
@@ -1237,6 +1245,13 @@
dout("no auth cap (migrating?), doing nothing\n");
goto out;
}
+
+ /* only flush each capsnap once */
+ if (!again && !list_empty(&capsnap->flushing_item)) {
+ dout("already flushed %p, skipping\n", capsnap);
+ continue;
+ }
+
mds = ci->i_auth_cap->session->s_mds;
mseq = ci->i_auth_cap->mseq;
@@ -1273,8 +1288,8 @@
&session->s_cap_snaps_flushing);
spin_unlock(&inode->i_lock);
- dout("flush_snaps %p cap_snap %p follows %lld size %llu\n",
- inode, capsnap, next_follows, capsnap->size);
+ dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n",
+ inode, capsnap, capsnap->follows, capsnap->flush_tid);
send_cap_msg(session, ceph_vino(inode).ino, 0,
CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0,
capsnap->dirty, 0, capsnap->flush_tid, 0, mseq,
@@ -1282,7 +1297,7 @@
&capsnap->mtime, &capsnap->atime,
capsnap->time_warp_seq,
capsnap->uid, capsnap->gid, capsnap->mode,
- 0, NULL,
+ capsnap->xattr_version, capsnap->xattr_blob,
capsnap->follows);
next_follows = capsnap->follows + 1;
@@ -1311,7 +1326,7 @@
struct inode *inode = &ci->vfs_inode;
spin_lock(&inode->i_lock);
- __ceph_flush_snaps(ci, NULL);
+ __ceph_flush_snaps(ci, NULL, 0);
spin_unlock(&inode->i_lock);
}
@@ -1322,7 +1337,7 @@
void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
{
struct ceph_mds_client *mdsc =
- &ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
+ ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
struct inode *inode = &ci->vfs_inode;
int was = ci->i_dirty_caps;
int dirty = 0;
@@ -1332,7 +1347,11 @@
ceph_cap_string(was | mask));
ci->i_dirty_caps |= mask;
if (was == 0) {
- dout(" inode %p now dirty\n", &ci->vfs_inode);
+ if (!ci->i_head_snapc)
+ ci->i_head_snapc = ceph_get_snap_context(
+ ci->i_snap_realm->cached_context);
+ dout(" inode %p now dirty snapc %p\n", &ci->vfs_inode,
+ ci->i_head_snapc);
BUG_ON(!list_empty(&ci->i_dirty_item));
spin_lock(&mdsc->cap_dirty_lock);
list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
@@ -1360,7 +1379,7 @@
static int __mark_caps_flushing(struct inode *inode,
struct ceph_mds_session *session)
{
- struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
struct ceph_inode_info *ci = ceph_inode(inode);
int flushing;
@@ -1398,17 +1417,6 @@
/*
* try to invalidate mapping pages without blocking.
*/
-static int mapping_is_empty(struct address_space *mapping)
-{
- struct page *page = find_get_page(mapping, 0);
-
- if (!page)
- return 1;
-
- put_page(page);
- return 0;
-}
-
static int try_nonblocking_invalidate(struct inode *inode)
{
struct ceph_inode_info *ci = ceph_inode(inode);
@@ -1418,7 +1426,7 @@
invalidate_mapping_pages(&inode->i_data, 0, -1);
spin_lock(&inode->i_lock);
- if (mapping_is_empty(&inode->i_data) &&
+ if (inode->i_data.nrpages == 0 &&
invalidating_gen == ci->i_rdcache_gen) {
/* success. */
dout("try_nonblocking_invalidate %p success\n", inode);
@@ -1444,8 +1452,8 @@
void ceph_check_caps(struct ceph_inode_info *ci, int flags,
struct ceph_mds_session *session)
{
- struct ceph_client *client = ceph_inode_to_client(&ci->vfs_inode);
- struct ceph_mds_client *mdsc = &client->mdsc;
+ struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
struct inode *inode = &ci->vfs_inode;
struct ceph_cap *cap;
int file_wanted, used;
@@ -1470,7 +1478,7 @@
/* flush snaps first time around only */
if (!list_empty(&ci->i_cap_snaps))
- __ceph_flush_snaps(ci, &session);
+ __ceph_flush_snaps(ci, &session, 0);
goto retry_locked;
retry:
spin_lock(&inode->i_lock);
@@ -1515,7 +1523,7 @@
*/
if ((!is_delayed || mdsc->stopping) &&
ci->i_wrbuffer_ref == 0 && /* no dirty pages... */
- ci->i_rdcache_gen && /* may have cached pages */
+ inode->i_data.nrpages && /* have cached pages */
(file_wanted == 0 || /* no open files */
(revoking & (CEPH_CAP_FILE_CACHE|
CEPH_CAP_FILE_LAZYIO))) && /* or revoking cache */
@@ -1688,7 +1696,7 @@
static int try_flush_caps(struct inode *inode, struct ceph_mds_session *session,
unsigned *flush_tid)
{
- struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
struct ceph_inode_info *ci = ceph_inode(inode);
int unlock_session = session ? 0 : 1;
int flushing = 0;
@@ -1854,7 +1862,7 @@
caps_are_flushed(inode, flush_tid));
} else {
struct ceph_mds_client *mdsc =
- &ceph_sb_to_client(inode->i_sb)->mdsc;
+ ceph_sb_to_client(inode->i_sb)->mdsc;
spin_lock(&inode->i_lock);
if (__ceph_caps_dirty(ci))
@@ -1887,7 +1895,7 @@
if (cap && cap->session == session) {
dout("kick_flushing_caps %p cap %p capsnap %p\n", inode,
cap, capsnap);
- __ceph_flush_snaps(ci, &session);
+ __ceph_flush_snaps(ci, &session, 1);
} else {
pr_err("%p auth cap %p not mds%d ???\n", inode,
cap, session->s_mds);
@@ -2190,7 +2198,9 @@
if (ci->i_head_snapc == snapc) {
ci->i_wrbuffer_ref_head -= nr;
- if (!ci->i_wrbuffer_ref_head) {
+ if (ci->i_wrbuffer_ref_head == 0 &&
+ ci->i_dirty_caps == 0 && ci->i_flushing_caps == 0) {
+ BUG_ON(!ci->i_head_snapc);
ceph_put_snap_context(ci->i_head_snapc);
ci->i_head_snapc = NULL;
}
@@ -2263,7 +2273,8 @@
{
struct ceph_inode_info *ci = ceph_inode(inode);
int mds = session->s_mds;
- int seq = le32_to_cpu(grant->seq);
+ unsigned seq = le32_to_cpu(grant->seq);
+ unsigned issue_seq = le32_to_cpu(grant->issue_seq);
int newcaps = le32_to_cpu(grant->caps);
int issued, implemented, used, wanted, dirty;
u64 size = le64_to_cpu(grant->size);
@@ -2275,8 +2286,8 @@
int revoked_rdcache = 0;
int queue_invalidate = 0;
- dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n",
- inode, cap, mds, seq, ceph_cap_string(newcaps));
+ dout("handle_cap_grant inode %p cap %p mds%d seq %u/%u %s\n",
+ inode, cap, mds, seq, issue_seq, ceph_cap_string(newcaps));
dout(" size %llu max_size %llu, i_size %llu\n", size, max_size,
inode->i_size);
@@ -2372,6 +2383,7 @@
}
cap->seq = seq;
+ cap->issue_seq = issue_seq;
/* file layout may have changed */
ci->i_layout = grant->layout;
@@ -2443,7 +2455,7 @@
__releases(inode->i_lock)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
unsigned seq = le32_to_cpu(m->seq);
int dirty = le32_to_cpu(m->dirty);
int cleaned = 0;
@@ -2483,6 +2495,11 @@
dout(" inode %p now clean\n", inode);
BUG_ON(!list_empty(&ci->i_dirty_item));
drop = 1;
+ if (ci->i_wrbuffer_ref_head == 0) {
+ BUG_ON(!ci->i_head_snapc);
+ ceph_put_snap_context(ci->i_head_snapc);
+ ci->i_head_snapc = NULL;
+ }
} else {
BUG_ON(list_empty(&ci->i_dirty_item));
}
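[Editor's note] The two hunks above pin down when i_head_snapc may be dropped: only once write-buffer refs, dirty caps, and flushing caps are all gone. A standalone model of that refcount rule (all types and helpers here are illustrative stand-ins, not the kernel's):

#include <assert.h>
#include <stdio.h>

struct snapc { int ref; };

static struct snapc *snapc_get(struct snapc *s) { s->ref++; return s; }
static void snapc_put(struct snapc *s) { assert(--s->ref >= 0); }

struct inode_state {
	struct snapc *head_snapc;
	int wrbuffer_ref_head, dirty_caps, flushing_caps;
};

static void mark_dirty(struct inode_state *ci, struct snapc *realm)
{
	if (!ci->head_snapc)			/* first dirtying takes a ref */
		ci->head_snapc = snapc_get(realm);
	ci->dirty_caps = 1;
}

static void maybe_drop(struct inode_state *ci)
{
	if (ci->wrbuffer_ref_head == 0 &&
	    ci->dirty_caps == 0 && ci->flushing_caps == 0 &&
	    ci->head_snapc) {			/* nothing left referencing it */
		snapc_put(ci->head_snapc);
		ci->head_snapc = NULL;
	}
}

int main(void)
{
	struct snapc realm = { 1 };
	struct inode_state ci = { 0 };

	mark_dirty(&ci, &realm);
	ci.dirty_caps = 0;			/* flush ack cleaned everything */
	maybe_drop(&ci);
	printf("refs left: %d\n", realm.ref);
	return 0;
}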
@@ -2686,7 +2703,7 @@
struct ceph_msg *msg)
{
struct ceph_mds_client *mdsc = session->s_mdsc;
- struct super_block *sb = mdsc->client->sb;
+ struct super_block *sb = mdsc->fsc->sb;
struct inode *inode;
struct ceph_cap *cap;
struct ceph_mds_caps *h;
@@ -2749,15 +2766,7 @@
if (op == CEPH_CAP_OP_IMPORT)
__queue_cap_release(session, vino.ino, cap_id,
mseq, seq);
-
- /*
- * send any full release message to try to move things
- * along for the mds (who clearly thinks we still have this
- * cap).
- */
- ceph_add_cap_releases(mdsc, session);
- ceph_send_cap_releases(mdsc, session);
- goto done;
+ goto flush_cap_releases;
}
/* these will work even if we don't have a cap yet */
@@ -2785,7 +2794,7 @@
dout(" no cap on %p ino %llx.%llx from mds%d\n",
inode, ceph_ino(inode), ceph_snap(inode), mds);
spin_unlock(&inode->i_lock);
- goto done;
+ goto flush_cap_releases;
}
/* note that each of these drops i_lock for us */
@@ -2809,6 +2818,17 @@
ceph_cap_op_name(op));
}
+ goto done;
+
+flush_cap_releases:
+ /*
+ * send any full release message to try to move things
+ * along for the mds (who clearly thinks we still have this
+ * cap).
+ */
+ ceph_add_cap_releases(mdsc, session);
+ ceph_send_cap_releases(mdsc, session);
+
done:
mutex_unlock(&session->s_mutex);
done_unlocked:
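[Editor's note] The rework above funnels both the no-inode and no-cap paths through a single flush_cap_releases label instead of duplicating the flush-and-return sequence. A minimal compilable sketch of that forward-goto flow, with hypothetical stub functions in place of the kernel API:

#include <stdio.h>

static int lookup_cap(int have_inode) { return have_inode; }
static void flush_releases(void) { printf("flushing queued cap releases\n"); }

static void handle_cap_msg(int have_inode)
{
	if (!lookup_cap(have_inode))
		goto flush_cap_releases;	/* no cap: nudge the MDS anyway */

	printf("dispatching cap op\n");
	goto done;

flush_cap_releases:
	flush_releases();			/* move the MDS along */
done:
	printf("unlocking session\n");
}

int main(void)
{
	handle_cap_msg(0);
	handle_cap_msg(1);
	return 0;
}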
diff --git a/fs/ceph/ceph_frag.c b/fs/ceph/ceph_frag.c
index ab6cf35..bdce8b1 100644
--- a/fs/ceph/ceph_frag.c
+++ b/fs/ceph/ceph_frag.c
@@ -1,7 +1,8 @@
/*
* Ceph 'frag' type
*/
-#include "types.h"
+#include <linux/module.h>
+#include <linux/ceph/types.h>
int ceph_frag_compare(__u32 a, __u32 b)
{
diff --git a/fs/ceph/ceph_strings.c b/fs/ceph/ceph_strings.c
deleted file mode 100644
index c6179d3..0000000
--- a/fs/ceph/ceph_strings.c
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * Ceph string constants
- */
-#include "types.h"
-
-const char *ceph_entity_type_name(int type)
-{
- switch (type) {
- case CEPH_ENTITY_TYPE_MDS: return "mds";
- case CEPH_ENTITY_TYPE_OSD: return "osd";
- case CEPH_ENTITY_TYPE_MON: return "mon";
- case CEPH_ENTITY_TYPE_CLIENT: return "client";
- case CEPH_ENTITY_TYPE_AUTH: return "auth";
- default: return "unknown";
- }
-}
-
-const char *ceph_osd_op_name(int op)
-{
- switch (op) {
- case CEPH_OSD_OP_READ: return "read";
- case CEPH_OSD_OP_STAT: return "stat";
-
- case CEPH_OSD_OP_MASKTRUNC: return "masktrunc";
-
- case CEPH_OSD_OP_WRITE: return "write";
- case CEPH_OSD_OP_DELETE: return "delete";
- case CEPH_OSD_OP_TRUNCATE: return "truncate";
- case CEPH_OSD_OP_ZERO: return "zero";
- case CEPH_OSD_OP_WRITEFULL: return "writefull";
- case CEPH_OSD_OP_ROLLBACK: return "rollback";
-
- case CEPH_OSD_OP_APPEND: return "append";
- case CEPH_OSD_OP_STARTSYNC: return "startsync";
- case CEPH_OSD_OP_SETTRUNC: return "settrunc";
- case CEPH_OSD_OP_TRIMTRUNC: return "trimtrunc";
-
- case CEPH_OSD_OP_TMAPUP: return "tmapup";
- case CEPH_OSD_OP_TMAPGET: return "tmapget";
- case CEPH_OSD_OP_TMAPPUT: return "tmapput";
-
- case CEPH_OSD_OP_GETXATTR: return "getxattr";
- case CEPH_OSD_OP_GETXATTRS: return "getxattrs";
- case CEPH_OSD_OP_SETXATTR: return "setxattr";
- case CEPH_OSD_OP_SETXATTRS: return "setxattrs";
- case CEPH_OSD_OP_RESETXATTRS: return "resetxattrs";
- case CEPH_OSD_OP_RMXATTR: return "rmxattr";
- case CEPH_OSD_OP_CMPXATTR: return "cmpxattr";
-
- case CEPH_OSD_OP_PULL: return "pull";
- case CEPH_OSD_OP_PUSH: return "push";
- case CEPH_OSD_OP_BALANCEREADS: return "balance-reads";
- case CEPH_OSD_OP_UNBALANCEREADS: return "unbalance-reads";
- case CEPH_OSD_OP_SCRUB: return "scrub";
-
- case CEPH_OSD_OP_WRLOCK: return "wrlock";
- case CEPH_OSD_OP_WRUNLOCK: return "wrunlock";
- case CEPH_OSD_OP_RDLOCK: return "rdlock";
- case CEPH_OSD_OP_RDUNLOCK: return "rdunlock";
- case CEPH_OSD_OP_UPLOCK: return "uplock";
- case CEPH_OSD_OP_DNLOCK: return "dnlock";
-
- case CEPH_OSD_OP_CALL: return "call";
-
- case CEPH_OSD_OP_PGLS: return "pgls";
- }
- return "???";
-}
-
-const char *ceph_mds_state_name(int s)
-{
- switch (s) {
- /* down and out */
- case CEPH_MDS_STATE_DNE: return "down:dne";
- case CEPH_MDS_STATE_STOPPED: return "down:stopped";
- /* up and out */
- case CEPH_MDS_STATE_BOOT: return "up:boot";
- case CEPH_MDS_STATE_STANDBY: return "up:standby";
- case CEPH_MDS_STATE_STANDBY_REPLAY: return "up:standby-replay";
- case CEPH_MDS_STATE_CREATING: return "up:creating";
- case CEPH_MDS_STATE_STARTING: return "up:starting";
- /* up and in */
- case CEPH_MDS_STATE_REPLAY: return "up:replay";
- case CEPH_MDS_STATE_RESOLVE: return "up:resolve";
- case CEPH_MDS_STATE_RECONNECT: return "up:reconnect";
- case CEPH_MDS_STATE_REJOIN: return "up:rejoin";
- case CEPH_MDS_STATE_CLIENTREPLAY: return "up:clientreplay";
- case CEPH_MDS_STATE_ACTIVE: return "up:active";
- case CEPH_MDS_STATE_STOPPING: return "up:stopping";
- }
- return "???";
-}
-
-const char *ceph_session_op_name(int op)
-{
- switch (op) {
- case CEPH_SESSION_REQUEST_OPEN: return "request_open";
- case CEPH_SESSION_OPEN: return "open";
- case CEPH_SESSION_REQUEST_CLOSE: return "request_close";
- case CEPH_SESSION_CLOSE: return "close";
- case CEPH_SESSION_REQUEST_RENEWCAPS: return "request_renewcaps";
- case CEPH_SESSION_RENEWCAPS: return "renewcaps";
- case CEPH_SESSION_STALE: return "stale";
- case CEPH_SESSION_RECALL_STATE: return "recall_state";
- }
- return "???";
-}
-
-const char *ceph_mds_op_name(int op)
-{
- switch (op) {
- case CEPH_MDS_OP_LOOKUP: return "lookup";
- case CEPH_MDS_OP_LOOKUPHASH: return "lookuphash";
- case CEPH_MDS_OP_LOOKUPPARENT: return "lookupparent";
- case CEPH_MDS_OP_GETATTR: return "getattr";
- case CEPH_MDS_OP_SETXATTR: return "setxattr";
- case CEPH_MDS_OP_SETATTR: return "setattr";
- case CEPH_MDS_OP_RMXATTR: return "rmxattr";
- case CEPH_MDS_OP_READDIR: return "readdir";
- case CEPH_MDS_OP_MKNOD: return "mknod";
- case CEPH_MDS_OP_LINK: return "link";
- case CEPH_MDS_OP_UNLINK: return "unlink";
- case CEPH_MDS_OP_RENAME: return "rename";
- case CEPH_MDS_OP_MKDIR: return "mkdir";
- case CEPH_MDS_OP_RMDIR: return "rmdir";
- case CEPH_MDS_OP_SYMLINK: return "symlink";
- case CEPH_MDS_OP_CREATE: return "create";
- case CEPH_MDS_OP_OPEN: return "open";
- case CEPH_MDS_OP_LOOKUPSNAP: return "lookupsnap";
- case CEPH_MDS_OP_LSSNAP: return "lssnap";
- case CEPH_MDS_OP_MKSNAP: return "mksnap";
- case CEPH_MDS_OP_RMSNAP: return "rmsnap";
- case CEPH_MDS_OP_SETFILELOCK: return "setfilelock";
- case CEPH_MDS_OP_GETFILELOCK: return "getfilelock";
- }
- return "???";
-}
-
-const char *ceph_cap_op_name(int op)
-{
- switch (op) {
- case CEPH_CAP_OP_GRANT: return "grant";
- case CEPH_CAP_OP_REVOKE: return "revoke";
- case CEPH_CAP_OP_TRUNC: return "trunc";
- case CEPH_CAP_OP_EXPORT: return "export";
- case CEPH_CAP_OP_IMPORT: return "import";
- case CEPH_CAP_OP_UPDATE: return "update";
- case CEPH_CAP_OP_DROP: return "drop";
- case CEPH_CAP_OP_FLUSH: return "flush";
- case CEPH_CAP_OP_FLUSH_ACK: return "flush_ack";
- case CEPH_CAP_OP_FLUSHSNAP: return "flushsnap";
- case CEPH_CAP_OP_FLUSHSNAP_ACK: return "flushsnap_ack";
- case CEPH_CAP_OP_RELEASE: return "release";
- case CEPH_CAP_OP_RENEW: return "renew";
- }
- return "???";
-}
-
-const char *ceph_lease_op_name(int o)
-{
- switch (o) {
- case CEPH_MDS_LEASE_REVOKE: return "revoke";
- case CEPH_MDS_LEASE_RELEASE: return "release";
- case CEPH_MDS_LEASE_RENEW: return "renew";
- case CEPH_MDS_LEASE_REVOKE_ACK: return "revoke_ack";
- }
- return "???";
-}
-
-const char *ceph_snap_op_name(int o)
-{
- switch (o) {
- case CEPH_SNAP_OP_UPDATE: return "update";
- case CEPH_SNAP_OP_CREATE: return "create";
- case CEPH_SNAP_OP_DESTROY: return "destroy";
- case CEPH_SNAP_OP_SPLIT: return "split";
- }
- return "???";
-}
-
-const char *ceph_pool_op_name(int op)
-{
- switch (op) {
- case POOL_OP_CREATE: return "create";
- case POOL_OP_DELETE: return "delete";
- case POOL_OP_AUID_CHANGE: return "auid change";
- case POOL_OP_CREATE_SNAP: return "create snap";
- case POOL_OP_DELETE_SNAP: return "delete snap";
- case POOL_OP_CREATE_UNMANAGED_SNAP: return "create unmanaged snap";
- case POOL_OP_DELETE_UNMANAGED_SNAP: return "delete unmanaged snap";
- }
- return "???";
-}
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index 360c4f2..7ae1b3d 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -1,4 +1,4 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
#include <linux/device.h>
#include <linux/slab.h>
@@ -7,143 +7,49 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/ceph/libceph.h>
+#include <linux/ceph/mon_client.h>
+#include <linux/ceph/auth.h>
+#include <linux/ceph/debugfs.h>
+
#include "super.h"
-#include "mds_client.h"
-#include "mon_client.h"
-#include "auth.h"
#ifdef CONFIG_DEBUG_FS
-/*
- * Implement /sys/kernel/debug/ceph fun
- *
- * /sys/kernel/debug/ceph/client* - an instance of the ceph client
- * .../osdmap - current osdmap
- * .../mdsmap - current mdsmap
- * .../monmap - current monmap
- * .../osdc - active osd requests
- * .../mdsc - active mds requests
- * .../monc - mon client state
- * .../dentry_lru - dump contents of dentry lru
- * .../caps - expose cap (reservation) stats
- * .../bdi - symlink to ../../bdi/something
- */
-
-static struct dentry *ceph_debugfs_dir;
-
-static int monmap_show(struct seq_file *s, void *p)
-{
- int i;
- struct ceph_client *client = s->private;
-
- if (client->monc.monmap == NULL)
- return 0;
-
- seq_printf(s, "epoch %d\n", client->monc.monmap->epoch);
- for (i = 0; i < client->monc.monmap->num_mon; i++) {
- struct ceph_entity_inst *inst =
- &client->monc.monmap->mon_inst[i];
-
- seq_printf(s, "\t%s%lld\t%s\n",
- ENTITY_NAME(inst->name),
- pr_addr(&inst->addr.in_addr));
- }
- return 0;
-}
+#include "mds_client.h"
static int mdsmap_show(struct seq_file *s, void *p)
{
int i;
- struct ceph_client *client = s->private;
+ struct ceph_fs_client *fsc = s->private;
- if (client->mdsc.mdsmap == NULL)
+ if (fsc->mdsc == NULL || fsc->mdsc->mdsmap == NULL)
return 0;
- seq_printf(s, "epoch %d\n", client->mdsc.mdsmap->m_epoch);
- seq_printf(s, "root %d\n", client->mdsc.mdsmap->m_root);
+ seq_printf(s, "epoch %d\n", fsc->mdsc->mdsmap->m_epoch);
+ seq_printf(s, "root %d\n", fsc->mdsc->mdsmap->m_root);
seq_printf(s, "session_timeout %d\n",
- client->mdsc.mdsmap->m_session_timeout);
+ fsc->mdsc->mdsmap->m_session_timeout);
seq_printf(s, "session_autoclose %d\n",
- client->mdsc.mdsmap->m_session_autoclose);
- for (i = 0; i < client->mdsc.mdsmap->m_max_mds; i++) {
+ fsc->mdsc->mdsmap->m_session_autoclose);
+ for (i = 0; i < fsc->mdsc->mdsmap->m_max_mds; i++) {
struct ceph_entity_addr *addr =
- &client->mdsc.mdsmap->m_info[i].addr;
- int state = client->mdsc.mdsmap->m_info[i].state;
+ &fsc->mdsc->mdsmap->m_info[i].addr;
+ int state = fsc->mdsc->mdsmap->m_info[i].state;
- seq_printf(s, "\tmds%d\t%s\t(%s)\n", i, pr_addr(&addr->in_addr),
+ seq_printf(s, "\tmds%d\t%s\t(%s)\n", i,
+ ceph_pr_addr(&addr->in_addr),
ceph_mds_state_name(state));
}
return 0;
}
-static int osdmap_show(struct seq_file *s, void *p)
-{
- int i;
- struct ceph_client *client = s->private;
- struct rb_node *n;
-
- if (client->osdc.osdmap == NULL)
- return 0;
- seq_printf(s, "epoch %d\n", client->osdc.osdmap->epoch);
- seq_printf(s, "flags%s%s\n",
- (client->osdc.osdmap->flags & CEPH_OSDMAP_NEARFULL) ?
- " NEARFULL" : "",
- (client->osdc.osdmap->flags & CEPH_OSDMAP_FULL) ?
- " FULL" : "");
- for (n = rb_first(&client->osdc.osdmap->pg_pools); n; n = rb_next(n)) {
- struct ceph_pg_pool_info *pool =
- rb_entry(n, struct ceph_pg_pool_info, node);
- seq_printf(s, "pg_pool %d pg_num %d / %d, lpg_num %d / %d\n",
- pool->id, pool->v.pg_num, pool->pg_num_mask,
- pool->v.lpg_num, pool->lpg_num_mask);
- }
- for (i = 0; i < client->osdc.osdmap->max_osd; i++) {
- struct ceph_entity_addr *addr =
- &client->osdc.osdmap->osd_addr[i];
- int state = client->osdc.osdmap->osd_state[i];
- char sb[64];
-
- seq_printf(s, "\tosd%d\t%s\t%3d%%\t(%s)\n",
- i, pr_addr(&addr->in_addr),
- ((client->osdc.osdmap->osd_weight[i]*100) >> 16),
- ceph_osdmap_state_str(sb, sizeof(sb), state));
- }
- return 0;
-}
-
-static int monc_show(struct seq_file *s, void *p)
-{
- struct ceph_client *client = s->private;
- struct ceph_mon_generic_request *req;
- struct ceph_mon_client *monc = &client->monc;
- struct rb_node *rp;
-
- mutex_lock(&monc->mutex);
-
- if (monc->have_mdsmap)
- seq_printf(s, "have mdsmap %u\n", (unsigned)monc->have_mdsmap);
- if (monc->have_osdmap)
- seq_printf(s, "have osdmap %u\n", (unsigned)monc->have_osdmap);
- if (monc->want_next_osdmap)
- seq_printf(s, "want next osdmap\n");
-
- for (rp = rb_first(&monc->generic_request_tree); rp; rp = rb_next(rp)) {
- __u16 op;
- req = rb_entry(rp, struct ceph_mon_generic_request, node);
- op = le16_to_cpu(req->request->hdr.type);
- if (op == CEPH_MSG_STATFS)
- seq_printf(s, "%lld statfs\n", req->tid);
- else
- seq_printf(s, "%lld unknown\n", req->tid);
- }
-
- mutex_unlock(&monc->mutex);
- return 0;
-}
-
+/*
+ * mdsc debugfs
+ */
static int mdsc_show(struct seq_file *s, void *p)
{
- struct ceph_client *client = s->private;
- struct ceph_mds_client *mdsc = &client->mdsc;
+ struct ceph_fs_client *fsc = s->private;
+ struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req;
struct rb_node *rp;
int pathlen;
@@ -171,6 +77,8 @@
} else if (req->r_dentry) {
path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
&pathbase, 0);
+ if (IS_ERR(path))
+ path = NULL;
spin_lock(&req->r_dentry->d_lock);
seq_printf(s, " #%llx/%.*s (%s)",
ceph_ino(req->r_dentry->d_parent->d_inode),
@@ -187,6 +95,8 @@
if (req->r_old_dentry) {
path = ceph_mdsc_build_path(req->r_old_dentry, &pathlen,
&pathbase, 0);
+ if (IS_ERR(path))
+ path = NULL;
spin_lock(&req->r_old_dentry->d_lock);
seq_printf(s, " #%llx/%.*s (%s)",
ceph_ino(req->r_old_dentry->d_parent->d_inode),
@@ -210,61 +120,12 @@
return 0;
}
-static int osdc_show(struct seq_file *s, void *pp)
-{
- struct ceph_client *client = s->private;
- struct ceph_osd_client *osdc = &client->osdc;
- struct rb_node *p;
-
- mutex_lock(&osdc->request_mutex);
- for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
- struct ceph_osd_request *req;
- struct ceph_osd_request_head *head;
- struct ceph_osd_op *op;
- int num_ops;
- int opcode, olen;
- int i;
-
- req = rb_entry(p, struct ceph_osd_request, r_node);
-
- seq_printf(s, "%lld\tosd%d\t%d.%x\t", req->r_tid,
- req->r_osd ? req->r_osd->o_osd : -1,
- le32_to_cpu(req->r_pgid.pool),
- le16_to_cpu(req->r_pgid.ps));
-
- head = req->r_request->front.iov_base;
- op = (void *)(head + 1);
-
- num_ops = le16_to_cpu(head->num_ops);
- olen = le32_to_cpu(head->object_len);
- seq_printf(s, "%.*s", olen,
- (const char *)(head->ops + num_ops));
-
- if (req->r_reassert_version.epoch)
- seq_printf(s, "\t%u'%llu",
- (unsigned)le32_to_cpu(req->r_reassert_version.epoch),
- le64_to_cpu(req->r_reassert_version.version));
- else
- seq_printf(s, "\t");
-
- for (i = 0; i < num_ops; i++) {
- opcode = le16_to_cpu(op->op);
- seq_printf(s, "\t%s", ceph_osd_op_name(opcode));
- op++;
- }
-
- seq_printf(s, "\n");
- }
- mutex_unlock(&osdc->request_mutex);
- return 0;
-}
-
static int caps_show(struct seq_file *s, void *p)
{
- struct ceph_client *client = s->private;
+ struct ceph_fs_client *fsc = s->private;
int total, avail, used, reserved, min;
- ceph_reservation_status(client, &total, &avail, &used, &reserved, &min);
+ ceph_reservation_status(fsc, &total, &avail, &used, &reserved, &min);
seq_printf(s, "total\t\t%d\n"
"avail\t\t%d\n"
"used\t\t%d\n"
@@ -276,8 +137,8 @@
static int dentry_lru_show(struct seq_file *s, void *ptr)
{
- struct ceph_client *client = s->private;
- struct ceph_mds_client *mdsc = &client->mdsc;
+ struct ceph_fs_client *fsc = s->private;
+ struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_dentry_info *di;
spin_lock(&mdsc->dentry_lru_lock);
@@ -291,199 +152,124 @@
return 0;
}
-#define DEFINE_SHOW_FUNC(name) \
-static int name##_open(struct inode *inode, struct file *file) \
-{ \
- struct seq_file *sf; \
- int ret; \
- \
- ret = single_open(file, name, NULL); \
- sf = file->private_data; \
- sf->private = inode->i_private; \
- return ret; \
-} \
- \
-static const struct file_operations name##_fops = { \
- .open = name##_open, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .release = single_release, \
-};
+CEPH_DEFINE_SHOW_FUNC(mdsmap_show)
+CEPH_DEFINE_SHOW_FUNC(mdsc_show)
+CEPH_DEFINE_SHOW_FUNC(caps_show)
+CEPH_DEFINE_SHOW_FUNC(dentry_lru_show)
-DEFINE_SHOW_FUNC(monmap_show)
-DEFINE_SHOW_FUNC(mdsmap_show)
-DEFINE_SHOW_FUNC(osdmap_show)
-DEFINE_SHOW_FUNC(monc_show)
-DEFINE_SHOW_FUNC(mdsc_show)
-DEFINE_SHOW_FUNC(osdc_show)
-DEFINE_SHOW_FUNC(dentry_lru_show)
-DEFINE_SHOW_FUNC(caps_show)
+/*
+ * debugfs
+ */
static int congestion_kb_set(void *data, u64 val)
{
- struct ceph_client *client = (struct ceph_client *)data;
+ struct ceph_fs_client *fsc = (struct ceph_fs_client *)data;
- if (client)
- client->mount_args->congestion_kb = (int)val;
-
+ fsc->mount_options->congestion_kb = (int)val;
return 0;
}
static int congestion_kb_get(void *data, u64 *val)
{
- struct ceph_client *client = (struct ceph_client *)data;
+ struct ceph_fs_client *fsc = (struct ceph_fs_client *)data;
- if (client)
- *val = (u64)client->mount_args->congestion_kb;
-
+ *val = (u64)fsc->mount_options->congestion_kb;
return 0;
}
-
DEFINE_SIMPLE_ATTRIBUTE(congestion_kb_fops, congestion_kb_get,
congestion_kb_set, "%llu\n");
-int __init ceph_debugfs_init(void)
+
+void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc)
{
- ceph_debugfs_dir = debugfs_create_dir("ceph", NULL);
- if (!ceph_debugfs_dir)
- return -ENOMEM;
- return 0;
+ dout("ceph_fs_debugfs_cleanup\n");
+ debugfs_remove(fsc->debugfs_bdi);
+ debugfs_remove(fsc->debugfs_congestion_kb);
+ debugfs_remove(fsc->debugfs_mdsmap);
+ debugfs_remove(fsc->debugfs_caps);
+ debugfs_remove(fsc->debugfs_mdsc);
+ debugfs_remove(fsc->debugfs_dentry_lru);
}
-void ceph_debugfs_cleanup(void)
+int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
{
- debugfs_remove(ceph_debugfs_dir);
-}
+ char name[100];
+ int err = -ENOMEM;
-int ceph_debugfs_client_init(struct ceph_client *client)
-{
- int ret = 0;
- char name[80];
-
- snprintf(name, sizeof(name), "%pU.client%lld", &client->fsid,
- client->monc.auth->global_id);
-
- client->debugfs_dir = debugfs_create_dir(name, ceph_debugfs_dir);
- if (!client->debugfs_dir)
- goto out;
-
- client->monc.debugfs_file = debugfs_create_file("monc",
- 0600,
- client->debugfs_dir,
- client,
- &monc_show_fops);
- if (!client->monc.debugfs_file)
- goto out;
-
- client->mdsc.debugfs_file = debugfs_create_file("mdsc",
- 0600,
- client->debugfs_dir,
- client,
- &mdsc_show_fops);
- if (!client->mdsc.debugfs_file)
- goto out;
-
- client->osdc.debugfs_file = debugfs_create_file("osdc",
- 0600,
- client->debugfs_dir,
- client,
- &osdc_show_fops);
- if (!client->osdc.debugfs_file)
- goto out;
-
- client->debugfs_monmap = debugfs_create_file("monmap",
- 0600,
- client->debugfs_dir,
- client,
- &monmap_show_fops);
- if (!client->debugfs_monmap)
- goto out;
-
- client->debugfs_mdsmap = debugfs_create_file("mdsmap",
- 0600,
- client->debugfs_dir,
- client,
- &mdsmap_show_fops);
- if (!client->debugfs_mdsmap)
- goto out;
-
- client->debugfs_osdmap = debugfs_create_file("osdmap",
- 0600,
- client->debugfs_dir,
- client,
- &osdmap_show_fops);
- if (!client->debugfs_osdmap)
- goto out;
-
- client->debugfs_dentry_lru = debugfs_create_file("dentry_lru",
- 0600,
- client->debugfs_dir,
- client,
- &dentry_lru_show_fops);
- if (!client->debugfs_dentry_lru)
- goto out;
-
- client->debugfs_caps = debugfs_create_file("caps",
- 0400,
- client->debugfs_dir,
- client,
- &caps_show_fops);
- if (!client->debugfs_caps)
- goto out;
-
- client->debugfs_congestion_kb =
+ dout("ceph_fs_debugfs_init\n");
+ fsc->debugfs_congestion_kb =
debugfs_create_file("writeback_congestion_kb",
0600,
- client->debugfs_dir,
- client,
+ fsc->client->debugfs_dir,
+ fsc,
&congestion_kb_fops);
- if (!client->debugfs_congestion_kb)
+ if (!fsc->debugfs_congestion_kb)
goto out;
- sprintf(name, "../../bdi/%s", dev_name(client->sb->s_bdi->dev));
- client->debugfs_bdi = debugfs_create_symlink("bdi", client->debugfs_dir,
- name);
+ dout("a\n");
+
+ snprintf(name, sizeof(name), "../../bdi/%s",
+ dev_name(fsc->backing_dev_info.dev));
+ fsc->debugfs_bdi =
+ debugfs_create_symlink("bdi",
+ fsc->client->debugfs_dir,
+ name);
+ if (!fsc->debugfs_bdi)
+ goto out;
+
+ dout("b\n");
+ fsc->debugfs_mdsmap = debugfs_create_file("mdsmap",
+ 0600,
+ fsc->client->debugfs_dir,
+ fsc,
+ &mdsmap_show_fops);
+ if (!fsc->debugfs_mdsmap)
+ goto out;
+
+ dout("ca\n");
+ fsc->debugfs_mdsc = debugfs_create_file("mdsc",
+ 0600,
+ fsc->client->debugfs_dir,
+ fsc,
+ &mdsc_show_fops);
+ if (!fsc->debugfs_mdsc)
+ goto out;
+
+ dout("da\n");
+ fsc->debugfs_caps = debugfs_create_file("caps",
+ 0400,
+ fsc->client->debugfs_dir,
+ fsc,
+ &caps_show_fops);
+ if (!fsc->debugfs_caps)
+ goto out;
+
+ dout("ea\n");
+ fsc->debugfs_dentry_lru = debugfs_create_file("dentry_lru",
+ 0600,
+ fsc->client->debugfs_dir,
+ fsc,
+ &dentry_lru_show_fops);
+ if (!fsc->debugfs_dentry_lru)
+ goto out;
return 0;
out:
- ceph_debugfs_client_cleanup(client);
- return ret;
+ ceph_fs_debugfs_cleanup(fsc);
+ return err;
}
-void ceph_debugfs_client_cleanup(struct ceph_client *client)
-{
- debugfs_remove(client->debugfs_bdi);
- debugfs_remove(client->debugfs_caps);
- debugfs_remove(client->debugfs_dentry_lru);
- debugfs_remove(client->debugfs_osdmap);
- debugfs_remove(client->debugfs_mdsmap);
- debugfs_remove(client->debugfs_monmap);
- debugfs_remove(client->osdc.debugfs_file);
- debugfs_remove(client->mdsc.debugfs_file);
- debugfs_remove(client->monc.debugfs_file);
- debugfs_remove(client->debugfs_congestion_kb);
- debugfs_remove(client->debugfs_dir);
-}
#else /* CONFIG_DEBUG_FS */
-int __init ceph_debugfs_init(void)
+int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
{
return 0;
}
-void ceph_debugfs_cleanup(void)
-{
-}
-
-int ceph_debugfs_client_init(struct ceph_client *client)
-{
- return 0;
-}
-
-void ceph_debugfs_client_cleanup(struct ceph_client *client)
+void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc)
{
}
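[Editor's note] For reference, the removed DEFINE_SHOW_FUNC above shows the boilerplate that the shared CEPH_DEFINE_SHOW_FUNC presumably still generates: a single_open() wrapper that stashes inode->i_private (here the ceph_fs_client) into the seq_file. Reconstructed from the removed lines; the exact definition in <linux/ceph/debugfs.h> may differ:

#define CEPH_DEFINE_SHOW_FUNC(name)					\
static int name##_open(struct inode *inode, struct file *file)		\
{									\
	struct seq_file *sf;						\
	int ret = single_open(file, name, NULL);			\
									\
	sf = file->private_data;					\
	sf->private = inode->i_private;	/* the ceph_fs_client */	\
	return ret;							\
}									\
									\
static const struct file_operations name##_fops = {			\
	.open		= name##_open,					\
	.read		= seq_read,					\
	.llseek		= seq_lseek,					\
	.release	= single_release,				\
};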
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 67bbb41..e0a2dc6 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1,4 +1,4 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
#include <linux/spinlock.h>
#include <linux/fs_struct.h>
@@ -7,6 +7,7 @@
#include <linux/sched.h>
#include "super.h"
+#include "mds_client.h"
/*
* Directory operations: readdir, lookup, create, link, unlink,
@@ -46,7 +47,7 @@
else
dentry->d_op = &ceph_snap_dentry_ops;
- di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS);
+ di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
if (!di)
return -ENOMEM; /* oh well */
@@ -94,10 +95,7 @@
*/
static int __dcache_readdir(struct file *filp,
void *dirent, filldir_t filldir)
- __releases(inode->i_lock)
- __acquires(inode->i_lock)
{
- struct inode *inode = filp->f_dentry->d_inode;
struct ceph_file_info *fi = filp->private_data;
struct dentry *parent = filp->f_dentry;
struct inode *dir = parent->d_inode;
@@ -153,7 +151,6 @@
atomic_inc(&dentry->d_count);
spin_unlock(&dcache_lock);
- spin_unlock(&inode->i_lock);
dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
@@ -171,35 +168,30 @@
} else {
dput(last);
}
- last = NULL;
}
-
- spin_lock(&inode->i_lock);
- spin_lock(&dcache_lock);
-
last = dentry;
if (err < 0)
- goto out_unlock;
+ goto out;
- p = p->prev;
filp->f_pos++;
/* make sure a dentry wasn't dropped while we didn't have dcache_lock */
- if ((ceph_inode(dir)->i_ceph_flags & CEPH_I_COMPLETE))
- goto more;
- dout(" lost I_COMPLETE on %p; falling back to mds\n", dir);
- err = -EAGAIN;
+ if (!ceph_i_test(dir, CEPH_I_COMPLETE)) {
+ dout(" lost I_COMPLETE on %p; falling back to mds\n", dir);
+ err = -EAGAIN;
+ goto out;
+ }
+
+ spin_lock(&dcache_lock);
+ p = p->prev; /* advance to next dentry */
+ goto more;
out_unlock:
spin_unlock(&dcache_lock);
-
- if (last) {
- spin_unlock(&inode->i_lock);
+out:
+ if (last)
dput(last);
- spin_lock(&inode->i_lock);
- }
-
return err;
}
@@ -227,15 +219,15 @@
struct ceph_file_info *fi = filp->private_data;
struct inode *inode = filp->f_dentry->d_inode;
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_client *client = ceph_inode_to_client(inode);
- struct ceph_mds_client *mdsc = &client->mdsc;
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
unsigned frag = fpos_frag(filp->f_pos);
int off = fpos_off(filp->f_pos);
int err;
u32 ftype;
struct ceph_mds_reply_info_parsed *rinfo;
- const int max_entries = client->mount_args->max_readdir;
- const int max_bytes = client->mount_args->max_readdir_bytes;
+ const int max_entries = fsc->mount_options->max_readdir;
+ const int max_bytes = fsc->mount_options->max_readdir_bytes;
dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off);
if (fi->at_end)
@@ -267,17 +259,17 @@
/* can we use the dcache? */
spin_lock(&inode->i_lock);
if ((filp->f_pos == 2 || fi->dentry) &&
- !ceph_test_opt(client, NOASYNCREADDIR) &&
+ !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
ceph_snap(inode) != CEPH_SNAPDIR &&
(ci->i_ceph_flags & CEPH_I_COMPLETE) &&
__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
+ spin_unlock(&inode->i_lock);
err = __dcache_readdir(filp, dirent, filldir);
- if (err != -EAGAIN) {
- spin_unlock(&inode->i_lock);
+ if (err != -EAGAIN)
return err;
- }
+ } else {
+ spin_unlock(&inode->i_lock);
}
- spin_unlock(&inode->i_lock);
if (fi->dentry) {
err = note_last_dentry(fi, fi->dentry->d_name.name,
fi->dentry->d_name.len);
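[Editor's note] The readdir hunk above fixes the lock ordering: i_lock is now dropped before calling into __dcache_readdir() rather than juggled inside it. A toy userspace model of the pattern, using a pthread mutex in place of the spinlock; all names here are hypothetical:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t i_lock = PTHREAD_MUTEX_INITIALIZER;

static int dcache_readdir_stub(void)
{
	return -EAGAIN;				/* fall back to asking the MDS */
}

static int readdir_fastpath(int can_use_dcache)
{
	pthread_mutex_lock(&i_lock);
	if (can_use_dcache) {
		pthread_mutex_unlock(&i_lock);	/* never scan holding i_lock */
		return dcache_readdir_stub();
	}
	pthread_mutex_unlock(&i_lock);
	return 0;
}

int main(void)
{
	printf("fastpath -> %d\n", readdir_fastpath(1));
	return 0;
}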
@@ -487,14 +479,13 @@
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
struct dentry *dentry, int err)
{
- struct ceph_client *client = ceph_sb_to_client(dentry->d_sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
struct inode *parent = dentry->d_parent->d_inode;
/* .snap dir? */
if (err == -ENOENT &&
- ceph_vino(parent).ino != CEPH_INO_ROOT && /* no .snap in root dir */
strcmp(dentry->d_name.name,
- client->mount_args->snapdir_name) == 0) {
+ fsc->mount_options->snapdir_name) == 0) {
struct inode *inode = ceph_get_snapdir(parent);
dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
dentry, dentry->d_name.len, dentry->d_name.name, inode);
@@ -539,8 +530,8 @@
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
struct nameidata *nd)
{
- struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
- struct ceph_mds_client *mdsc = &client->mdsc;
+ struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req;
int op;
int err;
@@ -572,7 +563,7 @@
spin_lock(&dir->i_lock);
dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
if (strncmp(dentry->d_name.name,
- client->mount_args->snapdir_name,
+ fsc->mount_options->snapdir_name,
dentry->d_name.len) &&
!is_root_ceph_dentry(dir, dentry) &&
(ci->i_ceph_flags & CEPH_I_COMPLETE) &&
@@ -629,8 +620,8 @@
static int ceph_mknod(struct inode *dir, struct dentry *dentry,
int mode, dev_t rdev)
{
- struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
- struct ceph_mds_client *mdsc = &client->mdsc;
+ struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req;
int err;
@@ -685,8 +676,8 @@
static int ceph_symlink(struct inode *dir, struct dentry *dentry,
const char *dest)
{
- struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
- struct ceph_mds_client *mdsc = &client->mdsc;
+ struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req;
int err;
@@ -716,8 +707,8 @@
static int ceph_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
- struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
- struct ceph_mds_client *mdsc = &client->mdsc;
+ struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req;
int err = -EROFS;
int op;
@@ -758,8 +749,8 @@
static int ceph_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *dentry)
{
- struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
- struct ceph_mds_client *mdsc = &client->mdsc;
+ struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req;
int err;
@@ -813,8 +804,8 @@
*/
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
- struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
- struct ceph_mds_client *mdsc = &client->mdsc;
+ struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
struct inode *inode = dentry->d_inode;
struct ceph_mds_request *req;
int err = -EROFS;
@@ -854,8 +845,8 @@
static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
- struct ceph_client *client = ceph_sb_to_client(old_dir->i_sb);
- struct ceph_mds_client *mdsc = &client->mdsc;
+ struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req;
int err;
@@ -1021,11 +1012,15 @@
static void ceph_dentry_release(struct dentry *dentry)
{
struct ceph_dentry_info *di = ceph_dentry(dentry);
- struct inode *parent_inode = dentry->d_parent->d_inode;
- u64 snapid = ceph_snap(parent_inode);
+ struct inode *parent_inode = NULL;
+ u64 snapid = CEPH_NOSNAP;
+ if (!IS_ROOT(dentry)) {
+ parent_inode = dentry->d_parent->d_inode;
+ if (parent_inode)
+ snapid = ceph_snap(parent_inode);
+ }
dout("dentry_release %p parent %p\n", dentry, parent_inode);
-
if (parent_inode && snapid != CEPH_SNAPDIR) {
struct ceph_inode_info *ci = ceph_inode(parent_inode);
@@ -1072,7 +1067,7 @@
struct ceph_inode_info *ci = ceph_inode(inode);
int left;
- if (!ceph_test_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
+ if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
return -EISDIR;
if (!cf->dir_info) {
@@ -1173,7 +1168,7 @@
dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
dn->d_name.len, dn->d_name.name);
if (di) {
- mdsc = &ceph_sb_to_client(dn->d_sb)->mdsc;
+ mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
spin_lock(&mdsc->dentry_lru_lock);
list_add_tail(&di->lru, &mdsc->dentry_lru);
mdsc->num_dentry++;
@@ -1189,7 +1184,7 @@
dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn,
dn->d_name.len, dn->d_name.name, di->offset);
if (di) {
- mdsc = &ceph_sb_to_client(dn->d_sb)->mdsc;
+ mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
spin_lock(&mdsc->dentry_lru_lock);
list_move_tail(&di->lru, &mdsc->dentry_lru);
spin_unlock(&mdsc->dentry_lru_lock);
@@ -1204,7 +1199,7 @@
dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
dn->d_name.len, dn->d_name.name);
if (di) {
- mdsc = &ceph_sb_to_client(dn->d_sb)->mdsc;
+ mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
spin_lock(&mdsc->dentry_lru_lock);
list_del_init(&di->lru);
mdsc->num_dentry--;
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index 4480cb1..2297d94 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -1,10 +1,11 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
#include <linux/exportfs.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#include "super.h"
+#include "mds_client.h"
/*
* NFS export support
@@ -42,32 +43,37 @@
static int ceph_encode_fh(struct dentry *dentry, u32 *rawfh, int *max_len,
int connectable)
{
+ int type;
struct ceph_nfs_fh *fh = (void *)rawfh;
struct ceph_nfs_confh *cfh = (void *)rawfh;
struct dentry *parent = dentry->d_parent;
struct inode *inode = dentry->d_inode;
- int type;
+ int connected_handle_length = sizeof(*cfh)/4;
+ int handle_length = sizeof(*fh)/4;
/* don't re-export snaps */
if (ceph_snap(inode) != CEPH_NOSNAP)
return -EINVAL;
- if (*max_len >= sizeof(*cfh)) {
+ if (*max_len >= connected_handle_length) {
dout("encode_fh %p connectable\n", dentry);
cfh->ino = ceph_ino(dentry->d_inode);
cfh->parent_ino = ceph_ino(parent->d_inode);
cfh->parent_name_hash = parent->d_name.hash;
- *max_len = sizeof(*cfh);
+ *max_len = connected_handle_length;
type = 2;
- } else if (*max_len > sizeof(*fh)) {
- if (connectable)
- return -ENOSPC;
+ } else if (*max_len >= handle_length) {
+ if (connectable) {
+ *max_len = connected_handle_length;
+ return 255;
+ }
dout("encode_fh %p\n", dentry);
fh->ino = ceph_ino(dentry->d_inode);
- *max_len = sizeof(*fh);
+ *max_len = handle_length;
type = 1;
} else {
- return -ENOSPC;
+ *max_len = handle_length;
+ return 255;
}
return type;
}
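[Editor's note] The encode_fh change above adopts the convention of reporting the required length in *max_len and returning 255 when the caller's buffer is too small, instead of failing with -ENOSPC. A standalone model of that contract (sizes are illustrative, expressed in 4-byte words as in the kernel interface):

#include <stdio.h>

#define CONNECTED_LEN 3	/* ino + parent ino + parent name hash */
#define SIMPLE_LEN    2	/* ino only */

static int encode_fh_model(int *max_len, int connectable)
{
	if (*max_len >= CONNECTED_LEN) {
		*max_len = CONNECTED_LEN;
		return 2;			/* connectable handle */
	}
	if (*max_len >= SIMPLE_LEN && !connectable) {
		*max_len = SIMPLE_LEN;
		return 1;			/* simple handle */
	}
	*max_len = connectable ? CONNECTED_LEN : SIMPLE_LEN;
	return 255;				/* too small; length reported */
}

int main(void)
{
	int len = 1;
	int type = encode_fh_model(&len, 1);

	printf("type %d, needed %d words\n", type, len);
	return 0;
}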
@@ -115,7 +121,7 @@
static struct dentry *__cfh_to_dentry(struct super_block *sb,
struct ceph_nfs_confh *cfh)
{
- struct ceph_mds_client *mdsc = &ceph_sb_to_client(sb)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
struct inode *inode;
struct dentry *dentry;
struct ceph_vino vino;
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 8c044a4..e77c28c 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1,5 +1,6 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
+#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
@@ -38,8 +39,8 @@
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
- struct ceph_client *client = ceph_sb_to_client(sb);
- struct ceph_mds_client *mdsc = &client->mdsc;
+ struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req;
int want_auth = USE_ANY_MDS;
int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;
@@ -117,8 +118,8 @@
int ceph_open(struct inode *inode, struct file *file)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_client *client = ceph_sb_to_client(inode->i_sb);
- struct ceph_mds_client *mdsc = &client->mdsc;
+ struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req;
struct ceph_file_info *cf = file->private_data;
struct inode *parent_inode = file->f_dentry->d_parent->d_inode;
@@ -216,8 +217,8 @@
struct nameidata *nd, int mode,
int locked_dir)
{
- struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
- struct ceph_mds_client *mdsc = &client->mdsc;
+ struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
struct file *file = nd->intent.open.file;
struct inode *parent_inode = get_dentry_parent_inode(file->f_dentry);
struct ceph_mds_request *req;
@@ -270,163 +271,6 @@
}
/*
- * build a vector of user pages
- */
-static struct page **get_direct_page_vector(const char __user *data,
- int num_pages,
- loff_t off, size_t len)
-{
- struct page **pages;
- int rc;
-
- pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
- if (!pages)
- return ERR_PTR(-ENOMEM);
-
- down_read(&current->mm->mmap_sem);
- rc = get_user_pages(current, current->mm, (unsigned long)data,
- num_pages, 0, 0, pages, NULL);
- up_read(&current->mm->mmap_sem);
- if (rc < 0)
- goto fail;
- return pages;
-
-fail:
- kfree(pages);
- return ERR_PTR(rc);
-}
-
-static void put_page_vector(struct page **pages, int num_pages)
-{
- int i;
-
- for (i = 0; i < num_pages; i++)
- put_page(pages[i]);
- kfree(pages);
-}
-
-void ceph_release_page_vector(struct page **pages, int num_pages)
-{
- int i;
-
- for (i = 0; i < num_pages; i++)
- __free_pages(pages[i], 0);
- kfree(pages);
-}
-
-/*
- * allocate a vector new pages
- */
-static struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)
-{
- struct page **pages;
- int i;
-
- pages = kmalloc(sizeof(*pages) * num_pages, flags);
- if (!pages)
- return ERR_PTR(-ENOMEM);
- for (i = 0; i < num_pages; i++) {
- pages[i] = __page_cache_alloc(flags);
- if (pages[i] == NULL) {
- ceph_release_page_vector(pages, i);
- return ERR_PTR(-ENOMEM);
- }
- }
- return pages;
-}
-
-/*
- * copy user data into a page vector
- */
-static int copy_user_to_page_vector(struct page **pages,
- const char __user *data,
- loff_t off, size_t len)
-{
- int i = 0;
- int po = off & ~PAGE_CACHE_MASK;
- int left = len;
- int l, bad;
-
- while (left > 0) {
- l = min_t(int, PAGE_CACHE_SIZE-po, left);
- bad = copy_from_user(page_address(pages[i]) + po, data, l);
- if (bad == l)
- return -EFAULT;
- data += l - bad;
- left -= l - bad;
- po += l - bad;
- if (po == PAGE_CACHE_SIZE) {
- po = 0;
- i++;
- }
- }
- return len;
-}
-
-/*
- * copy user data from a page vector into a user pointer
- */
-static int copy_page_vector_to_user(struct page **pages, char __user *data,
- loff_t off, size_t len)
-{
- int i = 0;
- int po = off & ~PAGE_CACHE_MASK;
- int left = len;
- int l, bad;
-
- while (left > 0) {
- l = min_t(int, left, PAGE_CACHE_SIZE-po);
- bad = copy_to_user(data, page_address(pages[i]) + po, l);
- if (bad == l)
- return -EFAULT;
- data += l - bad;
- left -= l - bad;
- if (po) {
- po += l - bad;
- if (po == PAGE_CACHE_SIZE)
- po = 0;
- }
- i++;
- }
- return len;
-}
-
-/*
- * Zero an extent within a page vector. Offset is relative to the
- * start of the first page.
- */
-static void zero_page_vector_range(int off, int len, struct page **pages)
-{
- int i = off >> PAGE_CACHE_SHIFT;
-
- off &= ~PAGE_CACHE_MASK;
-
- dout("zero_page_vector_page %u~%u\n", off, len);
-
- /* leading partial page? */
- if (off) {
- int end = min((int)PAGE_CACHE_SIZE, off + len);
- dout("zeroing %d %p head from %d\n", i, pages[i],
- (int)off);
- zero_user_segment(pages[i], off, end);
- len -= (end - off);
- i++;
- }
- while (len >= PAGE_CACHE_SIZE) {
- dout("zeroing %d %p len=%d\n", i, pages[i], len);
- zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
- len -= PAGE_CACHE_SIZE;
- i++;
- }
- /* trailing partial page? */
- if (len) {
- dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len);
- zero_user_segment(pages[i], 0, len);
- }
-}
-
-
-/*
* Read a range of bytes striped over one or more objects. Iterate over
* objects we stripe over. (That's not atomic, but good enough for now.)
*
@@ -438,7 +282,7 @@
struct page **pages, int num_pages,
int *checkeof)
{
- struct ceph_client *client = ceph_inode_to_client(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
u64 pos, this_len;
int page_off = off & ~PAGE_CACHE_MASK; /* first byte's offset in page */
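[Editor's note] Striped I/O paths like this repeatedly need the page count covered by a byte range; calc_pages_for() in the ceph tree computes the same quantity. A standalone sketch of the arithmetic, assuming 4 KB pages:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)

static uint64_t pages_for(uint64_t off, uint64_t len)
{
	return ((off + len + PAGE_SIZE - 1) >> PAGE_SHIFT) -
	       (off >> PAGE_SHIFT);
}

int main(void)
{
	/* 100 bytes starting 50 bytes before a page boundary: 2 pages */
	printf("%llu\n", (unsigned long long)pages_for(PAGE_SIZE - 50, 100));
	return 0;
}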
@@ -459,7 +303,7 @@
more:
this_len = left;
- ret = ceph_osdc_readpages(&client->osdc, ceph_vino(inode),
+ ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
&ci->i_layout, pos, &this_len,
ci->i_truncate_seq,
ci->i_truncate_size,
@@ -477,8 +321,8 @@
if (read < pos - off) {
dout(" zero gap %llu to %llu\n", off + read, pos);
- zero_page_vector_range(page_off + read,
- pos - off - read, pages);
+ ceph_zero_page_vector_range(page_off + read,
+ pos - off - read, pages);
}
pos += ret;
read = pos - off;
@@ -495,8 +339,8 @@
/* was original extent fully inside i_size? */
if (pos + left <= inode->i_size) {
dout("zero tail\n");
- zero_page_vector_range(page_off + read, len - read,
- pages);
+ ceph_zero_page_vector_range(page_off + read, len - read,
+ pages);
read = len;
goto out;
}
@@ -531,7 +375,7 @@
(file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
if (file->f_flags & O_DIRECT) {
- pages = get_direct_page_vector(data, num_pages, off, len);
+ pages = ceph_get_direct_page_vector(data, num_pages, off, len);
/*
* flush any page cache pages in this range. this
@@ -552,13 +396,13 @@
ret = striped_read(inode, off, len, pages, num_pages, checkeof);
if (ret >= 0 && (file->f_flags & O_DIRECT) == 0)
- ret = copy_page_vector_to_user(pages, data, off, ret);
+ ret = ceph_copy_page_vector_to_user(pages, data, off, ret);
if (ret >= 0)
*poff = off + ret;
done:
if (file->f_flags & O_DIRECT)
- put_page_vector(pages, num_pages);
+ ceph_put_page_vector(pages, num_pages);
else
ceph_release_page_vector(pages, num_pages);
dout("sync_read result %d\n", ret);
@@ -594,7 +438,7 @@
{
struct inode *inode = file->f_dentry->d_inode;
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_client *client = ceph_inode_to_client(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_osd_request *req;
struct page **pages;
int num_pages;
@@ -642,7 +486,7 @@
*/
more:
len = left;
- req = ceph_osdc_new_request(&client->osdc, &ci->i_layout,
+ req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
ceph_vino(inode), pos, &len,
CEPH_OSD_OP_WRITE, flags,
ci->i_snap_realm->cached_context,
@@ -655,7 +499,7 @@
num_pages = calc_pages_for(pos, len);
if (file->f_flags & O_DIRECT) {
- pages = get_direct_page_vector(data, num_pages, pos, len);
+ pages = ceph_get_direct_page_vector(data, num_pages, pos, len);
if (IS_ERR(pages)) {
ret = PTR_ERR(pages);
goto out;
@@ -673,7 +517,7 @@
ret = PTR_ERR(pages);
goto out;
}
- ret = copy_user_to_page_vector(pages, data, pos, len);
+ ret = ceph_copy_user_to_page_vector(pages, data, pos, len);
if (ret < 0) {
ceph_release_page_vector(pages, num_pages);
goto out;
@@ -689,7 +533,7 @@
req->r_num_pages = num_pages;
req->r_inode = inode;
- ret = ceph_osdc_start_request(&client->osdc, req, false);
+ ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
if (!ret) {
if (req->r_safe_callback) {
/*
@@ -697,15 +541,15 @@
* start_request so that a tid has been assigned.
*/
spin_lock(&ci->i_unsafe_lock);
- list_add(&ci->i_unsafe_writes, &req->r_unsafe_item);
+ list_add(&req->r_unsafe_item, &ci->i_unsafe_writes);
spin_unlock(&ci->i_unsafe_lock);
ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
}
- ret = ceph_osdc_wait_request(&client->osdc, req);
+ ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
}
if (file->f_flags & O_DIRECT)
- put_page_vector(pages, num_pages);
+ ceph_put_page_vector(pages, num_pages);
else if (file->f_flags & O_SYNC)
ceph_release_page_vector(pages, num_pages);
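[Editor's note] The one-line list_add() fix above corrects swapped arguments: the kernel's list_add() takes the new entry first and the list head second, so the old call spliced the list head onto the request. A minimal userspace model of the intrusive-list convention:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

int main(void)
{
	struct list_head head = { &head, &head };
	struct list_head item;

	list_add(&item, &head);		/* correct order: new entry first */
	printf("%s\n", head.next == &item ? "linked" : "broken");
	return 0;
}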
@@ -814,7 +658,8 @@
struct ceph_file_info *fi = file->private_data;
struct inode *inode = file->f_dentry->d_inode;
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_osd_client *osdc = &ceph_sb_to_client(inode->i_sb)->osdc;
+ struct ceph_osd_client *osdc =
+ &ceph_sb_to_client(inode->i_sb)->client->osdc;
loff_t endoff = pos + iov->iov_len;
int want, got = 0;
int ret, err;
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 5d893d3..1d6a45b 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1,4 +1,4 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
#include <linux/module.h>
#include <linux/fs.h>
@@ -13,7 +13,8 @@
#include <linux/pagevec.h>
#include "super.h"
-#include "decode.h"
+#include "mds_client.h"
+#include <linux/ceph/decode.h>
/*
* Ceph inode operations
@@ -384,7 +385,7 @@
*/
if (ci->i_snap_realm) {
struct ceph_mds_client *mdsc =
- &ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
+ ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
struct ceph_snap_realm *realm = ci->i_snap_realm;
dout(" dropping residual ref to snap realm %p\n", realm);
@@ -677,6 +678,7 @@
if (ci->i_files == 0 && ci->i_subdirs == 0 &&
ceph_snap(inode) == CEPH_NOSNAP &&
(le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) &&
+ (issued & CEPH_CAP_FILE_EXCL) == 0 &&
(ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
dout(" marking %p complete (empty)\n", inode);
ci->i_ceph_flags |= CEPH_I_COMPLETE;
@@ -684,7 +686,7 @@
}
/* it may be better to set st_size in getattr instead? */
- if (ceph_test_opt(ceph_sb_to_client(inode->i_sb), RBYTES))
+ if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), RBYTES))
inode->i_size = ci->i_rbytes;
break;
default:
@@ -844,7 +846,7 @@
* the caller) if we fail.
*/
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
- bool *prehash)
+ bool *prehash, bool set_offset)
{
struct dentry *realdn;
@@ -876,7 +878,8 @@
}
if ((!prehash || *prehash) && d_unhashed(dn))
d_rehash(dn);
- ceph_set_dentry_offset(dn);
+ if (set_offset)
+ ceph_set_dentry_offset(dn);
out:
return dn;
}
@@ -899,7 +902,7 @@
struct inode *in = NULL;
struct ceph_mds_reply_inode *ininfo;
struct ceph_vino vino;
- struct ceph_client *client = ceph_sb_to_client(sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
int i = 0;
int err = 0;
@@ -963,7 +966,7 @@
*/
if (rinfo->head->is_dentry && !req->r_aborted &&
(rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
- client->mount_args->snapdir_name,
+ fsc->mount_options->snapdir_name,
req->r_dentry->d_name.len))) {
/*
* lookup link rename : null -> possibly existing inode
@@ -1061,7 +1064,7 @@
d_delete(dn);
goto done;
}
- dn = splice_dentry(dn, in, &have_lease);
+ dn = splice_dentry(dn, in, &have_lease, true);
if (IS_ERR(dn)) {
err = PTR_ERR(dn);
goto done;
@@ -1104,7 +1107,7 @@
goto done;
}
dout(" linking snapped dir %p to dn %p\n", in, dn);
- dn = splice_dentry(dn, in, NULL);
+ dn = splice_dentry(dn, in, NULL, true);
if (IS_ERR(dn)) {
err = PTR_ERR(dn);
goto done;
@@ -1229,14 +1232,14 @@
in = dn->d_inode;
} else {
in = ceph_get_inode(parent->d_sb, vino);
- if (in == NULL) {
+ if (IS_ERR(in)) {
dout("new_inode badness\n");
d_delete(dn);
dput(dn);
- err = -ENOMEM;
+ err = PTR_ERR(in);
goto out;
}
- dn = splice_dentry(dn, in, NULL);
+ dn = splice_dentry(dn, in, NULL, false);
if (IS_ERR(dn))
dn = NULL;
}
@@ -1531,7 +1534,7 @@
struct inode *parent_inode = dentry->d_parent->d_inode;
const unsigned int ia_valid = attr->ia_valid;
struct ceph_mds_request *req;
- struct ceph_mds_client *mdsc = &ceph_sb_to_client(dentry->d_sb)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
int issued;
int release = 0, dirtied = 0;
int mask = 0;
@@ -1726,8 +1729,8 @@
*/
int ceph_do_getattr(struct inode *inode, int mask)
{
- struct ceph_client *client = ceph_sb_to_client(inode->i_sb);
- struct ceph_mds_client *mdsc = &client->mdsc;
+ struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req;
int err;
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index 76e307d..8888c9ba 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -1,8 +1,10 @@
#include <linux/in.h>
-#include "ioctl.h"
#include "super.h"
-#include "ceph_debug.h"
+#include "mds_client.h"
+#include <linux/ceph/ceph_debug.h>
+
+#include "ioctl.h"
/*
@@ -37,7 +39,7 @@
{
struct inode *inode = file->f_dentry->d_inode;
struct inode *parent_inode = file->f_dentry->d_parent->d_inode;
- struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
struct ceph_mds_request *req;
struct ceph_ioctl_layout l;
int err, i;
@@ -90,6 +92,68 @@
}
/*
+ * Set a layout policy on a directory inode. All items in the tree
+ * rooted at this inode will inherit this layout on creation
+ * (it doesn't apply retroactively), unless a subdirectory has
+ * its own layout policy.
+ */
+static long ceph_ioctl_set_layout_policy(struct file *file, void __user *arg)
+{
+ struct inode *inode = file->f_dentry->d_inode;
+ struct ceph_mds_request *req;
+ struct ceph_ioctl_layout l;
+ int err, i;
+ struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+
+ /* copy and validate */
+ if (copy_from_user(&l, arg, sizeof(l)))
+ return -EFAULT;
+
+ if ((l.object_size & ~PAGE_MASK) ||
+ (l.stripe_unit & ~PAGE_MASK) ||
+ !l.stripe_unit ||
+ (l.object_size &&
+ (unsigned)l.object_size % (unsigned)l.stripe_unit))
+ return -EINVAL;
+
+ /* make sure it's a valid data pool */
+ if (l.data_pool > 0) {
+ mutex_lock(&mdsc->mutex);
+ err = -EINVAL;
+ for (i = 0; i < mdsc->mdsmap->m_num_data_pg_pools; i++)
+ if (mdsc->mdsmap->m_data_pg_pools[i] == l.data_pool) {
+ err = 0;
+ break;
+ }
+ mutex_unlock(&mdsc->mutex);
+ if (err)
+ return err;
+ }
+
+ req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETDIRLAYOUT,
+ USE_AUTH_MDS);
+
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ req->r_inode = igrab(inode);
+
+ req->r_args.setlayout.layout.fl_stripe_unit =
+ cpu_to_le32(l.stripe_unit);
+ req->r_args.setlayout.layout.fl_stripe_count =
+ cpu_to_le32(l.stripe_count);
+ req->r_args.setlayout.layout.fl_object_size =
+ cpu_to_le32(l.object_size);
+ req->r_args.setlayout.layout.fl_pg_pool =
+ cpu_to_le32(l.data_pool);
+ req->r_args.setlayout.layout.fl_pg_preferred =
+ cpu_to_le32(l.preferred_osd);
+
+ err = ceph_mdsc_do_request(mdsc, inode, req);
+ ceph_mdsc_put_request(req);
+ return err;
+}
+
+/*
* Return object name, size/offset information, and location (OSD
* number, network address) for a given file offset.
*/
@@ -98,7 +162,8 @@
struct ceph_ioctl_dataloc dl;
struct inode *inode = file->f_dentry->d_inode;
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_osd_client *osdc = &ceph_sb_to_client(inode->i_sb)->osdc;
+ struct ceph_osd_client *osdc =
+ &ceph_sb_to_client(inode->i_sb)->client->osdc;
u64 len = 1, olen;
u64 tmp;
struct ceph_object_layout ol;
@@ -174,11 +239,15 @@
case CEPH_IOC_SET_LAYOUT:
return ceph_ioctl_set_layout(file, (void __user *)arg);
+ case CEPH_IOC_SET_LAYOUT_POLICY:
+ return ceph_ioctl_set_layout_policy(file, (void __user *)arg);
+
case CEPH_IOC_GET_DATALOC:
return ceph_ioctl_get_dataloc(file, (void __user *)arg);
case CEPH_IOC_LAZYIO:
return ceph_ioctl_lazyio(file);
}
+
return -ENOTTY;
}
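[Editor's note] A hypothetical userspace caller for the new CEPH_IOC_SET_LAYOUT_POLICY ioctl (defined in the ioctl.h hunk below). Field names follow struct ceph_ioctl_layout as used in the handler above; the header include path and the concrete values are assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "ioctl.h"	/* CEPH_IOC_SET_LAYOUT_POLICY, struct ceph_ioctl_layout */

int main(int argc, char **argv)
{
	struct ceph_ioctl_layout l = {
		.stripe_unit  = 4194304,	/* page-aligned, non-zero */
		.stripe_count = 1,
		.object_size  = 4194304,	/* multiple of stripe_unit */
		.data_pool    = 0,		/* <= 0 skips the pool check */
	};
	int fd = open(argc > 1 ? argv[1] : ".", O_RDONLY);

	if (fd < 0 || ioctl(fd, CEPH_IOC_SET_LAYOUT_POLICY, &l) < 0) {
		perror("set layout policy");
		return 1;
	}
	close(fd);
	return 0;
}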
diff --git a/fs/ceph/ioctl.h b/fs/ceph/ioctl.h
index 88451a3..a6ce54e 100644
--- a/fs/ceph/ioctl.h
+++ b/fs/ceph/ioctl.h
@@ -4,7 +4,7 @@
#include <linux/ioctl.h>
#include <linux/types.h>
-#define CEPH_IOCTL_MAGIC 0x97
+#define CEPH_IOCTL_MAGIC 0x98
/* just use u64 to align sanely on all archs */
struct ceph_ioctl_layout {
@@ -17,6 +17,8 @@
struct ceph_ioctl_layout)
#define CEPH_IOC_SET_LAYOUT _IOW(CEPH_IOCTL_MAGIC, 2, \
struct ceph_ioctl_layout)
+#define CEPH_IOC_SET_LAYOUT_POLICY _IOW(CEPH_IOCTL_MAGIC, 5, \
+ struct ceph_ioctl_layout)
/*
* Extract identity, address of the OSD and object storing a given
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index ae85af0..40abde9 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -1,11 +1,11 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
#include <linux/file.h>
#include <linux/namei.h>
#include "super.h"
#include "mds_client.h"
-#include "pagelist.h"
+#include <linux/ceph/pagelist.h>
/**
* Implement fcntl and flock locking functions.
@@ -16,7 +16,7 @@
{
struct inode *inode = file->f_dentry->d_inode;
struct ceph_mds_client *mdsc =
- &ceph_sb_to_client(inode->i_sb)->mdsc;
+ ceph_sb_to_client(inode->i_sb)->mdsc;
struct ceph_mds_request *req;
int err;
@@ -82,7 +82,8 @@
length = fl->fl_end - fl->fl_start + 1;
err = ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
- (u64)fl->fl_pid, (u64)fl->fl_nspid,
+ (u64)fl->fl_pid,
+ (u64)(unsigned long)fl->fl_nspid,
lock_cmd, fl->fl_start,
length, wait);
if (!err) {
@@ -92,7 +93,8 @@
/* undo! This should only happen if the kernel detects
* local deadlock. */
ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
- (u64)fl->fl_pid, (u64)fl->fl_nspid,
+ (u64)fl->fl_pid,
+ (u64)(unsigned long)fl->fl_nspid,
CEPH_LOCK_UNLOCK, fl->fl_start,
length, 0);
dout("got %d on posix_lock_file, undid lock", err);
@@ -132,7 +134,8 @@
length = fl->fl_end - fl->fl_start + 1;
err = ceph_lock_message(CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK,
- file, (u64)fl->fl_pid, (u64)fl->fl_nspid,
+ file, (u64)fl->fl_pid,
+ (u64)(unsigned long)fl->fl_nspid,
lock_cmd, fl->fl_start,
length, wait);
if (!err) {
@@ -141,7 +144,7 @@
ceph_lock_message(CEPH_LOCK_FLOCK,
CEPH_MDS_OP_SETFILELOCK,
file, (u64)fl->fl_pid,
- (u64)fl->fl_nspid,
+ (u64)(unsigned long)fl->fl_nspid,
CEPH_LOCK_UNLOCK, fl->fl_start,
length, 0);
dout("got %d on flock_lock_file_wait, undid lock", err);
@@ -178,8 +181,9 @@
* Encode the flock and fcntl locks for the given inode into the pagelist.
* Format is: #fcntl locks, sequential fcntl locks, #flock locks,
* sequential flock locks.
- * Must be called with BLK already held, and the lock numbers should have
- * been gathered under the same lock holding window.
+ * Must be called with lock_flocks() already held.
+ * If we encounter more of a specific lock type than expected,
+ * we return -ENOSPC.
*/
int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist,
int num_fcntl_locks, int num_flock_locks)
@@ -187,6 +191,8 @@
struct file_lock *lock;
struct ceph_filelock cephlock;
int err = 0;
+ int seen_fcntl = 0;
+ int seen_flock = 0;
dout("encoding %d flock and %d fcntl locks", num_flock_locks,
num_fcntl_locks);
@@ -195,6 +201,11 @@
goto fail;
for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
if (lock->fl_flags & FL_POSIX) {
+ ++seen_fcntl;
+ if (seen_fcntl > num_fcntl_locks) {
+ err = -ENOSPC;
+ goto fail;
+ }
err = lock_to_ceph_filelock(lock, &cephlock);
if (err)
goto fail;
@@ -210,6 +221,11 @@
goto fail;
for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
if (lock->fl_flags & FL_FLOCK) {
+ ++seen_flock;
+ if (seen_flock > num_flock_locks) {
+ err = -ENOSPC;
+ goto fail;
+ }
err = lock_to_ceph_filelock(lock, &cephlock);
if (err)
goto fail;
@@ -235,7 +251,8 @@
cephlock->length = cpu_to_le64(lock->fl_end - lock->fl_start + 1);
cephlock->client = cpu_to_le64(0);
cephlock->pid = cpu_to_le64(lock->fl_pid);
- cephlock->pid_namespace = cpu_to_le64((u64)lock->fl_nspid);
+ cephlock->pid_namespace =
+ cpu_to_le64((u64)(unsigned long)lock->fl_nspid);
switch (lock->fl_type) {
case F_RDLCK:
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index a75ddbf..3142b15 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1,17 +1,21 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
+#include <linux/fs.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
#include <linux/smp_lock.h>
-#include "mds_client.h"
-#include "mon_client.h"
#include "super.h"
-#include "messenger.h"
-#include "decode.h"
-#include "auth.h"
-#include "pagelist.h"
+#include "mds_client.h"
+
+#include <linux/ceph/messenger.h>
+#include <linux/ceph/decode.h>
+#include <linux/ceph/pagelist.h>
+#include <linux/ceph/auth.h>
+#include <linux/ceph/debugfs.h>
/*
* A cluster of MDS (metadata server) daemons is responsible for
@@ -286,8 +290,9 @@
atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
if (atomic_dec_and_test(&s->s_ref)) {
if (s->s_authorizer)
- s->s_mdsc->client->monc.auth->ops->destroy_authorizer(
- s->s_mdsc->client->monc.auth, s->s_authorizer);
+ s->s_mdsc->fsc->client->monc.auth->ops->destroy_authorizer(
+ s->s_mdsc->fsc->client->monc.auth,
+ s->s_authorizer);
kfree(s);
}
}
@@ -344,7 +349,7 @@
s->s_seq = 0;
mutex_init(&s->s_mutex);
- ceph_con_init(mdsc->client->msgr, &s->s_con);
+ ceph_con_init(mdsc->fsc->client->msgr, &s->s_con);
s->s_con.private = s;
s->s_con.ops = &mds_con_ops;
s->s_con.peer_name.type = CEPH_ENTITY_TYPE_MDS;
@@ -560,6 +565,13 @@
*
* Called under mdsc->mutex.
*/
+struct dentry *get_nonsnap_parent(struct dentry *dentry)
+{
+ while (!IS_ROOT(dentry) && ceph_snap(dentry->d_inode) != CEPH_NOSNAP)
+ dentry = dentry->d_parent;
+ return dentry;
+}
+
static int __choose_mds(struct ceph_mds_client *mdsc,
struct ceph_mds_request *req)
{
@@ -590,14 +602,29 @@
if (req->r_inode) {
inode = req->r_inode;
} else if (req->r_dentry) {
- if (req->r_dentry->d_inode) {
+ struct inode *dir = req->r_dentry->d_parent->d_inode;
+
+ if (dir->i_sb != mdsc->fsc->sb) {
+ /* not this fs! */
+ inode = req->r_dentry->d_inode;
+ } else if (ceph_snap(dir) != CEPH_NOSNAP) {
+ /* direct snapped/virtual snapdir requests
+ * based on parent dir inode */
+ struct dentry *dn =
+ get_nonsnap_parent(req->r_dentry->d_parent);
+ inode = dn->d_inode;
+ dout("__choose_mds using nonsnap parent %p\n", inode);
+ } else if (req->r_dentry->d_inode) {
+ /* dentry target */
inode = req->r_dentry->d_inode;
} else {
- inode = req->r_dentry->d_parent->d_inode;
+ /* dir + name */
+ inode = dir;
hash = req->r_dentry->d_name.hash;
is_hash = true;
}
}
+
dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
(int)hash, mode);
if (!inode)
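The rewritten branch above boils down to a short priority order for picking
the inode that drives MDS selection. Summarized (a reading aid, not the exact
control flow):

    /*
     * inode used to pick an MDS, in priority order:
     *   1. req->r_inode set             -> use it directly
     *   2. parent dir in another sb     -> use the dentry's own inode
     *   3. parent dir is snapped        -> get_nonsnap_parent() walks up
     *                                      d_parent; use that ancestor
     *   4. dentry has a target inode    -> use the target inode
     *   5. dir + name lookup            -> use the parent dir inode plus
     *                                      the dentry name hash
     */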
@@ -862,7 +889,7 @@
__ceph_remove_cap(cap);
if (!__ceph_is_any_real_caps(ci)) {
struct ceph_mds_client *mdsc =
- &ceph_sb_to_client(inode->i_sb)->mdsc;
+ ceph_sb_to_client(inode->i_sb)->mdsc;
spin_lock(&mdsc->cap_dirty_lock);
if (!list_empty(&ci->i_dirty_item)) {
@@ -1124,7 +1151,7 @@
struct ceph_msg *msg, *partial = NULL;
struct ceph_mds_cap_release *head;
int err = -ENOMEM;
- int extra = mdsc->client->mount_args->cap_release_safety;
+ int extra = mdsc->fsc->mount_options->cap_release_safety;
int num;
dout("add_cap_releases %p mds%d extra %d\n", session, session->s_mds,
@@ -2063,7 +2090,7 @@
/* insert trace into our cache */
mutex_lock(&req->r_fill_mutex);
- err = ceph_fill_trace(mdsc->client->sb, req, req->r_session);
+ err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session);
if (err == 0) {
if (result == 0 && rinfo->dir_nr)
ceph_readdir_prepopulate(req, req->r_session);
@@ -2208,7 +2235,7 @@
pr_info("mds%d reconnect denied\n", session->s_mds);
remove_session_caps(session);
wake = 1; /* for good measure */
- complete_all(&mdsc->session_close_waiters);
+ wake_up_all(&mdsc->session_close_wq);
kick_requests(mdsc, mds);
break;
@@ -2302,7 +2329,7 @@
path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0);
if (IS_ERR(path)) {
err = PTR_ERR(path);
- BUG_ON(err);
+ goto out_dput;
}
} else {
path = NULL;
@@ -2310,7 +2337,7 @@
}
err = ceph_pagelist_encode_string(pagelist, path, pathlen);
if (err)
- goto out;
+ goto out_free;
spin_lock(&inode->i_lock);
cap->seq = 0; /* reset cap seq */
@@ -2339,23 +2366,42 @@
if (recon_state->flock) {
int num_fcntl_locks, num_flock_locks;
+ struct ceph_pagelist_cursor trunc_point;
- lock_kernel();
- ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
- rec.v2.flock_len = (2*sizeof(u32) +
- (num_fcntl_locks+num_flock_locks) *
- sizeof(struct ceph_filelock));
+ ceph_pagelist_set_cursor(pagelist, &trunc_point);
+ do {
+ lock_flocks();
+ ceph_count_locks(inode, &num_fcntl_locks,
+ &num_flock_locks);
+ rec.v2.flock_len = (2*sizeof(u32) +
+ (num_fcntl_locks+num_flock_locks) *
+ sizeof(struct ceph_filelock));
+ unlock_flocks();
+ /* pre-alloc pagelist */
+ ceph_pagelist_truncate(pagelist, &trunc_point);
+ err = ceph_pagelist_append(pagelist, &rec, reclen);
+ if (!err)
+ err = ceph_pagelist_reserve(pagelist,
+ rec.v2.flock_len);
+
+ /* encode locks */
+ if (!err) {
+ lock_flocks();
+ err = ceph_encode_locks(inode,
+ pagelist,
+ num_fcntl_locks,
+ num_flock_locks);
+ unlock_flocks();
+ }
+ } while (err == -ENOSPC);
+ } else {
err = ceph_pagelist_append(pagelist, &rec, reclen);
- if (!err)
- err = ceph_encode_locks(inode, pagelist,
- num_fcntl_locks,
- num_flock_locks);
- unlock_kernel();
}
-out:
+out_free:
kfree(path);
+out_dput:
dput(dentry);
return err;
}
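The do/while above is optimistic concurrency: lock counts are sampled under
lock_flocks(), pagelist space is reserved with the lock dropped (reservation
can allocate and sleep), and the encode pass re-checks the counts, retrying
from the truncation point if the lists grew. The shape of the loop, with
hypothetical helpers:

    int err, n;

    do {
            lock();                         /* cheap sample under lock */
            n = count_items();
            unlock();

            err = reserve_space(n);         /* may sleep: no lock held */
            if (err)
                    break;

            lock();
            err = encode_items(n);          /* -ENOSPC if list grew */
            unlock();
    } while (err == -ENOSPC);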
@@ -2588,7 +2634,7 @@
struct ceph_mds_session *session,
struct ceph_msg *msg)
{
- struct super_block *sb = mdsc->client->sb;
+ struct super_block *sb = mdsc->fsc->sb;
struct inode *inode;
struct ceph_inode_info *ci;
struct dentry *parent, *dentry;
@@ -2866,17 +2912,23 @@
schedule_delayed(mdsc);
}
+int ceph_mdsc_init(struct ceph_fs_client *fsc)
-int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client)
{
- mdsc->client = client;
+ struct ceph_mds_client *mdsc;
+
+ mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
+ if (!mdsc)
+ return -ENOMEM;
+ mdsc->fsc = fsc;
+ fsc->mdsc = mdsc;
mutex_init(&mdsc->mutex);
mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
if (mdsc->mdsmap == NULL)
return -ENOMEM;
init_completion(&mdsc->safe_umount_waiters);
- init_completion(&mdsc->session_close_waiters);
+ init_waitqueue_head(&mdsc->session_close_wq);
INIT_LIST_HEAD(&mdsc->waiting_for_map);
mdsc->sessions = NULL;
mdsc->max_sessions = 0;
@@ -2902,7 +2954,7 @@
INIT_LIST_HEAD(&mdsc->dentry_lru);
ceph_caps_init(mdsc);
- ceph_adjust_min_caps(mdsc, client->min_caps);
+ ceph_adjust_min_caps(mdsc, fsc->min_caps);
return 0;
}
@@ -2914,7 +2966,7 @@
static void wait_requests(struct ceph_mds_client *mdsc)
{
struct ceph_mds_request *req;
- struct ceph_client *client = mdsc->client;
+ struct ceph_fs_client *fsc = mdsc->fsc;
mutex_lock(&mdsc->mutex);
if (__get_oldest_req(mdsc)) {
@@ -2922,7 +2974,7 @@
dout("wait_requests waiting for requests\n");
wait_for_completion_timeout(&mdsc->safe_umount_waiters,
- client->mount_args->mount_timeout * HZ);
+ fsc->client->options->mount_timeout * HZ);
/* tear down remaining requests */
mutex_lock(&mdsc->mutex);
@@ -3005,7 +3057,7 @@
{
u64 want_tid, want_flush;
- if (mdsc->client->mount_state == CEPH_MOUNT_SHUTDOWN)
+ if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN)
return;
dout("sync\n");
@@ -3021,6 +3073,23 @@
wait_event(mdsc->cap_flushing_wq, check_cap_flush(mdsc, want_flush));
}
+/*
+ * true if all sessions are closed, or we force unmount
+ */
+static bool done_closing_sessions(struct ceph_mds_client *mdsc)
+{
+ int i, n = 0;
+
+ if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN)
+ return true;
+
+ mutex_lock(&mdsc->mutex);
+ for (i = 0; i < mdsc->max_sessions; i++)
+ if (mdsc->sessions[i])
+ n++;
+ mutex_unlock(&mdsc->mutex);
+ return n == 0;
+}
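Swapping the completion for a wait queue lets the unmount path re-test a
whole predicate instead of counting discrete complete() events, which raced
when several sessions closed back to back. The pairing is the standard one
from linux/wait.h:

    /* waiter: sleeps until the predicate holds or the timeout expires;
     * the predicate is re-evaluated on every wakeup */
    wait_event_timeout(mdsc->session_close_wq,
                       done_closing_sessions(mdsc), timeout);

    /* waker: called after any state change that may satisfy it */
    wake_up_all(&mdsc->session_close_wq);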
/*
* called after sb is ro.
@@ -3029,45 +3098,32 @@
{
struct ceph_mds_session *session;
int i;
- int n;
- struct ceph_client *client = mdsc->client;
- unsigned long started, timeout = client->mount_args->mount_timeout * HZ;
+ struct ceph_fs_client *fsc = mdsc->fsc;
+ unsigned long timeout = fsc->client->options->mount_timeout * HZ;
dout("close_sessions\n");
- mutex_lock(&mdsc->mutex);
-
/* close sessions */
- started = jiffies;
- while (time_before(jiffies, started + timeout)) {
- dout("closing sessions\n");
- n = 0;
- for (i = 0; i < mdsc->max_sessions; i++) {
- session = __ceph_lookup_mds_session(mdsc, i);
- if (!session)
- continue;
- mutex_unlock(&mdsc->mutex);
- mutex_lock(&session->s_mutex);
- __close_session(mdsc, session);
- mutex_unlock(&session->s_mutex);
- ceph_put_mds_session(session);
- mutex_lock(&mdsc->mutex);
- n++;
- }
- if (n == 0)
- break;
-
- if (client->mount_state == CEPH_MOUNT_SHUTDOWN)
- break;
-
- dout("waiting for sessions to close\n");
+ mutex_lock(&mdsc->mutex);
+ for (i = 0; i < mdsc->max_sessions; i++) {
+ session = __ceph_lookup_mds_session(mdsc, i);
+ if (!session)
+ continue;
mutex_unlock(&mdsc->mutex);
- wait_for_completion_timeout(&mdsc->session_close_waiters,
- timeout);
+ mutex_lock(&session->s_mutex);
+ __close_session(mdsc, session);
+ mutex_unlock(&session->s_mutex);
+ ceph_put_mds_session(session);
mutex_lock(&mdsc->mutex);
}
+ mutex_unlock(&mdsc->mutex);
+
+ dout("waiting for sessions to close\n");
+ wait_event_timeout(mdsc->session_close_wq, done_closing_sessions(mdsc),
+ timeout);
/* tear down remaining sessions */
+ mutex_lock(&mdsc->mutex);
for (i = 0; i < mdsc->max_sessions; i++) {
if (mdsc->sessions[i]) {
session = get_session(mdsc->sessions[i]);
@@ -3080,9 +3136,7 @@
mutex_lock(&mdsc->mutex);
}
}
-
WARN_ON(!list_empty(&mdsc->cap_delay_list));
-
mutex_unlock(&mdsc->mutex);
ceph_cleanup_empty_realms(mdsc);
@@ -3092,7 +3146,7 @@
dout("stopped\n");
}
-void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
+static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
{
dout("stop\n");
cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
@@ -3102,6 +3156,15 @@
ceph_caps_finalize(mdsc);
}
+void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
+{
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+
+ ceph_mdsc_stop(mdsc);
+ fsc->mdsc = NULL;
+ kfree(mdsc);
+}
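With the mds client now allocated separately from the fs client, setup and
teardown become a symmetric pair:

    /*
     * Lifecycle after this change, for reference:
     *   ceph_mdsc_init(fsc)    - kzalloc the mdsc, link fsc <-> mdsc
     *   ceph_mdsc_stop(mdsc)   - now static; internal teardown only
     *   ceph_mdsc_destroy(fsc) - stop, unlink (fsc->mdsc = NULL), kfree
     * Callers outside mds_client.c only see the init/destroy pair.
     */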
+
/*
* handle mds map update.
@@ -3118,14 +3181,14 @@
ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
ceph_decode_copy(&p, &fsid, sizeof(fsid));
- if (ceph_check_fsid(mdsc->client, &fsid) < 0)
+ if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
return;
epoch = ceph_decode_32(&p);
maplen = ceph_decode_32(&p);
dout("handle_map epoch %u len %d\n", epoch, (int)maplen);
/* do we need it? */
- ceph_monc_got_mdsmap(&mdsc->client->monc, epoch);
+ ceph_monc_got_mdsmap(&mdsc->fsc->client->monc, epoch);
mutex_lock(&mdsc->mutex);
if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
dout("handle_map epoch %u <= our %u\n",
@@ -3149,7 +3212,7 @@
} else {
mdsc->mdsmap = newmap; /* first mds map */
}
- mdsc->client->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size;
+ mdsc->fsc->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size;
__wake_requests(mdsc, &mdsc->waiting_for_map);
@@ -3250,7 +3313,7 @@
{
struct ceph_mds_session *s = con->private;
struct ceph_mds_client *mdsc = s->s_mdsc;
- struct ceph_auth_client *ac = mdsc->client->monc.auth;
+ struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
int ret = 0;
if (force_new && s->s_authorizer) {
@@ -3284,7 +3347,7 @@
{
struct ceph_mds_session *s = con->private;
struct ceph_mds_client *mdsc = s->s_mdsc;
- struct ceph_auth_client *ac = mdsc->client->monc.auth;
+ struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
return ac->ops->verify_authorizer_reply(ac, s->s_authorizer, len);
}
@@ -3293,12 +3356,12 @@
{
struct ceph_mds_session *s = con->private;
struct ceph_mds_client *mdsc = s->s_mdsc;
- struct ceph_auth_client *ac = mdsc->client->monc.auth;
+ struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
if (ac->ops->invalidate_authorizer)
ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);
- return ceph_monc_validate_auth(&mdsc->client->monc);
+ return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
}
static const struct ceph_connection_operations mds_con_ops = {
@@ -3311,7 +3374,4 @@
.peer_reset = peer_reset,
};
-
-
-
/* eof */
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index ab7e89f..d66d63c 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -8,9 +8,9 @@
#include <linux/rbtree.h>
#include <linux/spinlock.h>
-#include "types.h"
-#include "messenger.h"
-#include "mdsmap.h"
+#include <linux/ceph/types.h>
+#include <linux/ceph/messenger.h>
+#include <linux/ceph/mdsmap.h>
/*
* Some lock dependencies:
@@ -26,7 +26,7 @@
*
*/
-struct ceph_client;
+struct ceph_fs_client;
struct ceph_cap;
/*
@@ -230,11 +230,12 @@
* mds client state
*/
struct ceph_mds_client {
- struct ceph_client *client;
+ struct ceph_fs_client *fsc;
struct mutex mutex; /* all nested structures */
struct ceph_mdsmap *mdsmap;
- struct completion safe_umount_waiters, session_close_waiters;
+ struct completion safe_umount_waiters;
+ wait_queue_head_t session_close_wq;
struct list_head waiting_for_map;
struct ceph_mds_session **sessions; /* NULL for mds if no session */
@@ -288,11 +289,6 @@
int caps_avail_count; /* unused, unreserved */
int caps_min_count; /* keep at least this many
(unreserved) */
-
-#ifdef CONFIG_DEBUG_FS
- struct dentry *debugfs_file;
-#endif
-
spinlock_t dentry_lru_lock;
struct list_head dentry_lru;
int num_dentry;
@@ -315,10 +311,9 @@
extern int ceph_send_msg_mds(struct ceph_mds_client *mdsc,
struct ceph_msg *msg, int mds);
-extern int ceph_mdsc_init(struct ceph_mds_client *mdsc,
- struct ceph_client *client);
+extern int ceph_mdsc_init(struct ceph_fs_client *fsc);
extern void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc);
-extern void ceph_mdsc_stop(struct ceph_mds_client *mdsc);
+extern void ceph_mdsc_destroy(struct ceph_fs_client *fsc);
extern void ceph_mdsc_sync(struct ceph_mds_client *mdsc);
diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
index 040be6d..73b7d44 100644
--- a/fs/ceph/mdsmap.c
+++ b/fs/ceph/mdsmap.c
@@ -1,4 +1,4 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
#include <linux/bug.h>
#include <linux/err.h>
@@ -6,9 +6,9 @@
#include <linux/slab.h>
#include <linux/types.h>
-#include "mdsmap.h"
-#include "messenger.h"
-#include "decode.h"
+#include <linux/ceph/mdsmap.h>
+#include <linux/ceph/messenger.h>
+#include <linux/ceph/decode.h>
#include "super.h"
@@ -117,7 +117,8 @@
}
dout("mdsmap_decode %d/%d %lld mds%d.%d %s %s\n",
- i+1, n, global_id, mds, inc, pr_addr(&addr.in_addr),
+ i+1, n, global_id, mds, inc,
+ ceph_pr_addr(&addr.in_addr),
ceph_mds_state_name(state));
if (mds >= 0 && mds < m->m_max_mds && state > 0) {
m->m_info[mds].global_id = global_id;
diff --git a/fs/ceph/pagelist.c b/fs/ceph/pagelist.c
deleted file mode 100644
index b6859f4..0000000
--- a/fs/ceph/pagelist.c
+++ /dev/null
@@ -1,55 +0,0 @@
-
-#include <linux/gfp.h>
-#include <linux/pagemap.h>
-#include <linux/highmem.h>
-
-#include "pagelist.h"
-
-int ceph_pagelist_release(struct ceph_pagelist *pl)
-{
- if (pl->mapped_tail)
- kunmap(pl->mapped_tail);
- while (!list_empty(&pl->head)) {
- struct page *page = list_first_entry(&pl->head, struct page,
- lru);
- list_del(&page->lru);
- __free_page(page);
- }
- return 0;
-}
-
-static int ceph_pagelist_addpage(struct ceph_pagelist *pl)
-{
- struct page *page = __page_cache_alloc(GFP_NOFS);
- if (!page)
- return -ENOMEM;
- pl->room += PAGE_SIZE;
- list_add_tail(&page->lru, &pl->head);
- if (pl->mapped_tail)
- kunmap(pl->mapped_tail);
- pl->mapped_tail = kmap(page);
- return 0;
-}
-
-int ceph_pagelist_append(struct ceph_pagelist *pl, void *buf, size_t len)
-{
- while (pl->room < len) {
- size_t bit = pl->room;
- int ret;
-
- memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK),
- buf, bit);
- pl->length += bit;
- pl->room -= bit;
- buf += bit;
- len -= bit;
- ret = ceph_pagelist_addpage(pl);
- if (ret)
- return ret;
- }
-
- memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK), buf, len);
- pl->length += len;
- pl->room -= len;
- return 0;
-}
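The deleted file's append logic (it moves to the shared libceph module rather
than disappearing) maintains a simple invariant that is worth keeping in mind
when reading the reconnect code above that reserves pagelist space:

    /*
     * ceph_pagelist invariant: 'room' bytes remain in the kmap()ed
     * tail page, and (length & ~PAGE_CACHE_MASK) is the write offset
     * within it.  Append copies up to 'room' bytes, then allocates a
     * fresh page, remaps the tail, and continues until 'len' is done.
     */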
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index c0b26b6..39c243a 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -1,10 +1,12 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include "super.h"
-#include "decode.h"
+#include "mds_client.h"
+
+#include <linux/ceph/decode.h>
/*
* Snapshots in ceph are driven in large part by cooperation from the
@@ -119,6 +121,7 @@
INIT_LIST_HEAD(&realm->children);
INIT_LIST_HEAD(&realm->child_item);
INIT_LIST_HEAD(&realm->empty_item);
+ INIT_LIST_HEAD(&realm->dirty_item);
INIT_LIST_HEAD(&realm->inodes_with_caps);
spin_lock_init(&realm->inodes_with_caps_lock);
__insert_snap_realm(&mdsc->snap_realms, realm);
@@ -435,7 +438,7 @@
{
struct inode *inode = &ci->vfs_inode;
struct ceph_cap_snap *capsnap;
- int used;
+ int used, dirty;
capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS);
if (!capsnap) {
@@ -445,6 +448,7 @@
spin_lock(&inode->i_lock);
used = __ceph_caps_used(ci);
+ dirty = __ceph_caps_dirty(ci);
if (__ceph_have_pending_cap_snap(ci)) {
/* there is no point in queuing multiple "pending" cap_snaps,
as no new writes are allowed to start when pending, so any
@@ -452,27 +456,37 @@
cap_snap. lucky us. */
dout("queue_cap_snap %p already pending\n", inode);
kfree(capsnap);
- } else if (ci->i_wrbuffer_ref_head || (used & CEPH_CAP_FILE_WR)) {
+ } else if (ci->i_wrbuffer_ref_head || (used & CEPH_CAP_FILE_WR) ||
+ (dirty & (CEPH_CAP_AUTH_EXCL|CEPH_CAP_XATTR_EXCL|
+ CEPH_CAP_FILE_EXCL|CEPH_CAP_FILE_WR))) {
struct ceph_snap_context *snapc = ci->i_head_snapc;
+ dout("queue_cap_snap %p cap_snap %p queuing under %p\n", inode,
+ capsnap, snapc);
igrab(inode);
-
+
atomic_set(&capsnap->nref, 1);
capsnap->ci = ci;
INIT_LIST_HEAD(&capsnap->ci_item);
INIT_LIST_HEAD(&capsnap->flushing_item);
- capsnap->follows = snapc->seq - 1;
+ capsnap->follows = snapc->seq;
capsnap->issued = __ceph_caps_issued(ci, NULL);
- capsnap->dirty = __ceph_caps_dirty(ci);
+ capsnap->dirty = dirty;
capsnap->mode = inode->i_mode;
capsnap->uid = inode->i_uid;
capsnap->gid = inode->i_gid;
- /* fixme? */
- capsnap->xattr_blob = NULL;
- capsnap->xattr_len = 0;
+ if (dirty & CEPH_CAP_XATTR_EXCL) {
+ __ceph_build_xattrs_blob(ci);
+ capsnap->xattr_blob =
+ ceph_buffer_get(ci->i_xattrs.blob);
+ capsnap->xattr_version = ci->i_xattrs.version;
+ } else {
+ capsnap->xattr_blob = NULL;
+ capsnap->xattr_version = 0;
+ }
/* dirty page count moved from _head to this cap_snap;
all subsequent writes page dirties occur _after_ this
@@ -480,7 +494,9 @@
capsnap->dirty_pages = ci->i_wrbuffer_ref_head;
ci->i_wrbuffer_ref_head = 0;
capsnap->context = snapc;
- ci->i_head_snapc = NULL;
+ ci->i_head_snapc =
+ ceph_get_snap_context(ci->i_snap_realm->cached_context);
+ dout(" new snapc is %p\n", ci->i_head_snapc);
list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps);
if (used & CEPH_CAP_FILE_WR) {
@@ -512,7 +528,7 @@
struct ceph_cap_snap *capsnap)
{
struct inode *inode = &ci->vfs_inode;
- struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
BUG_ON(capsnap->writing);
capsnap->size = inode->i_size;
@@ -539,6 +555,41 @@
return 1; /* caller may want to ceph_flush_snaps */
}
+/*
+ * Queue cap_snaps for snap writeback for this realm and its children.
+ * Called under snap_rwsem, so realm topology won't change.
+ */
+static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
+{
+ struct ceph_inode_info *ci;
+ struct inode *lastinode = NULL;
+ struct ceph_snap_realm *child;
+
+ dout("queue_realm_cap_snaps %p %llx inodes\n", realm, realm->ino);
+
+ spin_lock(&realm->inodes_with_caps_lock);
+ list_for_each_entry(ci, &realm->inodes_with_caps,
+ i_snap_realm_item) {
+ struct inode *inode = igrab(&ci->vfs_inode);
+ if (!inode)
+ continue;
+ spin_unlock(&realm->inodes_with_caps_lock);
+ if (lastinode)
+ iput(lastinode);
+ lastinode = inode;
+ ceph_queue_cap_snap(ci);
+ spin_lock(&realm->inodes_with_caps_lock);
+ }
+ spin_unlock(&realm->inodes_with_caps_lock);
+ if (lastinode)
+ iput(lastinode);
+
+ dout("queue_realm_cap_snaps %p %llx children\n", realm, realm->ino);
+ list_for_each_entry(child, &realm->children, child_item)
+ queue_realm_cap_snaps(child);
+
+ dout("queue_realm_cap_snaps %p %llx done\n", realm, realm->ino);
+}
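queue_realm_cap_snaps() is an instance of a common pattern for calling
functions that may sleep (igrab/iput, cap snap queuing) on each entry of a
spinlock-protected list. The skeleton, with the ceph specifics stripped out
and do_work() standing in for the real callback:

    spin_lock(&lock);
    list_for_each_entry(ci, &head, item) {
            struct inode *inode = igrab(&ci->vfs_inode);  /* pin entry */
            if (!inode)
                    continue;
            spin_unlock(&lock);
            if (last)
                    iput(last);             /* may sleep: lock dropped */
            last = inode;
            do_work(ci);                    /* may sleep */
            spin_lock(&lock);               /* entry stays on the list:
                                               the pinned inode holds it */
    }
    spin_unlock(&lock);
    if (last)
            iput(last);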
/*
* Parse and apply a snapblob "snap trace" from the MDS. This specifies
@@ -556,6 +607,7 @@
struct ceph_snap_realm *realm;
int invalidate = 0;
int err = -ENOMEM;
+ LIST_HEAD(dirty_realms);
dout("update_snap_trace deletion=%d\n", deletion);
more:
@@ -578,45 +630,6 @@
}
}
- if (le64_to_cpu(ri->seq) > realm->seq) {
- dout("update_snap_trace updating %llx %p %lld -> %lld\n",
- realm->ino, realm, realm->seq, le64_to_cpu(ri->seq));
- /*
- * if the realm seq has changed, queue a cap_snap for every
- * inode with open caps. we do this _before_ we update
- * the realm info so that we prepare for writeback under the
- * _previous_ snap context.
- *
- * ...unless it's a snap deletion!
- */
- if (!deletion) {
- struct ceph_inode_info *ci;
- struct inode *lastinode = NULL;
-
- spin_lock(&realm->inodes_with_caps_lock);
- list_for_each_entry(ci, &realm->inodes_with_caps,
- i_snap_realm_item) {
- struct inode *inode = igrab(&ci->vfs_inode);
- if (!inode)
- continue;
- spin_unlock(&realm->inodes_with_caps_lock);
- if (lastinode)
- iput(lastinode);
- lastinode = inode;
- ceph_queue_cap_snap(ci);
- spin_lock(&realm->inodes_with_caps_lock);
- }
- spin_unlock(&realm->inodes_with_caps_lock);
- if (lastinode)
- iput(lastinode);
- dout("update_snap_trace cap_snaps queued\n");
- }
-
- } else {
- dout("update_snap_trace %llx %p seq %lld unchanged\n",
- realm->ino, realm, realm->seq);
- }
-
/* ensure the parent is correct */
err = adjust_snap_realm_parent(mdsc, realm, le64_to_cpu(ri->parent));
if (err < 0)
@@ -624,6 +637,8 @@
invalidate += err;
if (le64_to_cpu(ri->seq) > realm->seq) {
+ dout("update_snap_trace updating %llx %p %lld -> %lld\n",
+ realm->ino, realm, realm->seq, le64_to_cpu(ri->seq));
/* update realm parameters, snap lists */
realm->seq = le64_to_cpu(ri->seq);
realm->created = le64_to_cpu(ri->created);
@@ -641,9 +656,17 @@
if (err < 0)
goto fail;
+ /* queue realm for cap_snap creation */
+ list_add(&realm->dirty_item, &dirty_realms);
+
invalidate = 1;
} else if (!realm->cached_context) {
+ dout("update_snap_trace %llx %p seq %lld new\n",
+ realm->ino, realm, realm->seq);
invalidate = 1;
+ } else {
+ dout("update_snap_trace %llx %p seq %lld unchanged\n",
+ realm->ino, realm, realm->seq);
}
dout("done with %llx %p, invalidated=%d, %p %p\n", realm->ino,
@@ -656,6 +679,14 @@
if (invalidate)
rebuild_snap_realms(realm);
+ /*
+ * queue cap snaps _after_ we've built the new snap contexts,
+ * so that i_head_snapc can be set appropriately.
+ */
+ list_for_each_entry(realm, &dirty_realms, dirty_item) {
+ queue_realm_cap_snaps(realm);
+ }
+
__cleanup_empty_realms(mdsc);
return 0;
@@ -688,7 +719,7 @@
igrab(inode);
spin_unlock(&mdsc->snap_flush_lock);
spin_lock(&inode->i_lock);
- __ceph_flush_snaps(ci, &session);
+ __ceph_flush_snaps(ci, &session, 0);
spin_unlock(&inode->i_lock);
iput(inode);
spin_lock(&mdsc->snap_flush_lock);
@@ -718,7 +749,7 @@
struct ceph_mds_session *session,
struct ceph_msg *msg)
{
- struct super_block *sb = mdsc->client->sb;
+ struct super_block *sb = mdsc->fsc->sb;
int mds = session->s_mds;
u64 split;
int op;
@@ -789,6 +820,7 @@
};
struct inode *inode = ceph_find_inode(sb, vino);
struct ceph_inode_info *ci;
+ struct ceph_snap_realm *oldrealm;
if (!inode)
continue;
@@ -814,18 +846,19 @@
dout(" will move %p to split realm %llx %p\n",
inode, realm->ino, realm);
/*
- * Remove the inode from the realm's inode
- * list, but don't add it to the new realm
- * yet. We don't want the cap_snap to be
- * queued (again) by ceph_update_snap_trace()
- * below. Queue it _now_, under the old context.
+ * Move the inode to the new realm
*/
spin_lock(&realm->inodes_with_caps_lock);
list_del_init(&ci->i_snap_realm_item);
+ list_add(&ci->i_snap_realm_item,
+ &realm->inodes_with_caps);
+ oldrealm = ci->i_snap_realm;
+ ci->i_snap_realm = realm;
spin_unlock(&realm->inodes_with_caps_lock);
spin_unlock(&inode->i_lock);
- ceph_queue_cap_snap(ci);
+ ceph_get_snap_realm(mdsc, realm);
+ ceph_put_snap_realm(mdsc, oldrealm);
iput(inode);
continue;
@@ -853,43 +886,9 @@
ceph_update_snap_trace(mdsc, p, e,
op == CEPH_SNAP_OP_DESTROY);
- if (op == CEPH_SNAP_OP_SPLIT) {
- /*
- * ok, _now_ add the inodes into the new realm.
- */
- for (i = 0; i < num_split_inos; i++) {
- struct ceph_vino vino = {
- .ino = le64_to_cpu(split_inos[i]),
- .snap = CEPH_NOSNAP,
- };
- struct inode *inode = ceph_find_inode(sb, vino);
- struct ceph_inode_info *ci;
-
- if (!inode)
- continue;
- ci = ceph_inode(inode);
- spin_lock(&inode->i_lock);
- if (list_empty(&ci->i_snap_realm_item)) {
- struct ceph_snap_realm *oldrealm =
- ci->i_snap_realm;
-
- dout(" moving %p to split realm %llx %p\n",
- inode, realm->ino, realm);
- spin_lock(&realm->inodes_with_caps_lock);
- list_add(&ci->i_snap_realm_item,
- &realm->inodes_with_caps);
- ci->i_snap_realm = realm;
- spin_unlock(&realm->inodes_with_caps_lock);
- ceph_get_snap_realm(mdsc, realm);
- ceph_put_snap_realm(mdsc, oldrealm);
- }
- spin_unlock(&inode->i_lock);
- iput(inode);
- }
-
+ if (op == CEPH_SNAP_OP_SPLIT)
/* we took a reference when we created the realm, above */
ceph_put_snap_realm(mdsc, realm);
- }
__cleanup_empty_realms(mdsc);
diff --git a/fs/ceph/strings.c b/fs/ceph/strings.c
new file mode 100644
index 0000000..cd5097d
--- /dev/null
+++ b/fs/ceph/strings.c
@@ -0,0 +1,117 @@
+/*
+ * Ceph fs string constants
+ */
+#include <linux/module.h>
+#include <linux/ceph/types.h>
+
+
+const char *ceph_mds_state_name(int s)
+{
+ switch (s) {
+ /* down and out */
+ case CEPH_MDS_STATE_DNE: return "down:dne";
+ case CEPH_MDS_STATE_STOPPED: return "down:stopped";
+ /* up and out */
+ case CEPH_MDS_STATE_BOOT: return "up:boot";
+ case CEPH_MDS_STATE_STANDBY: return "up:standby";
+ case CEPH_MDS_STATE_STANDBY_REPLAY: return "up:standby-replay";
+ case CEPH_MDS_STATE_CREATING: return "up:creating";
+ case CEPH_MDS_STATE_STARTING: return "up:starting";
+ /* up and in */
+ case CEPH_MDS_STATE_REPLAY: return "up:replay";
+ case CEPH_MDS_STATE_RESOLVE: return "up:resolve";
+ case CEPH_MDS_STATE_RECONNECT: return "up:reconnect";
+ case CEPH_MDS_STATE_REJOIN: return "up:rejoin";
+ case CEPH_MDS_STATE_CLIENTREPLAY: return "up:clientreplay";
+ case CEPH_MDS_STATE_ACTIVE: return "up:active";
+ case CEPH_MDS_STATE_STOPPING: return "up:stopping";
+ }
+ return "???";
+}
+
+const char *ceph_session_op_name(int op)
+{
+ switch (op) {
+ case CEPH_SESSION_REQUEST_OPEN: return "request_open";
+ case CEPH_SESSION_OPEN: return "open";
+ case CEPH_SESSION_REQUEST_CLOSE: return "request_close";
+ case CEPH_SESSION_CLOSE: return "close";
+ case CEPH_SESSION_REQUEST_RENEWCAPS: return "request_renewcaps";
+ case CEPH_SESSION_RENEWCAPS: return "renewcaps";
+ case CEPH_SESSION_STALE: return "stale";
+ case CEPH_SESSION_RECALL_STATE: return "recall_state";
+ }
+ return "???";
+}
+
+const char *ceph_mds_op_name(int op)
+{
+ switch (op) {
+ case CEPH_MDS_OP_LOOKUP: return "lookup";
+ case CEPH_MDS_OP_LOOKUPHASH: return "lookuphash";
+ case CEPH_MDS_OP_LOOKUPPARENT: return "lookupparent";
+ case CEPH_MDS_OP_GETATTR: return "getattr";
+ case CEPH_MDS_OP_SETXATTR: return "setxattr";
+ case CEPH_MDS_OP_SETATTR: return "setattr";
+ case CEPH_MDS_OP_RMXATTR: return "rmxattr";
+ case CEPH_MDS_OP_READDIR: return "readdir";
+ case CEPH_MDS_OP_MKNOD: return "mknod";
+ case CEPH_MDS_OP_LINK: return "link";
+ case CEPH_MDS_OP_UNLINK: return "unlink";
+ case CEPH_MDS_OP_RENAME: return "rename";
+ case CEPH_MDS_OP_MKDIR: return "mkdir";
+ case CEPH_MDS_OP_RMDIR: return "rmdir";
+ case CEPH_MDS_OP_SYMLINK: return "symlink";
+ case CEPH_MDS_OP_CREATE: return "create";
+ case CEPH_MDS_OP_OPEN: return "open";
+ case CEPH_MDS_OP_LOOKUPSNAP: return "lookupsnap";
+ case CEPH_MDS_OP_LSSNAP: return "lssnap";
+ case CEPH_MDS_OP_MKSNAP: return "mksnap";
+ case CEPH_MDS_OP_RMSNAP: return "rmsnap";
+ case CEPH_MDS_OP_SETFILELOCK: return "setfilelock";
+ case CEPH_MDS_OP_GETFILELOCK: return "getfilelock";
+ }
+ return "???";
+}
+
+const char *ceph_cap_op_name(int op)
+{
+ switch (op) {
+ case CEPH_CAP_OP_GRANT: return "grant";
+ case CEPH_CAP_OP_REVOKE: return "revoke";
+ case CEPH_CAP_OP_TRUNC: return "trunc";
+ case CEPH_CAP_OP_EXPORT: return "export";
+ case CEPH_CAP_OP_IMPORT: return "import";
+ case CEPH_CAP_OP_UPDATE: return "update";
+ case CEPH_CAP_OP_DROP: return "drop";
+ case CEPH_CAP_OP_FLUSH: return "flush";
+ case CEPH_CAP_OP_FLUSH_ACK: return "flush_ack";
+ case CEPH_CAP_OP_FLUSHSNAP: return "flushsnap";
+ case CEPH_CAP_OP_FLUSHSNAP_ACK: return "flushsnap_ack";
+ case CEPH_CAP_OP_RELEASE: return "release";
+ case CEPH_CAP_OP_RENEW: return "renew";
+ }
+ return "???";
+}
+
+const char *ceph_lease_op_name(int o)
+{
+ switch (o) {
+ case CEPH_MDS_LEASE_REVOKE: return "revoke";
+ case CEPH_MDS_LEASE_RELEASE: return "release";
+ case CEPH_MDS_LEASE_RENEW: return "renew";
+ case CEPH_MDS_LEASE_REVOKE_ACK: return "revoke_ack";
+ }
+ return "???";
+}
+
+const char *ceph_snap_op_name(int o)
+{
+ switch (o) {
+ case CEPH_SNAP_OP_UPDATE: return "update";
+ case CEPH_SNAP_OP_CREATE: return "create";
+ case CEPH_SNAP_OP_DESTROY: return "destroy";
+ case CEPH_SNAP_OP_SPLIT: return "split";
+ }
+ return "???";
+}
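These tables are pure debug aids; the expected call sites are dout() or
seq_printf() lines such as (illustrative):

    dout("mds%d %s\n", mds, ceph_mds_state_name(state));
    dout("handle_session op %s\n", ceph_session_op_name(op));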
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 9922628..d6e0e04 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -1,5 +1,5 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
#include <linux/backing-dev.h>
#include <linux/ctype.h>
@@ -15,10 +15,13 @@
#include <linux/statfs.h>
#include <linux/string.h>
-#include "decode.h"
#include "super.h"
-#include "mon_client.h"
-#include "auth.h"
+#include "mds_client.h"
+
+#include <linux/ceph/decode.h>
+#include <linux/ceph/mon_client.h>
+#include <linux/ceph/auth.h>
+#include <linux/ceph/debugfs.h>
/*
* Ceph superblock operations
@@ -26,36 +29,22 @@
* Handle the basics of mounting, unmounting.
*/
-
-/*
- * find filename portion of a path (/foo/bar/baz -> baz)
- */
-const char *ceph_file_part(const char *s, int len)
-{
- const char *e = s + len;
-
- while (e != s && *(e-1) != '/')
- e--;
- return e;
-}
-
-
/*
* super ops
*/
static void ceph_put_super(struct super_block *s)
{
- struct ceph_client *client = ceph_sb_to_client(s);
+ struct ceph_fs_client *fsc = ceph_sb_to_client(s);
dout("put_super\n");
- ceph_mdsc_close_sessions(&client->mdsc);
+ ceph_mdsc_close_sessions(fsc->mdsc);
/*
* ensure we release the bdi before put_anon_super releases
* the device name.
*/
- if (s->s_bdi == &client->backing_dev_info) {
- bdi_unregister(&client->backing_dev_info);
+ if (s->s_bdi == &fsc->backing_dev_info) {
+ bdi_unregister(&fsc->backing_dev_info);
s->s_bdi = NULL;
}
@@ -64,14 +53,14 @@
static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
{
- struct ceph_client *client = ceph_inode_to_client(dentry->d_inode);
- struct ceph_monmap *monmap = client->monc.monmap;
+ struct ceph_fs_client *fsc = ceph_inode_to_client(dentry->d_inode);
+ struct ceph_monmap *monmap = fsc->client->monc.monmap;
struct ceph_statfs st;
u64 fsid;
int err;
dout("statfs\n");
- err = ceph_monc_do_statfs(&client->monc, &st);
+ err = ceph_monc_do_statfs(&fsc->client->monc, &st);
if (err < 0)
return err;
@@ -104,49 +93,237 @@
static int ceph_sync_fs(struct super_block *sb, int wait)
{
- struct ceph_client *client = ceph_sb_to_client(sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
if (!wait) {
dout("sync_fs (non-blocking)\n");
- ceph_flush_dirty_caps(&client->mdsc);
+ ceph_flush_dirty_caps(fsc->mdsc);
dout("sync_fs (non-blocking) done\n");
return 0;
}
dout("sync_fs (blocking)\n");
- ceph_osdc_sync(&ceph_sb_to_client(sb)->osdc);
- ceph_mdsc_sync(&ceph_sb_to_client(sb)->mdsc);
+ ceph_osdc_sync(&fsc->client->osdc);
+ ceph_mdsc_sync(fsc->mdsc);
dout("sync_fs (blocking) done\n");
return 0;
}
-static int default_congestion_kb(void)
+/*
+ * mount options
+ */
+enum {
+ Opt_wsize,
+ Opt_rsize,
+ Opt_caps_wanted_delay_min,
+ Opt_caps_wanted_delay_max,
+ Opt_cap_release_safety,
+ Opt_readdir_max_entries,
+ Opt_readdir_max_bytes,
+ Opt_congestion_kb,
+ Opt_last_int,
+ /* int args above */
+ Opt_snapdirname,
+ Opt_last_string,
+ /* string args above */
+ Opt_dirstat,
+ Opt_nodirstat,
+ Opt_rbytes,
+ Opt_norbytes,
+ Opt_noasyncreaddir,
+};
+
+static match_table_t fsopt_tokens = {
+ {Opt_wsize, "wsize=%d"},
+ {Opt_rsize, "rsize=%d"},
+ {Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"},
+ {Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"},
+ {Opt_cap_release_safety, "cap_release_safety=%d"},
+ {Opt_readdir_max_entries, "readdir_max_entries=%d"},
+ {Opt_readdir_max_bytes, "readdir_max_bytes=%d"},
+ {Opt_congestion_kb, "write_congestion_kb=%d"},
+ /* int args above */
+ {Opt_snapdirname, "snapdirname=%s"},
+ /* string args above */
+ {Opt_dirstat, "dirstat"},
+ {Opt_nodirstat, "nodirstat"},
+ {Opt_rbytes, "rbytes"},
+ {Opt_norbytes, "norbytes"},
+ {Opt_noasyncreaddir, "noasyncreaddir"},
+ {-1, NULL}
+};
+
+static int parse_fsopt_token(char *c, void *private)
{
- int congestion_kb;
+ struct ceph_mount_options *fsopt = private;
+ substring_t argstr[MAX_OPT_ARGS];
+ int token, intval, ret;
- /*
- * Copied from NFS
- *
- * congestion size, scale with available memory.
- *
- * 64MB: 8192k
- * 128MB: 11585k
- * 256MB: 16384k
- * 512MB: 23170k
- * 1GB: 32768k
- * 2GB: 46340k
- * 4GB: 65536k
- * 8GB: 92681k
- * 16GB: 131072k
- *
- * This allows larger machines to have larger/more transfers.
- * Limit the default to 256M
- */
- congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
- if (congestion_kb > 256*1024)
- congestion_kb = 256*1024;
+ token = match_token((char *)c, fsopt_tokens, argstr);
+ if (token < 0)
+ return -EINVAL;
- return congestion_kb;
+ if (token < Opt_last_int) {
+ ret = match_int(&argstr[0], &intval);
+ if (ret < 0) {
+ pr_err("bad mount option arg (not int) "
+ "at '%s'\n", c);
+ return ret;
+ }
+ dout("got int token %d val %d\n", token, intval);
+ } else if (token > Opt_last_int && token < Opt_last_string) {
+ dout("got string token %d val %s\n", token,
+ argstr[0].from);
+ } else {
+ dout("got token %d\n", token);
+ }
+
+ switch (token) {
+ case Opt_snapdirname:
+ kfree(fsopt->snapdir_name);
+ fsopt->snapdir_name = kstrndup(argstr[0].from,
+ argstr[0].to-argstr[0].from,
+ GFP_KERNEL);
+ if (!fsopt->snapdir_name)
+ return -ENOMEM;
+ break;
+
+ /* misc */
+ case Opt_wsize:
+ fsopt->wsize = intval;
+ break;
+ case Opt_rsize:
+ fsopt->rsize = intval;
+ break;
+ case Opt_caps_wanted_delay_min:
+ fsopt->caps_wanted_delay_min = intval;
+ break;
+ case Opt_caps_wanted_delay_max:
+ fsopt->caps_wanted_delay_max = intval;
+ break;
+ case Opt_readdir_max_entries:
+ fsopt->max_readdir = intval;
+ break;
+ case Opt_readdir_max_bytes:
+ fsopt->max_readdir_bytes = intval;
+ break;
+ case Opt_congestion_kb:
+ fsopt->congestion_kb = intval;
+ break;
+ case Opt_dirstat:
+ fsopt->flags |= CEPH_MOUNT_OPT_DIRSTAT;
+ break;
+ case Opt_nodirstat:
+ fsopt->flags &= ~CEPH_MOUNT_OPT_DIRSTAT;
+ break;
+ case Opt_rbytes:
+ fsopt->flags |= CEPH_MOUNT_OPT_RBYTES;
+ break;
+ case Opt_norbytes:
+ fsopt->flags &= ~CEPH_MOUNT_OPT_RBYTES;
+ break;
+ case Opt_noasyncreaddir:
+ fsopt->flags |= CEPH_MOUNT_OPT_NOASYNCREADDIR;
+ break;
+ default:
+ BUG_ON(token);
+ }
+ return 0;
+}
+
+static void destroy_mount_options(struct ceph_mount_options *args)
+{
+ dout("destroy_mount_options %p\n", args);
+ kfree(args->snapdir_name);
+ kfree(args);
+}
+
+static int strcmp_null(const char *s1, const char *s2)
+{
+ if (!s1 && !s2)
+ return 0;
+ if (s1 && !s2)
+ return -1;
+ if (!s1 && s2)
+ return 1;
+ return strcmp(s1, s2);
+}
+
+static int compare_mount_options(struct ceph_mount_options *new_fsopt,
+ struct ceph_options *new_opt,
+ struct ceph_fs_client *fsc)
+{
+ struct ceph_mount_options *fsopt1 = new_fsopt;
+ struct ceph_mount_options *fsopt2 = fsc->mount_options;
+ int ofs = offsetof(struct ceph_mount_options, snapdir_name);
+ int ret;
+
+ ret = memcmp(fsopt1, fsopt2, ofs);
+ if (ret)
+ return ret;
+
+ ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
+ if (ret)
+ return ret;
+
+ return ceph_compare_options(new_opt, fsc->client);
+}
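compare_mount_options() relies on struct layout: every flat (integer) option
in struct ceph_mount_options sits before snapdir_name, so a single memcmp of
that prefix compares them all, and only the string needs the NULL-safe
helper. The same trick in miniature, with a hypothetical struct:

    struct opts {
            int a, b, c;            /* flat fields, memcmp-comparable */
            char *name;             /* pointer: compare by value */
    };

    static bool opts_equal(const struct opts *x, const struct opts *y)
    {
            size_t ofs = offsetof(struct opts, name);

            /* safe because both structs come from kzalloc(), so any
             * padding bytes are zero on both sides */
            return memcmp(x, y, ofs) == 0 &&
                   strcmp_null(x->name, y->name) == 0;
    }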
+
+static int parse_mount_options(struct ceph_mount_options **pfsopt,
+ struct ceph_options **popt,
+ int flags, char *options,
+ const char *dev_name,
+ const char **path)
+{
+ struct ceph_mount_options *fsopt;
+ const char *dev_name_end;
+ int err = -ENOMEM;
+
+ fsopt = kzalloc(sizeof(*fsopt), GFP_KERNEL);
+ if (!fsopt)
+ return -ENOMEM;
+
+ dout("parse_mount_options %p, dev_name '%s'\n", fsopt, dev_name);
+
+ fsopt->sb_flags = flags;
+ fsopt->flags = CEPH_MOUNT_OPT_DEFAULT;
+
+ fsopt->rsize = CEPH_MOUNT_RSIZE_DEFAULT;
+ fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
+ fsopt->cap_release_safety = CEPH_CAP_RELEASE_SAFETY_DEFAULT;
+ fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT;
+ fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
+ fsopt->congestion_kb = default_congestion_kb();
+
+ /* ip1[:port1][,ip2[:port2]...]:/subdir/in/fs */
+ err = -EINVAL;
+ if (!dev_name)
+ goto out;
+ *path = strstr(dev_name, ":/");
+ if (*path == NULL) {
+ pr_err("device name is missing path (no :/ in %s)\n",
+ dev_name);
+ goto out;
+ }
+ dev_name_end = *path;
+ dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name);
+
+ /* path on server */
+ *path += 2;
+ dout("server path '%s'\n", *path);
+
+ err = ceph_parse_options(popt, options, dev_name, dev_name_end,
+ parse_fsopt_token, (void *)fsopt);
+ if (err)
+ goto out;
+
+ /* success */
+ *pfsopt = fsopt;
+ return 0;
+
+out:
+ destroy_mount_options(fsopt);
+ return err;
}
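For reference, the device string splits like this (example values):

    /*
     *   mount -t ceph 1.2.3.4:6789,1.2.3.5:6789:/some/dir /mnt/ceph
     *
     *   dev_name     = "1.2.3.4:6789,1.2.3.5:6789:/some/dir"
     *   *path        = strstr(dev_name, ":/")  -> ":/some/dir"
     *   dev_name_end = *path (end of the monitor list)
     *   *path += 2                             -> "some/dir"
     *
     * The monitor list between dev_name and dev_name_end is handed to
     * ceph_parse_options(), which parses the addresses.
     */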
/**
@@ -156,60 +333,176 @@
*/
static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt)
{
- struct ceph_client *client = ceph_sb_to_client(mnt->mnt_sb);
- struct ceph_mount_args *args = client->mount_args;
+ struct ceph_fs_client *fsc = ceph_sb_to_client(mnt->mnt_sb);
+ struct ceph_mount_options *fsopt = fsc->mount_options;
+ struct ceph_options *opt = fsc->client->options;
- if (args->flags & CEPH_OPT_FSID)
- seq_printf(m, ",fsid=%pU", &args->fsid);
- if (args->flags & CEPH_OPT_NOSHARE)
+ if (opt->flags & CEPH_OPT_FSID)
+ seq_printf(m, ",fsid=%pU", &opt->fsid);
+ if (opt->flags & CEPH_OPT_NOSHARE)
seq_puts(m, ",noshare");
- if (args->flags & CEPH_OPT_DIRSTAT)
- seq_puts(m, ",dirstat");
- if ((args->flags & CEPH_OPT_RBYTES) == 0)
- seq_puts(m, ",norbytes");
- if (args->flags & CEPH_OPT_NOCRC)
+ if (opt->flags & CEPH_OPT_NOCRC)
seq_puts(m, ",nocrc");
- if (args->flags & CEPH_OPT_NOASYNCREADDIR)
+
+ if (opt->name)
+ seq_printf(m, ",name=%s", opt->name);
+ if (opt->secret)
+ seq_puts(m, ",secret=<hidden>");
+
+ if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT)
+ seq_printf(m, ",mount_timeout=%d", opt->mount_timeout);
+ if (opt->osd_idle_ttl != CEPH_OSD_IDLE_TTL_DEFAULT)
+ seq_printf(m, ",osd_idle_ttl=%d", opt->osd_idle_ttl);
+ if (opt->osd_timeout != CEPH_OSD_TIMEOUT_DEFAULT)
+ seq_printf(m, ",osdtimeout=%d", opt->osd_timeout);
+ if (opt->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT)
+ seq_printf(m, ",osdkeepalivetimeout=%d",
+ opt->osd_keepalive_timeout);
+
+ if (fsopt->flags & CEPH_MOUNT_OPT_DIRSTAT)
+ seq_puts(m, ",dirstat");
+ if ((fsopt->flags & CEPH_MOUNT_OPT_RBYTES) == 0)
+ seq_puts(m, ",norbytes");
+ if (fsopt->flags & CEPH_MOUNT_OPT_NOASYNCREADDIR)
seq_puts(m, ",noasyncreaddir");
- if (args->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT)
- seq_printf(m, ",mount_timeout=%d", args->mount_timeout);
- if (args->osd_idle_ttl != CEPH_OSD_IDLE_TTL_DEFAULT)
- seq_printf(m, ",osd_idle_ttl=%d", args->osd_idle_ttl);
- if (args->osd_timeout != CEPH_OSD_TIMEOUT_DEFAULT)
- seq_printf(m, ",osdtimeout=%d", args->osd_timeout);
- if (args->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT)
- seq_printf(m, ",osdkeepalivetimeout=%d",
- args->osd_keepalive_timeout);
- if (args->wsize)
- seq_printf(m, ",wsize=%d", args->wsize);
- if (args->rsize != CEPH_MOUNT_RSIZE_DEFAULT)
- seq_printf(m, ",rsize=%d", args->rsize);
- if (args->congestion_kb != default_congestion_kb())
- seq_printf(m, ",write_congestion_kb=%d", args->congestion_kb);
- if (args->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
+ if (fsopt->wsize)
+ seq_printf(m, ",wsize=%d", fsopt->wsize);
+ if (fsopt->rsize != CEPH_MOUNT_RSIZE_DEFAULT)
+ seq_printf(m, ",rsize=%d", fsopt->rsize);
+ if (fsopt->congestion_kb != default_congestion_kb())
+ seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
+ if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
seq_printf(m, ",caps_wanted_delay_min=%d",
- args->caps_wanted_delay_min);
- if (args->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT)
+ fsopt->caps_wanted_delay_min);
+ if (fsopt->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT)
seq_printf(m, ",caps_wanted_delay_max=%d",
- args->caps_wanted_delay_max);
- if (args->cap_release_safety != CEPH_CAP_RELEASE_SAFETY_DEFAULT)
+ fsopt->caps_wanted_delay_max);
+ if (fsopt->cap_release_safety != CEPH_CAP_RELEASE_SAFETY_DEFAULT)
seq_printf(m, ",cap_release_safety=%d",
- args->cap_release_safety);
- if (args->max_readdir != CEPH_MAX_READDIR_DEFAULT)
- seq_printf(m, ",readdir_max_entries=%d", args->max_readdir);
- if (args->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
- seq_printf(m, ",readdir_max_bytes=%d", args->max_readdir_bytes);
- if (strcmp(args->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
- seq_printf(m, ",snapdirname=%s", args->snapdir_name);
- if (args->name)
- seq_printf(m, ",name=%s", args->name);
- if (args->secret)
- seq_puts(m, ",secret=<hidden>");
+ fsopt->cap_release_safety);
+ if (fsopt->max_readdir != CEPH_MAX_READDIR_DEFAULT)
+ seq_printf(m, ",readdir_max_entries=%d", fsopt->max_readdir);
+ if (fsopt->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
+ seq_printf(m, ",readdir_max_bytes=%d", fsopt->max_readdir_bytes);
+ if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
+ seq_printf(m, ",snapdirname=%s", fsopt->snapdir_name);
return 0;
}
/*
+ * handle any mon messages the standard library doesn't understand.
+ * return error if we don't either.
+ */
+static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg)
+{
+ struct ceph_fs_client *fsc = client->private;
+ int type = le16_to_cpu(msg->hdr.type);
+
+ switch (type) {
+ case CEPH_MSG_MDS_MAP:
+ ceph_mdsc_handle_map(fsc->mdsc, msg);
+ return 0;
+
+ default:
+ return -1;
+ }
+}
+
+/*
+ * create a new fs client
+ */
+static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
+ struct ceph_options *opt)
+{
+ struct ceph_fs_client *fsc;
+ int err = -ENOMEM;
+
+ fsc = kzalloc(sizeof(*fsc), GFP_KERNEL);
+ if (!fsc)
+ return ERR_PTR(-ENOMEM);
+
+ fsc->client = ceph_create_client(opt, fsc);
+ if (IS_ERR(fsc->client)) {
+ err = PTR_ERR(fsc->client);
+ goto fail;
+ }
+ fsc->client->extra_mon_dispatch = extra_mon_dispatch;
+ fsc->client->supported_features |= CEPH_FEATURE_FLOCK;
+ fsc->client->monc.want_mdsmap = 1;
+
+ fsc->mount_options = fsopt;
+
+ fsc->sb = NULL;
+ fsc->mount_state = CEPH_MOUNT_MOUNTING;
+
+ atomic_long_set(&fsc->writeback_count, 0);
+
+ err = bdi_init(&fsc->backing_dev_info);
+ if (err < 0)
+ goto fail_client;
+
+ err = -ENOMEM;
+ fsc->wb_wq = create_workqueue("ceph-writeback");
+ if (fsc->wb_wq == NULL)
+ goto fail_bdi;
+ fsc->pg_inv_wq = create_singlethread_workqueue("ceph-pg-invalid");
+ if (fsc->pg_inv_wq == NULL)
+ goto fail_wb_wq;
+ fsc->trunc_wq = create_singlethread_workqueue("ceph-trunc");
+ if (fsc->trunc_wq == NULL)
+ goto fail_pg_inv_wq;
+
+ /* set up mempools */
+ err = -ENOMEM;
+ fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10,
+ fsc->mount_options->wsize >> PAGE_CACHE_SHIFT);
+ if (!fsc->wb_pagevec_pool)
+ goto fail_trunc_wq;
+
+ /* caps */
+ fsc->min_caps = fsopt->max_readdir;
+
+ return fsc;
+
+fail_trunc_wq:
+ destroy_workqueue(fsc->trunc_wq);
+fail_pg_inv_wq:
+ destroy_workqueue(fsc->pg_inv_wq);
+fail_wb_wq:
+ destroy_workqueue(fsc->wb_wq);
+fail_bdi:
+ bdi_destroy(&fsc->backing_dev_info);
+fail_client:
+ ceph_destroy_client(fsc->client);
+fail:
+ kfree(fsc);
+ return ERR_PTR(err);
+}
+
+static void destroy_fs_client(struct ceph_fs_client *fsc)
+{
+ dout("destroy_fs_client %p\n", fsc);
+
+ destroy_workqueue(fsc->wb_wq);
+ destroy_workqueue(fsc->pg_inv_wq);
+ destroy_workqueue(fsc->trunc_wq);
+
+ bdi_destroy(&fsc->backing_dev_info);
+
+ mempool_destroy(fsc->wb_pagevec_pool);
+
+ destroy_mount_options(fsc->mount_options);
+
+ ceph_fs_debugfs_cleanup(fsc);
+
+ ceph_destroy_client(fsc->client);
+
+ kfree(fsc);
+ dout("destroy_fs_client %p done\n", fsc);
+}
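create_fs_client() follows the usual kernel goto-unwind ladder: each label
releases exactly what was acquired before the jump, in reverse order, so
every failure path frees everything and nothing twice. Condensed, with
hypothetical acquire/release helpers:

    err = acquire_a();
    if (err)
            goto fail;
    err = acquire_b();
    if (err)
            goto fail_a;
    err = acquire_c();
    if (err)
            goto fail_b;
    return 0;               /* success: everything held */

    fail_b:
            release_b();
    fail_a:
            release_a();
    fail:
            return err;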
+
+/*
* caches
*/
struct kmem_cache *ceph_inode_cachep;
@@ -274,12 +567,12 @@
*/
static void ceph_umount_begin(struct super_block *sb)
{
- struct ceph_client *client = ceph_sb_to_client(sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
dout("ceph_umount_begin - starting forced umount\n");
- if (!client)
+ if (!fsc)
return;
- client->mount_state = CEPH_MOUNT_SHUTDOWN;
+ fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
return;
}
@@ -294,483 +587,15 @@
.umount_begin = ceph_umount_begin,
};
-
-const char *ceph_msg_type_name(int type)
-{
- switch (type) {
- case CEPH_MSG_SHUTDOWN: return "shutdown";
- case CEPH_MSG_PING: return "ping";
- case CEPH_MSG_AUTH: return "auth";
- case CEPH_MSG_AUTH_REPLY: return "auth_reply";
- case CEPH_MSG_MON_MAP: return "mon_map";
- case CEPH_MSG_MON_GET_MAP: return "mon_get_map";
- case CEPH_MSG_MON_SUBSCRIBE: return "mon_subscribe";
- case CEPH_MSG_MON_SUBSCRIBE_ACK: return "mon_subscribe_ack";
- case CEPH_MSG_STATFS: return "statfs";
- case CEPH_MSG_STATFS_REPLY: return "statfs_reply";
- case CEPH_MSG_MDS_MAP: return "mds_map";
- case CEPH_MSG_CLIENT_SESSION: return "client_session";
- case CEPH_MSG_CLIENT_RECONNECT: return "client_reconnect";
- case CEPH_MSG_CLIENT_REQUEST: return "client_request";
- case CEPH_MSG_CLIENT_REQUEST_FORWARD: return "client_request_forward";
- case CEPH_MSG_CLIENT_REPLY: return "client_reply";
- case CEPH_MSG_CLIENT_CAPS: return "client_caps";
- case CEPH_MSG_CLIENT_CAPRELEASE: return "client_cap_release";
- case CEPH_MSG_CLIENT_SNAP: return "client_snap";
- case CEPH_MSG_CLIENT_LEASE: return "client_lease";
- case CEPH_MSG_OSD_MAP: return "osd_map";
- case CEPH_MSG_OSD_OP: return "osd_op";
- case CEPH_MSG_OSD_OPREPLY: return "osd_opreply";
- default: return "unknown";
- }
-}
-
-
-/*
- * mount options
- */
-enum {
- Opt_wsize,
- Opt_rsize,
- Opt_osdtimeout,
- Opt_osdkeepalivetimeout,
- Opt_mount_timeout,
- Opt_osd_idle_ttl,
- Opt_caps_wanted_delay_min,
- Opt_caps_wanted_delay_max,
- Opt_cap_release_safety,
- Opt_readdir_max_entries,
- Opt_readdir_max_bytes,
- Opt_congestion_kb,
- Opt_last_int,
- /* int args above */
- Opt_fsid,
- Opt_snapdirname,
- Opt_name,
- Opt_secret,
- Opt_last_string,
- /* string args above */
- Opt_ip,
- Opt_noshare,
- Opt_dirstat,
- Opt_nodirstat,
- Opt_rbytes,
- Opt_norbytes,
- Opt_nocrc,
- Opt_noasyncreaddir,
-};
-
-static match_table_t arg_tokens = {
- {Opt_wsize, "wsize=%d"},
- {Opt_rsize, "rsize=%d"},
- {Opt_osdtimeout, "osdtimeout=%d"},
- {Opt_osdkeepalivetimeout, "osdkeepalive=%d"},
- {Opt_mount_timeout, "mount_timeout=%d"},
- {Opt_osd_idle_ttl, "osd_idle_ttl=%d"},
- {Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"},
- {Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"},
- {Opt_cap_release_safety, "cap_release_safety=%d"},
- {Opt_readdir_max_entries, "readdir_max_entries=%d"},
- {Opt_readdir_max_bytes, "readdir_max_bytes=%d"},
- {Opt_congestion_kb, "write_congestion_kb=%d"},
- /* int args above */
- {Opt_fsid, "fsid=%s"},
- {Opt_snapdirname, "snapdirname=%s"},
- {Opt_name, "name=%s"},
- {Opt_secret, "secret=%s"},
- /* string args above */
- {Opt_ip, "ip=%s"},
- {Opt_noshare, "noshare"},
- {Opt_dirstat, "dirstat"},
- {Opt_nodirstat, "nodirstat"},
- {Opt_rbytes, "rbytes"},
- {Opt_norbytes, "norbytes"},
- {Opt_nocrc, "nocrc"},
- {Opt_noasyncreaddir, "noasyncreaddir"},
- {-1, NULL}
-};
-
-static int parse_fsid(const char *str, struct ceph_fsid *fsid)
-{
- int i = 0;
- char tmp[3];
- int err = -EINVAL;
- int d;
-
- dout("parse_fsid '%s'\n", str);
- tmp[2] = 0;
- while (*str && i < 16) {
- if (ispunct(*str)) {
- str++;
- continue;
- }
- if (!isxdigit(str[0]) || !isxdigit(str[1]))
- break;
- tmp[0] = str[0];
- tmp[1] = str[1];
- if (sscanf(tmp, "%x", &d) < 1)
- break;
- fsid->fsid[i] = d & 0xff;
- i++;
- str += 2;
- }
-
- if (i == 16)
- err = 0;
- dout("parse_fsid ret %d got fsid %pU", err, fsid);
- return err;
-}
-
-static struct ceph_mount_args *parse_mount_args(int flags, char *options,
- const char *dev_name,
- const char **path)
-{
- struct ceph_mount_args *args;
- const char *c;
- int err = -ENOMEM;
- substring_t argstr[MAX_OPT_ARGS];
-
- args = kzalloc(sizeof(*args), GFP_KERNEL);
- if (!args)
- return ERR_PTR(-ENOMEM);
- args->mon_addr = kcalloc(CEPH_MAX_MON, sizeof(*args->mon_addr),
- GFP_KERNEL);
- if (!args->mon_addr)
- goto out;
-
- dout("parse_mount_args %p, dev_name '%s'\n", args, dev_name);
-
- /* start with defaults */
- args->sb_flags = flags;
- args->flags = CEPH_OPT_DEFAULT;
- args->osd_timeout = CEPH_OSD_TIMEOUT_DEFAULT;
- args->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT;
- args->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; /* seconds */
- args->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT; /* seconds */
- args->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
- args->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
- args->rsize = CEPH_MOUNT_RSIZE_DEFAULT;
- args->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
- args->cap_release_safety = CEPH_CAP_RELEASE_SAFETY_DEFAULT;
- args->max_readdir = CEPH_MAX_READDIR_DEFAULT;
- args->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
- args->congestion_kb = default_congestion_kb();
-
- /* ip1[:port1][,ip2[:port2]...]:/subdir/in/fs */
- err = -EINVAL;
- if (!dev_name)
- goto out;
- *path = strstr(dev_name, ":/");
- if (*path == NULL) {
- pr_err("device name is missing path (no :/ in %s)\n",
- dev_name);
- goto out;
- }
-
- /* get mon ip(s) */
- err = ceph_parse_ips(dev_name, *path, args->mon_addr,
- CEPH_MAX_MON, &args->num_mon);
- if (err < 0)
- goto out;
-
- /* path on server */
- *path += 2;
- dout("server path '%s'\n", *path);
-
- /* parse mount options */
- while ((c = strsep(&options, ",")) != NULL) {
- int token, intval, ret;
- if (!*c)
- continue;
- err = -EINVAL;
- token = match_token((char *)c, arg_tokens, argstr);
- if (token < 0) {
- pr_err("bad mount option at '%s'\n", c);
- goto out;
- }
- if (token < Opt_last_int) {
- ret = match_int(&argstr[0], &intval);
- if (ret < 0) {
- pr_err("bad mount option arg (not int) "
- "at '%s'\n", c);
- continue;
- }
- dout("got int token %d val %d\n", token, intval);
- } else if (token > Opt_last_int && token < Opt_last_string) {
- dout("got string token %d val %s\n", token,
- argstr[0].from);
- } else {
- dout("got token %d\n", token);
- }
- switch (token) {
- case Opt_ip:
- err = ceph_parse_ips(argstr[0].from,
- argstr[0].to,
- &args->my_addr,
- 1, NULL);
- if (err < 0)
- goto out;
- args->flags |= CEPH_OPT_MYIP;
- break;
-
- case Opt_fsid:
- err = parse_fsid(argstr[0].from, &args->fsid);
- if (err == 0)
- args->flags |= CEPH_OPT_FSID;
- break;
- case Opt_snapdirname:
- kfree(args->snapdir_name);
- args->snapdir_name = kstrndup(argstr[0].from,
- argstr[0].to-argstr[0].from,
- GFP_KERNEL);
- break;
- case Opt_name:
- args->name = kstrndup(argstr[0].from,
- argstr[0].to-argstr[0].from,
- GFP_KERNEL);
- break;
- case Opt_secret:
- args->secret = kstrndup(argstr[0].from,
- argstr[0].to-argstr[0].from,
- GFP_KERNEL);
- break;
-
- /* misc */
- case Opt_wsize:
- args->wsize = intval;
- break;
- case Opt_rsize:
- args->rsize = intval;
- break;
- case Opt_osdtimeout:
- args->osd_timeout = intval;
- break;
- case Opt_osdkeepalivetimeout:
- args->osd_keepalive_timeout = intval;
- break;
- case Opt_osd_idle_ttl:
- args->osd_idle_ttl = intval;
- break;
- case Opt_mount_timeout:
- args->mount_timeout = intval;
- break;
- case Opt_caps_wanted_delay_min:
- args->caps_wanted_delay_min = intval;
- break;
- case Opt_caps_wanted_delay_max:
- args->caps_wanted_delay_max = intval;
- break;
- case Opt_readdir_max_entries:
- args->max_readdir = intval;
- break;
- case Opt_readdir_max_bytes:
- args->max_readdir_bytes = intval;
- break;
- case Opt_congestion_kb:
- args->congestion_kb = intval;
- break;
-
- case Opt_noshare:
- args->flags |= CEPH_OPT_NOSHARE;
- break;
-
- case Opt_dirstat:
- args->flags |= CEPH_OPT_DIRSTAT;
- break;
- case Opt_nodirstat:
- args->flags &= ~CEPH_OPT_DIRSTAT;
- break;
- case Opt_rbytes:
- args->flags |= CEPH_OPT_RBYTES;
- break;
- case Opt_norbytes:
- args->flags &= ~CEPH_OPT_RBYTES;
- break;
- case Opt_nocrc:
- args->flags |= CEPH_OPT_NOCRC;
- break;
- case Opt_noasyncreaddir:
- args->flags |= CEPH_OPT_NOASYNCREADDIR;
- break;
-
- default:
- BUG_ON(token);
- }
- }
- return args;
-
-out:
- kfree(args->mon_addr);
- kfree(args);
- return ERR_PTR(err);
-}
-
-static void destroy_mount_args(struct ceph_mount_args *args)
-{
- dout("destroy_mount_args %p\n", args);
- kfree(args->snapdir_name);
- args->snapdir_name = NULL;
- kfree(args->name);
- args->name = NULL;
- kfree(args->secret);
- args->secret = NULL;
- kfree(args);
-}
-
-/*
- * create a fresh client instance
- */
-static struct ceph_client *ceph_create_client(struct ceph_mount_args *args)
-{
- struct ceph_client *client;
- int err = -ENOMEM;
-
- client = kzalloc(sizeof(*client), GFP_KERNEL);
- if (client == NULL)
- return ERR_PTR(-ENOMEM);
-
- mutex_init(&client->mount_mutex);
-
- init_waitqueue_head(&client->auth_wq);
-
- client->sb = NULL;
- client->mount_state = CEPH_MOUNT_MOUNTING;
- client->mount_args = args;
-
- client->msgr = NULL;
-
- client->auth_err = 0;
- atomic_long_set(&client->writeback_count, 0);
-
- err = bdi_init(&client->backing_dev_info);
- if (err < 0)
- goto fail;
-
- err = -ENOMEM;
- client->wb_wq = create_workqueue("ceph-writeback");
- if (client->wb_wq == NULL)
- goto fail_bdi;
- client->pg_inv_wq = create_singlethread_workqueue("ceph-pg-invalid");
- if (client->pg_inv_wq == NULL)
- goto fail_wb_wq;
- client->trunc_wq = create_singlethread_workqueue("ceph-trunc");
- if (client->trunc_wq == NULL)
- goto fail_pg_inv_wq;
-
- /* set up mempools */
- err = -ENOMEM;
- client->wb_pagevec_pool = mempool_create_kmalloc_pool(10,
- client->mount_args->wsize >> PAGE_CACHE_SHIFT);
- if (!client->wb_pagevec_pool)
- goto fail_trunc_wq;
-
- /* caps */
- client->min_caps = args->max_readdir;
-
- /* subsystems */
- err = ceph_monc_init(&client->monc, client);
- if (err < 0)
- goto fail_mempool;
- err = ceph_osdc_init(&client->osdc, client);
- if (err < 0)
- goto fail_monc;
- err = ceph_mdsc_init(&client->mdsc, client);
- if (err < 0)
- goto fail_osdc;
- return client;
-
-fail_osdc:
- ceph_osdc_stop(&client->osdc);
-fail_monc:
- ceph_monc_stop(&client->monc);
-fail_mempool:
- mempool_destroy(client->wb_pagevec_pool);
-fail_trunc_wq:
- destroy_workqueue(client->trunc_wq);
-fail_pg_inv_wq:
- destroy_workqueue(client->pg_inv_wq);
-fail_wb_wq:
- destroy_workqueue(client->wb_wq);
-fail_bdi:
- bdi_destroy(&client->backing_dev_info);
-fail:
- kfree(client);
- return ERR_PTR(err);
-}
-
-static void ceph_destroy_client(struct ceph_client *client)
-{
- dout("destroy_client %p\n", client);
-
- /* unmount */
- ceph_mdsc_stop(&client->mdsc);
- ceph_osdc_stop(&client->osdc);
-
- /*
- * make sure mds and osd connections close out before destroying
- * the auth module, which is needed to free those connections'
- * ceph_authorizers.
- */
- ceph_msgr_flush();
-
- ceph_monc_stop(&client->monc);
-
- ceph_debugfs_client_cleanup(client);
- destroy_workqueue(client->wb_wq);
- destroy_workqueue(client->pg_inv_wq);
- destroy_workqueue(client->trunc_wq);
-
- bdi_destroy(&client->backing_dev_info);
-
- if (client->msgr)
- ceph_messenger_destroy(client->msgr);
- mempool_destroy(client->wb_pagevec_pool);
-
- destroy_mount_args(client->mount_args);
-
- kfree(client);
- dout("destroy_client %p done\n", client);
-}
-
-/*
- * Initially learn our fsid, or verify an fsid matches.
- */
-int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid)
-{
- if (client->have_fsid) {
- if (ceph_fsid_compare(&client->fsid, fsid)) {
- pr_err("bad fsid, had %pU got %pU",
- &client->fsid, fsid);
- return -1;
- }
- } else {
- pr_info("client%lld fsid %pU\n", client->monc.auth->global_id,
- fsid);
- memcpy(&client->fsid, fsid, sizeof(*fsid));
- ceph_debugfs_client_init(client);
- client->have_fsid = true;
- }
- return 0;
-}
-
-/*
- * true if we have the mon map (and have thus joined the cluster)
- */
-static int have_mon_and_osd_map(struct ceph_client *client)
-{
- return client->monc.monmap && client->monc.monmap->epoch &&
- client->osdc.osdmap && client->osdc.osdmap->epoch;
-}
-
/*
* Bootstrap mount by opening the root directory. Note the mount
* @started time from caller, and time out if this takes too long.
*/
-static struct dentry *open_root_dentry(struct ceph_client *client,
+static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
const char *path,
unsigned long started)
{
- struct ceph_mds_client *mdsc = &client->mdsc;
+ struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req = NULL;
int err;
struct dentry *root;
@@ -784,14 +609,14 @@
req->r_ino1.ino = CEPH_INO_ROOT;
req->r_ino1.snap = CEPH_NOSNAP;
req->r_started = started;
- req->r_timeout = client->mount_args->mount_timeout * HZ;
+ req->r_timeout = fsc->client->options->mount_timeout * HZ;
req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
req->r_num_caps = 2;
err = ceph_mdsc_do_request(mdsc, NULL, req);
if (err == 0) {
dout("open_root_inode success\n");
if (ceph_ino(req->r_target_inode) == CEPH_INO_ROOT &&
- client->sb->s_root == NULL)
+ fsc->sb->s_root == NULL)
root = d_alloc_root(req->r_target_inode);
else
root = d_obtain_alias(req->r_target_inode);
@@ -804,105 +629,86 @@
return root;
}
+
+
+
/*
* mount: join the ceph cluster, and open root directory.
*/
-static int ceph_mount(struct ceph_client *client, struct vfsmount *mnt,
+static int ceph_mount(struct ceph_fs_client *fsc, struct vfsmount *mnt,
const char *path)
{
- struct ceph_entity_addr *myaddr = NULL;
int err;
- unsigned long timeout = client->mount_args->mount_timeout * HZ;
unsigned long started = jiffies; /* note the start time */
struct dentry *root;
+ int first = 0; /* first vfsmount for this super_block */
dout("mount start\n");
- mutex_lock(&client->mount_mutex);
+ mutex_lock(&fsc->client->mount_mutex);
- /* initialize the messenger */
- if (client->msgr == NULL) {
- if (ceph_test_opt(client, MYIP))
- myaddr = &client->mount_args->my_addr;
- client->msgr = ceph_messenger_create(myaddr);
- if (IS_ERR(client->msgr)) {
- err = PTR_ERR(client->msgr);
- client->msgr = NULL;
- goto out;
- }
- client->msgr->nocrc = ceph_test_opt(client, NOCRC);
- }
-
- /* open session, and wait for mon, mds, and osd maps */
- err = ceph_monc_open_session(&client->monc);
+ err = __ceph_open_session(fsc->client, started);
if (err < 0)
goto out;
- while (!have_mon_and_osd_map(client)) {
- err = -EIO;
- if (timeout && time_after_eq(jiffies, started + timeout))
- goto out;
-
- /* wait */
- dout("mount waiting for mon_map\n");
- err = wait_event_interruptible_timeout(client->auth_wq,
- have_mon_and_osd_map(client) || (client->auth_err < 0),
- timeout);
- if (err == -EINTR || err == -ERESTARTSYS)
- goto out;
- if (client->auth_err < 0) {
- err = client->auth_err;
- goto out;
- }
- }
-
dout("mount opening root\n");
- root = open_root_dentry(client, "", started);
+ root = open_root_dentry(fsc, "", started);
if (IS_ERR(root)) {
err = PTR_ERR(root);
goto out;
}
- if (client->sb->s_root)
+ if (fsc->sb->s_root) {
dput(root);
- else
- client->sb->s_root = root;
+ } else {
+ fsc->sb->s_root = root;
+ first = 1;
+
+ err = ceph_fs_debugfs_init(fsc);
+ if (err < 0)
+ goto fail;
+ }
if (path[0] == 0) {
dget(root);
} else {
dout("mount opening base mountpoint\n");
- root = open_root_dentry(client, path, started);
+ root = open_root_dentry(fsc, path, started);
if (IS_ERR(root)) {
err = PTR_ERR(root);
- dput(client->sb->s_root);
- client->sb->s_root = NULL;
- goto out;
+ goto fail;
}
}
mnt->mnt_root = root;
- mnt->mnt_sb = client->sb;
+ mnt->mnt_sb = fsc->sb;
- client->mount_state = CEPH_MOUNT_MOUNTED;
+ fsc->mount_state = CEPH_MOUNT_MOUNTED;
dout("mount success\n");
err = 0;
out:
- mutex_unlock(&client->mount_mutex);
+ mutex_unlock(&fsc->client->mount_mutex);
return err;
+
+fail:
+ if (first) {
+ dput(fsc->sb->s_root);
+ fsc->sb->s_root = NULL;
+ }
+ goto out;
}
static int ceph_set_super(struct super_block *s, void *data)
{
- struct ceph_client *client = data;
+ struct ceph_fs_client *fsc = data;
int ret;
dout("set_super %p data %p\n", s, data);
- s->s_flags = client->mount_args->sb_flags;
+ s->s_flags = fsc->mount_options->sb_flags;
s->s_maxbytes = 1ULL << 40; /* temp value until we get mdsmap */
- s->s_fs_info = client;
- client->sb = s;
+ s->s_fs_info = fsc;
+ fsc->sb = s;
s->s_op = &ceph_super_ops;
s->s_export_op = &ceph_export_ops;
@@ -917,7 +723,7 @@
fail:
s->s_fs_info = NULL;
- client->sb = NULL;
+ fsc->sb = NULL;
return ret;
}
@@ -926,30 +732,23 @@
*/
static int ceph_compare_super(struct super_block *sb, void *data)
{
- struct ceph_client *new = data;
- struct ceph_mount_args *args = new->mount_args;
- struct ceph_client *other = ceph_sb_to_client(sb);
- int i;
+ struct ceph_fs_client *new = data;
+ struct ceph_mount_options *fsopt = new->mount_options;
+ struct ceph_options *opt = new->client->options;
+ struct ceph_fs_client *other = ceph_sb_to_client(sb);
dout("ceph_compare_super %p\n", sb);
- if (args->flags & CEPH_OPT_FSID) {
- if (ceph_fsid_compare(&args->fsid, &other->fsid)) {
- dout("fsid doesn't match\n");
- return 0;
- }
- } else {
- /* do we share (a) monitor? */
- for (i = 0; i < new->monc.monmap->num_mon; i++)
- if (ceph_monmap_contains(other->monc.monmap,
- &new->monc.monmap->mon_inst[i].addr))
- break;
- if (i == new->monc.monmap->num_mon) {
- dout("mon ip not part of monmap\n");
- return 0;
- }
- dout("mon ip matches existing sb %p\n", sb);
+
+ if (compare_mount_options(fsopt, opt, other)) {
+ dout("monitor(s)/mount options don't match\n");
+ return 0;
}
- if (args->sb_flags != other->mount_args->sb_flags) {
+ if ((opt->flags & CEPH_OPT_FSID) &&
+ ceph_fsid_compare(&opt->fsid, &other->client->fsid)) {
+ dout("fsid doesn't match\n");
+ return 0;
+ }
+ if (fsopt->sb_flags != other->mount_options->sb_flags) {
dout("flags differ\n");
return 0;
}
@@ -961,19 +760,20 @@
*/
static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
-static int ceph_register_bdi(struct super_block *sb, struct ceph_client *client)
+static int ceph_register_bdi(struct super_block *sb,
+ struct ceph_fs_client *fsc)
{
int err;
/* set ra_pages based on rsize mount option? */
- if (client->mount_args->rsize >= PAGE_CACHE_SIZE)
- client->backing_dev_info.ra_pages =
- (client->mount_args->rsize + PAGE_CACHE_SIZE - 1)
+ if (fsc->mount_options->rsize >= PAGE_CACHE_SIZE)
+ fsc->backing_dev_info.ra_pages =
+ (fsc->mount_options->rsize + PAGE_CACHE_SIZE - 1)
>> PAGE_SHIFT;
- err = bdi_register(&client->backing_dev_info, NULL, "ceph-%d",
+ err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%d",
atomic_long_inc_return(&bdi_seq));
if (!err)
- sb->s_bdi = &client->backing_dev_info;
+ sb->s_bdi = &fsc->backing_dev_info;
return err;
}
@@ -982,46 +782,52 @@
struct vfsmount *mnt)
{
struct super_block *sb;
- struct ceph_client *client;
+ struct ceph_fs_client *fsc;
int err;
int (*compare_super)(struct super_block *, void *) = ceph_compare_super;
const char *path = NULL;
- struct ceph_mount_args *args;
+ struct ceph_mount_options *fsopt = NULL;
+ struct ceph_options *opt = NULL;
dout("ceph_get_sb\n");
- args = parse_mount_args(flags, data, dev_name, &path);
- if (IS_ERR(args)) {
- err = PTR_ERR(args);
+ err = parse_mount_options(&fsopt, &opt, flags, data, dev_name, &path);
+ if (err < 0)
goto out_final;
- }
/* create client (which we may/may not use) */
- client = ceph_create_client(args);
- if (IS_ERR(client)) {
- err = PTR_ERR(client);
+ fsc = create_fs_client(fsopt, opt);
+ if (IS_ERR(fsc)) {
+ err = PTR_ERR(fsc);
+ kfree(fsopt);
+ kfree(opt);
goto out_final;
}
- if (client->mount_args->flags & CEPH_OPT_NOSHARE)
+ err = ceph_mdsc_init(fsc);
+ if (err < 0)
+ goto out;
+
+ if (ceph_test_opt(fsc->client, NOSHARE))
compare_super = NULL;
- sb = sget(fs_type, compare_super, ceph_set_super, client);
+ sb = sget(fs_type, compare_super, ceph_set_super, fsc);
if (IS_ERR(sb)) {
err = PTR_ERR(sb);
goto out;
}
- if (ceph_sb_to_client(sb) != client) {
- ceph_destroy_client(client);
- client = ceph_sb_to_client(sb);
- dout("get_sb got existing client %p\n", client);
+ if (ceph_sb_to_client(sb) != fsc) {
+ ceph_mdsc_destroy(fsc);
+ destroy_fs_client(fsc);
+ fsc = ceph_sb_to_client(sb);
+ dout("get_sb got existing client %p\n", fsc);
} else {
- dout("get_sb using new client %p\n", client);
- err = ceph_register_bdi(sb, client);
+ dout("get_sb using new client %p\n", fsc);
+ err = ceph_register_bdi(sb, fsc);
if (err < 0)
goto out_splat;
}
- err = ceph_mount(client, mnt, path);
+ err = ceph_mount(fsc, mnt, path);
if (err < 0)
goto out_splat;
dout("root %p inode %p ino %llx.%llx\n", mnt->mnt_root,
@@ -1029,12 +835,13 @@
return 0;
out_splat:
- ceph_mdsc_close_sessions(&client->mdsc);
+ ceph_mdsc_close_sessions(fsc->mdsc);
deactivate_locked_super(sb);
goto out_final;
out:
- ceph_destroy_client(client);
+ ceph_mdsc_destroy(fsc);
+ destroy_fs_client(fsc);
out_final:
dout("ceph_get_sb fail %d\n", err);
return err;
@@ -1042,11 +849,12 @@
static void ceph_kill_sb(struct super_block *s)
{
- struct ceph_client *client = ceph_sb_to_client(s);
+ struct ceph_fs_client *fsc = ceph_sb_to_client(s);
dout("kill_sb %p\n", s);
- ceph_mdsc_pre_umount(&client->mdsc);
+ ceph_mdsc_pre_umount(fsc->mdsc);
kill_anon_super(s); /* will call put_super after sb is r/o */
- ceph_destroy_client(client);
+ ceph_mdsc_destroy(fsc);
+ destroy_fs_client(fsc);
}
static struct file_system_type ceph_fs_type = {
@@ -1062,36 +870,20 @@
static int __init init_ceph(void)
{
- int ret = 0;
-
- ret = ceph_debugfs_init();
- if (ret < 0)
- goto out;
-
- ret = ceph_msgr_init();
- if (ret < 0)
- goto out_debugfs;
-
- ret = init_caches();
+ int ret = init_caches();
if (ret)
- goto out_msgr;
+ goto out;
ret = register_filesystem(&ceph_fs_type);
if (ret)
goto out_icache;
- pr_info("loaded (mon/mds/osd proto %d/%d/%d, osdmap %d/%d %d/%d)\n",
- CEPH_MONC_PROTOCOL, CEPH_MDSC_PROTOCOL, CEPH_OSDC_PROTOCOL,
- CEPH_OSDMAP_VERSION, CEPH_OSDMAP_VERSION_EXT,
- CEPH_OSDMAP_INC_VERSION, CEPH_OSDMAP_INC_VERSION_EXT);
+ pr_info("loaded (mds proto %d)\n", CEPH_MDSC_PROTOCOL);
+
return 0;
out_icache:
destroy_caches();
-out_msgr:
- ceph_msgr_exit();
-out_debugfs:
- ceph_debugfs_cleanup();
out:
return ret;
}
@@ -1101,8 +893,6 @@
dout("exit_ceph\n");
unregister_filesystem(&ceph_fs_type);
destroy_caches();
- ceph_msgr_exit();
- ceph_debugfs_cleanup();
}
module_init(init_ceph);
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 2482d69..1886294 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -1,7 +1,7 @@
#ifndef _FS_CEPH_SUPER_H
#define _FS_CEPH_SUPER_H
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
#include <asm/unaligned.h>
#include <linux/backing-dev.h>
@@ -14,13 +14,7 @@
#include <linux/writeback.h>
#include <linux/slab.h>
-#include "types.h"
-#include "messenger.h"
-#include "msgpool.h"
-#include "mon_client.h"
-#include "mds_client.h"
-#include "osd_client.h"
-#include "ceph_fs.h"
+#include <linux/ceph/libceph.h>
/* f_type in struct statfs */
#define CEPH_SUPER_MAGIC 0x00c36400
@@ -30,42 +24,25 @@
#define CEPH_BLOCK_SHIFT 20 /* 1 MB */
#define CEPH_BLOCK (1 << CEPH_BLOCK_SHIFT)
-/*
- * Supported features
- */
-#define CEPH_FEATURE_SUPPORTED CEPH_FEATURE_NOSRCADDR | CEPH_FEATURE_FLOCK
-#define CEPH_FEATURE_REQUIRED CEPH_FEATURE_NOSRCADDR
+#define CEPH_MOUNT_OPT_DIRSTAT (1<<4) /* `cat dirname` for stats */
+#define CEPH_MOUNT_OPT_RBYTES (1<<5) /* dir st_bytes = rbytes */
+#define CEPH_MOUNT_OPT_NOASYNCREADDIR (1<<7) /* no dcache readdir */
-/*
- * mount options
- */
-#define CEPH_OPT_FSID (1<<0)
-#define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */
-#define CEPH_OPT_MYIP (1<<2) /* specified my ip */
-#define CEPH_OPT_DIRSTAT (1<<4) /* funky `cat dirname` for stats */
-#define CEPH_OPT_RBYTES (1<<5) /* dir st_bytes = rbytes */
-#define CEPH_OPT_NOCRC (1<<6) /* no data crc on writes */
-#define CEPH_OPT_NOASYNCREADDIR (1<<7) /* no dcache readdir */
+#define CEPH_MOUNT_OPT_DEFAULT (CEPH_MOUNT_OPT_RBYTES)
-#define CEPH_OPT_DEFAULT (CEPH_OPT_RBYTES)
+#define ceph_set_mount_opt(fsc, opt) \
+ (fsc)->mount_options->flags |= CEPH_MOUNT_OPT_##opt;
+#define ceph_test_mount_opt(fsc, opt) \
+ (!!((fsc)->mount_options->flags & CEPH_MOUNT_OPT_##opt))
-#define ceph_set_opt(client, opt) \
- (client)->mount_args->flags |= CEPH_OPT_##opt;
-#define ceph_test_opt(client, opt) \
- (!!((client)->mount_args->flags & CEPH_OPT_##opt))
+#define CEPH_MAX_READDIR_DEFAULT 1024
+#define CEPH_MAX_READDIR_BYTES_DEFAULT (512*1024)
+#define CEPH_SNAPDIRNAME_DEFAULT ".snap"
-
-struct ceph_mount_args {
- int sb_flags;
+struct ceph_mount_options {
int flags;
- struct ceph_fsid fsid;
- struct ceph_entity_addr my_addr;
- int num_mon;
- struct ceph_entity_addr *mon_addr;
- int mount_timeout;
- int osd_idle_ttl;
- int osd_timeout;
- int osd_keepalive_timeout;
+ int sb_flags;
+
int wsize;
int rsize; /* max readahead */
int congestion_kb; /* max writeback in flight */
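
The ceph_set_mount_opt()/ceph_test_mount_opt() helpers added above rely on token pasting, so callers name options by their short suffix only. A compilable userspace sketch of the mechanics (the struct layout is mocked here; flag values and macro bodies are copied from this hunk):

#include <stdio.h>

#define CEPH_MOUNT_OPT_DIRSTAT        (1<<4)
#define CEPH_MOUNT_OPT_RBYTES         (1<<5)
#define CEPH_MOUNT_OPT_NOASYNCREADDIR (1<<7)

/* mocked types; only the flags word matters for the macros */
struct ceph_mount_options { int flags; };
struct ceph_fs_client { struct ceph_mount_options *mount_options; };

#define ceph_set_mount_opt(fsc, opt) \
	(fsc)->mount_options->flags |= CEPH_MOUNT_OPT_##opt;
#define ceph_test_mount_opt(fsc, opt) \
	(!!((fsc)->mount_options->flags & CEPH_MOUNT_OPT_##opt))

int main(void)
{
	struct ceph_mount_options mo = { .flags = CEPH_MOUNT_OPT_RBYTES };
	struct ceph_fs_client fsc = { .mount_options = &mo };

	ceph_set_mount_opt(&fsc, DIRSTAT)	/* ##opt expands the suffix */
	printf("dirstat=%d rbytes=%d noasync=%d\n",
	       ceph_test_mount_opt(&fsc, DIRSTAT),
	       ceph_test_mount_opt(&fsc, RBYTES),
	       ceph_test_mount_opt(&fsc, NOASYNCREADDIR));
	return 0;
}
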
@@ -73,82 +50,25 @@
int cap_release_safety;
int max_readdir; /* max readdir result (entries) */
int max_readdir_bytes; /* max readdir result (bytes) */
+
+ /*
+ * everything above this point can be memcmp'd; everything below
+ * is handled in compare_mount_options()
+ */
+
char *snapdir_name; /* default ".snap" */
- char *name;
- char *secret;
};
-/*
- * defaults
- */
-#define CEPH_MOUNT_TIMEOUT_DEFAULT 60
-#define CEPH_OSD_TIMEOUT_DEFAULT 60 /* seconds */
-#define CEPH_OSD_KEEPALIVE_DEFAULT 5
-#define CEPH_OSD_IDLE_TTL_DEFAULT 60
-#define CEPH_MOUNT_RSIZE_DEFAULT (512*1024) /* readahead */
-#define CEPH_MAX_READDIR_DEFAULT 1024
-#define CEPH_MAX_READDIR_BYTES_DEFAULT (512*1024)
-
-#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
-#define CEPH_MSG_MAX_DATA_LEN (16*1024*1024)
-
-#define CEPH_SNAPDIRNAME_DEFAULT ".snap"
-#define CEPH_AUTH_NAME_DEFAULT "guest"
-/*
- * Delay telling the MDS we no longer want caps, in case we reopen
- * the file. Delay a minimum amount of time, even if we send a cap
- * message for some other reason. Otherwise, take the oppotunity to
- * update the mds to avoid sending another message later.
- */
-#define CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT 5 /* cap release delay */
-#define CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT 60 /* cap release delay */
-
-#define CEPH_CAP_RELEASE_SAFETY_DEFAULT (CEPH_CAPS_PER_RELEASE * 4)
-
-/* mount state */
-enum {
- CEPH_MOUNT_MOUNTING,
- CEPH_MOUNT_MOUNTED,
- CEPH_MOUNT_UNMOUNTING,
- CEPH_MOUNT_UNMOUNTED,
- CEPH_MOUNT_SHUTDOWN,
-};
-
-/*
- * subtract jiffies
- */
-static inline unsigned long time_sub(unsigned long a, unsigned long b)
-{
- BUG_ON(time_after(b, a));
- return (long)a - (long)b;
-}
-
-/*
- * per-filesystem client state
- *
- * possibly shared by multiple mount points, if they are
- * mounting the same ceph filesystem/cluster.
- */
-struct ceph_client {
- struct ceph_fsid fsid;
- bool have_fsid;
-
- struct mutex mount_mutex; /* serialize mount attempts */
- struct ceph_mount_args *mount_args;
-
+struct ceph_fs_client {
struct super_block *sb;
+ struct ceph_mount_options *mount_options;
+ struct ceph_client *client;
+
unsigned long mount_state;
- wait_queue_head_t auth_wq;
-
- int auth_err;
-
int min_caps; /* min caps I added */
- struct ceph_messenger *msgr; /* messenger instance */
- struct ceph_mon_client monc;
- struct ceph_mds_client mdsc;
- struct ceph_osd_client osdc;
+ struct ceph_mds_client *mdsc;
/* writeback */
mempool_t *wb_pagevec_pool;
@@ -160,14 +80,14 @@
struct backing_dev_info backing_dev_info;
#ifdef CONFIG_DEBUG_FS
- struct dentry *debugfs_monmap;
- struct dentry *debugfs_mdsmap, *debugfs_osdmap;
- struct dentry *debugfs_dir, *debugfs_dentry_lru, *debugfs_caps;
+ struct dentry *debugfs_dentry_lru, *debugfs_caps;
struct dentry *debugfs_congestion_kb;
struct dentry *debugfs_bdi;
+ struct dentry *debugfs_mdsc, *debugfs_mdsmap;
#endif
};
+
/*
* File i/o capability. This tracks shared state with the metadata
* server that allows us to cache or writeback attributes or to read
@@ -216,8 +136,7 @@
uid_t uid;
gid_t gid;
- void *xattr_blob;
- int xattr_len;
+ struct ceph_buffer *xattr_blob;
u64 xattr_version;
u64 size;
@@ -229,8 +148,11 @@
static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap)
{
- if (atomic_dec_and_test(&capsnap->nref))
+ if (atomic_dec_and_test(&capsnap->nref)) {
+ if (capsnap->xattr_blob)
+ ceph_buffer_put(capsnap->xattr_blob);
kfree(capsnap);
+ }
}
/*
@@ -273,6 +195,20 @@
int should_free_val;
};
+/*
+ * Ceph dentry state
+ */
+struct ceph_dentry_info {
+ struct ceph_mds_session *lease_session;
+ u32 lease_gen, lease_shared_gen;
+ u32 lease_seq;
+ unsigned long lease_renew_after, lease_renew_from;
+ struct list_head lru;
+ struct dentry *dentry;
+ u64 time;
+ u64 offset;
+};
+
struct ceph_inode_xattrs_info {
/*
* (still encoded) xattr blob. we avoid the overhead of parsing
@@ -294,11 +230,6 @@
/*
* Ceph inode.
*/
-#define CEPH_I_COMPLETE 1 /* we have complete directory cached */
-#define CEPH_I_NODELAY 4 /* do not delay cap release */
-#define CEPH_I_FLUSH 8 /* do not delay flush of dirty metadata */
-#define CEPH_I_NOFLUSH 16 /* do not flush dirty caps */
-
struct ceph_inode_info {
struct ceph_vino i_vino; /* ceph ino + snap */
@@ -342,7 +273,8 @@
unsigned i_cap_exporting_issued;
struct ceph_cap_reservation i_cap_migration_resv;
struct list_head i_cap_snaps; /* snapped state pending flush to mds */
- struct ceph_snap_context *i_head_snapc; /* set if wr_buffer_head > 0 */
+ struct ceph_snap_context *i_head_snapc; /* set if wr_buffer_head > 0 or
+ dirty|flushing caps */
unsigned i_snap_caps; /* cap bits for snapped files */
int i_nr_by_mode[CEPH_FILE_MODE_NUM]; /* open file counts */
@@ -388,69 +320,9 @@
return container_of(inode, struct ceph_inode_info, vfs_inode);
}
-static inline void ceph_i_clear(struct inode *inode, unsigned mask)
+static inline struct ceph_vino ceph_vino(struct inode *inode)
{
- struct ceph_inode_info *ci = ceph_inode(inode);
-
- spin_lock(&inode->i_lock);
- ci->i_ceph_flags &= ~mask;
- spin_unlock(&inode->i_lock);
-}
-
-static inline void ceph_i_set(struct inode *inode, unsigned mask)
-{
- struct ceph_inode_info *ci = ceph_inode(inode);
-
- spin_lock(&inode->i_lock);
- ci->i_ceph_flags |= mask;
- spin_unlock(&inode->i_lock);
-}
-
-static inline bool ceph_i_test(struct inode *inode, unsigned mask)
-{
- struct ceph_inode_info *ci = ceph_inode(inode);
- bool r;
-
- smp_mb();
- r = (ci->i_ceph_flags & mask) == mask;
- return r;
-}
-
-
-/* find a specific frag @f */
-extern struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci,
- u32 f);
-
-/*
- * choose fragment for value @v. copy frag content to pfrag, if leaf
- * exists
- */
-extern u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
- struct ceph_inode_frag *pfrag,
- int *found);
-
-/*
- * Ceph dentry state
- */
-struct ceph_dentry_info {
- struct ceph_mds_session *lease_session;
- u32 lease_gen, lease_shared_gen;
- u32 lease_seq;
- unsigned long lease_renew_after, lease_renew_from;
- struct list_head lru;
- struct dentry *dentry;
- u64 time;
- u64 offset;
-};
-
-static inline struct ceph_dentry_info *ceph_dentry(struct dentry *dentry)
-{
- return (struct ceph_dentry_info *)dentry->d_fsdata;
-}
-
-static inline loff_t ceph_make_fpos(unsigned frag, unsigned off)
-{
- return ((loff_t)frag << 32) | (loff_t)off;
+ return ceph_inode(inode)->i_vino;
}
/*
@@ -469,18 +341,6 @@
return ino;
}
-static inline int ceph_set_ino_cb(struct inode *inode, void *data)
-{
- ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
- inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
- return 0;
-}
-
-static inline struct ceph_vino ceph_vino(struct inode *inode)
-{
- return ceph_inode(inode)->i_vino;
-}
-
/* for printf-style formatting */
#define ceph_vinop(i) ceph_inode(i)->i_vino.ino, ceph_inode(i)->i_vino.snap
@@ -510,6 +370,73 @@
/*
+ * Ceph inode.
+ */
+#define CEPH_I_COMPLETE 1 /* we have complete directory cached */
+#define CEPH_I_NODELAY 4 /* do not delay cap release */
+#define CEPH_I_FLUSH 8 /* do not delay flush of dirty metadata */
+#define CEPH_I_NOFLUSH 16 /* do not flush dirty caps */
+
+static inline void ceph_i_clear(struct inode *inode, unsigned mask)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+
+ spin_lock(&inode->i_lock);
+ ci->i_ceph_flags &= ~mask;
+ spin_unlock(&inode->i_lock);
+}
+
+static inline void ceph_i_set(struct inode *inode, unsigned mask)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+
+ spin_lock(&inode->i_lock);
+ ci->i_ceph_flags |= mask;
+ spin_unlock(&inode->i_lock);
+}
+
+static inline bool ceph_i_test(struct inode *inode, unsigned mask)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ bool r;
+
+ spin_lock(&inode->i_lock);
+ r = (ci->i_ceph_flags & mask) == mask;
+ spin_unlock(&inode->i_lock);
+ return r;
+}
+
+
+/* find a specific frag @f */
+extern struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci,
+ u32 f);
+
+/*
+ * choose fragment for value @v. copy frag content to pfrag, if leaf
+ * exists
+ */
+extern u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
+ struct ceph_inode_frag *pfrag,
+ int *found);
+
+static inline struct ceph_dentry_info *ceph_dentry(struct dentry *dentry)
+{
+ return (struct ceph_dentry_info *)dentry->d_fsdata;
+}
+
+static inline loff_t ceph_make_fpos(unsigned frag, unsigned off)
+{
+ return ((loff_t)frag << 32) | (loff_t)off;
+}
+
+static inline int ceph_set_ino_cb(struct inode *inode, void *data)
+{
+ ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
+ inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
+ return 0;
+}
+
+/*
* caps helpers
*/
static inline bool __ceph_is_any_real_caps(struct ceph_inode_info *ci)
@@ -573,18 +500,18 @@
struct ceph_cap_reservation *ctx, int need);
extern int ceph_unreserve_caps(struct ceph_mds_client *mdsc,
struct ceph_cap_reservation *ctx);
-extern void ceph_reservation_status(struct ceph_client *client,
+extern void ceph_reservation_status(struct ceph_fs_client *client,
int *total, int *avail, int *used,
int *reserved, int *min);
-static inline struct ceph_client *ceph_inode_to_client(struct inode *inode)
+static inline struct ceph_fs_client *ceph_inode_to_client(struct inode *inode)
{
- return (struct ceph_client *)inode->i_sb->s_fs_info;
+ return (struct ceph_fs_client *)inode->i_sb->s_fs_info;
}
-static inline struct ceph_client *ceph_sb_to_client(struct super_block *sb)
+static inline struct ceph_fs_client *ceph_sb_to_client(struct super_block *sb)
{
- return (struct ceph_client *)sb->s_fs_info;
+ return (struct ceph_fs_client *)sb->s_fs_info;
}
@@ -614,51 +541,6 @@
/*
- * snapshots
- */
-
-/*
- * A "snap context" is the set of existing snapshots when we
- * write data. It is used by the OSD to guide its COW behavior.
- *
- * The ceph_snap_context is refcounted, and attached to each dirty
- * page, indicating which context the dirty data belonged when it was
- * dirtied.
- */
-struct ceph_snap_context {
- atomic_t nref;
- u64 seq;
- int num_snaps;
- u64 snaps[];
-};
-
-static inline struct ceph_snap_context *
-ceph_get_snap_context(struct ceph_snap_context *sc)
-{
- /*
- printk("get_snap_context %p %d -> %d\n", sc, atomic_read(&sc->nref),
- atomic_read(&sc->nref)+1);
- */
- if (sc)
- atomic_inc(&sc->nref);
- return sc;
-}
-
-static inline void ceph_put_snap_context(struct ceph_snap_context *sc)
-{
- if (!sc)
- return;
- /*
- printk("put_snap_context %p %d -> %d\n", sc, atomic_read(&sc->nref),
- atomic_read(&sc->nref)-1);
- */
- if (atomic_dec_and_test(&sc->nref)) {
- /*printk(" deleting snap_context %p\n", sc);*/
- kfree(sc);
- }
-}
-
-/*
* A "snap realm" describes a subset of the file hierarchy sharing
* the same set of snapshots that apply to it. The realms themselves
* are organized into a hierarchy, such that children inherit (some of)
@@ -687,6 +569,8 @@
struct list_head empty_item; /* if i have ref==0 */
+ struct list_head dirty_item; /* if realm needs new context */
+
/* the current set of snaps for this realm */
struct ceph_snap_context *cached_context;
@@ -694,16 +578,33 @@
spinlock_t inodes_with_caps_lock;
};
-
-
-/*
- * calculate the number of pages a given length and offset map onto,
- * if we align the data.
- */
-static inline int calc_pages_for(u64 off, u64 len)
+static inline int default_congestion_kb(void)
{
- return ((off+len+PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT) -
- (off >> PAGE_CACHE_SHIFT);
+ int congestion_kb;
+
+ /*
+ * Copied from NFS
+ *
+ * congestion size, scale with available memory.
+ *
+ * 64MB: 8192k
+ * 128MB: 11585k
+ * 256MB: 16384k
+ * 512MB: 23170k
+ * 1GB: 32768k
+ * 2GB: 46340k
+ * 4GB: 65536k
+ * 8GB: 92681k
+ * 16GB: 131072k
+ *
+ * This allows larger machines to have larger/more transfers.
+ * Limit the default to 256M
+ */
+ congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
+ if (congestion_kb > 256*1024)
+ congestion_kb = 256*1024;
+
+ return congestion_kb;
}
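
The table in the comment is just this formula evaluated at each size: the limit grows with the square root of RAM, converted from page counts to kilobytes, then clamped at 256 MB. A standalone check of the arithmetic, assuming 4 KB pages (PAGE_SHIFT == 12); note the table was apparently computed with exact square roots, so int_sqrt()'s flooring lands a few kilobytes lower in rows whose page counts are not perfect squares:

/* sketch only; link with -lm. int_sqrt() is the integer (floor)
 * square root, modeled here with sqrt(). */
#include <math.h>
#include <stdio.h>

#define PAGE_SHIFT 12

static unsigned long int_sqrt(unsigned long x)
{
	return (unsigned long)sqrt((double)x);
}

int main(void)
{
	unsigned long mb[] = { 64, 128, 256, 512, 1024,
			       2048, 4096, 8192, 16384 };

	for (unsigned i = 0; i < sizeof(mb) / sizeof(mb[0]); i++) {
		unsigned long pages = mb[i] * 1024 / 4;	/* 4 KB pages */
		long kb = (16 * int_sqrt(pages)) << (PAGE_SHIFT - 10);

		if (kb > 256 * 1024)
			kb = 256 * 1024;	/* 256 MB cap */
		printf("%6luMB: %ldk\n", mb[i], kb);
	}
	return 0;
}
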
@@ -736,16 +637,6 @@
ci_item)->writing;
}
-
-/* super.c */
-extern struct kmem_cache *ceph_inode_cachep;
-extern struct kmem_cache *ceph_cap_cachep;
-extern struct kmem_cache *ceph_dentry_cachep;
-extern struct kmem_cache *ceph_file_cachep;
-
-extern const char *ceph_msg_type_name(int type);
-extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid);
-
/* inode.c */
extern const struct inode_operations ceph_file_iops;
@@ -823,7 +714,8 @@
extern void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
struct ceph_snap_context *snapc);
extern void __ceph_flush_snaps(struct ceph_inode_info *ci,
- struct ceph_mds_session **psession);
+ struct ceph_mds_session **psession,
+ int again);
extern void ceph_check_caps(struct ceph_inode_info *ci, int flags,
struct ceph_mds_session *session);
extern void ceph_check_delayed_caps(struct ceph_mds_client *mdsc);
@@ -851,12 +743,18 @@
/* file.c */
extern const struct file_operations ceph_file_fops;
extern const struct address_space_operations ceph_aops;
+extern int ceph_copy_to_page_vector(struct page **pages,
+ const char *data,
+ loff_t off, size_t len);
+extern int ceph_copy_from_page_vector(struct page **pages,
+ char *data,
+ loff_t off, size_t len);
+extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);
extern int ceph_open(struct inode *inode, struct file *file);
extern struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry,
struct nameidata *nd, int mode,
int locked_dir);
extern int ceph_release(struct inode *inode, struct file *filp);
-extern void ceph_release_page_vector(struct page **pages, int num_pages);
/* dir.c */
extern const struct file_operations ceph_dir_fops;
@@ -886,12 +784,6 @@
/* export.c */
extern const struct export_operations ceph_export_ops;
-/* debugfs.c */
-extern int ceph_debugfs_init(void);
-extern void ceph_debugfs_cleanup(void);
-extern int ceph_debugfs_client_init(struct ceph_client *client);
-extern void ceph_debugfs_client_cleanup(struct ceph_client *client);
-
/* locks.c */
extern int ceph_lock(struct file *file, int cmd, struct file_lock *fl);
extern int ceph_flock(struct file *file, int cmd, struct file_lock *fl);
@@ -908,4 +800,8 @@
return NULL;
}
+/* debugfs.c */
+extern int ceph_fs_debugfs_init(struct ceph_fs_client *client);
+extern void ceph_fs_debugfs_cleanup(struct ceph_fs_client *client);
+
#endif /* _FS_CEPH_SUPER_H */
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 097a265..6e12a6b 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -1,6 +1,9 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
+
#include "super.h"
-#include "decode.h"
+#include "mds_client.h"
+
+#include <linux/ceph/decode.h>
#include <linux/xattr.h>
#include <linux/slab.h>
@@ -485,6 +488,7 @@
ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
ci->i_xattrs.prealloc_blob = NULL;
ci->i_xattrs.dirty = false;
+ ci->i_xattrs.version++;
}
}
@@ -619,12 +623,12 @@
static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
const char *value, size_t size, int flags)
{
- struct ceph_client *client = ceph_sb_to_client(dentry->d_sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
struct inode *inode = dentry->d_inode;
struct ceph_inode_info *ci = ceph_inode(inode);
struct inode *parent_inode = dentry->d_parent->d_inode;
struct ceph_mds_request *req;
- struct ceph_mds_client *mdsc = &client->mdsc;
+ struct ceph_mds_client *mdsc = fsc->mdsc;
int err;
int i, nr_pages;
struct page **pages = NULL;
@@ -712,10 +716,9 @@
/* preallocate memory for xattr name, value, index node */
err = -ENOMEM;
- newname = kmalloc(name_len + 1, GFP_NOFS);
+ newname = kmemdup(name, name_len + 1, GFP_NOFS);
if (!newname)
goto out;
- memcpy(newname, name, name_len + 1);
if (val_len) {
newval = kmalloc(val_len + 1, GFP_NOFS);
@@ -776,8 +779,8 @@
static int ceph_send_removexattr(struct dentry *dentry, const char *name)
{
- struct ceph_client *client = ceph_sb_to_client(dentry->d_sb);
- struct ceph_mds_client *mdsc = &client->mdsc;
+ struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
struct inode *inode = dentry->d_inode;
struct inode *parent_inode = dentry->d_parent->d_inode;
struct ceph_mds_request *req;
diff --git a/fs/char_dev.c b/fs/char_dev.c
index f80a4f2..143d393 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -40,7 +40,9 @@
#endif
/* permit direct mmap, for read, write or exec */
BDI_CAP_MAP_DIRECT |
- BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP),
+ BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP |
+ /* no writeback happens */
+ BDI_CAP_NO_ACCT_AND_WRITEBACK),
};
static struct kobj_map *cdev_map;
diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
index 6506382..7fe6b52 100644
--- a/fs/cifs/cifs_unicode.h
+++ b/fs/cifs/cifs_unicode.h
@@ -30,6 +30,8 @@
* This is a compressed table of upper and lower case conversion.
*
*/
+#ifndef _CIFS_UNICODE_H
+#define _CIFS_UNICODE_H
#include <asm/byteorder.h>
#include <linux/types.h>
@@ -67,8 +69,8 @@
#endif /* UNIUPR_NOUPPER */
#ifndef UNIUPR_NOLOWER
-extern signed char UniLowerTable[512];
-extern struct UniCaseRange UniLowerRange[];
+extern signed char CifsUniLowerTable[512];
+extern const struct UniCaseRange CifsUniLowerRange[];
#endif /* UNIUPR_NOLOWER */
#ifdef __KERNEL__
@@ -337,15 +339,15 @@
* UniTolower: Convert a unicode character to lower case
*/
static inline wchar_t
-UniTolower(wchar_t uc)
+UniTolower(register wchar_t uc)
{
- register struct UniCaseRange *rp;
+ register const struct UniCaseRange *rp;
- if (uc < sizeof(UniLowerTable)) {
+ if (uc < sizeof(CifsUniLowerTable)) {
/* Latin characters */
- return uc + UniLowerTable[uc]; /* Use base tables */
+ return uc + CifsUniLowerTable[uc]; /* Use base tables */
} else {
- rp = UniLowerRange; /* Use range tables */
+ rp = CifsUniLowerRange; /* Use range tables */
while (rp->start) {
if (uc < rp->start) /* Before start of range */
return uc; /* Uppercase = input */
@@ -374,3 +376,5 @@
}
#endif
+
+#endif /* _CIFS_UNICODE_H */
diff --git a/fs/cifs/cifs_uniupr.h b/fs/cifs/cifs_uniupr.h
index 18a9d97..0ac7c5a 100644
--- a/fs/cifs/cifs_uniupr.h
+++ b/fs/cifs/cifs_uniupr.h
@@ -140,7 +140,7 @@
/*
* Latin lower case
*/
-static signed char CifsUniLowerTable[512] = {
+signed char CifsUniLowerTable[512] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 000-00f */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 010-01f */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 020-02f */
@@ -242,12 +242,12 @@
/*
* Lower Case Range
*/
-static const struct UniCaseRange CifsUniLowerRange[] = {
- 0x0380, 0x03ab, UniCaseRangeL0380,
- 0x0400, 0x042f, UniCaseRangeL0400,
- 0x0490, 0x04cb, UniCaseRangeL0490,
- 0x1e00, 0x1ff7, UniCaseRangeL1e00,
- 0xff20, 0xff3a, UniCaseRangeLff20,
- 0, 0, 0
+const struct UniCaseRange CifsUniLowerRange[] = {
+ {0x0380, 0x03ab, UniCaseRangeL0380},
+ {0x0400, 0x042f, UniCaseRangeL0400},
+ {0x0490, 0x04cb, UniCaseRangeL0490},
+ {0x1e00, 0x1ff7, UniCaseRangeL1e00},
+ {0xff20, 0xff3a, UniCaseRangeLff20},
+ {0}
};
#endif
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 847628d..35042d8 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -223,63 +223,6 @@
return 0;
}
-int CalcNTLMv2_partial_mac_key(struct cifsSesInfo *ses,
- const struct nls_table *nls_info)
-{
- char temp_hash[16];
- struct HMACMD5Context ctx;
- char *ucase_buf;
- __le16 *unicode_buf;
- unsigned int i, user_name_len, dom_name_len;
-
- if (ses == NULL)
- return -EINVAL;
-
- E_md4hash(ses->password, temp_hash);
-
- hmac_md5_init_limK_to_64(temp_hash, 16, &ctx);
- user_name_len = strlen(ses->userName);
- if (user_name_len > MAX_USERNAME_SIZE)
- return -EINVAL;
- if (ses->domainName == NULL)
- return -EINVAL; /* BB should we use CIFS_LINUX_DOM */
- dom_name_len = strlen(ses->domainName);
- if (dom_name_len > MAX_USERNAME_SIZE)
- return -EINVAL;
-
- ucase_buf = kmalloc((MAX_USERNAME_SIZE+1), GFP_KERNEL);
- if (ucase_buf == NULL)
- return -ENOMEM;
- unicode_buf = kmalloc((MAX_USERNAME_SIZE+1)*4, GFP_KERNEL);
- if (unicode_buf == NULL) {
- kfree(ucase_buf);
- return -ENOMEM;
- }
-
- for (i = 0; i < user_name_len; i++)
- ucase_buf[i] = nls_info->charset2upper[(int)ses->userName[i]];
- ucase_buf[i] = 0;
- user_name_len = cifs_strtoUCS(unicode_buf, ucase_buf,
- MAX_USERNAME_SIZE*2, nls_info);
- unicode_buf[user_name_len] = 0;
- user_name_len++;
-
- for (i = 0; i < dom_name_len; i++)
- ucase_buf[i] = nls_info->charset2upper[(int)ses->domainName[i]];
- ucase_buf[i] = 0;
- dom_name_len = cifs_strtoUCS(unicode_buf+user_name_len, ucase_buf,
- MAX_USERNAME_SIZE*2, nls_info);
-
- unicode_buf[user_name_len + dom_name_len] = 0;
- hmac_md5_update((const unsigned char *) unicode_buf,
- (user_name_len+dom_name_len)*2, &ctx);
-
- hmac_md5_final(ses->server->ntlmv2_hash, &ctx);
- kfree(ucase_buf);
- kfree(unicode_buf);
- return 0;
-}
-
#ifdef CONFIG_CIFS_WEAK_PW_HASH
void calc_lanman_hash(const char *password, const char *cryptkey, bool encrypt,
char *lnm_session_key)
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 1f54508..1d60c65 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -87,8 +87,9 @@
extern int decode_negTokenInit(unsigned char *security_blob, int length,
struct TCP_Server_Info *server);
extern int cifs_convert_address(struct sockaddr *dst, const char *src, int len);
+extern int cifs_set_port(struct sockaddr *addr, const unsigned short int port);
extern int cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len,
- unsigned short int port);
+ const unsigned short int port);
extern int map_smb_to_linux_error(struct smb_hdr *smb, int logErr);
extern void header_assemble(struct smb_hdr *, char /* command */ ,
const struct cifsTconInfo *, int /* length of
@@ -365,8 +366,6 @@
__u32 expected_sequence_number);
extern int cifs_calculate_mac_key(struct mac_key *key, const char *rn,
const char *pass);
-extern int CalcNTLMv2_partial_mac_key(struct cifsSesInfo *,
- const struct nls_table *);
extern void CalcNTLMv2_response(const struct cifsSesInfo *, char *);
extern void setup_ntlmv2_rsp(struct cifsSesInfo *, char *,
const struct nls_table *);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index c65c341..7e83b35 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -232,7 +232,7 @@
small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
void **request_buf)
{
- int rc = 0;
+ int rc;
rc = cifs_reconnect_tcon(tcon, smb_command);
if (rc)
@@ -250,7 +250,7 @@
if (tcon != NULL)
cifs_stats_inc(&tcon->num_smbs_sent);
- return rc;
+ return 0;
}
int
@@ -281,16 +281,9 @@
/* If the return code is zero, this function must fill in request_buf pointer */
static int
-smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
- void **request_buf /* returned */ ,
- void **response_buf /* returned */ )
+__smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
+ void **request_buf, void **response_buf)
{
- int rc = 0;
-
- rc = cifs_reconnect_tcon(tcon, smb_command);
- if (rc)
- return rc;
-
*request_buf = cifs_buf_get();
if (*request_buf == NULL) {
/* BB should we add a retry in here if not a writepage? */
@@ -309,7 +302,31 @@
if (tcon != NULL)
cifs_stats_inc(&tcon->num_smbs_sent);
- return rc;
+ return 0;
+}
+
+/* If the return code is zero, this function must fill in request_buf pointer */
+static int
+smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
+ void **request_buf, void **response_buf)
+{
+ int rc;
+
+ rc = cifs_reconnect_tcon(tcon, smb_command);
+ if (rc)
+ return rc;
+
+ return __smb_init(smb_command, wct, tcon, request_buf, response_buf);
+}
+
+static int
+smb_init_no_reconnect(int smb_command, int wct, struct cifsTconInfo *tcon,
+ void **request_buf, void **response_buf)
+{
+ if (tcon->ses->need_reconnect || tcon->need_reconnect)
+ return -EHOSTDOWN;
+
+ return __smb_init(smb_command, wct, tcon, request_buf, response_buf);
}
static int validate_t2(struct smb_t2_rsp *pSMB)
@@ -4534,8 +4551,8 @@
cFYI(1, "In QFSUnixInfo");
QFSUnixRetry:
- rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
- (void **) &pSMBr);
+ rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, tcon,
+ (void **) &pSMB, (void **) &pSMBr);
if (rc)
return rc;
@@ -4604,8 +4621,8 @@
cFYI(1, "In SETFSUnixInfo");
SETFSUnixRetry:
/* BB switch to small buf init to save memory */
- rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
- (void **) &pSMBr);
+ rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, tcon,
+ (void **) &pSMB, (void **) &pSMBr);
if (rc)
return rc;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 95c2ea6..88c84a3 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -400,7 +400,9 @@
cFYI(1, "call to reconnect done");
csocket = server->ssocket;
continue;
- } else if ((length == -ERESTARTSYS) || (length == -EAGAIN)) {
+ } else if (length == -ERESTARTSYS ||
+ length == -EAGAIN ||
+ length == -EINTR) {
msleep(1); /* minimum sleep to prevent looping
allowing socket to clear and app threads to set
tcpStatus CifsNeedReconnect if server hung */
@@ -414,18 +416,6 @@
} else
continue;
} else if (length <= 0) {
- if (server->tcpStatus == CifsNew) {
- cFYI(1, "tcp session abend after SMBnegprot");
- /* some servers kill the TCP session rather than
- returning an SMB negprot error, in which
- case reconnecting here is not going to help,
- and so simply return error to mount */
- break;
- }
- if (!try_to_freeze() && (length == -EINTR)) {
- cFYI(1, "cifsd thread killed");
- break;
- }
cFYI(1, "Reconnect after unexpected peek error %d",
length);
cifs_reconnect(server);
@@ -466,27 +456,19 @@
an error on SMB negprot response */
cFYI(1, "Negative RFC1002 Session Response Error 0x%x)",
pdu_length);
- if (server->tcpStatus == CifsNew) {
- /* if nack on negprot (rather than
- ret of smb negprot error) reconnecting
- not going to help, ret error to mount */
- break;
- } else {
- /* give server a second to
- clean up before reconnect attempt */
- msleep(1000);
- /* always try 445 first on reconnect
- since we get NACK on some if we ever
- connected to port 139 (the NACK is
- since we do not begin with RFC1001
- session initialize frame) */
- server->addr.sockAddr.sin_port =
- htons(CIFS_PORT);
- cifs_reconnect(server);
- csocket = server->ssocket;
- wake_up(&server->response_q);
- continue;
- }
+ /* give server a second to clean up */
+ msleep(1000);
+ /* always try 445 first on reconnect, since we get a NACK
+ * from some servers if we ever connected to port 139 (the
+ * NACK is because we do not begin with an RFC1001 session
+ * initialize frame)
+ */
+ cifs_set_port((struct sockaddr *)
+ &server->addr.sockAddr, CIFS_PORT);
+ cifs_reconnect(server);
+ csocket = server->ssocket;
+ wake_up(&server->response_q);
+ continue;
} else if (temp != (char) 0) {
cERROR(1, "Unknown RFC 1002 frame");
cifs_dump_mem(" Received Data: ", (char *)smb_buffer,
@@ -522,8 +504,7 @@
total_read += length) {
length = kernel_recvmsg(csocket, &smb_msg, &iov, 1,
pdu_length - total_read, 0);
- if ((server->tcpStatus == CifsExiting) ||
- (length == -EINTR)) {
+ if (server->tcpStatus == CifsExiting) {
/* then will exit */
reconnect = 2;
break;
@@ -534,8 +515,9 @@
/* Now we will reread sock */
reconnect = 1;
break;
- } else if ((length == -ERESTARTSYS) ||
- (length == -EAGAIN)) {
+ } else if (length == -ERESTARTSYS ||
+ length == -EAGAIN ||
+ length == -EINTR) {
msleep(1); /* minimum sleep to prevent looping,
allowing socket to clear and app
threads to set tcpStatus
@@ -1673,7 +1655,9 @@
MAX_USERNAME_SIZE))
continue;
if (strlen(vol->username) != 0 &&
- strncmp(ses->password, vol->password,
+ ses->password != NULL &&
+ strncmp(ses->password,
+ vol->password ? vol->password : "",
MAX_PASSWORD_SIZE))
continue;
}
@@ -1722,9 +1706,6 @@
if (ses) {
cFYI(1, "Existing smb sess found (status=%d)", ses->status);
- /* existing SMB ses has a server reference already */
- cifs_put_tcp_session(server);
-
mutex_lock(&ses->session_mutex);
rc = cifs_negotiate_protocol(xid, ses);
if (rc) {
@@ -1747,6 +1728,9 @@
}
}
mutex_unlock(&ses->session_mutex);
+
+ /* existing SMB ses has a server reference already */
+ cifs_put_tcp_session(server);
FreeXid(xid);
return ses;
}
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 578d88c..f9ed075 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -305,8 +305,7 @@
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
rc = -ENOMEM;
- FreeXid(xid);
- return rc;
+ goto cifs_create_out;
}
if (oplockEnabled)
@@ -365,9 +364,8 @@
buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
if (buf == NULL) {
- kfree(full_path);
- FreeXid(xid);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto cifs_create_out;
}
/*
@@ -496,6 +494,11 @@
struct cifsTconInfo *pTcon;
char *full_path = NULL;
struct inode *newinode = NULL;
+ int oplock = 0;
+ u16 fileHandle;
+ FILE_ALL_INFO *buf = NULL;
+ unsigned int bytes_written;
+ struct win_dev *pdev;
if (!old_valid_dev(device_number))
return -EINVAL;
@@ -506,9 +509,12 @@
pTcon = cifs_sb->tcon;
full_path = build_path_from_dentry(direntry);
- if (full_path == NULL)
+ if (full_path == NULL) {
rc = -ENOMEM;
- else if (pTcon->unix_ext) {
+ goto mknod_out;
+ }
+
+ if (pTcon->unix_ext) {
struct cifs_unix_set_info_args args = {
.mode = mode & ~current_umask(),
.ctime = NO_CHANGE_64,
@@ -527,87 +533,78 @@
cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
+ if (rc)
+ goto mknod_out;
- if (!rc) {
- rc = cifs_get_inode_info_unix(&newinode, full_path,
+ rc = cifs_get_inode_info_unix(&newinode, full_path,
inode->i_sb, xid);
- if (pTcon->nocase)
- direntry->d_op = &cifs_ci_dentry_ops;
- else
- direntry->d_op = &cifs_dentry_ops;
- if (rc == 0)
- d_instantiate(direntry, newinode);
- }
- } else {
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
- int oplock = 0;
- u16 fileHandle;
- FILE_ALL_INFO *buf;
+ if (pTcon->nocase)
+ direntry->d_op = &cifs_ci_dentry_ops;
+ else
+ direntry->d_op = &cifs_dentry_ops;
- cFYI(1, "sfu compat create special file");
-
- buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
- if (buf == NULL) {
- kfree(full_path);
- rc = -ENOMEM;
- FreeXid(xid);
- return rc;
- }
-
- rc = CIFSSMBOpen(xid, pTcon, full_path,
- FILE_CREATE, /* fail if exists */
- GENERIC_WRITE /* BB would
- WRITE_OWNER | WRITE_DAC be better? */,
- /* Create a file and set the
- file attribute to SYSTEM */
- CREATE_NOT_DIR | CREATE_OPTION_SPECIAL,
- &fileHandle, &oplock, buf,
- cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags &
- CIFS_MOUNT_MAP_SPECIAL_CHR);
-
- /* BB FIXME - add handling for backlevel servers
- which need legacy open and check for all
- calls to SMBOpen for fallback to SMBLeagcyOpen */
- if (!rc) {
- /* BB Do not bother to decode buf since no
- local inode yet to put timestamps in,
- but we can reuse it safely */
- unsigned int bytes_written;
- struct win_dev *pdev;
- pdev = (struct win_dev *)buf;
- if (S_ISCHR(mode)) {
- memcpy(pdev->type, "IntxCHR", 8);
- pdev->major =
- cpu_to_le64(MAJOR(device_number));
- pdev->minor =
- cpu_to_le64(MINOR(device_number));
- rc = CIFSSMBWrite(xid, pTcon,
- fileHandle,
- sizeof(struct win_dev),
- 0, &bytes_written, (char *)pdev,
- NULL, 0);
- } else if (S_ISBLK(mode)) {
- memcpy(pdev->type, "IntxBLK", 8);
- pdev->major =
- cpu_to_le64(MAJOR(device_number));
- pdev->minor =
- cpu_to_le64(MINOR(device_number));
- rc = CIFSSMBWrite(xid, pTcon,
- fileHandle,
- sizeof(struct win_dev),
- 0, &bytes_written, (char *)pdev,
- NULL, 0);
- } /* else if(S_ISFIFO */
- CIFSSMBClose(xid, pTcon, fileHandle);
- d_drop(direntry);
- }
- kfree(buf);
- /* add code here to set EAs */
- }
+ if (rc == 0)
+ d_instantiate(direntry, newinode);
+ goto mknod_out;
}
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
+ goto mknod_out;
+
+
+ cFYI(1, "sfu compat create special file");
+
+ buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
+ if (buf == NULL) {
+ kfree(full_path);
+ rc = -ENOMEM;
+ FreeXid(xid);
+ return rc;
+ }
+
+ /* FIXME: would WRITE_OWNER | WRITE_DAC be better? */
+ rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_CREATE,
+ GENERIC_WRITE, CREATE_NOT_DIR | CREATE_OPTION_SPECIAL,
+ &fileHandle, &oplock, buf, cifs_sb->local_nls,
+ cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+ if (rc)
+ goto mknod_out;
+
+ /* BB Do not bother to decode buf since no local inode yet to put
+ * timestamps in, but we can reuse it safely */
+
+ pdev = (struct win_dev *)buf;
+ if (S_ISCHR(mode)) {
+ memcpy(pdev->type, "IntxCHR", 8);
+ pdev->major =
+ cpu_to_le64(MAJOR(device_number));
+ pdev->minor =
+ cpu_to_le64(MINOR(device_number));
+ rc = CIFSSMBWrite(xid, pTcon,
+ fileHandle,
+ sizeof(struct win_dev),
+ 0, &bytes_written, (char *)pdev,
+ NULL, 0);
+ } else if (S_ISBLK(mode)) {
+ memcpy(pdev->type, "IntxBLK", 8);
+ pdev->major =
+ cpu_to_le64(MAJOR(device_number));
+ pdev->minor =
+ cpu_to_le64(MINOR(device_number));
+ rc = CIFSSMBWrite(xid, pTcon,
+ fileHandle,
+ sizeof(struct win_dev),
+ 0, &bytes_written, (char *)pdev,
+ NULL, 0);
+ } /* else if (S_ISFIFO) */
+ CIFSSMBClose(xid, pTcon, fileHandle);
+ d_drop(direntry);
+
+ /* FIXME: add code here to set EAs */
+
+mknod_out:
kfree(full_path);
+ kfree(buf);
FreeXid(xid);
return rc;
}
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index db11fde..de748c6 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -242,8 +242,7 @@
full_path = build_path_from_dentry(file->f_path.dentry);
if (full_path == NULL) {
rc = -ENOMEM;
- FreeXid(xid);
- return rc;
+ goto out;
}
cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 4bc47e5..53cce8c 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -801,6 +801,8 @@
inode->i_flags |= S_NOATIME | S_NOCMTIME;
if (inode->i_state & I_NEW) {
inode->i_ino = hash;
+ if (S_ISREG(inode->i_mode))
+ inode->i_data.backing_dev_info = sb->s_bdi;
#ifdef CONFIG_CIFS_FSCACHE
/* initialize per-inode cache cookie pointer */
CIFS_I(inode)->fscache = NULL;
@@ -834,7 +836,7 @@
xid, NULL);
if (!inode)
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(rc);
#ifdef CONFIG_CIFS_FSCACHE
/* populate tcon->resource_id */
@@ -1462,29 +1464,18 @@
{
char *fromName = NULL;
char *toName = NULL;
- struct cifs_sb_info *cifs_sb_source;
- struct cifs_sb_info *cifs_sb_target;
+ struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *tcon;
FILE_UNIX_BASIC_INFO *info_buf_source = NULL;
FILE_UNIX_BASIC_INFO *info_buf_target;
int xid, rc, tmprc;
- cifs_sb_target = CIFS_SB(target_dir->i_sb);
- cifs_sb_source = CIFS_SB(source_dir->i_sb);
- tcon = cifs_sb_source->tcon;
+ cifs_sb = CIFS_SB(source_dir->i_sb);
+ tcon = cifs_sb->tcon;
xid = GetXid();
/*
- * BB: this might be allowed if same server, but different share.
- * Consider adding support for this
- */
- if (tcon != cifs_sb_target->tcon) {
- rc = -EXDEV;
- goto cifs_rename_exit;
- }
-
- /*
* we already have the rename sem so we do not need to
* grab it again here to protect the path integrity
*/
@@ -1519,17 +1510,16 @@
info_buf_target = info_buf_source + 1;
tmprc = CIFSSMBUnixQPathInfo(xid, tcon, fromName,
info_buf_source,
- cifs_sb_source->local_nls,
- cifs_sb_source->mnt_cifs_flags &
+ cifs_sb->local_nls,
+ cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
if (tmprc != 0)
goto unlink_target;
- tmprc = CIFSSMBUnixQPathInfo(xid, tcon,
- toName, info_buf_target,
- cifs_sb_target->local_nls,
- /* remap based on source sb */
- cifs_sb_source->mnt_cifs_flags &
+ tmprc = CIFSSMBUnixQPathInfo(xid, tcon, toName,
+ info_buf_target,
+ cifs_sb->local_nls,
+ cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
if (tmprc == 0 && (info_buf_source->UniqueId ==
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index f978511..9aad47a 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -206,24 +206,28 @@
}
int
+cifs_set_port(struct sockaddr *addr, const unsigned short int port)
+{
+ switch (addr->sa_family) {
+ case AF_INET:
+ ((struct sockaddr_in *)addr)->sin_port = htons(port);
+ break;
+ case AF_INET6:
+ ((struct sockaddr_in6 *)addr)->sin6_port = htons(port);
+ break;
+ default:
+ return 0;
+ }
+ return 1;
+}
+
+int
cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len,
const unsigned short int port)
{
if (!cifs_convert_address(dst, src, len))
return 0;
-
- switch (dst->sa_family) {
- case AF_INET:
- ((struct sockaddr_in *)dst)->sin_port = htons(port);
- break;
- case AF_INET6:
- ((struct sockaddr_in6 *)dst)->sin6_port = htons(port);
- break;
- default:
- return 0;
- }
-
- return 1;
+ return cifs_set_port(dst, port);
}
/*****************************************************************************
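
With the port assignment split out of cifs_fill_sockaddr(), the reconnect path in fs/cifs/connect.c above can retarget an already-parsed sockaddr at port 445 (CIFS_PORT) without redoing the address conversion. A minimal userspace analogue of the new helper, with libc headers standing in for the kernel ones:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <stdio.h>
#include <string.h>

/* same switch as cifs_set_port(): returns 1 on success,
 * 0 for an unknown address family */
static int set_port(struct sockaddr *addr, unsigned short port)
{
	switch (addr->sa_family) {
	case AF_INET:
		((struct sockaddr_in *)addr)->sin_port = htons(port);
		break;
	case AF_INET6:
		((struct sockaddr_in6 *)addr)->sin6_port = htons(port);
		break;
	default:
		return 0;
	}
	return 1;
}

int main(void)
{
	struct sockaddr_in sin;

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	if (set_port((struct sockaddr *)&sin, 445))	/* CIFS_PORT */
		printf("port set to %u\n", ntohs(sin.sin_port));
	return 0;
}
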
diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c
index de89645..116af75 100644
--- a/fs/coda/psdev.c
+++ b/fs/coda/psdev.c
@@ -184,8 +184,8 @@
}
/* adjust outsize. is this useful ?? */
- req->uc_outSize = nbytes;
- req->uc_flags |= REQ_WRITE;
+ req->uc_outSize = nbytes;
+ req->uc_flags |= CODA_REQ_WRITE;
count = nbytes;
/* Convert filedescriptor into a file handle */
diff --git a/fs/compat.c b/fs/compat.c
index 718c706..0644a15 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1153,7 +1153,7 @@
{
compat_ssize_t tot_len;
struct iovec iovstack[UIO_FASTIOV];
- struct iovec *iov;
+ struct iovec *iov = iovstack;
ssize_t ret;
io_fn_t fn;
iov_fn_t fnv;
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 51f270b..48d74c7 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -634,7 +634,7 @@
int ret = 0;
if (dio->bio) {
- loff_t cur_offset = dio->block_in_file << dio->blkbits;
+ loff_t cur_offset = dio->cur_page_fs_offset;
loff_t bio_next_offset = dio->logical_offset_in_bio +
dio->bio->bi_size;
@@ -659,7 +659,7 @@
* Submit now if the underlying fs is about to perform a
* metadata read
*/
- if (dio->boundary)
+ else if (dio->boundary)
dio_bio_submit(dio);
}
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index a2e3b56..cbadc1b 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -1793,7 +1793,7 @@
static struct list_head key_tfm_list;
struct mutex key_tfm_list_mutex;
-int ecryptfs_init_crypto(void)
+int __init ecryptfs_init_crypto(void)
{
mutex_init(&key_tfm_list_mutex);
INIT_LIST_HEAD(&key_tfm_list);
@@ -2169,7 +2169,6 @@
(ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE
+ encoded_name_no_prefix_size);
(*encoded_name)[(*encoded_name_size)] = '\0';
- (*encoded_name_size)++;
} else {
rc = -EOPNOTSUPP;
}
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 6c55113..3fbc942 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -349,7 +349,7 @@
/**
* ecryptfs_new_lower_dentry
- * @ename: The name of the new dentry.
+ * @name: The name of the new dentry.
* @lower_dir_dentry: Parent directory of the new dentry.
* @nd: nameidata from last lookup.
*
@@ -386,20 +386,19 @@
* ecryptfs_lookup_one_lower
* @ecryptfs_dentry: The eCryptfs dentry that we are looking up
* @lower_dir_dentry: lower parent directory
+ * @name: lower file name
*
* Get the lower dentry from vfs. If lower dentry does not exist yet,
* create it.
*/
static struct dentry *
ecryptfs_lookup_one_lower(struct dentry *ecryptfs_dentry,
- struct dentry *lower_dir_dentry)
+ struct dentry *lower_dir_dentry, struct qstr *name)
{
struct nameidata nd;
struct vfsmount *lower_mnt;
- struct qstr *name;
int err;
- name = &ecryptfs_dentry->d_name;
lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(
ecryptfs_dentry->d_parent));
err = vfs_path_lookup(lower_dir_dentry, lower_mnt, name->name , 0, &nd);
@@ -434,6 +433,7 @@
size_t encrypted_and_encoded_name_size;
struct ecryptfs_mount_crypt_stat *mount_crypt_stat = NULL;
struct dentry *lower_dir_dentry, *lower_dentry;
+ struct qstr lower_name;
int rc = 0;
ecryptfs_dentry->d_op = &ecryptfs_dops;
@@ -444,9 +444,17 @@
goto out_d_drop;
}
lower_dir_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry->d_parent);
-
+ lower_name.name = ecryptfs_dentry->d_name.name;
+ lower_name.len = ecryptfs_dentry->d_name.len;
+ lower_name.hash = ecryptfs_dentry->d_name.hash;
+ if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) {
+ rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry,
+ &lower_name);
+ if (rc < 0)
+ goto out_d_drop;
+ }
lower_dentry = ecryptfs_lookup_one_lower(ecryptfs_dentry,
- lower_dir_dentry);
+ lower_dir_dentry, &lower_name);
if (IS_ERR(lower_dentry)) {
rc = PTR_ERR(lower_dentry);
ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_lower() returned "
@@ -471,8 +479,17 @@
"filename; rc = [%d]\n", __func__, rc);
goto out_d_drop;
}
+ lower_name.name = encrypted_and_encoded_name;
+ lower_name.len = encrypted_and_encoded_name_size;
+ lower_name.hash = full_name_hash(lower_name.name, lower_name.len);
+ if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) {
+ rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry,
+ &lower_name);
+ if (rc < 0)
+ goto out_d_drop;
+ }
lower_dentry = ecryptfs_lookup_one_lower(ecryptfs_dentry,
- lower_dir_dentry);
+ lower_dir_dentry, &lower_name);
if (IS_ERR(lower_dentry)) {
rc = PTR_ERR(lower_dentry);
ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_lower() returned "
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 89c5476..73811cf 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -515,6 +515,7 @@
if (!s) {
printk(KERN_ERR "%s: Out of memory whilst trying to kmalloc "
"[%zd] bytes of kernel memory\n", __func__, sizeof(*s));
+ rc = -ENOMEM;
goto out;
}
s->desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -806,6 +807,7 @@
if (!s) {
printk(KERN_ERR "%s: Out of memory whilst trying to kmalloc "
"[%zd] bytes of kernel memory\n", __func__, sizeof(*s));
+ rc = -ENOMEM;
goto out;
}
s->desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c
index d8c3a37..0851ab6 100644
--- a/fs/ecryptfs/kthread.c
+++ b/fs/ecryptfs/kthread.c
@@ -86,7 +86,7 @@
return 0;
}
-int ecryptfs_init_kthread(void)
+int __init ecryptfs_init_kthread(void)
{
int rc = 0;
diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c
index bcb68c0..ab22480 100644
--- a/fs/ecryptfs/messaging.c
+++ b/fs/ecryptfs/messaging.c
@@ -473,7 +473,7 @@
return rc;
}
-int ecryptfs_init_messaging(void)
+int __init ecryptfs_init_messaging(void)
{
int i;
int rc = 0;
diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
index 3745f61..00208c3 100644
--- a/fs/ecryptfs/miscdev.c
+++ b/fs/ecryptfs/miscdev.c
@@ -500,7 +500,7 @@
*
* Returns zero on success; non-zero otherwise
*/
-int ecryptfs_init_ecryptfs_miscdev(void)
+int __init ecryptfs_init_ecryptfs_miscdev(void)
{
int rc;
diff --git a/fs/exec.c b/fs/exec.c
index 2d94552..6d2b6f9 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -376,6 +376,9 @@
argv++;
if (i++ >= max)
return -E2BIG;
+
+ if (fatal_signal_pending(current))
+ return -ERESTARTNOHAND;
cond_resched();
}
}
@@ -419,6 +422,12 @@
while (len > 0) {
int offset, bytes_to_copy;
+ if (fatal_signal_pending(current)) {
+ ret = -ERESTARTNOHAND;
+ goto out;
+ }
+ cond_resched();
+
offset = pos % PAGE_SIZE;
if (offset == 0)
offset = PAGE_SIZE;
@@ -594,6 +603,11 @@
#else
stack_top = arch_align_stack(stack_top);
stack_top = PAGE_ALIGN(stack_top);
+
+ if (unlikely(stack_top < mmap_min_addr) ||
+ unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
+ return -ENOMEM;
+
stack_shift = vma->vm_end - stack_top;
bprm->p -= stack_shift;
@@ -2000,3 +2014,43 @@
fail:
return;
}
+
+/*
+ * Core dumping helper functions. These are the only things you should
+ * do on a core-file: use only these functions to write out all the
+ * necessary info.
+ */
+int dump_write(struct file *file, const void *addr, int nr)
+{
+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
+}
+EXPORT_SYMBOL(dump_write);
+
+int dump_seek(struct file *file, loff_t off)
+{
+ int ret = 1;
+
+ if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
+ if (file->f_op->llseek(file, off, SEEK_CUR) < 0)
+ return 0;
+ } else {
+ char *buf = (char *)get_zeroed_page(GFP_KERNEL);
+
+ if (!buf)
+ return 0;
+ while (off > 0) {
+ unsigned long n = off;
+
+ if (n > PAGE_SIZE)
+ n = PAGE_SIZE;
+ if (!dump_write(file, buf, n)) {
+ ret = 0;
+ break;
+ }
+ off -= n;
+ }
+ free_page((unsigned long)buf);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(dump_seek);
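
The new dump_seek() prefers a real llseek() so skipped spans become holes in the core file, and only when the target cannot seek (a pipe, say) does it pad with zeroed pages. The same two-branch strategy in a small userspace sketch; the descriptor and the seekable flag are the example's own assumptions:

#include <string.h>
#include <sys/types.h>
#include <unistd.h>

#define PAGE_SZ 4096

/* skip 'off' bytes of output: seek past them (leaving a hole) when
 * possible, otherwise write zeroed pages, as dump_seek() does */
static int skip_output(int fd, off_t off, int seekable)
{
	char buf[PAGE_SZ];

	if (seekable)
		return lseek(fd, off, SEEK_CUR) >= 0;

	memset(buf, 0, sizeof(buf));
	while (off > 0) {
		size_t n = off > PAGE_SZ ? PAGE_SZ : (size_t)off;

		if (write(fd, buf, n) != (ssize_t)n)
			return 0;
		off -= n;
	}
	return 1;
}

int main(void)
{
	/* a pipe cannot seek, so piping stdout exercises the
	 * zero-fill branch */
	return skip_output(STDOUT_FILENO, 3 * PAGE_SZ + 5, 0) ? 0 : 1;
}
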
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index eb7368e..3eadd97 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -54,6 +54,9 @@
unsigned nr_pages;
unsigned long length;
loff_t pg_first; /* keep 64bit also in 32-arches */
+ bool read_4_write; /* This means two things: the read is sync
+ * and the pages should not be unlocked.
+ */
};
static void _pcol_init(struct page_collect *pcol, unsigned expected_pages,
@@ -71,6 +74,7 @@
pcol->nr_pages = 0;
pcol->length = 0;
pcol->pg_first = -1;
+ pcol->read_4_write = false;
}
static void _pcol_reset(struct page_collect *pcol)
@@ -347,7 +351,8 @@
if (PageError(page))
ClearPageError(page);
- unlock_page(page);
+ if (!pcol->read_4_write)
+ unlock_page(page);
EXOFS_DBGMSG("readpage_strip(0x%lx, 0x%lx) empty page,"
" splitting\n", inode->i_ino, page->index);
@@ -428,6 +433,7 @@
/* readpage_strip might call read_exec(,is_sync==false) at several
* places but not if we have a single page.
*/
+ pcol.read_4_write = is_sync;
ret = readpage_strip(&pcol, page);
if (ret) {
EXOFS_ERR("_readpage => %d\n", ret);
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 6769fd0..f8cc34f 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -769,11 +769,15 @@
static int __init fcntl_init(void)
{
- /* please add new bits here to ensure allocation uniqueness */
- BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
+ /*
+ * Please add new bits here to ensure allocation uniqueness.
+ * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
+ * is defined as O_NONBLOCK on some platforms and not on others.
+ */
+ BUILD_BUG_ON(18 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
O_RDONLY | O_WRONLY | O_RDWR |
O_CREAT | O_EXCL | O_NOCTTY |
- O_TRUNC | O_APPEND | O_NONBLOCK |
+ O_TRUNC | O_APPEND | /* O_NONBLOCK | */
__O_SYNC | O_DSYNC | FASYNC |
O_DIRECT | O_LARGEFILE | O_DIRECTORY |
O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
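
HWEIGHT32 is a 32-bit population count, so the BUILD_BUG_ON holds only if every flag in the OR contributes a distinct bit: N one-bit flags OR together to a value with exactly N bits set (the "- 1" accounts for O_RDONLY being 0, and O_NONBLOCK is now excluded for the reason given in the comment). The idea in miniature, with stand-in values rather than the real O_* constants:

#include <assert.h>

/* hypothetical one-bit flags; __builtin_popcount() (a gcc/clang
 * builtin) plays the role of HWEIGHT32 */
#define F_READ   (1 << 0)
#define F_WRITE  (1 << 1)
#define F_APPEND (1 << 2)

int main(void)
{
	/* three distinct flags -> popcount of 3; two flags sharing a
	 * bit would trip this at run time, whereas the kernel's
	 * BUILD_BUG_ON trips at compile time */
	assert(__builtin_popcount(F_READ | F_WRITE | F_APPEND) == 3);
	return 0;
}
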
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 7d9d06b..ab38fef 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -52,8 +52,6 @@
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>
-#define inode_to_bdi(inode) ((inode)->i_mapping->backing_dev_info)
-
/*
* We don't actually have pdflush, but this one is exported though /proc...
*/
@@ -71,6 +69,16 @@
return test_bit(BDI_writeback_running, &bdi->state);
}
+static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
+{
+ struct super_block *sb = inode->i_sb;
+
+ if (strcmp(sb->s_type->name, "bdev") == 0)
+ return inode->i_mapping->backing_dev_info;
+
+ return sb->s_bdi;
+}
+
static void bdi_queue_work(struct backing_dev_info *bdi,
struct wb_writeback_work *work)
{
@@ -808,7 +816,7 @@
wb->last_active = jiffies;
set_current_state(TASK_INTERRUPTIBLE);
- if (!list_empty(&bdi->work_list)) {
+ if (!list_empty(&bdi->work_list) || kthread_should_stop()) {
__set_current_state(TASK_RUNNING);
continue;
}
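The extra kthread_should_stop() test closes a shutdown race: without it, a flusher thread parked in TASK_INTERRUPTIBLE could miss a concurrent kthread_stop() and sleep indefinitely. The general kernel-thread pattern, sketched with hypothetical have_work()/do_work() helpers:

/* have_work() and do_work() are hypothetical stand-ins. */
while (!kthread_should_stop()) {
	set_current_state(TASK_INTERRUPTIBLE);
	/* Re-check stop AFTER setting the sleep state: a kthread_stop()
	 * issued between the two tests would otherwise be slept through. */
	if (have_work() || kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		if (have_work())
			do_work();
		continue;
	}
	schedule();
}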
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 69ad053..cde755c 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -276,7 +276,7 @@
* Called with fc->lock, unlocks it
*/
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
-__releases(&fc->lock)
+__releases(fc->lock)
{
void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
req->end = NULL;
@@ -306,8 +306,8 @@
static void wait_answer_interruptible(struct fuse_conn *fc,
struct fuse_req *req)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
{
if (signal_pending(current))
return;
@@ -325,8 +325,8 @@
}
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
{
if (!fc->no_interrupt) {
/* Any signal may interrupt this */
@@ -905,8 +905,8 @@
/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
{
DECLARE_WAITQUEUE(wait, current);
@@ -934,7 +934,7 @@
*/
static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
size_t nbytes, struct fuse_req *req)
-__releases(&fc->lock)
+__releases(fc->lock)
{
struct fuse_in_header ih;
struct fuse_interrupt_in arg;
@@ -1354,7 +1354,7 @@
loff_t file_size;
unsigned int num;
unsigned int offset;
- size_t total_len;
+ size_t total_len = 0;
req = fuse_get_req(fc);
if (IS_ERR(req))
@@ -1720,8 +1720,8 @@
* This function releases and reacquires fc->lock
*/
static void end_requests(struct fuse_conn *fc, struct list_head *head)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
{
while (!list_empty(head)) {
struct fuse_req *req;
@@ -1744,8 +1744,8 @@
* locked).
*/
static void end_io_requests(struct fuse_conn *fc)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
{
while (!list_empty(&fc->io)) {
struct fuse_req *req =
@@ -1769,6 +1769,16 @@
}
}
+static void end_queued_requests(struct fuse_conn *fc)
+__releases(fc->lock)
+__acquires(fc->lock)
+{
+ fc->max_background = UINT_MAX;
+ flush_bg_queue(fc);
+ end_requests(fc, &fc->pending);
+ end_requests(fc, &fc->processing);
+}
+
/*
* Abort all requests.
*
@@ -1795,8 +1805,7 @@
fc->connected = 0;
fc->blocked = 0;
end_io_requests(fc);
- end_requests(fc, &fc->pending);
- end_requests(fc, &fc->processing);
+ end_queued_requests(fc);
wake_up_all(&fc->waitq);
wake_up_all(&fc->blocked_waitq);
kill_fasync(&fc->fasync, SIGIO, POLL_IN);
@@ -1811,8 +1820,9 @@
if (fc) {
spin_lock(&fc->lock);
fc->connected = 0;
- end_requests(fc, &fc->pending);
- end_requests(fc, &fc->processing);
+ fc->blocked = 0;
+ end_queued_requests(fc);
+ wake_up_all(&fc->blocked_waitq);
spin_unlock(&fc->lock);
fuse_conn_put(fc);
}
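The annotation churn above (dropping the & from __releases()/__acquires()) only affects sparse; the expressions now match the form spin_lock()'s own annotations use. For reference, a minimal sketch of how these context annotations are defined and applied (mirrors include/linux/compiler.h; outside a sparse build they compile away):

#ifdef __CHECKER__
# define __acquires(x)	__attribute__((context(x, 0, 1)))
# define __releases(x)	__attribute__((context(x, 1, 0)))
#else
# define __acquires(x)
# define __releases(x)
#endif

/* Drops the caller's lock, does slow work, retakes it; sparse checks
 * that entry/exit lock state matches the annotations. */
static void drop_then_retake(spinlock_t *lock)
__releases(lock)
__acquires(lock)
{
	spin_unlock(lock);
	/* ... sleep or perform blocking work here ... */
	spin_lock(lock);
}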
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 147c1f7..c822458 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1144,8 +1144,8 @@
/* Called under fc->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
{
struct fuse_inode *fi = get_fuse_inode(req->inode);
loff_t size = i_size_read(req->inode);
@@ -1183,8 +1183,8 @@
* Called with fc->lock
*/
void fuse_flush_writepages(struct inode *inode)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
{
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index cc96655..c465ae0 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -1,6 +1,6 @@
config GFS2_FS
tristate "GFS2 file system support"
- depends on EXPERIMENTAL && (64BIT || LBDAF)
+ depends on (64BIT || LBDAF)
select DLM if GFS2_FS_LOCKING_DLM
select CONFIGFS_FS if GFS2_FS_LOCKING_DLM
select SYSFS if GFS2_FS_LOCKING_DLM
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 194fe16..6b24afb 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -36,8 +36,8 @@
#include "glops.h"
-static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
- unsigned int from, unsigned int to)
+void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
+ unsigned int from, unsigned int to)
{
struct buffer_head *head = page_buffers(page);
unsigned int bsize = head->b_size;
@@ -615,7 +615,7 @@
unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
int alloc_required;
int error = 0;
- struct gfs2_alloc *al;
+ struct gfs2_alloc *al = NULL;
pgoff_t index = pos >> PAGE_CACHE_SHIFT;
unsigned from = pos & (PAGE_CACHE_SIZE - 1);
unsigned to = from + len;
@@ -663,6 +663,8 @@
rblocks += RES_STATFS + RES_QUOTA;
if (&ip->i_inode == sdp->sd_rindex)
rblocks += 2 * RES_STATFS;
+ if (alloc_required)
+ rblocks += gfs2_rg_blocks(al);
error = gfs2_trans_begin(sdp, rblocks,
PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
@@ -696,13 +698,11 @@
page_cache_release(page);
- /*
- * XXX(truncate): the call below should probably be replaced with
- * a call to the gfs2-specific truncate blocks helper to actually
- * release disk blocks..
- */
+ gfs2_trans_end(sdp);
if (pos + len > ip->i_inode.i_size)
- truncate_setsize(&ip->i_inode, ip->i_inode.i_size);
+ gfs2_trim_blocks(&ip->i_inode);
+ goto out_trans_fail;
+
out_endtrans:
gfs2_trans_end(sdp);
out_trans_fail:
@@ -802,10 +802,8 @@
page_cache_release(page);
if (copied) {
- if (inode->i_size < to) {
+ if (inode->i_size < to)
i_size_write(inode, to);
- ip->i_disksize = inode->i_size;
- }
gfs2_dinode_out(ip, di);
mark_inode_dirty(inode);
}
@@ -876,8 +874,6 @@
ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
if (ret > 0) {
- if (inode->i_size > ip->i_disksize)
- ip->i_disksize = inode->i_size;
gfs2_dinode_out(ip, dibh->b_data);
mark_inode_dirty(inode);
}
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 6f48280..5476c06 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -50,7 +50,7 @@
* @ip: the inode
* @dibh: the dinode buffer
* @block: the block number that was allocated
- * @private: any locked page held by the caller process
+ * @page: The (optional) page. This is looked up if @page is NULL
*
* Returns: errno
*/
@@ -109,8 +109,7 @@
/**
* gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big
* @ip: The GFS2 inode to unstuff
- * @unstuffer: the routine that handles unstuffing a non-zero length file
- * @private: private data for the unstuffer
+ * @page: The (optional) page. This is looked up if @page is NULL
*
* This routine unstuffs a dinode and returns it to a "normal" state such
* that the height can be grown in the traditional way.
@@ -132,7 +131,7 @@
if (error)
goto out;
- if (ip->i_disksize) {
+ if (i_size_read(&ip->i_inode)) {
/* Get a free block, fill it with the stuffed data,
and write it out to disk */
@@ -161,7 +160,7 @@
di = (struct gfs2_dinode *)dibh->b_data;
gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
- if (ip->i_disksize) {
+ if (i_size_read(&ip->i_inode)) {
*(__be64 *)(di + 1) = cpu_to_be64(block);
gfs2_add_inode_blocks(&ip->i_inode, 1);
di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
@@ -885,83 +884,14 @@
}
/**
- * do_grow - Make a file look bigger than it is
- * @ip: the inode
- * @size: the size to set the file to
- *
- * Called with an exclusive lock on @ip.
- *
- * Returns: errno
- */
-
-static int do_grow(struct gfs2_inode *ip, u64 size)
-{
- struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- struct gfs2_alloc *al;
- struct buffer_head *dibh;
- int error;
-
- al = gfs2_alloc_get(ip);
- if (!al)
- return -ENOMEM;
-
- error = gfs2_quota_lock_check(ip);
- if (error)
- goto out;
-
- al->al_requested = sdp->sd_max_height + RES_DATA;
-
- error = gfs2_inplace_reserve(ip);
- if (error)
- goto out_gunlock_q;
-
- error = gfs2_trans_begin(sdp,
- sdp->sd_max_height + al->al_rgd->rd_length +
- RES_JDATA + RES_DINODE + RES_STATFS + RES_QUOTA, 0);
- if (error)
- goto out_ipres;
-
- error = gfs2_meta_inode_buffer(ip, &dibh);
- if (error)
- goto out_end_trans;
-
- if (size > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
- if (gfs2_is_stuffed(ip)) {
- error = gfs2_unstuff_dinode(ip, NULL);
- if (error)
- goto out_brelse;
- }
- }
-
- ip->i_disksize = size;
- ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
- gfs2_trans_add_bh(ip->i_gl, dibh, 1);
- gfs2_dinode_out(ip, dibh->b_data);
-
-out_brelse:
- brelse(dibh);
-out_end_trans:
- gfs2_trans_end(sdp);
-out_ipres:
- gfs2_inplace_release(ip);
-out_gunlock_q:
- gfs2_quota_unlock(ip);
-out:
- gfs2_alloc_put(ip);
- return error;
-}
-
-
-/**
* gfs2_block_truncate_page - Deal with zeroing out data for truncate
*
* This is partly borrowed from ext3.
*/
-static int gfs2_block_truncate_page(struct address_space *mapping)
+static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
{
struct inode *inode = mapping->host;
struct gfs2_inode *ip = GFS2_I(inode);
- loff_t from = inode->i_size;
unsigned long index = from >> PAGE_CACHE_SHIFT;
unsigned offset = from & (PAGE_CACHE_SIZE-1);
unsigned blocksize, iblock, length, pos;
@@ -1023,9 +953,11 @@
return err;
}
-static int trunc_start(struct gfs2_inode *ip, u64 size)
+static int trunc_start(struct inode *inode, u64 oldsize, u64 newsize)
{
- struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
+ struct address_space *mapping = inode->i_mapping;
struct buffer_head *dibh;
int journaled = gfs2_is_jdata(ip);
int error;
@@ -1039,31 +971,26 @@
if (error)
goto out;
- if (gfs2_is_stuffed(ip)) {
- u64 dsize = size + sizeof(struct gfs2_dinode);
- ip->i_disksize = size;
- ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
- gfs2_trans_add_bh(ip->i_gl, dibh, 1);
- gfs2_dinode_out(ip, dibh->b_data);
- if (dsize > dibh->b_size)
- dsize = dibh->b_size;
- gfs2_buffer_clear_tail(dibh, dsize);
- error = 1;
- } else {
- if (size & (u64)(sdp->sd_sb.sb_bsize - 1))
- error = gfs2_block_truncate_page(ip->i_inode.i_mapping);
+ gfs2_trans_add_bh(ip->i_gl, dibh, 1);
- if (!error) {
- ip->i_disksize = size;
- ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
- ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
- gfs2_trans_add_bh(ip->i_gl, dibh, 1);
- gfs2_dinode_out(ip, dibh->b_data);
+ if (gfs2_is_stuffed(ip)) {
+ gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize);
+ } else {
+ if (newsize & (u64)(sdp->sd_sb.sb_bsize - 1)) {
+ error = gfs2_block_truncate_page(mapping, newsize);
+ if (error)
+ goto out_brelse;
}
+ ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
}
- brelse(dibh);
+ i_size_write(inode, newsize);
+ ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
+ gfs2_dinode_out(ip, dibh->b_data);
+ truncate_pagecache(inode, oldsize, newsize);
+out_brelse:
+ brelse(dibh);
out:
gfs2_trans_end(sdp);
return error;
@@ -1123,7 +1050,7 @@
if (error)
goto out;
- if (!ip->i_disksize) {
+ if (!i_size_read(&ip->i_inode)) {
ip->i_height = 0;
ip->i_goal = ip->i_no_addr;
gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
@@ -1143,92 +1070,154 @@
/**
* do_shrink - make a file smaller
- * @ip: the inode
- * @size: the size to make the file
- * @truncator: function to truncate the last partial block
+ * @inode: the inode
+ * @oldsize: the current inode size
+ * @newsize: the size to make the file
*
- * Called with an exclusive lock on @ip.
+ * Called with an exclusive lock on @inode. The @newsize must
+ * be equal to or smaller than the current inode size.
*
* Returns: errno
*/
-static int do_shrink(struct gfs2_inode *ip, u64 size)
+static int do_shrink(struct inode *inode, u64 oldsize, u64 newsize)
{
+ struct gfs2_inode *ip = GFS2_I(inode);
int error;
- error = trunc_start(ip, size);
+ error = trunc_start(inode, oldsize, newsize);
if (error < 0)
return error;
- if (error > 0)
+ if (gfs2_is_stuffed(ip))
return 0;
- error = trunc_dealloc(ip, size);
- if (!error)
+ error = trunc_dealloc(ip, newsize);
+ if (error == 0)
error = trunc_end(ip);
return error;
}
-static int do_touch(struct gfs2_inode *ip, u64 size)
+void gfs2_trim_blocks(struct inode *inode)
{
- struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+ u64 size = inode->i_size;
+ int ret;
+
+ ret = do_shrink(inode, size, size);
+ WARN_ON(ret != 0);
+}
+
+/**
+ * do_grow - Touch and update inode size
+ * @inode: The inode
+ * @size: The new size
+ *
+ * This function updates the timestamps on the inode and
+ * may also increase the size of the inode. This function
+ * must not be called with @size any smaller than the current
+ * inode size.
+ *
+ * Although it is not strictly required to unstuff files here,
+ * earlier versions of GFS2 have a bug in the stuffed file reading
+ * code which will result in a buffer overrun if the size is larger
+ * than the max stuffed file size. In order to prevent this from
+ * occurring, such files are unstuffed, but in other cases we can
+ * just update the inode size directly.
+ *
+ * Returns: 0 on success, or -ve on error
+ */
+
+static int do_grow(struct inode *inode, u64 size)
+{
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
struct buffer_head *dibh;
+ struct gfs2_alloc *al = NULL;
int error;
- error = gfs2_trans_begin(sdp, RES_DINODE, 0);
- if (error)
- return error;
+ if (gfs2_is_stuffed(ip) &&
+ (size > (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)))) {
+ al = gfs2_alloc_get(ip);
+ if (al == NULL)
+ return -ENOMEM;
- down_write(&ip->i_rw_mutex);
+ error = gfs2_quota_lock_check(ip);
+ if (error)
+ goto do_grow_alloc_put;
+
+ al->al_requested = 1;
+ error = gfs2_inplace_reserve(ip);
+ if (error)
+ goto do_grow_qunlock;
+ }
+
+ error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT, 0);
+ if (error)
+ goto do_grow_release;
+
+ if (al) {
+ error = gfs2_unstuff_dinode(ip, NULL);
+ if (error)
+ goto do_end_trans;
+ }
error = gfs2_meta_inode_buffer(ip, &dibh);
if (error)
- goto do_touch_out;
+ goto do_end_trans;
+ i_size_write(inode, size);
ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
-do_touch_out:
- up_write(&ip->i_rw_mutex);
+do_end_trans:
gfs2_trans_end(sdp);
+do_grow_release:
+ if (al) {
+ gfs2_inplace_release(ip);
+do_grow_qunlock:
+ gfs2_quota_unlock(ip);
+do_grow_alloc_put:
+ gfs2_alloc_put(ip);
+ }
return error;
}
/**
- * gfs2_truncatei - make a file a given size
- * @ip: the inode
- * @size: the size to make the file
- * @truncator: function to truncate the last partial block
+ * gfs2_setattr_size - make a file a given size
+ * @inode: the inode
+ * @newsize: the size to make the file
*
- * The file size can grow, shrink, or stay the same size.
+ * The file size can grow, shrink, or stay the same size. This
+ * is called holding i_mutex and an exclusive glock on the inode
+ * in question.
*
* Returns: errno
*/
-int gfs2_truncatei(struct gfs2_inode *ip, u64 size)
+int gfs2_setattr_size(struct inode *inode, u64 newsize)
{
- int error;
+ int ret;
+ u64 oldsize;
- if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), S_ISREG(ip->i_inode.i_mode)))
- return -EINVAL;
+ BUG_ON(!S_ISREG(inode->i_mode));
- if (size > ip->i_disksize)
- error = do_grow(ip, size);
- else if (size < ip->i_disksize)
- error = do_shrink(ip, size);
- else
- /* update time stamps */
- error = do_touch(ip, size);
+ ret = inode_newsize_ok(inode, newsize);
+ if (ret)
+ return ret;
- return error;
+ oldsize = inode->i_size;
+ if (newsize >= oldsize)
+ return do_grow(inode, newsize);
+
+ return do_shrink(inode, oldsize, newsize);
}
int gfs2_truncatei_resume(struct gfs2_inode *ip)
{
int error;
- error = trunc_dealloc(ip, ip->i_disksize);
+ error = trunc_dealloc(ip, i_size_read(&ip->i_inode));
if (!error)
error = trunc_end(ip);
return error;
@@ -1269,7 +1258,7 @@
shift = sdp->sd_sb.sb_bsize_shift;
BUG_ON(gfs2_is_dir(ip));
- end_of_file = (ip->i_disksize + sdp->sd_sb.sb_bsize - 1) >> shift;
+ end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift;
lblock = offset >> shift;
lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
if (lblock_stop > end_of_file)
diff --git a/fs/gfs2/bmap.h b/fs/gfs2/bmap.h
index a20a521..42fea03 100644
--- a/fs/gfs2/bmap.h
+++ b/fs/gfs2/bmap.h
@@ -44,14 +44,16 @@
}
}
-int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page);
-int gfs2_block_map(struct inode *inode, sector_t lblock, struct buffer_head *bh, int create);
-int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen);
-
-int gfs2_truncatei(struct gfs2_inode *ip, u64 size);
-int gfs2_truncatei_resume(struct gfs2_inode *ip);
-int gfs2_file_dealloc(struct gfs2_inode *ip);
-int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
- unsigned int len);
+extern int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page);
+extern int gfs2_block_map(struct inode *inode, sector_t lblock,
+ struct buffer_head *bh, int create);
+extern int gfs2_extent_map(struct inode *inode, u64 lblock, int *new,
+ u64 *dblock, unsigned *extlen);
+extern int gfs2_setattr_size(struct inode *inode, u64 size);
+extern void gfs2_trim_blocks(struct inode *inode);
+extern int gfs2_truncatei_resume(struct gfs2_inode *ip);
+extern int gfs2_file_dealloc(struct gfs2_inode *ip);
+extern int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
+ unsigned int len);
#endif /* __BMAP_DOT_H__ */
diff --git a/fs/gfs2/dentry.c b/fs/gfs2/dentry.c
index bb7907b..6798755 100644
--- a/fs/gfs2/dentry.c
+++ b/fs/gfs2/dentry.c
@@ -49,7 +49,7 @@
ip = GFS2_I(inode);
}
- if (sdp->sd_args.ar_localcaching)
+ if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
goto valid;
had_lock = (gfs2_glock_is_locked_by_me(dip->i_gl) != NULL);
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index b9dd88a..5c356d0 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -79,6 +79,9 @@
#define gfs2_disk_hash2offset(h) (((u64)(h)) >> 1)
#define gfs2_dir_offset2hash(p) ((u32)(((u64)(p)) << 1))
+struct qstr gfs2_qdot __read_mostly;
+struct qstr gfs2_qdotdot __read_mostly;
+
typedef int (*leaf_call_t) (struct gfs2_inode *dip, u32 index, u32 len,
u64 leaf_no, void *data);
typedef int (*gfs2_dscan_t)(const struct gfs2_dirent *dent,
@@ -127,8 +130,8 @@
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size);
- if (ip->i_disksize < offset + size)
- ip->i_disksize = offset + size;
+ if (ip->i_inode.i_size < offset + size)
+ i_size_write(&ip->i_inode, offset + size);
ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
gfs2_dinode_out(ip, dibh->b_data);
@@ -225,8 +228,8 @@
if (error)
return error;
- if (ip->i_disksize < offset + copied)
- ip->i_disksize = offset + copied;
+ if (ip->i_inode.i_size < offset + copied)
+ i_size_write(&ip->i_inode, offset + copied);
ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
@@ -275,12 +278,13 @@
unsigned int o;
int copied = 0;
int error = 0;
+ u64 disksize = i_size_read(&ip->i_inode);
- if (offset >= ip->i_disksize)
+ if (offset >= disksize)
return 0;
- if (offset + size > ip->i_disksize)
- size = ip->i_disksize - offset;
+ if (offset + size > disksize)
+ size = disksize - offset;
if (!size)
return 0;
@@ -727,7 +731,7 @@
unsigned hsize = 1 << ip->i_depth;
unsigned index;
u64 ln;
- if (hsize * sizeof(u64) != ip->i_disksize) {
+ if (hsize * sizeof(u64) != i_size_read(inode)) {
gfs2_consist_inode(ip);
return ERR_PTR(-EIO);
}
@@ -879,7 +883,7 @@
for (x = sdp->sd_hash_ptrs; x--; lp++)
*lp = cpu_to_be64(bn);
- dip->i_disksize = sdp->sd_sb.sb_bsize / 2;
+ i_size_write(inode, sdp->sd_sb.sb_bsize / 2);
gfs2_add_inode_blocks(&dip->i_inode, 1);
dip->i_diskflags |= GFS2_DIF_EXHASH;
@@ -1057,11 +1061,12 @@
u64 *buf;
u64 *from, *to;
u64 block;
+ u64 disksize = i_size_read(&dip->i_inode);
int x;
int error = 0;
hsize = 1 << dip->i_depth;
- if (hsize * sizeof(u64) != dip->i_disksize) {
+ if (hsize * sizeof(u64) != disksize) {
gfs2_consist_inode(dip);
return -EIO;
}
@@ -1072,7 +1077,7 @@
if (!buf)
return -ENOMEM;
- for (block = dip->i_disksize >> sdp->sd_hash_bsize_shift; block--;) {
+ for (block = disksize >> sdp->sd_hash_bsize_shift; block--;) {
error = gfs2_dir_read_data(dip, (char *)buf,
block * sdp->sd_hash_bsize,
sdp->sd_hash_bsize, 1);
@@ -1370,7 +1375,7 @@
unsigned depth = 0;
hsize = 1 << dip->i_depth;
- if (hsize * sizeof(u64) != dip->i_disksize) {
+ if (hsize * sizeof(u64) != i_size_read(inode)) {
gfs2_consist_inode(dip);
return -EIO;
}
@@ -1784,7 +1789,7 @@
int error = 0;
hsize = 1 << dip->i_depth;
- if (hsize * sizeof(u64) != dip->i_disksize) {
+ if (hsize * sizeof(u64) != i_size_read(&dip->i_inode)) {
gfs2_consist_inode(dip);
return -EIO;
}
diff --git a/fs/gfs2/dir.h b/fs/gfs2/dir.h
index 4f91944..a98f644 100644
--- a/fs/gfs2/dir.h
+++ b/fs/gfs2/dir.h
@@ -17,23 +17,24 @@
struct gfs2_inode;
struct gfs2_inum;
-struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *filename);
-int gfs2_dir_check(struct inode *dir, const struct qstr *filename,
- const struct gfs2_inode *ip);
-int gfs2_dir_add(struct inode *inode, const struct qstr *filename,
- const struct gfs2_inode *ip, unsigned int type);
-int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *filename);
-int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
- filldir_t filldir);
-int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
- const struct gfs2_inode *nip, unsigned int new_type);
+extern struct inode *gfs2_dir_search(struct inode *dir,
+ const struct qstr *filename);
+extern int gfs2_dir_check(struct inode *dir, const struct qstr *filename,
+ const struct gfs2_inode *ip);
+extern int gfs2_dir_add(struct inode *inode, const struct qstr *filename,
+ const struct gfs2_inode *ip, unsigned int type);
+extern int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *filename);
+extern int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
+ filldir_t filldir);
+extern int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
+ const struct gfs2_inode *nip, unsigned int new_type);
-int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip);
+extern int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip);
-int gfs2_diradd_alloc_required(struct inode *dir,
- const struct qstr *filename);
-int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block,
- struct buffer_head **bhp);
+extern int gfs2_diradd_alloc_required(struct inode *dir,
+ const struct qstr *filename);
+extern int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block,
+ struct buffer_head **bhp);
static inline u32 gfs2_disk_hash(const char *data, int len)
{
@@ -61,4 +62,7 @@
memcpy(dent + 1, name->name, name->len);
}
+extern struct qstr gfs2_qdot;
+extern struct qstr gfs2_qdotdot;
+
#endif /* __DIR_DOT_H__ */
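gfs2_qdot/gfs2_qdotdot replace the per-call gfs2_str2qstr() construction of "." and ".." seen in the export.c and ops_inode.c hunks below; they are filled in once at module init (see the fs/gfs2/main.c hunk later in this patch). The pattern, in brief:

struct qstr gfs2_qdot __read_mostly;
struct qstr gfs2_qdotdot __read_mostly;

static int __init example_init(void)
{
	/* Build the constant names once instead of at every call site. */
	gfs2_str2qstr(&gfs2_qdot, ".");
	gfs2_str2qstr(&gfs2_qdotdot, "..");
	return 0;
}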
diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c
index dfe237a..06d5827 100644
--- a/fs/gfs2/export.c
+++ b/fs/gfs2/export.c
@@ -126,16 +126,9 @@
static struct dentry *gfs2_get_parent(struct dentry *child)
{
- struct qstr dotdot;
struct dentry *dentry;
- /*
- * XXX(hch): it would be a good idea to keep this around as a
- * static variable.
- */
- gfs2_str2qstr(&dotdot, "..");
-
- dentry = d_obtain_alias(gfs2_lookupi(child->d_inode, &dotdot, 1));
+ dentry = d_obtain_alias(gfs2_lookupi(child->d_inode, &gfs2_qdotdot, 1));
if (!IS_ERR(dentry))
dentry->d_op = &gfs2_dops;
return dentry;
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 4edd662..237ee6a 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -382,8 +382,10 @@
rblocks = RES_DINODE + ind_blocks;
if (gfs2_is_jdata(ip))
rblocks += data_blocks ? data_blocks : 1;
- if (ind_blocks || data_blocks)
+ if (ind_blocks || data_blocks) {
rblocks += RES_STATFS + RES_QUOTA;
+ rblocks += gfs2_rg_blocks(al);
+ }
ret = gfs2_trans_begin(sdp, rblocks, 0);
if (ret)
goto out_trans_fail;
@@ -491,7 +493,7 @@
goto fail;
if (!(file->f_flags & O_LARGEFILE) &&
- ip->i_disksize > MAX_NON_LFS) {
+ i_size_read(inode) > MAX_NON_LFS) {
error = -EOVERFLOW;
goto fail_gunlock;
}
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 9adf8f9..8777885 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -441,6 +441,8 @@
else
gfs2_glock_put_nolock(gl);
}
+ if (held1 && held2 && list_empty(&gl->gl_holders))
+ clear_bit(GLF_QUEUED, &gl->gl_flags);
gl->gl_state = new_state;
gl->gl_tchange = jiffies;
@@ -1012,6 +1014,7 @@
if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
insert_pt = &gh2->gh_list;
}
+ set_bit(GLF_QUEUED, &gl->gl_flags);
if (likely(insert_pt == NULL)) {
list_add_tail(&gh->gh_list, &gl->gl_holders);
if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
@@ -1310,10 +1313,12 @@
gfs2_glock_hold(gl);
holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
- if (time_before(now, holdtime))
- delay = holdtime - now;
- if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
- delay = gl->gl_ops->go_min_hold_time;
+ if (test_bit(GLF_QUEUED, &gl->gl_flags)) {
+ if (time_before(now, holdtime))
+ delay = holdtime - now;
+ if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
+ delay = gl->gl_ops->go_min_hold_time;
+ }
spin_lock(&gl->gl_spin);
handle_callback(gl, state, delay);
@@ -1512,7 +1517,7 @@
spin_unlock(&lru_lock);
spin_lock(&gl->gl_spin);
- if (find_first_holder(gl) == NULL && gl->gl_state != LM_ST_UNLOCKED)
+ if (gl->gl_state != LM_ST_UNLOCKED)
handle_callback(gl, LM_ST_UNLOCKED, 0);
spin_unlock(&gl->gl_spin);
gfs2_glock_hold(gl);
@@ -1660,6 +1665,8 @@
*p++ = 'I';
if (test_bit(GLF_FROZEN, gflags))
*p++ = 'F';
+ if (test_bit(GLF_QUEUED, gflags))
+ *p++ = 'q';
*p = 0;
return buf;
}
@@ -1776,10 +1783,12 @@
}
#endif
- glock_workqueue = create_workqueue("glock_workqueue");
+ glock_workqueue = alloc_workqueue("glock_workqueue", WQ_RESCUER |
+ WQ_HIGHPRI | WQ_FREEZEABLE, 0);
if (IS_ERR(glock_workqueue))
return PTR_ERR(glock_workqueue);
- gfs2_delete_workqueue = create_workqueue("delete_workqueue");
+ gfs2_delete_workqueue = alloc_workqueue("delete_workqueue", WQ_RESCUER |
+ WQ_FREEZEABLE, 0);
if (IS_ERR(gfs2_delete_workqueue)) {
destroy_workqueue(glock_workqueue);
return PTR_ERR(gfs2_delete_workqueue);
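Switching from create_workqueue() to alloc_workqueue() lets the caller pass per-queue flags (WQ_HIGHPRI, WQ_RESCUER, WQ_FREEZEABLE in this series; later kernels renamed the latter two WQ_MEM_RECLAIM and WQ_FREEZABLE). A hedged sketch of the conversion pattern with hypothetical names; note alloc_workqueue() returns NULL on failure, so a plain NULL check suffices:

static struct workqueue_struct *example_wq;	/* hypothetical module state */

static int __init example_init(void)
{
	example_wq = alloc_workqueue("example_wq",
				     WQ_RESCUER | WQ_FREEZEABLE, 0);
	if (!example_wq)
		return -ENOMEM;
	return 0;
}

static void __exit example_exit(void)
{
	destroy_workqueue(example_wq);
}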
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 2bda191..db1c26d 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -215,7 +215,7 @@
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);
/**
- * gfs2_glock_nq_init - intialize a holder and enqueue it on a glock
+ * gfs2_glock_nq_init - initialize a holder and enqueue it on a glock
* @gl: the glock
* @state: the state we're requesting
* @flags: the modifier flags
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 49f97d3..0d149dc 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -262,13 +262,12 @@
const struct gfs2_inode *ip = gl->gl_object;
if (ip == NULL)
return 0;
- gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu/%llu\n",
+ gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n",
(unsigned long long)ip->i_no_formal_ino,
(unsigned long long)ip->i_no_addr,
IF2DT(ip->i_inode.i_mode), ip->i_flags,
(unsigned int)ip->i_diskflags,
- (unsigned long long)ip->i_inode.i_size,
- (unsigned long long)ip->i_disksize);
+ (unsigned long long)i_size_read(&ip->i_inode));
return 0;
}
@@ -453,7 +452,6 @@
[LM_TYPE_META] = &gfs2_meta_glops,
[LM_TYPE_INODE] = &gfs2_inode_glops,
[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
- [LM_TYPE_NONDISK] = &gfs2_trans_glops,
[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
[LM_TYPE_FLOCK] = &gfs2_flock_glops,
[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index fdbf4b3..764fbb4 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -196,6 +196,7 @@
GLF_REPLY_PENDING = 9,
GLF_INITIAL = 10,
GLF_FROZEN = 11,
+ GLF_QUEUED = 12,
};
struct gfs2_glock {
@@ -267,7 +268,6 @@
u64 i_no_formal_ino;
u64 i_generation;
u64 i_eattr;
- loff_t i_disksize;
unsigned long i_flags; /* GIF_... */
struct gfs2_glock *i_gl; /* Move into i_gh? */
struct gfs2_holder i_iopen_gh;
@@ -416,11 +416,8 @@
char ar_locktable[GFS2_LOCKNAME_LEN]; /* Name of the Lock Table */
char ar_hostdata[GFS2_LOCKNAME_LEN]; /* Host specific data */
unsigned int ar_spectator:1; /* Don't get a journal */
- unsigned int ar_ignore_local_fs:1; /* Ignore optimisations */
unsigned int ar_localflocks:1; /* Let the VFS do flock|fcntl */
- unsigned int ar_localcaching:1; /* Local caching */
unsigned int ar_debug:1; /* Oops on errors */
- unsigned int ar_upgrade:1; /* Upgrade ondisk format */
unsigned int ar_posix_acl:1; /* Enable posix acls */
unsigned int ar_quota:2; /* off/account/on */
unsigned int ar_suiddir:1; /* suiddir support */
@@ -497,7 +494,7 @@
*/
struct lm_lockstruct {
- unsigned int ls_jid;
+ int ls_jid;
unsigned int ls_first;
unsigned int ls_first_done;
unsigned int ls_nodir;
@@ -572,6 +569,7 @@
struct list_head sd_rindex_mru_list;
struct gfs2_rgrpd *sd_rindex_forward;
unsigned int sd_rgrps;
+ unsigned int sd_max_rg_data;
/* Journal index stuff */
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 08140f1..06370f8 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -359,8 +359,7 @@
* to do that.
*/
ip->i_inode.i_nlink = be32_to_cpu(str->di_nlink);
- ip->i_disksize = be64_to_cpu(str->di_size);
- i_size_write(&ip->i_inode, ip->i_disksize);
+ i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
atime.tv_sec = be64_to_cpu(str->di_atime);
atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
@@ -1055,7 +1054,7 @@
str->di_uid = cpu_to_be32(ip->i_inode.i_uid);
str->di_gid = cpu_to_be32(ip->i_inode.i_gid);
str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
- str->di_size = cpu_to_be64(ip->i_disksize);
+ str->di_size = cpu_to_be64(i_size_read(&ip->i_inode));
str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
@@ -1085,8 +1084,8 @@
(unsigned long long)ip->i_no_formal_ino);
printk(KERN_INFO " no_addr = %llu\n",
(unsigned long long)ip->i_no_addr);
- printk(KERN_INFO " i_disksize = %llu\n",
- (unsigned long long)ip->i_disksize);
+ printk(KERN_INFO " i_size = %llu\n",
+ (unsigned long long)i_size_read(&ip->i_inode));
printk(KERN_INFO " blocks = %llu\n",
(unsigned long long)gfs2_get_inode_blocks(&ip->i_inode));
printk(KERN_INFO " i_goal = %llu\n",
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index 300ada3..6720d7d 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -19,6 +19,8 @@
extern int gfs2_internal_read(struct gfs2_inode *ip,
struct file_ra_state *ra_state,
char *buf, loff_t *pos, unsigned size);
+extern void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
+ unsigned int from, unsigned int to);
extern void gfs2_set_aops(struct inode *inode);
static inline int gfs2_is_stuffed(const struct gfs2_inode *ip)
@@ -80,6 +82,19 @@
dent->de_inum.no_addr = cpu_to_be64(ip->i_no_addr);
}
+static inline int gfs2_check_internal_file_size(struct inode *inode,
+ u64 minsize, u64 maxsize)
+{
+ u64 size = i_size_read(inode);
+ if (size < minsize || size > maxsize)
+ goto err;
+ if (size & ((1 << inode->i_blkbits) - 1))
+ goto err;
+ return 0;
+err:
+ gfs2_consist_inode(GFS2_I(inode));
+ return -EIO;
+}
extern void gfs2_set_iop(struct inode *inode);
extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
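gfs2_check_internal_file_size() centralizes the open-coded i_disksize sanity checks that the quota and journal hunks below delete: the size must lie within caller-specified bounds and be block-aligned, else the inode is flagged inconsistent and -EIO is returned. Usage sketch (bounds taken from the quota hunk later in this patch; the wrapper function is hypothetical):

static int example_check_quota_file(struct inode *inode)
{
	/* 1 byte minimum, 64MB maximum, must be block-size aligned. */
	return gfs2_check_internal_file_size(inode, 1, 64 << 20);
}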
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 0e0470e..1c09425 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -42,9 +42,9 @@
ret |= LM_OUT_CANCELED;
goto out;
case -EAGAIN: /* Try lock fails */
+ case -EDEADLK: /* Deadlock detected */
goto out;
- case -EINVAL: /* Invalid */
- case -ENOMEM: /* Out of memory */
+ case -ETIMEDOUT: /* Canceled due to timeout */
ret |= LM_OUT_ERROR;
goto out;
case 0: /* Success */
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index cde1248..ac750bd 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -932,7 +932,7 @@
do {
prepare_to_wait(&sdp->sd_logd_waitq, &wait,
- TASK_UNINTERRUPTIBLE);
+ TASK_INTERRUPTIBLE);
if (!gfs2_ail_flush_reqd(sdp) &&
!gfs2_jrnl_flush_reqd(sdp) &&
!kthread_should_stop())
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index b1e9630..d7eb1e2 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -24,6 +24,7 @@
#include "glock.h"
#include "quota.h"
#include "recovery.h"
+#include "dir.h"
static struct shrinker qd_shrinker = {
.shrink = gfs2_shrink_qd_memory,
@@ -78,6 +79,9 @@
{
int error;
+ gfs2_str2qstr(&gfs2_qdot, ".");
+ gfs2_str2qstr(&gfs2_qdotdot, "..");
+
error = gfs2_sys_init();
if (error)
return error;
@@ -140,7 +144,7 @@
error = -ENOMEM;
gfs_recovery_wq = alloc_workqueue("gfs_recovery",
- WQ_NON_REENTRANT | WQ_RESCUER, 0);
+ WQ_RESCUER | WQ_FREEZEABLE, 0);
if (!gfs_recovery_wq)
goto fail_wq;
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 4d4b1e8..aeafc23 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -38,14 +38,6 @@
#define DO 0
#define UNDO 1
-static const u32 gfs2_old_fs_formats[] = {
- 0
-};
-
-static const u32 gfs2_old_multihost_formats[] = {
- 0
-};
-
/**
* gfs2_tune_init - Fill a gfs2_tune structure with default values
* @gt: tune
@@ -135,8 +127,6 @@
static int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb_host *sb, int silent)
{
- unsigned int x;
-
if (sb->sb_magic != GFS2_MAGIC ||
sb->sb_type != GFS2_METATYPE_SB) {
if (!silent)
@@ -150,55 +140,9 @@
sb->sb_multihost_format == GFS2_FORMAT_MULTI)
return 0;
- if (sb->sb_fs_format != GFS2_FORMAT_FS) {
- for (x = 0; gfs2_old_fs_formats[x]; x++)
- if (gfs2_old_fs_formats[x] == sb->sb_fs_format)
- break;
+ fs_warn(sdp, "Unknown on-disk format, unable to mount\n");
- if (!gfs2_old_fs_formats[x]) {
- printk(KERN_WARNING
- "GFS2: code version (%u, %u) is incompatible "
- "with ondisk format (%u, %u)\n",
- GFS2_FORMAT_FS, GFS2_FORMAT_MULTI,
- sb->sb_fs_format, sb->sb_multihost_format);
- printk(KERN_WARNING
- "GFS2: I don't know how to upgrade this FS\n");
- return -EINVAL;
- }
- }
-
- if (sb->sb_multihost_format != GFS2_FORMAT_MULTI) {
- for (x = 0; gfs2_old_multihost_formats[x]; x++)
- if (gfs2_old_multihost_formats[x] ==
- sb->sb_multihost_format)
- break;
-
- if (!gfs2_old_multihost_formats[x]) {
- printk(KERN_WARNING
- "GFS2: code version (%u, %u) is incompatible "
- "with ondisk format (%u, %u)\n",
- GFS2_FORMAT_FS, GFS2_FORMAT_MULTI,
- sb->sb_fs_format, sb->sb_multihost_format);
- printk(KERN_WARNING
- "GFS2: I don't know how to upgrade this FS\n");
- return -EINVAL;
- }
- }
-
- if (!sdp->sd_args.ar_upgrade) {
- printk(KERN_WARNING
- "GFS2: code version (%u, %u) is incompatible "
- "with ondisk format (%u, %u)\n",
- GFS2_FORMAT_FS, GFS2_FORMAT_MULTI,
- sb->sb_fs_format, sb->sb_multihost_format);
- printk(KERN_INFO
- "GFS2: Use the \"upgrade\" mount option to upgrade "
- "the FS\n");
- printk(KERN_INFO "GFS2: See the manual for more details\n");
- return -EINVAL;
- }
-
- return 0;
+ return -EINVAL;
}
static void end_bio_io_page(struct bio *bio, int error)
@@ -586,7 +530,7 @@
prev_db = 0;
- for (lb = 0; lb < ip->i_disksize >> sdp->sd_sb.sb_bsize_shift; lb++) {
+ for (lb = 0; lb < i_size_read(jd->jd_inode) >> sdp->sd_sb.sb_bsize_shift; lb++) {
bh.b_state = 0;
bh.b_blocknr = 0;
bh.b_size = 1 << ip->i_inode.i_blkbits;
@@ -1022,7 +966,6 @@
if (!strcmp("lock_nolock", proto)) {
lm = &nolock_ops;
sdp->sd_args.ar_localflocks = 1;
- sdp->sd_args.ar_localcaching = 1;
#ifdef CONFIG_GFS2_FS_LOCKING_DLM
} else if (!strcmp("lock_dlm", proto)) {
lm = &gfs2_dlm_ops;
@@ -1113,8 +1056,6 @@
static int wait_on_journal(struct gfs2_sbd *sdp)
{
- if (sdp->sd_args.ar_spectator)
- return 0;
if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
return 0;
@@ -1217,6 +1158,20 @@
if (error)
goto fail_sb;
+ /*
+ * If user space has failed to join the cluster or some similar
+ * failure has occurred, then the journal id will contain a
+ * negative (error) number. This will then be returned to the
+ * caller (of the mount syscall). We do this even for spectator
+ * mounts (which just write a jid of 0 to indicate "ok" even though
+ * the jid is unused in the spectator case)
+ */
+ if (sdp->sd_lockstruct.ls_jid < 0) {
+ error = sdp->sd_lockstruct.ls_jid;
+ sdp->sd_lockstruct.ls_jid = 0;
+ goto fail_sb;
+ }
+
error = init_inodes(sdp, DO);
if (error)
goto fail_sb;
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index 1009be2..0534510 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -18,6 +18,8 @@
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/fiemap.h>
+#include <linux/swap.h>
+#include <linux/falloc.h>
#include <asm/uaccess.h>
#include "gfs2.h"
@@ -217,7 +219,7 @@
goto out_gunlock_q;
error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
- al->al_rgd->rd_length +
+ gfs2_rg_blocks(al) +
2 * RES_DINODE + RES_STATFS +
RES_QUOTA, 0);
if (error)
@@ -406,7 +408,6 @@
ip = ghs[1].gh_gl->gl_object;
- ip->i_disksize = size;
i_size_write(inode, size);
error = gfs2_meta_inode_buffer(ip, &dibh);
@@ -461,7 +462,7 @@
ip = ghs[1].gh_gl->gl_object;
ip->i_inode.i_nlink = 2;
- ip->i_disksize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode);
+ i_size_write(inode, sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode));
ip->i_diskflags |= GFS2_DIF_JDATA;
ip->i_entries = 2;
@@ -470,18 +471,15 @@
if (!gfs2_assert_withdraw(sdp, !error)) {
struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data;
struct gfs2_dirent *dent = (struct gfs2_dirent *)(di+1);
- struct qstr str;
- gfs2_str2qstr(&str, ".");
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
- gfs2_qstr2dirent(&str, GFS2_DIRENT_SIZE(str.len), dent);
+ gfs2_qstr2dirent(&gfs2_qdot, GFS2_DIRENT_SIZE(gfs2_qdot.len), dent);
dent->de_inum = di->di_num; /* already GFS2 endian */
dent->de_type = cpu_to_be16(DT_DIR);
di->di_entries = cpu_to_be32(1);
- gfs2_str2qstr(&str, "..");
dent = (struct gfs2_dirent *)((char*)dent + GFS2_DIRENT_SIZE(1));
- gfs2_qstr2dirent(&str, dibh->b_size - GFS2_DIRENT_SIZE(1) - sizeof(struct gfs2_dinode), dent);
+ gfs2_qstr2dirent(&gfs2_qdotdot, dibh->b_size - GFS2_DIRENT_SIZE(1) - sizeof(struct gfs2_dinode), dent);
gfs2_inum_out(dip, dent);
dent->de_type = cpu_to_be16(DT_DIR);
@@ -522,7 +520,6 @@
static int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
struct gfs2_inode *ip)
{
- struct qstr dotname;
int error;
if (ip->i_entries != 2) {
@@ -539,13 +536,11 @@
if (error)
return error;
- gfs2_str2qstr(&dotname, ".");
- error = gfs2_dir_del(ip, &dotname);
+ error = gfs2_dir_del(ip, &gfs2_qdot);
if (error)
return error;
- gfs2_str2qstr(&dotname, "..");
- error = gfs2_dir_del(ip, &dotname);
+ error = gfs2_dir_del(ip, &gfs2_qdotdot);
if (error)
return error;
@@ -694,11 +689,8 @@
struct inode *dir = &to->i_inode;
struct super_block *sb = dir->i_sb;
struct inode *tmp;
- struct qstr dotdot;
int error = 0;
- gfs2_str2qstr(&dotdot, "..");
-
igrab(dir);
for (;;) {
@@ -711,7 +703,7 @@
break;
}
- tmp = gfs2_lookupi(dir, &dotdot, 1);
+ tmp = gfs2_lookupi(dir, &gfs2_qdotdot, 1);
if (IS_ERR(tmp)) {
error = PTR_ERR(tmp);
break;
@@ -744,7 +736,7 @@
struct gfs2_inode *ip = GFS2_I(odentry->d_inode);
struct gfs2_inode *nip = NULL;
struct gfs2_sbd *sdp = GFS2_SB(odir);
- struct gfs2_holder ghs[5], r_gh = { .gh_gl = NULL, };
+ struct gfs2_holder ghs[5], r_gh = { .gh_gl = NULL, }, ri_gh;
struct gfs2_rgrpd *nrgd;
unsigned int num_gh;
int dir_rename = 0;
@@ -758,6 +750,9 @@
return 0;
}
+ error = gfs2_rindex_hold(sdp, &ri_gh);
+ if (error)
+ return error;
if (odip != ndip) {
error = gfs2_glock_nq_init(sdp->sd_rename_gl, LM_ST_EXCLUSIVE,
@@ -887,12 +882,12 @@
al->al_requested = sdp->sd_max_dirres;
- error = gfs2_inplace_reserve(ndip);
+ error = gfs2_inplace_reserve_ri(ndip);
if (error)
goto out_gunlock_q;
error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
- al->al_rgd->rd_length +
+ gfs2_rg_blocks(al) +
4 * RES_DINODE + 4 * RES_LEAF +
RES_STATFS + RES_QUOTA + 4, 0);
if (error)
@@ -920,9 +915,6 @@
}
if (dir_rename) {
- struct qstr name;
- gfs2_str2qstr(&name, "..");
-
error = gfs2_change_nlink(ndip, +1);
if (error)
goto out_end_trans;
@@ -930,7 +922,7 @@
if (error)
goto out_end_trans;
- error = gfs2_dir_mvino(ip, &name, ndip, DT_DIR);
+ error = gfs2_dir_mvino(ip, &gfs2_qdotdot, ndip, DT_DIR);
if (error)
goto out_end_trans;
} else {
@@ -972,6 +964,7 @@
if (r_gh.gh_gl)
gfs2_glock_dq_uninit(&r_gh);
out:
+ gfs2_glock_dq_uninit(&ri_gh);
return error;
}
@@ -990,7 +983,7 @@
struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
struct gfs2_holder i_gh;
struct buffer_head *dibh;
- unsigned int x;
+ unsigned int x, size;
char *buf;
int error;
@@ -1002,7 +995,8 @@
return NULL;
}
- if (!ip->i_disksize) {
+ size = (unsigned int)i_size_read(&ip->i_inode);
+ if (size == 0) {
gfs2_consist_inode(ip);
buf = ERR_PTR(-EIO);
goto out;
@@ -1014,7 +1008,7 @@
goto out;
}
- x = ip->i_disksize + 1;
+ x = size + 1;
buf = kmalloc(x, GFP_NOFS);
if (!buf)
buf = ERR_PTR(-ENOMEM);
@@ -1071,30 +1065,6 @@
return error;
}
-/*
- * XXX(truncate): the truncate_setsize calls should be moved to the end.
- */
-static int setattr_size(struct inode *inode, struct iattr *attr)
-{
- struct gfs2_inode *ip = GFS2_I(inode);
- struct gfs2_sbd *sdp = GFS2_SB(inode);
- int error;
-
- if (attr->ia_size != ip->i_disksize) {
- error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
- if (error)
- return error;
- truncate_setsize(inode, attr->ia_size);
- gfs2_trans_end(sdp);
- }
-
- error = gfs2_truncatei(ip, attr->ia_size);
- if (error && (inode->i_size != ip->i_disksize))
- i_size_write(inode, ip->i_disksize);
-
- return error;
-}
-
static int setattr_chown(struct inode *inode, struct iattr *attr)
{
struct gfs2_inode *ip = GFS2_I(inode);
@@ -1195,7 +1165,7 @@
goto out;
if (attr->ia_valid & ATTR_SIZE)
- error = setattr_size(inode, attr);
+ error = gfs2_setattr_size(inode, attr->ia_size);
else if (attr->ia_valid & (ATTR_UID | ATTR_GID))
error = setattr_chown(inode, attr);
else if ((attr->ia_valid & ATTR_MODE) && IS_POSIXACL(inode))
@@ -1301,6 +1271,257 @@
return ret;
}
+static void empty_write_end(struct page *page, unsigned from,
+ unsigned to)
+{
+ struct gfs2_inode *ip = GFS2_I(page->mapping->host);
+
+ page_zero_new_buffers(page, from, to);
+ flush_dcache_page(page);
+ mark_page_accessed(page);
+
+ if (!gfs2_is_writeback(ip))
+ gfs2_page_add_databufs(ip, page, from, to);
+
+ block_commit_write(page, from, to);
+}
+
+
+static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
+{
+ unsigned start, end, next;
+ struct buffer_head *bh, *head;
+ int error;
+
+ if (!page_has_buffers(page)) {
+ error = block_prepare_write(page, from, to, gfs2_block_map);
+ if (unlikely(error))
+ return error;
+
+ empty_write_end(page, from, to);
+ return 0;
+ }
+
+ bh = head = page_buffers(page);
+ next = end = 0;
+ while (next < from) {
+ next += bh->b_size;
+ bh = bh->b_this_page;
+ }
+ start = next;
+ do {
+ next += bh->b_size;
+ if (buffer_mapped(bh)) {
+ if (end) {
+ error = block_prepare_write(page, start, end,
+ gfs2_block_map);
+ if (unlikely(error))
+ return error;
+ empty_write_end(page, start, end);
+ end = 0;
+ }
+ start = next;
+ } else
+ end = next;
+ bh = bh->b_this_page;
+ } while (next < to);
+
+ if (end) {
+ error = block_prepare_write(page, start, end, gfs2_block_map);
+ if (unlikely(error))
+ return error;
+ empty_write_end(page, start, end);
+ }
+
+ return 0;
+}
+
+static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
+ int mode)
+{
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct buffer_head *dibh;
+ int error;
+ u64 start = offset >> PAGE_CACHE_SHIFT;
+ unsigned int start_offset = offset & ~PAGE_CACHE_MASK;
+ u64 end = (offset + len - 1) >> PAGE_CACHE_SHIFT;
+ pgoff_t curr;
+ struct page *page;
+ unsigned int end_offset = (offset + len) & ~PAGE_CACHE_MASK;
+ unsigned int from, to;
+
+ if (!end_offset)
+ end_offset = PAGE_CACHE_SIZE;
+
+ error = gfs2_meta_inode_buffer(ip, &dibh);
+ if (unlikely(error))
+ goto out;
+
+ gfs2_trans_add_bh(ip->i_gl, dibh, 1);
+
+ if (gfs2_is_stuffed(ip)) {
+ error = gfs2_unstuff_dinode(ip, NULL);
+ if (unlikely(error))
+ goto out;
+ }
+
+ curr = start;
+ offset = start << PAGE_CACHE_SHIFT;
+ from = start_offset;
+ to = PAGE_CACHE_SIZE;
+ while (curr <= end) {
+ page = grab_cache_page_write_begin(inode->i_mapping, curr,
+ AOP_FLAG_NOFS);
+ if (unlikely(!page)) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ if (curr == end)
+ to = end_offset;
+ error = write_empty_blocks(page, from, to);
+ if (!error && offset + to > inode->i_size &&
+ !(mode & FALLOC_FL_KEEP_SIZE)) {
+ i_size_write(inode, offset + to);
+ }
+ unlock_page(page);
+ page_cache_release(page);
+ if (error)
+ goto out;
+ curr++;
+ offset += PAGE_CACHE_SIZE;
+ from = 0;
+ }
+
+ gfs2_dinode_out(ip, dibh->b_data);
+ mark_inode_dirty(inode);
+
+ brelse(dibh);
+
+out:
+ return error;
+}
+
+static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
+ unsigned int *data_blocks, unsigned int *ind_blocks)
+{
+ const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+ unsigned int max_blocks = ip->i_alloc->al_rgd->rd_free_clone;
+ unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);
+
+ for (tmp = max_data; tmp > sdp->sd_diptrs;) {
+ tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
+ max_data -= tmp;
+ }
+ /* This calculation isn't the exact reverse of gfs2_write_calc_reserv,
+ so it might end up with fewer data blocks */
+ if (max_data <= *data_blocks)
+ return;
+ *data_blocks = max_data;
+ *ind_blocks = max_blocks - max_data;
+ *len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
+ if (*len > max) {
+ *len = max;
+ gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
+ }
+}
+
+static long gfs2_fallocate(struct inode *inode, int mode, loff_t offset,
+ loff_t len)
+{
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
+ struct gfs2_inode *ip = GFS2_I(inode);
+ unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
+ loff_t bytes, max_bytes;
+ struct gfs2_alloc *al;
+ int error;
+ loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
+ next = (next + 1) << sdp->sd_sb.sb_bsize_shift;
+
+ offset = (offset >> sdp->sd_sb.sb_bsize_shift) <<
+ sdp->sd_sb.sb_bsize_shift;
+
+ len = next - offset;
+ bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
+ if (!bytes)
+ bytes = UINT_MAX;
+
+ gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
+ error = gfs2_glock_nq(&ip->i_gh);
+ if (unlikely(error))
+ goto out_uninit;
+
+ if (!gfs2_write_alloc_required(ip, offset, len))
+ goto out_unlock;
+
+ while (len > 0) {
+ if (len < bytes)
+ bytes = len;
+ al = gfs2_alloc_get(ip);
+ if (!al) {
+ error = -ENOMEM;
+ goto out_unlock;
+ }
+
+ error = gfs2_quota_lock_check(ip);
+ if (error)
+ goto out_alloc_put;
+
+retry:
+ gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
+
+ al->al_requested = data_blocks + ind_blocks;
+ error = gfs2_inplace_reserve(ip);
+ if (error) {
+ if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
+ bytes >>= 1;
+ goto retry;
+ }
+ goto out_qunlock;
+ }
+ max_bytes = bytes;
+ calc_max_reserv(ip, len, &max_bytes, &data_blocks, &ind_blocks);
+ al->al_requested = data_blocks + ind_blocks;
+
+ rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
+ RES_RG_HDR + gfs2_rg_blocks(al);
+ if (gfs2_is_jdata(ip))
+ rblocks += data_blocks ? data_blocks : 1;
+
+ error = gfs2_trans_begin(sdp, rblocks,
+ PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
+ if (error)
+ goto out_trans_fail;
+
+ error = fallocate_chunk(inode, offset, max_bytes, mode);
+ gfs2_trans_end(sdp);
+
+ if (error)
+ goto out_trans_fail;
+
+ len -= max_bytes;
+ offset += max_bytes;
+ gfs2_inplace_release(ip);
+ gfs2_quota_unlock(ip);
+ gfs2_alloc_put(ip);
+ }
+ goto out_unlock;
+
+out_trans_fail:
+ gfs2_inplace_release(ip);
+out_qunlock:
+ gfs2_quota_unlock(ip);
+out_alloc_put:
+ gfs2_alloc_put(ip);
+out_unlock:
+ gfs2_glock_dq(&ip->i_gh);
+out_uninit:
+ gfs2_holder_uninit(&ip->i_gh);
+ return error;
+}
+
+
static int gfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len)
{
@@ -1351,6 +1572,7 @@
.getxattr = gfs2_getxattr,
.listxattr = gfs2_listxattr,
.removexattr = gfs2_removexattr,
+ .fallocate = gfs2_fallocate,
.fiemap = gfs2_fiemap,
};
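gfs2_fallocate() above preallocates in resource-group-sized chunks and, on allocation failure, halves the chunk and retries before giving up. A structure-only userspace analog of that loop (reserve_blocks() and write_zero_extent() are hypothetical stand-ins for gfs2_inplace_reserve() and fallocate_chunk()):

static int preallocate(long long len, long long chunk, long long blksz)
{
	while (len > 0) {
		long long want = len < chunk ? len : chunk;

		while (reserve_blocks(want) < 0) {
			if (want <= blksz)
				return -1;	/* truly out of space */
			want >>= 1;		/* halve and retry, as above */
		}
		if (write_zero_extent(want) < 0)
			return -1;
		len -= want;
	}
	return 0;
}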
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 1bc6b56..58a9b99 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -735,10 +735,8 @@
goto out;
size = loc + sizeof(struct gfs2_quota);
- if (size > inode->i_size) {
- ip->i_disksize = size;
+ if (size > inode->i_size)
i_size_write(inode, size);
- }
inode->i_mtime = inode->i_atime = CURRENT_TIME;
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
gfs2_dinode_out(ip, dibh->b_data);
@@ -817,7 +815,7 @@
goto out_alloc;
if (nalloc)
- blocks += al->al_rgd->rd_length + nalloc * ind_blocks + RES_STATFS;
+ blocks += gfs2_rg_blocks(al) + nalloc * ind_blocks + RES_STATFS;
error = gfs2_trans_begin(sdp, blocks, 0);
if (error)
@@ -1190,18 +1188,17 @@
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
- unsigned int blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift;
+ u64 size = i_size_read(sdp->sd_qc_inode);
+ unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
unsigned int x, slot = 0;
unsigned int found = 0;
u64 dblock;
u32 extlen = 0;
int error;
- if (!ip->i_disksize || ip->i_disksize > (64 << 20) ||
- ip->i_disksize & (sdp->sd_sb.sb_bsize - 1)) {
- gfs2_consist_inode(ip);
+ if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
return -EIO;
- }
+
sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);
@@ -1589,6 +1586,7 @@
error = gfs2_inplace_reserve(ip);
if (error)
goto out_alloc;
+ blocks += gfs2_rg_blocks(al);
}
error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 1, 0);
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index f7f89a9..f2a02ed 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -455,11 +455,13 @@
int ro = 0;
unsigned int pass;
int error;
+ int jlocked = 0;
- if (jd->jd_jid != sdp->sd_lockstruct.ls_jid) {
+ if (sdp->sd_args.ar_spectator ||
+ (jd->jd_jid != sdp->sd_lockstruct.ls_jid)) {
fs_info(sdp, "jid=%u: Trying to acquire journal lock...\n",
jd->jd_jid);
-
+ jlocked = 1;
/* Acquire the journal lock so we can do recovery */
error = gfs2_glock_nq_num(sdp, jd->jd_jid, &gfs2_journal_glops,
@@ -554,13 +556,12 @@
jd->jd_jid, t);
}
- if (jd->jd_jid != sdp->sd_lockstruct.ls_jid)
- gfs2_glock_dq_uninit(&ji_gh);
-
gfs2_recovery_done(sdp, jd->jd_jid, LM_RD_SUCCESS);
- if (jd->jd_jid != sdp->sd_lockstruct.ls_jid)
+ if (jlocked) {
+ gfs2_glock_dq_uninit(&ji_gh);
gfs2_glock_dq_uninit(&j_gh);
+ }
fs_info(sdp, "jid=%u: Done\n", jd->jd_jid);
goto done;
@@ -568,7 +569,7 @@
fail_gunlock_tr:
gfs2_glock_dq_uninit(&t_gh);
fail_gunlock_ji:
- if (jd->jd_jid != sdp->sd_lockstruct.ls_jid) {
+ if (jlocked) {
gfs2_glock_dq_uninit(&ji_gh);
fail_gunlock_j:
gfs2_glock_dq_uninit(&j_gh);
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 171a744..fb67f59 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -500,7 +500,7 @@
for (rgrps = 0;; rgrps++) {
loff_t pos = rgrps * sizeof(struct gfs2_rindex);
- if (pos + sizeof(struct gfs2_rindex) >= ip->i_disksize)
+ if (pos + sizeof(struct gfs2_rindex) >= i_size_read(inode))
break;
error = gfs2_internal_read(ip, &ra_state, buf, &pos,
sizeof(struct gfs2_rindex));
@@ -588,7 +588,9 @@
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct inode *inode = &ip->i_inode;
struct file_ra_state ra_state;
- u64 rgrp_count = ip->i_disksize;
+ u64 rgrp_count = i_size_read(inode);
+ struct gfs2_rgrpd *rgd;
+ unsigned int max_data = 0;
int error;
do_div(rgrp_count, sizeof(struct gfs2_rindex));
@@ -603,6 +605,10 @@
}
}
+ list_for_each_entry(rgd, &sdp->sd_rindex_list, rd_list)
+ if (rgd->rd_data > max_data)
+ max_data = rgd->rd_data;
+ sdp->sd_max_rg_data = max_data;
sdp->sd_rindex_uptodate = 1;
return 0;
}
@@ -622,13 +628,15 @@
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct inode *inode = &ip->i_inode;
struct file_ra_state ra_state;
+ struct gfs2_rgrpd *rgd;
+ unsigned int max_data = 0;
int error;
file_ra_state_init(&ra_state, inode->i_mapping);
for (sdp->sd_rgrps = 0;; sdp->sd_rgrps++) {
/* Ignore partials */
if ((sdp->sd_rgrps + 1) * sizeof(struct gfs2_rindex) >
- ip->i_disksize)
+ i_size_read(inode))
break;
error = read_rindex_entry(ip, &ra_state);
if (error) {
@@ -636,6 +644,10 @@
return error;
}
}
+ list_for_each_entry(rgd, &sdp->sd_rindex_list, rd_list)
+ if (rgd->rd_data > max_data)
+ max_data = rgd->rd_data;
+ sdp->sd_max_rg_data = max_data;
sdp->sd_rindex_uptodate = 1;
return 0;
@@ -1188,7 +1200,8 @@
* Returns: errno
*/
-int gfs2_inplace_reserve_i(struct gfs2_inode *ip, char *file, unsigned int line)
+int gfs2_inplace_reserve_i(struct gfs2_inode *ip, int hold_rindex,
+ char *file, unsigned int line)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_alloc *al = ip->i_alloc;
@@ -1199,12 +1212,15 @@
return -EINVAL;
try_again:
- /* We need to hold the rindex unless the inode we're using is
- the rindex itself, in which case it's already held. */
- if (ip != GFS2_I(sdp->sd_rindex))
- error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
- else if (!sdp->sd_rgrps) /* We may not have the rindex read in, so: */
- error = gfs2_ri_update_special(ip);
+ if (hold_rindex) {
+ /* We need to hold the rindex unless the inode we're using is
+ the rindex itself, in which case it's already held. */
+ if (ip != GFS2_I(sdp->sd_rindex))
+ error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
+ else if (!sdp->sd_rgrps) /* We may not have the rindex read
+ in, so: */
+ error = gfs2_ri_update_special(ip);
+ }
if (error)
return error;
@@ -1215,7 +1231,7 @@
try to free it, and try the allocation again. */
error = get_local_rgrp(ip, &unlinked, &last_unlinked);
if (error) {
- if (ip != GFS2_I(sdp->sd_rindex))
+ if (hold_rindex && ip != GFS2_I(sdp->sd_rindex))
gfs2_glock_dq_uninit(&al->al_ri_gh);
if (error != -EAGAIN)
return error;
@@ -1257,7 +1273,7 @@
al->al_rgd = NULL;
if (al->al_rgd_gh.gh_gl)
gfs2_glock_dq_uninit(&al->al_rgd_gh);
- if (ip != GFS2_I(sdp->sd_rindex))
+ if (ip != GFS2_I(sdp->sd_rindex) && al->al_ri_gh.gh_gl)
gfs2_glock_dq_uninit(&al->al_ri_gh);
}
@@ -1496,11 +1512,19 @@
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct buffer_head *dibh;
struct gfs2_alloc *al = ip->i_alloc;
- struct gfs2_rgrpd *rgd = al->al_rgd;
+ struct gfs2_rgrpd *rgd;
u32 goal, blk;
u64 block;
int error;
+ /* Only happens if there is a bug in gfs2, return something distinctive
+ * to ensure that it is noticed.
+ */
+ if (al == NULL)
+ return -ECANCELED;
+
+ rgd = al->al_rgd;
+
if (rgrp_contains_block(rgd, ip->i_goal))
goal = ip->i_goal - rgd->rd_data0;
else
diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h
index f07119d..0e35c04 100644
--- a/fs/gfs2/rgrp.h
+++ b/fs/gfs2/rgrp.h
@@ -39,10 +39,12 @@
ip->i_alloc = NULL;
}
-extern int gfs2_inplace_reserve_i(struct gfs2_inode *ip, char *file,
- unsigned int line);
+extern int gfs2_inplace_reserve_i(struct gfs2_inode *ip, int hold_rindex,
+ char *file, unsigned int line);
#define gfs2_inplace_reserve(ip) \
-gfs2_inplace_reserve_i((ip), __FILE__, __LINE__)
+ gfs2_inplace_reserve_i((ip), 1, __FILE__, __LINE__)
+#define gfs2_inplace_reserve_ri(ip) \
+ gfs2_inplace_reserve_i((ip), 0, __FILE__, __LINE__)
extern void gfs2_inplace_release(struct gfs2_inode *ip);
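The gfs2_inplace_reserve()/gfs2_inplace_reserve_ri() pair above is the usual call-site-tracking macro idiom: the worker takes __FILE__/__LINE__ so failures can report where the reservation was requested, and the new _ri variant passes hold_rindex = 0 for callers (rename) that already hold the rindex. Generic form of the idiom, with hypothetical names:

extern int do_reserve_i(void *obj, int hold_rindex,
			char *file, unsigned int line);

#define do_reserve(obj) \
	do_reserve_i((obj), 1, __FILE__, __LINE__)
#define do_reserve_no_rindex(obj) \
	do_reserve_i((obj), 0, __FILE__, __LINE__)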
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 77cb9f8..047d117 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -85,6 +85,7 @@
{Opt_locktable, "locktable=%s"},
{Opt_hostdata, "hostdata=%s"},
{Opt_spectator, "spectator"},
+ {Opt_spectator, "norecovery"},
{Opt_ignore_local_fs, "ignore_local_fs"},
{Opt_localflocks, "localflocks"},
{Opt_localcaching, "localcaching"},
@@ -159,13 +160,13 @@
args->ar_spectator = 1;
break;
case Opt_ignore_local_fs:
- args->ar_ignore_local_fs = 1;
+ /* Retained for backwards compat only */
break;
case Opt_localflocks:
args->ar_localflocks = 1;
break;
case Opt_localcaching:
- args->ar_localcaching = 1;
+ /* Retained for backwards compat only */
break;
case Opt_debug:
if (args->ar_errors == GFS2_ERRORS_PANIC) {
@@ -179,7 +180,7 @@
args->ar_debug = 0;
break;
case Opt_upgrade:
- args->ar_upgrade = 1;
+ /* Retained for backwards compat only */
break;
case Opt_acl:
args->ar_posix_acl = 1;
@@ -342,15 +343,14 @@
{
struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
+ u64 size = i_size_read(jd->jd_inode);
- if (ip->i_disksize < (8 << 20) || ip->i_disksize > (1 << 30) ||
- (ip->i_disksize & (sdp->sd_sb.sb_bsize - 1))) {
- gfs2_consist_inode(ip);
+ if (gfs2_check_internal_file_size(jd->jd_inode, 8 << 20, 1 << 30))
return -EIO;
- }
- jd->jd_blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift;
- if (gfs2_write_alloc_required(ip, 0, ip->i_disksize)) {
+ jd->jd_blocks = size >> sdp->sd_sb.sb_bsize_shift;
+
+ if (gfs2_write_alloc_required(ip, 0, size)) {
gfs2_consist_inode(ip);
return -EIO;
}
@@ -1129,9 +1129,7 @@
/* Some flags must not be changed */
if (args_neq(&args, &sdp->sd_args, spectator) ||
- args_neq(&args, &sdp->sd_args, ignore_local_fs) ||
args_neq(&args, &sdp->sd_args, localflocks) ||
- args_neq(&args, &sdp->sd_args, localcaching) ||
args_neq(&args, &sdp->sd_args, meta))
return -EINVAL;
@@ -1234,16 +1232,10 @@
seq_printf(s, ",hostdata=%s", args->ar_hostdata);
if (args->ar_spectator)
seq_printf(s, ",spectator");
- if (args->ar_ignore_local_fs)
- seq_printf(s, ",ignore_local_fs");
if (args->ar_localflocks)
seq_printf(s, ",localflocks");
- if (args->ar_localcaching)
- seq_printf(s, ",localcaching");
if (args->ar_debug)
seq_printf(s, ",debug");
- if (args->ar_upgrade)
- seq_printf(s, ",upgrade");
if (args->ar_posix_acl)
seq_printf(s, ",acl");
if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index ccacffd..748ccb5 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -230,7 +230,10 @@
if (gltype > LM_TYPE_JOURNAL)
return -EINVAL;
- glops = gfs2_glops_list[gltype];
+ if (gltype == LM_TYPE_NONDISK && glnum == GFS2_TRANS_LOCK)
+ glops = &gfs2_trans_glops;
+ else
+ glops = gfs2_glops_list[gltype];
if (glops == NULL)
return -EINVAL;
if (!test_and_set_bit(SDF_DEMOTE, &sdp->sd_flags))
@@ -399,31 +402,32 @@
static ssize_t jid_show(struct gfs2_sbd *sdp, char *buf)
{
- return sprintf(buf, "%u\n", sdp->sd_lockstruct.ls_jid);
+ return sprintf(buf, "%d\n", sdp->sd_lockstruct.ls_jid);
}
static ssize_t jid_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
- unsigned jid;
+ int jid;
int rv;
- rv = sscanf(buf, "%u", &jid);
+ rv = sscanf(buf, "%d", &jid);
if (rv != 1)
return -EINVAL;
spin_lock(&sdp->sd_jindex_spin);
rv = -EINVAL;
- if (sdp->sd_args.ar_spectator)
- goto out;
if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
goto out;
rv = -EBUSY;
- if (test_and_clear_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
+ if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
goto out;
+ rv = 0;
+ if (sdp->sd_args.ar_spectator && jid > 0)
+ rv = jid = -EINVAL;
sdp->sd_lockstruct.ls_jid = jid;
+ clear_bit(SDF_NOJOURNALID, &sdp->sd_flags);
smp_mb__after_clear_bit();
wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);
- rv = 0;
out:
spin_unlock(&sdp->sd_jindex_spin);
return rv ? rv : len;
@@ -617,7 +621,7 @@
add_uevent_var(env, "LOCKTABLE=%s", sdp->sd_table_name);
add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name);
if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags))
- add_uevent_var(env, "JOURNALID=%u", sdp->sd_lockstruct.ls_jid);
+ add_uevent_var(env, "JOURNALID=%d", sdp->sd_lockstruct.ls_jid);
if (gfs2_uuid_valid(uuid))
add_uevent_var(env, "UUID=%pUB", uuid);
return 0;
diff --git a/fs/gfs2/trace_gfs2.h b/fs/gfs2/trace_gfs2.h
index 148d55c..cedb0bb 100644
--- a/fs/gfs2/trace_gfs2.h
+++ b/fs/gfs2/trace_gfs2.h
@@ -39,7 +39,8 @@
{(1UL << GLF_INVALIDATE_IN_PROGRESS), "i" }, \
{(1UL << GLF_REPLY_PENDING), "r" }, \
{(1UL << GLF_INITIAL), "I" }, \
- {(1UL << GLF_FROZEN), "F" })
+ {(1UL << GLF_FROZEN), "F" }, \
+ {(1UL << GLF_QUEUED), "q" })
#ifndef NUMPTY
#define NUMPTY
diff --git a/fs/gfs2/trans.h b/fs/gfs2/trans.h
index edf9d4b..fb56b78 100644
--- a/fs/gfs2/trans.h
+++ b/fs/gfs2/trans.h
@@ -20,11 +20,20 @@
#define RES_JDATA 1
#define RES_DATA 1
#define RES_LEAF 1
+#define RES_RG_HDR 1
#define RES_RG_BIT 2
#define RES_EATTR 1
#define RES_STATFS 1
#define RES_QUOTA 2
+/* reserve either the number of blocks to be allocated plus the rg header
+ * block, or all of the blocks in the rg, whichever is smaller */
+static inline unsigned int gfs2_rg_blocks(const struct gfs2_alloc *al)
+{
+ return (al->al_requested < al->al_rgd->rd_length)?
+ al->al_requested + 1 : al->al_rgd->rd_length;
+}
+
int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
unsigned int revokes);
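For readers checking the reservation arithmetic, the gfs2_rg_blocks() helper added above is simply a min() of two bounds. A minimal restatement with a stand-in struct (the names below are illustrative, not part of the patch):

struct alloc_sketch {
	unsigned int requested;		/* al->al_requested */
	unsigned int rg_length;		/* al->al_rgd->rd_length */
};

static inline unsigned int rg_blocks_sketch(const struct alloc_sketch *al)
{
	/* one block per requested allocation, plus the rg header block */
	unsigned int want = al->requested + 1;

	/* but never more than the whole resource group */
	return want < al->rg_length ? want : al->rg_length;
}

The xattr.c hunk below shows the intended use: the helper replaces the pessimistic al->al_rgd->rd_length term when sizing a transaction.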
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 776af6e..30b58f0 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -734,7 +734,7 @@
goto out_gunlock_q;
error = gfs2_trans_begin(GFS2_SB(&ip->i_inode),
- blks + al->al_rgd->rd_length +
+ blks + gfs2_rg_blocks(al) +
RES_DINODE + RES_STATFS + RES_QUOTA, 0);
if (error)
goto out_ipres;
diff --git a/fs/hfs/bfind.c b/fs/hfs/bfind.c
index 4129cdb..571abe9 100644
--- a/fs/hfs/bfind.c
+++ b/fs/hfs/bfind.c
@@ -23,7 +23,7 @@
fd->search_key = ptr;
fd->key = ptr + tree->max_key_len + 2;
dprint(DBG_BNODE_REFS, "find_init: %d (%p)\n", tree->cnid, __builtin_return_address(0));
- down(&tree->tree_lock);
+ mutex_lock(&tree->tree_lock);
return 0;
}
@@ -32,7 +32,7 @@
hfs_bnode_put(fd->bnode);
kfree(fd->search_key);
dprint(DBG_BNODE_REFS, "find_exit: %d (%p)\n", fd->tree->cnid, __builtin_return_address(0));
- up(&fd->tree->tree_lock);
+ mutex_unlock(&fd->tree->tree_lock);
fd->tree = NULL;
}
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index 38a0a99..3ebc437 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -27,7 +27,7 @@
if (!tree)
return NULL;
- init_MUTEX(&tree->tree_lock);
+ mutex_init(&tree->tree_lock);
spin_lock_init(&tree->hash_lock);
/* Set the correct compare function */
tree->sb = sb;
diff --git a/fs/hfs/btree.h b/fs/hfs/btree.h
index cc51905..2a1d712 100644
--- a/fs/hfs/btree.h
+++ b/fs/hfs/btree.h
@@ -33,7 +33,7 @@
unsigned int depth;
//unsigned int map1_size, map_size;
- struct semaphore tree_lock;
+ struct mutex tree_lock;
unsigned int pages_per_bnode;
spinlock_t hash_lock;
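The fs/hfs hunks above convert the per-tree lock from a semaphore used as a mutex into a real struct mutex; the fs/hfsplus hunks below make the same change. A self-contained sketch of the before/after pattern, with a stand-in struct name:

#include <linux/mutex.h>

struct btree_sketch {			/* stand-in for struct hfs_btree */
	struct mutex tree_lock;		/* was: struct semaphore tree_lock; */
};

static void btree_sketch_init(struct btree_sketch *tree)
{
	mutex_init(&tree->tree_lock);	/* was: init_MUTEX(&tree->tree_lock); */
}

static void btree_sketch_op(struct btree_sketch *tree)
{
	mutex_lock(&tree->tree_lock);	/* was: down(&tree->tree_lock); */
	/* ... search or modify the tree ... */
	mutex_unlock(&tree->tree_lock);	/* was: up(&tree->tree_lock); */
}

A mutex gives the same mutual exclusion but adds owner tracking and lockdep debugging checks, which a bare semaphore cannot provide.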
diff --git a/fs/hfsplus/bfind.c b/fs/hfsplus/bfind.c
index 5007a41..d182438 100644
--- a/fs/hfsplus/bfind.c
+++ b/fs/hfsplus/bfind.c
@@ -23,7 +23,7 @@
fd->search_key = ptr;
fd->key = ptr + tree->max_key_len + 2;
dprint(DBG_BNODE_REFS, "find_init: %d (%p)\n", tree->cnid, __builtin_return_address(0));
- down(&tree->tree_lock);
+ mutex_lock(&tree->tree_lock);
return 0;
}
@@ -32,7 +32,7 @@
hfs_bnode_put(fd->bnode);
kfree(fd->search_key);
dprint(DBG_BNODE_REFS, "find_exit: %d (%p)\n", fd->tree->cnid, __builtin_return_address(0));
- up(&fd->tree->tree_lock);
+ mutex_unlock(&fd->tree->tree_lock);
fd->tree = NULL;
}
@@ -52,6 +52,10 @@
rec = (e + b) / 2;
len = hfs_brec_lenoff(bnode, rec, &off);
keylen = hfs_brec_keylen(bnode, rec);
+ if (keylen == 0) {
+ res = -EINVAL;
+ goto fail;
+ }
hfs_bnode_read(bnode, fd->key, off, keylen);
cmpval = bnode->tree->keycmp(fd->key, fd->search_key);
if (!cmpval) {
@@ -67,6 +71,10 @@
if (rec != e && e >= 0) {
len = hfs_brec_lenoff(bnode, e, &off);
keylen = hfs_brec_keylen(bnode, e);
+ if (keylen == 0) {
+ res = -EINVAL;
+ goto fail;
+ }
hfs_bnode_read(bnode, fd->key, off, keylen);
}
done:
@@ -75,6 +83,7 @@
fd->keylength = keylen;
fd->entryoffset = off + keylen;
fd->entrylength = len - keylen;
+fail:
return res;
}
@@ -198,6 +207,10 @@
len = hfs_brec_lenoff(bnode, fd->record, &off);
keylen = hfs_brec_keylen(bnode, fd->record);
+ if (keylen == 0) {
+ res = -EINVAL;
+ goto out;
+ }
fd->keyoffset = off;
fd->keylength = keylen;
fd->entryoffset = off + keylen;
diff --git a/fs/hfsplus/bitmap.c b/fs/hfsplus/bitmap.c
index ea30afc..ad57f59 100644
--- a/fs/hfsplus/bitmap.c
+++ b/fs/hfsplus/bitmap.c
@@ -17,6 +17,7 @@
int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *max)
{
+ struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
struct page *page;
struct address_space *mapping;
__be32 *pptr, *curr, *end;
@@ -29,8 +30,8 @@
return size;
dprint(DBG_BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
- mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
- mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
+ mutex_lock(&sbi->alloc_mutex);
+ mapping = sbi->alloc_file->i_mapping;
page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
if (IS_ERR(page)) {
start = size;
@@ -150,16 +151,17 @@
set_page_dirty(page);
kunmap(page);
*max = offset + (curr - pptr) * 32 + i - start;
- HFSPLUS_SB(sb).free_blocks -= *max;
+ sbi->free_blocks -= *max;
sb->s_dirt = 1;
dprint(DBG_BITMAP, "-> %u,%u\n", start, *max);
out:
- mutex_unlock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
+ mutex_unlock(&sbi->alloc_mutex);
return start;
}
int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
{
+ struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
struct page *page;
struct address_space *mapping;
__be32 *pptr, *curr, *end;
@@ -172,11 +174,11 @@
dprint(DBG_BITMAP, "block_free: %u,%u\n", offset, count);
/* are all of the bits in range? */
- if ((offset + count) > HFSPLUS_SB(sb).total_blocks)
+ if ((offset + count) > sbi->total_blocks)
return -2;
- mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
- mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
+ mutex_lock(&sbi->alloc_mutex);
+ mapping = sbi->alloc_file->i_mapping;
pnr = offset / PAGE_CACHE_BITS;
page = read_mapping_page(mapping, pnr, NULL);
pptr = kmap(page);
@@ -224,9 +226,9 @@
out:
set_page_dirty(page);
kunmap(page);
- HFSPLUS_SB(sb).free_blocks += len;
+ sbi->free_blocks += len;
sb->s_dirt = 1;
- mutex_unlock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
+ mutex_unlock(&sbi->alloc_mutex);
return 0;
}
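The bitmap.c change above also swaps the lock protecting allocation state: instead of taking i_mutex on the allocation file, callers now take a dedicated alloc_mutex kept in the superblock info. A hedged sketch of the resulting pattern, with illustrative names:

#include <linux/mutex.h>
#include <linux/types.h>

struct sbi_sketch {			/* stand-in for struct hfsplus_sb_info */
	struct mutex alloc_mutex;	/* guards the bitmap and free_blocks */
	u32 free_blocks;
};

static void sketch_block_free(struct sbi_sketch *sbi, u32 len)
{
	mutex_lock(&sbi->alloc_mutex);	/* was: alloc_file->i_mutex */
	/* ... clear the corresponding bits in the bitmap pages ... */
	sbi->free_blocks += len;
	mutex_unlock(&sbi->alloc_mutex);
}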
diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c
index c88e5d7..2f39d05 100644
--- a/fs/hfsplus/brec.c
+++ b/fs/hfsplus/brec.c
@@ -42,10 +42,13 @@
recoff = hfs_bnode_read_u16(node, node->tree->node_size - (rec + 1) * 2);
if (!recoff)
return 0;
- if (node->tree->attributes & HFS_TREE_BIGKEYS)
- retval = hfs_bnode_read_u16(node, recoff) + 2;
- else
- retval = (hfs_bnode_read_u8(node, recoff) | 1) + 1;
+
+ retval = hfs_bnode_read_u16(node, recoff) + 2;
+ if (retval > node->tree->max_key_len + 2) {
+ printk(KERN_ERR "hfs: keylen %d too large\n",
+ retval);
+ retval = 0;
+ }
}
return retval;
}
@@ -216,7 +219,7 @@
static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd)
{
struct hfs_btree *tree;
- struct hfs_bnode *node, *new_node;
+ struct hfs_bnode *node, *new_node, *next_node;
struct hfs_bnode_desc node_desc;
int num_recs, new_rec_off, new_off, old_rec_off;
int data_start, data_end, size;
@@ -235,6 +238,17 @@
new_node->type = node->type;
new_node->height = node->height;
+ if (node->next)
+ next_node = hfs_bnode_find(tree, node->next);
+ else
+ next_node = NULL;
+
+ if (IS_ERR(next_node)) {
+ hfs_bnode_put(node);
+ hfs_bnode_put(new_node);
+ return next_node;
+ }
+
size = tree->node_size / 2 - node->num_recs * 2 - 14;
old_rec_off = tree->node_size - 4;
num_recs = 1;
@@ -248,6 +262,8 @@
/* panic? */
hfs_bnode_put(node);
hfs_bnode_put(new_node);
+ if (next_node)
+ hfs_bnode_put(next_node);
return ERR_PTR(-ENOSPC);
}
@@ -302,8 +318,7 @@
hfs_bnode_write(node, &node_desc, 0, sizeof(node_desc));
/* update next bnode header */
- if (new_node->next) {
- struct hfs_bnode *next_node = hfs_bnode_find(tree, new_node->next);
+ if (next_node) {
next_node->prev = new_node->this;
hfs_bnode_read(next_node, &node_desc, 0, sizeof(node_desc));
node_desc.prev = cpu_to_be32(next_node->prev);
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index e49fcee..22e4d4e 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -30,7 +30,7 @@
if (!tree)
return NULL;
- init_MUTEX(&tree->tree_lock);
+ mutex_init(&tree->tree_lock);
spin_lock_init(&tree->hash_lock);
tree->sb = sb;
tree->cnid = id;
@@ -39,10 +39,16 @@
goto free_tree;
tree->inode = inode;
+ if (!HFSPLUS_I(tree->inode)->first_blocks) {
+ printk(KERN_ERR
+ "hfs: invalid btree extent records (0 size).\n");
+ goto free_inode;
+ }
+
mapping = tree->inode->i_mapping;
page = read_mapping_page(mapping, 0, NULL);
if (IS_ERR(page))
- goto free_tree;
+ goto free_inode;
/* Load the header */
head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
@@ -57,27 +63,56 @@
tree->max_key_len = be16_to_cpu(head->max_key_len);
tree->depth = be16_to_cpu(head->depth);
- /* Set the correct compare function */
- if (id == HFSPLUS_EXT_CNID) {
+ /* Verify the tree and set the correct compare function */
+ switch (id) {
+ case HFSPLUS_EXT_CNID:
+ if (tree->max_key_len != HFSPLUS_EXT_KEYLEN - sizeof(u16)) {
+ printk(KERN_ERR "hfs: invalid extent max_key_len %d\n",
+ tree->max_key_len);
+ goto fail_page;
+ }
+ if (tree->attributes & HFS_TREE_VARIDXKEYS) {
+ printk(KERN_ERR "hfs: invalid extent btree flag\n");
+ goto fail_page;
+ }
+
tree->keycmp = hfsplus_ext_cmp_key;
- } else if (id == HFSPLUS_CAT_CNID) {
- if ((HFSPLUS_SB(sb).flags & HFSPLUS_SB_HFSX) &&
+ break;
+ case HFSPLUS_CAT_CNID:
+ if (tree->max_key_len != HFSPLUS_CAT_KEYLEN - sizeof(u16)) {
+ printk(KERN_ERR "hfs: invalid catalog max_key_len %d\n",
+ tree->max_key_len);
+ goto fail_page;
+ }
+ if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) {
+ printk(KERN_ERR "hfs: invalid catalog btree flag\n");
+ goto fail_page;
+ }
+
+ if (test_bit(HFSPLUS_SB_HFSX, &HFSPLUS_SB(sb)->flags) &&
(head->key_type == HFSPLUS_KEY_BINARY))
tree->keycmp = hfsplus_cat_bin_cmp_key;
else {
tree->keycmp = hfsplus_cat_case_cmp_key;
- HFSPLUS_SB(sb).flags |= HFSPLUS_SB_CASEFOLD;
+ set_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
}
- } else {
+ break;
+ default:
printk(KERN_ERR "hfs: unknown B*Tree requested\n");
goto fail_page;
}
+ if (!(tree->attributes & HFS_TREE_BIGKEYS)) {
+ printk(KERN_ERR "hfs: invalid btree flag\n");
+ goto fail_page;
+ }
+
size = tree->node_size;
if (!is_power_of_2(size))
goto fail_page;
if (!tree->node_count)
goto fail_page;
+
tree->node_size_shift = ffs(size) - 1;
tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
@@ -87,10 +122,11 @@
return tree;
fail_page:
- tree->inode->i_mapping->a_ops = &hfsplus_aops;
page_cache_release(page);
- free_tree:
+ free_inode:
+ tree->inode->i_mapping->a_ops = &hfsplus_aops;
iput(tree->inode);
+ free_tree:
kfree(tree);
return NULL;
}
@@ -192,17 +228,18 @@
while (!tree->free_nodes) {
struct inode *inode = tree->inode;
+ struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
u32 count;
int res;
res = hfsplus_file_extend(inode);
if (res)
return ERR_PTR(res);
- HFSPLUS_I(inode).phys_size = inode->i_size =
- (loff_t)HFSPLUS_I(inode).alloc_blocks <<
- HFSPLUS_SB(tree->sb).alloc_blksz_shift;
- HFSPLUS_I(inode).fs_blocks = HFSPLUS_I(inode).alloc_blocks <<
- HFSPLUS_SB(tree->sb).fs_shift;
+ hip->phys_size = inode->i_size =
+ (loff_t)hip->alloc_blocks <<
+ HFSPLUS_SB(tree->sb)->alloc_blksz_shift;
+ hip->fs_blocks =
+ hip->alloc_blocks << HFSPLUS_SB(tree->sb)->fs_shift;
inode_set_bytes(inode, inode->i_size);
count = inode->i_size >> tree->node_size_shift;
tree->free_nodes = count - tree->node_count;
diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
index f6874ac..8af45fc 100644
--- a/fs/hfsplus/catalog.c
+++ b/fs/hfsplus/catalog.c
@@ -67,7 +67,7 @@
key->key_len = cpu_to_be16(6 + ustrlen);
}
-static void hfsplus_set_perms(struct inode *inode, struct hfsplus_perm *perms)
+void hfsplus_cat_set_perms(struct inode *inode, struct hfsplus_perm *perms)
{
if (inode->i_flags & S_IMMUTABLE)
perms->rootflags |= HFSPLUS_FLG_IMMUTABLE;
@@ -77,15 +77,24 @@
perms->rootflags |= HFSPLUS_FLG_APPEND;
else
perms->rootflags &= ~HFSPLUS_FLG_APPEND;
- HFSPLUS_I(inode).rootflags = perms->rootflags;
- HFSPLUS_I(inode).userflags = perms->userflags;
+
+ perms->userflags = HFSPLUS_I(inode)->userflags;
perms->mode = cpu_to_be16(inode->i_mode);
perms->owner = cpu_to_be32(inode->i_uid);
perms->group = cpu_to_be32(inode->i_gid);
+
+ if (S_ISREG(inode->i_mode))
+ perms->dev = cpu_to_be32(inode->i_nlink);
+ else if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode))
+ perms->dev = cpu_to_be32(inode->i_rdev);
+ else
+ perms->dev = 0;
}
static int hfsplus_cat_build_record(hfsplus_cat_entry *entry, u32 cnid, struct inode *inode)
{
+ struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
+
if (S_ISDIR(inode->i_mode)) {
struct hfsplus_cat_folder *folder;
@@ -93,13 +102,13 @@
memset(folder, 0, sizeof(*folder));
folder->type = cpu_to_be16(HFSPLUS_FOLDER);
folder->id = cpu_to_be32(inode->i_ino);
- HFSPLUS_I(inode).create_date =
+ HFSPLUS_I(inode)->create_date =
folder->create_date =
folder->content_mod_date =
folder->attribute_mod_date =
folder->access_date = hfsp_now2mt();
- hfsplus_set_perms(inode, &folder->permissions);
- if (inode == HFSPLUS_SB(inode->i_sb).hidden_dir)
+ hfsplus_cat_set_perms(inode, &folder->permissions);
+ if (inode == sbi->hidden_dir)
/* invisible and namelocked */
folder->user_info.frFlags = cpu_to_be16(0x5000);
return sizeof(*folder);
@@ -111,19 +120,19 @@
file->type = cpu_to_be16(HFSPLUS_FILE);
file->flags = cpu_to_be16(HFSPLUS_FILE_THREAD_EXISTS);
file->id = cpu_to_be32(cnid);
- HFSPLUS_I(inode).create_date =
+ HFSPLUS_I(inode)->create_date =
file->create_date =
file->content_mod_date =
file->attribute_mod_date =
file->access_date = hfsp_now2mt();
if (cnid == inode->i_ino) {
- hfsplus_set_perms(inode, &file->permissions);
+ hfsplus_cat_set_perms(inode, &file->permissions);
if (S_ISLNK(inode->i_mode)) {
file->user_info.fdType = cpu_to_be32(HFSP_SYMLINK_TYPE);
file->user_info.fdCreator = cpu_to_be32(HFSP_SYMLINK_CREATOR);
} else {
- file->user_info.fdType = cpu_to_be32(HFSPLUS_SB(inode->i_sb).type);
- file->user_info.fdCreator = cpu_to_be32(HFSPLUS_SB(inode->i_sb).creator);
+ file->user_info.fdType = cpu_to_be32(sbi->type);
+ file->user_info.fdCreator = cpu_to_be32(sbi->creator);
}
if ((file->permissions.rootflags | file->permissions.userflags) & HFSPLUS_FLG_IMMUTABLE)
file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED);
@@ -131,8 +140,8 @@
file->user_info.fdType = cpu_to_be32(HFSP_HARDLINK_TYPE);
file->user_info.fdCreator = cpu_to_be32(HFSP_HFSPLUS_CREATOR);
file->user_info.fdFlags = cpu_to_be16(0x100);
- file->create_date = HFSPLUS_I(HFSPLUS_SB(inode->i_sb).hidden_dir).create_date;
- file->permissions.dev = cpu_to_be32(HFSPLUS_I(inode).dev);
+ file->create_date = HFSPLUS_I(sbi->hidden_dir)->create_date;
+ file->permissions.dev = cpu_to_be32(HFSPLUS_I(inode)->linkid);
}
return sizeof(*file);
}
@@ -180,15 +189,14 @@
int hfsplus_create_cat(u32 cnid, struct inode *dir, struct qstr *str, struct inode *inode)
{
+ struct super_block *sb = dir->i_sb;
struct hfs_find_data fd;
- struct super_block *sb;
hfsplus_cat_entry entry;
int entry_size;
int err;
dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
- sb = dir->i_sb;
- hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
+ hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL);
entry_size = hfsplus_fill_cat_thread(sb, &entry, S_ISDIR(inode->i_mode) ?
@@ -234,7 +242,7 @@
int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
{
- struct super_block *sb;
+ struct super_block *sb = dir->i_sb;
struct hfs_find_data fd;
struct hfsplus_fork_raw fork;
struct list_head *pos;
@@ -242,8 +250,7 @@
u16 type;
dprint(DBG_CAT_MOD, "delete_cat: %s,%u\n", str ? str->name : NULL, cnid);
- sb = dir->i_sb;
- hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
+ hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
if (!str) {
int len;
@@ -279,7 +286,7 @@
hfsplus_free_fork(sb, cnid, &fork, HFSPLUS_TYPE_RSRC);
}
- list_for_each(pos, &HFSPLUS_I(dir).open_dir_list) {
+ list_for_each(pos, &HFSPLUS_I(dir)->open_dir_list) {
struct hfsplus_readdir_data *rd =
list_entry(pos, struct hfsplus_readdir_data, list);
if (fd.tree->keycmp(fd.search_key, (void *)&rd->key) < 0)
@@ -312,7 +319,7 @@
struct inode *src_dir, struct qstr *src_name,
struct inode *dst_dir, struct qstr *dst_name)
{
- struct super_block *sb;
+ struct super_block *sb = src_dir->i_sb;
struct hfs_find_data src_fd, dst_fd;
hfsplus_cat_entry entry;
int entry_size, type;
@@ -320,8 +327,7 @@
dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
dst_dir->i_ino, dst_name->name);
- sb = src_dir->i_sb;
- hfs_find_init(HFSPLUS_SB(sb).cat_tree, &src_fd);
+ hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &src_fd);
dst_fd = src_fd;
/* find the old dir entry and read the data */
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index 764fd1b..d236d85 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -39,7 +39,7 @@
dentry->d_op = &hfsplus_dentry_operations;
dentry->d_fsdata = NULL;
- hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
+ hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, &dentry->d_name);
again:
err = hfs_brec_read(&fd, &entry, sizeof(entry));
@@ -68,9 +68,9 @@
cnid = be32_to_cpu(entry.file.id);
if (entry.file.user_info.fdType == cpu_to_be32(HFSP_HARDLINK_TYPE) &&
entry.file.user_info.fdCreator == cpu_to_be32(HFSP_HFSPLUS_CREATOR) &&
- (entry.file.create_date == HFSPLUS_I(HFSPLUS_SB(sb).hidden_dir).create_date ||
- entry.file.create_date == HFSPLUS_I(sb->s_root->d_inode).create_date) &&
- HFSPLUS_SB(sb).hidden_dir) {
+ (entry.file.create_date == HFSPLUS_I(HFSPLUS_SB(sb)->hidden_dir)->create_date ||
+ entry.file.create_date == HFSPLUS_I(sb->s_root->d_inode)->create_date) &&
+ HFSPLUS_SB(sb)->hidden_dir) {
struct qstr str;
char name[32];
@@ -86,7 +86,8 @@
linkid = be32_to_cpu(entry.file.permissions.dev);
str.len = sprintf(name, "iNode%d", linkid);
str.name = name;
- hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_SB(sb).hidden_dir->i_ino, &str);
+ hfsplus_cat_build_key(sb, fd.search_key,
+ HFSPLUS_SB(sb)->hidden_dir->i_ino, &str);
goto again;
}
} else if (!dentry->d_fsdata)
@@ -101,7 +102,7 @@
if (IS_ERR(inode))
return ERR_CAST(inode);
if (S_ISREG(inode->i_mode))
- HFSPLUS_I(inode).dev = linkid;
+ HFSPLUS_I(inode)->linkid = linkid;
out:
d_add(dentry, inode);
return NULL;
@@ -124,7 +125,7 @@
if (filp->f_pos >= inode->i_size)
return 0;
- hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
+ hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
hfsplus_cat_build_key(sb, fd.search_key, inode->i_ino, NULL);
err = hfs_brec_find(&fd);
if (err)
@@ -180,8 +181,9 @@
err = -EIO;
goto out;
}
- if (HFSPLUS_SB(sb).hidden_dir &&
- HFSPLUS_SB(sb).hidden_dir->i_ino == be32_to_cpu(entry.folder.id))
+ if (HFSPLUS_SB(sb)->hidden_dir &&
+ HFSPLUS_SB(sb)->hidden_dir->i_ino ==
+ be32_to_cpu(entry.folder.id))
goto next;
if (filldir(dirent, strbuf, len, filp->f_pos,
be32_to_cpu(entry.folder.id), DT_DIR))
@@ -217,7 +219,7 @@
}
filp->private_data = rd;
rd->file = filp;
- list_add(&rd->list, &HFSPLUS_I(inode).open_dir_list);
+ list_add(&rd->list, &HFSPLUS_I(inode)->open_dir_list);
}
memcpy(&rd->key, fd.key, sizeof(struct hfsplus_cat_key));
out:
@@ -229,38 +231,18 @@
{
struct hfsplus_readdir_data *rd = file->private_data;
if (rd) {
+ mutex_lock(&inode->i_mutex);
list_del(&rd->list);
+ mutex_unlock(&inode->i_mutex);
kfree(rd);
}
return 0;
}
-static int hfsplus_create(struct inode *dir, struct dentry *dentry, int mode,
- struct nameidata *nd)
-{
- struct inode *inode;
- int res;
-
- inode = hfsplus_new_inode(dir->i_sb, mode);
- if (!inode)
- return -ENOSPC;
-
- res = hfsplus_create_cat(inode->i_ino, dir, &dentry->d_name, inode);
- if (res) {
- inode->i_nlink = 0;
- hfsplus_delete_inode(inode);
- iput(inode);
- return res;
- }
- hfsplus_instantiate(dentry, inode, inode->i_ino);
- mark_inode_dirty(inode);
- return 0;
-}
-
static int hfsplus_link(struct dentry *src_dentry, struct inode *dst_dir,
struct dentry *dst_dentry)
{
- struct super_block *sb = dst_dir->i_sb;
+ struct hfsplus_sb_info *sbi = HFSPLUS_SB(dst_dir->i_sb);
struct inode *inode = src_dentry->d_inode;
struct inode *src_dir = src_dentry->d_parent->d_inode;
struct qstr str;
@@ -270,7 +252,10 @@
if (HFSPLUS_IS_RSRC(inode))
return -EPERM;
+ if (!S_ISREG(inode->i_mode))
+ return -EPERM;
+ mutex_lock(&sbi->vh_mutex);
if (inode->i_ino == (u32)(unsigned long)src_dentry->d_fsdata) {
for (;;) {
get_random_bytes(&id, sizeof(cnid));
@@ -279,40 +264,41 @@
str.len = sprintf(name, "iNode%d", id);
res = hfsplus_rename_cat(inode->i_ino,
src_dir, &src_dentry->d_name,
- HFSPLUS_SB(sb).hidden_dir, &str);
+ sbi->hidden_dir, &str);
if (!res)
break;
if (res != -EEXIST)
- return res;
+ goto out;
}
- HFSPLUS_I(inode).dev = id;
- cnid = HFSPLUS_SB(sb).next_cnid++;
+ HFSPLUS_I(inode)->linkid = id;
+ cnid = sbi->next_cnid++;
src_dentry->d_fsdata = (void *)(unsigned long)cnid;
res = hfsplus_create_cat(cnid, src_dir, &src_dentry->d_name, inode);
if (res)
/* panic? */
- return res;
- HFSPLUS_SB(sb).file_count++;
+ goto out;
+ sbi->file_count++;
}
- cnid = HFSPLUS_SB(sb).next_cnid++;
+ cnid = sbi->next_cnid++;
res = hfsplus_create_cat(cnid, dst_dir, &dst_dentry->d_name, inode);
if (res)
- return res;
+ goto out;
inc_nlink(inode);
hfsplus_instantiate(dst_dentry, inode, cnid);
atomic_inc(&inode->i_count);
inode->i_ctime = CURRENT_TIME_SEC;
mark_inode_dirty(inode);
- HFSPLUS_SB(sb).file_count++;
- sb->s_dirt = 1;
-
- return 0;
+ sbi->file_count++;
+ dst_dir->i_sb->s_dirt = 1;
+out:
+ mutex_unlock(&sbi->vh_mutex);
+ return res;
}
static int hfsplus_unlink(struct inode *dir, struct dentry *dentry)
{
- struct super_block *sb = dir->i_sb;
+ struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
struct inode *inode = dentry->d_inode;
struct qstr str;
char name[32];
@@ -322,21 +308,22 @@
if (HFSPLUS_IS_RSRC(inode))
return -EPERM;
+ mutex_lock(&sbi->vh_mutex);
cnid = (u32)(unsigned long)dentry->d_fsdata;
if (inode->i_ino == cnid &&
- atomic_read(&HFSPLUS_I(inode).opencnt)) {
+ atomic_read(&HFSPLUS_I(inode)->opencnt)) {
str.name = name;
str.len = sprintf(name, "temp%lu", inode->i_ino);
res = hfsplus_rename_cat(inode->i_ino,
dir, &dentry->d_name,
- HFSPLUS_SB(sb).hidden_dir, &str);
+ sbi->hidden_dir, &str);
if (!res)
inode->i_flags |= S_DEAD;
- return res;
+ goto out;
}
res = hfsplus_delete_cat(cnid, dir, &dentry->d_name);
if (res)
- return res;
+ goto out;
if (inode->i_nlink > 0)
drop_nlink(inode);
@@ -344,10 +331,10 @@
clear_nlink(inode);
if (!inode->i_nlink) {
if (inode->i_ino != cnid) {
- HFSPLUS_SB(sb).file_count--;
- if (!atomic_read(&HFSPLUS_I(inode).opencnt)) {
+ sbi->file_count--;
+ if (!atomic_read(&HFSPLUS_I(inode)->opencnt)) {
res = hfsplus_delete_cat(inode->i_ino,
- HFSPLUS_SB(sb).hidden_dir,
+ sbi->hidden_dir,
NULL);
if (!res)
hfsplus_delete_inode(inode);
@@ -356,107 +343,108 @@
} else
hfsplus_delete_inode(inode);
} else
- HFSPLUS_SB(sb).file_count--;
+ sbi->file_count--;
inode->i_ctime = CURRENT_TIME_SEC;
mark_inode_dirty(inode);
-
+out:
+ mutex_unlock(&sbi->vh_mutex);
return res;
}
-static int hfsplus_mkdir(struct inode *dir, struct dentry *dentry, int mode)
-{
- struct inode *inode;
- int res;
-
- inode = hfsplus_new_inode(dir->i_sb, S_IFDIR | mode);
- if (!inode)
- return -ENOSPC;
-
- res = hfsplus_create_cat(inode->i_ino, dir, &dentry->d_name, inode);
- if (res) {
- inode->i_nlink = 0;
- hfsplus_delete_inode(inode);
- iput(inode);
- return res;
- }
- hfsplus_instantiate(dentry, inode, inode->i_ino);
- mark_inode_dirty(inode);
- return 0;
-}
-
static int hfsplus_rmdir(struct inode *dir, struct dentry *dentry)
{
- struct inode *inode;
+ struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
+ struct inode *inode = dentry->d_inode;
int res;
- inode = dentry->d_inode;
if (inode->i_size != 2)
return -ENOTEMPTY;
+
+ mutex_lock(&sbi->vh_mutex);
res = hfsplus_delete_cat(inode->i_ino, dir, &dentry->d_name);
if (res)
- return res;
+ goto out;
clear_nlink(inode);
inode->i_ctime = CURRENT_TIME_SEC;
hfsplus_delete_inode(inode);
mark_inode_dirty(inode);
- return 0;
+out:
+ mutex_unlock(&sbi->vh_mutex);
+ return res;
}
static int hfsplus_symlink(struct inode *dir, struct dentry *dentry,
const char *symname)
{
- struct super_block *sb;
+ struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
struct inode *inode;
- int res;
+ int res = -ENOSPC;
- sb = dir->i_sb;
- inode = hfsplus_new_inode(sb, S_IFLNK | S_IRWXUGO);
+ mutex_lock(&sbi->vh_mutex);
+ inode = hfsplus_new_inode(dir->i_sb, S_IFLNK | S_IRWXUGO);
if (!inode)
- return -ENOSPC;
+ goto out;
res = page_symlink(inode, symname, strlen(symname) + 1);
- if (res) {
- inode->i_nlink = 0;
- hfsplus_delete_inode(inode);
- iput(inode);
- return res;
- }
+ if (res)
+ goto out_err;
- mark_inode_dirty(inode);
res = hfsplus_create_cat(inode->i_ino, dir, &dentry->d_name, inode);
+ if (res)
+ goto out_err;
- if (!res) {
- hfsplus_instantiate(dentry, inode, inode->i_ino);
- mark_inode_dirty(inode);
- }
+ hfsplus_instantiate(dentry, inode, inode->i_ino);
+ mark_inode_dirty(inode);
+ goto out;
+out_err:
+ inode->i_nlink = 0;
+ hfsplus_delete_inode(inode);
+ iput(inode);
+out:
+ mutex_unlock(&sbi->vh_mutex);
return res;
}
static int hfsplus_mknod(struct inode *dir, struct dentry *dentry,
int mode, dev_t rdev)
{
- struct super_block *sb;
+ struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
struct inode *inode;
- int res;
+ int res = -ENOSPC;
- sb = dir->i_sb;
- inode = hfsplus_new_inode(sb, mode);
+ mutex_lock(&sbi->vh_mutex);
+ inode = hfsplus_new_inode(dir->i_sb, mode);
if (!inode)
- return -ENOSPC;
+ goto out;
+
+ if (S_ISBLK(mode) || S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode))
+ init_special_inode(inode, mode, rdev);
res = hfsplus_create_cat(inode->i_ino, dir, &dentry->d_name, inode);
if (res) {
inode->i_nlink = 0;
hfsplus_delete_inode(inode);
iput(inode);
- return res;
+ goto out;
}
- init_special_inode(inode, mode, rdev);
+
hfsplus_instantiate(dentry, inode, inode->i_ino);
mark_inode_dirty(inode);
+out:
+ mutex_unlock(&sbi->vh_mutex);
+ return res;
+}
- return 0;
+static int hfsplus_create(struct inode *dir, struct dentry *dentry, int mode,
+ struct nameidata *nd)
+{
+ return hfsplus_mknod(dir, dentry, mode, 0);
+}
+
+static int hfsplus_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+ return hfsplus_mknod(dir, dentry, mode | S_IFDIR, 0);
}
static int hfsplus_rename(struct inode *old_dir, struct dentry *old_dentry,
@@ -466,7 +454,10 @@
/* Unlink destination if it already exists */
if (new_dentry->d_inode) {
- res = hfsplus_unlink(new_dir, new_dentry);
+ if (S_ISDIR(new_dentry->d_inode->i_mode))
+ res = hfsplus_rmdir(new_dir, new_dentry);
+ else
+ res = hfsplus_unlink(new_dir, new_dentry);
if (res)
return res;
}
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
index 0022eec..0c9cb18 100644
--- a/fs/hfsplus/extents.c
+++ b/fs/hfsplus/extents.c
@@ -85,33 +85,47 @@
static void __hfsplus_ext_write_extent(struct inode *inode, struct hfs_find_data *fd)
{
+ struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
int res;
- hfsplus_ext_build_key(fd->search_key, inode->i_ino, HFSPLUS_I(inode).cached_start,
- HFSPLUS_IS_RSRC(inode) ? HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA);
+ WARN_ON(!mutex_is_locked(&hip->extents_lock));
+
+ hfsplus_ext_build_key(fd->search_key, inode->i_ino, hip->cached_start,
+ HFSPLUS_IS_RSRC(inode) ?
+ HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA);
+
res = hfs_brec_find(fd);
- if (HFSPLUS_I(inode).flags & HFSPLUS_FLG_EXT_NEW) {
+ if (hip->flags & HFSPLUS_FLG_EXT_NEW) {
if (res != -ENOENT)
return;
- hfs_brec_insert(fd, HFSPLUS_I(inode).cached_extents, sizeof(hfsplus_extent_rec));
- HFSPLUS_I(inode).flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW);
+ hfs_brec_insert(fd, hip->cached_extents,
+ sizeof(hfsplus_extent_rec));
+ hip->flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW);
} else {
if (res)
return;
- hfs_bnode_write(fd->bnode, HFSPLUS_I(inode).cached_extents, fd->entryoffset, fd->entrylength);
- HFSPLUS_I(inode).flags &= ~HFSPLUS_FLG_EXT_DIRTY;
+ hfs_bnode_write(fd->bnode, hip->cached_extents,
+ fd->entryoffset, fd->entrylength);
+ hip->flags &= ~HFSPLUS_FLG_EXT_DIRTY;
+ }
+}
+
+static void hfsplus_ext_write_extent_locked(struct inode *inode)
+{
+ if (HFSPLUS_I(inode)->flags & HFSPLUS_FLG_EXT_DIRTY) {
+ struct hfs_find_data fd;
+
+ hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
+ __hfsplus_ext_write_extent(inode, &fd);
+ hfs_find_exit(&fd);
}
}
void hfsplus_ext_write_extent(struct inode *inode)
{
- if (HFSPLUS_I(inode).flags & HFSPLUS_FLG_EXT_DIRTY) {
- struct hfs_find_data fd;
-
- hfs_find_init(HFSPLUS_SB(inode->i_sb).ext_tree, &fd);
- __hfsplus_ext_write_extent(inode, &fd);
- hfs_find_exit(&fd);
- }
+ mutex_lock(&HFSPLUS_I(inode)->extents_lock);
+ hfsplus_ext_write_extent_locked(inode);
+ mutex_unlock(&HFSPLUS_I(inode)->extents_lock);
}
static inline int __hfsplus_ext_read_extent(struct hfs_find_data *fd,
@@ -136,33 +150,39 @@
static inline int __hfsplus_ext_cache_extent(struct hfs_find_data *fd, struct inode *inode, u32 block)
{
+ struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
int res;
- if (HFSPLUS_I(inode).flags & HFSPLUS_FLG_EXT_DIRTY)
+ WARN_ON(!mutex_is_locked(&hip->extents_lock));
+
+ if (hip->flags & HFSPLUS_FLG_EXT_DIRTY)
__hfsplus_ext_write_extent(inode, fd);
- res = __hfsplus_ext_read_extent(fd, HFSPLUS_I(inode).cached_extents, inode->i_ino,
- block, HFSPLUS_IS_RSRC(inode) ? HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA);
+ res = __hfsplus_ext_read_extent(fd, hip->cached_extents, inode->i_ino,
+ block, HFSPLUS_IS_RSRC(inode) ?
+ HFSPLUS_TYPE_RSRC :
+ HFSPLUS_TYPE_DATA);
if (!res) {
- HFSPLUS_I(inode).cached_start = be32_to_cpu(fd->key->ext.start_block);
- HFSPLUS_I(inode).cached_blocks = hfsplus_ext_block_count(HFSPLUS_I(inode).cached_extents);
+ hip->cached_start = be32_to_cpu(fd->key->ext.start_block);
+ hip->cached_blocks = hfsplus_ext_block_count(hip->cached_extents);
} else {
- HFSPLUS_I(inode).cached_start = HFSPLUS_I(inode).cached_blocks = 0;
- HFSPLUS_I(inode).flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW);
+ hip->cached_start = hip->cached_blocks = 0;
+ hip->flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW);
}
return res;
}
static int hfsplus_ext_read_extent(struct inode *inode, u32 block)
{
+ struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
struct hfs_find_data fd;
int res;
- if (block >= HFSPLUS_I(inode).cached_start &&
- block < HFSPLUS_I(inode).cached_start + HFSPLUS_I(inode).cached_blocks)
+ if (block >= hip->cached_start &&
+ block < hip->cached_start + hip->cached_blocks)
return 0;
- hfs_find_init(HFSPLUS_SB(inode->i_sb).ext_tree, &fd);
+ hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
res = __hfsplus_ext_cache_extent(&fd, inode, block);
hfs_find_exit(&fd);
return res;
@@ -172,21 +192,21 @@
int hfsplus_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
- struct super_block *sb;
+ struct super_block *sb = inode->i_sb;
+ struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
+ struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
int res = -EIO;
u32 ablock, dblock, mask;
int shift;
- sb = inode->i_sb;
-
/* Convert inode block to disk allocation block */
- shift = HFSPLUS_SB(sb).alloc_blksz_shift - sb->s_blocksize_bits;
- ablock = iblock >> HFSPLUS_SB(sb).fs_shift;
+ shift = sbi->alloc_blksz_shift - sb->s_blocksize_bits;
+ ablock = iblock >> sbi->fs_shift;
- if (iblock >= HFSPLUS_I(inode).fs_blocks) {
- if (iblock > HFSPLUS_I(inode).fs_blocks || !create)
+ if (iblock >= hip->fs_blocks) {
+ if (iblock > hip->fs_blocks || !create)
return -EIO;
- if (ablock >= HFSPLUS_I(inode).alloc_blocks) {
+ if (ablock >= hip->alloc_blocks) {
res = hfsplus_file_extend(inode);
if (res)
return res;
@@ -194,33 +214,33 @@
} else
create = 0;
- if (ablock < HFSPLUS_I(inode).first_blocks) {
- dblock = hfsplus_ext_find_block(HFSPLUS_I(inode).first_extents, ablock);
+ if (ablock < hip->first_blocks) {
+ dblock = hfsplus_ext_find_block(hip->first_extents, ablock);
goto done;
}
if (inode->i_ino == HFSPLUS_EXT_CNID)
return -EIO;
- mutex_lock(&HFSPLUS_I(inode).extents_lock);
+ mutex_lock(&hip->extents_lock);
res = hfsplus_ext_read_extent(inode, ablock);
if (!res) {
- dblock = hfsplus_ext_find_block(HFSPLUS_I(inode).cached_extents, ablock -
- HFSPLUS_I(inode).cached_start);
+ dblock = hfsplus_ext_find_block(hip->cached_extents,
+ ablock - hip->cached_start);
} else {
- mutex_unlock(&HFSPLUS_I(inode).extents_lock);
+ mutex_unlock(&hip->extents_lock);
return -EIO;
}
- mutex_unlock(&HFSPLUS_I(inode).extents_lock);
+ mutex_unlock(&hip->extents_lock);
done:
dprint(DBG_EXTENT, "get_block(%lu): %llu - %u\n", inode->i_ino, (long long)iblock, dblock);
- mask = (1 << HFSPLUS_SB(sb).fs_shift) - 1;
- map_bh(bh_result, sb, (dblock << HFSPLUS_SB(sb).fs_shift) + HFSPLUS_SB(sb).blockoffset + (iblock & mask));
+ mask = (1 << sbi->fs_shift) - 1;
+ map_bh(bh_result, sb, (dblock << sbi->fs_shift) + sbi->blockoffset + (iblock & mask));
if (create) {
set_buffer_new(bh_result);
- HFSPLUS_I(inode).phys_size += sb->s_blocksize;
- HFSPLUS_I(inode).fs_blocks++;
+ hip->phys_size += sb->s_blocksize;
+ hip->fs_blocks++;
inode_add_bytes(inode, sb->s_blocksize);
mark_inode_dirty(inode);
}
@@ -327,7 +347,7 @@
if (total_blocks == blocks)
return 0;
- hfs_find_init(HFSPLUS_SB(sb).ext_tree, &fd);
+ hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
do {
res = __hfsplus_ext_read_extent(&fd, ext_entry, cnid,
total_blocks, type);
@@ -348,29 +368,33 @@
int hfsplus_file_extend(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
+ struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
+ struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
u32 start, len, goal;
int res;
- if (HFSPLUS_SB(sb).alloc_file->i_size * 8 < HFSPLUS_SB(sb).total_blocks - HFSPLUS_SB(sb).free_blocks + 8) {
+ if (sbi->alloc_file->i_size * 8 <
+ sbi->total_blocks - sbi->free_blocks + 8) {
// extend alloc file
- printk(KERN_ERR "hfs: extend alloc file! (%Lu,%u,%u)\n", HFSPLUS_SB(sb).alloc_file->i_size * 8,
- HFSPLUS_SB(sb).total_blocks, HFSPLUS_SB(sb).free_blocks);
+ printk(KERN_ERR "hfs: extend alloc file! (%Lu,%u,%u)\n",
+ sbi->alloc_file->i_size * 8,
+ sbi->total_blocks, sbi->free_blocks);
return -ENOSPC;
}
- mutex_lock(&HFSPLUS_I(inode).extents_lock);
- if (HFSPLUS_I(inode).alloc_blocks == HFSPLUS_I(inode).first_blocks)
- goal = hfsplus_ext_lastblock(HFSPLUS_I(inode).first_extents);
+ mutex_lock(&hip->extents_lock);
+ if (hip->alloc_blocks == hip->first_blocks)
+ goal = hfsplus_ext_lastblock(hip->first_extents);
else {
- res = hfsplus_ext_read_extent(inode, HFSPLUS_I(inode).alloc_blocks);
+ res = hfsplus_ext_read_extent(inode, hip->alloc_blocks);
if (res)
goto out;
- goal = hfsplus_ext_lastblock(HFSPLUS_I(inode).cached_extents);
+ goal = hfsplus_ext_lastblock(hip->cached_extents);
}
- len = HFSPLUS_I(inode).clump_blocks;
- start = hfsplus_block_allocate(sb, HFSPLUS_SB(sb).total_blocks, goal, &len);
- if (start >= HFSPLUS_SB(sb).total_blocks) {
+ len = hip->clump_blocks;
+ start = hfsplus_block_allocate(sb, sbi->total_blocks, goal, &len);
+ if (start >= sbi->total_blocks) {
start = hfsplus_block_allocate(sb, goal, 0, &len);
if (start >= goal) {
res = -ENOSPC;
@@ -379,56 +403,56 @@
}
dprint(DBG_EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);
- if (HFSPLUS_I(inode).alloc_blocks <= HFSPLUS_I(inode).first_blocks) {
- if (!HFSPLUS_I(inode).first_blocks) {
+
+ if (hip->alloc_blocks <= hip->first_blocks) {
+ if (!hip->first_blocks) {
dprint(DBG_EXTENT, "first extents\n");
/* no extents yet */
- HFSPLUS_I(inode).first_extents[0].start_block = cpu_to_be32(start);
- HFSPLUS_I(inode).first_extents[0].block_count = cpu_to_be32(len);
+ hip->first_extents[0].start_block = cpu_to_be32(start);
+ hip->first_extents[0].block_count = cpu_to_be32(len);
res = 0;
} else {
/* try to append to extents in inode */
- res = hfsplus_add_extent(HFSPLUS_I(inode).first_extents,
- HFSPLUS_I(inode).alloc_blocks,
+ res = hfsplus_add_extent(hip->first_extents,
+ hip->alloc_blocks,
start, len);
if (res == -ENOSPC)
goto insert_extent;
}
if (!res) {
- hfsplus_dump_extent(HFSPLUS_I(inode).first_extents);
- HFSPLUS_I(inode).first_blocks += len;
+ hfsplus_dump_extent(hip->first_extents);
+ hip->first_blocks += len;
}
} else {
- res = hfsplus_add_extent(HFSPLUS_I(inode).cached_extents,
- HFSPLUS_I(inode).alloc_blocks -
- HFSPLUS_I(inode).cached_start,
+ res = hfsplus_add_extent(hip->cached_extents,
+ hip->alloc_blocks - hip->cached_start,
start, len);
if (!res) {
- hfsplus_dump_extent(HFSPLUS_I(inode).cached_extents);
- HFSPLUS_I(inode).flags |= HFSPLUS_FLG_EXT_DIRTY;
- HFSPLUS_I(inode).cached_blocks += len;
+ hfsplus_dump_extent(hip->cached_extents);
+ hip->flags |= HFSPLUS_FLG_EXT_DIRTY;
+ hip->cached_blocks += len;
} else if (res == -ENOSPC)
goto insert_extent;
}
out:
- mutex_unlock(&HFSPLUS_I(inode).extents_lock);
+ mutex_unlock(&hip->extents_lock);
if (!res) {
- HFSPLUS_I(inode).alloc_blocks += len;
+ hip->alloc_blocks += len;
mark_inode_dirty(inode);
}
return res;
insert_extent:
dprint(DBG_EXTENT, "insert new extent\n");
- hfsplus_ext_write_extent(inode);
+ hfsplus_ext_write_extent_locked(inode);
- memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec));
- HFSPLUS_I(inode).cached_extents[0].start_block = cpu_to_be32(start);
- HFSPLUS_I(inode).cached_extents[0].block_count = cpu_to_be32(len);
- hfsplus_dump_extent(HFSPLUS_I(inode).cached_extents);
- HFSPLUS_I(inode).flags |= HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW;
- HFSPLUS_I(inode).cached_start = HFSPLUS_I(inode).alloc_blocks;
- HFSPLUS_I(inode).cached_blocks = len;
+ memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
+ hip->cached_extents[0].start_block = cpu_to_be32(start);
+ hip->cached_extents[0].block_count = cpu_to_be32(len);
+ hfsplus_dump_extent(hip->cached_extents);
+ hip->flags |= HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW;
+ hip->cached_start = hip->alloc_blocks;
+ hip->cached_blocks = len;
res = 0;
goto out;
@@ -437,13 +461,15 @@
void hfsplus_file_truncate(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
+ struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
struct hfs_find_data fd;
u32 alloc_cnt, blk_cnt, start;
int res;
- dprint(DBG_INODE, "truncate: %lu, %Lu -> %Lu\n", inode->i_ino,
- (long long)HFSPLUS_I(inode).phys_size, inode->i_size);
- if (inode->i_size > HFSPLUS_I(inode).phys_size) {
+ dprint(DBG_INODE, "truncate: %lu, %Lu -> %Lu\n",
+ inode->i_ino, (long long)hip->phys_size, inode->i_size);
+
+ if (inode->i_size > hip->phys_size) {
struct address_space *mapping = inode->i_mapping;
struct page *page;
void *fsdata;
@@ -460,47 +486,48 @@
return;
mark_inode_dirty(inode);
return;
- } else if (inode->i_size == HFSPLUS_I(inode).phys_size)
+ } else if (inode->i_size == hip->phys_size)
return;
- blk_cnt = (inode->i_size + HFSPLUS_SB(sb).alloc_blksz - 1) >> HFSPLUS_SB(sb).alloc_blksz_shift;
- alloc_cnt = HFSPLUS_I(inode).alloc_blocks;
+ blk_cnt = (inode->i_size + HFSPLUS_SB(sb)->alloc_blksz - 1) >>
+ HFSPLUS_SB(sb)->alloc_blksz_shift;
+ alloc_cnt = hip->alloc_blocks;
if (blk_cnt == alloc_cnt)
goto out;
- mutex_lock(&HFSPLUS_I(inode).extents_lock);
- hfs_find_init(HFSPLUS_SB(sb).ext_tree, &fd);
+ mutex_lock(&hip->extents_lock);
+ hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
while (1) {
- if (alloc_cnt == HFSPLUS_I(inode).first_blocks) {
- hfsplus_free_extents(sb, HFSPLUS_I(inode).first_extents,
+ if (alloc_cnt == hip->first_blocks) {
+ hfsplus_free_extents(sb, hip->first_extents,
alloc_cnt, alloc_cnt - blk_cnt);
- hfsplus_dump_extent(HFSPLUS_I(inode).first_extents);
- HFSPLUS_I(inode).first_blocks = blk_cnt;
+ hfsplus_dump_extent(hip->first_extents);
+ hip->first_blocks = blk_cnt;
break;
}
res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
if (res)
break;
- start = HFSPLUS_I(inode).cached_start;
- hfsplus_free_extents(sb, HFSPLUS_I(inode).cached_extents,
+ start = hip->cached_start;
+ hfsplus_free_extents(sb, hip->cached_extents,
alloc_cnt - start, alloc_cnt - blk_cnt);
- hfsplus_dump_extent(HFSPLUS_I(inode).cached_extents);
+ hfsplus_dump_extent(hip->cached_extents);
if (blk_cnt > start) {
- HFSPLUS_I(inode).flags |= HFSPLUS_FLG_EXT_DIRTY;
+ hip->flags |= HFSPLUS_FLG_EXT_DIRTY;
break;
}
alloc_cnt = start;
- HFSPLUS_I(inode).cached_start = HFSPLUS_I(inode).cached_blocks = 0;
- HFSPLUS_I(inode).flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW);
+ hip->cached_start = hip->cached_blocks = 0;
+ hip->flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW);
hfs_brec_remove(&fd);
}
hfs_find_exit(&fd);
- mutex_unlock(&HFSPLUS_I(inode).extents_lock);
+ mutex_unlock(&hip->extents_lock);
- HFSPLUS_I(inode).alloc_blocks = blk_cnt;
+ hip->alloc_blocks = blk_cnt;
out:
- HFSPLUS_I(inode).phys_size = inode->i_size;
- HFSPLUS_I(inode).fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
- inode_set_bytes(inode, HFSPLUS_I(inode).fs_blocks << sb->s_blocksize_bits);
+ hip->phys_size = inode->i_size;
+ hip->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
+ inode_set_bytes(inode, hip->fs_blocks << sb->s_blocksize_bits);
mark_inode_dirty(inode);
}
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index dc856be..cb3653e 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -62,7 +62,7 @@
unsigned int depth;
//unsigned int map1_size, map_size;
- struct semaphore tree_lock;
+ struct mutex tree_lock;
unsigned int pages_per_bnode;
spinlock_t hash_lock;
@@ -121,16 +121,21 @@
u32 sect_count;
int fs_shift;
- /* Stuff in host order from Vol Header */
+ /* immutable data from the volume header */
u32 alloc_blksz;
int alloc_blksz_shift;
u32 total_blocks;
+ u32 data_clump_blocks, rsrc_clump_blocks;
+
+ /* mutable data from the volume header, protected by alloc_mutex */
u32 free_blocks;
- u32 next_alloc;
+ struct mutex alloc_mutex;
+
+ /* mutable data from the volume header, protected by vh_mutex */
u32 next_cnid;
u32 file_count;
u32 folder_count;
- u32 data_clump_blocks, rsrc_clump_blocks;
+ struct mutex vh_mutex;
/* Config options */
u32 creator;
@@ -143,40 +148,50 @@
int part, session;
unsigned long flags;
-
- struct hlist_head rsrc_inodes;
};
-#define HFSPLUS_SB_WRITEBACKUP 0x0001
-#define HFSPLUS_SB_NODECOMPOSE 0x0002
-#define HFSPLUS_SB_FORCE 0x0004
-#define HFSPLUS_SB_HFSX 0x0008
-#define HFSPLUS_SB_CASEFOLD 0x0010
+#define HFSPLUS_SB_WRITEBACKUP 0
+#define HFSPLUS_SB_NODECOMPOSE 1
+#define HFSPLUS_SB_FORCE 2
+#define HFSPLUS_SB_HFSX 3
+#define HFSPLUS_SB_CASEFOLD 4
struct hfsplus_inode_info {
- struct mutex extents_lock;
- u32 clump_blocks, alloc_blocks;
- sector_t fs_blocks;
- /* Allocation extents from catalog record or volume header */
- hfsplus_extent_rec first_extents;
- u32 first_blocks;
- hfsplus_extent_rec cached_extents;
- u32 cached_start, cached_blocks;
atomic_t opencnt;
- struct inode *rsrc_inode;
+ /*
+ * Extent allocation information, protected by extents_lock.
+ */
+ u32 first_blocks;
+ u32 clump_blocks;
+ u32 alloc_blocks;
+ u32 cached_start;
+ u32 cached_blocks;
+ hfsplus_extent_rec first_extents;
+ hfsplus_extent_rec cached_extents;
unsigned long flags;
+ struct mutex extents_lock;
+ /*
+ * Immutable data.
+ */
+ struct inode *rsrc_inode;
__be32 create_date;
- /* Device number in hfsplus_permissions in catalog */
- u32 dev;
- /* BSD system and user file flags */
- u8 rootflags;
- u8 userflags;
+ /*
+ * Protected by sbi->vh_mutex.
+ */
+ u32 linkid;
+
+ /*
+ * Protected by i_mutex.
+ */
+ sector_t fs_blocks;
+ u8 userflags; /* BSD user file flags */
struct list_head open_dir_list;
loff_t phys_size;
+
struct inode vfs_inode;
};
@@ -184,8 +199,8 @@
#define HFSPLUS_FLG_EXT_DIRTY 0x0002
#define HFSPLUS_FLG_EXT_NEW 0x0004
-#define HFSPLUS_IS_DATA(inode) (!(HFSPLUS_I(inode).flags & HFSPLUS_FLG_RSRC))
-#define HFSPLUS_IS_RSRC(inode) (HFSPLUS_I(inode).flags & HFSPLUS_FLG_RSRC)
+#define HFSPLUS_IS_DATA(inode) (!(HFSPLUS_I(inode)->flags & HFSPLUS_FLG_RSRC))
+#define HFSPLUS_IS_RSRC(inode) (HFSPLUS_I(inode)->flags & HFSPLUS_FLG_RSRC)
struct hfs_find_data {
/* filled by caller */
@@ -311,6 +326,7 @@
int hfsplus_delete_cat(u32, struct inode *, struct qstr *);
int hfsplus_rename_cat(u32, struct inode *, struct qstr *,
struct inode *, struct qstr *);
+void hfsplus_cat_set_perms(struct inode *inode, struct hfsplus_perm *perms);
/* dir.c */
extern const struct inode_operations hfsplus_dir_inode_operations;
@@ -372,26 +388,15 @@
int hfs_part_find(struct super_block *, sector_t *, sector_t *);
/* access macros */
-/*
static inline struct hfsplus_sb_info *HFSPLUS_SB(struct super_block *sb)
{
return sb->s_fs_info;
}
+
static inline struct hfsplus_inode_info *HFSPLUS_I(struct inode *inode)
{
return list_entry(inode, struct hfsplus_inode_info, vfs_inode);
}
-*/
-#define HFSPLUS_SB(super) (*(struct hfsplus_sb_info *)(super)->s_fs_info)
-#define HFSPLUS_I(inode) (*list_entry(inode, struct hfsplus_inode_info, vfs_inode))
-
-#if 1
-#define hfsplus_kmap(p) ({ struct page *__p = (p); kmap(__p); })
-#define hfsplus_kunmap(p) ({ struct page *__p = (p); kunmap(__p); __p; })
-#else
-#define hfsplus_kmap(p) kmap(p)
-#define hfsplus_kunmap(p) kunmap(p)
-#endif
#define sb_bread512(sb, sec, data) ({ \
struct buffer_head *__bh; \
@@ -419,6 +424,4 @@
#define hfsp_ut2mt(t) __hfsp_ut2mt((t).tv_sec)
#define hfsp_now2mt() __hfsp_ut2mt(get_seconds())
-#define kdev_t_to_nr(x) (x)
-
#endif
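Note the flag conversion in the header above: the HFSPLUS_SB_* constants change from OR-able masks (0x0001, 0x0002, ...) to bit numbers (0, 1, ...) so the flags word can be manipulated with the atomic set_bit()/test_bit() helpers, as already seen in the btree.c hunk. A small illustration of the two styles (sketch only; the names are stand-ins):

#include <linux/bitops.h>
#include <linux/types.h>

#define SB_HFSX_BIT	3	/* was: #define HFSPLUS_SB_HFSX 0x0008 */

static bool sb_is_hfsx(unsigned long *flags)
{
	/* old style: return *flags & 0x0008; a plain, non-atomic mask test */
	return test_bit(SB_HFSX_BIT, flags);
}

static void sb_set_casefold(unsigned long *flags)
{
	/* old style: *flags |= 0x0010; */
	set_bit(4, flags);	/* HFSPLUS_SB_CASEFOLD as a bit number */
}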
diff --git a/fs/hfsplus/hfsplus_raw.h b/fs/hfsplus/hfsplus_raw.h
index fe99fe8..6892899 100644
--- a/fs/hfsplus/hfsplus_raw.h
+++ b/fs/hfsplus/hfsplus_raw.h
@@ -200,6 +200,7 @@
struct hfsplus_unistr name;
} __packed;
+#define HFSPLUS_CAT_KEYLEN (sizeof(struct hfsplus_cat_key))
/* Structs from hfs.h */
struct hfsp_point {
@@ -323,7 +324,7 @@
__be32 start_block;
} __packed;
-#define HFSPLUS_EXT_KEYLEN 12
+#define HFSPLUS_EXT_KEYLEN sizeof(struct hfsplus_ext_key)
/* HFS+ generic BTree key */
typedef union {
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index c5a979d..7844928 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -36,7 +36,7 @@
*pagep = NULL;
ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
hfsplus_get_block,
- &HFSPLUS_I(mapping->host).phys_size);
+ &HFSPLUS_I(mapping->host)->phys_size);
if (unlikely(ret)) {
loff_t isize = mapping->host->i_size;
if (pos + len > isize)
@@ -62,13 +62,13 @@
switch (inode->i_ino) {
case HFSPLUS_EXT_CNID:
- tree = HFSPLUS_SB(sb).ext_tree;
+ tree = HFSPLUS_SB(sb)->ext_tree;
break;
case HFSPLUS_CAT_CNID:
- tree = HFSPLUS_SB(sb).cat_tree;
+ tree = HFSPLUS_SB(sb)->cat_tree;
break;
case HFSPLUS_ATTR_CNID:
- tree = HFSPLUS_SB(sb).attr_tree;
+ tree = HFSPLUS_SB(sb)->attr_tree;
break;
default:
BUG();
@@ -172,12 +172,13 @@
struct hfs_find_data fd;
struct super_block *sb = dir->i_sb;
struct inode *inode = NULL;
+ struct hfsplus_inode_info *hip;
int err;
if (HFSPLUS_IS_RSRC(dir) || strcmp(dentry->d_name.name, "rsrc"))
goto out;
- inode = HFSPLUS_I(dir).rsrc_inode;
+ inode = HFSPLUS_I(dir)->rsrc_inode;
if (inode)
goto out;
@@ -185,12 +186,13 @@
if (!inode)
return ERR_PTR(-ENOMEM);
+ hip = HFSPLUS_I(inode);
inode->i_ino = dir->i_ino;
- INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
- mutex_init(&HFSPLUS_I(inode).extents_lock);
- HFSPLUS_I(inode).flags = HFSPLUS_FLG_RSRC;
+ INIT_LIST_HEAD(&hip->open_dir_list);
+ mutex_init(&hip->extents_lock);
+ hip->flags = HFSPLUS_FLG_RSRC;
- hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
+ hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
err = hfsplus_find_cat(sb, dir->i_ino, &fd);
if (!err)
err = hfsplus_cat_read_inode(inode, &fd);
@@ -199,10 +201,18 @@
iput(inode);
return ERR_PTR(err);
}
- HFSPLUS_I(inode).rsrc_inode = dir;
- HFSPLUS_I(dir).rsrc_inode = inode;
+ hip->rsrc_inode = dir;
+ HFSPLUS_I(dir)->rsrc_inode = inode;
igrab(dir);
- hlist_add_head(&inode->i_hash, &HFSPLUS_SB(sb).rsrc_inodes);
+
+ /*
+ * __mark_inode_dirty expects inodes to be hashed. Since we don't
+ * want resource fork inodes in the regular inode space, we make them
+ * appear hashed, but do not put them on any lists. hlist_del()
+ * will work fine and require no locking.
+ */
+ inode->i_hash.pprev = &inode->i_hash.next;
+
mark_inode_dirty(inode);
out:
d_add(dentry, inode);
@@ -211,30 +221,27 @@
static void hfsplus_get_perms(struct inode *inode, struct hfsplus_perm *perms, int dir)
{
- struct super_block *sb = inode->i_sb;
+ struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
u16 mode;
mode = be16_to_cpu(perms->mode);
inode->i_uid = be32_to_cpu(perms->owner);
if (!inode->i_uid && !mode)
- inode->i_uid = HFSPLUS_SB(sb).uid;
+ inode->i_uid = sbi->uid;
inode->i_gid = be32_to_cpu(perms->group);
if (!inode->i_gid && !mode)
- inode->i_gid = HFSPLUS_SB(sb).gid;
+ inode->i_gid = sbi->gid;
if (dir) {
- mode = mode ? (mode & S_IALLUGO) :
- (S_IRWXUGO & ~(HFSPLUS_SB(sb).umask));
+ mode = mode ? (mode & S_IALLUGO) : (S_IRWXUGO & ~(sbi->umask));
mode |= S_IFDIR;
} else if (!mode)
- mode = S_IFREG | ((S_IRUGO|S_IWUGO) &
- ~(HFSPLUS_SB(sb).umask));
+ mode = S_IFREG | ((S_IRUGO|S_IWUGO) & ~(sbi->umask));
inode->i_mode = mode;
- HFSPLUS_I(inode).rootflags = perms->rootflags;
- HFSPLUS_I(inode).userflags = perms->userflags;
+ HFSPLUS_I(inode)->userflags = perms->userflags;
if (perms->rootflags & HFSPLUS_FLG_IMMUTABLE)
inode->i_flags |= S_IMMUTABLE;
else
@@ -245,30 +252,13 @@
inode->i_flags &= ~S_APPEND;
}
-static void hfsplus_set_perms(struct inode *inode, struct hfsplus_perm *perms)
-{
- if (inode->i_flags & S_IMMUTABLE)
- perms->rootflags |= HFSPLUS_FLG_IMMUTABLE;
- else
- perms->rootflags &= ~HFSPLUS_FLG_IMMUTABLE;
- if (inode->i_flags & S_APPEND)
- perms->rootflags |= HFSPLUS_FLG_APPEND;
- else
- perms->rootflags &= ~HFSPLUS_FLG_APPEND;
- perms->userflags = HFSPLUS_I(inode).userflags;
- perms->mode = cpu_to_be16(inode->i_mode);
- perms->owner = cpu_to_be32(inode->i_uid);
- perms->group = cpu_to_be32(inode->i_gid);
- perms->dev = cpu_to_be32(HFSPLUS_I(inode).dev);
-}
-
static int hfsplus_file_open(struct inode *inode, struct file *file)
{
if (HFSPLUS_IS_RSRC(inode))
- inode = HFSPLUS_I(inode).rsrc_inode;
+ inode = HFSPLUS_I(inode)->rsrc_inode;
if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
return -EOVERFLOW;
- atomic_inc(&HFSPLUS_I(inode).opencnt);
+ atomic_inc(&HFSPLUS_I(inode)->opencnt);
return 0;
}
@@ -277,12 +267,13 @@
struct super_block *sb = inode->i_sb;
if (HFSPLUS_IS_RSRC(inode))
- inode = HFSPLUS_I(inode).rsrc_inode;
- if (atomic_dec_and_test(&HFSPLUS_I(inode).opencnt)) {
+ inode = HFSPLUS_I(inode)->rsrc_inode;
+ if (atomic_dec_and_test(&HFSPLUS_I(inode)->opencnt)) {
mutex_lock(&inode->i_mutex);
hfsplus_file_truncate(inode);
if (inode->i_flags & S_DEAD) {
- hfsplus_delete_cat(inode->i_ino, HFSPLUS_SB(sb).hidden_dir, NULL);
+ hfsplus_delete_cat(inode->i_ino,
+ HFSPLUS_SB(sb)->hidden_dir, NULL);
hfsplus_delete_inode(inode);
}
mutex_unlock(&inode->i_mutex);
@@ -361,47 +352,52 @@
struct inode *hfsplus_new_inode(struct super_block *sb, int mode)
{
+ struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
struct inode *inode = new_inode(sb);
+ struct hfsplus_inode_info *hip;
+
if (!inode)
return NULL;
- inode->i_ino = HFSPLUS_SB(sb).next_cnid++;
+ inode->i_ino = sbi->next_cnid++;
inode->i_mode = mode;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
inode->i_nlink = 1;
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
- INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
- mutex_init(&HFSPLUS_I(inode).extents_lock);
- atomic_set(&HFSPLUS_I(inode).opencnt, 0);
- HFSPLUS_I(inode).flags = 0;
- memset(HFSPLUS_I(inode).first_extents, 0, sizeof(hfsplus_extent_rec));
- memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec));
- HFSPLUS_I(inode).alloc_blocks = 0;
- HFSPLUS_I(inode).first_blocks = 0;
- HFSPLUS_I(inode).cached_start = 0;
- HFSPLUS_I(inode).cached_blocks = 0;
- HFSPLUS_I(inode).phys_size = 0;
- HFSPLUS_I(inode).fs_blocks = 0;
- HFSPLUS_I(inode).rsrc_inode = NULL;
+
+ hip = HFSPLUS_I(inode);
+ INIT_LIST_HEAD(&hip->open_dir_list);
+ mutex_init(&hip->extents_lock);
+ atomic_set(&hip->opencnt, 0);
+ hip->flags = 0;
+ memset(hip->first_extents, 0, sizeof(hfsplus_extent_rec));
+ memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
+ hip->alloc_blocks = 0;
+ hip->first_blocks = 0;
+ hip->cached_start = 0;
+ hip->cached_blocks = 0;
+ hip->phys_size = 0;
+ hip->fs_blocks = 0;
+ hip->rsrc_inode = NULL;
if (S_ISDIR(inode->i_mode)) {
inode->i_size = 2;
- HFSPLUS_SB(sb).folder_count++;
+ sbi->folder_count++;
inode->i_op = &hfsplus_dir_inode_operations;
inode->i_fop = &hfsplus_dir_operations;
} else if (S_ISREG(inode->i_mode)) {
- HFSPLUS_SB(sb).file_count++;
+ sbi->file_count++;
inode->i_op = &hfsplus_file_inode_operations;
inode->i_fop = &hfsplus_file_operations;
inode->i_mapping->a_ops = &hfsplus_aops;
- HFSPLUS_I(inode).clump_blocks = HFSPLUS_SB(sb).data_clump_blocks;
+ hip->clump_blocks = sbi->data_clump_blocks;
} else if (S_ISLNK(inode->i_mode)) {
- HFSPLUS_SB(sb).file_count++;
+ sbi->file_count++;
inode->i_op = &page_symlink_inode_operations;
inode->i_mapping->a_ops = &hfsplus_aops;
- HFSPLUS_I(inode).clump_blocks = 1;
+ hip->clump_blocks = 1;
} else
- HFSPLUS_SB(sb).file_count++;
+ sbi->file_count++;
insert_inode_hash(inode);
mark_inode_dirty(inode);
sb->s_dirt = 1;
@@ -414,11 +410,11 @@
struct super_block *sb = inode->i_sb;
if (S_ISDIR(inode->i_mode)) {
- HFSPLUS_SB(sb).folder_count--;
+ HFSPLUS_SB(sb)->folder_count--;
sb->s_dirt = 1;
return;
}
- HFSPLUS_SB(sb).file_count--;
+ HFSPLUS_SB(sb)->file_count--;
if (S_ISREG(inode->i_mode)) {
if (!inode->i_nlink) {
inode->i_size = 0;
@@ -434,34 +430,39 @@
void hfsplus_inode_read_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
{
struct super_block *sb = inode->i_sb;
+ struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
+ struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
u32 count;
int i;
- memcpy(&HFSPLUS_I(inode).first_extents, &fork->extents,
- sizeof(hfsplus_extent_rec));
+ memcpy(&hip->first_extents, &fork->extents, sizeof(hfsplus_extent_rec));
for (count = 0, i = 0; i < 8; i++)
count += be32_to_cpu(fork->extents[i].block_count);
- HFSPLUS_I(inode).first_blocks = count;
- memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec));
- HFSPLUS_I(inode).cached_start = 0;
- HFSPLUS_I(inode).cached_blocks = 0;
+ hip->first_blocks = count;
+ memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
+ hip->cached_start = 0;
+ hip->cached_blocks = 0;
- HFSPLUS_I(inode).alloc_blocks = be32_to_cpu(fork->total_blocks);
- inode->i_size = HFSPLUS_I(inode).phys_size = be64_to_cpu(fork->total_size);
- HFSPLUS_I(inode).fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
- inode_set_bytes(inode, HFSPLUS_I(inode).fs_blocks << sb->s_blocksize_bits);
- HFSPLUS_I(inode).clump_blocks = be32_to_cpu(fork->clump_size) >> HFSPLUS_SB(sb).alloc_blksz_shift;
- if (!HFSPLUS_I(inode).clump_blocks)
- HFSPLUS_I(inode).clump_blocks = HFSPLUS_IS_RSRC(inode) ? HFSPLUS_SB(sb).rsrc_clump_blocks :
- HFSPLUS_SB(sb).data_clump_blocks;
+ hip->alloc_blocks = be32_to_cpu(fork->total_blocks);
+ hip->phys_size = inode->i_size = be64_to_cpu(fork->total_size);
+ hip->fs_blocks =
+ (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
+ inode_set_bytes(inode, hip->fs_blocks << sb->s_blocksize_bits);
+ hip->clump_blocks =
+ be32_to_cpu(fork->clump_size) >> sbi->alloc_blksz_shift;
+ if (!hip->clump_blocks) {
+ hip->clump_blocks = HFSPLUS_IS_RSRC(inode) ?
+ sbi->rsrc_clump_blocks :
+ sbi->data_clump_blocks;
+ }
}
void hfsplus_inode_write_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
{
- memcpy(&fork->extents, &HFSPLUS_I(inode).first_extents,
+ memcpy(&fork->extents, &HFSPLUS_I(inode)->first_extents,
sizeof(hfsplus_extent_rec));
fork->total_size = cpu_to_be64(inode->i_size);
- fork->total_blocks = cpu_to_be32(HFSPLUS_I(inode).alloc_blocks);
+ fork->total_blocks = cpu_to_be32(HFSPLUS_I(inode)->alloc_blocks);
}
int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
@@ -472,7 +473,7 @@
type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
- HFSPLUS_I(inode).dev = 0;
+ HFSPLUS_I(inode)->linkid = 0;
if (type == HFSPLUS_FOLDER) {
struct hfsplus_cat_folder *folder = &entry.folder;
@@ -486,8 +487,8 @@
inode->i_atime = hfsp_mt2ut(folder->access_date);
inode->i_mtime = hfsp_mt2ut(folder->content_mod_date);
inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date);
- HFSPLUS_I(inode).create_date = folder->create_date;
- HFSPLUS_I(inode).fs_blocks = 0;
+ HFSPLUS_I(inode)->create_date = folder->create_date;
+ HFSPLUS_I(inode)->fs_blocks = 0;
inode->i_op = &hfsplus_dir_inode_operations;
inode->i_fop = &hfsplus_dir_operations;
} else if (type == HFSPLUS_FILE) {
@@ -518,7 +519,7 @@
inode->i_atime = hfsp_mt2ut(file->access_date);
inode->i_mtime = hfsp_mt2ut(file->content_mod_date);
inode->i_ctime = hfsp_mt2ut(file->attribute_mod_date);
- HFSPLUS_I(inode).create_date = file->create_date;
+ HFSPLUS_I(inode)->create_date = file->create_date;
} else {
printk(KERN_ERR "hfs: bad catalog entry used to create inode\n");
res = -EIO;
@@ -533,12 +534,12 @@
hfsplus_cat_entry entry;
if (HFSPLUS_IS_RSRC(inode))
- main_inode = HFSPLUS_I(inode).rsrc_inode;
+ main_inode = HFSPLUS_I(inode)->rsrc_inode;
if (!main_inode->i_nlink)
return 0;
- if (hfs_find_init(HFSPLUS_SB(main_inode->i_sb).cat_tree, &fd))
+ if (hfs_find_init(HFSPLUS_SB(main_inode->i_sb)->cat_tree, &fd))
/* panic? */
return -EIO;
@@ -554,7 +555,7 @@
hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
sizeof(struct hfsplus_cat_folder));
/* simple node checks? */
- hfsplus_set_perms(inode, &folder->permissions);
+ hfsplus_cat_set_perms(inode, &folder->permissions);
folder->access_date = hfsp_ut2mt(inode->i_atime);
folder->content_mod_date = hfsp_ut2mt(inode->i_mtime);
folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
@@ -576,11 +577,7 @@
hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
sizeof(struct hfsplus_cat_file));
hfsplus_inode_write_fork(inode, &file->data_fork);
- if (S_ISREG(inode->i_mode))
- HFSPLUS_I(inode).dev = inode->i_nlink;
- if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
- HFSPLUS_I(inode).dev = kdev_t_to_nr(inode->i_rdev);
- hfsplus_set_perms(inode, &file->permissions);
+ hfsplus_cat_set_perms(inode, &file->permissions);
if ((file->permissions.rootflags | file->permissions.userflags) & HFSPLUS_FLG_IMMUTABLE)
file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED);
else
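[editor note: the hfsplus/inode.c hunks above convert HFSPLUS_SB()/HFSPLUS_I() from struct-valued macros into pointer accessors and cache the result once per function in a local (sbi, hip). A minimal userspace sketch of the accessor idiom, with hypothetical struct names standing in for the hfsplus ones:

    #include <stddef.h>
    #include <stdio.h>

    /* toy stand-ins for the VFS/hfsplus structures */
    struct inode { unsigned long i_ino; };

    struct my_inode_info {
            int flags;
            struct inode vfs_inode; /* VFS inode embedded in the fs-private one */
    };

    /* pointer accessor: map the embedded inode back to its container */
    static inline struct my_inode_info *MY_I(struct inode *inode)
    {
            return (struct my_inode_info *)
                    ((char *)inode - offsetof(struct my_inode_info, vfs_inode));
    }

    int main(void)
    {
            struct my_inode_info info = { .flags = 42 };
            struct inode *inode = &info.vfs_inode;
            struct my_inode_info *hip = MY_I(inode); /* cache once, reuse */

            printf("flags=%d\n", hip->flags); /* prints 42 */
            return 0;
    }

Caching the pointer also shortens the long HFSPLUS_SB(sb).field expressions that previously forced awkward line breaks.]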
diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
index ac405f0..5b4667e 100644
--- a/fs/hfsplus/ioctl.c
+++ b/fs/hfsplus/ioctl.c
@@ -17,83 +17,98 @@
#include <linux/mount.h>
#include <linux/sched.h>
#include <linux/xattr.h>
-#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#include "hfsplus_fs.h"
-long hfsplus_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+static int hfsplus_ioctl_getflags(struct file *file, int __user *user_flags)
{
- struct inode *inode = filp->f_path.dentry->d_inode;
- unsigned int flags;
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
+ unsigned int flags = 0;
- lock_kernel();
+ if (inode->i_flags & S_IMMUTABLE)
+ flags |= FS_IMMUTABLE_FL;
+ if (inode->i_flags & S_APPEND)
+ flags |= FS_APPEND_FL;
+ if (hip->userflags & HFSPLUS_FLG_NODUMP)
+ flags |= FS_NODUMP_FL;
+
+ return put_user(flags, user_flags);
+}
+
+static int hfsplus_ioctl_setflags(struct file *file, int __user *user_flags)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
+ unsigned int flags;
+ int err = 0;
+
+ err = mnt_want_write(file->f_path.mnt);
+ if (err)
+ goto out;
+
+ if (!is_owner_or_cap(inode)) {
+ err = -EACCES;
+ goto out_drop_write;
+ }
+
+ if (get_user(flags, user_flags)) {
+ err = -EFAULT;
+ goto out_drop_write;
+ }
+
+ mutex_lock(&inode->i_mutex);
+
+ if ((flags & (FS_IMMUTABLE_FL|FS_APPEND_FL)) ||
+ inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
+ if (!capable(CAP_LINUX_IMMUTABLE)) {
+ err = -EPERM;
+ goto out_unlock_inode;
+ }
+ }
+
+ /* don't silently ignore unsupported ext2 flags */
+ if (flags & ~(FS_IMMUTABLE_FL|FS_APPEND_FL|FS_NODUMP_FL)) {
+ err = -EOPNOTSUPP;
+ goto out_unlock_inode;
+ }
+
+ if (flags & FS_IMMUTABLE_FL)
+ inode->i_flags |= S_IMMUTABLE;
+ else
+ inode->i_flags &= ~S_IMMUTABLE;
+
+ if (flags & FS_APPEND_FL)
+ inode->i_flags |= S_APPEND;
+ else
+ inode->i_flags &= ~S_APPEND;
+
+ if (flags & FS_NODUMP_FL)
+ hip->userflags |= HFSPLUS_FLG_NODUMP;
+ else
+ hip->userflags &= ~HFSPLUS_FLG_NODUMP;
+
+ inode->i_ctime = CURRENT_TIME_SEC;
+ mark_inode_dirty(inode);
+
+out_unlock_inode:
+ mutex_unlock(&inode->i_mutex);
+out_drop_write:
+ mnt_drop_write(file->f_path.mnt);
+out:
+ return err;
+}
+
+long hfsplus_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+
switch (cmd) {
case HFSPLUS_IOC_EXT2_GETFLAGS:
- flags = 0;
- if (HFSPLUS_I(inode).rootflags & HFSPLUS_FLG_IMMUTABLE)
- flags |= FS_IMMUTABLE_FL; /* EXT2_IMMUTABLE_FL */
- if (HFSPLUS_I(inode).rootflags & HFSPLUS_FLG_APPEND)
- flags |= FS_APPEND_FL; /* EXT2_APPEND_FL */
- if (HFSPLUS_I(inode).userflags & HFSPLUS_FLG_NODUMP)
- flags |= FS_NODUMP_FL; /* EXT2_NODUMP_FL */
- return put_user(flags, (int __user *)arg);
- case HFSPLUS_IOC_EXT2_SETFLAGS: {
- int err = 0;
- err = mnt_want_write(filp->f_path.mnt);
- if (err) {
- unlock_kernel();
- return err;
- }
-
- if (!is_owner_or_cap(inode)) {
- err = -EACCES;
- goto setflags_out;
- }
- if (get_user(flags, (int __user *)arg)) {
- err = -EFAULT;
- goto setflags_out;
- }
- if (flags & (FS_IMMUTABLE_FL|FS_APPEND_FL) ||
- HFSPLUS_I(inode).rootflags & (HFSPLUS_FLG_IMMUTABLE|HFSPLUS_FLG_APPEND)) {
- if (!capable(CAP_LINUX_IMMUTABLE)) {
- err = -EPERM;
- goto setflags_out;
- }
- }
-
- /* don't silently ignore unsupported ext2 flags */
- if (flags & ~(FS_IMMUTABLE_FL|FS_APPEND_FL|FS_NODUMP_FL)) {
- err = -EOPNOTSUPP;
- goto setflags_out;
- }
- if (flags & FS_IMMUTABLE_FL) { /* EXT2_IMMUTABLE_FL */
- inode->i_flags |= S_IMMUTABLE;
- HFSPLUS_I(inode).rootflags |= HFSPLUS_FLG_IMMUTABLE;
- } else {
- inode->i_flags &= ~S_IMMUTABLE;
- HFSPLUS_I(inode).rootflags &= ~HFSPLUS_FLG_IMMUTABLE;
- }
- if (flags & FS_APPEND_FL) { /* EXT2_APPEND_FL */
- inode->i_flags |= S_APPEND;
- HFSPLUS_I(inode).rootflags |= HFSPLUS_FLG_APPEND;
- } else {
- inode->i_flags &= ~S_APPEND;
- HFSPLUS_I(inode).rootflags &= ~HFSPLUS_FLG_APPEND;
- }
- if (flags & FS_NODUMP_FL) /* EXT2_NODUMP_FL */
- HFSPLUS_I(inode).userflags |= HFSPLUS_FLG_NODUMP;
- else
- HFSPLUS_I(inode).userflags &= ~HFSPLUS_FLG_NODUMP;
-
- inode->i_ctime = CURRENT_TIME_SEC;
- mark_inode_dirty(inode);
-setflags_out:
- mnt_drop_write(filp->f_path.mnt);
- unlock_kernel();
- return err;
- }
+ return hfsplus_ioctl_getflags(file, argp);
+ case HFSPLUS_IOC_EXT2_SETFLAGS:
+ return hfsplus_ioctl_setflags(file, argp);
default:
- unlock_kernel();
return -ENOTTY;
}
}
@@ -110,7 +125,7 @@
if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
return -EOPNOTSUPP;
- res = hfs_find_init(HFSPLUS_SB(inode->i_sb).cat_tree, &fd);
+ res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
if (res)
return res;
res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
@@ -153,7 +168,7 @@
return -EOPNOTSUPP;
if (size) {
- res = hfs_find_init(HFSPLUS_SB(inode->i_sb).cat_tree, &fd);
+ res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
if (res)
return res;
res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
@@ -177,7 +192,7 @@
} else
res = size ? -ERANGE : 4;
} else
- res = -ENODATA;
+ res = -EOPNOTSUPP;
out:
if (size)
hfs_find_exit(&fd);
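[editor note: the rewritten setflags helper above uses the usual kernel unwind idiom: acquire resources in order, and on failure jump to a label that releases only what has been taken so far, in reverse order. Note that the out_unlock_inode label must drop the mutex, which is why it calls mutex_unlock(). A compilable sketch of the same shape, with hypothetical resource names:

    #include <stdio.h>
    #include <stdlib.h>
    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static int do_setflags(int request_ok)
    {
            int err = 0;
            char *buf;

            buf = malloc(64);               /* first resource */
            if (!buf)
                    return -1;

            pthread_mutex_lock(&lock);      /* second resource */

            if (!request_ok) {              /* validation failure: unwind */
                    err = -1;
                    goto out_unlock;
            }
            /* ... do the real work under the lock ... */

    out_unlock:
            pthread_mutex_unlock(&lock);    /* released in reverse order */
            free(buf);
            return err;
    }

    int main(void)
    {
            printf("%d %d\n", do_setflags(1), do_setflags(0)); /* 0 -1 */
            return 0;
    }
]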
diff --git a/fs/hfsplus/options.c b/fs/hfsplus/options.c
index 572628b..f9ab276 100644
--- a/fs/hfsplus/options.c
+++ b/fs/hfsplus/options.c
@@ -143,13 +143,13 @@
kfree(p);
break;
case opt_decompose:
- sbi->flags &= ~HFSPLUS_SB_NODECOMPOSE;
+ clear_bit(HFSPLUS_SB_NODECOMPOSE, &sbi->flags);
break;
case opt_nodecompose:
- sbi->flags |= HFSPLUS_SB_NODECOMPOSE;
+ set_bit(HFSPLUS_SB_NODECOMPOSE, &sbi->flags);
break;
case opt_force:
- sbi->flags |= HFSPLUS_SB_FORCE;
+ set_bit(HFSPLUS_SB_FORCE, &sbi->flags);
break;
default:
return 0;
@@ -171,7 +171,7 @@
int hfsplus_show_options(struct seq_file *seq, struct vfsmount *mnt)
{
- struct hfsplus_sb_info *sbi = &HFSPLUS_SB(mnt->mnt_sb);
+ struct hfsplus_sb_info *sbi = HFSPLUS_SB(mnt->mnt_sb);
if (sbi->creator != HFSPLUS_DEF_CR_TYPE)
seq_printf(seq, ",creator=%.4s", (char *)&sbi->creator);
@@ -184,7 +184,7 @@
seq_printf(seq, ",session=%u", sbi->session);
if (sbi->nls)
seq_printf(seq, ",nls=%s", sbi->nls->charset);
- if (sbi->flags & HFSPLUS_SB_NODECOMPOSE)
+ if (test_bit(HFSPLUS_SB_NODECOMPOSE, &sbi->flags))
seq_printf(seq, ",nodecompose");
return 0;
}
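[editor note: replacing open-coded |= / &= ~ flag arithmetic with set_bit()/clear_bit()/test_bit() makes each update atomic with respect to concurrent updates of other bits in the same word, which matters once the BKL no longer serializes these paths. A rough userspace analogue using C11 atomics (the kernel helpers take a bit number, not a mask):

    #include <stdatomic.h>
    #include <stdio.h>

    enum { SB_NODECOMPOSE, SB_FORCE };      /* bit numbers, as in the patch */

    static atomic_ulong flags;

    static void set_bit_(int nr)   { atomic_fetch_or(&flags, 1UL << nr); }
    static void clear_bit_(int nr) { atomic_fetch_and(&flags, ~(1UL << nr)); }
    static int  test_bit_(int nr)  { return (atomic_load(&flags) >> nr) & 1; }

    int main(void)
    {
            set_bit_(SB_FORCE);
            clear_bit_(SB_NODECOMPOSE);
            printf("force=%d nodecompose=%d\n",
                   test_bit_(SB_FORCE), test_bit_(SB_NODECOMPOSE));
            return 0;
    }
]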
diff --git a/fs/hfsplus/part_tbl.c b/fs/hfsplus/part_tbl.c
index 1528a6f..208b16c 100644
--- a/fs/hfsplus/part_tbl.c
+++ b/fs/hfsplus/part_tbl.c
@@ -74,6 +74,7 @@
int hfs_part_find(struct super_block *sb,
sector_t *part_start, sector_t *part_size)
{
+ struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
struct buffer_head *bh;
__be16 *data;
int i, size, res;
@@ -95,7 +96,7 @@
for (i = 0; i < size; p++, i++) {
if (p->pdStart && p->pdSize &&
p->pdFSID == cpu_to_be32(0x54465331)/*"TFS1"*/ &&
- (HFSPLUS_SB(sb).part < 0 || HFSPLUS_SB(sb).part == i)) {
+ (sbi->part < 0 || sbi->part == i)) {
*part_start += be32_to_cpu(p->pdStart);
*part_size = be32_to_cpu(p->pdSize);
res = 0;
@@ -111,7 +112,7 @@
size = be32_to_cpu(pm->pmMapBlkCnt);
for (i = 0; i < size;) {
if (!memcmp(pm->pmPartType,"Apple_HFS", 9) &&
- (HFSPLUS_SB(sb).part < 0 || HFSPLUS_SB(sb).part == i)) {
+ (sbi->part < 0 || sbi->part == i)) {
*part_start += be32_to_cpu(pm->pmPyPartStart);
*part_size = be32_to_cpu(pm->pmPartBlkCnt);
res = 0;
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 3b55c05..9a88d75 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -12,7 +12,6 @@
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/vfs.h>
#include <linux/nls.h>
@@ -21,40 +20,11 @@
#include "hfsplus_fs.h"
-struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino)
+static int hfsplus_system_read_inode(struct inode *inode)
{
- struct hfs_find_data fd;
- struct hfsplus_vh *vhdr;
- struct inode *inode;
- long err = -EIO;
+ struct hfsplus_vh *vhdr = HFSPLUS_SB(inode->i_sb)->s_vhdr;
- inode = iget_locked(sb, ino);
- if (!inode)
- return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
- return inode;
-
- INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
- mutex_init(&HFSPLUS_I(inode).extents_lock);
- HFSPLUS_I(inode).flags = 0;
- HFSPLUS_I(inode).rsrc_inode = NULL;
- atomic_set(&HFSPLUS_I(inode).opencnt, 0);
-
- if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID) {
- read_inode:
- hfs_find_init(HFSPLUS_SB(inode->i_sb).cat_tree, &fd);
- err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
- if (!err)
- err = hfsplus_cat_read_inode(inode, &fd);
- hfs_find_exit(&fd);
- if (err)
- goto bad_inode;
- goto done;
- }
- vhdr = HFSPLUS_SB(inode->i_sb).s_vhdr;
- switch(inode->i_ino) {
- case HFSPLUS_ROOT_CNID:
- goto read_inode;
+ switch (inode->i_ino) {
case HFSPLUS_EXT_CNID:
hfsplus_inode_read_fork(inode, &vhdr->ext_file);
inode->i_mapping->a_ops = &hfsplus_btree_aops;
@@ -75,74 +45,101 @@
inode->i_mapping->a_ops = &hfsplus_btree_aops;
break;
default:
- goto bad_inode;
+ return -EIO;
}
-done:
+ return 0;
+}
+
+struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino)
+{
+ struct hfs_find_data fd;
+ struct inode *inode;
+ int err;
+
+ inode = iget_locked(sb, ino);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ if (!(inode->i_state & I_NEW))
+ return inode;
+
+ INIT_LIST_HEAD(&HFSPLUS_I(inode)->open_dir_list);
+ mutex_init(&HFSPLUS_I(inode)->extents_lock);
+ HFSPLUS_I(inode)->flags = 0;
+ HFSPLUS_I(inode)->rsrc_inode = NULL;
+ atomic_set(&HFSPLUS_I(inode)->opencnt, 0);
+
+ if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID ||
+ inode->i_ino == HFSPLUS_ROOT_CNID) {
+ hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
+ err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
+ if (!err)
+ err = hfsplus_cat_read_inode(inode, &fd);
+ hfs_find_exit(&fd);
+ } else {
+ err = hfsplus_system_read_inode(inode);
+ }
+
+ if (err) {
+ iget_failed(inode);
+ return ERR_PTR(err);
+ }
+
unlock_new_inode(inode);
return inode;
+}
-bad_inode:
- iget_failed(inode);
- return ERR_PTR(err);
+static int hfsplus_system_write_inode(struct inode *inode)
+{
+ struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
+ struct hfsplus_vh *vhdr = sbi->s_vhdr;
+ struct hfsplus_fork_raw *fork;
+ struct hfs_btree *tree = NULL;
+
+ switch (inode->i_ino) {
+ case HFSPLUS_EXT_CNID:
+ fork = &vhdr->ext_file;
+ tree = sbi->ext_tree;
+ break;
+ case HFSPLUS_CAT_CNID:
+ fork = &vhdr->cat_file;
+ tree = sbi->cat_tree;
+ break;
+ case HFSPLUS_ALLOC_CNID:
+ fork = &vhdr->alloc_file;
+ break;
+ case HFSPLUS_START_CNID:
+ fork = &vhdr->start_file;
+ break;
+ case HFSPLUS_ATTR_CNID:
+ fork = &vhdr->attr_file;
+ tree = sbi->attr_tree;
+ break;
+ default:
+ return -EIO;
+ }
+
+ if (fork->total_size != cpu_to_be64(inode->i_size)) {
+ set_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags);
+ inode->i_sb->s_dirt = 1;
+ }
+ hfsplus_inode_write_fork(inode, fork);
+ if (tree)
+ hfs_btree_write(tree);
+ return 0;
}
static int hfsplus_write_inode(struct inode *inode,
struct writeback_control *wbc)
{
- struct hfsplus_vh *vhdr;
- int ret = 0;
-
dprint(DBG_INODE, "hfsplus_write_inode: %lu\n", inode->i_ino);
+
hfsplus_ext_write_extent(inode);
- if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID) {
+
+ if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID ||
+ inode->i_ino == HFSPLUS_ROOT_CNID)
return hfsplus_cat_write_inode(inode);
- }
- vhdr = HFSPLUS_SB(inode->i_sb).s_vhdr;
- switch (inode->i_ino) {
- case HFSPLUS_ROOT_CNID:
- ret = hfsplus_cat_write_inode(inode);
- break;
- case HFSPLUS_EXT_CNID:
- if (vhdr->ext_file.total_size != cpu_to_be64(inode->i_size)) {
- HFSPLUS_SB(inode->i_sb).flags |= HFSPLUS_SB_WRITEBACKUP;
- inode->i_sb->s_dirt = 1;
- }
- hfsplus_inode_write_fork(inode, &vhdr->ext_file);
- hfs_btree_write(HFSPLUS_SB(inode->i_sb).ext_tree);
- break;
- case HFSPLUS_CAT_CNID:
- if (vhdr->cat_file.total_size != cpu_to_be64(inode->i_size)) {
- HFSPLUS_SB(inode->i_sb).flags |= HFSPLUS_SB_WRITEBACKUP;
- inode->i_sb->s_dirt = 1;
- }
- hfsplus_inode_write_fork(inode, &vhdr->cat_file);
- hfs_btree_write(HFSPLUS_SB(inode->i_sb).cat_tree);
- break;
- case HFSPLUS_ALLOC_CNID:
- if (vhdr->alloc_file.total_size != cpu_to_be64(inode->i_size)) {
- HFSPLUS_SB(inode->i_sb).flags |= HFSPLUS_SB_WRITEBACKUP;
- inode->i_sb->s_dirt = 1;
- }
- hfsplus_inode_write_fork(inode, &vhdr->alloc_file);
- break;
- case HFSPLUS_START_CNID:
- if (vhdr->start_file.total_size != cpu_to_be64(inode->i_size)) {
- HFSPLUS_SB(inode->i_sb).flags |= HFSPLUS_SB_WRITEBACKUP;
- inode->i_sb->s_dirt = 1;
- }
- hfsplus_inode_write_fork(inode, &vhdr->start_file);
- break;
- case HFSPLUS_ATTR_CNID:
- if (vhdr->attr_file.total_size != cpu_to_be64(inode->i_size)) {
- HFSPLUS_SB(inode->i_sb).flags |= HFSPLUS_SB_WRITEBACKUP;
- inode->i_sb->s_dirt = 1;
- }
- hfsplus_inode_write_fork(inode, &vhdr->attr_file);
- hfs_btree_write(HFSPLUS_SB(inode->i_sb).attr_tree);
- break;
- }
- return ret;
+ else
+ return hfsplus_system_write_inode(inode);
}
static void hfsplus_evict_inode(struct inode *inode)
@@ -151,51 +148,53 @@
truncate_inode_pages(&inode->i_data, 0);
end_writeback(inode);
if (HFSPLUS_IS_RSRC(inode)) {
- HFSPLUS_I(HFSPLUS_I(inode).rsrc_inode).rsrc_inode = NULL;
- iput(HFSPLUS_I(inode).rsrc_inode);
+ HFSPLUS_I(HFSPLUS_I(inode)->rsrc_inode)->rsrc_inode = NULL;
+ iput(HFSPLUS_I(inode)->rsrc_inode);
}
}
int hfsplus_sync_fs(struct super_block *sb, int wait)
{
- struct hfsplus_vh *vhdr = HFSPLUS_SB(sb).s_vhdr;
+ struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
+ struct hfsplus_vh *vhdr = sbi->s_vhdr;
dprint(DBG_SUPER, "hfsplus_write_super\n");
- lock_super(sb);
+ mutex_lock(&sbi->vh_mutex);
+ mutex_lock(&sbi->alloc_mutex);
sb->s_dirt = 0;
- vhdr->free_blocks = cpu_to_be32(HFSPLUS_SB(sb).free_blocks);
- vhdr->next_alloc = cpu_to_be32(HFSPLUS_SB(sb).next_alloc);
- vhdr->next_cnid = cpu_to_be32(HFSPLUS_SB(sb).next_cnid);
- vhdr->folder_count = cpu_to_be32(HFSPLUS_SB(sb).folder_count);
- vhdr->file_count = cpu_to_be32(HFSPLUS_SB(sb).file_count);
+ vhdr->free_blocks = cpu_to_be32(sbi->free_blocks);
+ vhdr->next_cnid = cpu_to_be32(sbi->next_cnid);
+ vhdr->folder_count = cpu_to_be32(sbi->folder_count);
+ vhdr->file_count = cpu_to_be32(sbi->file_count);
- mark_buffer_dirty(HFSPLUS_SB(sb).s_vhbh);
- if (HFSPLUS_SB(sb).flags & HFSPLUS_SB_WRITEBACKUP) {
- if (HFSPLUS_SB(sb).sect_count) {
+ mark_buffer_dirty(sbi->s_vhbh);
+ if (test_and_clear_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags)) {
+ if (sbi->sect_count) {
struct buffer_head *bh;
u32 block, offset;
- block = HFSPLUS_SB(sb).blockoffset;
- block += (HFSPLUS_SB(sb).sect_count - 2) >> (sb->s_blocksize_bits - 9);
- offset = ((HFSPLUS_SB(sb).sect_count - 2) << 9) & (sb->s_blocksize - 1);
- printk(KERN_DEBUG "hfs: backup: %u,%u,%u,%u\n", HFSPLUS_SB(sb).blockoffset,
- HFSPLUS_SB(sb).sect_count, block, offset);
+ block = sbi->blockoffset;
+ block += (sbi->sect_count - 2) >> (sb->s_blocksize_bits - 9);
+ offset = ((sbi->sect_count - 2) << 9) & (sb->s_blocksize - 1);
+ printk(KERN_DEBUG "hfs: backup: %u,%u,%u,%u\n",
+ sbi->blockoffset, sbi->sect_count,
+ block, offset);
bh = sb_bread(sb, block);
if (bh) {
vhdr = (struct hfsplus_vh *)(bh->b_data + offset);
if (be16_to_cpu(vhdr->signature) == HFSPLUS_VOLHEAD_SIG) {
- memcpy(vhdr, HFSPLUS_SB(sb).s_vhdr, sizeof(*vhdr));
+ memcpy(vhdr, sbi->s_vhdr, sizeof(*vhdr));
mark_buffer_dirty(bh);
brelse(bh);
} else
printk(KERN_WARNING "hfs: backup not found!\n");
}
}
- HFSPLUS_SB(sb).flags &= ~HFSPLUS_SB_WRITEBACKUP;
}
- unlock_super(sb);
+ mutex_unlock(&sbi->alloc_mutex);
+ mutex_unlock(&sbi->vh_mutex);
return 0;
}
@@ -209,48 +208,48 @@
static void hfsplus_put_super(struct super_block *sb)
{
+ struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
+
dprint(DBG_SUPER, "hfsplus_put_super\n");
+
if (!sb->s_fs_info)
return;
- lock_kernel();
-
if (sb->s_dirt)
hfsplus_write_super(sb);
- if (!(sb->s_flags & MS_RDONLY) && HFSPLUS_SB(sb).s_vhdr) {
- struct hfsplus_vh *vhdr = HFSPLUS_SB(sb).s_vhdr;
+ if (!(sb->s_flags & MS_RDONLY) && sbi->s_vhdr) {
+ struct hfsplus_vh *vhdr = sbi->s_vhdr;
vhdr->modify_date = hfsp_now2mt();
vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_UNMNT);
vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_INCNSTNT);
- mark_buffer_dirty(HFSPLUS_SB(sb).s_vhbh);
- sync_dirty_buffer(HFSPLUS_SB(sb).s_vhbh);
+ mark_buffer_dirty(sbi->s_vhbh);
+ sync_dirty_buffer(sbi->s_vhbh);
}
- hfs_btree_close(HFSPLUS_SB(sb).cat_tree);
- hfs_btree_close(HFSPLUS_SB(sb).ext_tree);
- iput(HFSPLUS_SB(sb).alloc_file);
- iput(HFSPLUS_SB(sb).hidden_dir);
- brelse(HFSPLUS_SB(sb).s_vhbh);
- unload_nls(HFSPLUS_SB(sb).nls);
+ hfs_btree_close(sbi->cat_tree);
+ hfs_btree_close(sbi->ext_tree);
+ iput(sbi->alloc_file);
+ iput(sbi->hidden_dir);
+ brelse(sbi->s_vhbh);
+ unload_nls(sbi->nls);
kfree(sb->s_fs_info);
sb->s_fs_info = NULL;
-
- unlock_kernel();
}
static int hfsplus_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
+ struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
buf->f_type = HFSPLUS_SUPER_MAGIC;
buf->f_bsize = sb->s_blocksize;
- buf->f_blocks = HFSPLUS_SB(sb).total_blocks << HFSPLUS_SB(sb).fs_shift;
- buf->f_bfree = HFSPLUS_SB(sb).free_blocks << HFSPLUS_SB(sb).fs_shift;
+ buf->f_blocks = sbi->total_blocks << sbi->fs_shift;
+ buf->f_bfree = sbi->free_blocks << sbi->fs_shift;
buf->f_bavail = buf->f_bfree;
buf->f_files = 0xFFFFFFFF;
- buf->f_ffree = 0xFFFFFFFF - HFSPLUS_SB(sb).next_cnid;
+ buf->f_ffree = 0xFFFFFFFF - sbi->next_cnid;
buf->f_fsid.val[0] = (u32)id;
buf->f_fsid.val[1] = (u32)(id >> 32);
buf->f_namelen = HFSPLUS_MAX_STRLEN;
@@ -263,11 +262,11 @@
if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
return 0;
if (!(*flags & MS_RDONLY)) {
- struct hfsplus_vh *vhdr = HFSPLUS_SB(sb).s_vhdr;
+ struct hfsplus_vh *vhdr = HFSPLUS_SB(sb)->s_vhdr;
struct hfsplus_sb_info sbi;
memset(&sbi, 0, sizeof(struct hfsplus_sb_info));
- sbi.nls = HFSPLUS_SB(sb).nls;
+ sbi.nls = HFSPLUS_SB(sb)->nls;
if (!hfsplus_parse_options(data, &sbi))
return -EINVAL;
@@ -276,7 +275,7 @@
"running fsck.hfsplus is recommended. leaving read-only.\n");
sb->s_flags |= MS_RDONLY;
*flags |= MS_RDONLY;
- } else if (sbi.flags & HFSPLUS_SB_FORCE) {
+ } else if (test_bit(HFSPLUS_SB_FORCE, &sbi.flags)) {
/* nothing */
} else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
printk(KERN_WARNING "hfs: filesystem is marked locked, leaving read-only.\n");
@@ -320,7 +319,8 @@
return -ENOMEM;
sb->s_fs_info = sbi;
- INIT_HLIST_HEAD(&sbi->rsrc_inodes);
+ mutex_init(&sbi->alloc_mutex);
+ mutex_init(&sbi->vh_mutex);
hfsplus_fill_defaults(sbi);
if (!hfsplus_parse_options(data, sbi)) {
printk(KERN_ERR "hfs: unable to parse mount options\n");
@@ -344,7 +344,7 @@
err = -EINVAL;
goto cleanup;
}
- vhdr = HFSPLUS_SB(sb).s_vhdr;
+ vhdr = sbi->s_vhdr;
/* Copy parts of the volume header into the superblock */
sb->s_magic = HFSPLUS_VOLHEAD_SIG;
@@ -353,18 +353,19 @@
printk(KERN_ERR "hfs: wrong filesystem version\n");
goto cleanup;
}
- HFSPLUS_SB(sb).total_blocks = be32_to_cpu(vhdr->total_blocks);
- HFSPLUS_SB(sb).free_blocks = be32_to_cpu(vhdr->free_blocks);
- HFSPLUS_SB(sb).next_alloc = be32_to_cpu(vhdr->next_alloc);
- HFSPLUS_SB(sb).next_cnid = be32_to_cpu(vhdr->next_cnid);
- HFSPLUS_SB(sb).file_count = be32_to_cpu(vhdr->file_count);
- HFSPLUS_SB(sb).folder_count = be32_to_cpu(vhdr->folder_count);
- HFSPLUS_SB(sb).data_clump_blocks = be32_to_cpu(vhdr->data_clump_sz) >> HFSPLUS_SB(sb).alloc_blksz_shift;
- if (!HFSPLUS_SB(sb).data_clump_blocks)
- HFSPLUS_SB(sb).data_clump_blocks = 1;
- HFSPLUS_SB(sb).rsrc_clump_blocks = be32_to_cpu(vhdr->rsrc_clump_sz) >> HFSPLUS_SB(sb).alloc_blksz_shift;
- if (!HFSPLUS_SB(sb).rsrc_clump_blocks)
- HFSPLUS_SB(sb).rsrc_clump_blocks = 1;
+ sbi->total_blocks = be32_to_cpu(vhdr->total_blocks);
+ sbi->free_blocks = be32_to_cpu(vhdr->free_blocks);
+ sbi->next_cnid = be32_to_cpu(vhdr->next_cnid);
+ sbi->file_count = be32_to_cpu(vhdr->file_count);
+ sbi->folder_count = be32_to_cpu(vhdr->folder_count);
+ sbi->data_clump_blocks =
+ be32_to_cpu(vhdr->data_clump_sz) >> sbi->alloc_blksz_shift;
+ if (!sbi->data_clump_blocks)
+ sbi->data_clump_blocks = 1;
+ sbi->rsrc_clump_blocks =
+ be32_to_cpu(vhdr->rsrc_clump_sz) >> sbi->alloc_blksz_shift;
+ if (!sbi->rsrc_clump_blocks)
+ sbi->rsrc_clump_blocks = 1;
/* Set up operations so we can load metadata */
sb->s_op = &hfsplus_sops;
@@ -374,7 +375,7 @@
printk(KERN_WARNING "hfs: Filesystem was not cleanly unmounted, "
"running fsck.hfsplus is recommended. mounting read-only.\n");
sb->s_flags |= MS_RDONLY;
- } else if (sbi->flags & HFSPLUS_SB_FORCE) {
+ } else if (test_and_clear_bit(HFSPLUS_SB_FORCE, &sbi->flags)) {
/* nothing */
} else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
printk(KERN_WARNING "hfs: Filesystem is marked locked, mounting read-only.\n");
@@ -384,16 +385,15 @@
"use the force option at your own risk, mounting read-only.\n");
sb->s_flags |= MS_RDONLY;
}
- sbi->flags &= ~HFSPLUS_SB_FORCE;
/* Load metadata objects (B*Trees) */
- HFSPLUS_SB(sb).ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID);
- if (!HFSPLUS_SB(sb).ext_tree) {
+ sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID);
+ if (!sbi->ext_tree) {
printk(KERN_ERR "hfs: failed to load extents file\n");
goto cleanup;
}
- HFSPLUS_SB(sb).cat_tree = hfs_btree_open(sb, HFSPLUS_CAT_CNID);
- if (!HFSPLUS_SB(sb).cat_tree) {
+ sbi->cat_tree = hfs_btree_open(sb, HFSPLUS_CAT_CNID);
+ if (!sbi->cat_tree) {
printk(KERN_ERR "hfs: failed to load catalog file\n");
goto cleanup;
}
@@ -404,7 +404,7 @@
err = PTR_ERR(inode);
goto cleanup;
}
- HFSPLUS_SB(sb).alloc_file = inode;
+ sbi->alloc_file = inode;
/* Load the root directory */
root = hfsplus_iget(sb, HFSPLUS_ROOT_CNID);
@@ -423,7 +423,7 @@
str.len = sizeof(HFSP_HIDDENDIR_NAME) - 1;
str.name = HFSP_HIDDENDIR_NAME;
- hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
+ hfs_find_init(sbi->cat_tree, &fd);
hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_ROOT_CNID, &str);
if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
hfs_find_exit(&fd);
@@ -434,7 +434,7 @@
err = PTR_ERR(inode);
goto cleanup;
}
- HFSPLUS_SB(sb).hidden_dir = inode;
+ sbi->hidden_dir = inode;
} else
hfs_find_exit(&fd);
@@ -449,15 +449,19 @@
be32_add_cpu(&vhdr->write_count, 1);
vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT);
vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_INCNSTNT);
- mark_buffer_dirty(HFSPLUS_SB(sb).s_vhbh);
- sync_dirty_buffer(HFSPLUS_SB(sb).s_vhbh);
+ mark_buffer_dirty(sbi->s_vhbh);
+ sync_dirty_buffer(sbi->s_vhbh);
- if (!HFSPLUS_SB(sb).hidden_dir) {
+ if (!sbi->hidden_dir) {
printk(KERN_DEBUG "hfs: create hidden dir...\n");
- HFSPLUS_SB(sb).hidden_dir = hfsplus_new_inode(sb, S_IFDIR);
- hfsplus_create_cat(HFSPLUS_SB(sb).hidden_dir->i_ino, sb->s_root->d_inode,
- &str, HFSPLUS_SB(sb).hidden_dir);
- mark_inode_dirty(HFSPLUS_SB(sb).hidden_dir);
+
+ mutex_lock(&sbi->vh_mutex);
+ sbi->hidden_dir = hfsplus_new_inode(sb, S_IFDIR);
+ hfsplus_create_cat(sbi->hidden_dir->i_ino, sb->s_root->d_inode,
+ &str, sbi->hidden_dir);
+ mutex_unlock(&sbi->vh_mutex);
+
+ mark_inode_dirty(sbi->hidden_dir);
}
out:
unload_nls(sbi->nls);
@@ -486,7 +490,7 @@
static void hfsplus_destroy_inode(struct inode *inode)
{
- kmem_cache_free(hfsplus_inode_cachep, &HFSPLUS_I(inode));
+ kmem_cache_free(hfsplus_inode_cachep, HFSPLUS_I(inode));
}
#define HFSPLUS_INODE_SIZE sizeof(struct hfsplus_inode_info)
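[editor note: the break after tree = sbi->attr_tree in the new hfsplus_system_write_inode() is load-bearing: C switch arms fall through, so omitting it would drop every attributes-file write into the default arm and return -EIO. A tiny demonstration of the failure mode:

    #include <stdio.h>

    static int dispatch(int id, int with_break)
    {
            int ret = 0;

            switch (id) {
            case 3:
                    ret = 100;              /* the useful work */
                    if (with_break)
                            break;          /* without this ... */
                    /* fall through */
            default:
                    return -5;              /* ... the error arm runs anyway */
            }
            return ret;
    }

    int main(void)
    {
            printf("%d %d\n", dispatch(3, 1), dispatch(3, 0)); /* 100 -5 */
            return 0;
    }
]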
diff --git a/fs/hfsplus/unicode.c b/fs/hfsplus/unicode.c
index 628ccf6..b66d67d 100644
--- a/fs/hfsplus/unicode.c
+++ b/fs/hfsplus/unicode.c
@@ -121,7 +121,7 @@
int hfsplus_uni2asc(struct super_block *sb, const struct hfsplus_unistr *ustr, char *astr, int *len_p)
{
const hfsplus_unichr *ip;
- struct nls_table *nls = HFSPLUS_SB(sb).nls;
+ struct nls_table *nls = HFSPLUS_SB(sb)->nls;
u8 *op;
u16 cc, c0, c1;
u16 *ce1, *ce2;
@@ -132,7 +132,7 @@
ustrlen = be16_to_cpu(ustr->length);
len = *len_p;
ce1 = NULL;
- compose = !(HFSPLUS_SB(sb).flags & HFSPLUS_SB_NODECOMPOSE);
+ compose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags);
while (ustrlen > 0) {
c0 = be16_to_cpu(*ip++);
@@ -246,7 +246,7 @@
static inline int asc2unichar(struct super_block *sb, const char *astr, int len,
wchar_t *uc)
{
- int size = HFSPLUS_SB(sb).nls->char2uni(astr, len, uc);
+ int size = HFSPLUS_SB(sb)->nls->char2uni(astr, len, uc);
if (size <= 0) {
*uc = '?';
size = 1;
@@ -293,7 +293,7 @@
u16 *dstr, outlen = 0;
wchar_t c;
- decompose = !(HFSPLUS_SB(sb).flags & HFSPLUS_SB_NODECOMPOSE);
+ decompose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags);
while (outlen < HFSPLUS_MAX_STRLEN && len > 0) {
size = asc2unichar(sb, astr, len, &c);
@@ -330,8 +330,8 @@
wchar_t c;
u16 c2;
- casefold = (HFSPLUS_SB(sb).flags & HFSPLUS_SB_CASEFOLD);
- decompose = !(HFSPLUS_SB(sb).flags & HFSPLUS_SB_NODECOMPOSE);
+ casefold = test_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
+ decompose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags);
hash = init_name_hash();
astr = str->name;
len = str->len;
@@ -373,8 +373,8 @@
u16 c1, c2;
wchar_t c;
- casefold = (HFSPLUS_SB(sb).flags & HFSPLUS_SB_CASEFOLD);
- decompose = !(HFSPLUS_SB(sb).flags & HFSPLUS_SB_NODECOMPOSE);
+ casefold = test_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
+ decompose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags);
astr1 = s1->name;
len1 = s1->len;
astr2 = s2->name;
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index bed78ac8..8972c20 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -65,8 +65,8 @@
*start = 0;
*size = sb->s_bdev->bd_inode->i_size >> 9;
- if (HFSPLUS_SB(sb).session >= 0) {
- te.cdte_track = HFSPLUS_SB(sb).session;
+ if (HFSPLUS_SB(sb)->session >= 0) {
+ te.cdte_track = HFSPLUS_SB(sb)->session;
te.cdte_format = CDROM_LBA;
res = ioctl_by_bdev(sb->s_bdev, CDROMREADTOCENTRY, (unsigned long)&te);
if (!res && (te.cdte_ctrl & CDROM_DATA_TRACK) == 4) {
@@ -87,6 +87,7 @@
/* Takes in super block, returns true if good data read */
int hfsplus_read_wrapper(struct super_block *sb)
{
+ struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
struct buffer_head *bh;
struct hfsplus_vh *vhdr;
struct hfsplus_wd wd;
@@ -122,7 +123,7 @@
if (vhdr->signature == cpu_to_be16(HFSPLUS_VOLHEAD_SIG))
break;
if (vhdr->signature == cpu_to_be16(HFSPLUS_VOLHEAD_SIGX)) {
- HFSPLUS_SB(sb).flags |= HFSPLUS_SB_HFSX;
+ set_bit(HFSPLUS_SB_HFSX, &sbi->flags);
break;
}
brelse(bh);
@@ -143,11 +144,11 @@
if (blocksize < HFSPLUS_SECTOR_SIZE ||
((blocksize - 1) & blocksize))
return -EINVAL;
- HFSPLUS_SB(sb).alloc_blksz = blocksize;
- HFSPLUS_SB(sb).alloc_blksz_shift = 0;
+ sbi->alloc_blksz = blocksize;
+ sbi->alloc_blksz_shift = 0;
while ((blocksize >>= 1) != 0)
- HFSPLUS_SB(sb).alloc_blksz_shift++;
- blocksize = min(HFSPLUS_SB(sb).alloc_blksz, (u32)PAGE_SIZE);
+ sbi->alloc_blksz_shift++;
+ blocksize = min(sbi->alloc_blksz, (u32)PAGE_SIZE);
/* align block size to block offset */
while (part_start & ((blocksize >> HFSPLUS_SECTOR_SHIFT) - 1))
@@ -158,23 +159,26 @@
return -EINVAL;
}
- HFSPLUS_SB(sb).blockoffset = part_start >>
- (sb->s_blocksize_bits - HFSPLUS_SECTOR_SHIFT);
- HFSPLUS_SB(sb).sect_count = part_size;
- HFSPLUS_SB(sb).fs_shift = HFSPLUS_SB(sb).alloc_blksz_shift -
- sb->s_blocksize_bits;
+ sbi->blockoffset =
+ part_start >> (sb->s_blocksize_bits - HFSPLUS_SECTOR_SHIFT);
+ sbi->sect_count = part_size;
+ sbi->fs_shift = sbi->alloc_blksz_shift - sb->s_blocksize_bits;
bh = sb_bread512(sb, part_start + HFSPLUS_VOLHEAD_SECTOR, vhdr);
if (!bh)
return -EIO;
/* should still be the same... */
- if (vhdr->signature != (HFSPLUS_SB(sb).flags & HFSPLUS_SB_HFSX ?
- cpu_to_be16(HFSPLUS_VOLHEAD_SIGX) :
- cpu_to_be16(HFSPLUS_VOLHEAD_SIG)))
- goto error;
- HFSPLUS_SB(sb).s_vhbh = bh;
- HFSPLUS_SB(sb).s_vhdr = vhdr;
+ if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags)) {
+ if (vhdr->signature != cpu_to_be16(HFSPLUS_VOLHEAD_SIGX))
+ goto error;
+ } else {
+ if (vhdr->signature != cpu_to_be16(HFSPLUS_VOLHEAD_SIG))
+ goto error;
+ }
+
+ sbi->s_vhbh = bh;
+ sbi->s_vhdr = vhdr;
return 0;
error:
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index e20ee85..f3f3578 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -115,7 +115,7 @@
inode_inc_link_count(dir);
- inode = minix_new_inode(dir, mode, &err);
+ inode = minix_new_inode(dir, S_IFDIR | mode, &err);
if (!inode)
goto out_dir;
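[editor note: the minix fix above hinges on the split encoding of i_mode: the low bits are permissions, the S_IFMT bits carry the file type, and helpers such as S_ISDIR() look only at the latter. Creating a directory inode from the bare permission mode therefore produced an inode nothing recognized as a directory. A two-line illustration:

    #include <stdio.h>
    #include <sys/stat.h>

    int main(void)
    {
            mode_t perms = 0755;                    /* permission bits only */
            mode_t mode  = S_IFDIR | perms;         /* with the file-type bits */

            printf("S_ISDIR(perms)=%d S_ISDIR(mode)=%d\n",
                   S_ISDIR(perms), S_ISDIR(mode));  /* 0 then 1 */
            return 0;
    }
]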
diff --git a/fs/namespace.c b/fs/namespace.c
index de402eb..a72eaab 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1484,13 +1484,30 @@
}
/*
+ * Sanity check the flags to change_mnt_propagation.
+ */
+
+static int flags_to_propagation_type(int flags)
+{
+ int type = flags & ~MS_REC;
+
+ /* Fail if any non-propagation flags are set */
+ if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
+ return 0;
+ /* Only one propagation flag should be set */
+ if (!is_power_of_2(type))
+ return 0;
+ return type;
+}
+
+/*
* recursively change the type of the mountpoint.
*/
static int do_change_type(struct path *path, int flag)
{
struct vfsmount *m, *mnt = path->mnt;
int recurse = flag & MS_REC;
- int type = flag & ~MS_REC;
+ int type;
int err = 0;
if (!capable(CAP_SYS_ADMIN))
@@ -1499,6 +1516,10 @@
if (path->dentry != path->mnt->mnt_root)
return -EINVAL;
+ type = flags_to_propagation_type(flag);
+ if (!type)
+ return -EINVAL;
+
down_write(&namespace_sem);
if (type == MS_SHARED) {
err = invent_group_ids(mnt, recurse);
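[editor note: the new helper rejects mount(2) flag combinations in which zero or more than one propagation type is requested; since each MS_* type is a distinct bit, "exactly one set" reduces to a power-of-two test. A standalone version, with the flag values copied from the uapi headers:

    #include <stdio.h>

    #define MS_REC        (1 << 14)
    #define MS_UNBINDABLE (1 << 17)
    #define MS_PRIVATE    (1 << 18)
    #define MS_SLAVE      (1 << 19)
    #define MS_SHARED     (1 << 20)

    static int is_power_of_2(unsigned int n)
    {
            return n != 0 && (n & (n - 1)) == 0;
    }

    static int flags_to_propagation_type(int flags)
    {
            int type = flags & ~MS_REC;

            if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
                    return 0;       /* stray non-propagation flags */
            if (!is_power_of_2(type))
                    return 0;       /* zero types, or more than one */
            return type;
    }

    int main(void)
    {
            printf("%d\n", flags_to_propagation_type(MS_SHARED | MS_REC) == MS_SHARED);
            printf("%d\n", flags_to_propagation_type(MS_SHARED | MS_SLAVE)); /* 0 */
            return 0;
    }
]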
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 6c2aad4..f7e13db 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -63,6 +63,7 @@
config NFS_V4
bool "NFS client support for NFS version 4"
depends on NFS_FS
+ select SUNRPC_GSS
help
This option enables support for version 4 of the NFS protocol
(RFC 3530) in the kernel's NFS client.
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 4e7df2a..e734072 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -275,7 +275,7 @@
sin1->sin6_scope_id != sin2->sin6_scope_id)
return 0;
- return ipv6_addr_equal(&sin1->sin6_addr, &sin1->sin6_addr);
+ return ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr);
}
#else /* !defined(CONFIG_IPV6) && !defined(CONFIG_IPV6_MODULE) */
static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1,
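[editor note: the client.c hunk fixes a classic copy-paste bug: ipv6_addr_equal() was handed &sin1->sin6_addr twice, so any two sockets with matching ports and scope ids compared equal. The userspace equivalent of the before/after, using the glibc comparison macro:

    #include <stdio.h>
    #include <netinet/in.h>

    int main(void)
    {
            struct in6_addr a = IN6ADDR_LOOPBACK_INIT;      /* ::1 */
            struct in6_addr b = IN6ADDR_ANY_INIT;           /* ::  */

            /* the broken form compared an address with itself */
            printf("buggy:   %d\n", IN6_ARE_ADDR_EQUAL(&a, &a)); /* always 1 */
            printf("correct: %d\n", IN6_ARE_ADDR_EQUAL(&a, &b)); /* 0 */
            return 0;
    }
]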
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index eb51bd6..05bf3c0 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -723,10 +723,6 @@
default:
BUG();
}
- if (res < 0)
- dprintk(KERN_WARNING "%s: VFS is out of sync with lock manager"
- " - error %d!\n",
- __func__, res);
return res;
}
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index ec3966e..f4cbf0c 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -431,7 +431,15 @@
goto out_err;
error = server->nfs_client->rpc_ops->statfs(server, fh, &res);
+ if (unlikely(error == -ESTALE)) {
+ struct dentry *pd_dentry;
+ pd_dentry = dget_parent(dentry);
+ if (pd_dentry != NULL) {
+ nfs_zap_caches(pd_dentry->d_inode);
+ dput(pd_dentry);
+ }
+ }
nfs_free_fattr(res.fattr);
if (error < 0)
goto out_err;
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index 95932f5..4264377 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -69,6 +69,7 @@
depends on NFSD && PROC_FS && EXPERIMENTAL
select NFSD_V3
select FS_POSIX_ACL
+ select SUNRPC_GSS
help
This option enables support in your system's NFS server for
version 4 of the NFS protocol (RFC 3530).
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 2e73571..cf0d2ff 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -440,7 +440,7 @@
static int nfs4_access_to_omode(u32 access)
{
- switch (access) {
+ switch (access & NFS4_SHARE_ACCESS_BOTH) {
case NFS4_SHARE_ACCESS_READ:
return O_RDONLY;
case NFS4_SHARE_ACCESS_WRITE:
@@ -2450,14 +2450,13 @@
static __be32
nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_stateid *stp, struct nfsd4_open *open)
{
- u32 op_share_access, new_access;
+ u32 op_share_access = open->op_share_access & ~NFS4_SHARE_WANT_MASK;
+ bool new_access;
__be32 status;
- set_access(&new_access, stp->st_access_bmap);
- new_access = (~new_access) & open->op_share_access & ~NFS4_SHARE_WANT_MASK;
-
+ new_access = !test_bit(op_share_access, &stp->st_access_bmap);
if (new_access) {
- status = nfs4_get_vfs_file(rqstp, fp, cur_fh, new_access);
+ status = nfs4_get_vfs_file(rqstp, fp, cur_fh, op_share_access);
if (status)
return status;
}
@@ -2470,7 +2469,6 @@
return status;
}
/* remember the open */
- op_share_access = open->op_share_access & ~NFS4_SHARE_WANT_MASK;
__set_bit(op_share_access, &stp->st_access_bmap);
__set_bit(open->op_share_deny, &stp->st_deny_bmap);
@@ -2983,7 +2981,6 @@
*filpp = find_readable_file(stp->st_file);
else
*filpp = find_writeable_file(stp->st_file);
- BUG_ON(!*filpp); /* assured by check_openmode */
}
}
status = nfs_ok;
@@ -3561,7 +3558,8 @@
struct nfs4_stateowner *open_sop = NULL;
struct nfs4_stateowner *lock_sop = NULL;
struct nfs4_stateid *lock_stp;
- struct file *filp;
+ struct nfs4_file *fp;
+ struct file *filp = NULL;
struct file_lock file_lock;
struct file_lock conflock;
__be32 status = 0;
@@ -3591,7 +3589,6 @@
* lock stateid.
*/
struct nfs4_stateid *open_stp = NULL;
- struct nfs4_file *fp;
status = nfserr_stale_clientid;
if (!nfsd4_has_session(cstate) &&
@@ -3634,6 +3631,7 @@
if (status)
goto out;
lock_sop = lock->lk_replay_owner;
+ fp = lock_stp->st_file;
}
/* lock->lk_replay_owner and lock_stp have been created or found */
@@ -3648,13 +3646,19 @@
switch (lock->lk_type) {
case NFS4_READ_LT:
case NFS4_READW_LT:
- filp = find_readable_file(lock_stp->st_file);
+ if (find_readable_file(lock_stp->st_file)) {
+ nfs4_get_vfs_file(rqstp, fp, &cstate->current_fh, NFS4_SHARE_ACCESS_READ);
+ filp = find_readable_file(lock_stp->st_file);
+ }
file_lock.fl_type = F_RDLCK;
cmd = F_SETLK;
break;
case NFS4_WRITE_LT:
case NFS4_WRITEW_LT:
- filp = find_writeable_file(lock_stp->st_file);
+ if (find_writeable_file(lock_stp->st_file)) {
+ nfs4_get_vfs_file(rqstp, fp, &cstate->current_fh, NFS4_SHARE_ACCESS_WRITE);
+ filp = find_writeable_file(lock_stp->st_file);
+ }
file_lock.fl_type = F_WRLCK;
cmd = F_SETLK;
break;
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
index cdfb8c6..c16f8d8 100644
--- a/fs/nfsd/nfsfh.h
+++ b/fs/nfsd/nfsfh.h
@@ -196,8 +196,6 @@
static inline void
fh_unlock(struct svc_fh *fhp)
{
- BUG_ON(!fhp->fh_dentry);
-
if (fhp->fh_locked) {
fill_post_wcc(fhp);
mutex_unlock(&fhp->fh_dentry->d_inode->i_mutex);
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 7731a75..322518c 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -363,23 +363,23 @@
* at all? */
static inline struct file *find_writeable_file(struct nfs4_file *f)
{
- if (f->fi_fds[O_RDWR])
- return f->fi_fds[O_RDWR];
- return f->fi_fds[O_WRONLY];
+ if (f->fi_fds[O_WRONLY])
+ return f->fi_fds[O_WRONLY];
+ return f->fi_fds[O_RDWR];
}
static inline struct file *find_readable_file(struct nfs4_file *f)
{
- if (f->fi_fds[O_RDWR])
- return f->fi_fds[O_RDWR];
- return f->fi_fds[O_RDONLY];
+ if (f->fi_fds[O_RDONLY])
+ return f->fi_fds[O_RDONLY];
+ return f->fi_fds[O_RDWR];
}
static inline struct file *find_any_file(struct nfs4_file *f)
{
if (f->fi_fds[O_RDWR])
return f->fi_fds[O_RDWR];
- else if (f->fi_fds[O_RDWR])
+ else if (f->fi_fds[O_WRONLY])
return f->fi_fds[O_WRONLY];
else
return f->fi_fds[O_RDONLY];
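[editor note: state.h keeps up to three struct file pointers per nfs4_file, indexed by open mode (O_RDONLY/O_WRONLY/O_RDWR are 0, 1 and 2 on Linux). The patch flips the lookup order to prefer the fd matching the requested mode and also repairs find_any_file(), whose second branch re-tested O_RDWR instead of O_WRONLY. A toy model of the accessor:

    #include <stdio.h>
    #include <fcntl.h>

    /* hypothetical stand-in for struct nfs4_file's per-mode fd cache */
    struct file_cache { const char *fds[3]; }; /* indexed by open mode */

    static const char *find_writeable(struct file_cache *f)
    {
            if (f->fds[O_WRONLY])           /* prefer the exact open mode ... */
                    return f->fds[O_WRONLY];
            return f->fds[O_RDWR];          /* ... fall back to read-write */
    }

    int main(void)
    {
            struct file_cache f = { .fds = { "ro", "wo", "rw" } };
            printf("%s\n", find_writeable(&f)); /* "wo" */
            return 0;
    }
]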
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 96360a8..661a6cf 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -2033,15 +2033,17 @@
__be32
nfsd_statfs(struct svc_rqst *rqstp, struct svc_fh *fhp, struct kstatfs *stat, int access)
{
- struct path path = {
- .mnt = fhp->fh_export->ex_path.mnt,
- .dentry = fhp->fh_dentry,
- };
__be32 err;
err = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP | access);
- if (!err && vfs_statfs(&path, stat))
- err = nfserr_io;
+ if (!err) {
+ struct path path = {
+ .mnt = fhp->fh_export->ex_path.mnt,
+ .dentry = fhp->fh_dentry,
+ };
+ if (vfs_statfs(&path, stat))
+ err = nfserr_io;
+ }
return err;
}
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index 4317f17..ba7c10c 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -446,6 +446,7 @@
nilfs_mdt_destroy(nilfs->ns_cpfile);
nilfs_mdt_destroy(nilfs->ns_sufile);
nilfs_mdt_destroy(nilfs->ns_dat);
+ nilfs_mdt_destroy(nilfs->ns_gc_dat);
failed:
nilfs_clear_recovery_info(&ri);
diff --git a/fs/notify/Kconfig b/fs/notify/Kconfig
index 22c629e..b388443 100644
--- a/fs/notify/Kconfig
+++ b/fs/notify/Kconfig
@@ -3,4 +3,4 @@
source "fs/notify/dnotify/Kconfig"
source "fs/notify/inotify/Kconfig"
-source "fs/notify/fanotify/Kconfig"
+#source "fs/notify/fanotify/Kconfig"
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index 756566f..85366c7 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -165,9 +165,6 @@
"mask=%x data=%p data_type=%d\n", __func__, group, to_tell,
inode_mark, vfsmnt_mark, event_mask, data, data_type);
- pr_debug("%s: group=%p vfsmount_mark=%p inode_mark=%p mask=%x\n",
- __func__, group, vfsmnt_mark, inode_mark, event_mask);
-
/* sorry, fanotify only gives a damn about files and dirs */
if (!S_ISREG(to_tell->i_mode) &&
!S_ISDIR(to_tell->i_mode))
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 032b837..5ed8e58 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -195,6 +195,14 @@
re->fd = fd;
mutex_lock(&group->fanotify_data.access_mutex);
+
+ if (group->fanotify_data.bypass_perm) {
+ mutex_unlock(&group->fanotify_data.access_mutex);
+ kmem_cache_free(fanotify_response_event_cache, re);
+ event->response = FAN_ALLOW;
+ return 0;
+ }
+
list_add_tail(&re->list, &group->fanotify_data.access_list);
mutex_unlock(&group->fanotify_data.access_mutex);
@@ -364,9 +372,28 @@
static int fanotify_release(struct inode *ignored, struct file *file)
{
struct fsnotify_group *group = file->private_data;
+ struct fanotify_response_event *re, *lre;
pr_debug("%s: file=%p group=%p\n", __func__, file, group);
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+ mutex_lock(&group->fanotify_data.access_mutex);
+
+ group->fanotify_data.bypass_perm = true;
+
+ list_for_each_entry_safe(re, lre, &group->fanotify_data.access_list, list) {
+ pr_debug("%s: found group=%p re=%p event=%p\n", __func__, group,
+ re, re->event);
+
+ list_del_init(&re->list);
+ re->event->response = FAN_ALLOW;
+
+ kmem_cache_free(fanotify_response_event_cache, re);
+ }
+ mutex_unlock(&group->fanotify_data.access_mutex);
+
+ wake_up(&group->fanotify_data.access_waitq);
+#endif
/* matches the fanotify_init->fsnotify_alloc_group */
fsnotify_put_group(group);
@@ -614,7 +641,7 @@
__func__, flags, event_f_flags);
if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
+ return -EPERM;
if (flags & ~FAN_ALL_INIT_FLAGS)
return -EINVAL;
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 3970392..3680242 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -148,13 +148,14 @@
const unsigned char *file_name,
struct fsnotify_event **event)
{
- struct fsnotify_group *group = inode_mark->group;
- __u32 inode_test_mask = (mask & ~FS_EVENT_ON_CHILD);
- __u32 vfsmount_test_mask = (mask & ~FS_EVENT_ON_CHILD);
+ struct fsnotify_group *group = NULL;
+ __u32 inode_test_mask = 0;
+ __u32 vfsmount_test_mask = 0;
- pr_debug("%s: group=%p to_tell=%p mnt=%p mark=%p mask=%x data=%p"
- " data_is=%d cookie=%d event=%p\n", __func__, group, to_tell,
- mnt, inode_mark, mask, data, data_is, cookie, *event);
+ if (unlikely(!inode_mark && !vfsmount_mark)) {
+ BUG();
+ return 0;
+ }
/* clear ignored on inode modification */
if (mask & FS_MODIFY) {
@@ -168,18 +169,29 @@
/* does the inode mark tell us to do something? */
if (inode_mark) {
+ group = inode_mark->group;
+ inode_test_mask = (mask & ~FS_EVENT_ON_CHILD);
inode_test_mask &= inode_mark->mask;
inode_test_mask &= ~inode_mark->ignored_mask;
}
/* does the vfsmount_mark tell us to do something? */
if (vfsmount_mark) {
+ vfsmount_test_mask = (mask & ~FS_EVENT_ON_CHILD);
+ group = vfsmount_mark->group;
vfsmount_test_mask &= vfsmount_mark->mask;
vfsmount_test_mask &= ~vfsmount_mark->ignored_mask;
if (inode_mark)
vfsmount_test_mask &= ~inode_mark->ignored_mask;
}
+ pr_debug("%s: group=%p to_tell=%p mnt=%p mask=%x inode_mark=%p"
+ " inode_test_mask=%x vfsmount_mark=%p vfsmount_test_mask=%x"
+ " data=%p data_is=%d cookie=%d event=%p\n",
+ __func__, group, to_tell, mnt, mask, inode_mark,
+ inode_test_mask, vfsmount_mark, vfsmount_test_mask, data,
+ data_is, cookie, *event);
+
if (!inode_test_mask && !vfsmount_test_mask)
return 0;
@@ -207,13 +219,12 @@
int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
const unsigned char *file_name, u32 cookie)
{
- struct hlist_node *inode_node, *vfsmount_node;
+ struct hlist_node *inode_node = NULL, *vfsmount_node = NULL;
struct fsnotify_mark *inode_mark = NULL, *vfsmount_mark = NULL;
struct fsnotify_group *inode_group, *vfsmount_group;
struct fsnotify_event *event = NULL;
struct vfsmount *mnt;
int idx, ret = 0;
- bool used_inode = false, used_vfsmount = false;
/* global tests shouldn't care about events on child only the specific event */
__u32 test_mask = (mask & ~FS_EVENT_ON_CHILD);
@@ -238,57 +249,50 @@
(test_mask & to_tell->i_fsnotify_mask))
inode_node = srcu_dereference(to_tell->i_fsnotify_marks.first,
&fsnotify_mark_srcu);
- else
- inode_node = NULL;
- if (mnt) {
- if ((mask & FS_MODIFY) ||
- (test_mask & mnt->mnt_fsnotify_mask))
- vfsmount_node = srcu_dereference(mnt->mnt_fsnotify_marks.first,
- &fsnotify_mark_srcu);
- else
- vfsmount_node = NULL;
- } else {
- mnt = NULL;
- vfsmount_node = NULL;
+ if (mnt && ((mask & FS_MODIFY) ||
+ (test_mask & mnt->mnt_fsnotify_mask))) {
+ vfsmount_node = srcu_dereference(mnt->mnt_fsnotify_marks.first,
+ &fsnotify_mark_srcu);
+ inode_node = srcu_dereference(to_tell->i_fsnotify_marks.first,
+ &fsnotify_mark_srcu);
}
while (inode_node || vfsmount_node) {
+ inode_group = vfsmount_group = NULL;
+
if (inode_node) {
inode_mark = hlist_entry(srcu_dereference(inode_node, &fsnotify_mark_srcu),
struct fsnotify_mark, i.i_list);
inode_group = inode_mark->group;
- } else
- inode_group = (void *)-1;
+ }
if (vfsmount_node) {
vfsmount_mark = hlist_entry(srcu_dereference(vfsmount_node, &fsnotify_mark_srcu),
struct fsnotify_mark, m.m_list);
vfsmount_group = vfsmount_mark->group;
- } else
- vfsmount_group = (void *)-1;
+ }
- if (inode_group < vfsmount_group) {
+ if (inode_group > vfsmount_group) {
/* handle inode */
send_to_group(to_tell, NULL, inode_mark, NULL, mask, data,
data_is, cookie, file_name, &event);
- used_inode = true;
- } else if (vfsmount_group < inode_group) {
+ /* we didn't use the vfsmount_mark */
+ vfsmount_group = NULL;
+ } else if (vfsmount_group > inode_group) {
send_to_group(to_tell, mnt, NULL, vfsmount_mark, mask, data,
data_is, cookie, file_name, &event);
- used_vfsmount = true;
+ inode_group = NULL;
} else {
send_to_group(to_tell, mnt, inode_mark, vfsmount_mark,
mask, data, data_is, cookie, file_name,
&event);
- used_vfsmount = true;
- used_inode = true;
}
- if (used_inode)
+ if (inode_group)
inode_node = srcu_dereference(inode_node->next,
&fsnotify_mark_srcu);
- if (used_vfsmount)
+ if (vfsmount_group)
vfsmount_node = srcu_dereference(vfsmount_node->next,
&fsnotify_mark_srcu);
}
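[editor note: the rewritten fsnotify() walk merges the inode and vfsmount mark lists, both kept sorted by group, so a group holding marks on both objects gets a single combined event instead of two. The control flow is the standard two-cursor sorted-merge; a sketch with integer ids standing in for group pointers, and descending order assumed to mirror the '>' comparisons above:

    #include <stdio.h>

    /* two mark lists, each sorted by (hypothetical) group id; 0 terminates */
    static int inode_groups[]    = { 9, 7, 4, 0 };
    static int vfsmount_groups[] = { 9, 5, 4, 0 };

    int main(void)
    {
            int i = 0, v = 0;

            while (inode_groups[i] || vfsmount_groups[v]) {
                    int ig = inode_groups[i], vg = vfsmount_groups[v];

                    if (ig > vg) {
                            printf("group %d: inode mark only\n", ig);
                            i++;
                    } else if (vg > ig) {
                            printf("group %d: vfsmount mark only\n", vg);
                            v++;
                    } else {        /* same group on both lists */
                            printf("group %d: both marks, one event\n", ig);
                            i++;
                            v++;
                    }
            }
            return 0;
    }
]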
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index a76e0aa..3919150 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -209,7 +209,10 @@
}
inode->i_mode = new_mode;
+ inode->i_ctime = CURRENT_TIME;
di->i_mode = cpu_to_le16(inode->i_mode);
+ di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
+ di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
ocfs2_journal_dirty(handle, di_bh);
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 215e12c..592fae5 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -6672,7 +6672,7 @@
last_page_bytes = PAGE_ALIGN(end);
index = start >> PAGE_CACHE_SHIFT;
do {
- pages[numpages] = grab_cache_page(mapping, index);
+ pages[numpages] = find_or_create_page(mapping, index, GFP_NOFS);
if (!pages[numpages]) {
ret = -ENOMEM;
mlog_errno(ret);
diff --git a/fs/ocfs2/blockcheck.c b/fs/ocfs2/blockcheck.c
index ec6d123..c7ee03c 100644
--- a/fs/ocfs2/blockcheck.c
+++ b/fs/ocfs2/blockcheck.c
@@ -439,7 +439,7 @@
ocfs2_blockcheck_inc_failure(stats);
mlog(ML_ERROR,
- "CRC32 failed: stored: %u, computed %u. Applying ECC.\n",
+ "CRC32 failed: stored: 0x%x, computed 0x%x. Applying ECC.\n",
(unsigned int)check.bc_crc32e, (unsigned int)crc);
/* Ok, try ECC fixups */
@@ -453,7 +453,7 @@
goto out;
}
- mlog(ML_ERROR, "Fixed CRC32 failed: stored: %u, computed %u\n",
+ mlog(ML_ERROR, "Fixed CRC32 failed: stored: 0x%x, computed 0x%x\n",
(unsigned int)check.bc_crc32e, (unsigned int)crc);
rc = -EIO;
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 1361997..cbe2f05 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -977,7 +977,7 @@
int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec,
size_t caller_veclen, u8 target_node, int *status)
{
- int ret;
+ int ret = 0;
struct o2net_msg *msg = NULL;
size_t veclen, caller_bytes = 0;
struct kvec *vec = NULL;
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index f04ebcf..c49f6de 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -3931,6 +3931,15 @@
goto out_commit;
}
+ cpos = split_hash;
+ ret = ocfs2_dx_dir_new_cluster(dir, &et, cpos, handle,
+ data_ac, meta_ac, new_dx_leaves,
+ num_dx_leaves);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
for (i = 0; i < num_dx_leaves; i++) {
ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
orig_dx_leaves[i],
@@ -3939,15 +3948,14 @@
mlog_errno(ret);
goto out_commit;
}
- }
- cpos = split_hash;
- ret = ocfs2_dx_dir_new_cluster(dir, &et, cpos, handle,
- data_ac, meta_ac, new_dx_leaves,
- num_dx_leaves);
- if (ret) {
- mlog_errno(ret);
- goto out_commit;
+ ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
+ new_dx_leaves[i],
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
}
ocfs2_dx_dir_transfer_leaf(dir, split_hash, handle, tmp_dx_leaf,
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index 4b6ae2c..7652989 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -1030,6 +1030,7 @@
struct dlm_lock_resource *res);
void dlm_clean_master_list(struct dlm_ctxt *dlm,
u8 dead_node);
+void dlm_force_free_mles(struct dlm_ctxt *dlm);
int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock);
int __dlm_lockres_has_locks(struct dlm_lock_resource *res);
int __dlm_lockres_unused(struct dlm_lock_resource *res);
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index 5efdd37..901ca52 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -636,8 +636,14 @@
spin_lock(&dlm->track_lock);
if (oldres)
track_list = &oldres->tracking;
- else
+ else {
track_list = &dlm->tracking_list;
+ if (list_empty(track_list)) {
+ dl = NULL;
+ spin_unlock(&dlm->track_lock);
+ goto bail;
+ }
+ }
list_for_each_entry(res, track_list, tracking) {
if (&res->tracking == &dlm->tracking_list)
@@ -660,6 +666,7 @@
} else
dl = NULL;
+bail:
/* passed to seq_show */
return dl;
}
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 153abb5..11a5c87 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -693,6 +693,7 @@
dlm_mark_domain_leaving(dlm);
dlm_leave_domain(dlm);
+ dlm_force_free_mles(dlm);
dlm_complete_dlm_shutdown(dlm);
}
dlm_put(dlm);
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index ffb4c68..f564b0e 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -3433,3 +3433,43 @@
wake_up(&res->wq);
wake_up(&dlm->migration_wq);
}
+
+void dlm_force_free_mles(struct dlm_ctxt *dlm)
+{
+ int i;
+ struct hlist_head *bucket;
+ struct dlm_master_list_entry *mle;
+ struct hlist_node *tmp, *list;
+
+ /*
+ * We notified all other nodes that we are exiting the domain and
+ * marked the dlm state to DLM_CTXT_LEAVING. If any mles are still
+ * around we force free them and wake any processes that are waiting
+ * on the mles
+ */
+ spin_lock(&dlm->spinlock);
+ spin_lock(&dlm->master_lock);
+
+ BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING);
+ BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES));
+
+ for (i = 0; i < DLM_HASH_BUCKETS; i++) {
+ bucket = dlm_master_hash(dlm, i);
+ hlist_for_each_safe(list, tmp, bucket) {
+ mle = hlist_entry(list, struct dlm_master_list_entry,
+ master_hash_node);
+ if (mle->type != DLM_MLE_BLOCK) {
+ mlog(ML_ERROR, "bad mle: %p\n", mle);
+ dlm_print_one_mle(mle);
+ }
+ atomic_set(&mle->woken, 1);
+ wake_up(&mle->wq);
+
+ __dlm_unlink_mle(dlm, mle);
+ __dlm_mle_detach_hb_events(dlm, mle);
+ __dlm_put_mle(mle);
+ }
+ }
+ spin_unlock(&dlm->master_lock);
+ spin_unlock(&dlm->spinlock);
+}
diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h
index d1ce48e..1d596d8 100644
--- a/fs/ocfs2/dlmglue.h
+++ b/fs/ocfs2/dlmglue.h
@@ -84,6 +84,7 @@
OI_LS_PARENT,
OI_LS_RENAME1,
OI_LS_RENAME2,
+ OI_LS_REFLINK_TARGET,
};
int ocfs2_dlm_init(struct ocfs2_super *osb);
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 81296b4..9a03c15 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -36,6 +36,7 @@
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/quotaops.h>
+#include <linux/blkdev.h>
#define MLOG_MASK_PREFIX ML_INODE
#include <cluster/masklog.h>
@@ -190,8 +191,16 @@
if (err)
goto bail;
- if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
+ if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) {
+ /*
+ * We still have to flush drive's caches to get data to the
+ * platter
+ */
+ if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER)
+ blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL,
+ NULL, BLKDEV_IFL_WAIT);
goto bail;
+ }
journal = osb->journal->j_journal;
err = jbd2_journal_force_commit(journal);
@@ -774,7 +783,7 @@
BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT));
BUG_ON(abs_from & (inode->i_blkbits - 1));
- page = grab_cache_page(mapping, index);
+ page = find_or_create_page(mapping, index, GFP_NOFS);
if (!page) {
ret = -ENOMEM;
mlog_errno(ret);
@@ -2329,7 +2338,7 @@
BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
- ((file->f_flags & O_DIRECT) && has_refcount)) {
+ ((file->f_flags & O_DIRECT) && !direct_io)) {
ret = filemap_fdatawrite_range(file->f_mapping, pos,
pos + count - 1);
if (ret < 0)
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 0492464..eece3e0 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -488,7 +488,11 @@
OCFS2_BH_IGNORE_CACHE);
} else {
status = ocfs2_read_blocks_sync(osb, args->fi_blkno, 1, &bh);
- if (!status)
+ /*
+ * If buffer is in jbd, then its checksum may not have been
+ * computed as yet.
+ */
+ if (!status && !buffer_jbd(bh))
status = ocfs2_validate_inode_block(osb->sb, bh);
}
if (status < 0) {
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index af2b8fe..4c18f4a 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -74,9 +74,11 @@
/*
* Another node might have truncated while we were waiting on
* cluster locks.
+ * We don't check size == 0 before the shift. This is borrowed
+ * from do_generic_file_read.
*/
- last_index = size >> PAGE_CACHE_SHIFT;
- if (page->index > last_index) {
+ last_index = (size - 1) >> PAGE_CACHE_SHIFT;
+ if (unlikely(!size || page->index > last_index)) {
ret = -EINVAL;
goto out;
}
@@ -107,7 +109,7 @@
* because the "write" would invalidate their data.
*/
if (page->index == last_index)
- len = size & ~PAGE_CACHE_MASK;
+ len = ((size - 1) & ~PAGE_CACHE_MASK) + 1;
ret = ocfs2_write_begin_nolock(mapping, pos, len, 0, &locked_page,
&fsdata, di_bh, page);
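[editor note: the mmap.c change fixes an off-by-one at exact page-size multiples: with size = N * PAGE_SIZE, the old size >> PAGE_SHIFT named a page one past EOF as still valid, and the size & ~PAGE_MASK length computed for it came out 0. Deriving both values from size - 1 handles the boundary, with size == 0 tested separately. A quick check of the arithmetic:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long size = 2 * PAGE_SIZE;     /* exactly two pages */

            unsigned long old_last = size >> PAGE_SHIFT;            /* 2: one past EOF */
            unsigned long new_last = (size - 1) >> PAGE_SHIFT;      /* 1: last real page */
            unsigned long old_len  = size & ~PAGE_MASK;             /* 0: wrong */
            unsigned long new_len  = ((size - 1) & ~PAGE_MASK) + 1; /* 4096 */

            printf("last: %lu vs %lu, len: %lu vs %lu\n",
                   old_last, new_last, old_len, new_len);
            return 0;
    }
]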
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index f171b51..a00dda2 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -472,32 +472,23 @@
return status;
}
-static int ocfs2_mknod_locked(struct ocfs2_super *osb,
- struct inode *dir,
- struct inode *inode,
- dev_t dev,
- struct buffer_head **new_fe_bh,
- struct buffer_head *parent_fe_bh,
- handle_t *handle,
- struct ocfs2_alloc_context *inode_ac)
+static int __ocfs2_mknod_locked(struct inode *dir,
+ struct inode *inode,
+ dev_t dev,
+ struct buffer_head **new_fe_bh,
+ struct buffer_head *parent_fe_bh,
+ handle_t *handle,
+ struct ocfs2_alloc_context *inode_ac,
+ u64 fe_blkno, u64 suballoc_loc, u16 suballoc_bit)
{
int status = 0;
+ struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
struct ocfs2_dinode *fe = NULL;
struct ocfs2_extent_list *fel;
- u64 suballoc_loc, fe_blkno = 0;
- u16 suballoc_bit;
u16 feat;
*new_fe_bh = NULL;
- status = ocfs2_claim_new_inode(handle, dir, parent_fe_bh,
- inode_ac, &suballoc_loc,
- &suballoc_bit, &fe_blkno);
- if (status < 0) {
- mlog_errno(status);
- goto leave;
- }
-
/* populate as many fields early on as possible - many of
* these are used by the support functions here and in
* callers. */
@@ -591,6 +582,34 @@
return status;
}
+static int ocfs2_mknod_locked(struct ocfs2_super *osb,
+ struct inode *dir,
+ struct inode *inode,
+ dev_t dev,
+ struct buffer_head **new_fe_bh,
+ struct buffer_head *parent_fe_bh,
+ handle_t *handle,
+ struct ocfs2_alloc_context *inode_ac)
+{
+ int status = 0;
+ u64 suballoc_loc, fe_blkno = 0;
+ u16 suballoc_bit;
+
+ *new_fe_bh = NULL;
+
+ status = ocfs2_claim_new_inode(handle, dir, parent_fe_bh,
+ inode_ac, &suballoc_loc,
+ &suballoc_bit, &fe_blkno);
+ if (status < 0) {
+ mlog_errno(status);
+ return status;
+ }
+
+ return __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh,
+ parent_fe_bh, handle, inode_ac,
+ fe_blkno, suballoc_loc, suballoc_bit);
+}
+
static int ocfs2_mkdir(struct inode *dir,
struct dentry *dentry,
int mode)
@@ -1852,61 +1871,117 @@
return status;
}
+static int ocfs2_lookup_lock_orphan_dir(struct ocfs2_super *osb,
+ struct inode **ret_orphan_dir,
+ struct buffer_head **ret_orphan_dir_bh)
+{
+ struct inode *orphan_dir_inode;
+ struct buffer_head *orphan_dir_bh = NULL;
+ int ret = 0;
+
+ orphan_dir_inode = ocfs2_get_system_file_inode(osb,
+ ORPHAN_DIR_SYSTEM_INODE,
+ osb->slot_num);
+ if (!orphan_dir_inode) {
+ ret = -ENOENT;
+ mlog_errno(ret);
+ return ret;
+ }
+
+ mutex_lock(&orphan_dir_inode->i_mutex);
+
+ ret = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1);
+ if (ret < 0) {
+ mutex_unlock(&orphan_dir_inode->i_mutex);
+ iput(orphan_dir_inode);
+
+ mlog_errno(ret);
+ return ret;
+ }
+
+ *ret_orphan_dir = orphan_dir_inode;
+ *ret_orphan_dir_bh = orphan_dir_bh;
+
+ return 0;
+}
+
+static int __ocfs2_prepare_orphan_dir(struct inode *orphan_dir_inode,
+ struct buffer_head *orphan_dir_bh,
+ u64 blkno,
+ char *name,
+ struct ocfs2_dir_lookup_result *lookup)
+{
+ int ret;
+ struct ocfs2_super *osb = OCFS2_SB(orphan_dir_inode->i_sb);
+
+ ret = ocfs2_blkno_stringify(blkno, name);
+ if (ret < 0) {
+ mlog_errno(ret);
+ return ret;
+ }
+
+ ret = ocfs2_prepare_dir_for_insert(osb, orphan_dir_inode,
+ orphan_dir_bh, name,
+ OCFS2_ORPHAN_NAMELEN, lookup);
+ if (ret < 0) {
+ mlog_errno(ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ocfs2_prepare_orphan_dir() - Prepare an orphan directory for
+ * insertion of an orphan.
+ * @osb: ocfs2 file system
+ * @ret_orphan_dir: Orphan dir inode - returned locked!
+ * @blkno: Actual block number of the inode to be inserted into orphan dir.
+ * @lookup: dir lookup result, to be passed back into functions like
+ * ocfs2_orphan_add
+ *
+ * Returns zero on success and the ret_orphan_dir, name and lookup
+ * fields will be populated.
+ *
+ * Returns non-zero on failure.
+ */
static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb,
struct inode **ret_orphan_dir,
u64 blkno,
char *name,
struct ocfs2_dir_lookup_result *lookup)
{
- struct inode *orphan_dir_inode;
+ struct inode *orphan_dir_inode = NULL;
struct buffer_head *orphan_dir_bh = NULL;
- int status = 0;
+ int ret = 0;
- status = ocfs2_blkno_stringify(blkno, name);
- if (status < 0) {
- mlog_errno(status);
- return status;
+ ret = ocfs2_lookup_lock_orphan_dir(osb, &orphan_dir_inode,
+ &orphan_dir_bh);
+ if (ret < 0) {
+ mlog_errno(ret);
+ return ret;
}
- orphan_dir_inode = ocfs2_get_system_file_inode(osb,
- ORPHAN_DIR_SYSTEM_INODE,
- osb->slot_num);
- if (!orphan_dir_inode) {
- status = -ENOENT;
- mlog_errno(status);
- return status;
- }
-
- mutex_lock(&orphan_dir_inode->i_mutex);
-
- status = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1);
- if (status < 0) {
- mlog_errno(status);
- goto leave;
- }
-
- status = ocfs2_prepare_dir_for_insert(osb, orphan_dir_inode,
- orphan_dir_bh, name,
- OCFS2_ORPHAN_NAMELEN, lookup);
- if (status < 0) {
- ocfs2_inode_unlock(orphan_dir_inode, 1);
-
- mlog_errno(status);
- goto leave;
+ ret = __ocfs2_prepare_orphan_dir(orphan_dir_inode, orphan_dir_bh,
+ blkno, name, lookup);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
}
*ret_orphan_dir = orphan_dir_inode;
-leave:
- if (status) {
+out:
+ brelse(orphan_dir_bh);
+
+ if (ret) {
+ ocfs2_inode_unlock(orphan_dir_inode, 1);
mutex_unlock(&orphan_dir_inode->i_mutex);
iput(orphan_dir_inode);
}
- brelse(orphan_dir_bh);
-
- mlog_exit(status);
- return status;
+ mlog_exit(ret);
+ return ret;
}
static int ocfs2_orphan_add(struct ocfs2_super *osb,
@@ -2053,6 +2128,99 @@
return status;
}
+/**
+ * ocfs2_prep_new_orphaned_file() - Prepare the orphan dir to receive a newly
+ * allocated file. This is different from the typical 'add to orphan dir'
+ * operation in that the inode does not yet exist. This is a problem because
+ * the orphan dir stringifies the inode block number to come up with its
+ * dirent. Obviously if the inode does not yet exist we have a chicken and egg
+ * problem. This function works around it by calling deeper into the orphan
+ * and suballoc code than other callers. Use this only when necessary.
+ * @dir: The directory which this inode will ultimately wind up under - not the
+ * orphan dir!
+ * @dir_bh: buffer_head of the @dir inode block
+ * @orphan_name: string of length (OCFS2_ORPHAN_NAMELEN + 1). Will be filled
+ * with the string to be used for the orphan dirent. Pass back to the orphan
+ * dir code.
+ * @ret_orphan_dir: orphan dir inode returned to be passed back into orphan
+ * dir code.
+ * @ret_di_blkno: block number where the new inode will be allocated.
+ * @orphan_insert: Dir insert context to be passed back into orphan dir code.
+ * @ret_inode_ac: Inode alloc context to be passed back to the allocator.
+ *
+ * Returns zero on success and the ret_orphan_dir, name and lookup
+ * fields will be populated.
+ *
+ * Returns non-zero on failure.
+ */
+static int ocfs2_prep_new_orphaned_file(struct inode *dir,
+ struct buffer_head *dir_bh,
+ char *orphan_name,
+ struct inode **ret_orphan_dir,
+ u64 *ret_di_blkno,
+ struct ocfs2_dir_lookup_result *orphan_insert,
+ struct ocfs2_alloc_context **ret_inode_ac)
+{
+ int ret;
+ u64 di_blkno;
+ struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
+ struct inode *orphan_dir = NULL;
+ struct buffer_head *orphan_dir_bh = NULL;
+ struct ocfs2_alloc_context *inode_ac = NULL;
+
+ ret = ocfs2_lookup_lock_orphan_dir(osb, &orphan_dir, &orphan_dir_bh);
+ if (ret < 0) {
+ mlog_errno(ret);
+ return ret;
+ }
+
+ /* reserve an inode spot */
+ ret = ocfs2_reserve_new_inode(osb, &inode_ac);
+ if (ret < 0) {
+ if (ret != -ENOSPC)
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_find_new_inode_loc(dir, dir_bh, inode_ac,
+ &di_blkno);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = __ocfs2_prepare_orphan_dir(orphan_dir, orphan_dir_bh,
+ di_blkno, orphan_name, orphan_insert);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+out:
+ if (ret == 0) {
+ *ret_orphan_dir = orphan_dir;
+ *ret_di_blkno = di_blkno;
+ *ret_inode_ac = inode_ac;
+ /*
+ * orphan_name and orphan_insert are already up to
+ * date via prepare_orphan_dir
+ */
+ } else {
+ /* Unroll reserve_new_inode* */
+ if (inode_ac)
+ ocfs2_free_alloc_context(inode_ac);
+
+ /* Unroll orphan dir locking */
+ mutex_unlock(&orphan_dir->i_mutex);
+ ocfs2_inode_unlock(orphan_dir, 1);
+ iput(orphan_dir);
+ }
+
+ brelse(orphan_dir_bh);
+
+ return ret;
+}
+
int ocfs2_create_inode_in_orphan(struct inode *dir,
int mode,
struct inode **new_inode)
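A condensed sketch of the resulting call ordering in
ocfs2_create_inode_in_orphan(), pieced together from the hunks in this patch
(variable setup and error handling omitted):

    ocfs2_reserve_new_inode(osb, &inode_ac);           /* reserve a spot    */
    ocfs2_find_new_inode_loc(dir, dir_bh, inode_ac,
                             &di_blkno);               /* pick, don't claim */
    __ocfs2_prepare_orphan_dir(orphan_dir, orphan_dir_bh,
                               di_blkno, orphan_name,
                               orphan_insert);         /* name from blkno   */
    /* ... transaction started ... */
    ocfs2_claim_new_inode_at_loc(handle, dir, inode_ac,
                                 &suballoc_loc, &suballoc_bit,
                                 di_blkno);            /* now claim it      */
    __ocfs2_mknod_locked(dir, inode, 0, &new_di_bh, parent_di_bh,
                         handle, inode_ac, di_blkno,
                         suballoc_loc, suballoc_bit);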
@@ -2068,6 +2236,8 @@
struct buffer_head *new_di_bh = NULL;
struct ocfs2_alloc_context *inode_ac = NULL;
struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
+ u64 uninitialized_var(di_blkno), suballoc_loc;
+ u16 suballoc_bit;
status = ocfs2_inode_lock(dir, &parent_di_bh, 1);
if (status < 0) {
@@ -2076,20 +2246,9 @@
return status;
}
- /*
- * We give the orphan dir the root blkno to fake an orphan name,
- * and allocate enough space for our insertion.
- */
- status = ocfs2_prepare_orphan_dir(osb, &orphan_dir,
- osb->root_blkno,
- orphan_name, &orphan_insert);
- if (status < 0) {
- mlog_errno(status);
- goto leave;
- }
-
- /* reserve an inode spot */
- status = ocfs2_reserve_new_inode(osb, &inode_ac);
+ status = ocfs2_prep_new_orphaned_file(dir, parent_di_bh,
+ orphan_name, &orphan_dir,
+ &di_blkno, &orphan_insert, &inode_ac);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
@@ -2116,17 +2275,20 @@
goto leave;
did_quota_inode = 1;
- inode->i_nlink = 0;
- /* do the real work now. */
- status = ocfs2_mknod_locked(osb, dir, inode,
- 0, &new_di_bh, parent_di_bh, handle,
- inode_ac);
+ status = ocfs2_claim_new_inode_at_loc(handle, dir, inode_ac,
+ &suballoc_loc,
+ &suballoc_bit, di_blkno);
if (status < 0) {
mlog_errno(status);
goto leave;
}
- status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, orphan_name);
+ inode->i_nlink = 0;
+ /* do the real work now. */
+ status = __ocfs2_mknod_locked(dir, inode,
+ 0, &new_di_bh, parent_di_bh, handle,
+ inode_ac, di_blkno, suballoc_loc,
+ suballoc_bit);
if (status < 0) {
mlog_errno(status);
goto leave;
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index 33f1c9a..fa31d05 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -235,18 +235,31 @@
#define OCFS2_HAS_REFCOUNT_FL (0x0010)
/* Inode attributes, keep in sync with EXT2 */
-#define OCFS2_SECRM_FL (0x00000001) /* Secure deletion */
-#define OCFS2_UNRM_FL (0x00000002) /* Undelete */
-#define OCFS2_COMPR_FL (0x00000004) /* Compress file */
-#define OCFS2_SYNC_FL (0x00000008) /* Synchronous updates */
-#define OCFS2_IMMUTABLE_FL (0x00000010) /* Immutable file */
-#define OCFS2_APPEND_FL (0x00000020) /* writes to file may only append */
-#define OCFS2_NODUMP_FL (0x00000040) /* do not dump file */
-#define OCFS2_NOATIME_FL (0x00000080) /* do not update atime */
-#define OCFS2_DIRSYNC_FL (0x00010000) /* dirsync behaviour (directories only) */
+#define OCFS2_SECRM_FL FS_SECRM_FL /* Secure deletion */
+#define OCFS2_UNRM_FL FS_UNRM_FL /* Undelete */
+#define OCFS2_COMPR_FL FS_COMPR_FL /* Compress file */
+#define OCFS2_SYNC_FL FS_SYNC_FL /* Synchronous updates */
+#define OCFS2_IMMUTABLE_FL FS_IMMUTABLE_FL /* Immutable file */
+#define OCFS2_APPEND_FL FS_APPEND_FL /* writes to file may only append */
+#define OCFS2_NODUMP_FL FS_NODUMP_FL /* do not dump file */
+#define OCFS2_NOATIME_FL FS_NOATIME_FL /* do not update atime */
+/* Reserved for compression usage... */
+#define OCFS2_DIRTY_FL FS_DIRTY_FL
+#define OCFS2_COMPRBLK_FL FS_COMPRBLK_FL /* One or more compressed clusters */
+#define OCFS2_NOCOMP_FL FS_NOCOMP_FL /* Don't compress */
+#define OCFS2_ECOMPR_FL FS_ECOMPR_FL /* Compression error */
+/* End compression flags --- maybe not all used */
+#define OCFS2_BTREE_FL FS_BTREE_FL /* btree format dir */
+#define OCFS2_INDEX_FL FS_INDEX_FL /* hash-indexed directory */
+#define OCFS2_IMAGIC_FL FS_IMAGIC_FL /* AFS directory */
+#define OCFS2_JOURNAL_DATA_FL FS_JOURNAL_DATA_FL /* Reserved for ext3 */
+#define OCFS2_NOTAIL_FL FS_NOTAIL_FL /* file tail should not be merged */
+#define OCFS2_DIRSYNC_FL FS_DIRSYNC_FL /* dirsync behaviour (directories only) */
+#define OCFS2_TOPDIR_FL FS_TOPDIR_FL /* Top of directory hierarchies */
+#define OCFS2_RESERVED_FL FS_RESERVED_FL /* reserved for ext2 lib */
-#define OCFS2_FL_VISIBLE (0x000100FF) /* User visible flags */
-#define OCFS2_FL_MODIFIABLE (0x000100FF) /* User modifiable flags */
+#define OCFS2_FL_VISIBLE FS_FL_USER_VISIBLE /* User visible flags */
+#define OCFS2_FL_MODIFIABLE FS_FL_USER_MODIFIABLE /* User modifiable flags */
/*
* Extent record flags (e_node.leaf.flags)
diff --git a/fs/ocfs2/ocfs2_ioctl.h b/fs/ocfs2/ocfs2_ioctl.h
index 2d3420a..5d24150 100644
--- a/fs/ocfs2/ocfs2_ioctl.h
+++ b/fs/ocfs2/ocfs2_ioctl.h
@@ -23,10 +23,10 @@
/*
* ioctl commands
*/
-#define OCFS2_IOC_GETFLAGS _IOR('f', 1, long)
-#define OCFS2_IOC_SETFLAGS _IOW('f', 2, long)
-#define OCFS2_IOC32_GETFLAGS _IOR('f', 1, int)
-#define OCFS2_IOC32_SETFLAGS _IOW('f', 2, int)
+#define OCFS2_IOC_GETFLAGS FS_IOC_GETFLAGS
+#define OCFS2_IOC_SETFLAGS FS_IOC_SETFLAGS
+#define OCFS2_IOC32_GETFLAGS FS_IOC32_GETFLAGS
+#define OCFS2_IOC32_SETFLAGS FS_IOC32_SETFLAGS
/*
* Space reservation / allocation / free ioctls and argument structure
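Since the ocfs2 values now alias the generic FS_* definitions, the generic
flags ioctls apply directly. A hedged userspace sketch (standard linux/fs.h
interface; fd setup and error handling assumed):

    #include <sys/ioctl.h>
    #include <linux/fs.h>

    long flags;
    if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0) {
            flags |= FS_NOATIME_FL;    /* same bit as OCFS2_NOATIME_FL */
            ioctl(fd, FS_IOC_SETFLAGS, &flags);
    }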
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 73a11cc..efdd756 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -2960,7 +2960,7 @@
if (map_end & (PAGE_CACHE_SIZE - 1))
to = map_end & (PAGE_CACHE_SIZE - 1);
- page = grab_cache_page(mapping, page_index);
+ page = find_or_create_page(mapping, page_index, GFP_NOFS);
/*
* In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page
@@ -3179,7 +3179,8 @@
if (map_end > end)
map_end = end;
- page = grab_cache_page(context->inode->i_mapping, page_index);
+ page = find_or_create_page(context->inode->i_mapping,
+ page_index, GFP_NOFS);
BUG_ON(!page);
wait_on_page_writeback(page);
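For context: grab_cache_page() allocates with the mapping's own gfp mask,
which normally includes __GFP_FS and can recurse into the filesystem during
reclaim; passing GFP_NOFS explicitly avoids that while ocfs2 holds cluster
locks. The helper being replaced is approximately:

    /* include/linux/pagemap.h (approximate, for reference) */
    static inline struct page *grab_cache_page(struct address_space *mapping,
                                               pgoff_t index)
    {
            return find_or_create_page(mapping, index,
                                       mapping_gfp_mask(mapping));
    }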
@@ -4200,8 +4201,9 @@
goto out;
}
- mutex_lock(&new_inode->i_mutex);
- ret = ocfs2_inode_lock(new_inode, &new_bh, 1);
+ mutex_lock_nested(&new_inode->i_mutex, I_MUTEX_CHILD);
+ ret = ocfs2_inode_lock_nested(new_inode, &new_bh, 1,
+ OI_LS_REFLINK_TARGET);
if (ret) {
mlog_errno(ret);
goto out_unlock;
diff --git a/fs/ocfs2/reservations.c b/fs/ocfs2/reservations.c
index d8b6e42..3e78db3 100644
--- a/fs/ocfs2/reservations.c
+++ b/fs/ocfs2/reservations.c
@@ -732,25 +732,23 @@
struct ocfs2_alloc_reservation *resv,
int *cstart, int *clen)
{
- unsigned int wanted = *clen;
-
if (resv == NULL || ocfs2_resmap_disabled(resmap))
return -ENOSPC;
spin_lock(&resv_lock);
- /*
- * We don't want to over-allocate for temporary
- * windows. Otherwise, we run the risk of fragmenting the
- * allocation space.
- */
- wanted = ocfs2_resv_window_bits(resmap, resv);
- if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen)
- wanted = *clen;
-
if (ocfs2_resv_empty(resv)) {
- mlog(0, "empty reservation, find new window\n");
+ /*
+ * We don't want to over-allocate for temporary
+ * windows. Otherwise, we run the risk of fragmenting the
+ * allocation space.
+ */
+ unsigned int wanted = ocfs2_resv_window_bits(resmap, resv);
+ if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen)
+ wanted = *clen;
+
+ mlog(0, "empty reservation, find new window\n");
/*
* Try to get a window here. If it works, we must fall
 * through and test the bitmap. This avoids some
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index a8e6a95..849c2f0 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -57,11 +57,28 @@
u64 sr_bg_blkno; /* The bg we allocated from. Set
to 0 when a block group is
contiguous. */
+ u64 sr_bg_stable_blkno; /*
+ * Doesn't change, always
+ * set to target block
+ * group descriptor
+ * block.
+ */
u64 sr_blkno; /* The first allocated block */
unsigned int sr_bit_offset; /* The bit in the bg */
unsigned int sr_bits; /* How many bits we claimed */
};
+static u64 ocfs2_group_from_res(struct ocfs2_suballoc_result *res)
+{
+ if (res->sr_blkno == 0)
+ return 0;
+
+ if (res->sr_bg_blkno)
+ return res->sr_bg_blkno;
+
+ return ocfs2_which_suballoc_group(res->sr_blkno, res->sr_bit_offset);
+}
+
static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg);
static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe);
static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl);
@@ -138,6 +155,10 @@
brelse(ac->ac_bh);
ac->ac_bh = NULL;
ac->ac_resv = NULL;
+ if (ac->ac_find_loc_priv) {
+ kfree(ac->ac_find_loc_priv);
+ ac->ac_find_loc_priv = NULL;
+ }
}
void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac)
@@ -336,7 +357,7 @@
static void ocfs2_bg_discontig_add_extent(struct ocfs2_super *osb,
struct ocfs2_group_desc *bg,
struct ocfs2_chain_list *cl,
- u64 p_blkno, u32 clusters)
+ u64 p_blkno, unsigned int clusters)
{
struct ocfs2_extent_list *el = &bg->bg_list;
struct ocfs2_extent_rec *rec;
@@ -348,7 +369,7 @@
rec->e_blkno = cpu_to_le64(p_blkno);
rec->e_cpos = cpu_to_le32(le16_to_cpu(bg->bg_bits) /
le16_to_cpu(cl->cl_bpc));
- rec->e_leaf_clusters = cpu_to_le32(clusters);
+ rec->e_leaf_clusters = cpu_to_le16(clusters);
le16_add_cpu(&bg->bg_bits, clusters * le16_to_cpu(cl->cl_bpc));
le16_add_cpu(&bg->bg_free_bits_count,
clusters * le16_to_cpu(cl->cl_bpc));
@@ -1678,6 +1699,15 @@
if (!ret)
ocfs2_bg_discontig_fix_result(ac, gd, res);
+ /*
+ * sr_bg_blkno might have been changed by
+ * ocfs2_bg_discontig_fix_result
+ */
+ res->sr_bg_stable_blkno = group_bh->b_blocknr;
+
+ if (ac->ac_find_loc_only)
+ goto out_loc_only;
+
ret = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, ac->ac_bh,
res->sr_bits,
le16_to_cpu(gd->bg_chain));
@@ -1691,6 +1721,7 @@
if (ret < 0)
mlog_errno(ret);
+out_loc_only:
*bits_left = le16_to_cpu(gd->bg_free_bits_count);
out:
@@ -1708,7 +1739,6 @@
{
int status;
u16 chain;
- u32 tmp_used;
u64 next_group;
struct inode *alloc_inode = ac->ac_inode;
struct buffer_head *group_bh = NULL;
@@ -1770,6 +1800,11 @@
if (!status)
ocfs2_bg_discontig_fix_result(ac, bg, res);
+ /*
+ * sr_bg_blkno might have been changed by
+ * ocfs2_bg_discontig_fix_result
+ */
+ res->sr_bg_stable_blkno = group_bh->b_blocknr;
/*
* Keep track of previous block descriptor read. When
@@ -1796,22 +1831,17 @@
}
}
- /* Ok, claim our bits now: set the info on dinode, chainlist
- * and then the group */
- status = ocfs2_journal_access_di(handle,
- INODE_CACHE(alloc_inode),
- ac->ac_bh,
- OCFS2_JOURNAL_ACCESS_WRITE);
- if (status < 0) {
+ if (ac->ac_find_loc_only)
+ goto out_loc_only;
+
+ status = ocfs2_alloc_dinode_update_counts(alloc_inode, handle,
+ ac->ac_bh, res->sr_bits,
+ chain);
+ if (status) {
mlog_errno(status);
goto bail;
}
- tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
- fe->id1.bitmap1.i_used = cpu_to_le32(res->sr_bits + tmp_used);
- le32_add_cpu(&cl->cl_recs[chain].c_free, -res->sr_bits);
- ocfs2_journal_dirty(handle, ac->ac_bh);
-
status = ocfs2_block_group_set_bits(handle,
alloc_inode,
bg,
@@ -1826,6 +1856,7 @@
mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits,
(unsigned long long)le64_to_cpu(fe->i_blkno));
+out_loc_only:
*bits_left = le16_to_cpu(bg->bg_free_bits_count);
bail:
brelse(group_bh);
@@ -1845,6 +1876,7 @@
int status;
u16 victim, i;
u16 bits_left = 0;
+ u64 hint = ac->ac_last_group;
struct ocfs2_chain_list *cl;
struct ocfs2_dinode *fe;
@@ -1872,7 +1904,7 @@
goto bail;
}
- res->sr_bg_blkno = ac->ac_last_group;
+ res->sr_bg_blkno = hint;
if (res->sr_bg_blkno) {
/* Attempt to short-circuit the usual search mechanism
* by jumping straight to the most recently used
@@ -1896,8 +1928,10 @@
status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
res, &bits_left);
- if (!status)
+ if (!status) {
+ hint = ocfs2_group_from_res(res);
goto set_hint;
+ }
if (status < 0 && status != -ENOSPC) {
mlog_errno(status);
goto bail;
@@ -1920,8 +1954,10 @@
ac->ac_chain = i;
status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
res, &bits_left);
- if (!status)
+ if (!status) {
+ hint = ocfs2_group_from_res(res);
break;
+ }
if (status < 0 && status != -ENOSPC) {
mlog_errno(status);
goto bail;
@@ -1936,7 +1972,7 @@
if (bits_left < min_bits)
ac->ac_last_group = 0;
else
- ac->ac_last_group = res->sr_bg_blkno;
+ ac->ac_last_group = hint;
}
bail:
@@ -2016,6 +2052,136 @@
OCFS2_I(dir)->ip_last_used_slot = ac->ac_alloc_slot;
}
+int ocfs2_find_new_inode_loc(struct inode *dir,
+ struct buffer_head *parent_fe_bh,
+ struct ocfs2_alloc_context *ac,
+ u64 *fe_blkno)
+{
+ int ret;
+ handle_t *handle = NULL;
+ struct ocfs2_suballoc_result *res;
+
+ BUG_ON(!ac);
+ BUG_ON(ac->ac_bits_given != 0);
+ BUG_ON(ac->ac_bits_wanted != 1);
+ BUG_ON(ac->ac_which != OCFS2_AC_USE_INODE);
+
+ res = kzalloc(sizeof(*res), GFP_NOFS);
+ if (res == NULL) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ocfs2_init_inode_ac_group(dir, parent_fe_bh, ac);
+
+ /*
+ * The handle started here is for chain relink. Alternatively,
+ * we could just disable relink for these calls.
+ */
+ handle = ocfs2_start_trans(OCFS2_SB(dir->i_sb), OCFS2_SUBALLOC_ALLOC);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ handle = NULL;
+ mlog_errno(ret);
+ goto out;
+ }
+
+ /*
+ * This will instruct ocfs2_claim_suballoc_bits and
+ * ocfs2_search_one_group to search but save actual allocation
+ * for later.
+ */
+ ac->ac_find_loc_only = 1;
+
+ ret = ocfs2_claim_suballoc_bits(ac, handle, 1, 1, res);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ac->ac_find_loc_priv = res;
+ *fe_blkno = res->sr_blkno;
+
+out:
+ if (handle)
+ ocfs2_commit_trans(OCFS2_SB(dir->i_sb), handle);
+
+ if (ret)
+ kfree(res);
+
+ return ret;
+}
+
+int ocfs2_claim_new_inode_at_loc(handle_t *handle,
+ struct inode *dir,
+ struct ocfs2_alloc_context *ac,
+ u64 *suballoc_loc,
+ u16 *suballoc_bit,
+ u64 di_blkno)
+{
+ int ret;
+ u16 chain;
+ struct ocfs2_suballoc_result *res = ac->ac_find_loc_priv;
+ struct buffer_head *bg_bh = NULL;
+ struct ocfs2_group_desc *bg;
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *) ac->ac_bh->b_data;
+
+ /*
+ * Since di_blkno is being passed back in, we check for any
+ * inconsistencies which may have happened between
+ * calls. These are code bugs as di_blkno is not expected to
+ * change once returned from ocfs2_find_new_inode_loc()
+ */
+ BUG_ON(res->sr_blkno != di_blkno);
+
+ ret = ocfs2_read_group_descriptor(ac->ac_inode, di,
+ res->sr_bg_stable_blkno, &bg_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ bg = (struct ocfs2_group_desc *) bg_bh->b_data;
+ chain = le16_to_cpu(bg->bg_chain);
+
+ ret = ocfs2_alloc_dinode_update_counts(ac->ac_inode, handle,
+ ac->ac_bh, res->sr_bits,
+ chain);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_block_group_set_bits(handle,
+ ac->ac_inode,
+ bg,
+ bg_bh,
+ res->sr_bit_offset,
+ res->sr_bits);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits,
+ (unsigned long long)di_blkno);
+
+ atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
+
+ BUG_ON(res->sr_bits != 1);
+
+ *suballoc_loc = res->sr_bg_blkno;
+ *suballoc_bit = res->sr_bit_offset;
+ ac->ac_bits_given++;
+ ocfs2_save_inode_ac_group(dir, ac);
+
+out:
+ brelse(bg_bh);
+
+ return ret;
+}
+
int ocfs2_claim_new_inode(handle_t *handle,
struct inode *dir,
struct buffer_head *parent_fe_bh,
@@ -2567,7 +2733,8 @@
* suballoc_bit.
*/
static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno,
- u16 *suballoc_slot, u16 *suballoc_bit)
+ u16 *suballoc_slot, u64 *group_blkno,
+ u16 *suballoc_bit)
{
int status;
struct buffer_head *inode_bh = NULL;
@@ -2604,6 +2771,8 @@
*suballoc_slot = le16_to_cpu(inode_fe->i_suballoc_slot);
if (suballoc_bit)
*suballoc_bit = le16_to_cpu(inode_fe->i_suballoc_bit);
+ if (group_blkno)
+ *group_blkno = le64_to_cpu(inode_fe->i_suballoc_loc);
bail:
brelse(inode_bh);
@@ -2621,7 +2790,8 @@
*/
static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb,
struct inode *suballoc,
- struct buffer_head *alloc_bh, u64 blkno,
+ struct buffer_head *alloc_bh,
+ u64 group_blkno, u64 blkno,
u16 bit, int *res)
{
struct ocfs2_dinode *alloc_di;
@@ -2642,10 +2812,8 @@
goto bail;
}
- if (alloc_di->i_suballoc_loc)
- bg_blkno = le64_to_cpu(alloc_di->i_suballoc_loc);
- else
- bg_blkno = ocfs2_which_suballoc_group(blkno, bit);
+ bg_blkno = group_blkno ? group_blkno :
+ ocfs2_which_suballoc_group(blkno, bit);
status = ocfs2_read_group_descriptor(suballoc, alloc_di, bg_blkno,
&group_bh);
if (status < 0) {
@@ -2680,6 +2848,7 @@
int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
{
int status;
+ u64 group_blkno = 0;
u16 suballoc_bit = 0, suballoc_slot = 0;
struct inode *inode_alloc_inode;
struct buffer_head *alloc_bh = NULL;
@@ -2687,7 +2856,7 @@
mlog_entry("blkno: %llu", (unsigned long long)blkno);
status = ocfs2_get_suballoc_slot_bit(osb, blkno, &suballoc_slot,
- &suballoc_bit);
+ &group_blkno, &suballoc_bit);
if (status < 0) {
mlog(ML_ERROR, "get alloc slot and bit failed %d\n", status);
goto bail;
@@ -2715,7 +2884,7 @@
}
status = ocfs2_test_suballoc_bit(osb, inode_alloc_inode, alloc_bh,
- blkno, suballoc_bit, res);
+ group_blkno, blkno, suballoc_bit, res);
if (status < 0)
mlog(ML_ERROR, "test suballoc bit failed %d\n", status);
diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h
index a017dd3..b8afabf 100644
--- a/fs/ocfs2/suballoc.h
+++ b/fs/ocfs2/suballoc.h
@@ -56,6 +56,9 @@
u64 ac_max_block; /* Highest block number to allocate. 0 is
is the same as ~0 - unlimited */
+ int ac_find_loc_only; /* hack for reflink operation ordering */
+ struct ocfs2_suballoc_result *ac_find_loc_priv; /* found location, claimed
+ later by
+ ocfs2_claim_new_inode_at_loc() */
+
struct ocfs2_alloc_reservation *ac_resv;
};
@@ -197,4 +200,22 @@
struct ocfs2_alloc_context **meta_ac);
int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res);
+
+/*
+ * The following two interfaces are for ocfs2_create_inode_in_orphan().
+ */
+int ocfs2_find_new_inode_loc(struct inode *dir,
+ struct buffer_head *parent_fe_bh,
+ struct ocfs2_alloc_context *ac,
+ u64 *fe_blkno);
+
+int ocfs2_claim_new_inode_at_loc(handle_t *handle,
+ struct inode *dir,
+ struct ocfs2_alloc_context *ac,
+ u64 *suballoc_loc,
+ u16 *suballoc_bit,
+ u64 di_blkno);
+
#endif /* _CHAINALLOC_H_ */
diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
index 32499d21..9975457 100644
--- a/fs/ocfs2/symlink.c
+++ b/fs/ocfs2/symlink.c
@@ -128,7 +128,7 @@
}
/* Fast symlinks can't be large */
- len = strlen(target);
+ len = strnlen(target, ocfs2_fast_symlink_chars(inode->i_sb));
link = kzalloc(len + 1, GFP_NOFS);
if (!link) {
status = -ENOMEM;
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index d03469f..06fa5e7 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -1286,13 +1286,11 @@
xis.inode_bh = xbs.inode_bh = di_bh;
di = (struct ocfs2_dinode *)di_bh->b_data;
- down_read(&oi->ip_xattr_sem);
ret = ocfs2_xattr_ibody_get(inode, name_index, name, buffer,
buffer_size, &xis);
if (ret == -ENODATA && di->i_xattr_loc)
ret = ocfs2_xattr_block_get(inode, name_index, name, buffer,
buffer_size, &xbs);
- up_read(&oi->ip_xattr_sem);
return ret;
}
@@ -1316,8 +1314,10 @@
mlog_errno(ret);
return ret;
}
+ down_read(&OCFS2_I(inode)->ip_xattr_sem);
ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index,
name, buffer, buffer_size);
+ up_read(&OCFS2_I(inode)->ip_xattr_sem);
ocfs2_inode_unlock(inode, 0);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index a1c43e7..8e4adda 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2675,7 +2675,7 @@
INF("auxv", S_IRUSR, proc_pid_auxv),
ONE("status", S_IRUGO, proc_pid_status),
ONE("personality", S_IRUSR, proc_pid_personality),
- INF("limits", S_IRUSR, proc_pid_limits),
+ INF("limits", S_IRUGO, proc_pid_limits),
#ifdef CONFIG_SCHED_DEBUG
REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
#endif
@@ -3011,7 +3011,7 @@
INF("auxv", S_IRUSR, proc_pid_auxv),
ONE("status", S_IRUGO, proc_pid_status),
ONE("personality", S_IRUSR, proc_pid_personality),
- INF("limits", S_IRUSR, proc_pid_limits),
+ INF("limits", S_IRUGO, proc_pid_limits),
#ifdef CONFIG_SCHED_DEBUG
REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
#endif
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 180cf5a..3b8b456 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -146,7 +146,7 @@
u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison);
#endif
-#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
+#ifdef CONFIG_ARCH_USES_PG_UNCACHED
u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached);
#endif
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 439fc1f..1dbca4e8 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -224,7 +224,8 @@
/* We don't show the stack guard page in /proc/maps */
start = vma->vm_start;
if (vma->vm_flags & VM_GROWSDOWN)
- start += PAGE_SIZE;
+ if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
+ start += PAGE_SIZE;
seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
start,
@@ -362,13 +363,13 @@
mss->referenced += PAGE_SIZE;
mapcount = page_mapcount(page);
if (mapcount >= 2) {
- if (pte_dirty(ptent))
+ if (pte_dirty(ptent) || PageDirty(page))
mss->shared_dirty += PAGE_SIZE;
else
mss->shared_clean += PAGE_SIZE;
mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
} else {
- if (pte_dirty(ptent))
+ if (pte_dirty(ptent) || PageDirty(page))
mss->private_dirty += PAGE_SIZE;
else
mss->private_clean += PAGE_SIZE;
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 91c817f..2367fb3 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -163,7 +163,7 @@
static const struct file_operations proc_vmcore_operations = {
.read = read_vmcore,
- .llseek = generic_file_llseek,
+ .llseek = default_llseek,
};
static struct vmcore* __init get_new_element(void)
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index f53505d..5cbb81e 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -170,6 +170,7 @@
int reiserfs_unpack(struct inode *inode, struct file *filp)
{
int retval = 0;
+ int depth;
int index;
struct page *page;
struct address_space *mapping;
@@ -188,8 +189,8 @@
/* we need to make sure nobody is changing the file size beneath
** us
*/
- mutex_lock(&inode->i_mutex);
- reiserfs_write_lock(inode->i_sb);
+ reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb);
+ depth = reiserfs_write_lock_once(inode->i_sb);
write_from = inode->i_size & (blocksize - 1);
/* if we are on a block boundary, we are already unpacked. */
@@ -224,6 +225,6 @@
out:
mutex_unlock(&inode->i_mutex);
- reiserfs_write_unlock(inode->i_sb);
+ reiserfs_write_unlock_once(inode->i_sb, depth);
return retval;
}
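For reference, the helper used above (already in the tree at this point, shown
approximately) sidesteps the inverted lock order - reiserfs_unpack() is
entered with the reiserfs write lock held, so taking i_mutex directly would
deadlock against paths that take i_mutex first:

    /* Approximate shape of the existing helper this hunk relies on. */
    static inline void
    reiserfs_mutex_lock_safe(struct mutex *m, struct super_block *s)
    {
            reiserfs_write_unlock(s);
            mutex_lock(m);
            reiserfs_write_lock(s);
    }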
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 1b27b56..da3fefe 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -340,7 +340,7 @@
char *p;
p = d_path(&file->f_path, last_sysfs_file, sizeof(last_sysfs_file));
- if (p)
+ if (!IS_ERR(p))
memmove(last_sysfs_file, p, strlen(p) + 1);
/* need attr_sd for attr and ops, its parent for kobj */
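Note on the fix above: d_path() never returns NULL - on failure it returns an
ERR_PTR() value such as ERR_PTR(-ENAMETOOLONG) - so the old `if (p)` test
happily passed an error cookie to memmove(). IS_ERR() is the correct test for
this interface, as the hunk now does.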
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 15412fe..b552f81 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -852,8 +852,8 @@
SetPageUptodate(page);
if (count) {
- wbc->nr_to_write--;
- if (wbc->nr_to_write <= 0)
+ if (--wbc->nr_to_write <= 0 &&
+ wbc->sync_mode == WB_SYNC_NONE)
done = 1;
}
xfs_start_page_writeback(page, !page_dirty, count);
@@ -1068,7 +1068,7 @@
* by themselves.
*/
if ((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == PF_MEMALLOC)
- goto out_fail;
+ goto redirty;
/*
* We need a transaction if there are delalloc or unwritten buffers
@@ -1080,7 +1080,7 @@
*/
xfs_count_page_state(page, &delalloc, &unwritten);
if ((current->flags & PF_FSTRANS) && (delalloc || unwritten))
- goto out_fail;
+ goto redirty;
/* Is this page beyond the end of the file? */
offset = i_size_read(inode);
@@ -1245,12 +1245,15 @@
if (iohead)
xfs_cancel_ioend(iohead);
+ if (err == -EAGAIN)
+ goto redirty;
+
xfs_aops_discard_page(page);
ClearPageUptodate(page);
unlock_page(page);
return err;
-out_fail:
+redirty:
redirty_page_for_writepage(wbc, page);
unlock_page(page);
return 0;
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index ea79072..286e36e 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -440,12 +440,7 @@
ASSERT(btp == bp->b_target);
if (bp->b_file_offset == range_base &&
bp->b_buffer_length == range_length) {
- /*
- * If we look at something, bring it to the
- * front of the list for next time.
- */
atomic_inc(&bp->b_hold);
- list_move(&bp->b_hash_list, &hash->bh_list);
goto found;
}
}
@@ -1443,8 +1438,7 @@
{
unsigned int i;
- btp->bt_hashshift = external ? 3 : 8; /* 8 or 256 buckets */
- btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
+ btp->bt_hashshift = external ? 3 : 12; /* 8 or 4096 buckets */
btp->bt_hash = kmem_zalloc_large((1 << btp->bt_hashshift) *
sizeof(xfs_bufhash_t));
for (i = 0; i < (1 << btp->bt_hashshift); i++) {
@@ -1938,7 +1932,8 @@
if (!xfs_buf_zone)
goto out;
- xfslogd_workqueue = create_workqueue("xfslogd");
+ xfslogd_workqueue = alloc_workqueue("xfslogd",
+ WQ_RESCUER | WQ_HIGHPRI, 1);
if (!xfslogd_workqueue)
goto out_free_buf_zone;
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index d072e5f..2a05614 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -137,7 +137,6 @@
size_t bt_smask;
/* per device buffer hash table */
- uint bt_hashmask;
uint bt_hashshift;
xfs_bufhash_t *bt_hash;
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 237f5ff..3b9e626 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -785,6 +785,8 @@
{
struct fsxattr fa;
+ memset(&fa, 0, sizeof(struct fsxattr));
+
xfs_ilock(ip, XFS_ILOCK_SHARED);
fa.fsx_xflags = xfs_ip2xflags(ip);
fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog;
@@ -907,6 +909,13 @@
return XFS_ERROR(EIO);
/*
+ * Disallow 32bit project ids because on-disk structure
+ * is 16bit only.
+ */
+ if ((mask & FSX_PROJID) && (fa->fsx_projid > (__uint16_t)-1))
+ return XFS_ERROR(EINVAL);
+
+ /*
* If disk quotas is on, we make sure that the dquots do exist on disk,
* before we start any other transactions. Trying to do this later
* is messy. We don't care to take a readlock to look at the ids
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 68be25d..b1fc2a6 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -664,7 +664,7 @@
fieinfo->fi_extents_max + 1;
bm.bmv_count = min_t(__s32, bm.bmv_count,
(PAGE_SIZE * 16 / sizeof(struct getbmapx)));
- bm.bmv_iflags = BMV_IF_PREALLOC;
+ bm.bmv_iflags = BMV_IF_PREALLOC | BMV_IF_NO_HOLES;
if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR)
bm.bmv_iflags |= BMV_IF_ATTRFORK;
if (!(fieinfo->fi_flags & FIEMAP_FLAG_SYNC))
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 15c35b6..a4e0797 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -1226,6 +1226,7 @@
struct xfs_inode *ip = XFS_I(dentry->d_inode);
__uint64_t fakeinos, id;
xfs_extlen_t lsize;
+ __int64_t ffree;
statp->f_type = XFS_SB_MAGIC;
statp->f_namelen = MAXNAMELEN - 1;
@@ -1249,7 +1250,11 @@
statp->f_files = min_t(typeof(statp->f_files),
statp->f_files,
mp->m_maxicount);
- statp->f_ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
+
+ /* make sure statp->f_ffree does not underflow */
+ ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
+ statp->f_ffree = max_t(__int64_t, ffree, 0);
+
spin_unlock(&mp->m_sb_lock);
if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) ||
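A worked example of the underflow being fixed (illustrative numbers): suppose
sb_icount = 2,000,000, sb_ifree = 10,000, and m_maxicount clamps f_files to
1,500,000. Then:

    old: f_ffree = 1,500,000 - (2,000,000 - 10,000) = -490,000
         (f_ffree is unsigned in struct kstatfs, so userspace saw a
          huge bogus free-inode count)
    new: ffree = -490,000; f_ffree = max_t(__int64_t, ffree, 0) = 0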
@@ -1402,7 +1407,7 @@
xfs_save_resvblks(mp);
xfs_quiesce_attr(mp);
- return -xfs_fs_log_dummy(mp);
+ return -xfs_fs_log_dummy(mp, SYNC_WAIT);
}
STATIC int
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index dfcbd98..81976ff 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -34,6 +34,7 @@
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
+#include "xfs_fsops.h"
#include <linux/kthread.h>
#include <linux/freezer.h>
@@ -341,38 +342,6 @@
}
STATIC int
-xfs_commit_dummy_trans(
- struct xfs_mount *mp,
- uint flags)
-{
- struct xfs_inode *ip = mp->m_rootip;
- struct xfs_trans *tp;
- int error;
-
- /*
- * Put a dummy transaction in the log to tell recovery
- * that all others are OK.
- */
- tp = xfs_trans_alloc(mp, XFS_TRANS_DUMMY1);
- error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
- if (error) {
- xfs_trans_cancel(tp, 0);
- return error;
- }
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
-
- xfs_trans_ijoin(tp, ip);
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
- error = xfs_trans_commit(tp, 0);
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
-
- /* the log force ensures this transaction is pushed to disk */
- xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0);
- return error;
-}
-
-STATIC int
xfs_sync_fsdata(
struct xfs_mount *mp)
{
@@ -432,7 +401,7 @@
/* mark the log as covered if needed */
if (xfs_log_need_covered(mp))
- error2 = xfs_commit_dummy_trans(mp, SYNC_WAIT);
+ error2 = xfs_fs_log_dummy(mp, SYNC_WAIT);
/* flush data-only devices */
if (mp->m_rtdev_targp)
@@ -563,7 +532,7 @@
/*
* Every sync period we need to unpin all items, reclaim inodes and sync
* disk quotas. We might need to cover the log to indicate that the
- * filesystem is idle.
+ * filesystem is idle and not frozen.
*/
STATIC void
xfs_sync_worker(
@@ -577,8 +546,9 @@
xfs_reclaim_inodes(mp, 0);
/* dgc: errors ignored here */
error = xfs_qm_sync(mp, SYNC_TRYLOCK);
- if (xfs_log_need_covered(mp))
- error = xfs_commit_dummy_trans(mp, 0);
+ if (mp->m_super->s_frozen == SB_UNFROZEN &&
+ xfs_log_need_covered(mp))
+ error = xfs_fs_log_dummy(mp, 0);
}
mp->m_sync_seq++;
wake_up(&mp->m_wait_single_sync_task);
@@ -698,14 +668,11 @@
xfs_perag_put(pag);
}
-void
-__xfs_inode_clear_reclaim_tag(
- xfs_mount_t *mp,
+STATIC void
+__xfs_inode_clear_reclaim(
xfs_perag_t *pag,
xfs_inode_t *ip)
{
- radix_tree_tag_clear(&pag->pag_ici_root,
- XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
pag->pag_ici_reclaimable--;
if (!pag->pag_ici_reclaimable) {
/* clear the reclaim tag from the perag radix tree */
@@ -719,6 +686,17 @@
}
}
+void
+__xfs_inode_clear_reclaim_tag(
+ xfs_mount_t *mp,
+ xfs_perag_t *pag,
+ xfs_inode_t *ip)
+{
+ radix_tree_tag_clear(&pag->pag_ici_root,
+ XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
+ __xfs_inode_clear_reclaim(pag, ip);
+}
+
/*
* Inodes in different states need to be treated differently, and the return
* value of xfs_iflush is not sufficient to get this right. The following table
@@ -868,6 +846,7 @@
if (!radix_tree_delete(&pag->pag_ici_root,
XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
ASSERT(0);
+ __xfs_inode_clear_reclaim(pag, ip);
write_unlock(&pag->pag_ici_lock);
/*
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 23f14e5..f90dadd 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -5533,12 +5533,24 @@
map[i].br_startblock))
goto out_free_map;
- nexleft--;
bmv->bmv_offset =
out[cur_ext].bmv_offset +
out[cur_ext].bmv_length;
bmv->bmv_length =
max_t(__int64_t, 0, bmvend - bmv->bmv_offset);
+
+ /*
+ * In case we don't want to return the hole,
+ * don't increase cur_ext so that we can reuse
+ * it in the next loop.
+ */
+ if ((iflags & BMV_IF_NO_HOLES) &&
+ map[i].br_startblock == HOLESTARTBLOCK) {
+ memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
+ continue;
+ }
+
+ nexleft--;
bmv->bmv_entries++;
cur_ext++;
}
diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h
index 7cf7220..87c2e9d 100644
--- a/fs/xfs/xfs_fs.h
+++ b/fs/xfs/xfs_fs.h
@@ -114,8 +114,10 @@
#define BMV_IF_NO_DMAPI_READ 0x2 /* Do not generate DMAPI read event */
#define BMV_IF_PREALLOC 0x4 /* rtn status BMV_OF_PREALLOC if req */
#define BMV_IF_DELALLOC 0x8 /* rtn status BMV_OF_DELALLOC if req */
+#define BMV_IF_NO_HOLES 0x10 /* Do not return holes */
#define BMV_IF_VALID \
- (BMV_IF_ATTRFORK|BMV_IF_NO_DMAPI_READ|BMV_IF_PREALLOC|BMV_IF_DELALLOC)
+ (BMV_IF_ATTRFORK|BMV_IF_NO_DMAPI_READ|BMV_IF_PREALLOC| \
+ BMV_IF_DELALLOC|BMV_IF_NO_HOLES)
/* bmv_oflags values - returned for each non-header segment */
#define BMV_OF_PREALLOC 0x1 /* segment = unwritten pre-allocation */
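A hedged userspace sketch of the new flag (ioctl and structure taken from
xfs_fs.h; fd setup and error handling assumed):

    struct getbmapx bmx[16];

    memset(bmx, 0, sizeof(bmx));
    bmx[0].bmv_length = -1;                 /* map to end of file  */
    bmx[0].bmv_count  = 16;                 /* header + 15 extents */
    bmx[0].bmv_iflags = BMV_IF_PREALLOC | BMV_IF_NO_HOLES;
    if (ioctl(fd, XFS_IOC_GETBMAPX, bmx) == 0)
            /* bmx[1..bmv_entries] hold extents, holes omitted */;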
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index dbca5f5..43b1d56 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -604,31 +604,36 @@
return 0;
}
+/*
+ * Dump a transaction into the log that contains no real change. This is needed
+ * to be able to make the log dirty or stamp the current tail LSN into the log
+ * during the covering operation.
+ *
+ * We cannot use an inode here for this - that will push dirty state back up
+ * into the VFS and then periodic inode flushing will prevent log covering from
+ * making progress. Hence we log a field in the superblock instead.
+ */
int
xfs_fs_log_dummy(
- xfs_mount_t *mp)
+ xfs_mount_t *mp,
+ int flags)
{
xfs_trans_t *tp;
- xfs_inode_t *ip;
int error;
tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP);
- error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
+ error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
+ XFS_DEFAULT_LOG_COUNT);
if (error) {
xfs_trans_cancel(tp, 0);
return error;
}
- ip = mp->m_rootip;
- xfs_ilock(ip, XFS_ILOCK_EXCL);
-
- xfs_trans_ijoin(tp, ip);
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
- xfs_trans_set_sync(tp);
- error = xfs_trans_commit(tp, 0);
-
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- return error;
+ /* log the UUID because it is an unchanging field */
+ xfs_mod_sb(tp, XFS_SB_UUID);
+ if (flags & SYNC_WAIT)
+ xfs_trans_set_sync(tp);
+ return xfs_trans_commit(tp, 0);
}
int
diff --git a/fs/xfs/xfs_fsops.h b/fs/xfs/xfs_fsops.h
index 88435e0..a786c52 100644
--- a/fs/xfs/xfs_fsops.h
+++ b/fs/xfs/xfs_fsops.h
@@ -25,6 +25,6 @@
extern int xfs_reserve_blocks(xfs_mount_t *mp, __uint64_t *inval,
xfs_fsop_resblks_t *outval);
extern int xfs_fs_goingdown(xfs_mount_t *mp, __uint32_t inflags);
-extern int xfs_fs_log_dummy(xfs_mount_t *mp);
+extern int xfs_fs_log_dummy(xfs_mount_t *mp, int flags);
#endif /* __XFS_FSOPS_H__ */
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index abf80ae..5371d2d 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -1213,7 +1213,6 @@
struct xfs_inobt_rec_incore rec;
struct xfs_btree_cur *cur;
struct xfs_buf *agbp;
- xfs_agino_t startino;
int error;
int i;
@@ -1227,13 +1226,13 @@
}
/*
- * derive and lookup the exact inode record for the given agino. If the
- * record cannot be found, then it's an invalid inode number and we
- * should abort.
+ * Lookup the inode record for the given agino. If the record cannot be
+ * found, then it's an invalid inode number and we should abort. Once
+ * we have a record, we need to ensure it contains the inode number
+ * we are looking up.
*/
cur = xfs_inobt_init_cursor(mp, tp, agbp, agno);
- startino = agino & ~(XFS_IALLOC_INODES(mp) - 1);
- error = xfs_inobt_lookup(cur, startino, XFS_LOOKUP_EQ, &i);
+ error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i);
if (!error) {
if (i)
error = xfs_inobt_get_rec(cur, &rec, &i);
@@ -1246,6 +1245,11 @@
if (error)
return error;
+ /* check that the returned record contains the required inode */
+ if (rec.ir_startino > agino ||
+ rec.ir_startino + XFS_IALLOC_INODES(mp) <= agino)
+ return EINVAL;
+
/* for untrusted inodes check it is allocated first */
if ((flags & XFS_IGET_UNTRUSTED) &&
(rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino)))
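A worked example (illustrative, assuming XFS_IALLOC_INODES == 64): for
agino = 70, XFS_LOOKUP_LE returns the record with the largest ir_startino at
or below 70. If that record has ir_startino = 64, the new range check accepts
it because 64 <= 70 < 64 + 64; if the nearest record is instead
ir_startino = 0 (covering inodes 0..63), the check returns EINVAL rather than
treating a stale or forged inode number as valid.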
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 68415cb..34798f3 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1914,6 +1914,11 @@
return 0;
}
+/*
+ * A big issue when freeing the inode cluster is that we _cannot_ skip any
+ * inodes that are in memory - they all must be marked stale and attached to
+ * the cluster buffer.
+ */
STATIC void
xfs_ifree_cluster(
xfs_inode_t *free_ip,
@@ -1945,8 +1950,6 @@
}
for (j = 0; j < nbufs; j++, inum += ninodes) {
- int found = 0;
-
blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
XFS_INO_TO_AGBNO(mp, inum));
@@ -1965,7 +1968,9 @@
/*
* Walk the inodes already attached to the buffer and mark them
* stale. These will all have the flush locks held, so an
- * in-memory inode walk can't lock them.
+ * in-memory inode walk can't lock them. By marking them all
+ * stale first, we will not attempt to lock them in the loop
+ * below as the XFS_ISTALE flag will be set.
*/
lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
while (lip) {
@@ -1977,11 +1982,11 @@
&iip->ili_flush_lsn,
&iip->ili_item.li_lsn);
xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
- found++;
}
lip = lip->li_bio_list;
}
+
/*
* For each inode in memory attempt to add it to the inode
* buffer and set it up for being staled on buffer IO
@@ -1993,6 +1998,7 @@
* even trying to lock them.
*/
for (i = 0; i < ninodes; i++) {
+retry:
read_lock(&pag->pag_ici_lock);
ip = radix_tree_lookup(&pag->pag_ici_root,
XFS_INO_TO_AGINO(mp, (inum + i)));
@@ -2003,38 +2009,36 @@
continue;
}
- /* don't try to lock/unlock the current inode */
+ /*
+ * Don't try to lock/unlock the current inode, but we
+ * _cannot_ skip the other inodes that we did not find
+ * in the list attached to the buffer and are not
+ * already marked stale. If we can't lock it, back off
+ * and retry.
+ */
if (ip != free_ip &&
!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
read_unlock(&pag->pag_ici_lock);
- continue;
+ delay(1);
+ goto retry;
}
read_unlock(&pag->pag_ici_lock);
- if (!xfs_iflock_nowait(ip)) {
- if (ip != free_ip)
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- continue;
- }
-
+ xfs_iflock(ip);
xfs_iflags_set(ip, XFS_ISTALE);
- if (xfs_inode_clean(ip)) {
- ASSERT(ip != free_ip);
- xfs_ifunlock(ip);
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- continue;
- }
+ /*
+ * we don't need to attach clean inodes or those only
+ * with unlogged changes (which we throw away, anyway).
+ */
iip = ip->i_itemp;
- if (!iip) {
- /* inode with unlogged changes only */
+ if (!iip || xfs_inode_clean(ip)) {
ASSERT(ip != free_ip);
ip->i_update_core = 0;
xfs_ifunlock(ip);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
continue;
}
- found++;
iip->ili_last_fields = iip->ili_format.ilf_fields;
iip->ili_format.ilf_fields = 0;
@@ -2049,8 +2053,7 @@
xfs_iunlock(ip, XFS_ILOCK_EXCL);
}
- if (found)
- xfs_trans_stale_inode_buf(tp, bp);
+ xfs_trans_stale_inode_buf(tp, bp);
xfs_trans_binval(tp, bp);
}
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 925d572..33f718f 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -3015,7 +3015,8 @@
XFS_STATS_INC(xs_log_force);
- xlog_cil_push(log, 1);
+ if (log->l_cilp)
+ xlog_cil_force(log);
spin_lock(&log->l_icloglock);
@@ -3167,7 +3168,7 @@
XFS_STATS_INC(xs_log_force);
if (log->l_cilp) {
- lsn = xlog_cil_push_lsn(log, lsn);
+ lsn = xlog_cil_force_lsn(log, lsn);
if (lsn == NULLCOMMITLSN)
return 0;
}
@@ -3724,7 +3725,7 @@
* call below.
*/
if (!logerror && (mp->m_flags & XFS_MOUNT_DELAYLOG))
- xlog_cil_push(log, 1);
+ xlog_cil_force(log);
/*
* We must hold both the GRANT lock and the LOG lock,
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index 31e4ea2..7e206fc 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -68,6 +68,7 @@
ctx->sequence = 1;
ctx->cil = cil;
cil->xc_ctx = ctx;
+ cil->xc_current_sequence = ctx->sequence;
cil->xc_log = log;
log->l_cilp = cil;
@@ -269,15 +270,10 @@
static void
xlog_cil_format_items(
struct log *log,
- struct xfs_log_vec *log_vector,
- struct xlog_ticket *ticket,
- xfs_lsn_t *start_lsn)
+ struct xfs_log_vec *log_vector)
{
struct xfs_log_vec *lv;
- if (start_lsn)
- *start_lsn = log->l_cilp->xc_ctx->sequence;
-
ASSERT(log_vector);
for (lv = log_vector; lv; lv = lv->lv_next) {
void *ptr;
@@ -301,12 +297,27 @@
ptr += vec->i_len;
}
ASSERT(ptr == lv->lv_buf + lv->lv_buf_len);
-
- xlog_cil_insert(log, ticket, lv->lv_item, lv);
}
}
static void
+xlog_cil_insert_items(
+ struct log *log,
+ struct xfs_log_vec *log_vector,
+ struct xlog_ticket *ticket,
+ xfs_lsn_t *start_lsn)
+{
+ struct xfs_log_vec *lv;
+
+ if (start_lsn)
+ *start_lsn = log->l_cilp->xc_ctx->sequence;
+
+ ASSERT(log_vector);
+ for (lv = log_vector; lv; lv = lv->lv_next)
+ xlog_cil_insert(log, ticket, lv->lv_item, lv);
+}
+
+static void
xlog_cil_free_logvec(
struct xfs_log_vec *log_vector)
{
@@ -321,80 +332,6 @@
}
/*
- * Commit a transaction with the given vector to the Committed Item List.
- *
- * To do this, we need to format the item, pin it in memory if required and
- * account for the space used by the transaction. Once we have done that we
- * need to release the unused reservation for the transaction, attach the
- * transaction to the checkpoint context so we carry the busy extents through
- * to checkpoint completion, and then unlock all the items in the transaction.
- *
- * For more specific information about the order of operations in
- * xfs_log_commit_cil() please refer to the comments in
- * xfs_trans_commit_iclog().
- *
- * Called with the context lock already held in read mode to lock out
- * background commit, returns without it held once background commits are
- * allowed again.
- */
-int
-xfs_log_commit_cil(
- struct xfs_mount *mp,
- struct xfs_trans *tp,
- struct xfs_log_vec *log_vector,
- xfs_lsn_t *commit_lsn,
- int flags)
-{
- struct log *log = mp->m_log;
- int log_flags = 0;
- int push = 0;
-
- if (flags & XFS_TRANS_RELEASE_LOG_RES)
- log_flags = XFS_LOG_REL_PERM_RESERV;
-
- if (XLOG_FORCED_SHUTDOWN(log)) {
- xlog_cil_free_logvec(log_vector);
- return XFS_ERROR(EIO);
- }
-
- /* lock out background commit */
- down_read(&log->l_cilp->xc_ctx_lock);
- xlog_cil_format_items(log, log_vector, tp->t_ticket, commit_lsn);
-
- /* check we didn't blow the reservation */
- if (tp->t_ticket->t_curr_res < 0)
- xlog_print_tic_res(log->l_mp, tp->t_ticket);
-
- /* attach the transaction to the CIL if it has any busy extents */
- if (!list_empty(&tp->t_busy)) {
- spin_lock(&log->l_cilp->xc_cil_lock);
- list_splice_init(&tp->t_busy,
- &log->l_cilp->xc_ctx->busy_extents);
- spin_unlock(&log->l_cilp->xc_cil_lock);
- }
-
- tp->t_commit_lsn = *commit_lsn;
- xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
- xfs_trans_unreserve_and_mod_sb(tp);
-
- /* check for background commit before unlock */
- if (log->l_cilp->xc_ctx->space_used > XLOG_CIL_SPACE_LIMIT(log))
- push = 1;
- up_read(&log->l_cilp->xc_ctx_lock);
-
- /*
- * We need to push CIL every so often so we don't cache more than we
- * can fit in the log. The limit really is that a checkpoint can't be
- * more than half the log (the current checkpoint is not allowed to
- * overwrite the previous checkpoint), but commit latency and memory
- * usage limit this to a smaller size in most cases.
- */
- if (push)
- xlog_cil_push(log, 0);
- return 0;
-}
-
-/*
* Mark all items committed and clear busy extents. We free the log vector
* chains in a separate pass so that we unpin the log items as quickly as
* possible.
@@ -427,13 +364,23 @@
}
/*
- * Push the Committed Item List to the log. If the push_now flag is not set,
- * then it is a background flush and so we can chose to ignore it.
+ * Push the Committed Item List to the log. If @push_seq is zero, then it
+ * is a background flush and so we can choose to ignore it. Otherwise, if the
+ * current sequence is the same as @push_seq we need to do a flush. If
+ * @push_seq is less than the current sequence, then it has already been
+ * flushed and we don't need to do anything - the caller will wait for it to
+ * complete if necessary.
+ *
+ * @push_seq is a value rather than a flag because that allows us to do an
+ * unlocked check of the sequence number for a match. Hence we can allow log
+ * forces to run racily and not issue pushes for the same sequence twice. If we
+ * get a race between multiple pushes for the same sequence they will block on
+ * the first one and then abort, hence avoiding needless pushes.
*/
-int
+STATIC int
xlog_cil_push(
struct log *log,
- int push_now)
+ xfs_lsn_t push_seq)
{
struct xfs_cil *cil = log->l_cilp;
struct xfs_log_vec *lv;
@@ -453,12 +400,20 @@
if (!cil)
return 0;
+ ASSERT(!push_seq || push_seq <= cil->xc_ctx->sequence);
+
new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS);
new_ctx->ticket = xlog_cil_ticket_alloc(log);
- /* lock out transaction commit, but don't block on background push */
+ /*
+ * Lock out transaction commit, but don't block for background pushes
+ * unless we are well over the CIL space limit. See the definition of
+ * XLOG_CIL_HARD_SPACE_LIMIT() for the full explanation of the logic
+ * used here.
+ */
if (!down_write_trylock(&cil->xc_ctx_lock)) {
- if (!push_now)
+ if (!push_seq &&
+ cil->xc_ctx->space_used < XLOG_CIL_HARD_SPACE_LIMIT(log))
goto out_free_ticket;
down_write(&cil->xc_ctx_lock);
}
@@ -469,7 +424,11 @@
goto out_skip;
/* check for spurious background flush */
- if (!push_now && cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log))
+ if (!push_seq && cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log))
+ goto out_skip;
+
+ /* check for a previously pushed sequence */
+ if (push_seq && push_seq < cil->xc_ctx->sequence)
goto out_skip;
/*
@@ -515,6 +474,13 @@
cil->xc_ctx = new_ctx;
/*
+ * mirror the new sequence into the cil structure so that we can do
+ * unlocked checks against the current sequence in log forces without
+ * risking dereferencing a freed context pointer.
+ */
+ cil->xc_current_sequence = new_ctx->sequence;
+
+ /*
* The switch is now done, so we can drop the context lock and move out
* of a shared context. We can't just go straight to the commit record,
* though - we need to synchronise with previous and future commits so
@@ -626,6 +592,102 @@
}
/*
+ * Commit a transaction with the given vector to the Committed Item List.
+ *
+ * To do this, we need to format the item, pin it in memory if required and
+ * account for the space used by the transaction. Once we have done that we
+ * need to release the unused reservation for the transaction, attach the
+ * transaction to the checkpoint context so we carry the busy extents through
+ * to checkpoint completion, and then unlock all the items in the transaction.
+ *
+ * For more specific information about the order of operations in
+ * xfs_log_commit_cil() please refer to the comments in
+ * xfs_trans_commit_iclog().
+ *
+ * Called with the context lock already held in read mode to lock out
+ * background commit, returns without it held once background commits are
+ * allowed again.
+ */
+int
+xfs_log_commit_cil(
+ struct xfs_mount *mp,
+ struct xfs_trans *tp,
+ struct xfs_log_vec *log_vector,
+ xfs_lsn_t *commit_lsn,
+ int flags)
+{
+ struct log *log = mp->m_log;
+ int log_flags = 0;
+ int push = 0;
+
+ if (flags & XFS_TRANS_RELEASE_LOG_RES)
+ log_flags = XFS_LOG_REL_PERM_RESERV;
+
+ if (XLOG_FORCED_SHUTDOWN(log)) {
+ xlog_cil_free_logvec(log_vector);
+ return XFS_ERROR(EIO);
+ }
+
+ /*
+ * do all the hard work of formatting items (including memory
+ * allocation) outside the CIL context lock. This prevents stalling CIL
+ * pushes when we are low on memory and a transaction commit spends a
+ * lot of time in memory reclaim.
+ */
+ xlog_cil_format_items(log, log_vector);
+
+ /* lock out background commit */
+ down_read(&log->l_cilp->xc_ctx_lock);
+ xlog_cil_insert_items(log, log_vector, tp->t_ticket, commit_lsn);
+
+ /* check we didn't blow the reservation */
+ if (tp->t_ticket->t_curr_res < 0)
+ xlog_print_tic_res(log->l_mp, tp->t_ticket);
+
+ /* attach the transaction to the CIL if it has any busy extents */
+ if (!list_empty(&tp->t_busy)) {
+ spin_lock(&log->l_cilp->xc_cil_lock);
+ list_splice_init(&tp->t_busy,
+ &log->l_cilp->xc_ctx->busy_extents);
+ spin_unlock(&log->l_cilp->xc_cil_lock);
+ }
+
+ tp->t_commit_lsn = *commit_lsn;
+ xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
+ xfs_trans_unreserve_and_mod_sb(tp);
+
+ /*
+ * Once all the items of the transaction have been copied to the CIL,
+ * the items can be unlocked and freed.
+ *
+ * This needs to be done before we drop the CIL context lock because we
+ * have to update state in the log items and unlock them before they go
+ * to disk. If we don't, then the CIL checkpoint can race with us and
+ * we can run checkpoint completion before we've updated and unlocked
+ * the log items. This affects (at least) processing of stale buffers,
+ * inodes and EFIs.
+ */
+ xfs_trans_free_items(tp, *commit_lsn, 0);
+
+ /* check for background commit before unlock */
+ if (log->l_cilp->xc_ctx->space_used > XLOG_CIL_SPACE_LIMIT(log))
+ push = 1;
+
+ up_read(&log->l_cilp->xc_ctx_lock);
+
+ /*
+ * We need to push CIL every so often so we don't cache more than we
+ * can fit in the log. The limit really is that a checkpoint can't be
+ * more than half the log (the current checkpoint is not allowed to
+ * overwrite the previous checkpoint), but commit latency and memory
+ * usage limit this to a smaller size in most cases.
+ */
+ if (push)
+ xlog_cil_push(log, 0);
+ return 0;
+}
+
+/*
* Conditionally push the CIL based on the sequence passed in.
*
* We only need to push if we haven't already pushed the sequence
@@ -639,39 +701,34 @@
* commit lsn is there. It'll be empty, so this is broken for now.
*/
xfs_lsn_t
-xlog_cil_push_lsn(
+xlog_cil_force_lsn(
struct log *log,
- xfs_lsn_t push_seq)
+ xfs_lsn_t sequence)
{
struct xfs_cil *cil = log->l_cilp;
struct xfs_cil_ctx *ctx;
xfs_lsn_t commit_lsn = NULLCOMMITLSN;
-restart:
- down_write(&cil->xc_ctx_lock);
- ASSERT(push_seq <= cil->xc_ctx->sequence);
+ ASSERT(sequence <= cil->xc_current_sequence);
- /* check to see if we need to force out the current context */
- if (push_seq == cil->xc_ctx->sequence) {
- up_write(&cil->xc_ctx_lock);
- xlog_cil_push(log, 1);
- goto restart;
- }
+ /*
+ * check to see if we need to force out the current context.
+ * xlog_cil_push() handles racing pushes for the same sequence,
+ * so no need to deal with it here.
+ */
+ if (sequence == cil->xc_current_sequence)
+ xlog_cil_push(log, sequence);
/*
* See if we can find a previous sequence still committing.
- * We can drop the flush lock as soon as we have the cil lock
- * because we are now only comparing contexts protected by
- * the cil lock.
- *
* We need to wait for all previous sequence commits to complete
* before allowing the force of push_seq to go ahead. Hence block
* on commits for those as well.
*/
+restart:
spin_lock(&cil->xc_cil_lock);
- up_write(&cil->xc_ctx_lock);
list_for_each_entry(ctx, &cil->xc_committing, committing) {
- if (ctx->sequence > push_seq)
+ if (ctx->sequence > sequence)
continue;
if (!ctx->commit_lsn) {
/*
@@ -681,7 +738,7 @@
sv_wait(&cil->xc_commit_wait, 0, &cil->xc_cil_lock, 0);
goto restart;
}
- if (ctx->sequence != push_seq)
+ if (ctx->sequence != sequence)
continue;
/* found it! */
commit_lsn = ctx->commit_lsn;
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 8c07261..edcdfe0 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -422,16 +422,17 @@
struct rw_semaphore xc_ctx_lock;
struct list_head xc_committing;
sv_t xc_commit_wait;
+ xfs_lsn_t xc_current_sequence;
};
/*
- * The amount of log space we should the CIL to aggregate is difficult to size.
- * Whatever we chose we have to make we can get a reservation for the log space
- * effectively, that it is large enough to capture sufficient relogging to
- * reduce log buffer IO significantly, but it is not too large for the log or
- * induces too much latency when writing out through the iclogs. We track both
- * space consumed and the number of vectors in the checkpoint context, so we
- * need to decide which to use for limiting.
+ * The amount of log space we allow the CIL to aggregate is difficult to size.
+ * Whatever we choose, we have to make sure we can get a reservation for the
+ * log space effectively, that it is large enough to capture sufficient
+ * relogging to reduce log buffer IO significantly, but it is not too large for
+ * the log or induces too much latency when writing out through the iclogs. We
+ * track both space consumed and the number of vectors in the checkpoint
+ * context, so we need to decide which to use for limiting.
*
* Every log buffer we write out during a push needs a header reserved, which
* is at least one sector and more for v2 logs. Hence we need a reservation of
@@ -458,16 +459,21 @@
* checkpoint transaction ticket is specific to the checkpoint context, rather
* than the CIL itself.
*
- * With dynamic reservations, we can basically make up arbitrary limits for the
- * checkpoint size so long as they don't violate any other size rules. Hence
- * the initial maximum size for the checkpoint transaction will be set to a
- * quarter of the log or 8MB, which ever is smaller. 8MB is an arbitrary limit
- * right now based on the latency of writing out a large amount of data through
- * the circular iclog buffers.
+ * With dynamic reservations, we can effectively make up arbitrary limits for
+ * the checkpoint size so long as they don't violate any other size rules.
+ * Recovery imposes a rule that no transaction exceed half the log, so we are
+ * limited by that. Furthermore, the log transaction reservation subsystem
+ * tries to keep 25% of the log free, so we need to keep below that limit or we
+ * risk running out of free log space to start any new transactions.
+ *
+ * In order to keep background CIL push efficient, we will set a lower
+ * threshold at which background pushing is attempted without blocking current
+ * transaction commits. A separate, higher bound defines when CIL pushes are
+ * enforced to ensure we stay within our maximum checkpoint size bounds, yet
+ * give us plenty of space for aggregation on large logs.
*/
-
-#define XLOG_CIL_SPACE_LIMIT(log) \
- (min((log->l_logsize >> 2), (8 * 1024 * 1024)))
+#define XLOG_CIL_SPACE_LIMIT(log) (log->l_logsize >> 3)
+#define XLOG_CIL_HARD_SPACE_LIMIT(log) (3 * (log->l_logsize >> 4))
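For a concrete feel for the new limits, here is a quick worked example (the 128MB log size is assumed for illustration, not taken from the patch):

	/* illustrative only: thresholds for an assumed 128MB log */
	int l_logsize = 128 * 1024 * 1024;

	int background = l_logsize >> 3;	/* XLOG_CIL_SPACE_LIMIT: 16MB */
	int hard = 3 * (l_logsize >> 4);	/* XLOG_CIL_HARD_SPACE_LIMIT: 24MB */

The hard limit lands at 18.75% of the log, comfortably below both the half-the-log recovery rule and the 25% free-space target mentioned above.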
/*
* The reservation head lsn is not made up of a cycle number and block number.
@@ -562,8 +568,16 @@
void xlog_cil_init_post_recovery(struct log *log);
void xlog_cil_destroy(struct log *log);
-int xlog_cil_push(struct log *log, int push_now);
-xfs_lsn_t xlog_cil_push_lsn(struct log *log, xfs_lsn_t push_sequence);
+/*
+ * CIL force routines
+ */
+xfs_lsn_t xlog_cil_force_lsn(struct log *log, xfs_lsn_t sequence);
+
+static inline void
+xlog_cil_force(struct log *log)
+{
+ xlog_cil_force_lsn(log, log->l_cilp->xc_current_sequence);
+}
/*
* Unmount record type is used as a pseudo transaction type for the ticket.
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index fdca741..1c47eda 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -1167,7 +1167,7 @@
* Unlock all of the items of a transaction and free all the descriptors
* of that transaction.
*/
-STATIC void
+void
xfs_trans_free_items(
struct xfs_trans *tp,
xfs_lsn_t commit_lsn,
@@ -1653,9 +1653,6 @@
return error;
current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
-
- /* xfs_trans_free_items() unlocks them first */
- xfs_trans_free_items(tp, *commit_lsn, 0);
xfs_trans_free(tp);
return 0;
}
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index e2d93d8..62da86c 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -25,7 +25,8 @@
void xfs_trans_add_item(struct xfs_trans *, struct xfs_log_item *);
void xfs_trans_del_item(struct xfs_log_item *);
-
+void xfs_trans_free_items(struct xfs_trans *tp, xfs_lsn_t commit_lsn,
+ int flags);
void xfs_trans_item_committed(struct xfs_log_item *lip,
xfs_lsn_t commit_lsn, int aborted);
void xfs_trans_unreserve_and_mod_sb(struct xfs_trans *tp);
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 66d585c..4c7c7bf 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -2299,15 +2299,22 @@
e = allocatesize_fsb;
}
+ /*
+ * The transaction reservation is limited to a 32-bit block
+ * count, hence we need to limit the number of blocks we are
+ * trying to reserve to avoid an overflow. We can't allocate
+ * more than @nimaps extents, and an extent is limited on disk
+ * to MAXEXTLEN (21 bits), so use that to enforce the limit.
+ */
+ resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
if (unlikely(rt)) {
- resrtextents = qblocks = (uint)(e - s);
+ resrtextents = qblocks = resblks;
resrtextents /= mp->m_sb.sb_rextsize;
resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
quota_flag = XFS_QMOPT_RES_RTBLKS;
} else {
resrtextents = 0;
- resblks = qblocks = \
- XFS_DIOSTRAT_SPACE_RES(mp, (uint)(e - s));
+ resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
quota_flag = XFS_QMOPT_RES_REGBLKS;
}
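To make the clamp above concrete, a sketch with assumed values (not from the patch): MAXEXTLEN is roughly 2^21 blocks, so even a huge request is capped well inside a 32-bit count:

	/* illustrative values: a request that would overflow a 32-bit count */
	xfs_fileoff_t s = 0, e = (xfs_fileoff_t)1 << 32;
	int nimaps = 2;
	xfs_fileoff_t resblks;

	resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
	/* resblks is now at most 2 * MAXEXTLEN (~4.2M blocks) */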
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index baacd98..4de84ce 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -377,9 +377,6 @@
u32 osc_support_set; /* _OSC state of support bits */
u32 osc_control_set; /* _OSC state of control bits */
- u32 osc_control_qry; /* the latest _OSC query result */
-
- u32 osc_queried:1; /* has _OSC control been queried? */
};
/* helper */
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index c0786d4..984cdc6 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -55,7 +55,7 @@
extern u8 acpi_gbl_permanent_mmap;
/*
- * Globals that are publically available, allowing for
+ * Globals that are publicly available, allowing for
* run time configuration
*/
extern u32 acpi_dbg_level;
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index c7376bf..8ca18e2 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -16,15 +16,27 @@
* While the GPIO programming interface defines valid GPIO numbers
* to be in the range 0..MAX_INT, this library restricts them to the
* smaller range 0..ARCH_NR_GPIOS-1.
+ *
+ * ARCH_NR_GPIOS is somewhat arbitrary; it usually reflects the sum of
+ * builtin/SoC GPIOs plus a number of GPIOs on expanders; the latter is
+ * actually an estimate of a board-specific value.
*/
#ifndef ARCH_NR_GPIOS
#define ARCH_NR_GPIOS 256
#endif
+/*
+ * "valid" GPIO numbers are nonnegative and may be passed to
+ * setup routines like gpio_request(). Only some valid numbers
+ * can successfully be requested and used.
+ *
+ * Invalid GPIO numbers are useful for indicating no-such-GPIO in
+ * platform data and other tables.
+ */
+
static inline int gpio_is_valid(int number)
{
- /* only some non-negative numbers are valid */
return ((unsigned)number) < ARCH_NR_GPIOS;
}
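A minimal usage sketch of the convention the new comment describes (pdata->reset_gpio is a hypothetical platform-data field):

	/* platform data may use a negative number to mean "no GPIO wired up" */
	if (gpio_is_valid(pdata->reset_gpio)) {
		err = gpio_request(pdata->reset_gpio, "reset");
		if (err)
			return err;	/* valid number, but the request can still fail */
	}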
diff --git a/include/asm-generic/hardirq.h b/include/asm-generic/hardirq.h
index 62f5908..04d0a97 100644
--- a/include/asm-generic/hardirq.h
+++ b/include/asm-generic/hardirq.h
@@ -3,13 +3,13 @@
#include <linux/cache.h>
#include <linux/threads.h>
-#include <linux/irq.h>
typedef struct {
unsigned int __softirq_pending;
} ____cacheline_aligned irq_cpustat_t;
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+#include <linux/irq.h>
#ifndef ack_bad_irq
static inline void ack_bad_irq(unsigned int irq)
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index b5043a9..08923b6 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -70,11 +70,16 @@
#else /* ! SMP */
-#define per_cpu(var, cpu) (*((void)(cpu), &(var)))
-#define __get_cpu_var(var) (var)
-#define __raw_get_cpu_var(var) (var)
-#define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
-#define __this_cpu_ptr(ptr) this_cpu_ptr(ptr)
+#define VERIFY_PERCPU_PTR(__p) ({ \
+ __verify_pcpu_ptr((__p)); \
+ (typeof(*(__p)) __kernel __force *)(__p); \
+})
+
+#define per_cpu(var, cpu) (*((void)(cpu), VERIFY_PERCPU_PTR(&(var))))
+#define __get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var)))
+#define __raw_get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var)))
+#define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
+#define __this_cpu_ptr(ptr) this_cpu_ptr(ptr)
#endif /* SMP */
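For context, the UP definitions above are exercised exactly like their SMP counterparts; a minimal sketch (the variable name is illustrative):

	DEFINE_PER_CPU(int, foo_count);

	/* identical source on SMP and UP; on UP, VERIFY_PERCPU_PTR still
	 * lets sparse catch a non-percpu pointer being passed in */
	per_cpu(foo_count, smp_processor_id())++;
	__get_cpu_var(foo_count) = 0;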
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 8a92a17..ef2af99 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -220,6 +220,8 @@
\
BUG_TABLE \
\
+ JUMP_TABLE \
+ \
/* PCI quirks */ \
.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
@@ -563,6 +565,14 @@
#define BUG_TABLE
#endif
+#define JUMP_TABLE \
+ . = ALIGN(8); \
+ __jump_table : AT(ADDR(__jump_table) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___jump_table) = .; \
+ *(__jump_table) \
+ VMLINUX_SYMBOL(__stop___jump_table) = .; \
+ }
+
#ifdef CONFIG_PM_TRACE
#define TRACEDATA \
. = ALIGN(4); \
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 2a512bc..4c9461a 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -305,14 +305,16 @@
unsigned int cmd;
int flags;
drm_ioctl_t *func;
+ unsigned int cmd_drv;
};
/**
* Creates a driver or general drm_ioctl_desc array entry for the given
* ioctl, for use by drm_ioctl().
*/
-#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
- [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags}
+
+#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \
+ [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl}
struct drm_magic_entry {
struct list_head head;
@@ -610,7 +612,7 @@
struct kref refcount;
/** Handle count of this object. Each handle also holds a reference */
- struct kref handlecount;
+ atomic_t handle_count; /* number of handles on this object */
/** Related drm device */
struct drm_device *dev;
@@ -806,7 +808,6 @@
*/
int (*gem_init_object) (struct drm_gem_object *obj);
void (*gem_free_object) (struct drm_gem_object *obj);
- void (*gem_free_object_unlocked) (struct drm_gem_object *obj);
/* vga arb irq handler */
void (*vgaarb_irq)(struct drm_device *dev, bool state);
@@ -1173,6 +1174,7 @@
extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma);
extern void drm_vm_open_locked(struct vm_area_struct *vma);
+extern void drm_vm_close_locked(struct vm_area_struct *vma);
extern resource_size_t drm_core_get_map_ofs(struct drm_local_map * map);
extern resource_size_t drm_core_get_reg_ofs(struct drm_device *dev);
extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
@@ -1453,12 +1455,11 @@
void drm_gem_destroy(struct drm_device *dev);
void drm_gem_object_release(struct drm_gem_object *obj);
void drm_gem_object_free(struct kref *kref);
-void drm_gem_object_free_unlocked(struct kref *kref);
struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
size_t size);
int drm_gem_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size);
-void drm_gem_object_handle_free(struct kref *kref);
+void drm_gem_object_handle_free(struct drm_gem_object *obj);
void drm_gem_vm_open(struct vm_area_struct *vma);
void drm_gem_vm_close(struct vm_area_struct *vma);
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
@@ -1481,8 +1482,12 @@
static inline void
drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
{
- if (obj != NULL)
- kref_put(&obj->refcount, drm_gem_object_free_unlocked);
+ if (obj != NULL) {
+ struct drm_device *dev = obj->dev;
+ mutex_lock(&dev->struct_mutex);
+ kref_put(&obj->refcount, drm_gem_object_free);
+ mutex_unlock(&dev->struct_mutex);
+ }
}
int drm_gem_handle_create(struct drm_file *file_priv,
@@ -1493,7 +1498,7 @@
drm_gem_object_handle_reference(struct drm_gem_object *obj)
{
drm_gem_object_reference(obj);
- kref_get(&obj->handlecount);
+ atomic_inc(&obj->handle_count);
}
static inline void
@@ -1502,12 +1507,15 @@
if (obj == NULL)
return;
+ if (atomic_read(&obj->handle_count) == 0)
+ return;
/*
* Must bump handle count first as this may be the last
* ref, in which case the object would disappear before we
* checked for a name
*/
- kref_put(&obj->handlecount, drm_gem_object_handle_free);
+ if (atomic_dec_and_test(&obj->handle_count))
+ drm_gem_object_handle_free(obj);
drm_gem_object_unreference(obj);
}
@@ -1517,12 +1525,17 @@
if (obj == NULL)
return;
+ if (atomic_read(&obj->handle_count) == 0)
+ return;
+
/*
* Must bump handle count first as this may be the last
* ref, in which case the object would disappear before we
* checked for a name
*/
- kref_put(&obj->handlecount, drm_gem_object_handle_free);
+
+ if (atomic_dec_and_test(&obj->handle_count))
+ drm_gem_object_handle_free(obj);
drm_gem_object_unreference_unlocked(obj);
}
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index c9f3cc5..3e5a51a 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -386,7 +386,15 @@
void (*dpms)(struct drm_connector *connector, int mode);
void (*save)(struct drm_connector *connector);
void (*restore)(struct drm_connector *connector);
- enum drm_connector_status (*detect)(struct drm_connector *connector);
+
+ /* Check to see if anything is attached to the connector.
+ * @force is set to false whilst polling, true when checking the
+ * connector due to user request. @force can be used by the driver
+ * to avoid expensive, destructive operations during automated
+ * probing.
+ */
+ enum drm_connector_status (*detect)(struct drm_connector *connector,
+ bool force);
int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height);
int (*set_property)(struct drm_connector *connector, struct drm_property *property,
uint64_t val);
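A hedged sketch of how a driver might honour @force (the probe helpers are hypothetical):

	static enum drm_connector_status
	foo_detect(struct drm_connector *connector, bool force)
	{
		if (foo_edid_probe(connector))	/* cheap, always allowed */
			return connector_status_connected;
		if (!force)			/* polling: skip expensive load detection */
			return connector_status_unknown;
		return foo_load_detect(connector); /* explicit user request */
	}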
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 3a9940e..883c1d4 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -85,7 +85,6 @@
{0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
{0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
{0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
- {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
@@ -103,6 +102,7 @@
{0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \
{0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
{0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
diff --git a/include/drm/i830_drm.h b/include/drm/i830_drm.h
index 4b00d2d..61315c2 100644
--- a/include/drm/i830_drm.h
+++ b/include/drm/i830_drm.h
@@ -264,20 +264,20 @@
#define DRM_I830_GETPARAM 0x0c
#define DRM_I830_SETPARAM 0x0d
-#define DRM_IOCTL_I830_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_INIT, drm_i830_init_t)
-#define DRM_IOCTL_I830_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_VERTEX, drm_i830_vertex_t)
-#define DRM_IOCTL_I830_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_CLEAR, drm_i830_clear_t)
-#define DRM_IOCTL_I830_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_FLUSH)
-#define DRM_IOCTL_I830_GETAGE DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_GETAGE)
-#define DRM_IOCTL_I830_GETBUF DRM_IOWR(DRM_COMMAND_BASE + DRM_IOCTL_I830_GETBUF, drm_i830_dma_t)
-#define DRM_IOCTL_I830_SWAP DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_SWAP)
-#define DRM_IOCTL_I830_COPY DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_COPY, drm_i830_copy_t)
-#define DRM_IOCTL_I830_DOCOPY DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_DOCOPY)
-#define DRM_IOCTL_I830_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_FLIP)
-#define DRM_IOCTL_I830_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_IOCTL_I830_IRQ_EMIT, drm_i830_irq_emit_t)
-#define DRM_IOCTL_I830_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_IRQ_WAIT, drm_i830_irq_wait_t)
-#define DRM_IOCTL_I830_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_IOCTL_I830_GETPARAM, drm_i830_getparam_t)
-#define DRM_IOCTL_I830_SETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_IOCTL_I830_SETPARAM, drm_i830_setparam_t)
+#define DRM_IOCTL_I830_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I830_INIT, drm_i830_init_t)
+#define DRM_IOCTL_I830_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_I830_VERTEX, drm_i830_vertex_t)
+#define DRM_IOCTL_I830_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_I830_CLEAR, drm_i830_clear_t)
+#define DRM_IOCTL_I830_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I830_FLUSH)
+#define DRM_IOCTL_I830_GETAGE DRM_IO ( DRM_COMMAND_BASE + DRM_I830_GETAGE)
+#define DRM_IOCTL_I830_GETBUF DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_GETBUF, drm_i830_dma_t)
+#define DRM_IOCTL_I830_SWAP DRM_IO ( DRM_COMMAND_BASE + DRM_I830_SWAP)
+#define DRM_IOCTL_I830_COPY DRM_IOW( DRM_COMMAND_BASE + DRM_I830_COPY, drm_i830_copy_t)
+#define DRM_IOCTL_I830_DOCOPY DRM_IO ( DRM_COMMAND_BASE + DRM_I830_DOCOPY)
+#define DRM_IOCTL_I830_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I830_FLIP)
+#define DRM_IOCTL_I830_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_IRQ_EMIT, drm_i830_irq_emit_t)
+#define DRM_IOCTL_I830_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I830_IRQ_WAIT, drm_i830_irq_wait_t)
+#define DRM_IOCTL_I830_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_GETPARAM, drm_i830_getparam_t)
+#define DRM_IOCTL_I830_SETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_SETPARAM, drm_i830_setparam_t)
typedef struct _drm_i830_clear {
int clear_color;
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 8f8b072..e41c74f 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -215,6 +215,7 @@
#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
+#define DRM_IOCTL_I915_HWS_ADDR DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
diff --git a/include/drm/mga_drm.h b/include/drm/mga_drm.h
index 3ffbc47..c16097f 100644
--- a/include/drm/mga_drm.h
+++ b/include/drm/mga_drm.h
@@ -248,7 +248,7 @@
#define DRM_MGA_DMA_BOOTSTRAP 0x0c
#define DRM_IOCTL_MGA_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t)
-#define DRM_IOCTL_MGA_FLUSH DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, drm_lock_t)
+#define DRM_IOCTL_MGA_FLUSH DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, struct drm_lock)
#define DRM_IOCTL_MGA_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MGA_RESET)
#define DRM_IOCTL_MGA_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_MGA_SWAP)
#define DRM_IOCTL_MGA_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_CLEAR, drm_mga_clear_t)
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h
index fe917de..01a7141 100644
--- a/include/drm/nouveau_drm.h
+++ b/include/drm/nouveau_drm.h
@@ -197,4 +197,17 @@
#define DRM_NOUVEAU_GEM_CPU_FINI 0x43
#define DRM_NOUVEAU_GEM_INFO 0x44
+#define DRM_IOCTL_NOUVEAU_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam)
+#define DRM_IOCTL_NOUVEAU_SETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SETPARAM, struct drm_nouveau_setparam)
+#define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc)
+#define DRM_IOCTL_NOUVEAU_CHANNEL_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free)
+#define DRM_IOCTL_NOUVEAU_GROBJ_ALLOC DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GROBJ_ALLOC, struct drm_nouveau_grobj_alloc)
+#define DRM_IOCTL_NOUVEAU_NOTIFIEROBJ_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, struct drm_nouveau_notifierobj_alloc)
+#define DRM_IOCTL_NOUVEAU_GPUOBJ_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GPUOBJ_FREE, struct drm_nouveau_gpuobj_free)
+#define DRM_IOCTL_NOUVEAU_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_NEW, struct drm_nouveau_gem_new)
+#define DRM_IOCTL_NOUVEAU_GEM_PUSHBUF DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_PUSHBUF, struct drm_nouveau_gem_pushbuf)
+#define DRM_IOCTL_NOUVEAU_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_PREP, struct drm_nouveau_gem_cpu_prep)
+#define DRM_IOCTL_NOUVEAU_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_FINI, struct drm_nouveau_gem_cpu_fini)
+#define DRM_IOCTL_NOUVEAU_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_INFO, struct drm_nouveau_gem_info)
+
#endif /* __NOUVEAU_DRM_H__ */
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index 0acaf8f..10f8b53 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -547,8 +547,8 @@
#define DRM_IOCTL_RADEON_GEM_WAIT_IDLE DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_GEM_WAIT_IDLE, struct drm_radeon_gem_wait_idle)
#define DRM_IOCTL_RADEON_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs)
#define DRM_IOCTL_RADEON_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INFO, struct drm_radeon_info)
-#define DRM_IOCTL_RADEON_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling)
-#define DRM_IOCTL_RADEON_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling)
+#define DRM_IOCTL_RADEON_GEM_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling)
+#define DRM_IOCTL_RADEON_GEM_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling)
#define DRM_IOCTL_RADEON_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy)
typedef struct drm_radeon_init {
diff --git a/include/drm/savage_drm.h b/include/drm/savage_drm.h
index 8a576ef..4863cf6 100644
--- a/include/drm/savage_drm.h
+++ b/include/drm/savage_drm.h
@@ -63,10 +63,10 @@
#define DRM_SAVAGE_BCI_EVENT_EMIT 0x02
#define DRM_SAVAGE_BCI_EVENT_WAIT 0x03
-#define DRM_IOCTL_SAVAGE_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t)
-#define DRM_IOCTL_SAVAGE_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t)
-#define DRM_IOCTL_SAVAGE_EVENT_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t)
-#define DRM_IOCTL_SAVAGE_EVENT_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t)
+#define DRM_IOCTL_SAVAGE_BCI_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t)
+#define DRM_IOCTL_SAVAGE_BCI_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t)
+#define DRM_IOCTL_SAVAGE_BCI_EVENT_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t)
+#define DRM_IOCTL_SAVAGE_BCI_EVENT_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t)
#define SAVAGE_DMA_PCI 1
#define SAVAGE_DMA_AGP 3
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 267a86c..2040e6c 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -246,9 +246,11 @@
atomic_t reserved;
-
/**
* Members protected by the bo::lock
+	 * In addition, setting sync_obj to anything other
+	 * than NULL requires bo::reserved to be held. This allows for
+ * checking NULL while reserved but not holding bo::lock.
*/
void *sync_obj_arg;
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 626b629..4e8ea8c 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -118,7 +118,6 @@
header-y += ext2_fs.h
header-y += fadvise.h
header-y += falloc.h
-header-y += fanotify.h
header-y += fb.h
header-y += fcntl.h
header-y += fd.h
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index ccf94dc..c227757 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -304,8 +304,8 @@
OSC_PCI_EXPRESS_PME_CONTROL | \
OSC_PCI_EXPRESS_AER_CONTROL | \
OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL)
-
-extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags);
+extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
+ u32 *mask, u32 req);
extern void acpi_early_init(void);
#else /* !CONFIG_ACPI */
diff --git a/include/linux/acpi_pmtmr.h b/include/linux/acpi_pmtmr.h
index 7e3d285..1d0ef1a 100644
--- a/include/linux/acpi_pmtmr.h
+++ b/include/linux/acpi_pmtmr.h
@@ -25,8 +25,6 @@
return acpi_pm_read_verified() & ACPI_PM_MASK;
}
-extern void pmtimer_wait(unsigned);
-
#else
static inline u32 acpi_pm_read_early(void)
diff --git a/fs/ceph/auth.h b/include/linux/ceph/auth.h
similarity index 97%
rename from fs/ceph/auth.h
rename to include/linux/ceph/auth.h
index d38a2fb..7fff521 100644
--- a/fs/ceph/auth.h
+++ b/include/linux/ceph/auth.h
@@ -1,8 +1,8 @@
#ifndef _FS_CEPH_AUTH_H
#define _FS_CEPH_AUTH_H
-#include "types.h"
-#include "buffer.h"
+#include <linux/ceph/types.h>
+#include <linux/ceph/buffer.h>
/*
* Abstract interface for communicating with the authenticate module.
diff --git a/fs/ceph/buffer.h b/include/linux/ceph/buffer.h
similarity index 100%
rename from fs/ceph/buffer.h
rename to include/linux/ceph/buffer.h
diff --git a/fs/ceph/ceph_debug.h b/include/linux/ceph/ceph_debug.h
similarity index 86%
rename from fs/ceph/ceph_debug.h
rename to include/linux/ceph/ceph_debug.h
index 1818c23..aa2e191 100644
--- a/fs/ceph/ceph_debug.h
+++ b/include/linux/ceph/ceph_debug.h
@@ -3,7 +3,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#ifdef CONFIG_CEPH_FS_PRETTYDEBUG
+#ifdef CONFIG_CEPH_LIB_PRETTYDEBUG
/*
* wrap pr_debug to include a filename:lineno prefix on each line.
@@ -14,7 +14,8 @@
# if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
extern const char *ceph_file_part(const char *s, int len);
# define dout(fmt, ...) \
- pr_debug(" %12.12s:%-4d : " fmt, \
+ pr_debug("%.*s %12.12s:%-4d : " fmt, \
+ 8 - (int)sizeof(KBUILD_MODNAME), " ", \
ceph_file_part(__FILE__, sizeof(__FILE__)), \
__LINE__, ##__VA_ARGS__)
# else
diff --git a/fs/ceph/ceph_frag.h b/include/linux/ceph/ceph_frag.h
similarity index 100%
rename from fs/ceph/ceph_frag.h
rename to include/linux/ceph/ceph_frag.h
diff --git a/fs/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
similarity index 99%
rename from fs/ceph/ceph_fs.h
rename to include/linux/ceph/ceph_fs.h
index d5619ac..c3c74ae 100644
--- a/fs/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -299,6 +299,7 @@
CEPH_MDS_OP_SETATTR = 0x01108,
CEPH_MDS_OP_SETFILELOCK= 0x01109,
CEPH_MDS_OP_GETFILELOCK= 0x00110,
+ CEPH_MDS_OP_SETDIRLAYOUT=0x0110a,
CEPH_MDS_OP_MKNOD = 0x01201,
CEPH_MDS_OP_LINK = 0x01202,
diff --git a/fs/ceph/ceph_hash.h b/include/linux/ceph/ceph_hash.h
similarity index 100%
rename from fs/ceph/ceph_hash.h
rename to include/linux/ceph/ceph_hash.h
diff --git a/include/linux/ceph/debugfs.h b/include/linux/ceph/debugfs.h
new file mode 100644
index 0000000..2a79702
--- /dev/null
+++ b/include/linux/ceph/debugfs.h
@@ -0,0 +1,33 @@
+#ifndef _FS_CEPH_DEBUGFS_H
+#define _FS_CEPH_DEBUGFS_H
+
+#include "ceph_debug.h"
+#include "types.h"
+
+#define CEPH_DEFINE_SHOW_FUNC(name) \
+static int name##_open(struct inode *inode, struct file *file) \
+{ \
+ struct seq_file *sf; \
+ int ret; \
+ \
+ ret = single_open(file, name, NULL); \
+ sf = file->private_data; \
+ sf->private = inode->i_private; \
+ return ret; \
+} \
+ \
+static const struct file_operations name##_fops = { \
+ .open = name##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+};
+
+/* debugfs.c */
+extern int ceph_debugfs_init(void);
+extern void ceph_debugfs_cleanup(void);
+extern int ceph_debugfs_client_init(struct ceph_client *client);
+extern void ceph_debugfs_client_cleanup(struct ceph_client *client);
+
+#endif
+
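Illustrative use of the helper macro above (the monc_show name and body are hypothetical):

	static int monc_show(struct seq_file *s, void *p)
	{
		seq_printf(s, "monc state goes here\n");	/* placeholder */
		return 0;
	}
	CEPH_DEFINE_SHOW_FUNC(monc_show)	/* emits monc_show_open + monc_show_fops */

	/* then: debugfs_create_file("monc", 0600, dir, client, &monc_show_fops); */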
diff --git a/fs/ceph/decode.h b/include/linux/ceph/decode.h
similarity index 96%
rename from fs/ceph/decode.h
rename to include/linux/ceph/decode.h
index 3d25415..c5b6939 100644
--- a/fs/ceph/decode.h
+++ b/include/linux/ceph/decode.h
@@ -191,6 +191,11 @@
ceph_encode_need(p, end, n, bad); \
ceph_encode_copy(p, pv, n); \
} while (0)
+#define ceph_encode_string_safe(p, end, s, n, bad) \
+ do { \
+ ceph_encode_need(p, end, n, bad); \
+ ceph_encode_string(p, end, s, n); \
+ } while (0)
#endif
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
new file mode 100644
index 0000000..f22b2e9
--- /dev/null
+++ b/include/linux/ceph/libceph.h
@@ -0,0 +1,249 @@
+#ifndef _FS_CEPH_LIBCEPH_H
+#define _FS_CEPH_LIBCEPH_H
+
+#include "ceph_debug.h"
+
+#include <asm/unaligned.h>
+#include <linux/backing-dev.h>
+#include <linux/completion.h>
+#include <linux/exportfs.h>
+#include <linux/fs.h>
+#include <linux/mempool.h>
+#include <linux/pagemap.h>
+#include <linux/wait.h>
+#include <linux/writeback.h>
+#include <linux/slab.h>
+
+#include "types.h"
+#include "messenger.h"
+#include "msgpool.h"
+#include "mon_client.h"
+#include "osd_client.h"
+#include "ceph_fs.h"
+
+/*
+ * Supported features
+ */
+#define CEPH_FEATURE_SUPPORTED_DEFAULT CEPH_FEATURE_NOSRCADDR
+#define CEPH_FEATURE_REQUIRED_DEFAULT CEPH_FEATURE_NOSRCADDR
+
+/*
+ * mount options
+ */
+#define CEPH_OPT_FSID (1<<0)
+#define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */
+#define CEPH_OPT_MYIP (1<<2) /* specified my ip */
+#define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes */
+
+#define CEPH_OPT_DEFAULT (0)
+
+#define ceph_set_opt(client, opt) \
+ (client)->options->flags |= CEPH_OPT_##opt;
+#define ceph_test_opt(client, opt) \
+ (!!((client)->options->flags & CEPH_OPT_##opt))
+
+struct ceph_options {
+ int flags;
+ struct ceph_fsid fsid;
+ struct ceph_entity_addr my_addr;
+ int mount_timeout;
+ int osd_idle_ttl;
+ int osd_timeout;
+ int osd_keepalive_timeout;
+
+ /*
+	 * any type that can't be simply compared or doesn't need
+ * to be compared should go beyond this point,
+ * ceph_compare_options() should be updated accordingly
+ */
+
+ struct ceph_entity_addr *mon_addr; /* should be the first
+ pointer type of args */
+ int num_mon;
+ char *name;
+ char *secret;
+};
+
+/*
+ * defaults
+ */
+#define CEPH_MOUNT_TIMEOUT_DEFAULT 60
+#define CEPH_OSD_TIMEOUT_DEFAULT 60 /* seconds */
+#define CEPH_OSD_KEEPALIVE_DEFAULT 5
+#define CEPH_OSD_IDLE_TTL_DEFAULT 60
+#define CEPH_MOUNT_RSIZE_DEFAULT (512*1024) /* readahead */
+
+#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
+#define CEPH_MSG_MAX_DATA_LEN (16*1024*1024)
+
+#define CEPH_AUTH_NAME_DEFAULT "guest"
+
+/*
+ * Delay telling the MDS we no longer want caps, in case we reopen
+ * the file. Delay a minimum amount of time, even if we send a cap
+ * message for some other reason. Otherwise, take the opportunity to
+ * update the mds to avoid sending another message later.
+ */
+#define CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT 5 /* cap release delay */
+#define CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT 60 /* cap release delay */
+
+#define CEPH_CAP_RELEASE_SAFETY_DEFAULT (CEPH_CAPS_PER_RELEASE * 4)
+
+/* mount state */
+enum {
+ CEPH_MOUNT_MOUNTING,
+ CEPH_MOUNT_MOUNTED,
+ CEPH_MOUNT_UNMOUNTING,
+ CEPH_MOUNT_UNMOUNTED,
+ CEPH_MOUNT_SHUTDOWN,
+};
+
+/*
+ * subtract jiffies
+ */
+static inline unsigned long time_sub(unsigned long a, unsigned long b)
+{
+ BUG_ON(time_after(b, a));
+ return (long)a - (long)b;
+}
+
+struct ceph_mds_client;
+
+/*
+ * per client state
+ *
+ * possibly shared by multiple mount points, if they are
+ * mounting the same ceph filesystem/cluster.
+ */
+struct ceph_client {
+ struct ceph_fsid fsid;
+ bool have_fsid;
+
+ void *private;
+
+ struct ceph_options *options;
+
+ struct mutex mount_mutex; /* serialize mount attempts */
+ wait_queue_head_t auth_wq;
+ int auth_err;
+
+ int (*extra_mon_dispatch)(struct ceph_client *, struct ceph_msg *);
+
+ u32 supported_features;
+ u32 required_features;
+
+ struct ceph_messenger *msgr; /* messenger instance */
+ struct ceph_mon_client monc;
+ struct ceph_osd_client osdc;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_dir;
+ struct dentry *debugfs_monmap;
+ struct dentry *debugfs_osdmap;
+#endif
+};
+
+
+
+/*
+ * snapshots
+ */
+
+/*
+ * A "snap context" is the set of existing snapshots when we
+ * write data. It is used by the OSD to guide its COW behavior.
+ *
+ * The ceph_snap_context is refcounted, and attached to each dirty
+ * page, indicating which context the dirty data belonged to when it was
+ * dirtied.
+ */
+struct ceph_snap_context {
+ atomic_t nref;
+ u64 seq;
+ int num_snaps;
+ u64 snaps[];
+};
+
+static inline struct ceph_snap_context *
+ceph_get_snap_context(struct ceph_snap_context *sc)
+{
+ /*
+ printk("get_snap_context %p %d -> %d\n", sc, atomic_read(&sc->nref),
+ atomic_read(&sc->nref)+1);
+ */
+ if (sc)
+ atomic_inc(&sc->nref);
+ return sc;
+}
+
+static inline void ceph_put_snap_context(struct ceph_snap_context *sc)
+{
+ if (!sc)
+ return;
+ /*
+ printk("put_snap_context %p %d -> %d\n", sc, atomic_read(&sc->nref),
+ atomic_read(&sc->nref)-1);
+ */
+ if (atomic_dec_and_test(&sc->nref)) {
+ /*printk(" deleting snap_context %p\n", sc);*/
+ kfree(sc);
+ }
+}
+
+/*
+ * calculate the number of pages a given length and offset map onto,
+ * if we align the data.
+ */
+static inline int calc_pages_for(u64 off, u64 len)
+{
+ return ((off+len+PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT) -
+ (off >> PAGE_CACHE_SHIFT);
+}
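A quick worked example of the helper above, assuming 4KB pages:

	/* off = 1000, len = 5000: the data covers bytes 1000..5999,
	 * i.e. pages 0 and 1 of the object */
	calc_pages_for(1000, 5000);
	/* = ((1000 + 5000 + 4095) >> 12) - (1000 >> 12) = 2 - 0 = 2 */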
+
+/* ceph_common.c */
+extern const char *ceph_msg_type_name(int type);
+extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid);
+extern struct kmem_cache *ceph_inode_cachep;
+extern struct kmem_cache *ceph_cap_cachep;
+extern struct kmem_cache *ceph_dentry_cachep;
+extern struct kmem_cache *ceph_file_cachep;
+
+extern int ceph_parse_options(struct ceph_options **popt, char *options,
+ const char *dev_name, const char *dev_name_end,
+ int (*parse_extra_token)(char *c, void *private),
+ void *private);
+extern void ceph_destroy_options(struct ceph_options *opt);
+extern int ceph_compare_options(struct ceph_options *new_opt,
+ struct ceph_client *client);
+extern struct ceph_client *ceph_create_client(struct ceph_options *opt,
+ void *private);
+extern u64 ceph_client_id(struct ceph_client *client);
+extern void ceph_destroy_client(struct ceph_client *client);
+extern int __ceph_open_session(struct ceph_client *client,
+ unsigned long started);
+extern int ceph_open_session(struct ceph_client *client);
+
+/* pagevec.c */
+extern void ceph_release_page_vector(struct page **pages, int num_pages);
+
+extern struct page **ceph_get_direct_page_vector(const char __user *data,
+ int num_pages,
+ loff_t off, size_t len);
+extern void ceph_put_page_vector(struct page **pages, int num_pages);
+extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);
+extern int ceph_copy_user_to_page_vector(struct page **pages,
+ const char __user *data,
+ loff_t off, size_t len);
+extern int ceph_copy_to_page_vector(struct page **pages,
+ const char *data,
+ loff_t off, size_t len);
+extern int ceph_copy_from_page_vector(struct page **pages,
+ char *data,
+ loff_t off, size_t len);
+extern int ceph_copy_page_vector_to_user(struct page **pages, char __user *data,
+ loff_t off, size_t len);
+extern void ceph_zero_page_vector_range(int off, int len, struct page **pages);
+
+
+#endif /* _FS_CEPH_LIBCEPH_H */
diff --git a/fs/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h
similarity index 100%
rename from fs/ceph/mdsmap.h
rename to include/linux/ceph/mdsmap.h
diff --git a/fs/ceph/messenger.h b/include/linux/ceph/messenger.h
similarity index 95%
rename from fs/ceph/messenger.h
rename to include/linux/ceph/messenger.h
index 76fbc95..5956d62 100644
--- a/fs/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -65,6 +65,9 @@
*/
u32 global_seq;
spinlock_t global_seq_lock;
+
+ u32 supported_features;
+ u32 required_features;
};
/*
@@ -82,6 +85,10 @@
struct ceph_pagelist *pagelist; /* instead of pages */
struct list_head list_head;
struct kref kref;
+ struct bio *bio; /* instead of pages/pagelist */
+ struct bio *bio_iter; /* bio iterator */
+ int bio_seg; /* current bio segment */
+ struct ceph_pagelist *trail; /* the trailing part of the data */
bool front_is_vmalloc;
bool more_to_follow;
bool needs_out_seq;
@@ -205,7 +212,7 @@
};
-extern const char *pr_addr(const struct sockaddr_storage *ss);
+extern const char *ceph_pr_addr(const struct sockaddr_storage *ss);
extern int ceph_parse_ips(const char *c, const char *end,
struct ceph_entity_addr *addr,
int max_count, int *count);
@@ -216,7 +223,8 @@
extern void ceph_msgr_flush(void);
extern struct ceph_messenger *ceph_messenger_create(
- struct ceph_entity_addr *myaddr);
+ struct ceph_entity_addr *myaddr,
+ u32 features, u32 required);
extern void ceph_messenger_destroy(struct ceph_messenger *);
extern void ceph_con_init(struct ceph_messenger *msgr,
diff --git a/fs/ceph/mon_client.h b/include/linux/ceph/mon_client.h
similarity index 99%
rename from fs/ceph/mon_client.h
rename to include/linux/ceph/mon_client.h
index 8e396f2..545f859 100644
--- a/fs/ceph/mon_client.h
+++ b/include/linux/ceph/mon_client.h
@@ -79,6 +79,7 @@
u64 last_tid;
/* mds/osd map */
+ int want_mdsmap;
int want_next_osdmap; /* 1 = want, 2 = want+asked */
u32 have_osdmap, have_mdsmap;
diff --git a/fs/ceph/msgpool.h b/include/linux/ceph/msgpool.h
similarity index 100%
rename from fs/ceph/msgpool.h
rename to include/linux/ceph/msgpool.h
diff --git a/fs/ceph/msgr.h b/include/linux/ceph/msgr.h
similarity index 100%
rename from fs/ceph/msgr.h
rename to include/linux/ceph/msgr.h
diff --git a/fs/ceph/osd_client.h b/include/linux/ceph/osd_client.h
similarity index 75%
rename from fs/ceph/osd_client.h
rename to include/linux/ceph/osd_client.h
index ce77698..6c91fb0 100644
--- a/fs/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -15,6 +15,7 @@
struct ceph_osd_request;
struct ceph_osd_client;
struct ceph_authorizer;
+struct ceph_pagelist;
/*
* completion callback for async writepages
@@ -68,6 +69,7 @@
struct list_head r_unsafe_item;
struct inode *r_inode; /* for use by callbacks */
+ void *r_priv; /* ditto */
char r_oid[40]; /* object name */
int r_oid_len;
@@ -80,6 +82,11 @@
struct page **r_pages; /* pages for data payload */
int r_pages_from_pool;
int r_own_pages; /* if true, i own page list */
+#ifdef CONFIG_BLOCK
+ struct bio *r_bio; /* instead of pages */
+#endif
+
+ struct ceph_pagelist *r_trail; /* trailing part of the data */
};
struct ceph_osd_client {
@@ -110,6 +117,42 @@
struct ceph_msgpool msgpool_op_reply;
};
+struct ceph_osd_req_op {
+ u16 op; /* CEPH_OSD_OP_* */
+ u32 flags; /* CEPH_OSD_FLAG_* */
+ union {
+ struct {
+ u64 offset, length;
+ u64 truncate_size;
+ u32 truncate_seq;
+ } extent;
+ struct {
+ const char *name;
+ u32 name_len;
+ const char *val;
+ u32 value_len;
+ __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */
+ __u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */
+ } xattr;
+ struct {
+ const char *class_name;
+ __u8 class_len;
+ const char *method_name;
+ __u8 method_len;
+ __u8 argc;
+ const char *indata;
+ u32 indata_len;
+ } cls;
+ struct {
+ u64 cookie, count;
+ } pgls;
+ struct {
+ u64 snapid;
+ } snap;
+ };
+ u32 payload_len;
+};
+
extern int ceph_osdc_init(struct ceph_osd_client *osdc,
struct ceph_client *client);
extern void ceph_osdc_stop(struct ceph_osd_client *osdc);
@@ -119,6 +162,30 @@
extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc,
struct ceph_msg *msg);
+extern void ceph_calc_raw_layout(struct ceph_osd_client *osdc,
+ struct ceph_file_layout *layout,
+ u64 snapid,
+ u64 off, u64 *plen, u64 *bno,
+ struct ceph_osd_request *req,
+ struct ceph_osd_req_op *op);
+
+extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
+ int flags,
+ struct ceph_snap_context *snapc,
+ struct ceph_osd_req_op *ops,
+ bool use_mempool,
+ gfp_t gfp_flags,
+ struct page **pages,
+ struct bio *bio);
+
+extern void ceph_osdc_build_request(struct ceph_osd_request *req,
+ u64 off, u64 *plen,
+ struct ceph_osd_req_op *src_ops,
+ struct ceph_snap_context *snapc,
+ struct timespec *mtime,
+ const char *oid,
+ int oid_len);
+
extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
struct ceph_file_layout *layout,
struct ceph_vino vino,
diff --git a/fs/ceph/osdmap.h b/include/linux/ceph/osdmap.h
similarity index 96%
rename from fs/ceph/osdmap.h
rename to include/linux/ceph/osdmap.h
index 970b547..ba4c205 100644
--- a/fs/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -4,7 +4,7 @@
#include <linux/rbtree.h>
#include "types.h"
#include "ceph_fs.h"
-#include "crush/crush.h"
+#include <linux/crush/crush.h>
/*
* The osd map describes the current membership of the osd cluster and
@@ -125,4 +125,6 @@
extern int ceph_calc_pg_primary(struct ceph_osdmap *osdmap,
struct ceph_pg pgid);
+extern int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name);
+
#endif
diff --git a/fs/ceph/pagelist.h b/include/linux/ceph/pagelist.h
similarity index 62%
rename from fs/ceph/pagelist.h
rename to include/linux/ceph/pagelist.h
index e8a4187..9660d6b 100644
--- a/fs/ceph/pagelist.h
+++ b/include/linux/ceph/pagelist.h
@@ -8,6 +8,14 @@
void *mapped_tail;
size_t length;
size_t room;
+ struct list_head free_list;
+ size_t num_pages_free;
+};
+
+struct ceph_pagelist_cursor {
+ struct ceph_pagelist *pl; /* pagelist, for error checking */
+ struct list_head *page_lru; /* page in list */
+ size_t room; /* room remaining to reset to */
};
static inline void ceph_pagelist_init(struct ceph_pagelist *pl)
@@ -16,10 +24,23 @@
pl->mapped_tail = NULL;
pl->length = 0;
pl->room = 0;
+ INIT_LIST_HEAD(&pl->free_list);
+ pl->num_pages_free = 0;
}
+
extern int ceph_pagelist_release(struct ceph_pagelist *pl);
-extern int ceph_pagelist_append(struct ceph_pagelist *pl, void *d, size_t l);
+extern int ceph_pagelist_append(struct ceph_pagelist *pl, const void *d, size_t l);
+
+extern int ceph_pagelist_reserve(struct ceph_pagelist *pl, size_t space);
+
+extern int ceph_pagelist_free_reserve(struct ceph_pagelist *pl);
+
+extern void ceph_pagelist_set_cursor(struct ceph_pagelist *pl,
+ struct ceph_pagelist_cursor *c);
+
+extern int ceph_pagelist_truncate(struct ceph_pagelist *pl,
+ struct ceph_pagelist_cursor *c);
static inline int ceph_pagelist_encode_64(struct ceph_pagelist *pl, u64 v)
{
diff --git a/fs/ceph/rados.h b/include/linux/ceph/rados.h
similarity index 100%
rename from fs/ceph/rados.h
rename to include/linux/ceph/rados.h
diff --git a/fs/ceph/types.h b/include/linux/ceph/types.h
similarity index 100%
rename from fs/ceph/types.h
rename to include/linux/ceph/types.h
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index ed3e92e..709dfb9 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -75,7 +75,7 @@
unsigned long flags;
/* ID for this css, if possible */
- struct css_id *id;
+ struct css_id __rcu *id;
};
/* bits in struct cgroup_subsys_state flags field */
@@ -205,7 +205,7 @@
struct list_head children; /* my children */
struct cgroup *parent; /* my parent */
- struct dentry *dentry; /* cgroup fs entry, RCU protected */
+ struct dentry __rcu *dentry; /* cgroup fs entry, RCU protected */
/* Private pointers for each registered subsystem */
struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
@@ -578,7 +578,12 @@
void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
int cgroup_scan_tasks(struct cgroup_scanner *scan);
int cgroup_attach_task(struct cgroup *, struct task_struct *);
-int cgroup_attach_task_current_cg(struct task_struct *);
+int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
+
+static inline int cgroup_attach_task_current_cg(struct task_struct *tsk)
+{
+ return cgroup_attach_task_all(current, tsk);
+}
/*
* CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
@@ -636,6 +641,11 @@
}
/* No cgroups - nothing to do */
+static inline int cgroup_attach_task_all(struct task_struct *from,
+ struct task_struct *t)
+{
+ return 0;
+}
static inline int cgroup_attach_task_current_cg(struct task_struct *t)
{
return 0;
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 9ddc878..5778b55 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -360,5 +360,8 @@
const struct compat_iovec __user *uvector, unsigned long nr_segs,
unsigned long fast_segs, struct iovec *fast_pointer,
struct iovec **ret_pointer);
+
+extern void __user *compat_alloc_user_space(unsigned long len);
+
#endif /* CONFIG_COMPAT */
#endif /* _LINUX_COMPAT_H */
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index c1a62c5..320d6c9 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -16,7 +16,11 @@
# define __release(x) __context__(x,-1)
# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
# define __percpu __attribute__((noderef, address_space(3)))
+#ifdef CONFIG_SPARSE_RCU_POINTER
+# define __rcu __attribute__((noderef, address_space(4)))
+#else
# define __rcu
+#endif
extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
#else
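For context on what the new address space buys, a sketch of the access pattern sparse now checks when CONFIG_SPARSE_RCU_POINTER is set (struct and variable names are illustrative):

	struct foo {
		struct bar __rcu *ptr;
	};

	/* reader side: a bare f->ptr dereference now warns under sparse */
	rcu_read_lock();
	b = rcu_dereference(f->ptr);
	/* ... use b ... */
	rcu_read_unlock();

	/* updater side, with the relevant update-side lock held */
	rcu_assign_pointer(f->ptr, new_b);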
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
index 8ba66a9..ba4b85a 100644
--- a/include/linux/coredump.h
+++ b/include/linux/coredump.h
@@ -9,37 +9,7 @@
* These are the only things you should do on a core-file: use only these
* functions to write out all the necessary info.
*/
-static inline int dump_write(struct file *file, const void *addr, int nr)
-{
- return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
-}
-
-static inline int dump_seek(struct file *file, loff_t off)
-{
- int ret = 1;
-
- if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
- if (file->f_op->llseek(file, off, SEEK_CUR) < 0)
- return 0;
- } else {
- char *buf = (char *)get_zeroed_page(GFP_KERNEL);
-
- if (!buf)
- return 0;
- while (off > 0) {
- unsigned long n = off;
-
- if (n > PAGE_SIZE)
- n = PAGE_SIZE;
- if (!dump_write(file, buf, n)) {
- ret = 0;
- break;
- }
- off -= n;
- }
- free_page((unsigned long)buf);
- }
- return ret;
-}
+extern int dump_write(struct file *file, const void *addr, int nr);
+extern int dump_seek(struct file *file, loff_t off);
#endif /* _LINUX_COREDUMP_H */
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 36ca972..1be416b 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -53,6 +53,7 @@
#define CPUIDLE_FLAG_BALANCED (0x40) /* medium latency, moderate savings */
#define CPUIDLE_FLAG_DEEP (0x80) /* high latency, large savings */
#define CPUIDLE_FLAG_IGNORE (0x100) /* ignore during this idle period */
+#define CPUIDLE_FLAG_TLB_FLUSHED (0x200) /* tlb will be flushed */
#define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 4d2c395..4aaeab3 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -84,7 +84,7 @@
atomic_t usage;
pid_t tgid; /* thread group process ID */
spinlock_t lock;
- struct key *session_keyring; /* keyring inherited over fork */
+ struct key __rcu *session_keyring; /* keyring inherited over fork */
struct key *process_keyring; /* keyring private to this process */
struct rcu_head rcu; /* RCU deletion hook */
};
diff --git a/fs/ceph/crush/crush.h b/include/linux/crush/crush.h
similarity index 100%
rename from fs/ceph/crush/crush.h
rename to include/linux/crush/crush.h
diff --git a/fs/ceph/crush/hash.h b/include/linux/crush/hash.h
similarity index 100%
rename from fs/ceph/crush/hash.h
rename to include/linux/crush/hash.h
diff --git a/fs/ceph/crush/mapper.h b/include/linux/crush/mapper.h
similarity index 100%
rename from fs/ceph/crush/mapper.h
rename to include/linux/crush/mapper.h
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 29b3ce3..2833452 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -49,7 +49,6 @@
#ifdef CONFIG_LOCKDEP
extern void debug_show_all_locks(void);
-extern void __debug_show_held_locks(struct task_struct *task);
extern void debug_show_held_locks(struct task_struct *task);
extern void debug_check_no_locks_freed(const void *from, unsigned long len);
extern void debug_check_no_locks_held(struct task_struct *task);
@@ -58,10 +57,6 @@
{
}
-static inline void __debug_show_held_locks(struct task_struct *task)
-{
-}
-
static inline void debug_show_held_locks(struct task_struct *task)
{
}
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index ce29b81..ba8319a 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -102,6 +102,9 @@
return DMA_BIT_MASK(32);
}
+#ifdef ARCH_HAS_DMA_SET_COHERENT_MASK
+int dma_set_coherent_mask(struct device *dev, u64 mask);
+#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
if (!dma_supported(dev, mask))
@@ -109,6 +112,7 @@
dev->coherent_dma_mask = mask;
return 0;
}
+#endif
extern u64 dma_get_required_mask(struct device *dev);
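Driver-side usage is unchanged by the new arch override; a minimal sketch:

	/* e.g. in a PCI driver's probe() */
	if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return -EIO;	/* no usable coherent DMA addressing */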
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index c61d4ca..e210649 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -548,7 +548,7 @@
return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
}
-static unsigned short dma_dev_to_maxpq(struct dma_device *dma)
+static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
{
return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
}
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index 52c0da4..bef3cda 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -1,6 +1,8 @@
#ifndef _DYNAMIC_DEBUG_H
#define _DYNAMIC_DEBUG_H
+#include <linux/jump_label.h>
+
/* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which
* bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
* use independent hash functions, to reduce the chance of false positives.
@@ -22,8 +24,6 @@
const char *function;
const char *filename;
const char *format;
- char primary_hash;
- char secondary_hash;
unsigned int lineno:24;
/*
* The flags field controls the behaviour at the callsite.
@@ -33,6 +33,7 @@
#define _DPRINTK_FLAGS_PRINT (1<<0) /* printk() a message using the format */
#define _DPRINTK_FLAGS_DEFAULT 0
unsigned int flags:8;
+ char enabled;
} __attribute__((aligned(8)));
@@ -42,33 +43,35 @@
#if defined(CONFIG_DYNAMIC_DEBUG)
extern int ddebug_remove_module(const char *mod_name);
-#define __dynamic_dbg_enabled(dd) ({ \
- int __ret = 0; \
- if (unlikely((dynamic_debug_enabled & (1LL << DEBUG_HASH)) && \
- (dynamic_debug_enabled2 & (1LL << DEBUG_HASH2)))) \
- if (unlikely(dd.flags)) \
- __ret = 1; \
- __ret; })
-
#define dynamic_pr_debug(fmt, ...) do { \
+ __label__ do_printk; \
+ __label__ out; \
static struct _ddebug descriptor \
__used \
__attribute__((section("__verbose"), aligned(8))) = \
- { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \
- DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \
- if (__dynamic_dbg_enabled(descriptor)) \
- printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
+ { KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__, \
+ _DPRINTK_FLAGS_DEFAULT }; \
+ JUMP_LABEL(&descriptor.enabled, do_printk); \
+ goto out; \
+do_printk: \
+ printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
+out: ; \
} while (0)
#define dynamic_dev_dbg(dev, fmt, ...) do { \
+ __label__ do_printk; \
+ __label__ out; \
static struct _ddebug descriptor \
__used \
__attribute__((section("__verbose"), aligned(8))) = \
- { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \
- DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \
- if (__dynamic_dbg_enabled(descriptor)) \
- dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
+ { KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__, \
+ _DPRINTK_FLAGS_DEFAULT }; \
+ JUMP_LABEL(&descriptor.enabled, do_printk); \
+ goto out; \
+do_printk: \
+ dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
+out: ; \
} while (0)
#else
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 2c958f4..4fd978e 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -93,6 +93,7 @@
struct elevator_type *elevator_type;
struct mutex sysfs_lock;
struct hlist_head *hash;
+ unsigned int registered:1;
};
/*
@@ -136,6 +137,7 @@
extern int elevator_init(struct request_queue *, char *);
extern void elevator_exit(struct elevator_queue *);
+extern int elevator_change(struct request_queue *, const char *);
extern int elv_rq_merge_ok(struct request *, struct bio *);
/*
diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h
index f0949a5..63531a6 100644
--- a/include/linux/fanotify.h
+++ b/include/linux/fanotify.h
@@ -65,14 +65,14 @@
FAN_ALL_PERM_EVENTS |\
FAN_Q_OVERFLOW)
-#define FANOTIFY_METADATA_VERSION 1
+#define FANOTIFY_METADATA_VERSION 2
struct fanotify_event_metadata {
__u32 event_len;
__u32 vers;
- __s32 fd;
__u64 mask;
- __s64 pid;
+ __s32 fd;
+ __s32 pid;
} __attribute__ ((packed));
struct fanotify_response {
@@ -95,11 +95,4 @@
(long)(meta)->event_len >= (long)FAN_EVENT_METADATA_LEN && \
(long)(meta)->event_len <= (long)(len))
-#ifdef __KERNEL__
-
-struct fanotify_wait {
- struct fsnotify_event *event;
- __s32 fd;
-};
-#endif /* __KERNEL__ */
#endif /* _LINUX_FANOTIFY_H */
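Userspace consuming the reordered (version 2) metadata iterates as before; a sketch assuming the standard FAN_EVENT_OK/FAN_EVENT_NEXT helpers:

	char buf[4096];
	ssize_t len = read(fanotify_fd, buf, sizeof(buf));
	struct fanotify_event_metadata *meta = (void *)buf;

	while (FAN_EVENT_OK(meta, len)) {
		if (meta->vers != FANOTIFY_METADATA_VERSION)
			break;		/* kernel/userspace version mismatch */
		/* ... handle meta->fd, meta->mask, meta->pid ... */
		meta = FAN_EVENT_NEXT(meta, len);
	}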
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index f59ed29..133c0ba 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -31,7 +31,7 @@
struct fdtable {
unsigned int max_fds;
- struct file ** fd; /* current fd array */
+ struct file __rcu **fd; /* current fd array */
fd_set *close_on_exec;
fd_set *open_fds;
struct rcu_head rcu;
@@ -46,7 +46,7 @@
* read mostly part
*/
atomic_t count;
- struct fdtable *fdt;
+ struct fdtable __rcu *fdt;
struct fdtable fdtab;
/*
* written part on a separate cache line in SMP
@@ -55,7 +55,7 @@
int next_fd;
struct embedded_fd_set close_on_exec_init;
struct embedded_fd_set open_fds_init;
- struct file * fd_array[NR_OPEN_DEFAULT];
+ struct file __rcu * fd_array[NR_OPEN_DEFAULT];
};
#define rcu_dereference_check_fdtable(files, fdtfd) \
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 76041b6..3168dcf 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1093,6 +1093,10 @@
#include <linux/fcntl.h>
+/* temporary stubs for BKL removal */
+#define lock_flocks() lock_kernel()
+#define unlock_flocks() unlock_kernel()
+
extern void send_sigio(struct fown_struct *fown, int fd, int band);
#ifdef CONFIG_FILE_LOCKING
@@ -1380,7 +1384,7 @@
* Saved mount options for lazy filesystems using
* generic_show_options()
*/
- char *s_options;
+ char __rcu *s_options;
};
extern struct timespec current_fs_time(struct super_block *sb);
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index ed36fb5..e40190d 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -156,6 +156,7 @@
struct mutex access_mutex;
struct list_head access_list;
wait_queue_head_t access_waitq;
+ bool bypass_perm; /* protected by access_mutex */
#endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
int f_flags;
} fanotify_data;
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 02b8b24..8beabb9 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -191,8 +191,8 @@
unsigned int flags;
#ifdef CONFIG_PERF_EVENTS
- int perf_refcount;
- struct hlist_head *perf_events;
+ int perf_refcount;
+ struct hlist_head __percpu *perf_events;
#endif
};
@@ -252,8 +252,8 @@
extern int perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
-extern int perf_trace_enable(struct perf_event *event);
-extern void perf_trace_disable(struct perf_event *event);
+extern int perf_trace_add(struct perf_event *event, int flags);
+extern void perf_trace_del(struct perf_event *event, int flags);
extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 5f2f4c4..af3f06b 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -129,8 +129,8 @@
struct disk_part_tbl {
struct rcu_head rcu_head;
int len;
- struct hd_struct *last_lookup;
- struct hd_struct *part[];
+ struct hd_struct __rcu *last_lookup;
+ struct hd_struct __rcu *part[];
};
struct gendisk {
@@ -149,7 +149,7 @@
* non-critical accesses use RCU. Always access through
* helpers.
*/
- struct disk_part_tbl *part_tbl;
+ struct disk_part_tbl __rcu *part_tbl;
struct hd_struct part0;
const struct block_device_operations *fops;
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 03f616b..e41f7dd 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -13,6 +13,7 @@
#include <linux/errno.h>
struct device;
+struct gpio_chip;
/*
* Some platforms don't support the GPIO programming interface.
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index d5b3876..96c323a 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -64,6 +64,8 @@
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET (1UL << NMI_SHIFT)
+#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
+
#ifndef PREEMPT_ACTIVE
#define PREEMPT_ACTIVE_BITS 1
#define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS)
@@ -82,10 +84,13 @@
/*
* Are we doing bottom half or hardware interrupt processing?
* Are we in a softirq context? Interrupt context?
+ * in_softirq - Are we currently processing a softirq or have bhs disabled?
+ * in_serving_softirq - Are we currently processing a softirq?
*/
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
+#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
/*
* Are we in NMI context?
@@ -132,14 +137,16 @@
struct task_struct;
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#if !defined(CONFIG_VIRT_CPU_ACCOUNTING) && !defined(CONFIG_IRQ_TIME_ACCOUNTING)
static inline void account_system_vtime(struct task_struct *tsk)
{
}
+#else
+extern void account_system_vtime(struct task_struct *tsk);
#endif
#if defined(CONFIG_NO_HZ)
-#if defined(CONFIG_TINY_RCU)
+#if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
extern void rcu_enter_nohz(void);
extern void rcu_exit_nohz(void);
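
A small sketch of the distinction the new macro draws: in_softirq() is also true inside plain BH-disabled sections, while in_serving_softirq() is true only while a softirq handler is actually running:

	local_bh_disable();
	WARN_ON(!in_softirq());		/* true: count includes BH-disable */
	WARN_ON(in_serving_softirq());	/* false: no handler is executing */
	local_bh_enable();
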
diff --git a/include/linux/i2c/sx150x.h b/include/linux/i2c/sx150x.h
index ee3049c..52baa79 100644
--- a/include/linux/i2c/sx150x.h
+++ b/include/linux/i2c/sx150x.h
@@ -63,6 +63,9 @@
* IRQ lines will appear. Similarly to gpio_base, the expander
* will create a block of irqs beginning at this number.
* This value is ignored if irq_summary is < 0.
+ * @reset_during_probe: If set to true, the driver will trigger a full
+ * reset of the chip at the beginning of the probe
+ * in order to place it in a known state.
*/
struct sx150x_platform_data {
unsigned gpio_base;
@@ -73,6 +76,7 @@
u16 io_polarity;
int irq_summary;
unsigned irq_base;
+ bool reset_during_probe;
};
#endif /* __LINUX_I2C_SX150X_H */
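
A hedged sketch of board code using the new flag; the values are illustrative, not taken from any real board file:

	static struct sx150x_platform_data expander_pdata = {
		.gpio_base          = 200,	/* expander GPIOs start here */
		.irq_summary        = -1,	/* no summary IRQ wired up */
		.reset_during_probe = true,	/* state unknown after warm boot */
	};
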
diff --git a/include/linux/idr.h b/include/linux/idr.h
index e968db7..cdb715e 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -50,14 +50,14 @@
struct idr_layer {
unsigned long bitmap; /* A zero bit means "space here" */
- struct idr_layer *ary[1<<IDR_BITS];
+ struct idr_layer __rcu *ary[1<<IDR_BITS];
int count; /* When zero, we can release it */
int layer; /* distance from leaf */
struct rcu_head rcu_head;
};
struct idr {
- struct idr_layer *top;
+ struct idr_layer __rcu *top;
struct idr_layer *id_free;
int layers; /* only valid without concurrent changes */
int id_free_cnt;
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index c831467..bed7a46 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -119,7 +119,7 @@
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
unsigned char h_source[ETH_ALEN]; /* source ether addr */
__be16 h_proto; /* packet type ID field */
-} __packed;
+} __attribute__((packed));
#ifdef __KERNEL__
#include <linux/skbuff.h>
diff --git a/include/linux/if_fddi.h b/include/linux/if_fddi.h
index 9947c39..e6dc11e 100644
--- a/include/linux/if_fddi.h
+++ b/include/linux/if_fddi.h
@@ -67,7 +67,7 @@
__u8 dsap; /* destination service access point */
__u8 ssap; /* source service access point */
__u8 ctrl; /* control byte #1 */
-} __packed;
+} __attribute__((packed));
/* Define 802.2 Type 2 header */
struct fddi_8022_2_hdr {
@@ -75,7 +75,7 @@
__u8 ssap; /* source service access point */
__u8 ctrl_1; /* control byte #1 */
__u8 ctrl_2; /* control byte #2 */
-} __packed;
+} __attribute__((packed));
/* Define 802.2 SNAP header */
#define FDDI_K_OUI_LEN 3
@@ -85,7 +85,7 @@
__u8 ctrl; /* always 0x03 */
__u8 oui[FDDI_K_OUI_LEN]; /* organizational universal id */
__be16 ethertype; /* packet type ID field */
-} __packed;
+} __attribute__((packed));
/* Define FDDI LLC frame header */
struct fddihdr {
@@ -98,7 +98,7 @@
struct fddi_8022_2_hdr llc_8022_2;
struct fddi_snap_hdr llc_snap;
} hdr;
-} __packed;
+} __attribute__((packed));
#ifdef __KERNEL__
#include <linux/netdevice.h>
diff --git a/include/linux/if_hippi.h b/include/linux/if_hippi.h
index 5fe5f30..cdc049f 100644
--- a/include/linux/if_hippi.h
+++ b/include/linux/if_hippi.h
@@ -104,7 +104,7 @@
__be32 fixed;
#endif
__be32 d2_size;
-} __packed;
+} __attribute__((packed));
struct hippi_le_hdr {
#if defined (__BIG_ENDIAN_BITFIELD)
@@ -129,7 +129,7 @@
__u8 daddr[HIPPI_ALEN];
__u16 locally_administered;
__u8 saddr[HIPPI_ALEN];
-} __packed;
+} __attribute__((packed));
#define HIPPI_OUI_LEN 3
/*
@@ -142,12 +142,12 @@
__u8 ctrl; /* always 0x03 */
__u8 oui[HIPPI_OUI_LEN]; /* organizational universal id (zero)*/
__be16 ethertype; /* packet type ID field */
-} __packed;
+} __attribute__((packed));
struct hippi_hdr {
struct hippi_fp_hdr fp;
struct hippi_le_hdr le;
struct hippi_snap_hdr snap;
-} __packed;
+} __attribute__((packed));
#endif /* _LINUX_IF_HIPPI_H */
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index 1925e0c..27741e0 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -59,7 +59,7 @@
union{
struct pppoe_addr pppoe;
}sa_addr;
-} __packed;
+} __attribute__((packed));
/* The use of the above union isn't viable because the size of this
* struct must stay fixed over time -- applications use sizeof(struct
@@ -70,7 +70,7 @@
sa_family_t sa_family; /* address family, AF_PPPOX */
unsigned int sa_protocol; /* protocol identifier */
struct pppol2tp_addr pppol2tp;
-} __packed;
+} __attribute__((packed));
/* The L2TPv3 protocol changes tunnel and session ids from 16 to 32
* bits. So we need a different sockaddr structure.
@@ -79,7 +79,7 @@
sa_family_t sa_family; /* address family, AF_PPPOX */
unsigned int sa_protocol; /* protocol identifier */
struct pppol2tpv3_addr pppol2tp;
-} __packed;
+} __attribute__((packed));
/*********************************************************************
*
@@ -101,7 +101,7 @@
__be16 tag_type;
__be16 tag_len;
char tag_data[0];
-} __attribute ((packed));
+} __attribute__ ((packed));
/* Tag identifiers */
#define PTT_EOL __cpu_to_be16(0x0000)
@@ -129,7 +129,7 @@
__be16 sid;
__be16 length;
struct pppoe_tag tag[0];
-} __packed;
+} __attribute__((packed));
/* Length of entire PPPoE + PPP header */
#define PPPOE_SES_HLEN 8
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 1f43fa5..2fea6c8 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -82,11 +82,17 @@
# define CAP_INIT_BSET CAP_FULL_SET
#ifdef CONFIG_TREE_PREEMPT_RCU
+#define INIT_TASK_RCU_TREE_PREEMPT() \
+ .rcu_blocked_node = NULL,
+#else
+#define INIT_TASK_RCU_TREE_PREEMPT()
+#endif
+#ifdef CONFIG_PREEMPT_RCU
#define INIT_TASK_RCU_PREEMPT(tsk) \
.rcu_read_lock_nesting = 0, \
.rcu_read_unlock_special = 0, \
- .rcu_blocked_node = NULL, \
- .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry),
+ .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), \
+ INIT_TASK_RCU_TREE_PREEMPT()
#else
#define INIT_TASK_RCU_PREEMPT(tsk)
#endif
@@ -137,8 +143,8 @@
.children = LIST_HEAD_INIT(tsk.children), \
.sibling = LIST_HEAD_INIT(tsk.sibling), \
.group_leader = &tsk, \
- .real_cred = &init_cred, \
- .cred = &init_cred, \
+ RCU_INIT_POINTER(.real_cred, &init_cred), \
+ RCU_INIT_POINTER(.cred, &init_cred), \
.cred_guard_mutex = \
__MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
.comm = "swapper", \
diff --git a/include/linux/input.h b/include/linux/input.h
index 896a922..d6ae176 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -1196,7 +1196,7 @@
int (*flush)(struct input_dev *dev, struct file *file);
int (*event)(struct input_dev *dev, unsigned int type, unsigned int code, int value);
- struct input_handle *grab;
+ struct input_handle __rcu *grab;
spinlock_t event_lock;
struct mutex mutex;
diff --git a/include/linux/intel-gtt.h b/include/linux/intel-gtt.h
new file mode 100644
index 0000000..1d19ab2
--- /dev/null
+++ b/include/linux/intel-gtt.h
@@ -0,0 +1,20 @@
+/*
+ * Common Intel AGPGART and GTT definitions.
+ */
+#ifndef _INTEL_GTT_H
+#define _INTEL_GTT_H
+
+#include <linux/agp_backend.h>
+
+/* This is for Intel-only GTT controls.
+ *
+ * Sandybridge: AGP_USER_CACHED_MEMORY defaults to LLC only
+ */
+
+#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2)
+#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4)
+
+/* flag for GFDT type */
+#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
+
+#endif
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index a0384a4..531495d 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -18,6 +18,7 @@
#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>
+#include <trace/events/irq.h>
/*
* These correspond to the IORESOURCE_IRQ_* defines in
@@ -407,7 +408,12 @@
asmlinkage void __do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
-#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
+static inline void __raise_softirq_irqoff(unsigned int nr)
+{
+ trace_softirq_raise((struct softirq_action *)(unsigned long)nr, NULL);
+ or_softirq_pending(1UL << nr);
+}
+
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
extern void wakeup_softirqd(void);
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 0a6b3d5..7fb5927 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -79,7 +79,7 @@
}
/* Atomic map/unmap */
-static inline void *
+static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
unsigned long offset,
int slot)
@@ -94,12 +94,12 @@
}
static inline void
-io_mapping_unmap_atomic(void *vaddr, int slot)
+io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
{
iounmap_atomic(vaddr, slot);
}
-static inline void *
+static inline void __iomem *
io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
{
resource_size_t phys_addr;
@@ -111,7 +111,7 @@
}
static inline void
-io_mapping_unmap(void *vaddr)
+io_mapping_unmap(void __iomem *vaddr)
{
iounmap(vaddr);
}
@@ -125,38 +125,38 @@
static inline struct io_mapping *
io_mapping_create_wc(resource_size_t base, unsigned long size)
{
- return (struct io_mapping *) ioremap_wc(base, size);
+ return (struct io_mapping __force *) ioremap_wc(base, size);
}
static inline void
io_mapping_free(struct io_mapping *mapping)
{
- iounmap(mapping);
+ iounmap((void __force __iomem *) mapping);
}
/* Atomic map/unmap */
-static inline void *
+static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
unsigned long offset,
int slot)
{
- return ((char *) mapping) + offset;
+ return ((char __force __iomem *) mapping) + offset;
}
static inline void
-io_mapping_unmap_atomic(void *vaddr, int slot)
+io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
{
}
/* Non-atomic map/unmap */
-static inline void *
+static inline void __iomem *
io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
{
- return ((char *) mapping) + offset;
+ return ((char __force __iomem *) mapping) + offset;
}
static inline void
-io_mapping_unmap(void *vaddr)
+io_mapping_unmap(void __iomem *vaddr)
{
}
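
With the __iomem annotations sparse can verify that mapped pointers only flow into the io accessors. A minimal sketch of the intended usage, with mapping, offset and value assumed from context:

	void __iomem *p;

	p = io_mapping_map_atomic_wc(mapping, offset, KM_USER0);
	writel(value, p);			/* io accessor, not a plain store */
	io_mapping_unmap_atomic(p, KM_USER0);
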
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 64d5291..3e70b21 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -53,7 +53,7 @@
struct radix_tree_root radix_root;
struct hlist_head cic_list;
- void *ioc_data;
+ void __rcu *ioc_data;
};
static inline struct io_context *ioc_task_link(struct io_context *ioc)
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index ab9e9e8..e62683b 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -58,7 +58,7 @@
/*
* TLV encoded option data follows.
*/
-} __packed; /* required for some archs */
+} __attribute__((packed)); /* required for some archs */
#define ipv6_destopt_hdr ipv6_opt_hdr
#define ipv6_hopopt_hdr ipv6_opt_hdr
@@ -99,7 +99,7 @@
__u8 type;
__u8 length;
struct in6_addr addr;
-} __packed;
+} __attribute__((packed));
/*
* IPv6 fixed header
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
new file mode 100644
index 0000000..4fa09d4
--- /dev/null
+++ b/include/linux/irq_work.h
@@ -0,0 +1,20 @@
+#ifndef _LINUX_IRQ_WORK_H
+#define _LINUX_IRQ_WORK_H
+
+struct irq_work {
+ struct irq_work *next;
+ void (*func)(struct irq_work *);
+};
+
+static inline
+void init_irq_work(struct irq_work *entry, void (*func)(struct irq_work *))
+{
+ entry->next = NULL;
+ entry->func = func;
+}
+
+bool irq_work_queue(struct irq_work *entry);
+void irq_work_run(void);
+void irq_work_sync(struct irq_work *entry);
+
+#endif /* _LINUX_IRQ_WORK_H */
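
A hedged sketch of the intended use: enqueue from NMI-like context, run the callback once normal interrupt processing resumes. All names are illustrative:

	static void my_deferred(struct irq_work *work)
	{
		pr_info("ran outside NMI context\n");	/* unsafe inside NMI */
	}

	static struct irq_work my_work;

	static void my_pmi_handler(void)
	{
		/* lock-free enqueue; my_deferred() runs from IRQ context later */
		irq_work_queue(&my_work);
	}

	static int __init my_setup(void)
	{
		init_irq_work(&my_work, my_deferred);
		return 0;
	}
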
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
new file mode 100644
index 0000000..b67cb18
--- /dev/null
+++ b/include/linux/jump_label.h
@@ -0,0 +1,74 @@
+#ifndef _LINUX_JUMP_LABEL_H
+#define _LINUX_JUMP_LABEL_H
+
+#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_HAVE_ARCH_JUMP_LABEL)
+# include <asm/jump_label.h>
+# define HAVE_JUMP_LABEL
+#endif
+
+enum jump_label_type {
+ JUMP_LABEL_ENABLE,
+ JUMP_LABEL_DISABLE
+};
+
+struct module;
+
+#ifdef HAVE_JUMP_LABEL
+
+extern struct jump_entry __start___jump_table[];
+extern struct jump_entry __stop___jump_table[];
+
+extern void arch_jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type);
+extern void arch_jump_label_text_poke_early(jump_label_t addr);
+extern void jump_label_update(unsigned long key, enum jump_label_type type);
+extern void jump_label_apply_nops(struct module *mod);
+extern int jump_label_text_reserved(void *start, void *end);
+
+#define jump_label_enable(key) \
+ jump_label_update((unsigned long)key, JUMP_LABEL_ENABLE);
+
+#define jump_label_disable(key) \
+ jump_label_update((unsigned long)key, JUMP_LABEL_DISABLE);
+
+#else
+
+#define JUMP_LABEL(key, label) \
+do { \
+ if (unlikely(*key)) \
+ goto label; \
+} while (0)
+
+#define jump_label_enable(cond_var) \
+do { \
+ *(cond_var) = 1; \
+} while (0)
+
+#define jump_label_disable(cond_var) \
+do { \
+ *(cond_var) = 0; \
+} while (0)
+
+static inline void jump_label_apply_nops(struct module *mod)
+{
+}
+
+static inline int jump_label_text_reserved(void *start, void *end)
+{
+ return 0;
+}
+
+#endif
+
+#define COND_STMT(key, stmt) \
+do { \
+ __label__ jl_enabled; \
+ JUMP_LABEL(key, jl_enabled); \
+ if (0) { \
+jl_enabled: \
+ stmt; \
+ } \
+} while (0)
+
+#endif
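
A sketch of the generic (no asm goto) fallback above, where the key degrades to a tested integer flag; do_expensive_tracing() is hypothetical:

	static int tracing_key;

	static void maybe_trace(void)
	{
		JUMP_LABEL(&tracing_key, trace);
		return;				/* common fast path */
	trace:
		do_expensive_tracing();		/* taken only when enabled */
	}

	static void set_tracing(int on)
	{
		if (on)
			jump_label_enable(&tracing_key);	/* *key = 1 */
		else
			jump_label_disable(&tracing_key);	/* *key = 0 */
	}
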
diff --git a/include/linux/jump_label_ref.h b/include/linux/jump_label_ref.h
new file mode 100644
index 0000000..e5d012a
--- /dev/null
+++ b/include/linux/jump_label_ref.h
@@ -0,0 +1,44 @@
+#ifndef _LINUX_JUMP_LABEL_REF_H
+#define _LINUX_JUMP_LABEL_REF_H
+
+#include <linux/jump_label.h>
+#include <asm/atomic.h>
+
+#ifdef HAVE_JUMP_LABEL
+
+static inline void jump_label_inc(atomic_t *key)
+{
+ if (atomic_add_return(1, key) == 1)
+ jump_label_enable(key);
+}
+
+static inline void jump_label_dec(atomic_t *key)
+{
+ if (atomic_dec_and_test(key))
+ jump_label_disable(key);
+}
+
+#else /* !HAVE_JUMP_LABEL */
+
+static inline void jump_label_inc(atomic_t *key)
+{
+ atomic_inc(key);
+}
+
+static inline void jump_label_dec(atomic_t *key)
+{
+ atomic_dec(key);
+}
+
+#undef JUMP_LABEL
+#define JUMP_LABEL(key, label) \
+do { \
+ if (unlikely(__builtin_choose_expr( \
+ __builtin_types_compatible_p(typeof(key), atomic_t *), \
+ atomic_read((atomic_t *)(key)), *(key)))) \
+ goto label; \
+} while (0)
+
+#endif /* HAVE_JUMP_LABEL */
+
+#endif /* _LINUX_JUMP_LABEL_REF_H */
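
The refcounted wrappers let several users share one key: the first jump_label_inc() flips the branch on, the last jump_label_dec() flips it off. A sketch in an assumed registration path:

	static atomic_t my_events = ATOMIC_INIT(0);

	static void my_event_register(void)
	{
		jump_label_inc(&my_events);	/* 0 -> 1 enables the branch */
	}

	static void my_event_unregister(void)
	{
		jump_label_dec(&my_events);	/* 1 -> 0 disables it again */
	}
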
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 2b0a35e..1759ba5 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -58,7 +58,18 @@
#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
-#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
+#define roundup(x, y) ( \
+{ \
+ typeof(y) __y = (y); \
+ (((x) + (__y - 1)) / __y) * __y; \
+} \
+)
+#define rounddown(x, y) ( \
+{ \
+ typeof(x) __x = (x); \
+ __x - (__x % (y)); \
+} \
+)
#define DIV_ROUND_CLOSEST(x, divisor)( \
{ \
typeof(divisor) __divisor = divisor; \
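
The statement-expression form evaluates y only once, which matters when the argument has side effects; rounddown() is the new counterpart. An illustration (next_size() is a hypothetical function with side effects):

	static unsigned long align_example(unsigned long addr, unsigned long count)
	{
		unsigned long len  = roundup(count, next_size()); /* evaluated once */
		unsigned long base = rounddown(addr, 64);	  /* e.g. 130 -> 128 */

		return base + len;
	}
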
diff --git a/include/linux/key.h b/include/linux/key.h
index cd50dfa..3db0adc 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -178,8 +178,9 @@
*/
union {
unsigned long value;
+ void __rcu *rcudata;
void *data;
- struct keyring_list *subscriptions;
+ struct keyring_list __rcu *subscriptions;
} payload;
};
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 4aa95f2..62dbee5 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -214,7 +214,7 @@
*/
#define kfifo_reset(fifo) \
(void)({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
__tmp->kfifo.in = __tmp->kfifo.out = 0; \
})
@@ -228,7 +228,7 @@
*/
#define kfifo_reset_out(fifo) \
(void)({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
__tmp->kfifo.out = __tmp->kfifo.in; \
})
@@ -238,7 +238,7 @@
*/
#define kfifo_len(fifo) \
({ \
- typeof(fifo + 1) __tmpl = (fifo); \
+ typeof((fifo) + 1) __tmpl = (fifo); \
__tmpl->kfifo.in - __tmpl->kfifo.out; \
})
@@ -248,7 +248,7 @@
*/
#define kfifo_is_empty(fifo) \
({ \
- typeof(fifo + 1) __tmpq = (fifo); \
+ typeof((fifo) + 1) __tmpq = (fifo); \
__tmpq->kfifo.in == __tmpq->kfifo.out; \
})
@@ -258,7 +258,7 @@
*/
#define kfifo_is_full(fifo) \
({ \
- typeof(fifo + 1) __tmpq = (fifo); \
+ typeof((fifo) + 1) __tmpq = (fifo); \
kfifo_len(__tmpq) > __tmpq->kfifo.mask; \
})
@@ -269,7 +269,7 @@
#define kfifo_avail(fifo) \
__kfifo_must_check_helper( \
({ \
- typeof(fifo + 1) __tmpq = (fifo); \
+ typeof((fifo) + 1) __tmpq = (fifo); \
const size_t __recsize = sizeof(*__tmpq->rectype); \
unsigned int __avail = kfifo_size(__tmpq) - kfifo_len(__tmpq); \
(__recsize) ? ((__avail <= __recsize) ? 0 : \
@@ -284,7 +284,7 @@
*/
#define kfifo_skip(fifo) \
(void)({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
if (__recsize) \
@@ -302,7 +302,7 @@
#define kfifo_peek_len(fifo) \
__kfifo_must_check_helper( \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
(!__recsize) ? kfifo_len(__tmp) * sizeof(*__tmp->type) : \
@@ -325,7 +325,7 @@
#define kfifo_alloc(fifo, size, gfp_mask) \
__kfifo_must_check_helper( \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
__is_kfifo_ptr(__tmp) ? \
__kfifo_alloc(__kfifo, size, sizeof(*__tmp->type), gfp_mask) : \
@@ -339,7 +339,7 @@
*/
#define kfifo_free(fifo) \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
if (__is_kfifo_ptr(__tmp)) \
__kfifo_free(__kfifo); \
@@ -358,7 +358,7 @@
*/
#define kfifo_init(fifo, buffer, size) \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
__is_kfifo_ptr(__tmp) ? \
__kfifo_init(__kfifo, buffer, size, sizeof(*__tmp->type)) : \
@@ -379,8 +379,8 @@
*/
#define kfifo_put(fifo, val) \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
- typeof(val + 1) __val = (val); \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ typeof((val) + 1) __val = (val); \
unsigned int __ret; \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -421,8 +421,8 @@
#define kfifo_get(fifo, val) \
__kfifo_must_check_helper( \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
- typeof(val + 1) __val = (val); \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ typeof((val) + 1) __val = (val); \
unsigned int __ret; \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -462,8 +462,8 @@
#define kfifo_peek(fifo, val) \
__kfifo_must_check_helper( \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
- typeof(val + 1) __val = (val); \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ typeof((val) + 1) __val = (val); \
unsigned int __ret; \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -501,8 +501,8 @@
*/
#define kfifo_in(fifo, buf, n) \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
- typeof(buf + 1) __buf = (buf); \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ typeof((buf) + 1) __buf = (buf); \
unsigned long __n = (n); \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -554,8 +554,8 @@
#define kfifo_out(fifo, buf, n) \
__kfifo_must_check_helper( \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
- typeof(buf + 1) __buf = (buf); \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ typeof((buf) + 1) __buf = (buf); \
unsigned long __n = (n); \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -611,7 +611,7 @@
#define kfifo_from_user(fifo, from, len, copied) \
__kfifo_must_check_helper( \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
const void __user *__from = (from); \
unsigned int __len = (len); \
unsigned int *__copied = (copied); \
@@ -639,7 +639,7 @@
#define kfifo_to_user(fifo, to, len, copied) \
__kfifo_must_check_helper( \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
void __user *__to = (to); \
unsigned int __len = (len); \
unsigned int *__copied = (copied); \
@@ -666,7 +666,7 @@
*/
#define kfifo_dma_in_prepare(fifo, sgl, nents, len) \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
struct scatterlist *__sgl = (sgl); \
int __nents = (nents); \
unsigned int __len = (len); \
@@ -690,7 +690,7 @@
*/
#define kfifo_dma_in_finish(fifo, len) \
(void)({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
unsigned int __len = (len); \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -717,7 +717,7 @@
*/
#define kfifo_dma_out_prepare(fifo, sgl, nents, len) \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
struct scatterlist *__sgl = (sgl); \
int __nents = (nents); \
unsigned int __len = (len); \
@@ -741,7 +741,7 @@
*/
#define kfifo_dma_out_finish(fifo, len) \
(void)({ \
- typeof(fifo + 1) __tmp = (fifo); \
+ typeof((fifo) + 1) __tmp = (fifo); \
unsigned int __len = (len); \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -766,8 +766,8 @@
#define kfifo_out_peek(fifo, buf, n) \
__kfifo_must_check_helper( \
({ \
- typeof(fifo + 1) __tmp = (fifo); \
- typeof(buf + 1) __buf = (buf); \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ typeof((buf) + 1) __buf = (buf); \
unsigned long __n = (n); \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
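
The added parentheses matter once the macro argument is itself an expression; without them a conditional argument binds to the trailing + 1. A sketch of the failure mode the change prevents:

	static unsigned int pick_len(struct kfifo *a, struct kfifo *b, int use_a)
	{
		/* old: typeof(use_a ? a : b + 1) binds as b + 1 (wrong type)
		 * new: typeof((use_a ? a : b) + 1) covers the whole argument */
		return kfifo_len(use_a ? a : b);
	}
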
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index cf343a8..7950a37 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -22,6 +22,7 @@
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/kref.h>
+#include <linux/kobject_ns.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <asm/atomic.h>
@@ -136,42 +137,8 @@
extern const struct sysfs_ops kobj_sysfs_ops;
-/*
- * Namespace types which are used to tag kobjects and sysfs entries.
- * Network namespace will likely be the first.
- */
-enum kobj_ns_type {
- KOBJ_NS_TYPE_NONE = 0,
- KOBJ_NS_TYPE_NET,
- KOBJ_NS_TYPES
-};
-
struct sock;
-/*
- * Callbacks so sysfs can determine namespaces
- * @current_ns: return calling task's namespace
- * @netlink_ns: return namespace to which a sock belongs (right?)
- * @initial_ns: return the initial namespace (i.e. init_net_ns)
- */
-struct kobj_ns_type_operations {
- enum kobj_ns_type type;
- const void *(*current_ns)(void);
- const void *(*netlink_ns)(struct sock *sk);
- const void *(*initial_ns)(void);
-};
-
-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
-int kobj_ns_type_registered(enum kobj_ns_type type);
-const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent);
-const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj);
-
-const void *kobj_ns_current(enum kobj_ns_type type);
-const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk);
-const void *kobj_ns_initial(enum kobj_ns_type type);
-void kobj_ns_exit(enum kobj_ns_type type, const void *ns);
-
-
/**
* struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
*
diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
new file mode 100644
index 0000000..82cb5bf
--- /dev/null
+++ b/include/linux/kobject_ns.h
@@ -0,0 +1,56 @@
+/* Kernel object name space definitions
+ *
+ * Copyright (c) 2002-2003 Patrick Mochel
+ * Copyright (c) 2002-2003 Open Source Development Labs
+ * Copyright (c) 2006-2008 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (c) 2006-2008 Novell Inc.
+ *
+ * Split from kobject.h by David Howells (dhowells@redhat.com)
+ *
+ * This file is released under the GPLv2.
+ *
+ * Please read Documentation/kobject.txt before using the kobject
+ * interface, ESPECIALLY the parts about reference counts and object
+ * destructors.
+ */
+
+#ifndef _LINUX_KOBJECT_NS_H
+#define _LINUX_KOBJECT_NS_H
+
+struct sock;
+struct kobject;
+
+/*
+ * Namespace types which are used to tag kobjects and sysfs entries.
+ * Network namespace will likely be the first.
+ */
+enum kobj_ns_type {
+ KOBJ_NS_TYPE_NONE = 0,
+ KOBJ_NS_TYPE_NET,
+ KOBJ_NS_TYPES
+};
+
+/*
+ * Callbacks so sysfs can determine namespaces
+ * @current_ns: return calling task's namespace
+ * @netlink_ns: return the namespace to which a sock belongs
+ * @initial_ns: return the initial namespace (i.e. init_net_ns)
+ */
+struct kobj_ns_type_operations {
+ enum kobj_ns_type type;
+ const void *(*current_ns)(void);
+ const void *(*netlink_ns)(struct sock *sk);
+ const void *(*initial_ns)(void);
+};
+
+int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
+int kobj_ns_type_registered(enum kobj_ns_type type);
+const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent);
+const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj);
+
+const void *kobj_ns_current(enum kobj_ns_type type);
+const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk);
+const void *kobj_ns_initial(enum kobj_ns_type type);
+void kobj_ns_exit(enum kobj_ns_type type, const void *ns);
+
+#endif /* _LINUX_KOBJECT_NS_H */
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 74d691e..3319a69 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -16,6 +16,9 @@
struct stable_node;
struct mem_cgroup;
+struct page *ksm_does_need_to_copy(struct page *page,
+ struct vm_area_struct *vma, unsigned long address);
+
#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
unsigned long end, int advice, unsigned long *vm_flags);
@@ -70,19 +73,14 @@
* We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
* but what if the vma was unmerged while the page was swapped out?
*/
-struct page *ksm_does_need_to_copy(struct page *page,
- struct vm_area_struct *vma, unsigned long address);
-static inline struct page *ksm_might_need_to_copy(struct page *page,
+static inline int ksm_might_need_to_copy(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
struct anon_vma *anon_vma = page_anon_vma(page);
- if (!anon_vma ||
- (anon_vma->root == vma->anon_vma->root &&
- page->index == linear_page_index(vma, address)))
- return page;
-
- return ksm_does_need_to_copy(page, vma, address);
+ return anon_vma &&
+ (anon_vma->root != vma->anon_vma->root ||
+ page->index != linear_page_index(vma, address));
}
int page_referenced_ksm(struct page *page,
@@ -115,10 +113,10 @@
return 0;
}
-static inline struct page *ksm_might_need_to_copy(struct page *page,
+static inline int ksm_might_need_to_copy(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
- return page;
+ return 0;
}
static inline int page_referenced_ksm(struct page *page,
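
With ksm_might_need_to_copy() reduced to an inline predicate, the caller does the copy explicitly. A hedged sketch of the resulting call-site shape in a do_swap_page()-like context; the error label is hypothetical:

	struct page *new_page;

	if (ksm_might_need_to_copy(page, vma, address)) {
		new_page = ksm_does_need_to_copy(page, vma, address);
		if (!new_page)
			goto out_oom;		/* hypothetical OOM path */
		page = new_page;
	}
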
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index c13cc48..ac740b2 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -205,7 +205,7 @@
struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
- struct kvm_irq_routing_table *irq_routing;
+ struct kvm_irq_routing_table __rcu *irq_routing;
struct hlist_head mask_notifier_list;
struct hlist_head irq_ack_notifier_list;
#endif
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index b288cb7..f549056 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -150,7 +150,7 @@
int i; \
preempt_disable(); \
rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
- for_each_online_cpu(i) { \
+ for_each_possible_cpu(i) { \
arch_spinlock_t *lock; \
lock = &per_cpu(name##_lock, i); \
arch_spin_lock(lock); \
@@ -161,7 +161,7 @@
void name##_global_unlock(void) { \
int i; \
rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
- for_each_online_cpu(i) { \
+ for_each_possible_cpu(i) { \
arch_spinlock_t *lock; \
lock = &per_cpu(name##_lock, i); \
arch_spin_unlock(lock); \
diff --git a/include/linux/libata.h b/include/linux/libata.h
index f010f18..45fb296 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -335,6 +335,7 @@
ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */
ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */
ATA_EHI_QUIET = (1 << 3), /* be quiet */
+ ATA_EHI_NO_RECOVERY = (1 << 4), /* no recovery */
ATA_EHI_DID_SOFTRESET = (1 << 16), /* already soft-reset this port */
ATA_EHI_DID_HARDRESET = (1 << 17), /* already hard-reset this port */
@@ -723,6 +724,7 @@
struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */
u8 ctl; /* cache of ATA control register */
u8 last_ctl; /* Cache last written value */
+ struct ata_link *sff_pio_task_link; /* link currently used */
struct delayed_work sff_pio_task;
#ifdef CONFIG_ATA_BMDMA
struct ata_bmdma_prd *bmdma_prd; /* BMDMA SG list */
@@ -1594,7 +1596,7 @@
extern void ata_sff_irq_clear(struct ata_port *ap);
extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
u8 status, int in_wq);
-extern void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay);
+extern void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay);
extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc);
extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc);
extern unsigned int ata_sff_port_intr(struct ata_port *ap,
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 06aed83..2186a64 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -32,6 +32,17 @@
#define MAX_LOCKDEP_SUBCLASSES 8UL
/*
+ * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
+ * cached in the instance of lockdep_map
+ *
+ * Currently the main class (subclass == 0) and the single-depth subclass
+ * are cached in lockdep_map. This optimization mainly targets rq->lock:
+ * double_rq_lock() acquires this highly contended lock with a
+ * single-depth subclass.
+ */
+#define NR_LOCKDEP_CACHING_CLASSES 2
+
+/*
* Lock-classes are keyed via unique addresses, by embedding the
* lockclass-key into the kernel (or module) .data section. (For
* static locks we use the lock address itself as the key.)
@@ -138,7 +149,7 @@
*/
struct lockdep_map {
struct lock_class_key *key;
- struct lock_class *class_cache;
+ struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES];
const char *name;
#ifdef CONFIG_LOCK_STAT
int cpu;
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index bafffc7..18fd130 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -33,6 +33,7 @@
#define MWAVE_MINOR 219 /* ACP/Mwave Modem */
#define MPT_MINOR 220
#define MPT2SAS_MINOR 221
+#define UINPUT_MINOR 223
#define HPET_MINOR 228
#define FUSE_MINOR 229
#define KVM_MINOR 232
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 709f672..74949fb 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -78,7 +78,11 @@
#define VM_MAYSHARE 0x00000080
#define VM_GROWSDOWN 0x00000100 /* general info on the segment */
+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
#define VM_GROWSUP 0x00000200
+#else
+#define VM_GROWSUP 0x00000000
+#endif
#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */
@@ -860,6 +864,12 @@
int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);
+/* Is the vma a continuation of the stack vma above it? */
+static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
+{
+ return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+}
+
extern unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long old_addr, struct vm_area_struct *new_vma,
unsigned long new_addr, unsigned long len);
@@ -1330,8 +1340,10 @@
/* Do stack extension */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
-#ifdef CONFIG_IA64
+#if VM_GROWSUP
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
+#else
+ #define expand_upwards(vma, address) do { } while (0)
#endif
extern int expand_stack_downwards(struct vm_area_struct *vma,
unsigned long address);
@@ -1357,7 +1369,15 @@
return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}
+#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(unsigned long vm_flags);
+#else
+static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+ return __pgprot(0);
+}
+#endif
+
struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t);
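
The new helper names a test callers previously open-coded: is the mapping ending at addr just more of the same grows-down stack? A sketch, assuming the vma list's vm_prev link:

	static int addr_extends_stack(struct vm_area_struct *vma, unsigned long addr)
	{
		/* true when the previous vma ends exactly at addr and grows
		 * down, i.e. the page at addr belongs to the same stack */
		return vma_stack_continue(vma->vm_prev, addr);
	}
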
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index ee7e258..cb57d65 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -299,7 +299,7 @@
* new_owner->mm == mm
* new_owner->alloc_lock is held
*/
- struct task_struct *owner;
+ struct task_struct __rcu *owner;
#endif
#ifdef CONFIG_PROC_FS
diff --git a/include/linux/mmc/sdio.h b/include/linux/mmc/sdio.h
index 329a8fa..245cdac 100644
--- a/include/linux/mmc/sdio.h
+++ b/include/linux/mmc/sdio.h
@@ -38,6 +38,8 @@
* [8:0] Byte/block count
*/
+#define R4_MEMORY_PRESENT (1 << 27)
+
/*
SDIO status in R5
Type
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 6e6e626..3984c4e 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -284,6 +284,13 @@
unsigned long watermark[NR_WMARK];
/*
+ * When free pages are below this point, additional steps are taken
+ * when reading the number of free pages, to keep per-cpu counter
+ * drift from allowing watermarks to be breached.
+ */
+ unsigned long percpu_drift_mark;
+
+ /*
* We don't know if the memory that we're going to allocate will be freeable
* or/and it will be released eventually, so to avoid totally wasting several
* GB of ram we must reserve some of the lower zone memory (otherwise we risk
@@ -441,6 +448,12 @@
return test_bit(ZONE_OOM_LOCKED, &zone->flags);
}
+#ifdef CONFIG_SMP
+unsigned long zone_nr_free_pages(struct zone *zone);
+#else
+#define zone_nr_free_pages(zone) zone_page_state(zone, NR_FREE_PAGES)
+#endif /* CONFIG_SMP */
+
/*
* The "priority" of VM scanning is how much of the queues we will scan in one
* go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
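
A sketch of the call-site pattern the helper enables: watermark checks read free pages through the drift-aware accessor rather than raw zone_page_state(). The action taken is hypothetical:

	/* under SMP this compensates for per-cpu counter drift near the mark */
	if (zone_nr_free_pages(zone) <= low_wmark_pages(zone))
		note_zone_pressure(zone);	/* hypothetical response */
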
diff --git a/include/linux/module.h b/include/linux/module.h
index 8a6b9fd..b29e745 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -350,7 +350,10 @@
struct tracepoint *tracepoints;
unsigned int num_tracepoints;
#endif
-
+#ifdef HAVE_JUMP_LABEL
+ struct jump_entry *jump_entries;
+ unsigned int num_jump_entries;
+#endif
#ifdef CONFIG_TRACING
const char **trace_bprintk_fmt_start;
unsigned int num_trace_bprintk_fmt;
@@ -686,17 +689,16 @@
#ifdef CONFIG_GENERIC_BUG
-int module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *,
+void module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *,
struct module *);
void module_bug_cleanup(struct module *);
#else /* !CONFIG_GENERIC_BUG */
-static inline int module_bug_finalize(const Elf_Ehdr *hdr,
+static inline void module_bug_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *mod)
{
- return 0;
}
static inline void module_bug_cleanup(struct module *mod) {}
#endif /* CONFIG_GENERIC_BUG */
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 878cab4..f363bc8 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -78,6 +78,14 @@
# include <linux/mutex-debug.h>
#else
# define __DEBUG_MUTEX_INITIALIZER(lockname)
+/**
+ * mutex_init - initialize the mutex
+ * @mutex: the mutex to be initialized
+ *
+ * Initialize the mutex to unlocked state.
+ *
+ * It is not allowed to initialize an already locked mutex.
+ */
# define mutex_init(mutex) \
do { \
static struct lock_class_key __key; \
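
Usage matching the new kerneldoc, as a sketch: each mutex is initialized exactly once, before first use and never while held:

	struct my_dev {
		struct mutex lock;
	};

	static void my_dev_setup(struct my_dev *d)
	{
		mutex_init(&d->lock);	/* unlocked; gets its own lockdep key */
	}
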
diff --git a/include/linux/nbd.h b/include/linux/nbd.h
index bb58854..d146ca1 100644
--- a/include/linux/nbd.h
+++ b/include/linux/nbd.h
@@ -88,7 +88,7 @@
char handle[8];
__be64 from;
__be32 len;
-} __packed;
+} __attribute__((packed));
/*
* This is the reply packet that nbd-server sends back to the client after
diff --git a/include/linux/ncp.h b/include/linux/ncp.h
index 3ace837..99f0ade 100644
--- a/include/linux/ncp.h
+++ b/include/linux/ncp.h
@@ -27,7 +27,7 @@
__u8 conn_high;
__u8 function;
__u8 data[0];
-} __packed;
+} __attribute__((packed));
#define NCP_REPLY (0x3333)
#define NCP_WATCHDOG (0x3E3E)
@@ -42,7 +42,7 @@
__u8 completion_code;
__u8 connection_state;
__u8 data[0];
-} __packed;
+} __attribute__((packed));
#define NCP_VOLNAME_LEN (16)
#define NCP_NUMBER_OF_VOLUMES (256)
@@ -158,7 +158,7 @@
#ifdef __KERNEL__
struct nw_nfs_info nfs;
#endif
-} __packed;
+} __attribute__((packed));
/* modify mask - use with MODIFY_DOS_INFO structure */
#define DM_ATTRIBUTES (cpu_to_le32(0x02))
@@ -190,12 +190,12 @@
__u16 inheritanceGrantMask;
__u16 inheritanceRevokeMask;
__u32 maximumSpace;
-} __packed;
+} __attribute__((packed));
struct nw_search_sequence {
__u8 volNumber;
__u32 dirBase;
__u32 sequence;
-} __packed;
+} __attribute__((packed));
#endif /* _LINUX_NCP_H */
diff --git a/include/linux/netfilter/nfnetlink_conntrack.h b/include/linux/netfilter/nfnetlink_conntrack.h
index 9ed534c..70cd060 100644
--- a/include/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/linux/netfilter/nfnetlink_conntrack.h
@@ -39,8 +39,9 @@
CTA_TUPLE_MASTER,
CTA_NAT_SEQ_ADJ_ORIG,
CTA_NAT_SEQ_ADJ_REPLY,
- CTA_SECMARK,
+ CTA_SECMARK, /* obsolete */
CTA_ZONE,
+ CTA_SECCTX,
__CTA_MAX
};
#define CTA_MAX (__CTA_MAX - 1)
@@ -172,4 +173,11 @@
};
#define CTA_HELP_MAX (__CTA_HELP_MAX - 1)
+enum ctattr_secctx {
+ CTA_SECCTX_UNSPEC,
+ CTA_SECCTX_NAME,
+ __CTA_SECCTX_MAX
+};
+#define CTA_SECCTX_MAX (__CTA_SECCTX_MAX - 1)
+
#endif /* _IPCONNTRACK_NETLINK_H */
diff --git a/include/linux/netfilter/xt_IDLETIMER.h b/include/linux/netfilter/xt_IDLETIMER.h
index 3e1aa1b..208ae93 100644
--- a/include/linux/netfilter/xt_IDLETIMER.h
+++ b/include/linux/netfilter/xt_IDLETIMER.h
@@ -39,7 +39,7 @@
char label[MAX_IDLETIMER_LABEL_SIZE];
/* for kernel module internal use only */
- struct idletimer_tg *timer __attribute((aligned(8)));
+ struct idletimer_tg *timer __attribute__((aligned(8)));
};
#endif
diff --git a/include/linux/netfilter/xt_SECMARK.h b/include/linux/netfilter/xt_SECMARK.h
index 6fcd344..989092b 100644
--- a/include/linux/netfilter/xt_SECMARK.h
+++ b/include/linux/netfilter/xt_SECMARK.h
@@ -11,18 +11,12 @@
* packets are being marked for.
*/
#define SECMARK_MODE_SEL 0x01 /* SELinux */
-#define SECMARK_SELCTX_MAX 256
-
-struct xt_secmark_target_selinux_info {
- __u32 selsid;
- char selctx[SECMARK_SELCTX_MAX];
-};
+#define SECMARK_SECCTX_MAX 256
struct xt_secmark_target_info {
__u8 mode;
- union {
- struct xt_secmark_target_selinux_info sel;
- } u;
+ __u32 secid;
+ char secctx[SECMARK_SECCTX_MAX];
};
#endif /*_XT_SECMARK_H_target */
diff --git a/include/linux/netfilter/xt_ipvs.h b/include/linux/netfilter/xt_ipvs.h
index 1167aeb..eff34ac 100644
--- a/include/linux/netfilter/xt_ipvs.h
+++ b/include/linux/netfilter/xt_ipvs.h
@@ -1,6 +1,8 @@
#ifndef _XT_IPVS_H
#define _XT_IPVS_H
+#include <linux/types.h>
+
enum {
XT_IPVS_IPVS_PROPERTY = 1 << 0, /* all other options imply this one */
XT_IPVS_PROTO = 1 << 1,
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 59d0669..1235669 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -27,8 +27,6 @@
#define MAX_LINKS 32
-struct net;
-
struct sockaddr_nl {
sa_family_t nl_family; /* AF_NETLINK */
unsigned short nl_pad; /* zero */
@@ -151,6 +149,8 @@
#include <linux/capability.h>
#include <linux/skbuff.h>
+struct net;
+
static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
{
return (struct nlmsghdr *)skb->data;
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 791d510..50d8009 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -63,20 +63,20 @@
unsigned long flags;
bool ret = false;
- rcu_read_lock_bh();
+ local_irq_save(flags);
npinfo = rcu_dereference_bh(skb->dev->npinfo);
if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
goto out;
- spin_lock_irqsave(&npinfo->rx_lock, flags);
+ spin_lock(&npinfo->rx_lock);
/* check rx_flags again with the lock held */
if (npinfo->rx_flags && __netpoll_rx(skb))
ret = true;
- spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+ spin_unlock(&npinfo->rx_lock);
out:
- rcu_read_unlock_bh();
+ local_irq_restore(flags);
return ret;
}
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 508f8cf..d0edf7d 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -185,7 +185,7 @@
struct nfs4_cached_acl *nfs4_acl;
/* NFSv4 state */
struct list_head open_states;
- struct nfs_delegation *delegation;
+ struct nfs_delegation __rcu *delegation;
fmode_t delegation_state;
struct rw_semaphore rwsem;
#endif /* CONFIG_NFS_V4*/
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index b2f1a4d..2026f9e 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -49,28 +49,28 @@
struct notifier_block {
int (*notifier_call)(struct notifier_block *, unsigned long, void *);
- struct notifier_block *next;
+ struct notifier_block __rcu *next;
int priority;
};
struct atomic_notifier_head {
spinlock_t lock;
- struct notifier_block *head;
+ struct notifier_block __rcu *head;
};
struct blocking_notifier_head {
struct rw_semaphore rwsem;
- struct notifier_block *head;
+ struct notifier_block __rcu *head;
};
struct raw_notifier_head {
- struct notifier_block *head;
+ struct notifier_block __rcu *head;
};
struct srcu_notifier_head {
struct mutex mutex;
struct srcu_struct srcu;
- struct notifier_block *head;
+ struct notifier_block __rcu *head;
};
#define ATOMIC_INIT_NOTIFIER_HEAD(name) do { \
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
index 5171639..32fb812 100644
--- a/include/linux/oprofile.h
+++ b/include/linux/oprofile.h
@@ -15,6 +15,7 @@
#include <linux/types.h>
#include <linux/spinlock.h>
+#include <linux/init.h>
#include <asm/atomic.h>
/* Each escaped entry is prefixed by ESCAPE_CODE
@@ -185,4 +186,10 @@
int oprofile_add_data64(struct op_entry *entry, u64 val);
int oprofile_write_commit(struct op_entry *entry);
+#ifdef CONFIG_PERF_EVENTS
+int __init oprofile_perf_init(struct oprofile_operations *ops);
+void oprofile_perf_exit(void);
+char *op_name_from_perf_id(void);
+#endif /* CONFIG_PERF_EVENTS */
+
#endif /* OPROFILE_H */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index b1d1795..c8d95e3 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1214,6 +1214,9 @@
unsigned int devfn)
{ return NULL; }
+static inline int pci_domain_nr(struct pci_bus *bus)
+{ return 0; }
+
#define dev_is_pci(d) (false)
#define dev_is_pf(d) (false)
#define dev_num_vf(d) (0)
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index f6a3b2d..2615c37 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -393,6 +393,9 @@
#define PCI_DEVICE_ID_VLSI_82C147 0x0105
#define PCI_DEVICE_ID_VLSI_VAS96011 0x0702
+/* AMD RD890 Chipset */
+#define PCI_DEVICE_ID_RD890_IOMMU 0x5a23
+
#define PCI_VENDOR_ID_ADL 0x1005
#define PCI_DEVICE_ID_ADL_2301 0x2301
@@ -514,6 +517,7 @@
#define PCI_DEVICE_ID_AMD_11H_NB_DRAM 0x1302
#define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303
#define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304
+#define PCI_DEVICE_ID_AMD_15H_NB_MISC 0x1603
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
#define PCI_DEVICE_ID_AMD_SCSI 0x2020
@@ -2300,6 +2304,8 @@
#define PCI_DEVICE_ID_P2010 0x0079
#define PCI_DEVICE_ID_P1020E 0x0100
#define PCI_DEVICE_ID_P1020 0x0101
+#define PCI_DEVICE_ID_P1021E 0x0102
+#define PCI_DEVICE_ID_P1021 0x0103
#define PCI_DEVICE_ID_P1011E 0x0108
#define PCI_DEVICE_ID_P1011 0x0109
#define PCI_DEVICE_ID_P1022E 0x0110
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index b8b9084..0eb5083 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -39,6 +39,15 @@
preempt_enable(); \
} while (0)
+#define get_cpu_ptr(var) ({ \
+ preempt_disable(); \
+ this_cpu_ptr(var); })
+
+#define put_cpu_ptr(var) do { \
+ (void)(var); \
+ preempt_enable(); \
+} while (0)
+
#ifdef CONFIG_SMP
/* minimum unit size, also is the maximum supported allocation size */
@@ -149,7 +158,7 @@
#else /* CONFIG_SMP */
-#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
+#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
/* can't distinguish from other static vars, always false */
static inline bool is_kernel_percpu_address(unsigned long addr)
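
The new pair mirrors get_cpu()/put_cpu() for per-cpu pointers: preemption stays off across the access so the task cannot migrate mid-update. A minimal sketch with a hypothetical counter:

	struct my_stats {
		unsigned long events;
	};

	static struct my_stats __percpu *stats;	/* from alloc_percpu() */

	static void count_event(void)
	{
		struct my_stats *s = get_cpu_ptr(stats);	/* preempt off */

		s->events++;			/* safe: we stay on this CPU */
		put_cpu_ptr(stats);		/* preempt on again */
	}
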
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 716f99b..057bf22 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -486,6 +486,8 @@
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
+#include <linux/irq_work.h>
+#include <linux/jump_label_ref.h>
#include <asm/atomic.h>
#include <asm/local.h>
@@ -529,16 +531,22 @@
int last_cpu;
};
struct { /* software */
- s64 remaining;
struct hrtimer hrtimer;
};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
struct { /* breakpoint */
struct arch_hw_breakpoint info;
struct list_head bp_list;
+ /*
+ * Crufty hack to avoid the chicken and egg
+ * problem hw_breakpoint has with context
+ * creation and event initialization.
+ */
+ struct task_struct *bp_target;
};
#endif
};
+ int state;
local64_t prev_count;
u64 sample_period;
u64 last_period;
@@ -550,6 +558,13 @@
#endif
};
+/*
+ * hw_perf_event::state flags
+ */
+#define PERF_HES_STOPPED 0x01 /* the counter is stopped */
+#define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */
+#define PERF_HES_ARCH 0x04
+
struct perf_event;
/*
@@ -561,36 +576,70 @@
* struct pmu - generic performance monitoring unit
*/
struct pmu {
- int (*enable) (struct perf_event *event);
- void (*disable) (struct perf_event *event);
- int (*start) (struct perf_event *event);
- void (*stop) (struct perf_event *event);
+ struct list_head entry;
+
+ int * __percpu pmu_disable_count;
+ struct perf_cpu_context * __percpu pmu_cpu_context;
+ int task_ctx_nr;
+
+ /*
+ * Fully disable/enable this PMU; can be used to protect from the PMI
+ * as well as for lazy/batch writing of the MSRs.
+ */
+ void (*pmu_enable) (struct pmu *pmu); /* optional */
+ void (*pmu_disable) (struct pmu *pmu); /* optional */
+
+ /*
+ * Try and initialize the event for this PMU.
+ * Should return -ENOENT when the @event doesn't match this PMU.
+ */
+ int (*event_init) (struct perf_event *event);
+
+#define PERF_EF_START 0x01 /* start the counter when adding */
+#define PERF_EF_RELOAD 0x02 /* reload the counter when starting */
+#define PERF_EF_UPDATE 0x04 /* update the counter when stopping */
+
+ /*
+ * Adds/Removes a counter to/from the PMU, can be done inside
+ * a transaction, see the ->*_txn() methods.
+ */
+ int (*add) (struct perf_event *event, int flags);
+ void (*del) (struct perf_event *event, int flags);
+
+ /*
+ * Starts/Stops a counter present on the PMU. The PMI handler
+ * should stop the counter when perf_event_overflow() returns
+ * !0. ->start() will be used to continue.
+ */
+ void (*start) (struct perf_event *event, int flags);
+ void (*stop) (struct perf_event *event, int flags);
+
+ /*
+ * Updates the counter value of the event.
+ */
void (*read) (struct perf_event *event);
- void (*unthrottle) (struct perf_event *event);
/*
- * Group events scheduling is treated as a transaction, add group
- * events as a whole and perform one schedulability test. If the test
- * fails, roll back the whole group
+ * Group event scheduling is treated as a transaction: add
+ * group events as a whole and perform one schedulability test.
+ * If the test fails, roll back the whole group.
+ *
+ * Start the transaction; after this, ->add() doesn't need to
+ * do schedulability tests.
*/
-
+ void (*start_txn) (struct pmu *pmu); /* optional */
/*
- * Start the transaction, after this ->enable() doesn't need
- * to do schedulability tests.
- */
- void (*start_txn) (const struct pmu *pmu);
- /*
- * If ->start_txn() disabled the ->enable() schedulability test
+ * If ->start_txn() disabled the ->add() schedulability test
* then ->commit_txn() is required to perform one. On success
* the transaction is closed. On error the transaction is kept
* open until ->cancel_txn() is called.
*/
- int (*commit_txn) (const struct pmu *pmu);
+ int (*commit_txn) (struct pmu *pmu); /* optional */
/*
- * Will cancel the transaction, assumes ->disable() is called for
- * each successfull ->enable() during the transaction.
+ * Will cancel the transaction, assumes ->del() is called
+ * for each successful ->add() during the transaction.
*/
- void (*cancel_txn) (const struct pmu *pmu);
+ void (*cancel_txn) (struct pmu *pmu); /* optional */
};
/**
@@ -631,11 +680,6 @@
void *data_pages[0];
};
-struct perf_pending_entry {
- struct perf_pending_entry *next;
- void (*func)(struct perf_pending_entry *);
-};
-
struct perf_sample_data;
typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
@@ -656,6 +700,7 @@
#define PERF_ATTACH_CONTEXT 0x01
#define PERF_ATTACH_GROUP 0x02
+#define PERF_ATTACH_TASK 0x04
/**
* struct perf_event - performance event kernel representation:
@@ -669,7 +714,7 @@
int nr_siblings;
int group_flags;
struct perf_event *group_leader;
- const struct pmu *pmu;
+ struct pmu *pmu;
enum perf_event_active_state state;
unsigned int attach_state;
@@ -743,7 +788,7 @@
int pending_wakeup;
int pending_kill;
int pending_disable;
- struct perf_pending_entry pending;
+ struct irq_work pending;
atomic_t event_limit;
@@ -763,12 +808,19 @@
#endif /* CONFIG_PERF_EVENTS */
};
+enum perf_event_context_type {
+ task_context,
+ cpu_context,
+};
+
/**
* struct perf_event_context - event context structure
*
* Used as a container for task events and CPU events as well:
*/
struct perf_event_context {
+ enum perf_event_context_type type;
+ struct pmu *pmu;
/*
* Protect the states of the events in the list,
* nr_active, and the list:
@@ -808,6 +860,12 @@
struct rcu_head rcu_head;
};
+/*
+ * Number of contexts where an event can trigger:
+ * task, softirq, hardirq, nmi.
+ */
+#define PERF_NR_CONTEXTS 4
+
/**
* struct perf_event_cpu_context - per cpu event context structure
*/
@@ -815,18 +873,9 @@
struct perf_event_context ctx;
struct perf_event_context *task_ctx;
int active_oncpu;
- int max_pertask;
int exclusive;
- struct swevent_hlist *swevent_hlist;
- struct mutex hlist_mutex;
- int hlist_refcount;
-
- /*
- * Recursion avoidance:
- *
- * task, softirq, irq, nmi context
- */
- int recursion[4];
+ struct list_head rotation_list;
+ int jiffies_interval;
};
struct perf_output_handle {
@@ -842,26 +891,34 @@
#ifdef CONFIG_PERF_EVENTS
-/*
- * Set by architecture code:
- */
-extern int perf_max_events;
+extern int perf_pmu_register(struct pmu *pmu);
+extern void perf_pmu_unregister(struct pmu *pmu);
-extern const struct pmu *hw_perf_event_init(struct perf_event *event);
+extern int perf_num_counters(void);
+extern const char *perf_pmu_name(void);
+extern void __perf_event_task_sched_in(struct task_struct *task);
+extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
-extern void perf_event_task_sched_in(struct task_struct *task);
-extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
-extern void perf_event_task_tick(struct task_struct *task);
+extern atomic_t perf_task_events;
+
+static inline void perf_event_task_sched_in(struct task_struct *task)
+{
+ COND_STMT(&perf_task_events, __perf_event_task_sched_in(task));
+}
+
+static inline
+void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
+{
+ COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next));
+}
+
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
-extern void set_perf_event_pending(void);
-extern void perf_event_do_pending(void);
+extern void perf_event_delayed_put(struct task_struct *task);
extern void perf_event_print_debug(void);
-extern void __perf_disable(void);
-extern bool __perf_enable(void);
-extern void perf_disable(void);
-extern void perf_enable(void);
+extern void perf_pmu_disable(struct pmu *pmu);
+extern void perf_pmu_enable(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern void perf_event_update_userpage(struct perf_event *event);
@@ -869,7 +926,7 @@
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
int cpu,
- pid_t pid,
+ struct task_struct *task,
perf_overflow_handler_t callback);
extern u64 perf_event_read_value(struct perf_event *event,
u64 *enabled, u64 *running);
@@ -920,14 +977,7 @@
*/
static inline int is_software_event(struct perf_event *event)
{
- switch (event->attr.type) {
- case PERF_TYPE_SOFTWARE:
- case PERF_TYPE_TRACEPOINT:
- /* for now the breakpoint stuff also works as software event */
- case PERF_TYPE_BREAKPOINT:
- return 1;
- }
- return 0;
+ return event->pmu->task_ctx_nr == perf_sw_context;
}
extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
@@ -954,18 +1004,20 @@
perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}
-static inline void
+static __always_inline void
perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
{
- if (atomic_read(&perf_swevent_enabled[event_id])) {
- struct pt_regs hot_regs;
+ struct pt_regs hot_regs;
- if (!regs) {
- perf_fetch_caller_regs(&hot_regs);
- regs = &hot_regs;
- }
- __perf_sw_event(event_id, nr, nmi, regs, addr);
+ JUMP_LABEL(&perf_swevent_enabled[event_id], have_event);
+ return;
+
+have_event:
+ if (!regs) {
+ perf_fetch_caller_regs(&hot_regs);
+ regs = &hot_regs;
}
+ __perf_sw_event(event_id, nr, nmi, regs, addr);
}
extern void perf_event_mmap(struct vm_area_struct *vma);
@@ -976,7 +1028,21 @@
extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);
-extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
+/* Callchains */
+DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
+
+extern void perf_callchain_user(struct perf_callchain_entry *entry,
+ struct pt_regs *regs);
+extern void perf_callchain_kernel(struct perf_callchain_entry *entry,
+ struct pt_regs *regs);
+
+
+static inline void
+perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
+{
+ if (entry->nr < PERF_MAX_STACK_DEPTH)
+ entry->ip[entry->nr++] = ip;
+}
extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
@@ -1019,21 +1085,18 @@
extern void perf_swevent_put_recursion_context(int rctx);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
+extern void perf_event_task_tick(void);
#else
static inline void
perf_event_task_sched_in(struct task_struct *task) { }
static inline void
perf_event_task_sched_out(struct task_struct *task,
struct task_struct *next) { }
-static inline void
-perf_event_task_tick(struct task_struct *task) { }
static inline int perf_event_init_task(struct task_struct *child) { return 0; }
static inline void perf_event_exit_task(struct task_struct *child) { }
static inline void perf_event_free_task(struct task_struct *task) { }
-static inline void perf_event_do_pending(void) { }
+static inline void perf_event_delayed_put(struct task_struct *task) { }
static inline void perf_event_print_debug(void) { }
-static inline void perf_disable(void) { }
-static inline void perf_enable(void) { }
static inline int perf_event_task_disable(void) { return -EINVAL; }
static inline int perf_event_task_enable(void) { return -EINVAL; }
@@ -1056,6 +1119,7 @@
static inline void perf_swevent_put_recursion_context(int rctx) { }
static inline void perf_event_enable(struct perf_event *event) { }
static inline void perf_event_disable(struct perf_event *event) { }
+static inline void perf_event_task_tick(void) { }
#endif
#define perf_output_put(handle, x) \
diff --git a/include/linux/phonet.h b/include/linux/phonet.h
index 24426c3..76edadf 100644
--- a/include/linux/phonet.h
+++ b/include/linux/phonet.h
@@ -56,7 +56,7 @@
__be16 pn_length;
__u8 pn_robj;
__u8 pn_sobj;
-} __packed;
+} __attribute__((packed));
/* Common Phonet payload header */
struct phonetmsg {
@@ -98,7 +98,7 @@
__u8 spn_dev;
__u8 spn_resource;
__u8 spn_zero[sizeof(struct sockaddr) - sizeof(sa_family_t) - 3];
-} __packed;
+} __attribute__((packed));
/* Well known address */
#define PN_DEV_PC 0x10
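
The switch from __packed back to the spelled-out attribute matters because this header is exported to userspace, where the kernel-internal __packed shorthand is not defined. A small standalone sketch of what the attribute buys (the sizes shown are typical for common ABIs, not guaranteed):

	#include <stdio.h>

	struct loose { char a; int b; };				/* usually padded to 8 bytes */
	struct tight { char a; int b; } __attribute__((packed));	/* usually 5 bytes */

	int main(void)
	{
		printf("loose=%zu tight=%zu\n",
		       sizeof(struct loose), sizeof(struct tight));
		return 0;
	}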
diff --git a/include/linux/pxa168_eth.h b/include/linux/pxa168_eth.h
new file mode 100644
index 0000000..18d75e7
--- /dev/null
+++ b/include/linux/pxa168_eth.h
@@ -0,0 +1,30 @@
+/*
+ * pxa168 ethernet platform device data definition file.
+ */
+#ifndef __LINUX_PXA168_ETH_H
+#define __LINUX_PXA168_ETH_H
+
+struct pxa168_eth_platform_data {
+ int port_number;
+ int phy_addr;
+
+ /*
+ * If speed is 0, then speed and duplex are autonegotiated.
+ */
+ int speed; /* 0, SPEED_10, SPEED_100 */
+ int duplex; /* DUPLEX_HALF or DUPLEX_FULL */
+
+ /*
+ * Override default RX/TX queue sizes if nonzero.
+ */
+ int rx_queue_size;
+ int tx_queue_size;
+
+ /*
+ * The init() callback is used for board-specific initialization;
+ * e.g. on Aspenite it is used to initialize the PHY transceiver.
+ */
+ int (*init)(void);
+};
+
+#endif /* __LINUX_PXA168_ETH_H */
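
A hypothetical board-file sketch of how this structure would be handed to the driver; the device name and board symbols are assumptions for illustration, not taken from this patch:

	#include <linux/platform_device.h>
	#include <linux/pxa168_eth.h>

	static int board_phy_init(void)
	{
		/* board-specific PHY transceiver setup would go here */
		return 0;
	}

	static struct pxa168_eth_platform_data board_eth_pdata = {
		.port_number	= 0,
		.phy_addr	= 0,
		.speed		= 0,		/* 0: autonegotiate */
		.init		= board_phy_init,
	};

	static struct platform_device board_eth_device = {
		.name	= "pxa168-eth",		/* assumed driver name */
		.id	= -1,
		.dev	= { .platform_data = &board_eth_pdata },
	};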
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index d50ba85..d1a9193 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -274,8 +274,14 @@
int ret;
ret = dquot_alloc_space_nodirty(inode, nr);
- if (!ret)
- mark_inode_dirty_sync(inode);
+ if (!ret) {
+ /*
+ * Mark inode fully dirty. Since we are allocating blocks, the
+ * inode would become fully dirty soon anyway, and marking it now
+ * reportedly reduces inode_lock contention.
+ */
+ mark_inode_dirty(inode);
+ }
return ret;
}
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 634b8e6..a39cbed 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -47,6 +47,8 @@
{
return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR);
}
+#define radix_tree_indirect_to_ptr(ptr) \
+ radix_tree_indirect_to_ptr((void __force *)(ptr))
static inline int radix_tree_is_indirect_ptr(void *ptr)
{
@@ -61,7 +63,7 @@
struct radix_tree_root {
unsigned int height;
gfp_t gfp_mask;
- struct radix_tree_node *rnode;
+ struct radix_tree_node __rcu *rnode;
};
#define RADIX_TREE_INIT(mask) { \
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 4ec3b38..f31ef61 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -10,6 +10,21 @@
#include <linux/rcupdate.h>
/*
+ * Why is there no list_empty_rcu()? Because list_empty() serves this
+ * purpose. The list_empty() function fetches the RCU-protected pointer
+ * and compares it to the address of the list head, but neither dereferences
+ * this pointer itself nor provides this pointer to the caller. Therefore,
+ * it is not necessary to use rcu_dereference(), so that list_empty() can
+ * be used anywhere you would want to use a list_empty_rcu().
+ */
+
+/*
+ * Return the ->next pointer of a list_head in an RCU-safe
+ * way; we must not access it directly.
+ */
+#define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next)))
+
+/*
* Insert a new entry between two known consecutive entries.
*
* This is only for internal list manipulation where we know
@@ -20,7 +35,7 @@
{
new->next = next;
new->prev = prev;
- rcu_assign_pointer(prev->next, new);
+ rcu_assign_pointer(list_next_rcu(prev), new);
next->prev = new;
}
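
The typed list_next_rcu() accessor above exists so that sparse can verify the __rcu annotation on ->next without forcing every caller to cast. A minimal usage sketch with assumed structure names:

	struct item {
		int key;
		struct list_head link;
	};

	static LIST_HEAD(items);	/* updates serialized by some lock elsewhere */

	static void item_publish(struct item *it)
	{
		list_add_rcu(&it->link, &items);	/* goes through list_next_rcu() */
	}

	static struct item *item_lookup(int key)	/* caller holds rcu_read_lock() */
	{
		struct item *it;

		list_for_each_entry_rcu(it, &items, link)
			if (it->key == key)
				return it;
		return NULL;
	}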
@@ -138,7 +153,7 @@
{
new->next = old->next;
new->prev = old->prev;
- rcu_assign_pointer(new->prev->next, new);
+ rcu_assign_pointer(list_next_rcu(new->prev), new);
new->next->prev = new;
old->prev = LIST_POISON2;
}
@@ -193,7 +208,7 @@
*/
last->next = at;
- rcu_assign_pointer(head->next, first);
+ rcu_assign_pointer(list_next_rcu(head), first);
first->prev = head;
at->prev = last;
}
@@ -208,7 +223,9 @@
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
*/
#define list_entry_rcu(ptr, type, member) \
- container_of(rcu_dereference_raw(ptr), type, member)
+ ({typeof (*ptr) __rcu *__ptr = (typeof (*ptr) __rcu __force *)ptr; \
+ container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \
+ })
/**
* list_first_entry_rcu - get the first element from a list
@@ -225,9 +242,9 @@
list_entry_rcu((ptr)->next, type, member)
#define __list_for_each_rcu(pos, head) \
- for (pos = rcu_dereference_raw((head)->next); \
+ for (pos = rcu_dereference_raw(list_next_rcu(head)); \
pos != (head); \
- pos = rcu_dereference_raw(pos->next))
+ pos = rcu_dereference_raw(list_next_rcu(pos)))
/**
* list_for_each_entry_rcu - iterate over rcu list of given type
@@ -257,9 +274,9 @@
* as long as the traversal is guarded by rcu_read_lock().
*/
#define list_for_each_continue_rcu(pos, head) \
- for ((pos) = rcu_dereference_raw((pos)->next); \
+ for ((pos) = rcu_dereference_raw(list_next_rcu(pos)); \
prefetch((pos)->next), (pos) != (head); \
- (pos) = rcu_dereference_raw((pos)->next))
+ (pos) = rcu_dereference_raw(list_next_rcu(pos)))
/**
* list_for_each_entry_continue_rcu - continue iteration over list of given type
@@ -314,12 +331,19 @@
new->next = next;
new->pprev = old->pprev;
- rcu_assign_pointer(*new->pprev, new);
+ rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new);
if (next)
new->next->pprev = &new->next;
old->pprev = LIST_POISON2;
}
+/*
+ * Return the first or the next element in an RCU-protected hlist.
+ */
+#define hlist_first_rcu(head) (*((struct hlist_node __rcu **)(&(head)->first)))
+#define hlist_next_rcu(node) (*((struct hlist_node __rcu **)(&(node)->next)))
+#define hlist_pprev_rcu(node) (*((struct hlist_node __rcu **)((node)->pprev)))
+
/**
* hlist_add_head_rcu
* @n: the element to add to the hash list.
@@ -346,7 +370,7 @@
n->next = first;
n->pprev = &h->first;
- rcu_assign_pointer(h->first, n);
+ rcu_assign_pointer(hlist_first_rcu(h), n);
if (first)
first->pprev = &n->next;
}
@@ -374,7 +398,7 @@
{
n->pprev = next->pprev;
n->next = next;
- rcu_assign_pointer(*(n->pprev), n);
+ rcu_assign_pointer(hlist_pprev_rcu(n), n);
next->pprev = &n->next;
}
@@ -401,15 +425,15 @@
{
n->next = prev->next;
n->pprev = &prev->next;
- rcu_assign_pointer(prev->next, n);
+ rcu_assign_pointer(hlist_next_rcu(prev), n);
if (n->next)
n->next->pprev = &n->next;
}
-#define __hlist_for_each_rcu(pos, head) \
- for (pos = rcu_dereference((head)->first); \
- pos && ({ prefetch(pos->next); 1; }); \
- pos = rcu_dereference(pos->next))
+#define __hlist_for_each_rcu(pos, head) \
+ for (pos = rcu_dereference(hlist_first_rcu(head)); \
+ pos && ({ prefetch(pos->next); 1; }); \
+ pos = rcu_dereference(hlist_next_rcu(pos)))
/**
* hlist_for_each_entry_rcu - iterate over rcu list of given type
@@ -422,11 +446,11 @@
* the _rcu list-mutation primitives such as hlist_add_head_rcu()
* as long as the traversal is guarded by rcu_read_lock().
*/
-#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
- for (pos = rcu_dereference_raw((head)->first); \
+#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
+ for (pos = rcu_dereference_raw(hlist_first_rcu(head)); \
pos && ({ prefetch(pos->next); 1; }) && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
- pos = rcu_dereference_raw(pos->next))
+ pos = rcu_dereference_raw(hlist_next_rcu(pos)))
/**
* hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index b70ffe5..2ae1371 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -37,6 +37,12 @@
}
}
+#define hlist_nulls_first_rcu(head) \
+ (*((struct hlist_nulls_node __rcu __force **)&(head)->first))
+
+#define hlist_nulls_next_rcu(node) \
+ (*((struct hlist_nulls_node __rcu __force **)&(node)->next))
+
/**
* hlist_nulls_del_rcu - deletes entry from hash list without re-initialization
* @n: the element to delete from the hash list.
@@ -88,7 +94,7 @@
n->next = first;
n->pprev = &h->first;
- rcu_assign_pointer(h->first, n);
+ rcu_assign_pointer(hlist_nulls_first_rcu(h), n);
if (!is_a_nulls(first))
first->pprev = &n->next;
}
@@ -100,11 +106,11 @@
* @member: the name of the hlist_nulls_node within the struct.
*
*/
-#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \
- for (pos = rcu_dereference_raw((head)->first); \
- (!is_a_nulls(pos)) && \
+#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \
+ for (pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \
+ (!is_a_nulls(pos)) && \
({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
- pos = rcu_dereference_raw(pos->next))
+ pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)))
#endif
#endif
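
For readers new to the nulls convention: each chain ends in a distinct "nulls" value rather than NULL, so a lockless lookup can detect that an object was moved to another chain mid-traversal and retry. A sketch with assumed names, following the pattern described in Documentation/RCU/rculist_nulls.txt:

	static struct obj *obj_lookup(struct hlist_nulls_head *head,
				      unsigned long slot, int key)
	{
		struct hlist_nulls_node *pos;
		struct obj *o;

	begin:
		hlist_nulls_for_each_entry_rcu(o, pos, head, node)
			if (o->key == key)
				return o;	/* caller holds rcu_read_lock() */
		if (get_nulls_value(pos) != slot)
			goto begin;		/* object moved chains; retry */
		return NULL;
	}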
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 9fbc54a..03cda7b 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -41,11 +41,15 @@
#include <linux/lockdep.h>
#include <linux/completion.h>
#include <linux/debugobjects.h>
+#include <linux/compiler.h>
#ifdef CONFIG_RCU_TORTURE_TEST
extern int rcutorture_runnable; /* for sysctl */
#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
+#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
+#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
+
/**
* struct rcu_head - callback structure for use with RCU
* @next: next update requests in a list
@@ -57,29 +61,94 @@
};
/* Exported common interfaces */
-extern void rcu_barrier(void);
+extern void call_rcu_sched(struct rcu_head *head,
+ void (*func)(struct rcu_head *rcu));
+extern void synchronize_sched(void);
extern void rcu_barrier_bh(void);
extern void rcu_barrier_sched(void);
extern void synchronize_sched_expedited(void);
extern int sched_expedited_torture_stats(char *page);
+static inline void __rcu_read_lock_bh(void)
+{
+ local_bh_disable();
+}
+
+static inline void __rcu_read_unlock_bh(void)
+{
+ local_bh_enable();
+}
+
+#ifdef CONFIG_PREEMPT_RCU
+
+extern void __rcu_read_lock(void);
+extern void __rcu_read_unlock(void);
+void synchronize_rcu(void);
+
+/*
+ * Defined as a macro as it is a very low level header included from
+ * areas that don't even know about current. This gives the rcu_read_lock()
+ * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
+ * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
+ */
+#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
+
+#else /* #ifdef CONFIG_PREEMPT_RCU */
+
+static inline void __rcu_read_lock(void)
+{
+ preempt_disable();
+}
+
+static inline void __rcu_read_unlock(void)
+{
+ preempt_enable();
+}
+
+static inline void synchronize_rcu(void)
+{
+ synchronize_sched();
+}
+
+static inline int rcu_preempt_depth(void)
+{
+ return 0;
+}
+
+#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+
/* Internal to kernel */
extern void rcu_init(void);
+extern void rcu_sched_qs(int cpu);
+extern void rcu_bh_qs(int cpu);
+extern void rcu_check_callbacks(int cpu, int user);
+struct notifier_block;
+
+#ifdef CONFIG_NO_HZ
+
+extern void rcu_enter_nohz(void);
+extern void rcu_exit_nohz(void);
+
+#else /* #ifdef CONFIG_NO_HZ */
+
+static inline void rcu_enter_nohz(void)
+{
+}
+
+static inline void rcu_exit_nohz(void)
+{
+}
+
+#endif /* #else #ifdef CONFIG_NO_HZ */
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
#include <linux/rcutree.h>
-#elif defined(CONFIG_TINY_RCU)
+#elif defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
#include <linux/rcutiny.h>
#else
#error "Unknown RCU implementation specified to kernel configuration"
#endif
-#define RCU_HEAD_INIT { .next = NULL, .func = NULL }
-#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
-#define INIT_RCU_HEAD(ptr) do { \
- (ptr)->next = NULL; (ptr)->func = NULL; \
-} while (0)
-
/*
* init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
* initialization and destruction of rcu_head on the stack. rcu_head structures
@@ -120,14 +189,15 @@
extern int debug_lockdep_rcu_enabled(void);
/**
- * rcu_read_lock_held - might we be in RCU read-side critical section?
+ * rcu_read_lock_held() - might we be in RCU read-side critical section?
*
* If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
* read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC,
* this assumes we are in an RCU read-side critical section unless it can
- * prove otherwise.
+ * prove otherwise. This is useful for debug checks in functions that
+ * require that they be called within an RCU read-side critical section.
*
- * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
+ * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
* and while lockdep is disabled.
*/
static inline int rcu_read_lock_held(void)
@@ -144,14 +214,16 @@
extern int rcu_read_lock_bh_held(void);
/**
- * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section?
+ * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
*
* If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
* RCU-sched read-side critical section. In absence of
* CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
* critical section unless it can prove otherwise. Note that disabling
* of preemption (including disabling irqs) counts as an RCU-sched
- * read-side critical section.
+ * read-side critical section. This is useful for debug checks in functions
+ * that require that they be called within an RCU-sched read-side
+ * critical section.
*
* Check debug_lockdep_rcu_enabled() to prevent false positives during boot
* and while lockdep is disabled.
@@ -211,7 +283,11 @@
extern int rcu_my_thread_group_empty(void);
-#define __do_rcu_dereference_check(c) \
+/**
+ * rcu_lockdep_assert - emit lockdep splat if specified condition not met
+ * @c: condition to check
+ */
+#define rcu_lockdep_assert(c) \
do { \
static bool __warned; \
if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \
@@ -220,41 +296,163 @@
} \
} while (0)
+#else /* #ifdef CONFIG_PROVE_RCU */
+
+#define rcu_lockdep_assert(c) do { } while (0)
+
+#endif /* #else #ifdef CONFIG_PROVE_RCU */
+
+/*
+ * Helper functions for rcu_dereference_check(), rcu_dereference_protected()
+ * and rcu_assign_pointer(). Some of these could be folded into their
+ * callers, but they are left separate in order to ease introduction of
+ * multiple flavors of pointers to match the multiple flavors of RCU
+ * (e.g., __rcu_bh, __rcu_sched, and __srcu), should this make sense in
+ * the future.
+ */
+
+#ifdef __CHECKER__
+#define rcu_dereference_sparse(p, space) \
+ ((void)(((typeof(*p) space *)p) == p))
+#else /* #ifdef __CHECKER__ */
+#define rcu_dereference_sparse(p, space)
+#endif /* #else #ifdef __CHECKER__ */
+
+#define __rcu_access_pointer(p, space) \
+ ({ \
+ typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
+ rcu_dereference_sparse(p, space); \
+ ((typeof(*p) __force __kernel *)(_________p1)); \
+ })
+#define __rcu_dereference_check(p, c, space) \
+ ({ \
+ typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
+ rcu_lockdep_assert(c); \
+ rcu_dereference_sparse(p, space); \
+ smp_read_barrier_depends(); \
+ ((typeof(*p) __force __kernel *)(_________p1)); \
+ })
+#define __rcu_dereference_protected(p, c, space) \
+ ({ \
+ rcu_lockdep_assert(c); \
+ rcu_dereference_sparse(p, space); \
+ ((typeof(*p) __force __kernel *)(p)); \
+ })
+
+#define __rcu_dereference_index_check(p, c) \
+ ({ \
+ typeof(p) _________p1 = ACCESS_ONCE(p); \
+ rcu_lockdep_assert(c); \
+ smp_read_barrier_depends(); \
+ (_________p1); \
+ })
+#define __rcu_assign_pointer(p, v, space) \
+ ({ \
+ if (!__builtin_constant_p(v) || \
+ ((v) != NULL)) \
+ smp_wmb(); \
+ (p) = (typeof(*v) __force space *)(v); \
+ })
+
+
/**
- * rcu_dereference_check - rcu_dereference with debug checking
+ * rcu_access_pointer() - fetch RCU pointer with no dereferencing
+ * @p: The pointer to read
+ *
+ * Return the value of the specified RCU-protected pointer, but omit the
+ * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful
+ * when the value of this pointer is accessed, but the pointer is not
+ * dereferenced, for example, when testing an RCU-protected pointer against
+ * NULL. Although rcu_access_pointer() may also be used in cases where
+ * update-side locks prevent the value of the pointer from changing, you
+ * should instead use rcu_dereference_protected() for this use case.
+ */
+#define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
+
+/**
+ * rcu_dereference_check() - rcu_dereference with debug checking
* @p: The pointer to read, prior to dereferencing
* @c: The conditions under which the dereference will take place
*
* Do an rcu_dereference(), but check that the conditions under which the
- * dereference will take place are correct. Typically the conditions indicate
- * the various locking conditions that should be held at that point. The check
- * should return true if the conditions are satisfied.
+ * dereference will take place are correct. Typically the conditions
+ * indicate the various locking conditions that should be held at that
+ * point. The check should return true if the conditions are satisfied.
+ * An implicit check for being in an RCU read-side critical section
+ * (rcu_read_lock()) is included.
*
* For example:
*
- * bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() ||
- * lockdep_is_held(&foo->lock));
+ * bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock));
*
* could be used to indicate to lockdep that foo->bar may only be dereferenced
- * if either the RCU read lock is held, or that the lock required to replace
+ * if either rcu_read_lock() is held, or that the lock required to replace
* the bar struct at foo->bar is held.
*
* Note that the list of conditions may also include indications of when a lock
* need not be held, for example during initialisation or destruction of the
* target struct:
*
- * bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() ||
- * lockdep_is_held(&foo->lock) ||
+ * bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) ||
* atomic_read(&foo->usage) == 0);
+ *
+ * Inserts memory barriers on architectures that require them
+ * (currently only the Alpha), prevents the compiler from refetching
+ * (and from merging fetches), and, more importantly, documents exactly
+ * which pointers are protected by RCU and checks that the pointer is
+ * annotated as __rcu.
*/
#define rcu_dereference_check(p, c) \
- ({ \
- __do_rcu_dereference_check(c); \
- rcu_dereference_raw(p); \
- })
+ __rcu_dereference_check((p), rcu_read_lock_held() || (c), __rcu)
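
Putting the pieces together, a sketch of the intended usage with assumed structure names, matching the foo->bar example in the comment above:

	struct foo {
		spinlock_t lock;
		struct bar __rcu *bar;
	};

	/* Read side: legal under rcu_read_lock() or with foo->lock held. */
	static struct bar *foo_get_bar(struct foo *foo)
	{
		return rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock));
	}

	/* Update side: foo->lock prevents the pointer from changing. */
	static struct bar *foo_set_bar(struct foo *foo, struct bar *new)
	{
		struct bar *old;

		spin_lock(&foo->lock);
		old = rcu_dereference_protected(foo->bar,
						lockdep_is_held(&foo->lock));
		rcu_assign_pointer(foo->bar, new);
		spin_unlock(&foo->lock);
		return old;	/* free only after a grace period */
	}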
/**
- * rcu_dereference_protected - fetch RCU pointer when updates prevented
+ * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * This is the RCU-bh counterpart to rcu_dereference_check().
+ */
+#define rcu_dereference_bh_check(p, c) \
+ __rcu_dereference_check((p), rcu_read_lock_bh_held() || (c), __rcu)
+
+/**
+ * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * This is the RCU-sched counterpart to rcu_dereference_check().
+ */
+#define rcu_dereference_sched_check(p, c) \
+ __rcu_dereference_check((p), rcu_read_lock_sched_held() || (c), \
+ __rcu)
+
+#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/
+
+/**
+ * rcu_dereference_index_check() - rcu_dereference for indices with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * Similar to rcu_dereference_check(), but omits the sparse checking.
+ * This allows rcu_dereference_index_check() to be used on integers,
+ * which can then be used as array indices. Attempting to use
+ * rcu_dereference_check() on an integer will give compiler warnings
+ * because the sparse address-space mechanism relies on dereferencing
+ * the RCU-protected pointer. Dereferencing integers is not something
+ * that even gcc will put up with.
+ *
+ * Note that this function does not implicitly check for RCU read-side
+ * critical sections. If this function gains lots of uses, it might
+ * make sense to provide versions for each flavor of RCU, but it does
+ * not make sense as of early 2010.
+ */
+#define rcu_dereference_index_check(p, c) \
+ __rcu_dereference_index_check((p), (c))
+
+/**
+ * rcu_dereference_protected() - fetch RCU pointer when updates prevented
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
*
* Return the value of the specified RCU-protected pointer, but omit
* both the smp_read_barrier_depends() and the ACCESS_ONCE(). This
@@ -263,35 +461,61 @@
* prevent the compiler from repeating this reference or combining it
* with other references, so it should not be used without protection
* of appropriate locks.
+ *
+ * This function is only for update-side use. Using this function
+ * when protected only by rcu_read_lock() will result in infrequent
+ * but very ugly failures.
*/
#define rcu_dereference_protected(p, c) \
- ({ \
- __do_rcu_dereference_check(c); \
- (p); \
- })
-
-#else /* #ifdef CONFIG_PROVE_RCU */
-
-#define rcu_dereference_check(p, c) rcu_dereference_raw(p)
-#define rcu_dereference_protected(p, c) (p)
-
-#endif /* #else #ifdef CONFIG_PROVE_RCU */
+ __rcu_dereference_protected((p), (c), __rcu)
/**
- * rcu_access_pointer - fetch RCU pointer with no dereferencing
+ * rcu_dereference_bh_protected() - fetch RCU-bh pointer when updates prevented
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
*
- * Return the value of the specified RCU-protected pointer, but omit the
- * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful
- * when the value of this pointer is accessed, but the pointer is not
- * dereferenced, for example, when testing an RCU-protected pointer against
- * NULL. This may also be used in cases where update-side locks prevent
- * the value of the pointer from changing, but rcu_dereference_protected()
- * is a lighter-weight primitive for this use case.
+ * This is the RCU-bh counterpart to rcu_dereference_protected().
*/
-#define rcu_access_pointer(p) ACCESS_ONCE(p)
+#define rcu_dereference_bh_protected(p, c) \
+ __rcu_dereference_protected((p), (c), __rcu)
/**
- * rcu_read_lock - mark the beginning of an RCU read-side critical section.
+ * rcu_dereference_sched_protected() - fetch RCU-sched pointer when updates prevented
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * This is the RCU-sched counterpart to rcu_dereference_protected().
+ */
+#define rcu_dereference_sched_protected(p, c) \
+ __rcu_dereference_protected((p), (c), __rcu)
+
+
+/**
+ * rcu_dereference() - fetch RCU-protected pointer for dereferencing
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * This is a simple wrapper around rcu_dereference_check().
+ */
+#define rcu_dereference(p) rcu_dereference_check(p, 0)
+
+/**
+ * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Makes rcu_dereference_check() do the dirty work.
+ */
+#define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0)
+
+/**
+ * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Makes rcu_dereference_check() do the dirty work.
+ */
+#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)
+
+/**
+ * rcu_read_lock() - mark the beginning of an RCU read-side critical section
*
* When synchronize_rcu() is invoked on one CPU while other CPUs
* are within RCU read-side critical sections, then the
@@ -302,7 +526,7 @@
* until after all the other CPUs exit their critical sections.
*
* Note, however, that RCU callbacks are permitted to run concurrently
- * with RCU read-side critical sections. One way that this can happen
+ * with new RCU read-side critical sections. One way that this can happen
* is via the following sequence of events: (1) CPU 0 enters an RCU
* read-side critical section, (2) CPU 1 invokes call_rcu() to register
* an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
@@ -317,7 +541,20 @@
* will be deferred until the outermost RCU read-side critical section
* completes.
*
- * It is illegal to block while in an RCU read-side critical section.
+ * You can avoid reading and understanding the next paragraph by
+ * following this rule: don't put anything in an rcu_read_lock() RCU
+ * read-side critical section that would block in a !PREEMPT kernel.
+ * But if you want the full story, read on!
+ *
+ * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), it
+ * is illegal to block while in an RCU read-side critical section. In
+ * preemptible RCU implementations (TREE_PREEMPT_RCU and TINY_PREEMPT_RCU)
+ * in CONFIG_PREEMPT kernel builds, RCU read-side critical sections may
+ * be preempted, but explicit blocking is illegal. Finally, in preemptible
+ * RCU implementations in real-time (CONFIG_PREEMPT_RT) kernel builds,
+ * RCU read-side critical sections may be preempted and they may also
+ * block, but only when acquiring spinlocks that are subject to priority
+ * inheritance.
*/
static inline void rcu_read_lock(void)
{
@@ -337,7 +574,7 @@
*/
/**
- * rcu_read_unlock - marks the end of an RCU read-side critical section.
+ * rcu_read_unlock() - marks the end of an RCU read-side critical section.
*
* See rcu_read_lock() for more information.
*/
@@ -349,15 +586,16 @@
}
/**
- * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
+ * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
*
* This is equivalent of rcu_read_lock(), but to be used when updates
- * are being done using call_rcu_bh(). Since call_rcu_bh() callbacks
- * consider completion of a softirq handler to be a quiescent state,
- * a process in RCU read-side critical section must be protected by
- * disabling softirqs. Read-side critical sections in interrupt context
- * can use just rcu_read_lock().
- *
+ * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since
+ * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a
+ * softirq handler to be a quiescent state, a process in RCU read-side
+ * critical section must be protected by disabling softirqs. Read-side
+ * critical sections in interrupt context can use just rcu_read_lock(),
+ * though this should at least be commented to avoid confusing people
+ * reading the code.
*/
static inline void rcu_read_lock_bh(void)
{
@@ -379,13 +617,12 @@
}
/**
- * rcu_read_lock_sched - mark the beginning of a RCU-classic critical section
+ * rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section
*
- * Should be used with either
- * - synchronize_sched()
- * or
- * - call_rcu_sched() and rcu_barrier_sched()
- * on the write-side to insure proper synchronization.
+ * This is equivalent of rcu_read_lock(), but to be used when updates
+ * are being done using call_rcu_sched() or synchronize_rcu_sched().
+ * Read-side critical sections can also be introduced by anything that
+ * disables preemption, including local_irq_disable() and friends.
*/
static inline void rcu_read_lock_sched(void)
{
@@ -420,54 +657,14 @@
preempt_enable_notrace();
}
-
/**
- * rcu_dereference_raw - fetch an RCU-protected pointer
+ * rcu_assign_pointer() - assign to RCU-protected pointer
+ * @p: pointer to assign to
+ * @v: value to assign (publish)
*
- * The caller must be within some flavor of RCU read-side critical
- * section, or must be otherwise preventing the pointer from changing,
- * for example, by holding an appropriate lock. This pointer may later
- * be safely dereferenced. It is the caller's responsibility to have
- * done the right thing, as this primitive does no checking of any kind.
- *
- * Inserts memory barriers on architectures that require them
- * (currently only the Alpha), and, more importantly, documents
- * exactly which pointers are protected by RCU.
- */
-#define rcu_dereference_raw(p) ({ \
- typeof(p) _________p1 = ACCESS_ONCE(p); \
- smp_read_barrier_depends(); \
- (_________p1); \
- })
-
-/**
- * rcu_dereference - fetch an RCU-protected pointer, checking for RCU
- *
- * Makes rcu_dereference_check() do the dirty work.
- */
-#define rcu_dereference(p) \
- rcu_dereference_check(p, rcu_read_lock_held())
-
-/**
- * rcu_dereference_bh - fetch an RCU-protected pointer, checking for RCU-bh
- *
- * Makes rcu_dereference_check() do the dirty work.
- */
-#define rcu_dereference_bh(p) \
- rcu_dereference_check(p, rcu_read_lock_bh_held())
-
-/**
- * rcu_dereference_sched - fetch RCU-protected pointer, checking for RCU-sched
- *
- * Makes rcu_dereference_check() do the dirty work.
- */
-#define rcu_dereference_sched(p) \
- rcu_dereference_check(p, rcu_read_lock_sched_held())
-
-/**
- * rcu_assign_pointer - assign (publicize) a pointer to a newly
- * initialized structure that will be dereferenced by RCU read-side
- * critical sections. Returns the value assigned.
+ * Assigns the specified value to the specified RCU-protected
+ * pointer, ensuring that any concurrent RCU readers will see
+ * any prior initialization. Returns the value assigned.
*
* Inserts memory barriers on architectures that require them
* (pretty much all of them other than x86), and also prevents
@@ -476,14 +673,17 @@
* call documents which pointers will be dereferenced by RCU read-side
* code.
*/
-
#define rcu_assign_pointer(p, v) \
- ({ \
- if (!__builtin_constant_p(v) || \
- ((v) != NULL)) \
- smp_wmb(); \
- (p) = (v); \
- })
+ __rcu_assign_pointer((p), (v), __rcu)
+
+/**
+ * RCU_INIT_POINTER() - initialize an RCU protected pointer
+ *
+ * Initialize an RCU-protected pointer in such a way to avoid RCU-lockdep
+ * splats.
+ */
+#define RCU_INIT_POINTER(p, v) \
+ p = (typeof(*v) __force __rcu *)(v)
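
A sketch of where RCU_INIT_POINTER() is appropriate: the structure is not yet visible to any reader, so no memory barrier is needed and no lockdep splat should fire (names assumed):

	struct foo_table {
		struct bar __rcu *slot;
	};

	static void foo_table_init(struct foo_table *t)
	{
		RCU_INIT_POINTER(t->slot, NULL);	/* not yet published */
	}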
/* Infrastructure to implement the synchronize_() primitives. */
@@ -494,26 +694,37 @@
extern void wakeme_after_rcu(struct rcu_head *head);
+#ifdef CONFIG_PREEMPT_RCU
+
/**
- * call_rcu - Queue an RCU callback for invocation after a grace period.
+ * call_rcu() - Queue an RCU callback for invocation after a grace period.
* @head: structure to be used for queueing the RCU updates.
- * @func: actual update function to be invoked after the grace period
+ * @func: actual callback function to be invoked after the grace period
*
- * The update function will be invoked some time after a full grace
- * period elapses, in other words after all currently executing RCU
- * read-side critical sections have completed. RCU read-side critical
+ * The callback function will be invoked some time after a full grace
+ * period elapses, in other words after all pre-existing RCU read-side
+ * critical sections have completed. However, the callback function
+ * might well execute concurrently with RCU read-side critical sections
+ * that started after call_rcu() was invoked. RCU read-side critical
* sections are delimited by rcu_read_lock() and rcu_read_unlock(),
* and may be nested.
*/
extern void call_rcu(struct rcu_head *head,
void (*func)(struct rcu_head *head));
+#else /* #ifdef CONFIG_PREEMPT_RCU */
+
+/* In classic RCU, call_rcu() is just call_rcu_sched(). */
+#define call_rcu call_rcu_sched
+
+#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+
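
A minimal call_rcu() sketch tying the pieces together (structure names assumed); the callback runs only after all pre-existing readers are done:

	struct item {
		struct list_head link;
		struct rcu_head rcu;
	};

	static void item_free_rcu(struct rcu_head *head)
	{
		kfree(container_of(head, struct item, rcu));
	}

	static void item_remove(struct item *it)	/* update-side lock held */
	{
		list_del_rcu(&it->link);
		call_rcu(&it->rcu, item_free_rcu);
	}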
/**
- * call_rcu_bh - Queue an RCU for invocation after a quicker grace period.
+ * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
* @head: structure to be used for queueing the RCU updates.
- * @func: actual update function to be invoked after the grace period
+ * @func: actual callback function to be invoked after the grace period
*
- * The update function will be invoked some time after a full grace
+ * The callback function will be invoked some time after a full grace
* period elapses, in other words after all currently executing RCU
* read-side critical sections have completed. call_rcu_bh() assumes
* that the read-side critical sections end on completion of a softirq
@@ -566,37 +777,4 @@
}
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
-#ifndef CONFIG_PROVE_RCU
-#define __do_rcu_dereference_check(c) do { } while (0)
-#endif /* #ifdef CONFIG_PROVE_RCU */
-
-#define __rcu_dereference_index_check(p, c) \
- ({ \
- typeof(p) _________p1 = ACCESS_ONCE(p); \
- __do_rcu_dereference_check(c); \
- smp_read_barrier_depends(); \
- (_________p1); \
- })
-
-/**
- * rcu_dereference_index_check() - rcu_dereference for indices with debug checking
- * @p: The pointer to read, prior to dereferencing
- * @c: The conditions under which the dereference will take place
- *
- * Similar to rcu_dereference_check(), but omits the sparse checking.
- * This allows rcu_dereference_index_check() to be used on integers,
- * which can then be used as array indices. Attempting to use
- * rcu_dereference_check() on an integer will give compiler warnings
- * because the sparse address-space mechanism relies on dereferencing
- * the RCU-protected pointer. Dereferencing integers is not something
- * that even gcc will put up with.
- *
- * Note that this function does not implicitly check for RCU read-side
- * critical sections. If this function gains lots of uses, it might
- * make sense to provide versions for each flavor of RCU, but it does
- * not make sense as of early 2010.
- */
-#define rcu_dereference_index_check(p, c) \
- __rcu_dereference_index_check((p), (c))
-
#endif /* __LINUX_RCUPDATE_H */
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index e2e8931..13877cb 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -27,27 +27,71 @@
#include <linux/cache.h>
-void rcu_sched_qs(int cpu);
-void rcu_bh_qs(int cpu);
-static inline void rcu_note_context_switch(int cpu)
+#define rcu_init_sched() do { } while (0)
+
+#ifdef CONFIG_TINY_RCU
+
+static inline void synchronize_rcu_expedited(void)
{
- rcu_sched_qs(cpu);
+ synchronize_sched(); /* Only one CPU, so pretty fast anyway!!! */
}
-#define __rcu_read_lock() preempt_disable()
-#define __rcu_read_unlock() preempt_enable()
-#define __rcu_read_lock_bh() local_bh_disable()
-#define __rcu_read_unlock_bh() local_bh_enable()
-#define call_rcu_sched call_rcu
+static inline void rcu_barrier(void)
+{
+ rcu_barrier_sched(); /* Only one CPU, so only one list of callbacks! */
+}
-#define rcu_init_sched() do { } while (0)
-extern void rcu_check_callbacks(int cpu, int user);
+#else /* #ifdef CONFIG_TINY_RCU */
+
+void rcu_barrier(void);
+void synchronize_rcu_expedited(void);
+
+#endif /* #else #ifdef CONFIG_TINY_RCU */
+
+static inline void synchronize_rcu_bh(void)
+{
+ synchronize_sched();
+}
+
+static inline void synchronize_rcu_bh_expedited(void)
+{
+ synchronize_sched();
+}
+
+#ifdef CONFIG_TINY_RCU
+
+static inline void rcu_preempt_note_context_switch(void)
+{
+}
+
+static inline void exit_rcu(void)
+{
+}
static inline int rcu_needs_cpu(int cpu)
{
return 0;
}
+#else /* #ifdef CONFIG_TINY_RCU */
+
+void rcu_preempt_note_context_switch(void);
+extern void exit_rcu(void);
+int rcu_preempt_needs_cpu(void);
+
+static inline int rcu_needs_cpu(int cpu)
+{
+ return rcu_preempt_needs_cpu();
+}
+
+#endif /* #else #ifdef CONFIG_TINY_RCU */
+
+static inline void rcu_note_context_switch(int cpu)
+{
+ rcu_sched_qs(cpu);
+ rcu_preempt_note_context_switch();
+}
+
/*
* Return the number of grace periods.
*/
@@ -76,54 +120,8 @@
{
}
-extern void synchronize_sched(void);
-
-static inline void synchronize_rcu(void)
+static inline void rcu_cpu_stall_reset(void)
{
- synchronize_sched();
-}
-
-static inline void synchronize_rcu_bh(void)
-{
- synchronize_sched();
-}
-
-static inline void synchronize_rcu_expedited(void)
-{
- synchronize_sched();
-}
-
-static inline void synchronize_rcu_bh_expedited(void)
-{
- synchronize_sched();
-}
-
-struct notifier_block;
-
-#ifdef CONFIG_NO_HZ
-
-extern void rcu_enter_nohz(void);
-extern void rcu_exit_nohz(void);
-
-#else /* #ifdef CONFIG_NO_HZ */
-
-static inline void rcu_enter_nohz(void)
-{
-}
-
-static inline void rcu_exit_nohz(void)
-{
-}
-
-#endif /* #else #ifdef CONFIG_NO_HZ */
-
-static inline void exit_rcu(void)
-{
-}
-
-static inline int rcu_preempt_depth(void)
-{
- return 0;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index c0ed1c0..95518e6 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -30,64 +30,23 @@
#ifndef __LINUX_RCUTREE_H
#define __LINUX_RCUTREE_H
-struct notifier_block;
-
-extern void rcu_sched_qs(int cpu);
-extern void rcu_bh_qs(int cpu);
extern void rcu_note_context_switch(int cpu);
extern int rcu_needs_cpu(int cpu);
+extern void rcu_cpu_stall_reset(void);
#ifdef CONFIG_TREE_PREEMPT_RCU
-extern void __rcu_read_lock(void);
-extern void __rcu_read_unlock(void);
-extern void synchronize_rcu(void);
extern void exit_rcu(void);
-/*
- * Defined as macro as it is a very low level header
- * included from areas that don't even know about current
- */
-#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
-
#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-static inline void __rcu_read_lock(void)
-{
- preempt_disable();
-}
-
-static inline void __rcu_read_unlock(void)
-{
- preempt_enable();
-}
-
-#define synchronize_rcu synchronize_sched
-
static inline void exit_rcu(void)
{
}
-static inline int rcu_preempt_depth(void)
-{
- return 0;
-}
-
#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
-static inline void __rcu_read_lock_bh(void)
-{
- local_bh_disable();
-}
-static inline void __rcu_read_unlock_bh(void)
-{
- local_bh_enable();
-}
-
-extern void call_rcu_sched(struct rcu_head *head,
- void (*func)(struct rcu_head *rcu));
extern void synchronize_rcu_bh(void);
-extern void synchronize_sched(void);
extern void synchronize_rcu_expedited(void);
static inline void synchronize_rcu_bh_expedited(void)
@@ -95,7 +54,7 @@
synchronize_sched_expedited();
}
-extern void rcu_check_callbacks(int cpu, int user);
+extern void rcu_barrier(void);
extern long rcu_batches_completed(void);
extern long rcu_batches_completed_bh(void);
@@ -104,18 +63,6 @@
extern void rcu_bh_force_quiescent_state(void);
extern void rcu_sched_force_quiescent_state(void);
-#ifdef CONFIG_NO_HZ
-void rcu_enter_nohz(void);
-void rcu_exit_nohz(void);
-#else /* CONFIG_NO_HZ */
-static inline void rcu_enter_nohz(void)
-{
-}
-static inline void rcu_exit_nohz(void)
-{
-}
-#endif /* CONFIG_NO_HZ */
-
/* A context switch is a grace period for RCU-sched and RCU-bh. */
static inline int rcu_blocking_is_gp(void)
{
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index 4f82326..08c32e4 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -81,7 +81,7 @@
__u8 type;
__u8 op;
__u8 soft, hard;
-} __packed;
+} __attribute__((packed));
/*
* We are planning to be backward and forward compatible with changes
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1e2a6db..0383601 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -875,6 +875,7 @@
SD_LV_NONE = 0,
SD_LV_SIBLING,
SD_LV_MC,
+ SD_LV_BOOK,
SD_LV_CPU,
SD_LV_NODE,
SD_LV_ALLNODES,
@@ -1160,6 +1161,13 @@
struct rcu_node;
+enum perf_event_task_context {
+ perf_invalid_context = -1,
+ perf_hw_context = 0,
+ perf_sw_context,
+ perf_nr_task_contexts,
+};
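
This enum is used later in this diff to index the new per-task perf_event_ctxp[] array, which replaces the single context pointer. A tiny illustrative accessor (hypothetical helper, not part of the patch):

	static struct perf_event_context *task_hw_ctx(struct task_struct *tsk)
	{
		return tsk->perf_event_ctxp[perf_hw_context];	/* hardware-PMU slot */
	}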
+
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
void *stack;
@@ -1202,11 +1210,13 @@
unsigned int policy;
cpumask_t cpus_allowed;
-#ifdef CONFIG_TREE_PREEMPT_RCU
+#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
char rcu_read_unlock_special;
- struct rcu_node *rcu_blocked_node;
struct list_head rcu_node_entry;
+#endif /* #ifdef CONFIG_PREEMPT_RCU */
+#ifdef CONFIG_TREE_PREEMPT_RCU
+ struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
@@ -1288,9 +1298,9 @@
struct list_head cpu_timers[3];
/* process credentials */
- const struct cred *real_cred; /* objective and real subjective task
+ const struct cred __rcu *real_cred; /* objective and real subjective task
* credentials (COW) */
- const struct cred *cred; /* effective (overridable) subjective task
+ const struct cred __rcu *cred; /* effective (overridable) subjective task
* credentials (COW) */
struct mutex cred_guard_mutex; /* guard against foreign influences on
* credential calculations
@@ -1418,7 +1428,7 @@
#endif
#ifdef CONFIG_CGROUPS
/* Control Group info protected by css_set_lock */
- struct css_set *cgroups;
+ struct css_set __rcu *cgroups;
/* cg_list protected by css_set_lock and tsk->alloc_lock */
struct list_head cg_list;
#endif
@@ -1431,7 +1441,7 @@
struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
- struct perf_event_context *perf_event_ctxp;
+ struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
struct mutex perf_event_mutex;
struct list_head perf_event_list;
#endif
@@ -1681,8 +1691,7 @@
/*
* Per process flags
*/
-#define PF_ALIGNWARN 0x00000001 /* Print alignment warning msgs */
- /* Not implemented yet, only for 486*/
+#define PF_KSOFTIRQD 0x00000001 /* I am ksoftirqd */
#define PF_STARTING 0x00000002 /* being created */
#define PF_EXITING 0x00000004 /* getting shut down */
#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
@@ -1740,7 +1749,7 @@
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)
-#ifdef CONFIG_TREE_PREEMPT_RCU
+#ifdef CONFIG_PREEMPT_RCU
#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
@@ -1749,7 +1758,9 @@
{
p->rcu_read_lock_nesting = 0;
p->rcu_read_unlock_special = 0;
+#ifdef CONFIG_TREE_PREEMPT_RCU
p->rcu_blocked_node = NULL;
+#endif
INIT_LIST_HEAD(&p->rcu_node_entry);
}
@@ -1826,6 +1837,19 @@
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
#endif
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+/*
+ * An interface for runtime opt-in to IRQ time accounting based on sched_clock.
+ * The opt-in is explicit so that systems with slow sched_clock implementations
+ * do not pay a performance penalty.
+ */
+extern void enable_sched_clock_irqtime(void);
+extern void disable_sched_clock_irqtime(void);
+#else
+static inline void enable_sched_clock_irqtime(void) {}
+static inline void disable_sched_clock_irqtime(void) {}
+#endif
+
extern unsigned long long
task_sched_runtime(struct task_struct *task);
extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
@@ -2367,9 +2391,9 @@
extern int __cond_resched_softirq(void);
-#define cond_resched_softirq() ({ \
- __might_sleep(__FILE__, __LINE__, SOFTIRQ_OFFSET); \
- __cond_resched_softirq(); \
+#define cond_resched_softirq() ({ \
+ __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
+ __cond_resched_softirq(); \
})
/*
diff --git a/include/linux/security.h b/include/linux/security.h
index a22219a..b8246a8 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -74,7 +74,7 @@
extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags);
extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5);
-extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp);
+extern int cap_task_setscheduler(struct task_struct *p);
extern int cap_task_setioprio(struct task_struct *p, int ioprio);
extern int cap_task_setnice(struct task_struct *p, int nice);
extern int cap_syslog(int type, bool from_file);
@@ -959,6 +959,12 @@
* Sets the new child socket's sid to the openreq sid.
* @inet_conn_established:
* Sets the connection's peersid to the secmark on skb.
+ * @secmark_relabel_packet:
+ * Check if the process should be allowed to relabel packets to the given secid.
+ * @secmark_refcount_inc:
+ * Tells the LSM to increment the number of secmark labeling rules loaded.
+ * @secmark_refcount_dec:
+ * Tells the LSM to decrement the number of secmark labeling rules loaded.
* @req_classify_flow:
* Sets the flow's sid to the openreq sid.
* @tun_dev_create:
@@ -1279,9 +1285,13 @@
* Return 0 if permission is granted.
*
* @secid_to_secctx:
- * Convert secid to security context.
+ * Convert secid to security context. If secdata is NULL the length of
+ * the result will be returned in seclen, but no secdata will be returned.
+ * Note that the length may change between a call that checks the length
+ * and a subsequent call that actually allocates and returns the secdata.
* @secid contains the security ID.
* @secdata contains the pointer that stores the converted security context.
+ * @seclen pointer which contains the length of the data
* @secctx_to_secid:
* Convert security context to secid.
* @secid contains the pointer to the generated security ID.
@@ -1501,8 +1511,7 @@
int (*task_getioprio) (struct task_struct *p);
int (*task_setrlimit) (struct task_struct *p, unsigned int resource,
struct rlimit *new_rlim);
- int (*task_setscheduler) (struct task_struct *p, int policy,
- struct sched_param *lp);
+ int (*task_setscheduler) (struct task_struct *p);
int (*task_getscheduler) (struct task_struct *p);
int (*task_movememory) (struct task_struct *p);
int (*task_kill) (struct task_struct *p,
@@ -1594,6 +1603,9 @@
struct request_sock *req);
void (*inet_csk_clone) (struct sock *newsk, const struct request_sock *req);
void (*inet_conn_established) (struct sock *sk, struct sk_buff *skb);
+ int (*secmark_relabel_packet) (u32 secid);
+ void (*secmark_refcount_inc) (void);
+ void (*secmark_refcount_dec) (void);
void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl);
int (*tun_dev_create)(void);
void (*tun_dev_post_create)(struct sock *sk);
@@ -1752,8 +1764,7 @@
int security_task_getioprio(struct task_struct *p);
int security_task_setrlimit(struct task_struct *p, unsigned int resource,
struct rlimit *new_rlim);
-int security_task_setscheduler(struct task_struct *p,
- int policy, struct sched_param *lp);
+int security_task_setscheduler(struct task_struct *p);
int security_task_getscheduler(struct task_struct *p);
int security_task_movememory(struct task_struct *p);
int security_task_kill(struct task_struct *p, struct siginfo *info,
@@ -2320,11 +2331,9 @@
return 0;
}
-static inline int security_task_setscheduler(struct task_struct *p,
- int policy,
- struct sched_param *lp)
+static inline int security_task_setscheduler(struct task_struct *p)
{
- return cap_task_setscheduler(p, policy, lp);
+ return cap_task_setscheduler(p);
}
static inline int security_task_getscheduler(struct task_struct *p)
@@ -2551,6 +2560,9 @@
const struct request_sock *req);
void security_inet_conn_established(struct sock *sk,
struct sk_buff *skb);
+int security_secmark_relabel_packet(u32 secid);
+void security_secmark_refcount_inc(void);
+void security_secmark_refcount_dec(void);
int security_tun_dev_create(void);
void security_tun_dev_post_create(struct sock *sk);
int security_tun_dev_attach(struct sock *sk);
@@ -2705,6 +2717,19 @@
{
}
+static inline int security_secmark_relabel_packet(u32 secid)
+{
+ return 0;
+}
+
+static inline void security_secmark_refcount_inc(void)
+{
+}
+
+static inline void security_secmark_refcount_dec(void)
+{
+}
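
A sketch of how a SECMARK-style packet-labeling rule would use these hooks, loosely modeled on net/netfilter/xt_SECMARK.c of this period (function names assumed):

	static int secmark_tg_check(u32 secid)
	{
		int err = security_secmark_relabel_packet(secid);

		if (err)
			return err;
		security_secmark_refcount_inc();	/* one more rule loaded */
		return 0;
	}

	static void secmark_tg_destroy(void)
	{
		security_secmark_refcount_dec();	/* rule removed/flushed */
	}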
+
static inline int security_tun_dev_create(void)
{
return 0;
diff --git a/include/linux/selinux.h b/include/linux/selinux.h
index 82e0f26..44f4596 100644
--- a/include/linux/selinux.h
+++ b/include/linux/selinux.h
@@ -21,74 +21,11 @@
#ifdef CONFIG_SECURITY_SELINUX
/**
- * selinux_string_to_sid - map a security context string to a security ID
- * @str: the security context string to be mapped
- * @sid: ID value returned via this.
- *
- * Returns 0 if successful, with the SID stored in sid. A value
- * of zero for sid indicates no SID could be determined (but no error
- * occurred).
- */
-int selinux_string_to_sid(char *str, u32 *sid);
-
-/**
- * selinux_secmark_relabel_packet_permission - secmark permission check
- * @sid: SECMARK ID value to be applied to network packet
- *
- * Returns 0 if the current task is allowed to set the SECMARK label of
- * packets with the supplied security ID. Note that it is implicit that
- * the packet is always being relabeled from the default unlabeled value,
- * and that the access control decision is made in the AVC.
- */
-int selinux_secmark_relabel_packet_permission(u32 sid);
-
-/**
- * selinux_secmark_refcount_inc - increments the secmark use counter
- *
- * SELinux keeps track of the current SECMARK targets in use so it knows
- * when to apply SECMARK label access checks to network packets. This
- * function incements this reference count to indicate that a new SECMARK
- * target has been configured.
- */
-void selinux_secmark_refcount_inc(void);
-
-/**
- * selinux_secmark_refcount_dec - decrements the secmark use counter
- *
- * SELinux keeps track of the current SECMARK targets in use so it knows
- * when to apply SECMARK label access checks to network packets. This
- * function decements this reference count to indicate that one of the
- * existing SECMARK targets has been removed/flushed.
- */
-void selinux_secmark_refcount_dec(void);
-
-/**
* selinux_is_enabled - is SELinux enabled?
*/
bool selinux_is_enabled(void);
#else
-static inline int selinux_string_to_sid(const char *str, u32 *sid)
-{
- *sid = 0;
- return 0;
-}
-
-static inline int selinux_secmark_relabel_packet_permission(u32 sid)
-{
- return 0;
-}
-
-static inline void selinux_secmark_refcount_inc(void)
-{
- return;
-}
-
-static inline void selinux_secmark_refcount_dec(void)
-{
- return;
-}
-
static inline bool selinux_is_enabled(void)
{
return false;
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
index 7415839..5310d27 100644
--- a/include/linux/semaphore.h
+++ b/include/linux/semaphore.h
@@ -26,6 +26,9 @@
.wait_list = LIST_HEAD_INIT((name).wait_list), \
}
+#define DEFINE_SEMAPHORE(name) \
+ struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
+
#define DECLARE_MUTEX(name) \
struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
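
The new DEFINE_SEMAPHORE() appears intended as the replacement spelling as DECLARE_MUTEX() heads toward removal; both expand to a semaphore with a count of one. A brief usage sketch (names assumed):

	static DEFINE_SEMAPHORE(conf_sem);	/* binary semaphore, count = 1 */

	static int conf_update(void)
	{
		if (down_interruptible(&conf_sem))
			return -EINTR;
		/* ... exclusive section ... */
		up(&conf_sem);
		return 0;
	}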
diff --git a/include/linux/serial.h b/include/linux/serial.h
index 1ebc694..ef91406 100644
--- a/include/linux/serial.h
+++ b/include/linux/serial.h
@@ -77,8 +77,7 @@
#define PORT_16654 11
#define PORT_16850 12
#define PORT_RSA 13 /* RSA-DV II/S card */
-#define PORT_U6_16550A 14
-#define PORT_MAX 14
+#define PORT_MAX 13
#define SERIAL_IO_PORT 0
#define SERIAL_IO_HUB6 1
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 3c2ad99..563e234 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -44,7 +44,8 @@
#define PORT_RM9000 16 /* PMC-Sierra RM9xxx internal UART */
#define PORT_OCTEON 17 /* Cavium OCTEON internal UART */
#define PORT_AR7 18 /* Texas Instruments AR7 internal UART */
-#define PORT_MAX_8250 18 /* max port ID */
+#define PORT_U6_16550A 19 /* ST-Ericsson U6xxx internal UART */
+#define PORT_MAX_8250 19 /* max port ID */
/*
* ARM specific type numbers. These are not currently guaranteed
@@ -465,7 +466,7 @@
#ifdef SUPPORT_SYSRQ
if (port->sysrq) {
if (ch && time_before(jiffies, port->sysrq)) {
- handle_sysrq(ch, port->state->port.tty);
+ handle_sysrq(ch);
port->sysrq = 0;
return 1;
}
diff --git a/include/linux/socket.h b/include/linux/socket.h
index a2fada9..a8f56e1 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -322,7 +322,7 @@
int offset,
unsigned int len, __wsum *csump);
-extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
+extern long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
int offset, int len);
diff --git a/include/linux/spi/dw_spi.h b/include/linux/spi/dw_spi.h
index cc813f9..c91302f 100644
--- a/include/linux/spi/dw_spi.h
+++ b/include/linux/spi/dw_spi.h
@@ -14,7 +14,9 @@
#define SPI_MODE_OFFSET 6
#define SPI_SCPH_OFFSET 6
#define SPI_SCOL_OFFSET 7
+
#define SPI_TMOD_OFFSET 8
+#define SPI_TMOD_MASK (0x3 << SPI_TMOD_OFFSET)
#define SPI_TMOD_TR 0x0 /* xmit & recv */
#define SPI_TMOD_TO 0x1 /* xmit only */
#define SPI_TMOD_RO 0x2 /* recv only */
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 4d5d2f5..58971e8 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -108,19 +108,43 @@
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
/**
- * srcu_dereference - fetch SRCU-protected pointer with checking
+ * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing
+ * @p: the pointer to fetch and protect for later dereferencing
+ * @sp: pointer to the srcu_struct, which is used to check that we
+ * really are in an SRCU read-side critical section.
+ * @c: condition to check for update-side use
*
- * Makes rcu_dereference_check() do the dirty work.
+ * If PROVE_RCU is enabled, invoking this outside of an RCU read-side
+ * critical section will result in an RCU-lockdep splat, unless @c evaluates
+ * to 1. The @c argument will normally be a logical expression containing
+ * lockdep_is_held() calls.
*/
-#define srcu_dereference(p, sp) \
- rcu_dereference_check(p, srcu_read_lock_held(sp))
+#define srcu_dereference_check(p, sp, c) \
+ __rcu_dereference_check((p), srcu_read_lock_held(sp) || (c), __rcu)
+
+/**
+ * srcu_dereference - fetch SRCU-protected pointer for later dereferencing
+ * @p: the pointer to fetch and protect for later dereferencing
+ * @sp: pointer to the srcu_struct, which is used to check that we
+ * really are in an SRCU read-side critical section.
+ *
+ * Makes rcu_dereference_check() do the dirty work. If PROVE_RCU
+ * is enabled, invoking this outside of an RCU read-side critical
+ * section will result in an RCU-lockdep splat.
+ */
+#define srcu_dereference(p, sp) srcu_dereference_check((p), (sp), 0)
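
A minimal SRCU read-side sketch with assumed names; the srcu_struct must have been set up with init_srcu_struct() beforehand:

	static struct srcu_struct foo_srcu;
	static struct foo __rcu *global_foo;

	static int foo_read_value(void)
	{
		struct foo *p;
		int idx, ret;

		idx = srcu_read_lock(&foo_srcu);
		p = srcu_dereference(global_foo, &foo_srcu);
		ret = p ? p->value : -1;	/* ->value is an assumed field */
		srcu_read_unlock(&foo_srcu, idx);
		return ret;
	}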
/**
* srcu_read_lock - register a new reader for an SRCU-protected structure.
* @sp: srcu_struct in which to register the new reader.
*
* Enter an SRCU read-side critical section. Note that SRCU read-side
- * critical sections may be nested.
+ * critical sections may be nested. However, it is illegal to
+ * call anything that waits on an SRCU grace period for the same
+ * srcu_struct, whether directly or indirectly. Please note that
+ * one way to indirectly wait on an SRCU grace period is to acquire
+ * a mutex that is held elsewhere while calling synchronize_srcu() or
+ * synchronize_srcu_expedited().
*/
static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
{
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index 6b524a0..1808960 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -126,8 +126,8 @@
#else /* CONFIG_STOP_MACHINE && CONFIG_SMP */
-static inline int stop_machine(int (*fn)(void *), void *data,
- const struct cpumask *cpus)
+static inline int __stop_machine(int (*fn)(void *), void *data,
+ const struct cpumask *cpus)
{
int ret;
local_irq_disable();
@@ -136,5 +136,11 @@
return ret;
}
+static inline int stop_machine(int (*fn)(void *), void *data,
+ const struct cpumask *cpus)
+{
+ return __stop_machine(fn, data, cpus);
+}
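
A sketch of the calling convention (callback name assumed): fn runs with interrupts disabled while the other CPUs are held in a known state on SMP; the UP fallback above simply disables IRQs and calls fn locally:

	static int set_flag(void *arg)
	{
		*(int *)arg = 1;	/* runs with IRQs off */
		return 0;
	}

	static int example(void)
	{
		int flag = 0;

		return stop_machine(set_flag, &flag, NULL);	/* NULL: any CPU may run fn */
	}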
+
#endif /* CONFIG_STOP_MACHINE && CONFIG_SMP */
#endif /* _LINUX_STOP_MACHINE */
diff --git a/include/linux/sunrpc/auth_gss.h b/include/linux/sunrpc/auth_gss.h
index 671538d..8eee9db 100644
--- a/include/linux/sunrpc/auth_gss.h
+++ b/include/linux/sunrpc/auth_gss.h
@@ -69,7 +69,7 @@
enum rpc_gss_proc gc_proc;
u32 gc_seq;
spinlock_t gc_seq_lock;
- struct gss_ctx *gc_gss_ctx;
+ struct gss_ctx __rcu *gc_gss_ctx;
struct xdr_netobj gc_wire_ctx;
u32 gc_win;
unsigned long gc_expiry;
@@ -80,7 +80,7 @@
struct gss_cred {
struct rpc_cred gc_base;
enum rpc_gss_svc gc_service;
- struct gss_cl_ctx *gc_ctx;
+ struct gss_cl_ctx __rcu *gc_ctx;
struct gss_upcall_msg *gc_upcall;
unsigned long gc_upcall_timestamp;
unsigned char gc_machine_cred : 1;
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 569dc72..85f38a63 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -30,7 +30,7 @@
* The high-level client handle
*/
struct rpc_clnt {
- struct kref cl_kref; /* Number of references */
+ atomic_t cl_count; /* Number of references */
struct list_head cl_clients; /* Global list of clients */
struct list_head cl_tasks; /* List of tasks */
spinlock_t cl_lock; /* spinlock */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 2fee51a..7cdd633 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -19,6 +19,7 @@
#define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK 0x7fff
#define SWAP_FLAG_PRIO_SHIFT 0
+#define SWAP_FLAG_DISCARD 0x10000 /* discard swap cluster after use */
static inline int current_is_kswapd(void)
{
@@ -142,7 +143,7 @@
enum {
SWP_USED = (1 << 0), /* is slot in swap_info[] used? */
SWP_WRITEOK = (1 << 1), /* ok to write to this swap? */
- SWP_DISCARDABLE = (1 << 2), /* blkdev supports discard */
+ SWP_DISCARDABLE = (1 << 2), /* swapon+blkdev support discard */
SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */
SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */
SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */
@@ -315,6 +316,7 @@
extern long total_swap_pages;
extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
+extern swp_entry_t get_swap_page_of_type(int);
extern int valid_swaphandles(swp_entry_t, unsigned long *);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
@@ -331,13 +333,6 @@
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
-#ifdef CONFIG_HIBERNATION
-void hibernation_freeze_swap(void);
-void hibernation_thaw_swap(void);
-swp_entry_t get_swap_for_hibernation(int type);
-void swap_free_for_hibernation(swp_entry_t val);
-#endif
-
/* linux/mm/thrash.c */
extern struct mm_struct *swap_token_mm;
extern void grab_swap_token(struct mm_struct *);
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 3c92121..96eb576 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -16,6 +16,7 @@
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/lockdep.h>
+#include <linux/kobject_ns.h>
#include <asm/atomic.h>
struct kobject;
diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
index 609e8ca..387fa7d 100644
--- a/include/linux/sysrq.h
+++ b/include/linux/sysrq.h
@@ -15,9 +15,7 @@
#define _LINUX_SYSRQ_H
#include <linux/errno.h>
-
-struct pt_regs;
-struct tty_struct;
+#include <linux/types.h>
/* Possible values of bitmask for enabling sysrq functions */
/* 0x0001 is reserved for enable everything */
@@ -31,7 +29,7 @@
#define SYSRQ_ENABLE_RTNICE 0x0100
struct sysrq_key_op {
- void (*handler)(int, struct tty_struct *);
+ void (*handler)(int);
char *help_msg;
char *action_msg;
int enable_mask;
@@ -44,8 +42,8 @@
* are available -- else NULL's).
*/
-void handle_sysrq(int key, struct tty_struct *tty);
-void __handle_sysrq(int key, struct tty_struct *tty, int check_mask);
+void handle_sysrq(int key);
+void __handle_sysrq(int key, bool check_mask);
int register_sysrq_key(int key, struct sysrq_key_op *op);
int unregister_sysrq_key(int key, struct sysrq_key_op *op);
struct sysrq_key_op *__sysrq_get_key_op(int key);
@@ -54,7 +52,11 @@
#else
-static inline void handle_sysrq(int key, struct tty_struct *tty)
+static inline void handle_sysrq(int key)
+{
+}
+
+static inline void __handle_sysrq(int key, bool check_mask)
{
}
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 64e084f..b91a40e 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -201,6 +201,12 @@
.balance_interval = 64, \
}
+#ifdef CONFIG_SCHED_BOOK
+#ifndef SD_BOOK_INIT
+#error Please define an appropriate SD_BOOK_INIT in include/asm/topology.h!!!
+#endif
+#endif /* CONFIG_SCHED_BOOK */
+
#ifdef CONFIG_NUMA
#ifndef SD_NODE_INIT
#error Please define an appropriate SD_NODE_INIT in include/asm/topology.h!!!
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 103d1b6..a4a90b6 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -17,6 +17,7 @@
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
+#include <linux/jump_label.h>
struct module;
struct tracepoint;
@@ -145,7 +146,9 @@
extern struct tracepoint __tracepoint_##name; \
static inline void trace_##name(proto) \
{ \
- if (unlikely(__tracepoint_##name.state)) \
+ JUMP_LABEL(&__tracepoint_##name.state, do_trace); \
+ return; \
+do_trace: \
__DO_TRACE(&__tracepoint_##name, \
TP_PROTO(data_proto), \
TP_ARGS(data_args)); \
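For reference, on architectures without jump-label patching the JUMP_LABEL() macro used in the hunk above degrades to an ordinary conditional branch; the generic fallback looks roughly like this sketch:

	#define JUMP_LABEL(key, label)			\
	do {						\
		if (unlikely(*(key)))			\
			goto label;			\
	} while (0)

With architecture support, the load-and-test is instead a no-op that is binary-patched into a jump when the tracepoint is enabled.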
diff --git a/include/linux/types.h b/include/linux/types.h
index 01a082f..357dbc1 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -121,7 +121,15 @@
typedef __s64 int64_t;
#endif
-/* this is a special 64bit data type that is 8-byte aligned */
+/*
+ * aligned_u64 should be used in defining kernel<->userspace ABIs to avoid
+ * common 32/64-bit compat problems.
+ * 64-bit values align to 4-byte boundaries on x86_32 (and possibly other
+ * architectures) and to 8-byte boundaries on 64-bit architectures. The new
+ * aligned_u64 type enforces 8-byte alignment so that structs containing
+ * aligned_u64 values have the same alignment on 32-bit and 64-bit architectures.
+ * No conversions are necessary between 32-bit user-space and a 64-bit kernel.
+ */
#define aligned_u64 __u64 __attribute__((aligned(8)))
#define aligned_be64 __be64 __attribute__((aligned(8)))
#define aligned_le64 __le64 __attribute__((aligned(8)))
@@ -178,6 +186,11 @@
typedef __u16 __bitwise __sum16;
typedef __u32 __bitwise __wsum;
+/* this is a special 64bit data type that is 8-byte aligned */
+#define __aligned_u64 __u64 __attribute__((aligned(8)))
+#define __aligned_be64 __be64 __attribute__((aligned(8)))
+#define __aligned_le64 __le64 __attribute__((aligned(8)))
+
#ifdef __KERNEL__
typedef unsigned __bitwise__ gfp_t;
typedef unsigned __bitwise__ fmode_t;
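To make the compat problem described above concrete, here is a hypothetical userspace-visible struct (name and fields invented for illustration):

	struct example_ioctl_arg {
		__u32		flags;
		/*
		 * A plain __u64 here would start at offset 4 on x86_32 but
		 * at offset 8 on x86_64; __aligned_u64 pins it to offset 8
		 * on both, so no compat conversion is needed.
		 */
		__aligned_u64	data;
	};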
diff --git a/include/linux/uinput.h b/include/linux/uinput.h
index 60c81da..05f7fed2 100644
--- a/include/linux/uinput.h
+++ b/include/linux/uinput.h
@@ -37,7 +37,6 @@
#define UINPUT_VERSION 3
#ifdef __KERNEL__
-#define UINPUT_MINOR 223
#define UINPUT_NAME "uinput"
#define UINPUT_BUFFER_SIZE 16
#define UINPUT_NUM_REQUESTS 16
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 890bc14..6170681 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -247,6 +247,7 @@
* value; it should return zero on successful initialization.
* @unbind: Reverses @bind(); called as a side effect of unregistering
* this driver.
+ * @disconnect: optional driver disconnect method
* @suspend: Notifies when the host stops sending USB traffic,
* after function notifications
* @resume: Notifies configuration when the host restarts USB traffic,
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 84a4c44..55675b1 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -342,8 +342,7 @@
extern void usb_serial_generic_process_read_urb(struct urb *urb);
extern int usb_serial_generic_prepare_write_buffer(struct usb_serial_port *port,
void *dest, size_t size);
-extern int usb_serial_handle_sysrq_char(struct tty_struct *tty,
- struct usb_serial_port *port,
+extern int usb_serial_handle_sysrq_char(struct usb_serial_port *port,
unsigned int ch);
extern int usb_serial_handle_break(struct usb_serial_port *port);
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
index 6228b5b..e9e1524 100644
--- a/include/linux/vgaarb.h
+++ b/include/linux/vgaarb.h
@@ -93,8 +93,11 @@
* Nested calls are supported (a per-resource counter is maintained)
*/
-extern int vga_get(struct pci_dev *pdev, unsigned int rsrc,
- int interruptible);
+#if defined(CONFIG_VGA_ARB)
+extern int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible);
+#else
+static inline int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible) { return 0; }
+#endif
/**
* vga_get_interruptible
@@ -131,7 +134,11 @@
* are already locked by another card. It can be called in any context
*/
+#if defined(CONFIG_VGA_ARB)
extern int vga_tryget(struct pci_dev *pdev, unsigned int rsrc);
+#else
+static inline int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) { return 0; }
+#endif
/**
* vga_put - release lock on legacy VGA resources
@@ -146,7 +153,11 @@
* released if the counter reaches 0.
*/
+#if defined(CONFIG_VGA_ARB)
extern void vga_put(struct pci_dev *pdev, unsigned int rsrc);
+#else
+#define vga_put(pdev, rsrc)
+#endif
/**
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 7f43ccd..eaaea37 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -170,6 +170,28 @@
return x;
}
+/*
+ * More accurate version that also considers the currently pending
+ * deltas. For that we need to loop over all cpus to find the current
+ * deltas. There is no synchronization, so the result is still not
+ * exact.
+ */
+static inline unsigned long zone_page_state_snapshot(struct zone *zone,
+ enum zone_stat_item item)
+{
+ long x = atomic_long_read(&zone->vm_stat[item]);
+
+#ifdef CONFIG_SMP
+ int cpu;
+ for_each_online_cpu(cpu)
+ x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];
+
+ if (x < 0)
+ x = 0;
+#endif
+ return x;
+}
+
extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 0836ccc..3efc9f3 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -614,6 +614,7 @@
(wait)->private = current; \
(wait)->func = autoremove_wake_function; \
INIT_LIST_HEAD(&(wait)->task_list); \
+ (wait)->flags = 0; \
} while (0)
/**
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 4f9d277..25e02c9 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -25,18 +25,20 @@
enum {
WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */
- WORK_STRUCT_CWQ_BIT = 1, /* data points to cwq */
- WORK_STRUCT_LINKED_BIT = 2, /* next work is linked to this one */
+ WORK_STRUCT_DELAYED_BIT = 1, /* work item is delayed */
+ WORK_STRUCT_CWQ_BIT = 2, /* data points to cwq */
+ WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
- WORK_STRUCT_STATIC_BIT = 3, /* static initializer (debugobjects) */
- WORK_STRUCT_COLOR_SHIFT = 4, /* color for workqueue flushing */
+ WORK_STRUCT_STATIC_BIT = 4, /* static initializer (debugobjects) */
+ WORK_STRUCT_COLOR_SHIFT = 5, /* color for workqueue flushing */
#else
- WORK_STRUCT_COLOR_SHIFT = 3, /* color for workqueue flushing */
+ WORK_STRUCT_COLOR_SHIFT = 4, /* color for workqueue flushing */
#endif
WORK_STRUCT_COLOR_BITS = 4,
WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT,
+ WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT,
WORK_STRUCT_CWQ = 1 << WORK_STRUCT_CWQ_BIT,
WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
@@ -59,8 +61,8 @@
/*
* Reserve 7 bits off of cwq pointer w/ debugobjects turned
- * off. This makes cwqs aligned to 128 bytes which isn't too
- * excessive while allowing 15 workqueue flush colors.
+ * off. This makes cwqs aligned to 256 bytes and allows 15
+ * workqueue flush colors.
*/
WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT +
WORK_STRUCT_COLOR_BITS,
@@ -233,6 +235,10 @@
#define work_clear_pending(work) \
clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))
+/*
+ * Workqueue flags and constants. For details, please refer to
+ * Documentation/workqueue.txt.
+ */
enum {
WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */
WQ_UNBOUND = 1 << 1, /* not bound to any cpu */
@@ -241,6 +247,8 @@
WQ_HIGHPRI = 1 << 4, /* high priority */
WQ_CPU_INTENSIVE = 1 << 5, /* cpu intensive workqueue */
+ WQ_DYING = 1 << 6, /* internal: workqueue is dying */
+
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2,
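A hedged example of how these flags are consumed by alloc_workqueue(); the queue name and max_active value are arbitrary (WQ_DYING is internal and never passed by callers):

	struct workqueue_struct *wq;

	/* Unbound, high-priority queue, at most one in-flight work item. */
	wq = alloc_workqueue("example_wq", WQ_UNBOUND | WQ_HIGHPRI, 1);
	if (!wq)
		return -ENOMEM;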
diff --git a/include/media/videobuf-dma-sg.h b/include/media/videobuf-dma-sg.h
index 97e07f4..aa4ebb4 100644
--- a/include/media/videobuf-dma-sg.h
+++ b/include/media/videobuf-dma-sg.h
@@ -48,6 +48,7 @@
/* for userland buffer */
int offset;
+ size_t size;
struct page **pages;
/* for kernel buffers */
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 45375b4..4d40c4d 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -121,6 +121,7 @@
* IPv6 Address Label subsystem (addrlabel.c)
*/
extern int ipv6_addr_label_init(void);
+extern void ipv6_addr_label_cleanup(void);
extern void ipv6_addr_label_rtnl_register(void);
extern u32 ipv6_addr_label(struct net *net,
const struct in6_addr *addr,
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 27a902d..30fce01 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -161,12 +161,30 @@
{
struct sk_buff *skb;
+ release_sock(sk);
if ((skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err))) {
skb_reserve(skb, BT_SKB_RESERVE);
bt_cb(skb)->incoming = 0;
}
+ lock_sock(sk);
+
+ if (!skb && *err)
+ return NULL;
+
+ *err = sock_error(sk);
+ if (*err)
+ goto out;
+
+ if (sk->sk_shutdown) {
+ *err = -ECONNRESET;
+ goto out;
+ }
return skb;
+
+out:
+ kfree_skb(skb);
+ return NULL;
}
int bt_err(__u16 code);
diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h
index 726cc35..a4dc5b0 100644
--- a/include/net/cls_cgroup.h
+++ b/include/net/cls_cgroup.h
@@ -27,11 +27,17 @@
#ifdef CONFIG_NET_CLS_CGROUP
static inline u32 task_cls_classid(struct task_struct *p)
{
+ int classid;
+
if (in_interrupt())
return 0;
- return container_of(task_subsys_state(p, net_cls_subsys_id),
- struct cgroup_cls_state, css)->classid;
+ rcu_read_lock();
+ classid = container_of(task_subsys_state(p, net_cls_subsys_id),
+ struct cgroup_cls_state, css)->classid;
+ rcu_read_unlock();
+
+ return classid;
}
#else
extern int net_cls_subsys_id;
@@ -45,7 +51,8 @@
return 0;
rcu_read_lock();
- id = rcu_dereference(net_cls_subsys_id);
+ id = rcu_dereference_index_check(net_cls_subsys_id,
+ rcu_read_lock_held());
if (id >= 0)
classid = container_of(task_subsys_state(p, id),
struct cgroup_cls_state, css)->classid;
diff --git a/include/net/dst.h b/include/net/dst.h
index 81d1413..0238650 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -242,6 +242,7 @@
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
skb->rxhash = 0;
+ skb_set_queue_mapping(skb, 0);
skb_dst_drop(skb);
nf_reset(skb);
}
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index a4747a0..f976885 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -955,6 +955,9 @@
return csum_partial(diff, sizeof(diff), oldsum);
}
+extern void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp,
+ int outin);
+
#endif /* __KERNEL__ */
#endif /* _NET_IP_VS_H */
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index e624dae..caf17db 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -75,7 +75,7 @@
/* nf_conn feature for connections that have a helper */
struct nf_conn_help {
/* Helper. if any */
- struct nf_conntrack_helper *helper;
+ struct nf_conntrack_helper __rcu *helper;
union nf_conntrack_help help;
diff --git a/include/net/route.h b/include/net/route.h
index bd732d6..7e5e73b 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -199,6 +199,8 @@
fl.fl_ip_sport = sport;
fl.fl_ip_dport = dport;
fl.proto = protocol;
+ if (inet_sk(sk)->transparent)
+ fl.flags |= FLOWI_FLAG_ANYSRC;
ip_rt_put(*rp);
*rp = NULL;
security_sk_classify_flow(sk, &fl);
diff --git a/include/net/sock.h b/include/net/sock.h
index ac53bfb..adab9dc 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -752,6 +752,7 @@
/* Keeping track of sk's, looking them up, and port selection methods. */
void (*hash)(struct sock *sk);
void (*unhash)(struct sock *sk);
+ void (*rehash)(struct sock *sk);
int (*get_port)(struct sock *sk, unsigned short snum);
/* Keeping track of sockets in use */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index df6a2eb..3e4b33e 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -268,11 +268,21 @@
return seq3 - seq2 >= seq1 - seq2;
}
-static inline int tcp_too_many_orphans(struct sock *sk, int num)
+static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
- return (num > sysctl_tcp_max_orphans) ||
- (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
- atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]);
+ struct percpu_counter *ocp = sk->sk_prot->orphan_count;
+ int orphans = percpu_counter_read_positive(ocp);
+
+ if (orphans << shift > sysctl_tcp_max_orphans) {
+ orphans = percpu_counter_sum_positive(ocp);
+ if (orphans << shift > sysctl_tcp_max_orphans)
+ return true;
+ }
+
+ if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
+ atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])
+ return true;
+ return false;
}
/* syncookies: remember time of last synqueue overflow */
@@ -465,8 +475,22 @@
/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
- if (tp->max_window && pktsize > (tp->max_window >> 1))
- return max(tp->max_window >> 1, 68U - tp->tcp_header_len);
+ int cutoff;
+
+ /* When peer uses tiny windows, there is no use in packetizing
+ * to sub-MSS pieces for the sake of SWS or making sure there
+ * are enough packets in the pipe for fast recovery.
+ *
+ * On the other hand, for extremely large MSS devices, handling
+ * smaller than MSS windows in this way does make sense.
+ */
+ if (tp->max_window >= 512)
+ cutoff = (tp->max_window >> 1);
+ else
+ cutoff = tp->max_window;
+
+ if (cutoff && pktsize > cutoff)
+ return max_t(int, cutoff, 68U - tp->tcp_header_len);
else
return pktsize;
}
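The rewritten tcp_too_many_orphans() above uses the usual cheap-then-exact percpu counter idiom; schematically (the counter, limit, and action names are illustrative):

	/*
	 * percpu_counter_read_positive() is fast but may be off by the
	 * per-cpu batch, so pay for the exact cross-CPU sum only when
	 * the approximation already exceeds the limit.
	 */
	if (percpu_counter_read_positive(&counter) > limit &&
	    percpu_counter_sum_positive(&counter) > limit)
		act_on_overflow();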
diff --git a/include/net/udp.h b/include/net/udp.h
index 7abdf30..a184d34 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -151,6 +151,7 @@
}
extern void udp_lib_unhash(struct sock *sk);
+extern void udp_lib_rehash(struct sock *sk, u16 new_hash);
static inline void udp_lib_close(struct sock *sk, long timeout)
{
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index fc8f36d..4f53532 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -298,8 +298,8 @@
const struct xfrm_type *type_map[IPPROTO_MAX];
struct xfrm_mode *mode_map[XFRM_MODE_MAX];
int (*init_flags)(struct xfrm_state *x);
- void (*init_tempsel)(struct xfrm_state *x, struct flowi *fl,
- struct xfrm_tmpl *tmpl,
+ void (*init_tempsel)(struct xfrm_selector *sel, struct flowi *fl);
+ void (*init_temprop)(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
xfrm_address_t *daddr, xfrm_address_t *saddr);
int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
index 0e4cfb6..6fa7cba 100644
--- a/include/trace/events/irq.h
+++ b/include/trace/events/irq.h
@@ -5,7 +5,9 @@
#define _TRACE_IRQ_H
#include <linux/tracepoint.h>
-#include <linux/interrupt.h>
+
+struct irqaction;
+struct softirq_action;
#define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq }
#define show_softirq_name(val) \
@@ -93,7 +95,10 @@
),
TP_fast_assign(
- __entry->vec = (int)(h - vec);
+ if (vec)
+ __entry->vec = (int)(h - vec);
+ else
+ __entry->vec = (int)(long)h;
),
TP_printk("vec=%d [action=%s]", __entry->vec,
@@ -136,6 +141,23 @@
TP_ARGS(h, vec)
);
+/**
+ * softirq_raise - called immediately when a softirq is raised
+ * @h: pointer to struct softirq_action
+ * @vec: pointer to first struct softirq_action in softirq_vec array
+ *
+ * For this event @vec is NULL and @h carries the raised softirq vector
+ * number itself rather than a pointer to a softirq_action. When used in
+ * combination with the softirq_entry tracepoint we can determine the
+ * softirq raise latency.
+ */
+DEFINE_EVENT(softirq, softirq_raise,
+
+ TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
+
+ TP_ARGS(h, vec)
+);
+
#endif /* _TRACE_IRQ_H */
/* This part must be outside protection */
diff --git a/include/trace/events/napi.h b/include/trace/events/napi.h
index 188deca..8fe1e93 100644
--- a/include/trace/events/napi.h
+++ b/include/trace/events/napi.h
@@ -6,10 +6,31 @@
#include <linux/netdevice.h>
#include <linux/tracepoint.h>
+#include <linux/ftrace.h>
-DECLARE_TRACE(napi_poll,
+#define NO_DEV "(no_device)"
+
+TRACE_EVENT(napi_poll,
+
TP_PROTO(struct napi_struct *napi),
- TP_ARGS(napi));
+
+ TP_ARGS(napi),
+
+ TP_STRUCT__entry(
+ __field( struct napi_struct *, napi)
+ __string( dev_name, napi->dev ? napi->dev->name : NO_DEV)
+ ),
+
+ TP_fast_assign(
+ __entry->napi = napi;
+ __assign_str(dev_name, napi->dev ? napi->dev->name : NO_DEV);
+ ),
+
+ TP_printk("napi poll on napi struct %p for device %s",
+ __entry->napi, __get_str(dev_name))
+);
+
+#undef NO_DEV
#endif /* _TRACE_NAPI_H_ */
diff --git a/include/trace/events/net.h b/include/trace/events/net.h
new file mode 100644
index 0000000..5f247f5
--- /dev/null
+++ b/include/trace/events/net.h
@@ -0,0 +1,82 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM net
+
+#if !defined(_TRACE_NET_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NET_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/ip.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(net_dev_xmit,
+
+ TP_PROTO(struct sk_buff *skb,
+ int rc),
+
+ TP_ARGS(skb, rc),
+
+ TP_STRUCT__entry(
+ __field( void *, skbaddr )
+ __field( unsigned int, len )
+ __field( int, rc )
+ __string( name, skb->dev->name )
+ ),
+
+ TP_fast_assign(
+ __entry->skbaddr = skb;
+ __entry->len = skb->len;
+ __entry->rc = rc;
+ __assign_str(name, skb->dev->name);
+ ),
+
+ TP_printk("dev=%s skbaddr=%p len=%u rc=%d",
+ __get_str(name), __entry->skbaddr, __entry->len, __entry->rc)
+);
+
+DECLARE_EVENT_CLASS(net_dev_template,
+
+ TP_PROTO(struct sk_buff *skb),
+
+ TP_ARGS(skb),
+
+ TP_STRUCT__entry(
+ __field( void *, skbaddr )
+ __field( unsigned int, len )
+ __string( name, skb->dev->name )
+ ),
+
+ TP_fast_assign(
+ __entry->skbaddr = skb;
+ __entry->len = skb->len;
+ __assign_str(name, skb->dev->name);
+ ),
+
+ TP_printk("dev=%s skbaddr=%p len=%u",
+ __get_str(name), __entry->skbaddr, __entry->len)
+)
+
+DEFINE_EVENT(net_dev_template, net_dev_queue,
+
+ TP_PROTO(struct sk_buff *skb),
+
+ TP_ARGS(skb)
+);
+
+DEFINE_EVENT(net_dev_template, netif_receive_skb,
+
+ TP_PROTO(struct sk_buff *skb),
+
+ TP_ARGS(skb)
+);
+
+DEFINE_EVENT(net_dev_template, netif_rx,
+
+ TP_PROTO(struct sk_buff *skb),
+
+ TP_ARGS(skb)
+);
+#endif /* _TRACE_NET_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
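Like the other event headers, this file only declares the events; exactly one compilation unit must expand their bodies via CREATE_TRACE_POINTS. A sketch of that pattern (for the networking events this is conventionally net/core/net-traces.c, which is an assumption here, not shown in the diff):

	/* In exactly one .c file: */
	#define CREATE_TRACE_POINTS
	#include <trace/events/net.h>

	/* Call sites elsewhere then just invoke the generated stub: */
	trace_net_dev_xmit(skb, rc);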
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 35a2a6e..286784d 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -10,12 +10,17 @@
#ifndef _TRACE_POWER_ENUM_
#define _TRACE_POWER_ENUM_
enum {
- POWER_NONE = 0,
- POWER_CSTATE = 1,
- POWER_PSTATE = 2,
+ POWER_NONE = 0,
+ POWER_CSTATE = 1, /* C-State */
+ POWER_PSTATE = 2, /* Frequency change or DVFS */
+ POWER_SSTATE = 3, /* Suspend */
};
#endif
+/*
+ * The power events are used for cpuidle & suspend (power_start, power_end)
+ * and for cpufreq (power_frequency)
+ */
DECLARE_EVENT_CLASS(power,
TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id),
@@ -70,6 +75,85 @@
);
+/*
+ * The clock events are used for clock enable/disable and for
+ * clock rate change
+ */
+DECLARE_EVENT_CLASS(clock,
+
+ TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
+
+ TP_ARGS(name, state, cpu_id),
+
+ TP_STRUCT__entry(
+ __string( name, name )
+ __field( u64, state )
+ __field( u64, cpu_id )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, name);
+ __entry->state = state;
+ __entry->cpu_id = cpu_id;
+ ),
+
+ TP_printk("%s state=%lu cpu_id=%lu", __get_str(name),
+ (unsigned long)__entry->state, (unsigned long)__entry->cpu_id)
+);
+
+DEFINE_EVENT(clock, clock_enable,
+
+ TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
+
+ TP_ARGS(name, state, cpu_id)
+);
+
+DEFINE_EVENT(clock, clock_disable,
+
+ TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
+
+ TP_ARGS(name, state, cpu_id)
+);
+
+DEFINE_EVENT(clock, clock_set_rate,
+
+ TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
+
+ TP_ARGS(name, state, cpu_id)
+);
+
+/*
+ * The power domain events are used for power domain transitions
+ */
+DECLARE_EVENT_CLASS(power_domain,
+
+ TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
+
+ TP_ARGS(name, state, cpu_id),
+
+ TP_STRUCT__entry(
+ __string( name, name )
+ __field( u64, state )
+ __field( u64, cpu_id )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, name);
+ __entry->state = state;
+ __entry->cpu_id = cpu_id;
+ ),
+
+ TP_printk("%s state=%lu cpu_id=%lu", __get_str(name),
+ (unsigned long)__entry->state, (unsigned long)__entry->cpu_id)
+);
+
+DEFINE_EVENT(power_domain, power_domain_target,
+
+ TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
+
+ TP_ARGS(name, state, cpu_id)
+);
+
#endif /* _TRACE_POWER_H */
/* This part must be outside protection */
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 9208c92..f633478 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -362,6 +362,35 @@
(unsigned long long)__entry->vruntime)
);
+/*
+ * Tracepoint for showing priority inheritance modifying a tasks
+ * priority.
+ */
+TRACE_EVENT(sched_pi_setprio,
+
+ TP_PROTO(struct task_struct *tsk, int newprio),
+
+ TP_ARGS(tsk, newprio),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, oldprio )
+ __field( int, newprio )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->oldprio = tsk->prio;
+ __entry->newprio = newprio;
+ ),
+
+ TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
+ __entry->comm, __entry->pid,
+ __entry->oldprio, __entry->newprio)
+);
+
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h
index 4b2be6d..75ce9d5 100644
--- a/include/trace/events/skb.h
+++ b/include/trace/events/skb.h
@@ -35,6 +35,23 @@
__entry->skbaddr, __entry->protocol, __entry->location)
);
+TRACE_EVENT(consume_skb,
+
+ TP_PROTO(struct sk_buff *skb),
+
+ TP_ARGS(skb),
+
+ TP_STRUCT__entry(
+ __field( void *, skbaddr )
+ ),
+
+ TP_fast_assign(
+ __entry->skbaddr = skb;
+ ),
+
+ TP_printk("skbaddr=%p", __entry->skbaddr)
+);
+
TRACE_EVENT(skb_copy_datagram_iovec,
TP_PROTO(const struct sk_buff *skb, int len),
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index c624126..425bcfe 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -81,14 +81,16 @@
TP_STRUCT__entry(
__field( void *, timer )
__field( unsigned long, now )
+ __field( void *, function)
),
TP_fast_assign(
__entry->timer = timer;
__entry->now = jiffies;
+ __entry->function = timer->function;
),
- TP_printk("timer=%p now=%lu", __entry->timer, __entry->now)
+ TP_printk("timer=%p function=%pf now=%lu", __entry->timer, __entry->function,__entry->now)
);
/**
@@ -200,14 +202,16 @@
TP_STRUCT__entry(
__field( void *, hrtimer )
__field( s64, now )
+ __field( void *, function)
),
TP_fast_assign(
__entry->hrtimer = hrtimer;
__entry->now = now->tv64;
+ __entry->function = hrtimer->function;
),
- TP_printk("hrtimer=%p now=%llu", __entry->hrtimer,
+ TP_printk("hrtimer=%p function=%pf now=%llu", __entry->hrtimer, __entry->function,
(unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now }))
);
diff --git a/include/xen/platform_pci.h b/include/xen/platform_pci.h
index ce9d671..a785a3b 100644
--- a/include/xen/platform_pci.h
+++ b/include/xen/platform_pci.h
@@ -16,11 +16,15 @@
#define XEN_IOPORT_PROTOVER (XEN_IOPORT_BASE + 2) /* 1 byte access (R) */
#define XEN_IOPORT_PRODNUM (XEN_IOPORT_BASE + 2) /* 2 byte access (W) */
-#define XEN_UNPLUG_ALL_IDE_DISKS 1
-#define XEN_UNPLUG_ALL_NICS 2
-#define XEN_UNPLUG_AUX_IDE_DISKS 4
-#define XEN_UNPLUG_ALL 7
-#define XEN_UNPLUG_IGNORE 8
+#define XEN_UNPLUG_ALL_IDE_DISKS (1<<0)
+#define XEN_UNPLUG_ALL_NICS (1<<1)
+#define XEN_UNPLUG_AUX_IDE_DISKS (1<<2)
+#define XEN_UNPLUG_ALL (XEN_UNPLUG_ALL_IDE_DISKS|\
+ XEN_UNPLUG_ALL_NICS|\
+ XEN_UNPLUG_AUX_IDE_DISKS)
+
+#define XEN_UNPLUG_UNNECESSARY (1<<16)
+#define XEN_UNPLUG_NEVER (1<<17)
static inline int xen_must_unplug_nics(void) {
#if (defined(CONFIG_XEN_NETDEV_FRONTEND) || \
diff --git a/init/Kconfig b/init/Kconfig
index 2de5b1c..7b920aa 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -21,6 +21,13 @@
depends on !UML
default y
+config HAVE_IRQ_WORK
+ bool
+
+config IRQ_WORK
+ bool
+ depends on HAVE_IRQ_WORK
+
menu "General setup"
config EXPERIMENTAL
@@ -340,6 +347,7 @@
config TREE_RCU
bool "Tree-based hierarchical RCU"
+ depends on !PREEMPT && SMP
help
This option selects the RCU implementation that is
designed for very large SMP systems with hundreds or
@@ -347,7 +355,7 @@
smaller systems.
config TREE_PREEMPT_RCU
- bool "Preemptable tree-based hierarchical RCU"
+ bool "Preemptible tree-based hierarchical RCU"
depends on PREEMPT
help
This option selects the RCU implementation that is
@@ -365,8 +373,22 @@
is not required. This option greatly reduces the
memory footprint of RCU.
+config TINY_PREEMPT_RCU
+ bool "Preemptible UP-only small-memory-footprint RCU"
+ depends on !SMP && PREEMPT
+ help
+ This option selects the RCU implementation that is designed
+ for real-time UP systems. This option greatly reduces the
+ memory footprint of RCU.
+
endchoice
+config PREEMPT_RCU
+ def_bool ( TREE_PREEMPT_RCU || TINY_PREEMPT_RCU )
+ help
+ This option enables preemptible-RCU code that is common between
+ the TREE_PREEMPT_RCU and TINY_PREEMPT_RCU implementations.
+
config RCU_TRACE
bool "Enable tracing for RCU"
depends on TREE_RCU || TREE_PREEMPT_RCU
@@ -387,9 +409,12 @@
help
This option controls the fanout of hierarchical implementations
of RCU, allowing RCU to work efficiently on machines with
- large numbers of CPUs. This value must be at least the cube
- root of NR_CPUS, which allows NR_CPUS up to 32,768 for 32-bit
- systems and up to 262,144 for 64-bit systems.
+ large numbers of CPUs. This value must be at least the fourth
+ root of NR_CPUS, which allows NR_CPUS to be insanely large.
+ The default value of RCU_FANOUT should be used for production
+ systems, but if you are stress-testing the RCU implementation
+ itself, small RCU_FANOUT values allow you to test large-system
+ code paths on small(er) systems.
Select a specific number if testing RCU itself.
Take the default if unsure.
@@ -987,6 +1012,7 @@
default y if (PROFILING || PERF_COUNTERS)
depends on HAVE_PERF_EVENTS
select ANON_INODES
+ select IRQ_WORK
help
Enable kernel support for various performance events provided
by software and hardware.
diff --git a/ipc/sem.c b/ipc/sem.c
index 40a8f46..0e0d49b 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -743,6 +743,8 @@
{
struct semid_ds out;
+ memset(&out, 0, sizeof(out));
+
ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);
out.sem_otime = in->sem_otime;
diff --git a/kernel/Makefile b/kernel/Makefile
index 0b72d1a..e2c9d52 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -10,7 +10,7 @@
kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \
- async.o range.o
+ async.o range.o jump_label.o
obj-$(CONFIG_HAVE_EARLY_RES) += early_res.o
obj-y += groups.o
@@ -23,6 +23,7 @@
CFLAGS_REMOVE_cgroup-debug.o = -pg
CFLAGS_REMOVE_sched_clock.o = -pg
CFLAGS_REMOVE_perf_event.o = -pg
+CFLAGS_REMOVE_irq_work.o = -pg
endif
obj-$(CONFIG_FREEZER) += freezer.o
@@ -86,6 +87,7 @@
obj-$(CONFIG_TREE_PREEMPT_RCU) += rcutree.o
obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o
obj-$(CONFIG_TINY_RCU) += rcutiny.o
+obj-$(CONFIG_TINY_PREEMPT_RCU) += rcutiny.o
obj-$(CONFIG_RELAY) += relay.o
obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
@@ -100,6 +102,7 @@
obj-$(CONFIG_X86_DS) += trace/
obj-$(CONFIG_RING_BUFFER) += trace/
obj-$(CONFIG_SMP) += sched_cpupri.o
+obj-$(CONFIG_IRQ_WORK) += irq_work.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 192f88c..291ba3d 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -138,7 +138,7 @@
* is called after synchronize_rcu(). But for safe use, css_is_removed()
* css_tryget() should be used for avoiding race.
*/
- struct cgroup_subsys_state *css;
+ struct cgroup_subsys_state __rcu *css;
/*
* ID of this css.
*/
@@ -1791,19 +1791,20 @@
}
/**
- * cgroup_attach_task_current_cg - attach task 'tsk' to current task's cgroup
+ * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
+ * @from: attach to all cgroups of a given task
* @tsk: the task to be attached
*/
-int cgroup_attach_task_current_cg(struct task_struct *tsk)
+int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
struct cgroupfs_root *root;
- struct cgroup *cur_cg;
int retval = 0;
cgroup_lock();
for_each_active_root(root) {
- cur_cg = task_cgroup_from_root(current, root);
- retval = cgroup_attach_task(cur_cg, tsk);
+ struct cgroup *from_cg = task_cgroup_from_root(from, root);
+
+ retval = cgroup_attach_task(from_cg, tsk);
if (retval)
break;
}
@@ -1811,7 +1812,7 @@
return retval;
}
-EXPORT_SYMBOL_GPL(cgroup_attach_task_current_cg);
+EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
/*
* Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex
diff --git a/kernel/compat.c b/kernel/compat.c
index e167efc..c9e2ec0 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -1126,3 +1126,24 @@
return 0;
}
+
+/*
+ * Allocate user-space memory for the duration of a single system call,
+ * in order to marshall parameters inside a compat thunk.
+ */
+void __user *compat_alloc_user_space(unsigned long len)
+{
+ void __user *ptr;
+
+ /* If len would occupy more than half of the entire compat space... */
+ if (unlikely(len > (((compat_uptr_t)~0) >> 1)))
+ return NULL;
+
+ ptr = arch_compat_alloc_user_space(len);
+
+ if (unlikely(!access_ok(VERIFY_WRITE, ptr, len)))
+ return NULL;
+
+ return ptr;
+}
+EXPORT_SYMBOL_GPL(compat_alloc_user_space);
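A sketch of the intended call pattern; the syscall and both structs are hypothetical, invented only to show the shape of a compat thunk:

	struct compat_example { compat_uptr_t buf; };
	struct example { void __user *buf; };

	asmlinkage long compat_sys_example(struct compat_example __user *uarg32)
	{
		struct example __user *uarg64;
		compat_uptr_t ptr32;

		/* Carve a 64-bit copy of the argument out of user space. */
		uarg64 = compat_alloc_user_space(sizeof(*uarg64));
		if (uarg64 == NULL)
			return -EFAULT;

		/* Widen each 32-bit field into the 64-bit layout. */
		if (get_user(ptr32, &uarg32->buf) ||
		    put_user(compat_ptr(ptr32), &uarg64->buf))
			return -EFAULT;

		return sys_example(uarg64);
	}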
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index b23c097..51b143e 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1397,7 +1397,7 @@
if (tsk->flags & PF_THREAD_BOUND)
return -EINVAL;
- ret = security_task_setscheduler(tsk, 0, NULL);
+ ret = security_task_setscheduler(tsk);
if (ret)
return ret;
if (threadgroup) {
@@ -1405,7 +1405,7 @@
rcu_read_lock();
list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
- ret = security_task_setscheduler(c, 0, NULL);
+ ret = security_task_setscheduler(c);
if (ret) {
rcu_read_unlock();
return ret;
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 3c2d497..de407c7 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -741,7 +741,7 @@
};
#ifdef CONFIG_MAGIC_SYSRQ
-static void sysrq_handle_dbg(int key, struct tty_struct *tty)
+static void sysrq_handle_dbg(int key)
{
if (!dbg_io_ops) {
printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
diff --git a/kernel/debug/kdb/kdb_bp.c b/kernel/debug/kdb/kdb_bp.c
index 75bd9b3..20059ef 100644
--- a/kernel/debug/kdb/kdb_bp.c
+++ b/kernel/debug/kdb/kdb_bp.c
@@ -274,7 +274,6 @@
int i, bpno;
kdb_bp_t *bp, *bp_check;
int diag;
- int free;
char *symname = NULL;
long offset = 0ul;
int nextarg;
@@ -305,7 +304,6 @@
/*
* Find an empty bp structure to allocate
*/
- free = KDB_MAXBPT;
for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) {
if (bp->bp_free)
break;
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 28b8441..caf057a 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -1929,7 +1929,7 @@
if (argc != 1)
return KDB_ARGCOUNT;
kdb_trap_printk++;
- __handle_sysrq(*argv[1], NULL, 0);
+ __handle_sysrq(*argv[1], false);
kdb_trap_printk--;
return 0;
diff --git a/kernel/exit.c b/kernel/exit.c
index 0312022..e2bdf37 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -149,9 +149,7 @@
{
struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
-#ifdef CONFIG_PERF_EVENTS
- WARN_ON_ONCE(tsk->perf_event_ctxp);
-#endif
+ perf_event_delayed_put(tsk);
trace_sched_process_free(tsk);
put_task_struct(tsk);
}
diff --git a/kernel/fork.c b/kernel/fork.c
index b7e9d60..c445f8c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -356,10 +356,10 @@
if (IS_ERR(pol))
goto fail_nomem_policy;
vma_set_policy(tmp, pol);
+ tmp->vm_mm = mm;
if (anon_vma_fork(tmp, mpnt))
goto fail_nomem_anon_vma_fork;
tmp->vm_flags &= ~VM_LOCKED;
- tmp->vm_mm = mm;
tmp->vm_next = tmp->vm_prev = NULL;
file = tmp->vm_file;
if (file) {
diff --git a/kernel/gcov/fs.c b/kernel/gcov/fs.c
index ef3c3f8..f83972b 100644
--- a/kernel/gcov/fs.c
+++ b/kernel/gcov/fs.c
@@ -33,10 +33,11 @@
* @children: child nodes
* @all: list head for list of all nodes
* @parent: parent node
- * @info: associated profiling data structure if not a directory
- * @ghost: when an object file containing profiling data is unloaded we keep a
- * copy of the profiling data here to allow collecting coverage data
- * for cleanup code. Such a node is called a "ghost".
+ * @loaded_info: array of pointers to profiling data sets for loaded object
+ * files.
+ * @num_loaded: number of profiling data sets for loaded object files.
+ * @unloaded_info: accumulated copy of profiling data sets for unloaded
+ * object files. Used only when gcov_persist=1.
* @dentry: main debugfs entry, either a directory or data file
* @links: associated symbolic links
* @name: data file basename
@@ -51,10 +52,11 @@
struct list_head children;
struct list_head all;
struct gcov_node *parent;
- struct gcov_info *info;
- struct gcov_info *ghost;
+ struct gcov_info **loaded_info;
+ struct gcov_info *unloaded_info;
struct dentry *dentry;
struct dentry **links;
+ int num_loaded;
char name[0];
};
@@ -136,16 +138,37 @@
};
/*
- * Return the profiling data set for a given node. This can either be the
- * original profiling data structure or a duplicate (also called "ghost")
- * in case the associated object file has been unloaded.
+ * Return a profiling data set associated with the given node. This is
+ * either a data set for a loaded object file or a data set copy in case
+ * all associated object files have been unloaded.
*/
static struct gcov_info *get_node_info(struct gcov_node *node)
{
- if (node->info)
- return node->info;
+ if (node->num_loaded > 0)
+ return node->loaded_info[0];
- return node->ghost;
+ return node->unloaded_info;
+}
+
+/*
+ * Return a newly allocated profiling data set which contains the sum of
+ * all profiling data associated with the given node.
+ */
+static struct gcov_info *get_accumulated_info(struct gcov_node *node)
+{
+ struct gcov_info *info;
+ int i = 0;
+
+ if (node->unloaded_info)
+ info = gcov_info_dup(node->unloaded_info);
+ else
+ info = gcov_info_dup(node->loaded_info[i++]);
+ if (!info)
+ return NULL;
+ for (; i < node->num_loaded; i++)
+ gcov_info_add(info, node->loaded_info[i]);
+
+ return info;
}
/*
@@ -163,9 +186,10 @@
mutex_lock(&node_lock);
/*
* Read from a profiling data copy to minimize reference tracking
- * complexity and concurrent access.
+ * complexity and concurrent access, and to keep the accumulation of
+ * multiple profiling data sets associated with one node simple.
*/
- info = gcov_info_dup(get_node_info(node));
+ info = get_accumulated_info(node);
if (!info)
goto out_unlock;
iter = gcov_iter_new(info);
@@ -225,12 +249,25 @@
return NULL;
}
+/*
+ * Reset all profiling data associated with the specified node.
+ */
+static void reset_node(struct gcov_node *node)
+{
+ int i;
+
+ if (node->unloaded_info)
+ gcov_info_reset(node->unloaded_info);
+ for (i = 0; i < node->num_loaded; i++)
+ gcov_info_reset(node->loaded_info[i]);
+}
+
static void remove_node(struct gcov_node *node);
/*
* write() implementation for gcov data files. Reset profiling data for the
- * associated file. If the object file has been unloaded (i.e. this is
- * a "ghost" node), remove the debug fs node as well.
+ * corresponding file. If all associated object files have been unloaded,
+ * remove the debug fs node as well.
*/
static ssize_t gcov_seq_write(struct file *file, const char __user *addr,
size_t len, loff_t *pos)
@@ -245,10 +282,10 @@
node = get_node_by_name(info->filename);
if (node) {
/* Reset counts or remove node for unloaded modules. */
- if (node->ghost)
+ if (node->num_loaded == 0)
remove_node(node);
else
- gcov_info_reset(node->info);
+ reset_node(node);
}
/* Reset counts for open file. */
gcov_info_reset(info);
@@ -378,7 +415,10 @@
INIT_LIST_HEAD(&node->list);
INIT_LIST_HEAD(&node->children);
INIT_LIST_HEAD(&node->all);
- node->info = info;
+ if (node->loaded_info) {
+ node->loaded_info[0] = info;
+ node->num_loaded = 1;
+ }
node->parent = parent;
if (name)
strcpy(node->name, name);
@@ -394,9 +434,13 @@
struct gcov_node *node;
node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL);
- if (!node) {
- pr_warning("out of memory\n");
- return NULL;
+ if (!node)
+ goto err_nomem;
+ if (info) {
+ node->loaded_info = kcalloc(1, sizeof(struct gcov_info *),
+ GFP_KERNEL);
+ if (!node->loaded_info)
+ goto err_nomem;
}
init_node(node, info, name, parent);
/* Differentiate between gcov data file nodes and directory nodes. */
@@ -416,6 +460,11 @@
list_add(&node->all, &all_head);
return node;
+
+err_nomem:
+ kfree(node);
+ pr_warning("out of memory\n");
+ return NULL;
}
/* Remove symbolic links associated with node. */
@@ -441,8 +490,9 @@
list_del(&node->all);
debugfs_remove(node->dentry);
remove_links(node);
- if (node->ghost)
- gcov_info_free(node->ghost);
+ kfree(node->loaded_info);
+ if (node->unloaded_info)
+ gcov_info_free(node->unloaded_info);
kfree(node);
}
@@ -477,7 +527,7 @@
/*
* write() implementation for reset file. Reset all profiling data to zero
- * and remove ghost nodes.
+ * and remove nodes for which all associated object files are unloaded.
*/
static ssize_t reset_write(struct file *file, const char __user *addr,
size_t len, loff_t *pos)
@@ -487,8 +537,8 @@
mutex_lock(&node_lock);
restart:
list_for_each_entry(node, &all_head, all) {
- if (node->info)
- gcov_info_reset(node->info);
+ if (node->num_loaded > 0)
+ reset_node(node);
else if (list_empty(&node->children)) {
remove_node(node);
/* Several nodes may have gone - restart loop. */
@@ -564,37 +614,115 @@
}
/*
- * The profiling data set associated with this node is being unloaded. Store a
- * copy of the profiling data and turn this node into a "ghost".
+ * Associate a profiling data set with an existing node. Needs to be called
+ * with node_lock held.
*/
-static int ghost_node(struct gcov_node *node)
+static void add_info(struct gcov_node *node, struct gcov_info *info)
{
- node->ghost = gcov_info_dup(node->info);
- if (!node->ghost) {
- pr_warning("could not save data for '%s' (out of memory)\n",
- node->info->filename);
- return -ENOMEM;
- }
- node->info = NULL;
+ struct gcov_info **loaded_info;
+ int num = node->num_loaded;
- return 0;
+ /*
+ * Prepare the new array first; this simplifies cleanup in case the
+ * new data set is incompatible, the node only contains unloaded data
+ * sets, and there's not enough memory for the array.
+ */
+ loaded_info = kcalloc(num + 1, sizeof(struct gcov_info *), GFP_KERNEL);
+ if (!loaded_info) {
+ pr_warning("could not add '%s' (out of memory)\n",
+ info->filename);
+ return;
+ }
+ memcpy(loaded_info, node->loaded_info,
+ num * sizeof(struct gcov_info *));
+ loaded_info[num] = info;
+ /* Check if the new data set is compatible. */
+ if (num == 0) {
+ /*
+ * A module was unloaded, modified and reloaded. The new
+ * data set replaces the copy of the last one.
+ */
+ if (!gcov_info_is_compatible(node->unloaded_info, info)) {
+ pr_warning("discarding saved data for %s "
+ "(incompatible version)\n", info->filename);
+ gcov_info_free(node->unloaded_info);
+ node->unloaded_info = NULL;
+ }
+ } else {
+ /*
+ * Two different versions of the same object file are loaded.
+ * The initial one takes precedence.
+ */
+ if (!gcov_info_is_compatible(node->loaded_info[0], info)) {
+ pr_warning("could not add '%s' (incompatible "
+ "version)\n", info->filename);
+ kfree(loaded_info);
+ return;
+ }
+ }
+ /* Overwrite previous array. */
+ kfree(node->loaded_info);
+ node->loaded_info = loaded_info;
+ node->num_loaded = num + 1;
}
/*
- * Profiling data for this node has been loaded again. Add profiling data
- * from previous instantiation and turn this node into a regular node.
+ * Return the index of a profiling data set associated with a node.
*/
-static void revive_node(struct gcov_node *node, struct gcov_info *info)
+static int get_info_index(struct gcov_node *node, struct gcov_info *info)
{
- if (gcov_info_is_compatible(node->ghost, info))
- gcov_info_add(info, node->ghost);
- else {
- pr_warning("discarding saved data for '%s' (version changed)\n",
- info->filename);
+ int i;
+
+ for (i = 0; i < node->num_loaded; i++) {
+ if (node->loaded_info[i] == info)
+ return i;
}
- gcov_info_free(node->ghost);
- node->ghost = NULL;
- node->info = info;
+ return -ENOENT;
+}
+
+/*
+ * Save the data of a profiling data set which is being unloaded.
+ */
+static void save_info(struct gcov_node *node, struct gcov_info *info)
+{
+ if (node->unloaded_info)
+ gcov_info_add(node->unloaded_info, info);
+ else {
+ node->unloaded_info = gcov_info_dup(info);
+ if (!node->unloaded_info) {
+ pr_warning("could not save data for '%s' "
+ "(out of memory)\n", info->filename);
+ }
+ }
+}
+
+/*
+ * Disassociate a profiling data set from a node. Needs to be called with
+ * node_lock held.
+ */
+static void remove_info(struct gcov_node *node, struct gcov_info *info)
+{
+ int i;
+
+ i = get_info_index(node, info);
+ if (i < 0) {
+ pr_warning("could not remove '%s' (not found)\n",
+ info->filename);
+ return;
+ }
+ if (gcov_persist)
+ save_info(node, info);
+ /* Shrink array. */
+ node->loaded_info[i] = node->loaded_info[node->num_loaded - 1];
+ node->num_loaded--;
+ if (node->num_loaded > 0)
+ return;
+ /* Last loaded data set was removed. */
+ kfree(node->loaded_info);
+ node->loaded_info = NULL;
+ node->num_loaded = 0;
+ if (!node->unloaded_info)
+ remove_node(node);
}
/*
@@ -609,30 +737,18 @@
node = get_node_by_name(info->filename);
switch (action) {
case GCOV_ADD:
- /* Add new node or revive ghost. */
- if (!node) {
+ if (node)
+ add_info(node, info);
+ else
add_node(info);
- break;
- }
- if (gcov_persist)
- revive_node(node, info);
- else {
- pr_warning("could not add '%s' (already exists)\n",
- info->filename);
- }
break;
case GCOV_REMOVE:
- /* Remove node or turn into ghost. */
- if (!node) {
+ if (node)
+ remove_info(node, info);
+ else {
pr_warning("could not remove '%s' (not found)\n",
info->filename);
- break;
}
- if (gcov_persist) {
- if (!ghost_node(node))
- break;
- }
- remove_node(node);
break;
}
mutex_unlock(&node_lock);
diff --git a/kernel/groups.c b/kernel/groups.c
index 53b1916..253dc0f 100644
--- a/kernel/groups.c
+++ b/kernel/groups.c
@@ -143,10 +143,9 @@
right = group_info->ngroups;
while (left < right) {
unsigned int mid = (left+right)/2;
- int cmp = grp - GROUP_AT(group_info, mid);
- if (cmp > 0)
+ if (grp > GROUP_AT(group_info, mid))
left = mid + 1;
- else if (cmp < 0)
+ else if (grp < GROUP_AT(group_info, mid))
right = mid;
else
return 1;
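The subtraction idiom was dropped because gid_t is unsigned: when the two values differ by more than INT_MAX, the difference converted to int has the wrong sign and the binary search walks the wrong way. An illustrative fragment:

	/* Illustrative only: why `int cmp = a - b` misorders large gids. */
	gid_t a = 0x80000000u, b = 0;
	int cmp = a - b;	/* 0x80000000 converts to a negative int */
	/* cmp < 0 although a > b, so the search would take the wrong branch. */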
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index ce66917..72206cf 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -931,6 +931,7 @@
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
if (hrtimer_is_queued(timer)) {
+ unsigned long state;
int reprogram;
/*
@@ -944,8 +945,13 @@
debug_deactivate(timer);
timer_stats_hrtimer_clear_start_info(timer);
reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
- __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE,
- reprogram);
+ /*
+ * We must preserve the CALLBACK state flag here,
+ * otherwise we could move the timer base in
+ * switch_hrtimer_base.
+ */
+ state = timer->state & HRTIMER_STATE_CALLBACK;
+ __remove_hrtimer(timer, base, state, reprogram);
return 1;
}
return 0;
@@ -1091,11 +1097,10 @@
*/
ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
- struct hrtimer_clock_base *base;
unsigned long flags;
ktime_t rem;
- base = lock_hrtimer_base(timer, &flags);
+ lock_hrtimer_base(timer, &flags);
rem = hrtimer_expires_remaining(timer);
unlock_hrtimer_base(timer, &flags);
@@ -1232,6 +1237,9 @@
BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
enqueue_hrtimer(timer, base);
}
+
+ WARN_ON_ONCE(!(timer->state & HRTIMER_STATE_CALLBACK));
+
timer->state &= ~HRTIMER_STATE_CALLBACK;
}
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 0c642d5..53ead17 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -98,7 +98,7 @@
printk(KERN_ERR "\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
" disables this message.\n");
sched_show_task(t);
- __debug_show_held_locks(t);
+ debug_show_held_locks(t);
touch_nmi_watchdog();
@@ -111,7 +111,7 @@
* periodically exit the critical section and enter a new one.
*
* For preemptible RCU it is sufficient to call rcu_read_unlock in order
- * exit the grace period. For classic RCU, a reschedule is required.
+ * to exit the grace period. For classic RCU, a reschedule is required.
*/
static void rcu_lock_break(struct task_struct *g, struct task_struct *t)
{
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index d71a987..2c9120f 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -113,12 +113,12 @@
*/
static int task_bp_pinned(struct perf_event *bp, enum bp_type_idx type)
{
- struct perf_event_context *ctx = bp->ctx;
+ struct task_struct *tsk = bp->hw.bp_target;
struct perf_event *iter;
int count = 0;
list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
- if (iter->ctx == ctx && find_slot_idx(iter) == type)
+ if (iter->hw.bp_target == tsk && find_slot_idx(iter) == type)
count += hw_breakpoint_weight(iter);
}
@@ -134,7 +134,7 @@
enum bp_type_idx type)
{
int cpu = bp->cpu;
- struct task_struct *tsk = bp->ctx->task;
+ struct task_struct *tsk = bp->hw.bp_target;
if (cpu >= 0) {
slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu);
@@ -213,7 +213,7 @@
int weight)
{
int cpu = bp->cpu;
- struct task_struct *tsk = bp->ctx->task;
+ struct task_struct *tsk = bp->hw.bp_target;
/* Pinned counter cpu profiling */
if (!tsk) {
@@ -433,7 +433,7 @@
perf_overflow_handler_t triggered,
struct task_struct *tsk)
{
- return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
+ return perf_event_create_kernel_counter(attr, -1, tsk, triggered);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
@@ -515,7 +515,7 @@
get_online_cpus();
for_each_online_cpu(cpu) {
pevent = per_cpu_ptr(cpu_events, cpu);
- bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered);
+ bp = perf_event_create_kernel_counter(attr, cpu, NULL, triggered);
*pevent = bp;
@@ -565,6 +565,61 @@
.priority = 0x7fffffff
};
+static void bp_perf_event_destroy(struct perf_event *event)
+{
+ release_bp_slot(event);
+}
+
+static int hw_breakpoint_event_init(struct perf_event *bp)
+{
+ int err;
+
+ if (bp->attr.type != PERF_TYPE_BREAKPOINT)
+ return -ENOENT;
+
+ err = register_perf_hw_breakpoint(bp);
+ if (err)
+ return err;
+
+ bp->destroy = bp_perf_event_destroy;
+
+ return 0;
+}
+
+static int hw_breakpoint_add(struct perf_event *bp, int flags)
+{
+ if (!(flags & PERF_EF_START))
+ bp->hw.state = PERF_HES_STOPPED;
+
+ return arch_install_hw_breakpoint(bp);
+}
+
+static void hw_breakpoint_del(struct perf_event *bp, int flags)
+{
+ arch_uninstall_hw_breakpoint(bp);
+}
+
+static void hw_breakpoint_start(struct perf_event *bp, int flags)
+{
+ bp->hw.state = 0;
+}
+
+static void hw_breakpoint_stop(struct perf_event *bp, int flags)
+{
+ bp->hw.state = PERF_HES_STOPPED;
+}
+
+static struct pmu perf_breakpoint = {
+ .task_ctx_nr = perf_sw_context, /* could eventually get its own */
+
+ .event_init = hw_breakpoint_event_init,
+ .add = hw_breakpoint_add,
+ .del = hw_breakpoint_del,
+ .start = hw_breakpoint_start,
+ .stop = hw_breakpoint_stop,
+ .read = hw_breakpoint_pmu_read,
+};
+
static int __init init_hw_breakpoint(void)
{
unsigned int **task_bp_pinned;
@@ -586,6 +641,8 @@
constraints_initialized = 1;
+ perf_pmu_register(&perf_breakpoint);
+
return register_die_notifier(&hw_breakpoint_exceptions_nb);
err_alloc:
@@ -601,8 +658,3 @@
core_initcall(init_hw_breakpoint);
-struct pmu perf_ops_bp = {
- .enable = arch_install_hw_breakpoint,
- .disable = arch_uninstall_hw_breakpoint,
- .read = hw_breakpoint_pmu_read,
-};
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
new file mode 100644
index 0000000..f16763f
--- /dev/null
+++ b/kernel/irq_work.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *
+ * Provides a framework for enqueueing and running callbacks from hardirq
+ * context. The enqueueing is NMI-safe.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/irq_work.h>
+#include <linux/hardirq.h>
+
+/*
+ * An entry can be in one of four states:
+ *
+ * free NULL, 0 -> {claimed} : free to be used
+ * claimed NULL, 3 -> {pending} : claimed to be enqueued
+ * pending next, 3 -> {busy} : queued, pending callback
+ * busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed
+ *
+ * We use the lower two bits of the next pointer to keep PENDING and BUSY
+ * flags.
+ */
+
+#define IRQ_WORK_PENDING 1UL
+#define IRQ_WORK_BUSY 2UL
+#define IRQ_WORK_FLAGS 3UL
+
+static inline bool irq_work_is_set(struct irq_work *entry, int flags)
+{
+ return (unsigned long)entry->next & flags;
+}
+
+static inline struct irq_work *irq_work_next(struct irq_work *entry)
+{
+ unsigned long next = (unsigned long)entry->next;
+ next &= ~IRQ_WORK_FLAGS;
+ return (struct irq_work *)next;
+}
+
+static inline struct irq_work *next_flags(struct irq_work *entry, int flags)
+{
+ unsigned long next = (unsigned long)entry;
+ next |= flags;
+ return (struct irq_work *)next;
+}
+
+static DEFINE_PER_CPU(struct irq_work *, irq_work_list);
+
+/*
+ * Claim the entry so that no one else will poke at it.
+ */
+static bool irq_work_claim(struct irq_work *entry)
+{
+ struct irq_work *next, *nflags;
+
+ do {
+ next = entry->next;
+ if ((unsigned long)next & IRQ_WORK_PENDING)
+ return false;
+ nflags = next_flags(next, IRQ_WORK_FLAGS);
+ } while (cmpxchg(&entry->next, next, nflags) != next);
+
+ return true;
+}
+
+
+void __weak arch_irq_work_raise(void)
+{
+ /*
+ * Lame architectures will get the timer tick callback
+ */
+}
+
+/*
+ * Queue the entry and raise the IPI if needed.
+ */
+static void __irq_work_queue(struct irq_work *entry)
+{
+ struct irq_work **head, *next;
+
+ head = &get_cpu_var(irq_work_list);
+
+ do {
+ next = *head;
+ /* Can assign non-atomic because we keep the flags set. */
+ entry->next = next_flags(next, IRQ_WORK_FLAGS);
+ } while (cmpxchg(head, next, entry) != next);
+
+ /* The list was empty, raise self-interrupt to start processing. */
+ if (!irq_work_next(entry))
+ arch_irq_work_raise();
+
+ put_cpu_var(irq_work_list);
+}
+
+/*
+ * Enqueue the irq_work @entry; returns true on success, or false when
+ * @entry was already enqueued by someone else.
+ *
+ * Can be re-enqueued while the callback is still in progress.
+ */
+bool irq_work_queue(struct irq_work *entry)
+{
+ if (!irq_work_claim(entry)) {
+ /*
+ * Already enqueued, can't do!
+ */
+ return false;
+ }
+
+ __irq_work_queue(entry);
+ return true;
+}
+EXPORT_SYMBOL_GPL(irq_work_queue);
+
+/*
+ * Run the irq_work entries on this cpu. Must be run from hardirq
+ * context with local IRQs disabled.
+ */
+void irq_work_run(void)
+{
+ struct irq_work *list, **head;
+
+ head = &__get_cpu_var(irq_work_list);
+ if (*head == NULL)
+ return;
+
+ BUG_ON(!in_irq());
+ BUG_ON(!irqs_disabled());
+
+ list = xchg(head, NULL);
+ while (list != NULL) {
+ struct irq_work *entry = list;
+
+ list = irq_work_next(list);
+
+ /*
+ * Clear the PENDING bit, after this point the @entry
+ * can be re-used.
+ */
+ entry->next = next_flags(NULL, IRQ_WORK_BUSY);
+ entry->func(entry);
+ /*
+ * Clear the BUSY bit and return to the free state if
+ * no-one else claimed it meanwhile.
+ */
+ cmpxchg(&entry->next, next_flags(NULL, IRQ_WORK_BUSY), NULL);
+ }
+}
+EXPORT_SYMBOL_GPL(irq_work_run);
+
+/*
+ * Synchronize against the irq_work @entry, ensures the entry is not
+ * currently in use.
+ */
+void irq_work_sync(struct irq_work *entry)
+{
+ WARN_ON_ONCE(irqs_disabled());
+
+ while (irq_work_is_set(entry, IRQ_WORK_BUSY))
+ cpu_relax();
+}
+EXPORT_SYMBOL_GPL(irq_work_sync);
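
A minimal usage sketch of the new interface (hypothetical names, not part of the patch; it assumes the struct irq_work introduced alongside this file, i.e. a flags-carrying ->next pointer plus a ->func callback taking the work item):

#include <linux/kernel.h>
#include <linux/irq_work.h>

/* Hypothetical example; a zeroed ->next is the "free" state above. */
static void my_work_func(struct irq_work *work)
{
	/* Invoked later from hardirq context by irq_work_run(). */
	pr_info("deferred out of NMI context\n");
}

static struct irq_work my_work = { .func = my_work_func };

static void my_nmi_handler(void)
{
	/* NMI-safe; returns false if my_work is already pending. */
	irq_work_queue(&my_work);
}

irq_work_sync(&my_work) can then be called from process context before freeing the work item, matching the state machine documented at the top of the file.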
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
new file mode 100644
index 0000000..7be868b
--- /dev/null
+++ b/kernel/jump_label.c
@@ -0,0 +1,429 @@
+/*
+ * jump label support
+ *
+ * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
+ *
+ */
+#include <linux/jump_label.h>
+#include <linux/memory.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/jhash.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/err.h>
+
+#ifdef HAVE_JUMP_LABEL
+
+#define JUMP_LABEL_HASH_BITS 6
+#define JUMP_LABEL_TABLE_SIZE (1 << JUMP_LABEL_HASH_BITS)
+static struct hlist_head jump_label_table[JUMP_LABEL_TABLE_SIZE];
+
+/* mutex to protect coming/going of the jump_label table */
+static DEFINE_MUTEX(jump_label_mutex);
+
+struct jump_label_entry {
+ struct hlist_node hlist;
+ struct jump_entry *table;
+ int nr_entries;
+ /* hang modules off here */
+ struct hlist_head modules;
+ unsigned long key;
+};
+
+struct jump_label_module_entry {
+ struct hlist_node hlist;
+ struct jump_entry *table;
+ int nr_entries;
+ struct module *mod;
+};
+
+static int jump_label_cmp(const void *a, const void *b)
+{
+ const struct jump_entry *jea = a;
+ const struct jump_entry *jeb = b;
+
+ if (jea->key < jeb->key)
+ return -1;
+
+ if (jea->key > jeb->key)
+ return 1;
+
+ return 0;
+}
+
+static void
+sort_jump_label_entries(struct jump_entry *start, struct jump_entry *stop)
+{
+ unsigned long size;
+
+ size = (((unsigned long)stop - (unsigned long)start)
+ / sizeof(struct jump_entry));
+ sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
+}
+
+static struct jump_label_entry *get_jump_label_entry(jump_label_t key)
+{
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct jump_label_entry *e;
+ u32 hash = jhash((void *)&key, sizeof(jump_label_t), 0);
+
+ head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)];
+ hlist_for_each_entry(e, node, head, hlist) {
+ if (key == e->key)
+ return e;
+ }
+ return NULL;
+}
+
+static struct jump_label_entry *
+add_jump_label_entry(jump_label_t key, int nr_entries, struct jump_entry *table)
+{
+ struct hlist_head *head;
+ struct jump_label_entry *e;
+ u32 hash;
+
+ e = get_jump_label_entry(key);
+ if (e)
+ return ERR_PTR(-EEXIST);
+
+ e = kmalloc(sizeof(struct jump_label_entry), GFP_KERNEL);
+ if (!e)
+ return ERR_PTR(-ENOMEM);
+
+ hash = jhash((void *)&key, sizeof(jump_label_t), 0);
+ head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)];
+ e->key = key;
+ e->table = table;
+ e->nr_entries = nr_entries;
+ INIT_HLIST_HEAD(&(e->modules));
+ hlist_add_head(&e->hlist, head);
+ return e;
+}
+
+static int
+build_jump_label_hashtable(struct jump_entry *start, struct jump_entry *stop)
+{
+ struct jump_entry *iter, *iter_begin;
+ struct jump_label_entry *entry;
+ int count;
+
+ sort_jump_label_entries(start, stop);
+ iter = start;
+ while (iter < stop) {
+ entry = get_jump_label_entry(iter->key);
+ if (!entry) {
+ iter_begin = iter;
+ count = 0;
+ while ((iter < stop) &&
+ (iter->key == iter_begin->key)) {
+ iter++;
+ count++;
+ }
+ entry = add_jump_label_entry(iter_begin->key,
+ count, iter_begin);
+ if (IS_ERR(entry))
+ return PTR_ERR(entry);
+ } else {
+ WARN_ONCE(1, KERN_ERR "build_jump_label_hashtable: unexpected entry!\n");
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/**
+ * jump_label_update - update jump label text
+ * @key - key value associated with a jump label
+ * @type - enum set to JUMP_LABEL_ENABLE or JUMP_LABEL_DISABLE
+ *
+ * Will enable/disable the jump for jump label @key, depending on the
+ * value of @type.
+ *
+ */
+
+void jump_label_update(unsigned long key, enum jump_label_type type)
+{
+ struct jump_entry *iter;
+ struct jump_label_entry *entry;
+ struct hlist_node *module_node;
+ struct jump_label_module_entry *e_module;
+ int count;
+
+ mutex_lock(&jump_label_mutex);
+ entry = get_jump_label_entry((jump_label_t)key);
+ if (entry) {
+ count = entry->nr_entries;
+ iter = entry->table;
+ while (count--) {
+ if (kernel_text_address(iter->code))
+ arch_jump_label_transform(iter, type);
+ iter++;
+ }
+ /* enable/disable jump labels in modules */
+ hlist_for_each_entry(e_module, module_node, &(entry->modules),
+ hlist) {
+ count = e_module->nr_entries;
+ iter = e_module->table;
+ while (count--) {
+ if (kernel_text_address(iter->code))
+ arch_jump_label_transform(iter, type);
+ iter++;
+ }
+ }
+ }
+ mutex_unlock(&jump_label_mutex);
+}
+
+static int addr_conflict(struct jump_entry *entry, void *start, void *end)
+{
+ if (entry->code <= (unsigned long)end &&
+ entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
+ return 1;
+
+ return 0;
+}
+
+#ifdef CONFIG_MODULES
+
+static int module_conflict(void *start, void *end)
+{
+ struct hlist_head *head;
+ struct hlist_node *node, *node_next, *module_node, *module_node_next;
+ struct jump_label_entry *e;
+ struct jump_label_module_entry *e_module;
+ struct jump_entry *iter;
+ int i, count;
+ int conflict = 0;
+
+ for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) {
+ head = &jump_label_table[i];
+ hlist_for_each_entry_safe(e, node, node_next, head, hlist) {
+ hlist_for_each_entry_safe(e_module, module_node,
+ module_node_next,
+ &(e->modules), hlist) {
+ count = e_module->nr_entries;
+ iter = e_module->table;
+ while (count--) {
+ if (addr_conflict(iter, start, end)) {
+ conflict = 1;
+ goto out;
+ }
+ iter++;
+ }
+ }
+ }
+ }
+out:
+ return conflict;
+}
+
+#endif
+
+/**
+ * jump_label_text_reserved - check if addr range is reserved
+ * @start: start text addr
+ * @end: end text addr
+ *
+ * checks if the text addr located between @start and @end
+ * overlaps with any of the jump label patch addresses. Code
+ * that wants to modify kernel text should first verify that
+ * it does not overlap with any of the jump label addresses.
+ *
+ * returns 1 if there is an overlap, 0 otherwise
+ */
+int jump_label_text_reserved(void *start, void *end)
+{
+ struct jump_entry *iter;
+ struct jump_entry *iter_start = __start___jump_table;
+ struct jump_entry *iter_stop = __stop___jump_table;
+ int conflict = 0;
+
+ mutex_lock(&jump_label_mutex);
+ iter = iter_start;
+ while (iter < iter_stop) {
+ if (addr_conflict(iter, start, end)) {
+ conflict = 1;
+ goto out;
+ }
+ iter++;
+ }
+
+ /* now check modules */
+#ifdef CONFIG_MODULES
+ conflict = module_conflict(start, end);
+#endif
+out:
+ mutex_unlock(&jump_label_mutex);
+ return conflict;
+}
+
+static __init int init_jump_label(void)
+{
+ int ret;
+ struct jump_entry *iter_start = __start___jump_table;
+ struct jump_entry *iter_stop = __stop___jump_table;
+ struct jump_entry *iter;
+
+ mutex_lock(&jump_label_mutex);
+ ret = build_jump_label_hashtable(__start___jump_table,
+ __stop___jump_table);
+ iter = iter_start;
+ while (iter < iter_stop) {
+ arch_jump_label_text_poke_early(iter->code);
+ iter++;
+ }
+ mutex_unlock(&jump_label_mutex);
+ return ret;
+}
+early_initcall(init_jump_label);
+
+#ifdef CONFIG_MODULES
+
+static struct jump_label_module_entry *
+add_jump_label_module_entry(struct jump_label_entry *entry,
+ struct jump_entry *iter_begin,
+ int count, struct module *mod)
+{
+ struct jump_label_module_entry *e;
+
+ e = kmalloc(sizeof(struct jump_label_module_entry), GFP_KERNEL);
+ if (!e)
+ return ERR_PTR(-ENOMEM);
+ e->mod = mod;
+ e->nr_entries = count;
+ e->table = iter_begin;
+ hlist_add_head(&e->hlist, &entry->modules);
+ return e;
+}
+
+static int add_jump_label_module(struct module *mod)
+{
+ struct jump_entry *iter, *iter_begin;
+ struct jump_label_entry *entry;
+ struct jump_label_module_entry *module_entry;
+ int count;
+
+ /* if the module doesn't have jump label entries, just return */
+ if (!mod->num_jump_entries)
+ return 0;
+
+ sort_jump_label_entries(mod->jump_entries,
+ mod->jump_entries + mod->num_jump_entries);
+ iter = mod->jump_entries;
+ while (iter < mod->jump_entries + mod->num_jump_entries) {
+ entry = get_jump_label_entry(iter->key);
+ iter_begin = iter;
+ count = 0;
+ while ((iter < mod->jump_entries + mod->num_jump_entries) &&
+ (iter->key == iter_begin->key)) {
+ iter++;
+ count++;
+ }
+ if (!entry) {
+ entry = add_jump_label_entry(iter_begin->key, 0, NULL);
+ if (IS_ERR(entry))
+ return PTR_ERR(entry);
+ }
+ module_entry = add_jump_label_module_entry(entry, iter_begin,
+ count, mod);
+ if (IS_ERR(module_entry))
+ return PTR_ERR(module_entry);
+ }
+ return 0;
+}
+
+static void remove_jump_label_module(struct module *mod)
+{
+ struct hlist_head *head;
+ struct hlist_node *node, *node_next, *module_node, *module_node_next;
+ struct jump_label_entry *e;
+ struct jump_label_module_entry *e_module;
+ int i;
+
+ /* if the module doesn't have jump label entries, just return */
+ if (!mod->num_jump_entries)
+ return;
+
+ for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) {
+ head = &jump_label_table[i];
+ hlist_for_each_entry_safe(e, node, node_next, head, hlist) {
+ hlist_for_each_entry_safe(e_module, module_node,
+ module_node_next,
+ &(e->modules), hlist) {
+ if (e_module->mod == mod) {
+ hlist_del(&e_module->hlist);
+ kfree(e_module);
+ }
+ }
+ if (hlist_empty(&e->modules) && (e->nr_entries == 0)) {
+ hlist_del(&e->hlist);
+ kfree(e);
+ }
+ }
+ }
+}
+
+static int
+jump_label_module_notify(struct notifier_block *self, unsigned long val,
+ void *data)
+{
+ struct module *mod = data;
+ int ret = 0;
+
+ switch (val) {
+ case MODULE_STATE_COMING:
+ mutex_lock(&jump_label_mutex);
+ ret = add_jump_label_module(mod);
+ if (ret)
+ remove_jump_label_module(mod);
+ mutex_unlock(&jump_label_mutex);
+ break;
+ case MODULE_STATE_GOING:
+ mutex_lock(&jump_label_mutex);
+ remove_jump_label_module(mod);
+ mutex_unlock(&jump_label_mutex);
+ break;
+ }
+ return ret;
+}
+
+/**
+ * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
+ * @mod: module to patch
+ *
+ * Allow for run-time selection of the optimal nops. Before the module
+ * loads, patch these with arch_get_jump_label_nop(), which is specified by
+ * the arch-specific jump label code.
+ */
+void jump_label_apply_nops(struct module *mod)
+{
+ struct jump_entry *iter;
+
+ /* if the module doesn't have jump label entries, just return */
+ if (!mod->num_jump_entries)
+ return;
+
+ iter = mod->jump_entries;
+ while (iter < mod->jump_entries + mod->num_jump_entries) {
+ arch_jump_label_text_poke_early(iter->code);
+ iter++;
+ }
+}
+
+struct notifier_block jump_label_module_nb = {
+ .notifier_call = jump_label_module_notify,
+ .priority = 0,
+};
+
+static __init int init_jump_label_module(void)
+{
+ return register_module_notifier(&jump_label_module_nb);
+}
+early_initcall(init_jump_label_module);
+
+#endif /* CONFIG_MODULES */
+
+#endif
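
The kprobes hunk further down is the first caller of jump_label_text_reserved(); any other text patcher would follow the same pattern. A sketch under that assumption (hypothetical function name):

/* Hypothetical caller: refuse to patch text owned by a jump label. */
static int my_patch_text(void *addr, size_t len)
{
	if (jump_label_text_reserved(addr, addr + len - 1))
		return -EBUSY;	/* overlaps a jump label patch site */
	/* ... otherwise safe to modify the instruction bytes ... */
	return 0;
}

Note the range is treated inclusively by addr_conflict(), which is why kprobes below passes (p->addr, p->addr) for a single probe address.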
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index 6b5580c..01a0700 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -365,8 +365,6 @@
n = setup_sgl_buf(sgl, fifo->data + off, nents, l);
n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l);
- if (n)
- sg_mark_end(sgl + n - 1);
return n;
}
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 282035f..ec4210c 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -47,6 +47,7 @@
#include <linux/memory.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
+#include <linux/jump_label.h>
#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
@@ -399,7 +400,7 @@
* Return an optimized kprobe whose optimizing code replaces
* instructions including addr (exclude breakpoint).
*/
-struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
+static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
{
int i;
struct kprobe *p = NULL;
@@ -831,6 +832,7 @@
void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
struct hlist_head **head, unsigned long *flags)
+__acquires(hlist_lock)
{
unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
spinlock_t *hlist_lock;
@@ -842,6 +844,7 @@
static void __kprobes kretprobe_table_lock(unsigned long hash,
unsigned long *flags)
+__acquires(hlist_lock)
{
spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
spin_lock_irqsave(hlist_lock, *flags);
@@ -849,6 +852,7 @@
void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
unsigned long *flags)
+__releases(hlist_lock)
{
unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
spinlock_t *hlist_lock;
@@ -857,7 +861,9 @@
spin_unlock_irqrestore(hlist_lock, *flags);
}
-void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
+static void __kprobes kretprobe_table_unlock(unsigned long hash,
+ unsigned long *flags)
+__releases(hlist_lock)
{
spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
spin_unlock_irqrestore(hlist_lock, *flags);
@@ -1141,7 +1147,8 @@
preempt_disable();
if (!kernel_text_address((unsigned long) p->addr) ||
in_kprobes_functions((unsigned long) p->addr) ||
- ftrace_text_reserved(p->addr, p->addr)) {
+ ftrace_text_reserved(p->addr, p->addr) ||
+ jump_label_text_reserved(p->addr, p->addr)) {
preempt_enable();
return -EINVAL;
}
@@ -1339,18 +1346,19 @@
if (num <= 0)
return -EINVAL;
for (i = 0; i < num; i++) {
- unsigned long addr;
+ unsigned long addr, offset;
jp = jps[i];
addr = arch_deref_entry_point(jp->entry);
- if (!kernel_text_address(addr))
- ret = -EINVAL;
- else {
- /* Todo: Verify probepoint is a function entry point */
+ /* Verify probepoint is a function entry point */
+ if (kallsyms_lookup_size_offset(addr, NULL, &offset) &&
+ offset == 0) {
jp->kp.pre_handler = setjmp_pre_handler;
jp->kp.break_handler = longjmp_break_handler;
ret = register_kprobe(&jp->kp);
- }
+ } else
+ ret = -EINVAL;
+
if (ret < 0) {
if (i > 0)
unregister_jprobes(jps, i);
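
The rewritten register_jprobes() loop above leans on kallsyms to verify that the probe address is a function entry; the check behaves roughly like this sketch (illustrative only):

unsigned long size, offset;

/*
 * kallsyms_lookup_size_offset() resolves addr to its containing
 * symbol; offset == 0 means addr is the symbol's first byte,
 * i.e. a function entry point suitable for a jprobe.
 */
if (kallsyms_lookup_size_offset(addr, &size, &offset) && offset == 0)
	pr_debug("entry point, symbol spans %lu bytes\n", size);
else
	pr_debug("mid-function or unresolvable address\n");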
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index f2852a5..42ba65d 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -639,6 +639,16 @@
}
#endif
+ if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
+ debug_locks_off();
+ printk(KERN_ERR
+ "BUG: looking up invalid subclass: %u\n", subclass);
+ printk(KERN_ERR
+ "turning off the locking correctness validator.\n");
+ dump_stack();
+ return NULL;
+ }
+
/*
* Static locks do not have their class-keys yet - for them the key
* is the lock object itself:
@@ -774,7 +784,9 @@
raw_local_irq_restore(flags);
if (!subclass || force)
- lock->class_cache = class;
+ lock->class_cache[0] = class;
+ else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
+ lock->class_cache[subclass] = class;
if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
return NULL;
@@ -2679,7 +2691,11 @@
void lockdep_init_map(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, int subclass)
{
- lock->class_cache = NULL;
+ int i;
+
+ for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
+ lock->class_cache[i] = NULL;
+
#ifdef CONFIG_LOCK_STAT
lock->cpu = raw_smp_processor_id();
#endif
@@ -2739,21 +2755,13 @@
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return 0;
- if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
- debug_locks_off();
- printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
- printk("turning off the locking correctness validator.\n");
- dump_stack();
- return 0;
- }
-
if (lock->key == &__lockdep_no_validate__)
check = 1;
- if (!subclass)
- class = lock->class_cache;
+ if (subclass < NR_LOCKDEP_CACHING_CLASSES)
+ class = lock->class_cache[subclass];
/*
- * Not cached yet or subclass?
+ * Not cached?
*/
if (unlikely(!class)) {
class = register_lock_class(lock, subclass, 0);
@@ -2918,7 +2926,7 @@
return 1;
if (hlock->references) {
- struct lock_class *class = lock->class_cache;
+ struct lock_class *class = lock->class_cache[0];
if (!class)
class = look_up_lock_class(lock, 0);
@@ -3559,7 +3567,12 @@
if (list_empty(head))
continue;
list_for_each_entry_safe(class, next, head, hash_entry) {
- if (unlikely(class == lock->class_cache)) {
+ int match = 0;
+
+ for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
+ match |= class == lock->class_cache[j];
+
+ if (unlikely(match)) {
if (debug_locks_off_graph_unlock())
WARN_ON(1);
goto out_restore;
@@ -3775,7 +3788,7 @@
* Careful: only use this function if you are sure that
* the task cannot run in parallel!
*/
-void __debug_show_held_locks(struct task_struct *task)
+void debug_show_held_locks(struct task_struct *task)
{
if (unlikely(!debug_locks)) {
printk("INFO: lockdep is turned off.\n");
@@ -3783,12 +3796,6 @@
}
lockdep_print_held_locks(task);
}
-EXPORT_SYMBOL_GPL(__debug_show_held_locks);
-
-void debug_show_held_locks(struct task_struct *task)
-{
- __debug_show_held_locks(task);
-}
EXPORT_SYMBOL_GPL(debug_show_held_locks);
void lockdep_sys_exit(void)
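
The new class_cache[] array caches one lock class per low-numbered subclass, so the spin_lock_nested()-style annotations no longer fall back to a hash lookup on every acquisition. A sketch of the pattern that benefits (hypothetical locks sharing one class, lockdep enabled):

struct mutex inode_a, inode_b;	/* two locks of the same class */

mutex_lock(&inode_a);				/* subclass 0: class_cache[0] */
mutex_lock_nested(&inode_b, SINGLE_DEPTH_NESTING);	/* subclass 1: class_cache[1] */
mutex_unlock(&inode_b);
mutex_unlock(&inode_a);

Subclasses at or above NR_LOCKDEP_CACHING_CLASSES still take the slow lookup path.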
diff --git a/kernel/module.c b/kernel/module.c
index d0b5f8d..2df4630 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -55,6 +55,7 @@
#include <linux/async.h>
#include <linux/percpu.h>
#include <linux/kmemleak.h>
+#include <linux/jump_label.h>
#define CREATE_TRACE_POINTS
#include <trace/events/module.h>
@@ -1537,6 +1538,7 @@
{
struct module *mod = _mod;
list_del(&mod->list);
+ module_bug_cleanup(mod);
return 0;
}
@@ -2308,6 +2310,11 @@
sizeof(*mod->tracepoints),
&mod->num_tracepoints);
#endif
+#ifdef HAVE_JUMP_LABEL
+ mod->jump_entries = section_objs(info, "__jump_table",
+ sizeof(*mod->jump_entries),
+ &mod->num_jump_entries);
+#endif
#ifdef CONFIG_EVENT_TRACING
mod->trace_events = section_objs(info, "_ftrace_events",
sizeof(*mod->trace_events),
@@ -2625,6 +2632,7 @@
if (err < 0)
goto ddebug;
+ module_bug_finalize(info.hdr, info.sechdrs, mod);
list_add_rcu(&mod->list, &modules);
mutex_unlock(&module_mutex);
@@ -2650,6 +2658,8 @@
mutex_lock(&module_mutex);
/* Unlink carefully: kallsyms could be walking list. */
list_del_rcu(&mod->list);
+ module_bug_cleanup(mod);
+
ddebug:
if (!mod->taints)
dynamic_debug_remove(info.debug);
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 4c0b7b3..200407c 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -36,15 +36,6 @@
# include <asm/mutex.h>
#endif
-/***
- * mutex_init - initialize the mutex
- * @lock: the mutex to be initialized
- * @key: the lock_class_key for the class; used by mutex lock debugging
- *
- * Initialize the mutex to unlocked state.
- *
- * It is not allowed to initialize an already locked mutex.
- */
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
@@ -68,7 +59,7 @@
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);
-/***
+/**
* mutex_lock - acquire the mutex
* @lock: the mutex to be acquired
*
@@ -105,7 +96,7 @@
static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
-/***
+/**
* mutex_unlock - release the mutex
* @lock: the mutex to be released
*
@@ -364,8 +355,8 @@
static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
-/***
- * mutex_lock_interruptible - acquire the mutex, interruptable
+/**
+ * mutex_lock_interruptible - acquire the mutex, interruptible
* @lock: the mutex to be acquired
*
* Lock the mutex like mutex_lock(), and return 0 if the mutex has
@@ -456,15 +447,15 @@
return prev == 1;
}
-/***
- * mutex_trylock - try acquire the mutex, without waiting
+/**
+ * mutex_trylock - try to acquire the mutex, without waiting
* @lock: the mutex to be acquired
*
* Try to acquire the mutex atomically. Returns 1 if the mutex
* has been acquired successfully, and 0 on contention.
*
* NOTE: this function follows the spin_trylock() convention, so
- * it is negated to the down_trylock() return values! Be careful
+ * it is negated from the down_trylock() return values! Be careful
* about this when converting semaphore users to mutexes.
*
* This function must not be used in interrupt context. The
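
The negated convention called out in the fixed comment, side by side (sketch with hypothetical lock names):

if (mutex_trylock(&my_mutex)) {		/* 1 means acquired, like spin_trylock() */
	/* ... critical section ... */
	mutex_unlock(&my_mutex);
}

if (down_trylock(&my_sem) == 0) {	/* semaphores: 0 means acquired */
	/* ... critical section ... */
	up(&my_sem);
}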
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 403d180..f309e80 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -31,24 +31,18 @@
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>
-#include <linux/hw_breakpoint.h>
#include <asm/irq_regs.h>
-/*
- * Each CPU has a list of per CPU events:
- */
-static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
-
-int perf_max_events __read_mostly = 1;
-static int perf_reserved_percpu __read_mostly;
-static int perf_overcommit __read_mostly = 1;
-
-static atomic_t nr_events __read_mostly;
+atomic_t perf_task_events __read_mostly;
static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;
+static LIST_HEAD(pmus);
+static DEFINE_MUTEX(pmus_lock);
+static struct srcu_struct pmus_srcu;
+
/*
* perf event paranoia level:
* -1 - not paranoid at all
@@ -67,36 +61,43 @@
static atomic64_t perf_event_id;
-/*
- * Lock for (sysadmin-configurable) event reservations:
- */
-static DEFINE_SPINLOCK(perf_resource_lock);
-
-/*
- * Architecture provided APIs - weak aliases:
- */
-extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
-{
- return NULL;
-}
-
-void __weak hw_perf_disable(void) { barrier(); }
-void __weak hw_perf_enable(void) { barrier(); }
-
void __weak perf_event_print_debug(void) { }
-static DEFINE_PER_CPU(int, perf_disable_count);
-
-void perf_disable(void)
+extern __weak const char *perf_pmu_name(void)
{
- if (!__get_cpu_var(perf_disable_count)++)
- hw_perf_disable();
+ return "pmu";
}
-void perf_enable(void)
+void perf_pmu_disable(struct pmu *pmu)
{
- if (!--__get_cpu_var(perf_disable_count))
- hw_perf_enable();
+ int *count = this_cpu_ptr(pmu->pmu_disable_count);
+ if (!(*count)++)
+ pmu->pmu_disable(pmu);
+}
+
+void perf_pmu_enable(struct pmu *pmu)
+{
+ int *count = this_cpu_ptr(pmu->pmu_disable_count);
+ if (!--(*count))
+ pmu->pmu_enable(pmu);
+}
+
+static DEFINE_PER_CPU(struct list_head, rotation_list);
+
+/*
+ * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
+ * because they're strictly cpu affine and rotate_start is called with IRQs
+ * disabled, while rotate_context is called from IRQ context.
+ */
+static void perf_pmu_rotate_start(struct pmu *pmu)
+{
+ struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+ struct list_head *head = &__get_cpu_var(rotation_list);
+
+ WARN_ON(!irqs_disabled());
+
+ if (list_empty(&cpuctx->rotation_list))
+ list_add(&cpuctx->rotation_list, head);
}
static void get_ctx(struct perf_event_context *ctx)
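
perf_pmu_disable()/perf_pmu_enable() replace the old global hw_perf_disable()/hw_perf_enable() pair with a per-pmu, per-cpu depth count, so disabled sections nest cheaply. A sketch of the semantics above:

perf_pmu_disable(pmu);	/* count 0 -> 1: pmu->pmu_disable(pmu) runs */
perf_pmu_disable(pmu);	/* count 1 -> 2: no hardware access */
/* ... reprogram or rotate events ... */
perf_pmu_enable(pmu);	/* count 2 -> 1: still disabled */
perf_pmu_enable(pmu);	/* count 1 -> 0: pmu->pmu_enable(pmu) runs */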
@@ -151,13 +152,13 @@
* the context could get moved to another task.
*/
static struct perf_event_context *
-perf_lock_task_context(struct task_struct *task, unsigned long *flags)
+perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
{
struct perf_event_context *ctx;
rcu_read_lock();
- retry:
- ctx = rcu_dereference(task->perf_event_ctxp);
+retry:
+ ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
if (ctx) {
/*
* If this context is a clone of another, it might
@@ -170,7 +171,7 @@
* can't get swapped on us any more.
*/
raw_spin_lock_irqsave(&ctx->lock, *flags);
- if (ctx != rcu_dereference(task->perf_event_ctxp)) {
+ if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
raw_spin_unlock_irqrestore(&ctx->lock, *flags);
goto retry;
}
@@ -189,12 +190,13 @@
* can't get swapped to another task. This also increments its
* reference count so that the context can't get freed.
*/
-static struct perf_event_context *perf_pin_task_context(struct task_struct *task)
+static struct perf_event_context *
+perf_pin_task_context(struct task_struct *task, int ctxn)
{
struct perf_event_context *ctx;
unsigned long flags;
- ctx = perf_lock_task_context(task, &flags);
+ ctx = perf_lock_task_context(task, ctxn, &flags);
if (ctx) {
++ctx->pin_count;
raw_spin_unlock_irqrestore(&ctx->lock, flags);
@@ -302,6 +304,8 @@
}
list_add_rcu(&event->event_entry, &ctx->event_list);
+ if (!ctx->nr_events)
+ perf_pmu_rotate_start(ctx->pmu);
ctx->nr_events++;
if (event->attr.inherit_stat)
ctx->nr_stat++;
@@ -311,7 +315,12 @@
{
struct perf_event *group_leader = event->group_leader;
- WARN_ON_ONCE(event->attach_state & PERF_ATTACH_GROUP);
+ /*
+ * We can have double attach due to group movement in perf_event_open.
+ */
+ if (event->attach_state & PERF_ATTACH_GROUP)
+ return;
+
event->attach_state |= PERF_ATTACH_GROUP;
if (group_leader == event)
@@ -402,21 +411,40 @@
}
}
-static void
-event_sched_out(struct perf_event *event,
+static inline int
+event_filter_match(struct perf_event *event)
+{
+ return event->cpu == -1 || event->cpu == smp_processor_id();
+}
+
+static int
+__event_sched_out(struct perf_event *event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
+ u64 delta;
+ /*
+ * An event which could not be activated because of
+ * filter mismatch still needs to have its timings
+ * maintained, otherwise bogus information is returned
+ * via read() for time_enabled, time_running:
+ */
+ if (event->state == PERF_EVENT_STATE_INACTIVE
+ && !event_filter_match(event)) {
+ delta = ctx->time - event->tstamp_stopped;
+ event->tstamp_running += delta;
+ event->tstamp_stopped = ctx->time;
+ }
+
if (event->state != PERF_EVENT_STATE_ACTIVE)
- return;
+ return 0;
event->state = PERF_EVENT_STATE_INACTIVE;
if (event->pending_disable) {
event->pending_disable = 0;
event->state = PERF_EVENT_STATE_OFF;
}
- event->tstamp_stopped = ctx->time;
- event->pmu->disable(event);
+ event->pmu->del(event, 0);
event->oncpu = -1;
if (!is_software_event(event))
@@ -424,6 +452,19 @@
ctx->nr_active--;
if (event->attr.exclusive || !cpuctx->active_oncpu)
cpuctx->exclusive = 0;
+ return 1;
+}
+
+static void
+event_sched_out(struct perf_event *event,
+ struct perf_cpu_context *cpuctx,
+ struct perf_event_context *ctx)
+{
+ int ret;
+
+ ret = __event_sched_out(event, cpuctx, ctx);
+ if (ret)
+ event->tstamp_stopped = ctx->time;
}
static void
@@ -432,9 +473,7 @@
struct perf_event_context *ctx)
{
struct perf_event *event;
-
- if (group_event->state != PERF_EVENT_STATE_ACTIVE)
- return;
+ int state = group_event->state;
event_sched_out(group_event, cpuctx, ctx);
@@ -444,10 +483,16 @@
list_for_each_entry(event, &group_event->sibling_list, group_entry)
event_sched_out(event, cpuctx, ctx);
- if (group_event->attr.exclusive)
+ if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
cpuctx->exclusive = 0;
}
+static inline struct perf_cpu_context *
+__get_cpu_context(struct perf_event_context *ctx)
+{
+ return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
+}
+
/*
* Cross CPU call to remove a performance event
*
@@ -456,9 +501,9 @@
*/
static void __perf_event_remove_from_context(void *info)
{
- struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
struct perf_event *event = info;
struct perf_event_context *ctx = event->ctx;
+ struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
/*
* If this is a task context, we need to check whether it is
@@ -469,27 +514,11 @@
return;
raw_spin_lock(&ctx->lock);
- /*
- * Protect the list operation against NMI by disabling the
- * events on a global level.
- */
- perf_disable();
event_sched_out(event, cpuctx, ctx);
list_del_event(event, ctx);
- if (!ctx->task) {
- /*
- * Allow more per task events with respect to the
- * reservation:
- */
- cpuctx->max_pertask =
- min(perf_max_events - ctx->nr_events,
- perf_max_events - perf_reserved_percpu);
- }
-
- perf_enable();
raw_spin_unlock(&ctx->lock);
}
@@ -554,8 +583,8 @@
static void __perf_event_disable(void *info)
{
struct perf_event *event = info;
- struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
struct perf_event_context *ctx = event->ctx;
+ struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
/*
* If this is a per-task event, need to check whether this
@@ -610,7 +639,7 @@
return;
}
- retry:
+retry:
task_oncpu_function_call(task, __perf_event_disable, event);
raw_spin_lock_irq(&ctx->lock);
@@ -635,7 +664,7 @@
}
static int
-event_sched_in(struct perf_event *event,
+__event_sched_in(struct perf_event *event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
@@ -649,14 +678,12 @@
*/
smp_wmb();
- if (event->pmu->enable(event)) {
+ if (event->pmu->add(event, PERF_EF_START)) {
event->state = PERF_EVENT_STATE_INACTIVE;
event->oncpu = -1;
return -EAGAIN;
}
- event->tstamp_running += ctx->time - event->tstamp_stopped;
-
if (!is_software_event(event))
cpuctx->active_oncpu++;
ctx->nr_active++;
@@ -667,28 +694,56 @@
return 0;
}
+static inline int
+event_sched_in(struct perf_event *event,
+ struct perf_cpu_context *cpuctx,
+ struct perf_event_context *ctx)
+{
+ int ret = __event_sched_in(event, cpuctx, ctx);
+ if (ret)
+ return ret;
+ event->tstamp_running += ctx->time - event->tstamp_stopped;
+ return 0;
+}
+
+static void
+group_commit_event_sched_in(struct perf_event *group_event,
+ struct perf_cpu_context *cpuctx,
+ struct perf_event_context *ctx)
+{
+ struct perf_event *event;
+ u64 now = ctx->time;
+
+ group_event->tstamp_running += now - group_event->tstamp_stopped;
+ /*
+ * Schedule in siblings as one group (if any):
+ */
+ list_for_each_entry(event, &group_event->sibling_list, group_entry) {
+ event->tstamp_running += now - event->tstamp_stopped;
+ }
+}
+
static int
group_sched_in(struct perf_event *group_event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
struct perf_event *event, *partial_group = NULL;
- const struct pmu *pmu = group_event->pmu;
- bool txn = false;
+ struct pmu *pmu = group_event->pmu;
if (group_event->state == PERF_EVENT_STATE_OFF)
return 0;
- /* Check if group transaction availabe */
- if (pmu->start_txn)
- txn = true;
+ pmu->start_txn(pmu);
- if (txn)
- pmu->start_txn(pmu);
-
- if (event_sched_in(group_event, cpuctx, ctx)) {
- if (txn)
- pmu->cancel_txn(pmu);
+ /*
+ * use __event_sched_in() to delay updating tstamp_running
+ * until the transaction is committed. In case of failure
+ * we will keep an unmodified tstamp_running which is a
+ * requirement to get correct timing information
+ */
+ if (__event_sched_in(group_event, cpuctx, ctx)) {
+ pmu->cancel_txn(pmu);
return -EAGAIN;
}
@@ -696,29 +751,33 @@
* Schedule in siblings as one group (if any):
*/
list_for_each_entry(event, &group_event->sibling_list, group_entry) {
- if (event_sched_in(event, cpuctx, ctx)) {
+ if (__event_sched_in(event, cpuctx, ctx)) {
partial_group = event;
goto group_error;
}
}
- if (!txn || !pmu->commit_txn(pmu))
+ if (!pmu->commit_txn(pmu)) {
+ /* commit tstamp_running */
+ group_commit_event_sched_in(group_event, cpuctx, ctx);
return 0;
-
+ }
group_error:
/*
* Groups can be scheduled in as one unit only, so undo any
* partial group before returning:
+ *
+ * use __event_sched_out() to avoid updating tstamp_stopped
+ * because the event never actually ran
*/
list_for_each_entry(event, &group_event->sibling_list, group_entry) {
if (event == partial_group)
break;
- event_sched_out(event, cpuctx, ctx);
+ __event_sched_out(event, cpuctx, ctx);
}
- event_sched_out(group_event, cpuctx, ctx);
+ __event_sched_out(group_event, cpuctx, ctx);
- if (txn)
- pmu->cancel_txn(pmu);
+ pmu->cancel_txn(pmu);
return -EAGAIN;
}
@@ -771,10 +830,10 @@
*/
static void __perf_install_in_context(void *info)
{
- struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
struct perf_event *event = info;
struct perf_event_context *ctx = event->ctx;
struct perf_event *leader = event->group_leader;
+ struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
int err;
/*
@@ -794,12 +853,6 @@
ctx->is_active = 1;
update_context_time(ctx);
- /*
- * Protect the list operation against NMI by disabling the
- * events on a global level. NOP for non NMI based events.
- */
- perf_disable();
-
add_event_to_ctx(event, ctx);
if (event->cpu != -1 && event->cpu != smp_processor_id())
@@ -837,12 +890,7 @@
}
}
- if (!err && !ctx->task && cpuctx->max_pertask)
- cpuctx->max_pertask--;
-
- unlock:
- perf_enable();
-
+unlock:
raw_spin_unlock(&ctx->lock);
}
@@ -865,6 +913,8 @@
{
struct task_struct *task = ctx->task;
+ event->ctx = ctx;
+
if (!task) {
/*
* Per cpu events are installed via an smp call and
@@ -913,10 +963,12 @@
event->state = PERF_EVENT_STATE_INACTIVE;
event->tstamp_enabled = ctx->time - event->total_time_enabled;
- list_for_each_entry(sub, &event->sibling_list, group_entry)
- if (sub->state >= PERF_EVENT_STATE_INACTIVE)
+ list_for_each_entry(sub, &event->sibling_list, group_entry) {
+ if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
sub->tstamp_enabled =
ctx->time - sub->total_time_enabled;
+ }
+ }
}
/*
@@ -925,9 +977,9 @@
static void __perf_event_enable(void *info)
{
struct perf_event *event = info;
- struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
struct perf_event_context *ctx = event->ctx;
struct perf_event *leader = event->group_leader;
+ struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
int err;
/*
@@ -961,12 +1013,10 @@
if (!group_can_go_on(event, cpuctx, 1)) {
err = -EEXIST;
} else {
- perf_disable();
if (event == leader)
err = group_sched_in(event, cpuctx, ctx);
else
err = event_sched_in(event, cpuctx, ctx);
- perf_enable();
}
if (err) {
@@ -982,7 +1032,7 @@
}
}
- unlock:
+unlock:
raw_spin_unlock(&ctx->lock);
}
@@ -1023,7 +1073,7 @@
if (event->state == PERF_EVENT_STATE_ERROR)
event->state = PERF_EVENT_STATE_OFF;
- retry:
+retry:
raw_spin_unlock_irq(&ctx->lock);
task_oncpu_function_call(task, __perf_event_enable, event);
@@ -1043,7 +1093,7 @@
if (event->state == PERF_EVENT_STATE_OFF)
__perf_event_mark_enabled(event, ctx);
- out:
+out:
raw_spin_unlock_irq(&ctx->lock);
}
@@ -1074,26 +1124,26 @@
struct perf_event *event;
raw_spin_lock(&ctx->lock);
+ perf_pmu_disable(ctx->pmu);
ctx->is_active = 0;
if (likely(!ctx->nr_events))
goto out;
update_context_time(ctx);
- perf_disable();
if (!ctx->nr_active)
- goto out_enable;
+ goto out;
- if (event_type & EVENT_PINNED)
+ if (event_type & EVENT_PINNED) {
list_for_each_entry(event, &ctx->pinned_groups, group_entry)
group_sched_out(event, cpuctx, ctx);
+ }
- if (event_type & EVENT_FLEXIBLE)
+ if (event_type & EVENT_FLEXIBLE) {
list_for_each_entry(event, &ctx->flexible_groups, group_entry)
group_sched_out(event, cpuctx, ctx);
-
- out_enable:
- perf_enable();
- out:
+ }
+out:
+ perf_pmu_enable(ctx->pmu);
raw_spin_unlock(&ctx->lock);
}
@@ -1191,34 +1241,25 @@
}
}
-/*
- * Called from scheduler to remove the events of the current task,
- * with interrupts disabled.
- *
- * We stop each event and update the event value in event->count.
- *
- * This does not protect us against NMI, but disable()
- * sets the disabled bit in the control field of event _before_
- * accessing the event control register. If a NMI hits, then it will
- * not restart the event.
- */
-void perf_event_task_sched_out(struct task_struct *task,
- struct task_struct *next)
+void perf_event_context_sched_out(struct task_struct *task, int ctxn,
+ struct task_struct *next)
{
- struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
- struct perf_event_context *ctx = task->perf_event_ctxp;
+ struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
struct perf_event_context *next_ctx;
struct perf_event_context *parent;
+ struct perf_cpu_context *cpuctx;
int do_switch = 1;
- perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
+ if (likely(!ctx))
+ return;
- if (likely(!ctx || !cpuctx->task_ctx))
+ cpuctx = __get_cpu_context(ctx);
+ if (!cpuctx->task_ctx)
return;
rcu_read_lock();
parent = rcu_dereference(ctx->parent_ctx);
- next_ctx = next->perf_event_ctxp;
+ next_ctx = next->perf_event_ctxp[ctxn];
if (parent && next_ctx &&
rcu_dereference(next_ctx->parent_ctx) == parent) {
/*
@@ -1237,8 +1278,8 @@
* XXX do we need a memory barrier of sorts
* wrt to rcu_dereference() of perf_event_ctxp
*/
- task->perf_event_ctxp = next_ctx;
- next->perf_event_ctxp = ctx;
+ task->perf_event_ctxp[ctxn] = next_ctx;
+ next->perf_event_ctxp[ctxn] = ctx;
ctx->task = next;
next_ctx->task = task;
do_switch = 0;
@@ -1256,10 +1297,35 @@
}
}
+#define for_each_task_context_nr(ctxn) \
+ for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
+
+/*
+ * Called from scheduler to remove the events of the current task,
+ * with interrupts disabled.
+ *
+ * We stop each event and update the event value in event->count.
+ *
+ * This does not protect us against NMI, but disable()
+ * sets the disabled bit in the control field of event _before_
+ * accessing the event control register. If a NMI hits, then it will
+ * not restart the event.
+ */
+void __perf_event_task_sched_out(struct task_struct *task,
+ struct task_struct *next)
+{
+ int ctxn;
+
+ perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
+
+ for_each_task_context_nr(ctxn)
+ perf_event_context_sched_out(task, ctxn, next);
+}
+
static void task_ctx_sched_out(struct perf_event_context *ctx,
enum event_type_t event_type)
{
- struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+ struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
if (!cpuctx->task_ctx)
return;
@@ -1274,14 +1340,6 @@
/*
* Called with IRQs disabled
*/
-static void __perf_event_task_sched_out(struct perf_event_context *ctx)
-{
- task_ctx_sched_out(ctx, EVENT_ALL);
-}
-
-/*
- * Called with IRQs disabled
- */
static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
enum event_type_t event_type)
{
@@ -1332,9 +1390,10 @@
if (event->cpu != -1 && event->cpu != smp_processor_id())
continue;
- if (group_can_go_on(event, cpuctx, can_add_hw))
+ if (group_can_go_on(event, cpuctx, can_add_hw)) {
if (group_sched_in(event, cpuctx, ctx))
can_add_hw = 0;
+ }
}
}
@@ -1350,8 +1409,6 @@
ctx->timestamp = perf_clock();
- perf_disable();
-
/*
* First go through the list and put on any pinned groups
* in order to give them the best chance of going on.
@@ -1363,8 +1420,7 @@
if (event_type & EVENT_FLEXIBLE)
ctx_flexible_sched_in(ctx, cpuctx);
- perf_enable();
- out:
+out:
raw_spin_unlock(&ctx->lock);
}
@@ -1376,43 +1432,28 @@
ctx_sched_in(ctx, cpuctx, event_type);
}
-static void task_ctx_sched_in(struct task_struct *task,
+static void task_ctx_sched_in(struct perf_event_context *ctx,
enum event_type_t event_type)
{
- struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
- struct perf_event_context *ctx = task->perf_event_ctxp;
+ struct perf_cpu_context *cpuctx;
- if (likely(!ctx))
- return;
+ cpuctx = __get_cpu_context(ctx);
if (cpuctx->task_ctx == ctx)
return;
+
ctx_sched_in(ctx, cpuctx, event_type);
cpuctx->task_ctx = ctx;
}
-/*
- * Called from scheduler to add the events of the current task
- * with interrupts disabled.
- *
- * We restore the event value and then enable it.
- *
- * This does not protect us against NMI, but enable()
- * sets the enabled bit in the control field of event _before_
- * accessing the event control register. If a NMI hits, then it will
- * keep the event running.
- */
-void perf_event_task_sched_in(struct task_struct *task)
+
+void perf_event_context_sched_in(struct perf_event_context *ctx)
{
- struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
- struct perf_event_context *ctx = task->perf_event_ctxp;
+ struct perf_cpu_context *cpuctx;
- if (likely(!ctx))
- return;
-
+ cpuctx = __get_cpu_context(ctx);
if (cpuctx->task_ctx == ctx)
return;
- perf_disable();
-
+ perf_pmu_disable(ctx->pmu);
/*
* We want to keep the following priority order:
* cpu pinned (that don't need to move), task pinned,
@@ -1426,7 +1467,37 @@
cpuctx->task_ctx = ctx;
- perf_enable();
+ /*
+ * Since these rotations are per-cpu, we need to ensure the
+ * cpu-context we got scheduled on is actually rotating.
+ */
+ perf_pmu_rotate_start(ctx->pmu);
+ perf_pmu_enable(ctx->pmu);
+}
+
+/*
+ * Called from scheduler to add the events of the current task
+ * with interrupts disabled.
+ *
+ * We restore the event value and then enable it.
+ *
+ * This does not protect us against NMI, but enable()
+ * sets the enabled bit in the control field of event _before_
+ * accessing the event control register. If a NMI hits, then it will
+ * keep the event running.
+ */
+void __perf_event_task_sched_in(struct task_struct *task)
+{
+ struct perf_event_context *ctx;
+ int ctxn;
+
+ for_each_task_context_nr(ctxn) {
+ ctx = task->perf_event_ctxp[ctxn];
+ if (likely(!ctx))
+ continue;
+
+ perf_event_context_sched_in(ctx);
+ }
}
#define MAX_INTERRUPTS (~0ULL)
@@ -1506,22 +1577,6 @@
return div64_u64(dividend, divisor);
}
-static void perf_event_stop(struct perf_event *event)
-{
- if (!event->pmu->stop)
- return event->pmu->disable(event);
-
- return event->pmu->stop(event);
-}
-
-static int perf_event_start(struct perf_event *event)
-{
- if (!event->pmu->start)
- return event->pmu->enable(event);
-
- return event->pmu->start(event);
-}
-
static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
{
struct hw_perf_event *hwc = &event->hw;
@@ -1541,15 +1596,13 @@
hwc->sample_period = sample_period;
if (local64_read(&hwc->period_left) > 8*sample_period) {
- perf_disable();
- perf_event_stop(event);
+ event->pmu->stop(event, PERF_EF_UPDATE);
local64_set(&hwc->period_left, 0);
- perf_event_start(event);
- perf_enable();
+ event->pmu->start(event, PERF_EF_RELOAD);
}
}
-static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
+static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
{
struct perf_event *event;
struct hw_perf_event *hwc;
@@ -1574,23 +1627,19 @@
*/
if (interrupts == MAX_INTERRUPTS) {
perf_log_throttle(event, 1);
- perf_disable();
- event->pmu->unthrottle(event);
- perf_enable();
+ event->pmu->start(event, 0);
}
if (!event->attr.freq || !event->attr.sample_freq)
continue;
- perf_disable();
event->pmu->read(event);
now = local64_read(&event->count);
delta = now - hwc->freq_count_stamp;
hwc->freq_count_stamp = now;
if (delta > 0)
- perf_adjust_period(event, TICK_NSEC, delta);
- perf_enable();
+ perf_adjust_period(event, period, delta);
}
raw_spin_unlock(&ctx->lock);
}
@@ -1608,32 +1657,38 @@
raw_spin_unlock(&ctx->lock);
}
-void perf_event_task_tick(struct task_struct *curr)
+/*
+ * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
+ * because they're strictly cpu affine and rotate_start is called with IRQs
+ * disabled, while rotate_context is called from IRQ context.
+ */
+static void perf_rotate_context(struct perf_cpu_context *cpuctx)
{
- struct perf_cpu_context *cpuctx;
- struct perf_event_context *ctx;
- int rotate = 0;
+ u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
+ struct perf_event_context *ctx = NULL;
+ int rotate = 0, remove = 1;
- if (!atomic_read(&nr_events))
- return;
+ if (cpuctx->ctx.nr_events) {
+ remove = 0;
+ if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
+ rotate = 1;
+ }
- cpuctx = &__get_cpu_var(perf_cpu_context);
- if (cpuctx->ctx.nr_events &&
- cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
- rotate = 1;
+ ctx = cpuctx->task_ctx;
+ if (ctx && ctx->nr_events) {
+ remove = 0;
+ if (ctx->nr_events != ctx->nr_active)
+ rotate = 1;
+ }
- ctx = curr->perf_event_ctxp;
- if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active)
- rotate = 1;
-
- perf_ctx_adjust_freq(&cpuctx->ctx);
+ perf_pmu_disable(cpuctx->ctx.pmu);
+ perf_ctx_adjust_freq(&cpuctx->ctx, interval);
if (ctx)
- perf_ctx_adjust_freq(ctx);
+ perf_ctx_adjust_freq(ctx, interval);
if (!rotate)
- return;
+ goto done;
- perf_disable();
cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
if (ctx)
task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
@@ -1644,8 +1699,27 @@
cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
if (ctx)
- task_ctx_sched_in(curr, EVENT_FLEXIBLE);
- perf_enable();
+ task_ctx_sched_in(ctx, EVENT_FLEXIBLE);
+
+done:
+ if (remove)
+ list_del_init(&cpuctx->rotation_list);
+
+ perf_pmu_enable(cpuctx->ctx.pmu);
+}
+
+void perf_event_task_tick(void)
+{
+ struct list_head *head = &__get_cpu_var(rotation_list);
+ struct perf_cpu_context *cpuctx, *tmp;
+
+ WARN_ON(!irqs_disabled());
+
+ list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
+ if (cpuctx->jiffies_interval == 1 ||
+ !(jiffies % cpuctx->jiffies_interval))
+ perf_rotate_context(cpuctx);
+ }
}
static int event_enable_on_exec(struct perf_event *event,
@@ -1667,20 +1741,18 @@
* Enable all of a task's events that have been marked enable-on-exec.
* This expects task == current.
*/
-static void perf_event_enable_on_exec(struct task_struct *task)
+static void perf_event_enable_on_exec(struct perf_event_context *ctx)
{
- struct perf_event_context *ctx;
struct perf_event *event;
unsigned long flags;
int enabled = 0;
int ret;
local_irq_save(flags);
- ctx = task->perf_event_ctxp;
if (!ctx || !ctx->nr_events)
goto out;
- __perf_event_task_sched_out(ctx);
+ task_ctx_sched_out(ctx, EVENT_ALL);
raw_spin_lock(&ctx->lock);
@@ -1704,8 +1776,8 @@
raw_spin_unlock(&ctx->lock);
- perf_event_task_sched_in(task);
- out:
+ perf_event_context_sched_in(ctx);
+out:
local_irq_restore(flags);
}
@@ -1714,9 +1786,9 @@
*/
static void __perf_event_read(void *info)
{
- struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
struct perf_event *event = info;
struct perf_event_context *ctx = event->ctx;
+ struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
/*
* If this is a task context, we need to check whether it is
@@ -1755,7 +1827,13 @@
unsigned long flags;
raw_spin_lock_irqsave(&ctx->lock, flags);
- update_context_time(ctx);
+ /*
+ * may read while context is not active
+ * (e.g., thread is blocked), in that case
+ * we cannot update context time
+ */
+ if (ctx->is_active)
+ update_context_time(ctx);
update_event_times(event);
raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
@@ -1764,11 +1842,219 @@
}
/*
+ * Callchain support
+ */
+
+struct callchain_cpus_entries {
+ struct rcu_head rcu_head;
+ struct perf_callchain_entry *cpu_entries[0];
+};
+
+static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
+static atomic_t nr_callchain_events;
+static DEFINE_MUTEX(callchain_mutex);
+struct callchain_cpus_entries *callchain_cpus_entries;
+
+
+__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
+ struct pt_regs *regs)
+{
+}
+
+__weak void perf_callchain_user(struct perf_callchain_entry *entry,
+ struct pt_regs *regs)
+{
+}
+
+static void release_callchain_buffers_rcu(struct rcu_head *head)
+{
+ struct callchain_cpus_entries *entries;
+ int cpu;
+
+ entries = container_of(head, struct callchain_cpus_entries, rcu_head);
+
+ for_each_possible_cpu(cpu)
+ kfree(entries->cpu_entries[cpu]);
+
+ kfree(entries);
+}
+
+static void release_callchain_buffers(void)
+{
+ struct callchain_cpus_entries *entries;
+
+ entries = callchain_cpus_entries;
+ rcu_assign_pointer(callchain_cpus_entries, NULL);
+ call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
+}
+
+static int alloc_callchain_buffers(void)
+{
+ int cpu;
+ int size;
+ struct callchain_cpus_entries *entries;
+
+ /*
+ * We can't use the percpu allocation API for data that can be
+ * accessed from NMI. Use a temporary manual per cpu allocation
+ * until that gets sorted out.
+ */
+ size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) *
+ num_possible_cpus();
+
+ entries = kzalloc(size, GFP_KERNEL);
+ if (!entries)
+ return -ENOMEM;
+
+ size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
+
+ for_each_possible_cpu(cpu) {
+ entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
+ cpu_to_node(cpu));
+ if (!entries->cpu_entries[cpu])
+ goto fail;
+ }
+
+ rcu_assign_pointer(callchain_cpus_entries, entries);
+
+ return 0;
+
+fail:
+ for_each_possible_cpu(cpu)
+ kfree(entries->cpu_entries[cpu]);
+ kfree(entries);
+
+ return -ENOMEM;
+}
+
+static int get_callchain_buffers(void)
+{
+ int err = 0;
+ int count;
+
+ mutex_lock(&callchain_mutex);
+
+ count = atomic_inc_return(&nr_callchain_events);
+ if (WARN_ON_ONCE(count < 1)) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if (count > 1) {
+ /* If the allocation failed, give up */
+ if (!callchain_cpus_entries)
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ err = alloc_callchain_buffers();
+ if (err)
+ release_callchain_buffers();
+exit:
+ mutex_unlock(&callchain_mutex);
+
+ return err;
+}
+
+static void put_callchain_buffers(void)
+{
+ if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
+ release_callchain_buffers();
+ mutex_unlock(&callchain_mutex);
+ }
+}
+
+static int get_recursion_context(int *recursion)
+{
+ int rctx;
+
+ if (in_nmi())
+ rctx = 3;
+ else if (in_irq())
+ rctx = 2;
+ else if (in_softirq())
+ rctx = 1;
+ else
+ rctx = 0;
+
+ if (recursion[rctx])
+ return -1;
+
+ recursion[rctx]++;
+ barrier();
+
+ return rctx;
+}
+
+static inline void put_recursion_context(int *recursion, int rctx)
+{
+ barrier();
+ recursion[rctx]--;
+}
+
+static struct perf_callchain_entry *get_callchain_entry(int *rctx)
+{
+ int cpu;
+ struct callchain_cpus_entries *entries;
+
+ *rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
+ if (*rctx == -1)
+ return NULL;
+
+ entries = rcu_dereference(callchain_cpus_entries);
+ if (!entries)
+ return NULL;
+
+ cpu = smp_processor_id();
+
+ return &entries->cpu_entries[cpu][*rctx];
+}
+
+static void
+put_callchain_entry(int rctx)
+{
+ put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
+}
+
+static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+{
+ int rctx;
+ struct perf_callchain_entry *entry;
+
+
+ entry = get_callchain_entry(&rctx);
+ if (rctx == -1)
+ return NULL;
+
+ if (!entry)
+ goto exit_put;
+
+ entry->nr = 0;
+
+ if (!user_mode(regs)) {
+ perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
+ perf_callchain_kernel(entry, regs);
+ if (current->mm)
+ regs = task_pt_regs(current);
+ else
+ regs = NULL;
+ }
+
+ if (regs) {
+ perf_callchain_store(entry, PERF_CONTEXT_USER);
+ perf_callchain_user(entry, regs);
+ }
+
+exit_put:
+ put_callchain_entry(rctx);
+
+ return entry;
+}
+
+/*
* Initialize the perf_event context in a task_struct:
*/
-static void
-__perf_event_init_context(struct perf_event_context *ctx,
- struct task_struct *task)
+static void __perf_event_init_context(struct perf_event_context *ctx)
{
raw_spin_lock_init(&ctx->lock);
mutex_init(&ctx->mutex);
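
get_recursion_context()/put_recursion_context() give each execution context (task, softirq, hardirq, NMI) its own recursion flag and callchain buffer slot; get_callchain_entry() above uses them as in this restated sketch:

int rctx;

rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
if (rctx == -1)
	return NULL;	/* already unwinding in this context: bail */
/* ... use the per-context entry slot safely ... */
put_recursion_context(__get_cpu_var(callchain_recursion), rctx);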
@@ -1776,45 +2062,38 @@
INIT_LIST_HEAD(&ctx->flexible_groups);
INIT_LIST_HEAD(&ctx->event_list);
atomic_set(&ctx->refcount, 1);
- ctx->task = task;
}
-static struct perf_event_context *find_get_context(pid_t pid, int cpu)
+static struct perf_event_context *
+alloc_perf_context(struct pmu *pmu, struct task_struct *task)
{
struct perf_event_context *ctx;
- struct perf_cpu_context *cpuctx;
+
+ ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
+ __perf_event_init_context(ctx);
+ if (task) {
+ ctx->task = task;
+ get_task_struct(task);
+ }
+ ctx->pmu = pmu;
+
+ return ctx;
+}
+
+static struct task_struct *
+find_lively_task_by_vpid(pid_t vpid)
+{
struct task_struct *task;
- unsigned long flags;
int err;
- if (pid == -1 && cpu != -1) {
- /* Must be root to operate on a CPU event: */
- if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
- return ERR_PTR(-EACCES);
-
- if (cpu < 0 || cpu >= nr_cpumask_bits)
- return ERR_PTR(-EINVAL);
-
- /*
- * We could be clever and allow to attach a event to an
- * offline CPU and activate it when the CPU comes up, but
- * that's for later.
- */
- if (!cpu_online(cpu))
- return ERR_PTR(-ENODEV);
-
- cpuctx = &per_cpu(perf_cpu_context, cpu);
- ctx = &cpuctx->ctx;
- get_ctx(ctx);
-
- return ctx;
- }
-
rcu_read_lock();
- if (!pid)
+ if (!vpid)
task = current;
else
- task = find_task_by_vpid(pid);
+ task = find_task_by_vpid(vpid);
if (task)
get_task_struct(task);
rcu_read_unlock();
@@ -1834,36 +2113,78 @@
if (!ptrace_may_access(task, PTRACE_MODE_READ))
goto errout;
- retry:
- ctx = perf_lock_task_context(task, &flags);
+ return task;
+errout:
+ put_task_struct(task);
+ return ERR_PTR(err);
+
+}
+
+static struct perf_event_context *
+find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
+{
+ struct perf_event_context *ctx;
+ struct perf_cpu_context *cpuctx;
+ unsigned long flags;
+ int ctxn, err;
+
+ if (!task && cpu != -1) {
+ /* Must be root to operate on a CPU event: */
+ if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
+ return ERR_PTR(-EACCES);
+
+ if (cpu < 0 || cpu >= nr_cpumask_bits)
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * We could be clever and allow attaching an event to an
+ * offline CPU and activate it when the CPU comes up, but
+ * that's for later.
+ */
+ if (!cpu_online(cpu))
+ return ERR_PTR(-ENODEV);
+
+ cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+ ctx = &cpuctx->ctx;
+ get_ctx(ctx);
+
+ return ctx;
+ }
+
+ err = -EINVAL;
+ ctxn = pmu->task_ctx_nr;
+ if (ctxn < 0)
+ goto errout;
+
+retry:
+ ctx = perf_lock_task_context(task, ctxn, &flags);
if (ctx) {
unclone_ctx(ctx);
raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
if (!ctx) {
- ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
+ ctx = alloc_perf_context(pmu, task);
err = -ENOMEM;
if (!ctx)
goto errout;
- __perf_event_init_context(ctx, task);
+
get_ctx(ctx);
- if (cmpxchg(&task->perf_event_ctxp, NULL, ctx)) {
+
+ if (cmpxchg(&task->perf_event_ctxp[ctxn], NULL, ctx)) {
/*
* We raced with some other task; use
* the context they set.
*/
+ put_task_struct(task);
kfree(ctx);
goto retry;
}
- get_task_struct(task);
}
- put_task_struct(task);
return ctx;
- errout:
- put_task_struct(task);
+errout:
return ERR_PTR(err);
}
@@ -1880,21 +2201,23 @@
kfree(event);
}
-static void perf_pending_sync(struct perf_event *event);
static void perf_buffer_put(struct perf_buffer *buffer);
static void free_event(struct perf_event *event)
{
- perf_pending_sync(event);
+ irq_work_sync(&event->pending);
if (!event->parent) {
- atomic_dec(&nr_events);
+ if (event->attach_state & PERF_ATTACH_TASK)
+ jump_label_dec(&perf_task_events);
if (event->attr.mmap || event->attr.mmap_data)
atomic_dec(&nr_mmap_events);
if (event->attr.comm)
atomic_dec(&nr_comm_events);
if (event->attr.task)
atomic_dec(&nr_task_events);
+ if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
+ put_callchain_buffers();
}
if (event->buffer) {
@@ -1905,7 +2228,9 @@
if (event->destroy)
event->destroy(event);
- put_ctx(event->ctx);
+ if (event->ctx)
+ put_ctx(event->ctx);
+
call_rcu(&event->rcu_head, free_event_rcu);
}
@@ -2184,15 +2509,13 @@
static int perf_event_period(struct perf_event *event, u64 __user *arg)
{
struct perf_event_context *ctx = event->ctx;
- unsigned long size;
int ret = 0;
u64 value;
if (!event->attr.sample_period)
return -EINVAL;
- size = copy_from_user(&value, arg, sizeof(value));
- if (size != sizeof(value))
+ if (copy_from_user(&value, arg, sizeof(value)))
return -EFAULT;
if (!value)
@@ -2326,6 +2649,9 @@
static int perf_event_index(struct perf_event *event)
{
+ if (event->hw.state & PERF_HES_STOPPED)
+ return 0;
+
if (event->state != PERF_EVENT_STATE_ACTIVE)
return 0;
@@ -2829,16 +3155,7 @@
}
}
-/*
- * Pending wakeups
- *
- * Handle the case where we need to wakeup up from NMI (or rq->lock) context.
- *
- * The NMI bit means we cannot possibly take locks. Therefore, maintain a
- * single linked list and use cmpxchg() to add entries lockless.
- */
-
-static void perf_pending_event(struct perf_pending_entry *entry)
+static void perf_pending_event(struct irq_work *entry)
{
struct perf_event *event = container_of(entry,
struct perf_event, pending);
@@ -2854,99 +3171,6 @@
}
}
-#define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
-
-static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
- PENDING_TAIL,
-};
-
-static void perf_pending_queue(struct perf_pending_entry *entry,
- void (*func)(struct perf_pending_entry *))
-{
- struct perf_pending_entry **head;
-
- if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
- return;
-
- entry->func = func;
-
- head = &get_cpu_var(perf_pending_head);
-
- do {
- entry->next = *head;
- } while (cmpxchg(head, entry->next, entry) != entry->next);
-
- set_perf_event_pending();
-
- put_cpu_var(perf_pending_head);
-}
-
-static int __perf_pending_run(void)
-{
- struct perf_pending_entry *list;
- int nr = 0;
-
- list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
- while (list != PENDING_TAIL) {
- void (*func)(struct perf_pending_entry *);
- struct perf_pending_entry *entry = list;
-
- list = list->next;
-
- func = entry->func;
- entry->next = NULL;
- /*
- * Ensure we observe the unqueue before we issue the wakeup,
- * so that we won't be waiting forever.
- * -- see perf_not_pending().
- */
- smp_wmb();
-
- func(entry);
- nr++;
- }
-
- return nr;
-}
-
-static inline int perf_not_pending(struct perf_event *event)
-{
- /*
- * If we flush on whatever cpu we run, there is a chance we don't
- * need to wait.
- */
- get_cpu();
- __perf_pending_run();
- put_cpu();
-
- /*
- * Ensure we see the proper queue state before going to sleep
- * so that we do not miss the wakeup. -- see perf_pending_handle()
- */
- smp_rmb();
- return event->pending.next == NULL;
-}
-
-static void perf_pending_sync(struct perf_event *event)
-{
- wait_event(event->waitq, perf_not_pending(event));
-}
-
-void perf_event_do_pending(void)
-{
- __perf_pending_run();
-}
-
-/*
- * Callchain support -- arch specific
- */
-
-__weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
-{
- return NULL;
-}
-
-
/*
* We assume there is only KVM supporting the callbacks.
* Later on, we might change it to a list if there is
@@ -2996,8 +3220,7 @@
if (handle->nmi) {
handle->event->pending_wakeup = 1;
- perf_pending_queue(&handle->event->pending,
- perf_pending_event);
+ irq_work_queue(&handle->event->pending);
} else
perf_event_wakeup(handle->event);
}
@@ -3053,7 +3276,7 @@
if (handle->wakeup != local_read(&buffer->wakeup))
perf_output_wakeup(handle);
- out:
+out:
preempt_enable();
}
@@ -3441,14 +3664,20 @@
struct perf_output_handle handle;
struct perf_event_header header;
+ /* protect the callchain buffers */
+ rcu_read_lock();
+
perf_prepare_sample(&header, data, event, regs);
if (perf_output_begin(&handle, event, header.size, nmi, 1))
- return;
+ goto exit;
perf_output_sample(&handle, &header, data, event);
perf_output_end(&handle);
+
+exit:
+ rcu_read_unlock();
}
/*
@@ -3562,16 +3791,27 @@
static void perf_event_task_event(struct perf_task_event *task_event)
{
struct perf_cpu_context *cpuctx;
- struct perf_event_context *ctx = task_event->task_ctx;
+ struct perf_event_context *ctx;
+ struct pmu *pmu;
+ int ctxn;
rcu_read_lock();
- cpuctx = &get_cpu_var(perf_cpu_context);
- perf_event_task_ctx(&cpuctx->ctx, task_event);
- if (!ctx)
- ctx = rcu_dereference(current->perf_event_ctxp);
- if (ctx)
- perf_event_task_ctx(ctx, task_event);
- put_cpu_var(perf_cpu_context);
+ list_for_each_entry_rcu(pmu, &pmus, entry) {
+ cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+ perf_event_task_ctx(&cpuctx->ctx, task_event);
+
+ ctx = task_event->task_ctx;
+ if (!ctx) {
+ ctxn = pmu->task_ctx_nr;
+ if (ctxn < 0)
+ goto next;
+ ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
+ }
+ if (ctx)
+ perf_event_task_ctx(ctx, task_event);
+next:
+ put_cpu_ptr(pmu->pmu_cpu_context);
+ }
rcu_read_unlock();
}
@@ -3676,8 +3916,10 @@
{
struct perf_cpu_context *cpuctx;
struct perf_event_context *ctx;
- unsigned int size;
char comm[TASK_COMM_LEN];
+ unsigned int size;
+ struct pmu *pmu;
+ int ctxn;
memset(comm, 0, sizeof(comm));
strlcpy(comm, comm_event->task->comm, sizeof(comm));
@@ -3689,21 +3931,36 @@
comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
rcu_read_lock();
- cpuctx = &get_cpu_var(perf_cpu_context);
- perf_event_comm_ctx(&cpuctx->ctx, comm_event);
- ctx = rcu_dereference(current->perf_event_ctxp);
- if (ctx)
- perf_event_comm_ctx(ctx, comm_event);
- put_cpu_var(perf_cpu_context);
+ list_for_each_entry_rcu(pmu, &pmus, entry) {
+ cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+ perf_event_comm_ctx(&cpuctx->ctx, comm_event);
+
+ ctxn = pmu->task_ctx_nr;
+ if (ctxn < 0)
+ goto next;
+
+ ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
+ if (ctx)
+ perf_event_comm_ctx(ctx, comm_event);
+next:
+ put_cpu_ptr(pmu->pmu_cpu_context);
+ }
rcu_read_unlock();
}
void perf_event_comm(struct task_struct *task)
{
struct perf_comm_event comm_event;
+ struct perf_event_context *ctx;
+ int ctxn;
- if (task->perf_event_ctxp)
- perf_event_enable_on_exec(task);
+ for_each_task_context_nr(ctxn) {
+ ctx = task->perf_event_ctxp[ctxn];
+ if (!ctx)
+ continue;
+
+ perf_event_enable_on_exec(ctx);
+ }
if (!atomic_read(&nr_comm_events))
return;
@@ -3805,6 +4062,8 @@
char tmp[16];
char *buf = NULL;
const char *name;
+ struct pmu *pmu;
+ int ctxn;
memset(tmp, 0, sizeof(tmp));
@@ -3857,12 +4116,23 @@
mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
rcu_read_lock();
- cpuctx = &get_cpu_var(perf_cpu_context);
- perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, vma->vm_flags & VM_EXEC);
- ctx = rcu_dereference(current->perf_event_ctxp);
- if (ctx)
- perf_event_mmap_ctx(ctx, mmap_event, vma->vm_flags & VM_EXEC);
- put_cpu_var(perf_cpu_context);
+ list_for_each_entry_rcu(pmu, &pmus, entry) {
+ cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+ perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
+ vma->vm_flags & VM_EXEC);
+
+ ctxn = pmu->task_ctx_nr;
+ if (ctxn < 0)
+ goto next;
+
+ ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
+ if (ctx) {
+ perf_event_mmap_ctx(ctx, mmap_event,
+ vma->vm_flags & VM_EXEC);
+ }
+next:
+ put_cpu_ptr(pmu->pmu_cpu_context);
+ }
rcu_read_unlock();
kfree(buf);
@@ -3944,8 +4214,6 @@
struct hw_perf_event *hwc = &event->hw;
int ret = 0;
- throttle = (throttle && event->pmu->unthrottle != NULL);
-
if (!throttle) {
hwc->interrupts++;
} else {
@@ -3988,8 +4256,7 @@
event->pending_kill = POLL_HUP;
if (nmi) {
event->pending_disable = 1;
- perf_pending_queue(&event->pending,
- perf_pending_event);
+ irq_work_queue(&event->pending);
} else
perf_event_disable(event);
}
@@ -4013,6 +4280,17 @@
* Generic software event infrastructure
*/
+struct swevent_htable {
+ struct swevent_hlist *swevent_hlist;
+ struct mutex hlist_mutex;
+ int hlist_refcount;
+
+ /* Recursion avoidance in each contexts */
+ int recursion[PERF_NR_CONTEXTS];
+};
+
+static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
+
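Note (editorial, not part of the patch): the software-event hash list and
the recursion counters move out of perf_cpu_context into this dedicated
per-cpu swevent_htable. The two per-cpu accessors the patch uses for it,
shown on a hypothetical struct:

	#include <linux/percpu.h>

	struct sketch_state {
		int refcount;
	};
	static DEFINE_PER_CPU(struct sketch_state, sketch_state);

	static void sketch_touch(int cpu)
	{
		/* local CPU's copy; caller must keep preemption disabled */
		__get_cpu_var(sketch_state).refcount++;
		/* a specific CPU's copy, e.g. from hotplug callbacks */
		per_cpu(sketch_state, cpu).refcount++;
	}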
/*
* We directly increment event->count and keep a second value in
* event->hw.period_left to count intervals. This period event
@@ -4070,7 +4348,7 @@
}
}
-static void perf_swevent_add(struct perf_event *event, u64 nr,
+static void perf_swevent_event(struct perf_event *event, u64 nr,
int nmi, struct perf_sample_data *data,
struct pt_regs *regs)
{
@@ -4096,6 +4374,9 @@
static int perf_exclude_event(struct perf_event *event,
struct pt_regs *regs)
{
+ if (event->hw.state & PERF_HES_STOPPED)
+ return 0;
+
if (regs) {
if (event->attr.exclude_user && user_mode(regs))
return 1;
@@ -4142,11 +4423,11 @@
/* For the read side: events when they trigger */
static inline struct hlist_head *
-find_swevent_head_rcu(struct perf_cpu_context *ctx, u64 type, u32 event_id)
+find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
{
struct swevent_hlist *hlist;
- hlist = rcu_dereference(ctx->swevent_hlist);
+ hlist = rcu_dereference(swhash->swevent_hlist);
if (!hlist)
return NULL;
@@ -4155,7 +4436,7 @@
/* For the event head insertion and removal in the hlist */
static inline struct hlist_head *
-find_swevent_head(struct perf_cpu_context *ctx, struct perf_event *event)
+find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
{
struct swevent_hlist *hlist;
u32 event_id = event->attr.config;
@@ -4166,7 +4447,7 @@
* and release. Which makes the protected version suitable here.
* The context lock guarantees that.
*/
- hlist = rcu_dereference_protected(ctx->swevent_hlist,
+ hlist = rcu_dereference_protected(swhash->swevent_hlist,
lockdep_is_held(&event->ctx->lock));
if (!hlist)
return NULL;
@@ -4179,23 +4460,19 @@
struct perf_sample_data *data,
struct pt_regs *regs)
{
- struct perf_cpu_context *cpuctx;
+ struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
struct perf_event *event;
struct hlist_node *node;
struct hlist_head *head;
- cpuctx = &__get_cpu_var(perf_cpu_context);
-
rcu_read_lock();
-
- head = find_swevent_head_rcu(cpuctx, type, event_id);
-
+ head = find_swevent_head_rcu(swhash, type, event_id);
if (!head)
goto end;
hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
if (perf_swevent_match(event, type, event_id, data, regs))
- perf_swevent_add(event, nr, nmi, data, regs);
+ perf_swevent_event(event, nr, nmi, data, regs);
}
end:
rcu_read_unlock();
@@ -4203,33 +4480,17 @@
int perf_swevent_get_recursion_context(void)
{
- struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
- int rctx;
+ struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
- if (in_nmi())
- rctx = 3;
- else if (in_irq())
- rctx = 2;
- else if (in_softirq())
- rctx = 1;
- else
- rctx = 0;
-
- if (cpuctx->recursion[rctx])
- return -1;
-
- cpuctx->recursion[rctx]++;
- barrier();
-
- return rctx;
+ return get_recursion_context(swhash->recursion);
}
EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
void inline perf_swevent_put_recursion_context(int rctx)
{
- struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
- barrier();
- cpuctx->recursion[rctx]--;
+ struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
+
+ put_recursion_context(swhash->recursion, rctx);
}
void __perf_sw_event(u32 event_id, u64 nr, int nmi,
@@ -4255,20 +4516,20 @@
{
}
-static int perf_swevent_enable(struct perf_event *event)
+static int perf_swevent_add(struct perf_event *event, int flags)
{
+ struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
struct hw_perf_event *hwc = &event->hw;
- struct perf_cpu_context *cpuctx;
struct hlist_head *head;
- cpuctx = &__get_cpu_var(perf_cpu_context);
-
if (hwc->sample_period) {
hwc->last_period = hwc->sample_period;
perf_swevent_set_period(event);
}
- head = find_swevent_head(cpuctx, event);
+ hwc->state = !(flags & PERF_EF_START);
+
+ head = find_swevent_head(swhash, event);
if (WARN_ON_ONCE(!head))
return -EINVAL;
@@ -4277,202 +4538,27 @@
return 0;
}
-static void perf_swevent_disable(struct perf_event *event)
+static void perf_swevent_del(struct perf_event *event, int flags)
{
hlist_del_rcu(&event->hlist_entry);
}
-static void perf_swevent_void(struct perf_event *event)
+static void perf_swevent_start(struct perf_event *event, int flags)
{
+ event->hw.state = 0;
}
-static int perf_swevent_int(struct perf_event *event)
+static void perf_swevent_stop(struct perf_event *event, int flags)
{
- return 0;
+ event->hw.state = PERF_HES_STOPPED;
}
-static const struct pmu perf_ops_generic = {
- .enable = perf_swevent_enable,
- .disable = perf_swevent_disable,
- .start = perf_swevent_int,
- .stop = perf_swevent_void,
- .read = perf_swevent_read,
- .unthrottle = perf_swevent_void, /* hwc->interrupts already reset */
-};
-
-/*
- * hrtimer based swevent callback
- */
-
-static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
-{
- enum hrtimer_restart ret = HRTIMER_RESTART;
- struct perf_sample_data data;
- struct pt_regs *regs;
- struct perf_event *event;
- u64 period;
-
- event = container_of(hrtimer, struct perf_event, hw.hrtimer);
- event->pmu->read(event);
-
- perf_sample_data_init(&data, 0);
- data.period = event->hw.last_period;
- regs = get_irq_regs();
-
- if (regs && !perf_exclude_event(event, regs)) {
- if (!(event->attr.exclude_idle && current->pid == 0))
- if (perf_event_overflow(event, 0, &data, regs))
- ret = HRTIMER_NORESTART;
- }
-
- period = max_t(u64, 10000, event->hw.sample_period);
- hrtimer_forward_now(hrtimer, ns_to_ktime(period));
-
- return ret;
-}
-
-static void perf_swevent_start_hrtimer(struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
-
- hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hwc->hrtimer.function = perf_swevent_hrtimer;
- if (hwc->sample_period) {
- u64 period;
-
- if (hwc->remaining) {
- if (hwc->remaining < 0)
- period = 10000;
- else
- period = hwc->remaining;
- hwc->remaining = 0;
- } else {
- period = max_t(u64, 10000, hwc->sample_period);
- }
- __hrtimer_start_range_ns(&hwc->hrtimer,
- ns_to_ktime(period), 0,
- HRTIMER_MODE_REL, 0);
- }
-}
-
-static void perf_swevent_cancel_hrtimer(struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
-
- if (hwc->sample_period) {
- ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
- hwc->remaining = ktime_to_ns(remaining);
-
- hrtimer_cancel(&hwc->hrtimer);
- }
-}
-
-/*
- * Software event: cpu wall time clock
- */
-
-static void cpu_clock_perf_event_update(struct perf_event *event)
-{
- int cpu = raw_smp_processor_id();
- s64 prev;
- u64 now;
-
- now = cpu_clock(cpu);
- prev = local64_xchg(&event->hw.prev_count, now);
- local64_add(now - prev, &event->count);
-}
-
-static int cpu_clock_perf_event_enable(struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- int cpu = raw_smp_processor_id();
-
- local64_set(&hwc->prev_count, cpu_clock(cpu));
- perf_swevent_start_hrtimer(event);
-
- return 0;
-}
-
-static void cpu_clock_perf_event_disable(struct perf_event *event)
-{
- perf_swevent_cancel_hrtimer(event);
- cpu_clock_perf_event_update(event);
-}
-
-static void cpu_clock_perf_event_read(struct perf_event *event)
-{
- cpu_clock_perf_event_update(event);
-}
-
-static const struct pmu perf_ops_cpu_clock = {
- .enable = cpu_clock_perf_event_enable,
- .disable = cpu_clock_perf_event_disable,
- .read = cpu_clock_perf_event_read,
-};
-
-/*
- * Software event: task time clock
- */
-
-static void task_clock_perf_event_update(struct perf_event *event, u64 now)
-{
- u64 prev;
- s64 delta;
-
- prev = local64_xchg(&event->hw.prev_count, now);
- delta = now - prev;
- local64_add(delta, &event->count);
-}
-
-static int task_clock_perf_event_enable(struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- u64 now;
-
- now = event->ctx->time;
-
- local64_set(&hwc->prev_count, now);
-
- perf_swevent_start_hrtimer(event);
-
- return 0;
-}
-
-static void task_clock_perf_event_disable(struct perf_event *event)
-{
- perf_swevent_cancel_hrtimer(event);
- task_clock_perf_event_update(event, event->ctx->time);
-
-}
-
-static void task_clock_perf_event_read(struct perf_event *event)
-{
- u64 time;
-
- if (!in_nmi()) {
- update_context_time(event->ctx);
- time = event->ctx->time;
- } else {
- u64 now = perf_clock();
- u64 delta = now - event->ctx->timestamp;
- time = event->ctx->time + delta;
- }
-
- task_clock_perf_event_update(event, time);
-}
-
-static const struct pmu perf_ops_task_clock = {
- .enable = task_clock_perf_event_enable,
- .disable = task_clock_perf_event_disable,
- .read = task_clock_perf_event_read,
-};
-
/* Deref the hlist from the update side */
static inline struct swevent_hlist *
-swevent_hlist_deref(struct perf_cpu_context *cpuctx)
+swevent_hlist_deref(struct swevent_htable *swhash)
{
- return rcu_dereference_protected(cpuctx->swevent_hlist,
- lockdep_is_held(&cpuctx->hlist_mutex));
+ return rcu_dereference_protected(swhash->swevent_hlist,
+ lockdep_is_held(&swhash->hlist_mutex));
}
static void swevent_hlist_release_rcu(struct rcu_head *rcu_head)
@@ -4483,27 +4569,27 @@
kfree(hlist);
}
-static void swevent_hlist_release(struct perf_cpu_context *cpuctx)
+static void swevent_hlist_release(struct swevent_htable *swhash)
{
- struct swevent_hlist *hlist = swevent_hlist_deref(cpuctx);
+ struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
if (!hlist)
return;
- rcu_assign_pointer(cpuctx->swevent_hlist, NULL);
+ rcu_assign_pointer(swhash->swevent_hlist, NULL);
call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu);
}
static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
{
- struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+ struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
- mutex_lock(&cpuctx->hlist_mutex);
+ mutex_lock(&swhash->hlist_mutex);
- if (!--cpuctx->hlist_refcount)
- swevent_hlist_release(cpuctx);
+ if (!--swhash->hlist_refcount)
+ swevent_hlist_release(swhash);
- mutex_unlock(&cpuctx->hlist_mutex);
+ mutex_unlock(&swhash->hlist_mutex);
}
static void swevent_hlist_put(struct perf_event *event)
@@ -4521,12 +4607,12 @@
static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
{
- struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+ struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
int err = 0;
- mutex_lock(&cpuctx->hlist_mutex);
+ mutex_lock(&swhash->hlist_mutex);
- if (!swevent_hlist_deref(cpuctx) && cpu_online(cpu)) {
+ if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
struct swevent_hlist *hlist;
hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
@@ -4534,11 +4620,11 @@
err = -ENOMEM;
goto exit;
}
- rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
+ rcu_assign_pointer(swhash->swevent_hlist, hlist);
}
- cpuctx->hlist_refcount++;
- exit:
- mutex_unlock(&cpuctx->hlist_mutex);
+ swhash->hlist_refcount++;
+exit:
+ mutex_unlock(&swhash->hlist_mutex);
return err;
}
@@ -4562,7 +4648,7 @@
put_online_cpus();
return 0;
- fail:
+fail:
for_each_possible_cpu(cpu) {
if (cpu == failed_cpu)
break;
@@ -4573,17 +4659,64 @@
return err;
}
-#ifdef CONFIG_EVENT_TRACING
+atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
-static const struct pmu perf_ops_tracepoint = {
- .enable = perf_trace_enable,
- .disable = perf_trace_disable,
- .start = perf_swevent_int,
- .stop = perf_swevent_void,
+static void sw_perf_event_destroy(struct perf_event *event)
+{
+ u64 event_id = event->attr.config;
+
+ WARN_ON(event->parent);
+
+ jump_label_dec(&perf_swevent_enabled[event_id]);
+ swevent_hlist_put(event);
+}
+
+static int perf_swevent_init(struct perf_event *event)
+{
+ int event_id = event->attr.config;
+
+ if (event->attr.type != PERF_TYPE_SOFTWARE)
+ return -ENOENT;
+
+ switch (event_id) {
+ case PERF_COUNT_SW_CPU_CLOCK:
+ case PERF_COUNT_SW_TASK_CLOCK:
+ return -ENOENT;
+
+ default:
+ break;
+ }
+
+ if (event_id > PERF_COUNT_SW_MAX)
+ return -ENOENT;
+
+ if (!event->parent) {
+ int err;
+
+ err = swevent_hlist_get(event);
+ if (err)
+ return err;
+
+ jump_label_inc(&perf_swevent_enabled[event_id]);
+ event->destroy = sw_perf_event_destroy;
+ }
+
+ return 0;
+}
+
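Note (editorial, not part of the patch): perf_swevent_init() pairs the
jump_label_inc() here with the jump_label_dec() in sw_perf_event_destroy()
above, so the enable key doubles as a refcount. A rough sketch of that
pairing, assuming the jump-label API of this era in which keys are plain
atomic_t counters (sketch_* names are hypothetical):

	#include <linux/jump_label.h>
	#include <linux/compiler.h>
	#include <asm/atomic.h>

	static atomic_t sketch_key;	/* one count per live user */

	static void sketch_user_up(void)   { jump_label_inc(&sketch_key); }
	static void sketch_user_down(void) { jump_label_dec(&sketch_key); }

	static void sketch_hot_path(void)
	{
		/* branch can be patched out while the count is zero */
		if (unlikely(atomic_read(&sketch_key)))
			; /* rarely-enabled slow path */
	}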
+static struct pmu perf_swevent = {
+ .task_ctx_nr = perf_sw_context,
+
+ .event_init = perf_swevent_init,
+ .add = perf_swevent_add,
+ .del = perf_swevent_del,
+ .start = perf_swevent_start,
+ .stop = perf_swevent_stop,
.read = perf_swevent_read,
- .unthrottle = perf_swevent_void,
};
+#ifdef CONFIG_EVENT_TRACING
+
static int perf_tp_filter_match(struct perf_event *event,
struct perf_sample_data *data)
{
@@ -4627,7 +4760,7 @@
hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
if (perf_tp_event_match(event, &data, regs))
- perf_swevent_add(event, count, 1, &data, regs);
+ perf_swevent_event(event, count, 1, &data, regs);
}
perf_swevent_put_recursion_context(rctx);
@@ -4639,10 +4772,13 @@
perf_trace_destroy(event);
}
-static const struct pmu *tp_perf_event_init(struct perf_event *event)
+static int perf_tp_event_init(struct perf_event *event)
{
int err;
+ if (event->attr.type != PERF_TYPE_TRACEPOINT)
+ return -ENOENT;
+
/*
* Raw tracepoint data is a severe data leak, only allow root to
* have these.
@@ -4650,15 +4786,31 @@
if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
perf_paranoid_tracepoint_raw() &&
!capable(CAP_SYS_ADMIN))
- return ERR_PTR(-EPERM);
+ return -EPERM;
err = perf_trace_init(event);
if (err)
- return NULL;
+ return err;
event->destroy = tp_perf_event_destroy;
- return &perf_ops_tracepoint;
+ return 0;
+}
+
+static struct pmu perf_tracepoint = {
+ .task_ctx_nr = perf_sw_context,
+
+ .event_init = perf_tp_event_init,
+ .add = perf_trace_add,
+ .del = perf_trace_del,
+ .start = perf_swevent_start,
+ .stop = perf_swevent_stop,
+ .read = perf_swevent_read,
+};
+
+static inline void perf_tp_register(void)
+{
+ perf_pmu_register(&perf_tracepoint);
}
static int perf_event_set_filter(struct perf_event *event, void __user *arg)
@@ -4686,9 +4838,8 @@
#else
-static const struct pmu *tp_perf_event_init(struct perf_event *event)
+static inline void perf_tp_register(void)
{
- return NULL;
}
static int perf_event_set_filter(struct perf_event *event, void __user *arg)
@@ -4703,24 +4854,6 @@
#endif /* CONFIG_EVENT_TRACING */
#ifdef CONFIG_HAVE_HW_BREAKPOINT
-static void bp_perf_event_destroy(struct perf_event *event)
-{
- release_bp_slot(event);
-}
-
-static const struct pmu *bp_perf_event_init(struct perf_event *bp)
-{
- int err;
-
- err = register_perf_hw_breakpoint(bp);
- if (err)
- return ERR_PTR(err);
-
- bp->destroy = bp_perf_event_destroy;
-
- return &perf_ops_bp;
-}
-
void perf_bp_event(struct perf_event *bp, void *data)
{
struct perf_sample_data sample;
@@ -4728,80 +4861,382 @@
perf_sample_data_init(&sample, bp->attr.bp_addr);
- if (!perf_exclude_event(bp, regs))
- perf_swevent_add(bp, 1, 1, &sample, regs);
-}
-#else
-static const struct pmu *bp_perf_event_init(struct perf_event *bp)
-{
- return NULL;
-}
-
-void perf_bp_event(struct perf_event *bp, void *regs)
-{
+ if (!bp->hw.state && !perf_exclude_event(bp, regs))
+ perf_swevent_event(bp, 1, 1, &sample, regs);
}
#endif
-atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
+/*
+ * hrtimer based swevent callback
+ */
-static void sw_perf_event_destroy(struct perf_event *event)
+static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
{
- u64 event_id = event->attr.config;
+ enum hrtimer_restart ret = HRTIMER_RESTART;
+ struct perf_sample_data data;
+ struct pt_regs *regs;
+ struct perf_event *event;
+ u64 period;
- WARN_ON(event->parent);
+ event = container_of(hrtimer, struct perf_event, hw.hrtimer);
+ event->pmu->read(event);
- atomic_dec(&perf_swevent_enabled[event_id]);
- swevent_hlist_put(event);
+ perf_sample_data_init(&data, 0);
+ data.period = event->hw.last_period;
+ regs = get_irq_regs();
+
+ if (regs && !perf_exclude_event(event, regs)) {
+ if (!(event->attr.exclude_idle && current->pid == 0))
+ if (perf_event_overflow(event, 0, &data, regs))
+ ret = HRTIMER_NORESTART;
+ }
+
+ period = max_t(u64, 10000, event->hw.sample_period);
+ hrtimer_forward_now(hrtimer, ns_to_ktime(period));
+
+ return ret;
}
-static const struct pmu *sw_perf_event_init(struct perf_event *event)
+static void perf_swevent_start_hrtimer(struct perf_event *event)
{
- const struct pmu *pmu = NULL;
- u64 event_id = event->attr.config;
+ struct hw_perf_event *hwc = &event->hw;
+
+ hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hwc->hrtimer.function = perf_swevent_hrtimer;
+ if (hwc->sample_period) {
+ s64 period = local64_read(&hwc->period_left);
+
+ if (period) {
+ if (period < 0)
+ period = 10000;
+
+ local64_set(&hwc->period_left, 0);
+ } else {
+ period = max_t(u64, 10000, hwc->sample_period);
+ }
+ __hrtimer_start_range_ns(&hwc->hrtimer,
+ ns_to_ktime(period), 0,
+ HRTIMER_MODE_REL_PINNED, 0);
+ }
+}
+
+static void perf_swevent_cancel_hrtimer(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (hwc->sample_period) {
+ ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
+ local64_set(&hwc->period_left, ktime_to_ns(remaining));
+
+ hrtimer_cancel(&hwc->hrtimer);
+ }
+}
+
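Note (editorial, not part of the patch): the hrtimer-based swevent code
returns here largely unchanged; the leftover period is now carried in
hwc->period_left (a local64_t) instead of the removed hwc->remaining, and
the timer is started CPU-pinned. The self-rearming timer pattern it is
built on, in isolation (the 10us period is illustrative):

	#include <linux/hrtimer.h>
	#include <linux/ktime.h>

	static struct hrtimer sketch_timer;

	static enum hrtimer_restart sketch_fire(struct hrtimer *timer)
	{
		/* do the periodic work, then push expiry one period ahead */
		hrtimer_forward_now(timer, ns_to_ktime(10000));
		return HRTIMER_RESTART;
	}

	static void sketch_arm(void)
	{
		hrtimer_init(&sketch_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		sketch_timer.function = sketch_fire;
		hrtimer_start(&sketch_timer, ns_to_ktime(10000),
			      HRTIMER_MODE_REL);
	}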
+/*
+ * Software event: cpu wall time clock
+ */
+
+static void cpu_clock_event_update(struct perf_event *event)
+{
+ s64 prev;
+ u64 now;
+
+ now = local_clock();
+ prev = local64_xchg(&event->hw.prev_count, now);
+ local64_add(now - prev, &event->count);
+}
+
+static void cpu_clock_event_start(struct perf_event *event, int flags)
+{
+ local64_set(&event->hw.prev_count, local_clock());
+ perf_swevent_start_hrtimer(event);
+}
+
+static void cpu_clock_event_stop(struct perf_event *event, int flags)
+{
+ perf_swevent_cancel_hrtimer(event);
+ cpu_clock_event_update(event);
+}
+
+static int cpu_clock_event_add(struct perf_event *event, int flags)
+{
+ if (flags & PERF_EF_START)
+ cpu_clock_event_start(event, flags);
+
+ return 0;
+}
+
+static void cpu_clock_event_del(struct perf_event *event, int flags)
+{
+ cpu_clock_event_stop(event, flags);
+}
+
+static void cpu_clock_event_read(struct perf_event *event)
+{
+ cpu_clock_event_update(event);
+}
+
+static int cpu_clock_event_init(struct perf_event *event)
+{
+ if (event->attr.type != PERF_TYPE_SOFTWARE)
+ return -ENOENT;
+
+ if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
+ return -ENOENT;
+
+ return 0;
+}
+
+static struct pmu perf_cpu_clock = {
+ .task_ctx_nr = perf_sw_context,
+
+ .event_init = cpu_clock_event_init,
+ .add = cpu_clock_event_add,
+ .del = cpu_clock_event_del,
+ .start = cpu_clock_event_start,
+ .stop = cpu_clock_event_stop,
+ .read = cpu_clock_event_read,
+};
+
+/*
+ * Software event: task time clock
+ */
+
+static void task_clock_event_update(struct perf_event *event, u64 now)
+{
+ u64 prev;
+ s64 delta;
+
+ prev = local64_xchg(&event->hw.prev_count, now);
+ delta = now - prev;
+ local64_add(delta, &event->count);
+}
+
+static void task_clock_event_start(struct perf_event *event, int flags)
+{
+ local64_set(&event->hw.prev_count, event->ctx->time);
+ perf_swevent_start_hrtimer(event);
+}
+
+static void task_clock_event_stop(struct perf_event *event, int flags)
+{
+ perf_swevent_cancel_hrtimer(event);
+ task_clock_event_update(event, event->ctx->time);
+}
+
+static int task_clock_event_add(struct perf_event *event, int flags)
+{
+ if (flags & PERF_EF_START)
+ task_clock_event_start(event, flags);
+
+ return 0;
+}
+
+static void task_clock_event_del(struct perf_event *event, int flags)
+{
+ task_clock_event_stop(event, PERF_EF_UPDATE);
+}
+
+static void task_clock_event_read(struct perf_event *event)
+{
+ u64 time;
+
+ if (!in_nmi()) {
+ update_context_time(event->ctx);
+ time = event->ctx->time;
+ } else {
+ u64 now = perf_clock();
+ u64 delta = now - event->ctx->timestamp;
+ time = event->ctx->time + delta;
+ }
+
+ task_clock_event_update(event, time);
+}
+
+static int task_clock_event_init(struct perf_event *event)
+{
+ if (event->attr.type != PERF_TYPE_SOFTWARE)
+ return -ENOENT;
+
+ if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
+ return -ENOENT;
+
+ return 0;
+}
+
+static struct pmu perf_task_clock = {
+ .task_ctx_nr = perf_sw_context,
+
+ .event_init = task_clock_event_init,
+ .add = task_clock_event_add,
+ .del = task_clock_event_del,
+ .start = task_clock_event_start,
+ .stop = task_clock_event_stop,
+ .read = task_clock_event_read,
+};
+
+static void perf_pmu_nop_void(struct pmu *pmu)
+{
+}
+
+static int perf_pmu_nop_int(struct pmu *pmu)
+{
+ return 0;
+}
+
+static void perf_pmu_start_txn(struct pmu *pmu)
+{
+ perf_pmu_disable(pmu);
+}
+
+static int perf_pmu_commit_txn(struct pmu *pmu)
+{
+ perf_pmu_enable(pmu);
+ return 0;
+}
+
+static void perf_pmu_cancel_txn(struct pmu *pmu)
+{
+ perf_pmu_enable(pmu);
+}
+
+/*
+ * Ensures all contexts with the same task_ctx_nr have the same
+ * pmu_cpu_context too.
+ */
+static void *find_pmu_context(int ctxn)
+{
+ struct pmu *pmu;
+
+ if (ctxn < 0)
+ return NULL;
+
+ list_for_each_entry(pmu, &pmus, entry) {
+ if (pmu->task_ctx_nr == ctxn)
+ return pmu->pmu_cpu_context;
+ }
+
+ return NULL;
+}
+
+static void free_pmu_context(void * __percpu cpu_context)
+{
+ struct pmu *pmu;
+
+ mutex_lock(&pmus_lock);
+ /*
+ * Like a real lame refcount.
+ */
+ list_for_each_entry(pmu, &pmus, entry) {
+ if (pmu->pmu_cpu_context == cpu_context)
+ goto out;
+ }
+
+ free_percpu(cpu_context);
+out:
+ mutex_unlock(&pmus_lock);
+}
+
+int perf_pmu_register(struct pmu *pmu)
+{
+ int cpu, ret;
+
+ mutex_lock(&pmus_lock);
+ ret = -ENOMEM;
+ pmu->pmu_disable_count = alloc_percpu(int);
+ if (!pmu->pmu_disable_count)
+ goto unlock;
+
+ pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
+ if (pmu->pmu_cpu_context)
+ goto got_cpu_context;
+
+ pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
+ if (!pmu->pmu_cpu_context)
+ goto free_pdc;
+
+ for_each_possible_cpu(cpu) {
+ struct perf_cpu_context *cpuctx;
+
+ cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+ __perf_event_init_context(&cpuctx->ctx);
+ cpuctx->ctx.type = cpu_context;
+ cpuctx->ctx.pmu = pmu;
+ cpuctx->jiffies_interval = 1;
+ INIT_LIST_HEAD(&cpuctx->rotation_list);
+ }
+
+got_cpu_context:
+ if (!pmu->start_txn) {
+ if (pmu->pmu_enable) {
+ /*
+ * If we have pmu_enable/pmu_disable calls, install
+ * transaction stubs that use that to try and batch
+ * hardware accesses.
+ */
+ pmu->start_txn = perf_pmu_start_txn;
+ pmu->commit_txn = perf_pmu_commit_txn;
+ pmu->cancel_txn = perf_pmu_cancel_txn;
+ } else {
+ pmu->start_txn = perf_pmu_nop_void;
+ pmu->commit_txn = perf_pmu_nop_int;
+ pmu->cancel_txn = perf_pmu_nop_void;
+ }
+ }
+
+ if (!pmu->pmu_enable) {
+ pmu->pmu_enable = perf_pmu_nop_void;
+ pmu->pmu_disable = perf_pmu_nop_void;
+ }
+
+ list_add_rcu(&pmu->entry, &pmus);
+ ret = 0;
+unlock:
+ mutex_unlock(&pmus_lock);
+
+ return ret;
+
+free_pdc:
+ free_percpu(pmu->pmu_disable_count);
+ goto unlock;
+}
+
+void perf_pmu_unregister(struct pmu *pmu)
+{
+ mutex_lock(&pmus_lock);
+ list_del_rcu(&pmu->entry);
+ mutex_unlock(&pmus_lock);
/*
- * Software events (currently) can't in general distinguish
- * between user, kernel and hypervisor events.
- * However, context switches and cpu migrations are considered
- * to be kernel events, and page faults are never hypervisor
- * events.
+ * We dereference the pmu list under both SRCU and regular RCU, so
+ * synchronize against both of those.
*/
- switch (event_id) {
- case PERF_COUNT_SW_CPU_CLOCK:
- pmu = &perf_ops_cpu_clock;
+ synchronize_srcu(&pmus_srcu);
+ synchronize_rcu();
- break;
- case PERF_COUNT_SW_TASK_CLOCK:
- /*
- * If the user instantiates this as a per-cpu event,
- * use the cpu_clock event instead.
- */
- if (event->ctx->task)
- pmu = &perf_ops_task_clock;
- else
- pmu = &perf_ops_cpu_clock;
+ free_percpu(pmu->pmu_disable_count);
+ free_pmu_context(pmu->pmu_cpu_context);
+}
- break;
- case PERF_COUNT_SW_PAGE_FAULTS:
- case PERF_COUNT_SW_PAGE_FAULTS_MIN:
- case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
- case PERF_COUNT_SW_CONTEXT_SWITCHES:
- case PERF_COUNT_SW_CPU_MIGRATIONS:
- case PERF_COUNT_SW_ALIGNMENT_FAULTS:
- case PERF_COUNT_SW_EMULATION_FAULTS:
- if (!event->parent) {
- int err;
+struct pmu *perf_init_event(struct perf_event *event)
+{
+ struct pmu *pmu = NULL;
+ int idx;
- err = swevent_hlist_get(event);
- if (err)
- return ERR_PTR(err);
+ idx = srcu_read_lock(&pmus_srcu);
+ list_for_each_entry_rcu(pmu, &pmus, entry) {
+ int ret = pmu->event_init(event);
+ if (!ret)
+ goto unlock;
- atomic_inc(&perf_swevent_enabled[event_id]);
- event->destroy = sw_perf_event_destroy;
+ if (ret != -ENOENT) {
+ pmu = ERR_PTR(ret);
+ goto unlock;
}
- pmu = &perf_ops_generic;
- break;
}
+ pmu = ERR_PTR(-ENOENT);
+unlock:
+ srcu_read_unlock(&pmus_srcu, idx);
return pmu;
}
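Note (editorial, not part of the patch): perf_init_event() replaces the
old switch on attr->type: every registered pmu is offered the event, and
-ENOENT from ->event_init() means "not mine, try the next one". A skeletal
pmu under that convention (all sketch_* names hypothetical, modelled on
perf_swevent above):

	static int sketch_event_init(struct perf_event *event)
	{
		if (event->attr.type != PERF_TYPE_RAW)	/* claim one type */
			return -ENOENT;		/* let the next pmu try */
		return 0;
	}

	static int  sketch_add(struct perf_event *event, int flags)	{ return 0; }
	static void sketch_del(struct perf_event *event, int flags)	{ }
	static void sketch_start(struct perf_event *event, int flags)	{ }
	static void sketch_stop(struct perf_event *event, int flags)	{ }
	static void sketch_read(struct perf_event *event)		{ }

	static struct pmu sketch_pmu = {
		.task_ctx_nr	= perf_sw_context,
		.event_init	= sketch_event_init,
		.add		= sketch_add,
		.del		= sketch_del,
		.start		= sketch_start,
		.stop		= sketch_stop,
		.read		= sketch_read,
	};

	/* at init time: perf_pmu_register(&sketch_pmu); */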
@@ -4810,20 +5245,18 @@
 * Allocate and initialize an event structure
*/
static struct perf_event *
-perf_event_alloc(struct perf_event_attr *attr,
- int cpu,
- struct perf_event_context *ctx,
- struct perf_event *group_leader,
- struct perf_event *parent_event,
- perf_overflow_handler_t overflow_handler,
- gfp_t gfpflags)
+perf_event_alloc(struct perf_event_attr *attr, int cpu,
+ struct task_struct *task,
+ struct perf_event *group_leader,
+ struct perf_event *parent_event,
+ perf_overflow_handler_t overflow_handler)
{
- const struct pmu *pmu;
+ struct pmu *pmu;
struct perf_event *event;
struct hw_perf_event *hwc;
long err;
- event = kzalloc(sizeof(*event), gfpflags);
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
if (!event)
return ERR_PTR(-ENOMEM);
@@ -4841,6 +5274,7 @@
INIT_LIST_HEAD(&event->event_entry);
INIT_LIST_HEAD(&event->sibling_list);
init_waitqueue_head(&event->waitq);
+ init_irq_work(&event->pending, perf_pending_event);
mutex_init(&event->mmap_mutex);
@@ -4848,7 +5282,6 @@
event->attr = *attr;
event->group_leader = group_leader;
event->pmu = NULL;
- event->ctx = ctx;
event->oncpu = -1;
event->parent = parent_event;
@@ -4858,6 +5291,17 @@
event->state = PERF_EVENT_STATE_INACTIVE;
+ if (task) {
+ event->attach_state = PERF_ATTACH_TASK;
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ /*
+ * hw_breakpoint is a bit difficult here..
+ */
+ if (attr->type == PERF_TYPE_BREAKPOINT)
+ event->hw.bp_target = task;
+#endif
+ }
+
if (!overflow_handler && parent_event)
overflow_handler = parent_event->overflow_handler;
@@ -4882,29 +5326,8 @@
if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
goto done;
- switch (attr->type) {
- case PERF_TYPE_RAW:
- case PERF_TYPE_HARDWARE:
- case PERF_TYPE_HW_CACHE:
- pmu = hw_perf_event_init(event);
- break;
+ pmu = perf_init_event(event);
- case PERF_TYPE_SOFTWARE:
- pmu = sw_perf_event_init(event);
- break;
-
- case PERF_TYPE_TRACEPOINT:
- pmu = tp_perf_event_init(event);
- break;
-
- case PERF_TYPE_BREAKPOINT:
- pmu = bp_perf_event_init(event);
- break;
-
-
- default:
- break;
- }
done:
err = 0;
if (!pmu)
@@ -4922,13 +5345,21 @@
event->pmu = pmu;
if (!event->parent) {
- atomic_inc(&nr_events);
+ if (event->attach_state & PERF_ATTACH_TASK)
+ jump_label_inc(&perf_task_events);
if (event->attr.mmap || event->attr.mmap_data)
atomic_inc(&nr_mmap_events);
if (event->attr.comm)
atomic_inc(&nr_comm_events);
if (event->attr.task)
atomic_inc(&nr_task_events);
+ if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
+ err = get_callchain_buffers();
+ if (err) {
+ free_event(event);
+ return ERR_PTR(err);
+ }
+ }
}
return event;
@@ -5076,12 +5507,16 @@
struct perf_event_attr __user *, attr_uptr,
pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
- struct perf_event *event, *group_leader = NULL, *output_event = NULL;
+ struct perf_event *group_leader = NULL, *output_event = NULL;
+ struct perf_event *event, *sibling;
struct perf_event_attr attr;
struct perf_event_context *ctx;
struct file *event_file = NULL;
struct file *group_file = NULL;
+ struct task_struct *task = NULL;
+ struct pmu *pmu;
int event_fd;
+ int move_group = 0;
int fput_needed = 0;
int err;
@@ -5107,20 +5542,11 @@
if (event_fd < 0)
return event_fd;
- /*
- * Get the target context (task or percpu):
- */
- ctx = find_get_context(pid, cpu);
- if (IS_ERR(ctx)) {
- err = PTR_ERR(ctx);
- goto err_fd;
- }
-
if (group_fd != -1) {
group_leader = perf_fget_light(group_fd, &fput_needed);
if (IS_ERR(group_leader)) {
err = PTR_ERR(group_leader);
- goto err_put_context;
+ goto err_fd;
}
group_file = group_leader->filp;
if (flags & PERF_FLAG_FD_OUTPUT)
@@ -5129,6 +5555,58 @@
group_leader = NULL;
}
+ if (pid != -1) {
+ task = find_lively_task_by_vpid(pid);
+ if (IS_ERR(task)) {
+ err = PTR_ERR(task);
+ goto err_group_fd;
+ }
+ }
+
+ event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, NULL);
+ if (IS_ERR(event)) {
+ err = PTR_ERR(event);
+ goto err_task;
+ }
+
+ /*
+ * Special case software events and allow them to be part of
+ * any hardware group.
+ */
+ pmu = event->pmu;
+
+ if (group_leader &&
+ (is_software_event(event) != is_software_event(group_leader))) {
+ if (is_software_event(event)) {
+ /*
+ * If event and group_leader are not both a software
+ * event, and event is, then group leader is not.
+ *
+ * Allow the addition of software events to !software
+ * groups, this is safe because software events never
+ * fail to schedule.
+ */
+ pmu = group_leader->pmu;
+ } else if (is_software_event(group_leader) &&
+ (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
+ /*
+ * In case the group is a pure software group, and we
+ * try to add a hardware event, move the whole group to
+ * the hardware context.
+ */
+ move_group = 1;
+ }
+ }
+
+ /*
+ * Get the target context (task or percpu):
+ */
+ ctx = find_get_context(pmu, task, cpu);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto err_alloc;
+ }
+
/*
* Look up the group leader (we will attach this event to it):
*/
@@ -5140,42 +5618,66 @@
* becoming part of another group-sibling):
*/
if (group_leader->group_leader != group_leader)
- goto err_put_context;
+ goto err_context;
/*
* Do not allow to attach to a group in a different
* task or CPU context:
*/
- if (group_leader->ctx != ctx)
- goto err_put_context;
+ if (move_group) {
+ if (group_leader->ctx->type != ctx->type)
+ goto err_context;
+ } else {
+ if (group_leader->ctx != ctx)
+ goto err_context;
+ }
+
/*
* Only a group leader can be exclusive or pinned
*/
if (attr.exclusive || attr.pinned)
- goto err_put_context;
- }
-
- event = perf_event_alloc(&attr, cpu, ctx, group_leader,
- NULL, NULL, GFP_KERNEL);
- if (IS_ERR(event)) {
- err = PTR_ERR(event);
- goto err_put_context;
+ goto err_context;
}
if (output_event) {
err = perf_event_set_output(event, output_event);
if (err)
- goto err_free_put_context;
+ goto err_context;
}
event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
if (IS_ERR(event_file)) {
err = PTR_ERR(event_file);
- goto err_free_put_context;
+ goto err_context;
+ }
+
+ if (move_group) {
+ struct perf_event_context *gctx = group_leader->ctx;
+
+ mutex_lock(&gctx->mutex);
+ perf_event_remove_from_context(group_leader);
+ list_for_each_entry(sibling, &group_leader->sibling_list,
+ group_entry) {
+ perf_event_remove_from_context(sibling);
+ put_ctx(gctx);
+ }
+ mutex_unlock(&gctx->mutex);
+ put_ctx(gctx);
}
event->filp = event_file;
WARN_ON_ONCE(ctx->parent_ctx);
mutex_lock(&ctx->mutex);
+
+ if (move_group) {
+ perf_install_in_context(ctx, group_leader, cpu);
+ get_ctx(ctx);
+ list_for_each_entry(sibling, &group_leader->sibling_list,
+ group_entry) {
+ perf_install_in_context(ctx, sibling, cpu);
+ get_ctx(ctx);
+ }
+ }
+
perf_install_in_context(ctx, event, cpu);
++ctx->generation;
mutex_unlock(&ctx->mutex);
@@ -5196,11 +5698,15 @@
fd_install(event_fd, event_file);
return event_fd;
-err_free_put_context:
- free_event(event);
-err_put_context:
- fput_light(group_file, fput_needed);
+err_context:
put_ctx(ctx);
+err_alloc:
+ free_event(event);
+err_task:
+ if (task)
+ put_task_struct(task);
+err_group_fd:
+ fput_light(group_file, fput_needed);
err_fd:
put_unused_fd(event_fd);
return err;
@@ -5211,32 +5717,31 @@
*
* @attr: attributes of the counter to create
* @cpu: cpu in which the counter is bound
- * @pid: task to profile
+ * @task: task to profile (NULL for percpu)
*/
struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
- pid_t pid,
+ struct task_struct *task,
perf_overflow_handler_t overflow_handler)
{
- struct perf_event *event;
struct perf_event_context *ctx;
+ struct perf_event *event;
int err;
/*
* Get the target context (task or percpu):
*/
- ctx = find_get_context(pid, cpu);
- if (IS_ERR(ctx)) {
- err = PTR_ERR(ctx);
- goto err_exit;
- }
-
- event = perf_event_alloc(attr, cpu, ctx, NULL,
- NULL, overflow_handler, GFP_KERNEL);
+ event = perf_event_alloc(attr, cpu, task, NULL, NULL, overflow_handler);
if (IS_ERR(event)) {
err = PTR_ERR(event);
- goto err_put_context;
+ goto err;
+ }
+
+ ctx = find_get_context(event->pmu, task, cpu);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto err_free;
}
event->filp = NULL;
@@ -5254,112 +5759,13 @@
return event;
- err_put_context:
- put_ctx(ctx);
- err_exit:
+err_free:
+ free_event(event);
+err:
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
-/*
- * inherit a event from parent task to child task:
- */
-static struct perf_event *
-inherit_event(struct perf_event *parent_event,
- struct task_struct *parent,
- struct perf_event_context *parent_ctx,
- struct task_struct *child,
- struct perf_event *group_leader,
- struct perf_event_context *child_ctx)
-{
- struct perf_event *child_event;
-
- /*
- * Instead of creating recursive hierarchies of events,
- * we link inherited events back to the original parent,
- * which has a filp for sure, which we use as the reference
- * count:
- */
- if (parent_event->parent)
- parent_event = parent_event->parent;
-
- child_event = perf_event_alloc(&parent_event->attr,
- parent_event->cpu, child_ctx,
- group_leader, parent_event,
- NULL, GFP_KERNEL);
- if (IS_ERR(child_event))
- return child_event;
- get_ctx(child_ctx);
-
- /*
- * Make the child state follow the state of the parent event,
- * not its attr.disabled bit. We hold the parent's mutex,
- * so we won't race with perf_event_{en, dis}able_family.
- */
- if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
- child_event->state = PERF_EVENT_STATE_INACTIVE;
- else
- child_event->state = PERF_EVENT_STATE_OFF;
-
- if (parent_event->attr.freq) {
- u64 sample_period = parent_event->hw.sample_period;
- struct hw_perf_event *hwc = &child_event->hw;
-
- hwc->sample_period = sample_period;
- hwc->last_period = sample_period;
-
- local64_set(&hwc->period_left, sample_period);
- }
-
- child_event->overflow_handler = parent_event->overflow_handler;
-
- /*
- * Link it up in the child's context:
- */
- add_event_to_ctx(child_event, child_ctx);
-
- /*
- * Get a reference to the parent filp - we will fput it
- * when the child event exits. This is safe to do because
- * we are in the parent and we know that the filp still
- * exists and has a nonzero count:
- */
- atomic_long_inc(&parent_event->filp->f_count);
-
- /*
- * Link this into the parent event's child list
- */
- WARN_ON_ONCE(parent_event->ctx->parent_ctx);
- mutex_lock(&parent_event->child_mutex);
- list_add_tail(&child_event->child_list, &parent_event->child_list);
- mutex_unlock(&parent_event->child_mutex);
-
- return child_event;
-}
-
-static int inherit_group(struct perf_event *parent_event,
- struct task_struct *parent,
- struct perf_event_context *parent_ctx,
- struct task_struct *child,
- struct perf_event_context *child_ctx)
-{
- struct perf_event *leader;
- struct perf_event *sub;
- struct perf_event *child_ctr;
-
- leader = inherit_event(parent_event, parent, parent_ctx,
- child, NULL, child_ctx);
- if (IS_ERR(leader))
- return PTR_ERR(leader);
- list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
- child_ctr = inherit_event(sub, parent, parent_ctx,
- child, leader, child_ctx);
- if (IS_ERR(child_ctr))
- return PTR_ERR(child_ctr);
- }
- return 0;
-}
-
static void sync_child_event(struct perf_event *child_event,
struct task_struct *child)
{
@@ -5416,16 +5822,13 @@
}
}
-/*
- * When a child task exits, feed back event values to parent events.
- */
-void perf_event_exit_task(struct task_struct *child)
+static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
{
struct perf_event *child_event, *tmp;
struct perf_event_context *child_ctx;
unsigned long flags;
- if (likely(!child->perf_event_ctxp)) {
+ if (likely(!child->perf_event_ctxp[ctxn])) {
perf_event_task(child, NULL, 0);
return;
}
@@ -5437,8 +5840,8 @@
* scheduled, so we are now safe from rescheduling changing
* our context.
*/
- child_ctx = child->perf_event_ctxp;
- __perf_event_task_sched_out(child_ctx);
+ child_ctx = child->perf_event_ctxp[ctxn];
+ task_ctx_sched_out(child_ctx, EVENT_ALL);
/*
* Take the context lock here so that if find_get_context is
@@ -5446,7 +5849,7 @@
* incremented the context's refcount before we do put_ctx below.
*/
raw_spin_lock(&child_ctx->lock);
- child->perf_event_ctxp = NULL;
+ child->perf_event_ctxp[ctxn] = NULL;
/*
* If this context is a clone; unclone it so it can't get
* swapped to another process while we're removing all
@@ -5499,6 +5902,17 @@
put_ctx(child_ctx);
}
+/*
+ * When a child task exits, feed back event values to parent events.
+ */
+void perf_event_exit_task(struct task_struct *child)
+{
+ int ctxn;
+
+ for_each_task_context_nr(ctxn)
+ perf_event_exit_task_context(child, ctxn);
+}
+
static void perf_free_event(struct perf_event *event,
struct perf_event_context *ctx)
{
@@ -5520,48 +5934,166 @@
/*
* free an unexposed, unused context as created by inheritance by
- * init_task below, used by fork() in case of fail.
+ * perf_event_init_task below, used by fork() in case of failure.
*/
void perf_event_free_task(struct task_struct *task)
{
- struct perf_event_context *ctx = task->perf_event_ctxp;
+ struct perf_event_context *ctx;
struct perf_event *event, *tmp;
+ int ctxn;
- if (!ctx)
- return;
+ for_each_task_context_nr(ctxn) {
+ ctx = task->perf_event_ctxp[ctxn];
+ if (!ctx)
+ continue;
- mutex_lock(&ctx->mutex);
+ mutex_lock(&ctx->mutex);
again:
- list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
- perf_free_event(event, ctx);
+ list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
+ group_entry)
+ perf_free_event(event, ctx);
- list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
- group_entry)
- perf_free_event(event, ctx);
+ list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
+ group_entry)
+ perf_free_event(event, ctx);
- if (!list_empty(&ctx->pinned_groups) ||
- !list_empty(&ctx->flexible_groups))
- goto again;
+ if (!list_empty(&ctx->pinned_groups) ||
+ !list_empty(&ctx->flexible_groups))
+ goto again;
- mutex_unlock(&ctx->mutex);
+ mutex_unlock(&ctx->mutex);
- put_ctx(ctx);
+ put_ctx(ctx);
+ }
+}
+
+void perf_event_delayed_put(struct task_struct *task)
+{
+ int ctxn;
+
+ for_each_task_context_nr(ctxn)
+ WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
+}
+
+/*
+ * inherit an event from parent task to child task:
+ */
+static struct perf_event *
+inherit_event(struct perf_event *parent_event,
+ struct task_struct *parent,
+ struct perf_event_context *parent_ctx,
+ struct task_struct *child,
+ struct perf_event *group_leader,
+ struct perf_event_context *child_ctx)
+{
+ struct perf_event *child_event;
+ unsigned long flags;
+
+ /*
+ * Instead of creating recursive hierarchies of events,
+ * we link inherited events back to the original parent,
+ * which has a filp for sure, which we use as the reference
+ * count:
+ */
+ if (parent_event->parent)
+ parent_event = parent_event->parent;
+
+ child_event = perf_event_alloc(&parent_event->attr,
+ parent_event->cpu,
+ child,
+ group_leader, parent_event,
+ NULL);
+ if (IS_ERR(child_event))
+ return child_event;
+ get_ctx(child_ctx);
+
+ /*
+ * Make the child state follow the state of the parent event,
+ * not its attr.disabled bit. We hold the parent's mutex,
+ * so we won't race with perf_event_{en, dis}able_family.
+ */
+ if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
+ child_event->state = PERF_EVENT_STATE_INACTIVE;
+ else
+ child_event->state = PERF_EVENT_STATE_OFF;
+
+ if (parent_event->attr.freq) {
+ u64 sample_period = parent_event->hw.sample_period;
+ struct hw_perf_event *hwc = &child_event->hw;
+
+ hwc->sample_period = sample_period;
+ hwc->last_period = sample_period;
+
+ local64_set(&hwc->period_left, sample_period);
+ }
+
+ child_event->ctx = child_ctx;
+ child_event->overflow_handler = parent_event->overflow_handler;
+
+ /*
+ * Link it up in the child's context:
+ */
+ raw_spin_lock_irqsave(&child_ctx->lock, flags);
+ add_event_to_ctx(child_event, child_ctx);
+ raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
+
+ /*
+ * Get a reference to the parent filp - we will fput it
+ * when the child event exits. This is safe to do because
+ * we are in the parent and we know that the filp still
+ * exists and has a nonzero count:
+ */
+ atomic_long_inc(&parent_event->filp->f_count);
+
+ /*
+ * Link this into the parent event's child list
+ */
+ WARN_ON_ONCE(parent_event->ctx->parent_ctx);
+ mutex_lock(&parent_event->child_mutex);
+ list_add_tail(&child_event->child_list, &parent_event->child_list);
+ mutex_unlock(&parent_event->child_mutex);
+
+ return child_event;
+}
+
+static int inherit_group(struct perf_event *parent_event,
+ struct task_struct *parent,
+ struct perf_event_context *parent_ctx,
+ struct task_struct *child,
+ struct perf_event_context *child_ctx)
+{
+ struct perf_event *leader;
+ struct perf_event *sub;
+ struct perf_event *child_ctr;
+
+ leader = inherit_event(parent_event, parent, parent_ctx,
+ child, NULL, child_ctx);
+ if (IS_ERR(leader))
+ return PTR_ERR(leader);
+ list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
+ child_ctr = inherit_event(sub, parent, parent_ctx,
+ child, leader, child_ctx);
+ if (IS_ERR(child_ctr))
+ return PTR_ERR(child_ctr);
+ }
+ return 0;
}
static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
struct perf_event_context *parent_ctx,
- struct task_struct *child,
+ struct task_struct *child, int ctxn,
int *inherited_all)
{
int ret;
- struct perf_event_context *child_ctx = child->perf_event_ctxp;
+ struct perf_event_context *child_ctx;
if (!event->attr.inherit) {
*inherited_all = 0;
return 0;
}
+ child_ctx = child->perf_event_ctxp[ctxn];
if (!child_ctx) {
/*
* This is executed from the parent task context, so
@@ -5570,14 +6102,11 @@
* child.
*/
- child_ctx = kzalloc(sizeof(struct perf_event_context),
- GFP_KERNEL);
+ child_ctx = alloc_perf_context(event->pmu, child);
if (!child_ctx)
return -ENOMEM;
- __perf_event_init_context(child_ctx, child);
- child->perf_event_ctxp = child_ctx;
- get_task_struct(child);
+ child->perf_event_ctxp[ctxn] = child_ctx;
}
ret = inherit_group(event, parent, parent_ctx,
@@ -5589,11 +6118,10 @@
return ret;
}
-
/*
* Initialize the perf_event context in task_struct
*/
-int perf_event_init_task(struct task_struct *child)
+int perf_event_init_context(struct task_struct *child, int ctxn)
{
struct perf_event_context *child_ctx, *parent_ctx;
struct perf_event_context *cloned_ctx;
@@ -5602,19 +6130,19 @@
int inherited_all = 1;
int ret = 0;
- child->perf_event_ctxp = NULL;
+ child->perf_event_ctxp[ctxn] = NULL;
mutex_init(&child->perf_event_mutex);
INIT_LIST_HEAD(&child->perf_event_list);
- if (likely(!parent->perf_event_ctxp))
+ if (likely(!parent->perf_event_ctxp[ctxn]))
return 0;
/*
* If the parent's context is a clone, pin it so it won't get
* swapped under us.
*/
- parent_ctx = perf_pin_task_context(parent);
+ parent_ctx = perf_pin_task_context(parent, ctxn);
/*
* No need to check if parent_ctx != NULL here; since we saw
@@ -5634,20 +6162,20 @@
* the list, not manipulating it:
*/
list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
- ret = inherit_task_group(event, parent, parent_ctx, child,
- &inherited_all);
+ ret = inherit_task_group(event, parent, parent_ctx,
+ child, ctxn, &inherited_all);
if (ret)
break;
}
list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
- ret = inherit_task_group(event, parent, parent_ctx, child,
- &inherited_all);
+ ret = inherit_task_group(event, parent, parent_ctx,
+ child, ctxn, &inherited_all);
if (ret)
break;
}
- child_ctx = child->perf_event_ctxp;
+ child_ctx = child->perf_event_ctxp[ctxn];
if (child_ctx && inherited_all) {
/*
@@ -5676,63 +6204,98 @@
return ret;
}
+/*
+ * Initialize the perf_event context in task_struct
+ */
+int perf_event_init_task(struct task_struct *child)
+{
+ int ctxn, ret;
+
+ for_each_task_context_nr(ctxn) {
+ ret = perf_event_init_context(child, ctxn);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static void __init perf_event_init_all_cpus(void)
{
+ struct swevent_htable *swhash;
int cpu;
- struct perf_cpu_context *cpuctx;
for_each_possible_cpu(cpu) {
- cpuctx = &per_cpu(perf_cpu_context, cpu);
- mutex_init(&cpuctx->hlist_mutex);
- __perf_event_init_context(&cpuctx->ctx, NULL);
+ swhash = &per_cpu(swevent_htable, cpu);
+ mutex_init(&swhash->hlist_mutex);
+ INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
}
}
static void __cpuinit perf_event_init_cpu(int cpu)
{
- struct perf_cpu_context *cpuctx;
+ struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
- cpuctx = &per_cpu(perf_cpu_context, cpu);
-
- spin_lock(&perf_resource_lock);
- cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
- spin_unlock(&perf_resource_lock);
-
- mutex_lock(&cpuctx->hlist_mutex);
- if (cpuctx->hlist_refcount > 0) {
+ mutex_lock(&swhash->hlist_mutex);
+ if (swhash->hlist_refcount > 0) {
struct swevent_hlist *hlist;
- hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
- WARN_ON_ONCE(!hlist);
- rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
+ hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
+ WARN_ON(!hlist);
+ rcu_assign_pointer(swhash->swevent_hlist, hlist);
}
- mutex_unlock(&cpuctx->hlist_mutex);
+ mutex_unlock(&swhash->hlist_mutex);
}
#ifdef CONFIG_HOTPLUG_CPU
-static void __perf_event_exit_cpu(void *info)
+static void perf_pmu_rotate_stop(struct pmu *pmu)
{
- struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
- struct perf_event_context *ctx = &cpuctx->ctx;
+ struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+
+ WARN_ON(!irqs_disabled());
+
+ list_del_init(&cpuctx->rotation_list);
+}
+
+static void __perf_event_exit_context(void *__info)
+{
+ struct perf_event_context *ctx = __info;
struct perf_event *event, *tmp;
+ perf_pmu_rotate_stop(ctx->pmu);
+
list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
__perf_event_remove_from_context(event);
list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
__perf_event_remove_from_context(event);
}
+
+static void perf_event_exit_cpu_context(int cpu)
+{
+ struct perf_event_context *ctx;
+ struct pmu *pmu;
+ int idx;
+
+ idx = srcu_read_lock(&pmus_srcu);
+ list_for_each_entry_rcu(pmu, &pmus, entry) {
+ ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
+
+ mutex_lock(&ctx->mutex);
+ smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
+ mutex_unlock(&ctx->mutex);
+ }
+ srcu_read_unlock(&pmus_srcu, idx);
+}
+
static void perf_event_exit_cpu(int cpu)
{
- struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
- struct perf_event_context *ctx = &cpuctx->ctx;
+ struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
- mutex_lock(&cpuctx->hlist_mutex);
- swevent_hlist_release(cpuctx);
- mutex_unlock(&cpuctx->hlist_mutex);
+ mutex_lock(&swhash->hlist_mutex);
+ swevent_hlist_release(swhash);
+ mutex_unlock(&swhash->hlist_mutex);
- mutex_lock(&ctx->mutex);
- smp_call_function_single(cpu, __perf_event_exit_cpu, NULL, 1);
- mutex_unlock(&ctx->mutex);
+ perf_event_exit_cpu_context(cpu);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
@@ -5743,15 +6306,15 @@
{
unsigned int cpu = (long)hcpu;
- switch (action) {
+ switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
+ case CPU_DOWN_FAILED:
perf_event_init_cpu(cpu);
break;
+ case CPU_UP_CANCELED:
case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
perf_event_exit_cpu(cpu);
break;
@@ -5762,118 +6325,13 @@
return NOTIFY_OK;
}
-/*
- * This has to have a higher priority than migration_notifier in sched.c.
- */
-static struct notifier_block __cpuinitdata perf_cpu_nb = {
- .notifier_call = perf_cpu_notify,
- .priority = 20,
-};
-
void __init perf_event_init(void)
{
perf_event_init_all_cpus();
- perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
- (void *)(long)smp_processor_id());
- perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
- (void *)(long)smp_processor_id());
- register_cpu_notifier(&perf_cpu_nb);
+ init_srcu_struct(&pmus_srcu);
+ perf_pmu_register(&perf_swevent);
+ perf_pmu_register(&perf_cpu_clock);
+ perf_pmu_register(&perf_task_clock);
+ perf_tp_register();
+ perf_cpu_notifier(perf_cpu_notify);
}
-
-static ssize_t perf_show_reserve_percpu(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%d\n", perf_reserved_percpu);
-}
-
-static ssize_t
-perf_set_reserve_percpu(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct perf_cpu_context *cpuctx;
- unsigned long val;
- int err, cpu, mpt;
-
- err = strict_strtoul(buf, 10, &val);
- if (err)
- return err;
- if (val > perf_max_events)
- return -EINVAL;
-
- spin_lock(&perf_resource_lock);
- perf_reserved_percpu = val;
- for_each_online_cpu(cpu) {
- cpuctx = &per_cpu(perf_cpu_context, cpu);
- raw_spin_lock_irq(&cpuctx->ctx.lock);
- mpt = min(perf_max_events - cpuctx->ctx.nr_events,
- perf_max_events - perf_reserved_percpu);
- cpuctx->max_pertask = mpt;
- raw_spin_unlock_irq(&cpuctx->ctx.lock);
- }
- spin_unlock(&perf_resource_lock);
-
- return count;
-}
-
-static ssize_t perf_show_overcommit(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%d\n", perf_overcommit);
-}
-
-static ssize_t
-perf_set_overcommit(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
- const char *buf, size_t count)
-{
- unsigned long val;
- int err;
-
- err = strict_strtoul(buf, 10, &val);
- if (err)
- return err;
- if (val > 1)
- return -EINVAL;
-
- spin_lock(&perf_resource_lock);
- perf_overcommit = val;
- spin_unlock(&perf_resource_lock);
-
- return count;
-}
-
-static SYSDEV_CLASS_ATTR(
- reserve_percpu,
- 0644,
- perf_show_reserve_percpu,
- perf_set_reserve_percpu
- );
-
-static SYSDEV_CLASS_ATTR(
- overcommit,
- 0644,
- perf_show_overcommit,
- perf_set_overcommit
- );
-
-static struct attribute *perfclass_attrs[] = {
- &attr_reserve_percpu.attr,
- &attr_overcommit.attr,
- NULL
-};
-
-static struct attribute_group perfclass_attr_group = {
- .attrs = perfclass_attrs,
- .name = "perf_events",
-};
-
-static int __init perf_event_sysfs_init(void)
-{
- return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
- &perfclass_attr_group);
-}
-device_initcall(perf_event_sysfs_init);
diff --git a/kernel/pid.c b/kernel/pid.c
index d55c6fb..39b65b6 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -401,7 +401,7 @@
struct task_struct *result = NULL;
if (pid) {
struct hlist_node *first;
- first = rcu_dereference_check(pid->tasks[type].first,
+ first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
rcu_read_lock_held() ||
lockdep_tasklist_lock_is_held());
if (first)
@@ -416,6 +416,7 @@
*/
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
+ rcu_lockdep_assert(rcu_read_lock_held());
return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}
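Note (editorial, not part of the patch): both pid.c hunks tighten RCU
lockdep coverage -- rcu_dereference_check() names the lock conditions
under which the dereference is also legal, while rcu_lockdep_assert()
turns an implicit "caller holds rcu_read_lock()" requirement into a
checked one. The first helper in isolation (types are hypothetical):

	#include <linux/rcupdate.h>
	#include <linux/spinlock.h>

	struct sketch_node {
		struct sketch_node __rcu *next;
	};

	static DEFINE_SPINLOCK(sketch_lock);
	static struct sketch_node __rcu *sketch_head;

	static struct sketch_node *sketch_first(void)
	{
		/* valid under rcu_read_lock() or while holding sketch_lock */
		return rcu_dereference_check(sketch_head,
					     rcu_read_lock_held() ||
					     lockdep_is_held(&sketch_lock));
	}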
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index 996a4de..645e541 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -212,15 +212,17 @@
/**
* pm_qos_add_request - inserts new qos request into the list
- * @pm_qos_class: identifies which list of qos request to us
+ * @dep: pointer to a preallocated handle
+ * @pm_qos_class: identifies which list of qos request to use
* @value: defines the qos request
*
* This function inserts a new entry in the pm_qos_class list of requested qos
* performance characteristics. It recomputes the aggregate QoS expectations
- * for the pm_qos_class of parameters, and returns the pm_qos_request list
- * element as a handle for use in updating and removal. Call needs to save
- * this handle for later use.
+ * for the pm_qos_class of parameters and initializes the pm_qos_request_list
+ * handle. Caller needs to save this handle for later use in updates and
+ * removal.
*/
+
void pm_qos_add_request(struct pm_qos_request_list *dep,
int pm_qos_class, s32 value)
{
@@ -348,7 +350,7 @@
pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
if (pm_qos_class >= 0) {
- struct pm_qos_request_list *req = kzalloc(GFP_KERNEL, sizeof(*req));
+ struct pm_qos_request_list *req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
@@ -387,10 +389,12 @@
} else if (count == 11) { /* len('0x12345678/0') */
if (copy_from_user(ascii_value, buf, 11))
return -EFAULT;
+ if (strlen(ascii_value) != 10)
+ return -EINVAL;
x = sscanf(ascii_value, "%x", &value);
if (x != 1)
return -EINVAL;
- pr_debug(KERN_ERR "%s, %d, 0x%x\n", ascii_value, x, value);
+ pr_debug("%s, %d, 0x%x\n", ascii_value, x, value);
} else
return -EINVAL;
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index c779639..8dc31e0 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -338,7 +338,6 @@
goto Close;
suspend_console();
- hibernation_freeze_swap();
saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
error = dpm_suspend_start(PMSG_FREEZE);
if (error)
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
index e8b3370..d523593 100644
--- a/kernel/power/poweroff.c
+++ b/kernel/power/poweroff.c
@@ -24,7 +24,7 @@
static DECLARE_WORK(poweroff_work, do_poweroff);
-static void handle_poweroff(int key, struct tty_struct *tty)
+static void handle_poweroff(int key)
{
/* run sysrq poweroff on boot cpu */
schedule_work_on(cpumask_first(cpu_online_mask), &poweroff_work);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 5e7edfb..d3f795f 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1086,7 +1086,6 @@
buffer = NULL;
alloc_normal = 0;
alloc_highmem = 0;
- hibernation_thaw_swap();
}
/* Helper functions used for the shrinking of memory. */
@@ -1122,9 +1121,19 @@
return nr_alloc;
}
-static unsigned long preallocate_image_memory(unsigned long nr_pages)
+static unsigned long preallocate_image_memory(unsigned long nr_pages,
+ unsigned long avail_normal)
{
- return preallocate_image_pages(nr_pages, GFP_IMAGE);
+ unsigned long alloc;
+
+ if (avail_normal <= alloc_normal)
+ return 0;
+
+ alloc = avail_normal - alloc_normal;
+ if (nr_pages < alloc)
+ alloc = nr_pages;
+
+ return preallocate_image_pages(alloc, GFP_IMAGE);
}
#ifdef CONFIG_HIGHMEM
@@ -1170,15 +1179,22 @@
*/
static void free_unnecessary_pages(void)
{
- unsigned long save_highmem, to_free_normal, to_free_highmem;
+ unsigned long save, to_free_normal, to_free_highmem;
- to_free_normal = alloc_normal - count_data_pages();
- save_highmem = count_highmem_pages();
- if (alloc_highmem > save_highmem) {
- to_free_highmem = alloc_highmem - save_highmem;
+ save = count_data_pages();
+ if (alloc_normal >= save) {
+ to_free_normal = alloc_normal - save;
+ save = 0;
+ } else {
+ to_free_normal = 0;
+ save -= alloc_normal;
+ }
+ save += count_highmem_pages();
+ if (alloc_highmem >= save) {
+ to_free_highmem = alloc_highmem - save;
} else {
to_free_highmem = 0;
- to_free_normal -= save_highmem - alloc_highmem;
+ to_free_normal -= save - alloc_highmem;
}
memory_bm_position_reset(&copy_bm);
@@ -1259,7 +1275,7 @@
{
struct zone *zone;
unsigned long saveable, size, max_size, count, highmem, pages = 0;
- unsigned long alloc, save_highmem, pages_highmem;
+ unsigned long alloc, save_highmem, pages_highmem, avail_normal;
struct timeval start, stop;
int error;
@@ -1296,6 +1312,7 @@
else
count += zone_page_state(zone, NR_FREE_PAGES);
}
+ avail_normal = count;
count += highmem;
count -= totalreserve_pages;
@@ -1310,12 +1327,21 @@
*/
if (size >= saveable) {
pages = preallocate_image_highmem(save_highmem);
- pages += preallocate_image_memory(saveable - pages);
+ pages += preallocate_image_memory(saveable - pages, avail_normal);
goto out;
}
/* Estimate the minimum size of the image. */
pages = minimum_image_size(saveable);
+ /*
+ * To avoid excessive pressure on the normal zone, leave room in it to
+ * accommodate an image of the minimum size (unless it's already too
+ * small, in which case don't preallocate pages from it at all).
+ */
+ if (avail_normal > pages)
+ avail_normal -= pages;
+ else
+ avail_normal = 0;
if (size < pages)
size = min_t(unsigned long, pages, max_size);
@@ -1336,16 +1362,34 @@
*/
pages_highmem = preallocate_image_highmem(highmem / 2);
alloc = (count - max_size) - pages_highmem;
- pages = preallocate_image_memory(alloc);
- if (pages < alloc)
- goto err_out;
- size = max_size - size;
- alloc = size;
- size = preallocate_highmem_fraction(size, highmem, count);
- pages_highmem += size;
- alloc -= size;
- pages += preallocate_image_memory(alloc);
- pages += pages_highmem;
+ pages = preallocate_image_memory(alloc, avail_normal);
+ if (pages < alloc) {
+ /* We have exhausted non-highmem pages, try highmem. */
+ alloc -= pages;
+ pages += pages_highmem;
+ pages_highmem = preallocate_image_highmem(alloc);
+ if (pages_highmem < alloc)
+ goto err_out;
+ pages += pages_highmem;
+ /*
+ * size is the desired number of saveable pages to leave in
+ * memory, so try to preallocate (all memory - size) pages.
+ */
+ alloc = (count - pages) - size;
+ pages += preallocate_image_highmem(alloc);
+ } else {
+ /*
+ * There are approximately max_size saveable pages at this point
+ * and we want to reduce this number down to size.
+ */
+ alloc = max_size - size;
+ size = preallocate_highmem_fraction(alloc, highmem, count);
+ pages_highmem += size;
+ alloc -= size;
+ size = preallocate_image_memory(alloc, avail_normal);
+ pages_highmem += preallocate_image_highmem(alloc - size);
+ pages += pages_highmem + size;
+ }
/*
* We only need as many page frames for the image as there are saveable
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 5d0059e..e6a5bdf 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -136,10 +136,10 @@
{
unsigned long offset;
- offset = swp_offset(get_swap_for_hibernation(swap));
+ offset = swp_offset(get_swap_page_of_type(swap));
if (offset) {
if (swsusp_extents_insert(offset))
- swap_free_for_hibernation(swp_entry(swap, offset));
+ swap_free(swp_entry(swap, offset));
else
return swapdev_block(swap, offset);
}
@@ -163,7 +163,7 @@
ext = container_of(node, struct swsusp_extent, node);
rb_erase(node, &swsusp_extents);
for (offset = ext->start; offset <= ext->end; offset++)
- swap_free_for_hibernation(swp_entry(swap, offset));
+ swap_free(swp_entry(swap, offset));
kfree(ext);
}
diff --git a/kernel/printk.c b/kernel/printk.c
index 8fe465a..2531017 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -85,7 +85,7 @@
* provides serialisation for access to the entire console
* driver system.
*/
-static DECLARE_MUTEX(console_sem);
+static DEFINE_SEMAPHORE(console_sem);
struct console *console_drivers;
EXPORT_SYMBOL_GPL(console_drivers);
@@ -556,7 +556,7 @@
/* If a crash is occurring, make sure we can't deadlock */
spin_lock_init(&logbuf_lock);
/* And make sure that we print immediately */
- init_MUTEX(&console_sem);
+ sema_init(&console_sem, 1);
}
#if defined(CONFIG_PRINTK_TIME)
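
DECLARE_MUTEX()/init_MUTEX() are on their way out in favor of the plain
semaphore API. A hedged before/after sketch for a binary semaphore:

    /* old: static DECLARE_MUTEX(console_sem); init_MUTEX(&console_sem); */
    static DEFINE_SEMAPHORE(console_sem);   /* count initialized to 1 */

    /* runtime re-initialization on the crash path, as above */
    sema_init(&console_sem, 1);

    down(&console_sem);
    /* ... console-driver critical section ... */
    up(&console_sem);
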
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 4d16983..a23a57a 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -73,12 +73,14 @@
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
/**
- * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section?
+ * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
*
* Check for bottom half being disabled, which covers both the
* CONFIG_PROVE_RCU and not cases. Note that if someone uses
* rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
- * will show the situation.
+ * will show the situation. This is useful for debug checks in functions
+ * that require that they be called within an RCU read-side critical
+ * section.
*
* Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
*/
@@ -86,7 +88,7 @@
{
if (!debug_lockdep_rcu_enabled())
return 1;
- return in_softirq();
+ return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
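
With irqs-disabled regions now counting as RCU-bh read-side critical
sections, rcu_read_lock_bh_held() can back debug checks such as this
hedged sketch (foo_table, foo_lookup and FOO_MAX are hypothetical):

    static struct foo __rcu *foo_table[FOO_MAX];

    static struct foo *foo_lookup(int key)
    {
            WARN_ON_ONCE(!rcu_read_lock_bh_held());
            return rcu_dereference_bh(foo_table[key]);
    }
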
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 196ec02..d806735 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -59,6 +59,14 @@
EXPORT_SYMBOL_GPL(rcu_scheduler_active);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+/* Forward declarations for rcutiny_plugin.h. */
+static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
+static void __call_rcu(struct rcu_head *head,
+ void (*func)(struct rcu_head *rcu),
+ struct rcu_ctrlblk *rcp);
+
+#include "rcutiny_plugin.h"
+
#ifdef CONFIG_NO_HZ
static long rcu_dynticks_nesting = 1;
@@ -140,6 +148,7 @@
rcu_sched_qs(cpu);
else if (!in_softirq())
rcu_bh_qs(cpu);
+ rcu_preempt_check_callbacks();
}
/*
@@ -162,6 +171,7 @@
*rcp->donetail = NULL;
if (rcp->curtail == rcp->donetail)
rcp->curtail = &rcp->rcucblist;
+ rcu_preempt_remove_callbacks(rcp);
rcp->donetail = &rcp->rcucblist;
local_irq_restore(flags);
@@ -182,6 +192,7 @@
{
__rcu_process_callbacks(&rcu_sched_ctrlblk);
__rcu_process_callbacks(&rcu_bh_ctrlblk);
+ rcu_preempt_process_callbacks();
}
/*
@@ -223,15 +234,15 @@
}
/*
- * Post an RCU callback to be invoked after the end of an RCU grace
+ * Post an RCU callback to be invoked after the end of an RCU-sched grace
* period. But since we have but one CPU, that would be after any
* quiescent state.
*/
-void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
__call_rcu(head, func, &rcu_sched_ctrlblk);
}
-EXPORT_SYMBOL_GPL(call_rcu);
+EXPORT_SYMBOL_GPL(call_rcu_sched);
/*
* Post an RCU bottom-half callback to be invoked after any subsequent
@@ -243,20 +254,6 @@
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
-void rcu_barrier(void)
-{
- struct rcu_synchronize rcu;
-
- init_rcu_head_on_stack(&rcu.head);
- init_completion(&rcu.completion);
- /* Will wake me after RCU finished. */
- call_rcu(&rcu.head, wakeme_after_rcu);
- /* Wait for it. */
- wait_for_completion(&rcu.completion);
- destroy_rcu_head_on_stack(&rcu.head);
-}
-EXPORT_SYMBOL_GPL(rcu_barrier);
-
void rcu_barrier_bh(void)
{
struct rcu_synchronize rcu;
@@ -289,5 +286,3 @@
{
open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
}
-
-#include "rcutiny_plugin.h"
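
call_rcu() moves out of rcutiny.c because, under TINY_PREEMPT_RCU, it
must queue on the preemptible-RCU callback list (see rcutiny_plugin.h
below); code wanting the sched flavor now says so explicitly. A hedged
callback sketch with a hypothetical struct foo and pointer fp:

    static void foo_free_rcu(struct rcu_head *head)
    {
            kfree(container_of(head, struct foo, rcu));
    }

    /* wait for a context-switch-based (sched) grace period */
    call_rcu_sched(&fp->rcu, foo_free_rcu);
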
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
index d223a92bc..6ceca4f 100644
--- a/kernel/rcutiny_plugin.h
+++ b/kernel/rcutiny_plugin.h
@@ -1,7 +1,7 @@
/*
- * Read-Copy Update mechanism for mutual exclusion (tree-based version)
+ * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition
* Internal non-public definitions that provide either classic
- * or preemptable semantics.
+ * or preemptible semantics.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -17,11 +17,587 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
- * Copyright IBM Corporation, 2009
+ * Copyright (c) 2010 Linaro
*
* Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
*/
+#ifdef CONFIG_TINY_PREEMPT_RCU
+
+#include <linux/delay.h>
+
+/* Global control variables for preemptible RCU. */
+struct rcu_preempt_ctrlblk {
+ struct rcu_ctrlblk rcb; /* curtail: ->next ptr of last CB for GP. */
+ struct rcu_head **nexttail;
+ /* Tasks blocked in a preemptible RCU */
+ /* read-side critical section while a */
+ /* preemptible-RCU grace period is in */
+ /* progress must wait for a later grace */
+ /* period. This pointer points to the */
+ /* ->next pointer of the last task that */
+ /* must wait for a later grace period, or */
+ /* to &->rcb.rcucblist if there is no */
+ /* such task. */
+ struct list_head blkd_tasks;
+ /* Tasks blocked in RCU read-side critical */
+ /* section. Tasks are placed at the head */
+ /* of this list and age towards the tail. */
+ struct list_head *gp_tasks;
+ /* Pointer to the first task blocking the */
+ /* current grace period, or NULL if there */
+ /* is no such task. */
+ struct list_head *exp_tasks;
+ /* Pointer to first task blocking the */
+ /* current expedited grace period, or NULL */
+ /* if there is no such task. If there */
+ /* is no current expedited grace period, */
+ /* then there cannot be any such task. */
+ u8 gpnum; /* Current grace period. */
+ u8 gpcpu; /* Last grace period blocked by the CPU. */
+ u8 completed; /* Last grace period completed. */
+ /* If all three are equal, RCU is idle. */
+};
+
+static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
+ .rcb.donetail = &rcu_preempt_ctrlblk.rcb.rcucblist,
+ .rcb.curtail = &rcu_preempt_ctrlblk.rcb.rcucblist,
+ .nexttail = &rcu_preempt_ctrlblk.rcb.rcucblist,
+ .blkd_tasks = LIST_HEAD_INIT(rcu_preempt_ctrlblk.blkd_tasks),
+};
+
+static int rcu_preempted_readers_exp(void);
+static void rcu_report_exp_done(void);
+
+/*
+ * Return true if the CPU has not yet responded to the current grace period.
+ */
+static int rcu_cpu_blocking_cur_gp(void)
+{
+ return rcu_preempt_ctrlblk.gpcpu != rcu_preempt_ctrlblk.gpnum;
+}
+
+/*
+ * Check for a running RCU reader. Because there is only one CPU,
+ * there can be but one running RCU reader at a time. ;-)
+ */
+static int rcu_preempt_running_reader(void)
+{
+ return current->rcu_read_lock_nesting;
+}
+
+/*
+ * Check for preempted RCU readers blocking any grace period.
+ * If the caller needs a reliable answer, it must disable hard irqs.
+ */
+static int rcu_preempt_blocked_readers_any(void)
+{
+ return !list_empty(&rcu_preempt_ctrlblk.blkd_tasks);
+}
+
+/*
+ * Check for preempted RCU readers blocking the current grace period.
+ * If the caller needs a reliable answer, it must disable hard irqs.
+ */
+static int rcu_preempt_blocked_readers_cgp(void)
+{
+ return rcu_preempt_ctrlblk.gp_tasks != NULL;
+}
+
+/*
+ * Return true if another preemptible-RCU grace period is needed.
+ */
+static int rcu_preempt_needs_another_gp(void)
+{
+ return *rcu_preempt_ctrlblk.rcb.curtail != NULL;
+}
+
+/*
+ * Return true if a preemptible-RCU grace period is in progress.
+ * The caller must disable hardirqs.
+ */
+static int rcu_preempt_gp_in_progress(void)
+{
+ return rcu_preempt_ctrlblk.completed != rcu_preempt_ctrlblk.gpnum;
+}
+
+/*
+ * Record a preemptible-RCU quiescent state for the specified CPU. Note
+ * that this just means that the task currently running on the CPU is
+ * in a quiescent state. There might be any number of tasks blocked
+ * while in an RCU read-side critical section.
+ *
+ * Unlike the other rcu_*_qs() functions, callers of this function
+ * must disable irqs in order to protect the assignment to
+ * ->rcu_read_unlock_special.
+ *
+ * Because this is a single-CPU implementation, the only way a grace
+ * period can end is if the CPU is in a quiescent state. The reason is
+ * that a blocked preemptible-RCU reader can exit its critical section
+ * only if the CPU is running it at the time. Therefore, when the
+ * last task blocking the current grace period exits its RCU read-side
+ * critical section, neither the CPU nor blocked tasks will be stopping
+ * the current grace period. (In contrast, SMP implementations
+ * might have CPUs running in RCU read-side critical sections that
+ * block later grace periods -- but this is not possible given only
+ * one CPU.)
+ */
+static void rcu_preempt_cpu_qs(void)
+{
+ /* Record both CPU and task as having responded to current GP. */
+ rcu_preempt_ctrlblk.gpcpu = rcu_preempt_ctrlblk.gpnum;
+ current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
+
+ /*
+ * If there is no GP, or if blocked readers are still blocking GP,
+ * then there is nothing more to do.
+ */
+ if (!rcu_preempt_gp_in_progress() || rcu_preempt_blocked_readers_cgp())
+ return;
+
+ /* Advance callbacks. */
+ rcu_preempt_ctrlblk.completed = rcu_preempt_ctrlblk.gpnum;
+ rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.rcb.curtail;
+ rcu_preempt_ctrlblk.rcb.curtail = rcu_preempt_ctrlblk.nexttail;
+
+ /* If there are no blocked readers, next GP is done instantly. */
+ if (!rcu_preempt_blocked_readers_any())
+ rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.nexttail;
+
+ /* If there are done callbacks, make RCU_SOFTIRQ process them. */
+ if (*rcu_preempt_ctrlblk.rcb.donetail != NULL)
+ raise_softirq(RCU_SOFTIRQ);
+}
+
+/*
+ * Start a new RCU grace period if warranted. Hard irqs must be disabled.
+ */
+static void rcu_preempt_start_gp(void)
+{
+ if (!rcu_preempt_gp_in_progress() && rcu_preempt_needs_another_gp()) {
+
+ /* Official start of GP. */
+ rcu_preempt_ctrlblk.gpnum++;
+
+ /* Any blocked RCU readers block new GP. */
+ if (rcu_preempt_blocked_readers_any())
+ rcu_preempt_ctrlblk.gp_tasks =
+ rcu_preempt_ctrlblk.blkd_tasks.next;
+
+ /* If there is no running reader, CPU is done with GP. */
+ if (!rcu_preempt_running_reader())
+ rcu_preempt_cpu_qs();
+ }
+}
+
+/*
+ * We have entered the scheduler, and the current task might soon be
+ * context-switched away from. If this task is in an RCU read-side
+ * critical section, we will no longer be able to rely on the CPU to
+ * record that fact, so we enqueue the task on the blkd_tasks list.
+ * If the task started after the current grace period began, as recorded
+ * by ->gpcpu, we enqueue it at the beginning of the list. Otherwise,
+ * we enqueue it before the element referenced by ->gp_tasks (or at the
+ * tail if ->gp_tasks is NULL) and point ->gp_tasks at the newly added
+ * element.
+ * The task will dequeue itself when it exits the outermost enclosing
+ * RCU read-side critical section. Therefore, the current grace period
+ * cannot be permitted to complete until the ->gp_tasks pointer becomes
+ * NULL.
+ *
+ * Caller must disable preemption.
+ */
+void rcu_preempt_note_context_switch(void)
+{
+ struct task_struct *t = current;
+ unsigned long flags;
+
+ local_irq_save(flags); /* must exclude scheduler_tick(). */
+ if (rcu_preempt_running_reader() &&
+ (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
+
+ /* Possibly blocking in an RCU read-side critical section. */
+ t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
+
+ /*
+ * If this CPU has already checked in, then this task
+ * will hold up the next grace period rather than the
+ * current grace period. Queue the task accordingly.
+ * If the task is queued for the current grace period
+ * (i.e., this CPU has not yet passed through a quiescent
+ * state for the current grace period), then as long
+ * as that task remains queued, the current grace period
+ * cannot end.
+ */
+ list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks);
+ if (rcu_cpu_blocking_cur_gp())
+ rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry;
+ }
+
+ /*
+ * Either we were not in an RCU read-side critical section to
+ * begin with, or we have now recorded that critical section
+ * globally. Either way, we can now note a quiescent state
+ * for this CPU. Again, if we were in an RCU read-side critical
+ * section, and if that critical section was blocking the current
+ * grace period, then the fact that the task has been enqueued
+ * means that current grace period continues to be blocked.
+ */
+ rcu_preempt_cpu_qs();
+ local_irq_restore(flags);
+}
+
+/*
+ * Tiny-preemptible RCU implementation for rcu_read_lock().
+ * Just increment ->rcu_read_lock_nesting, shared state will be updated
+ * if we block.
+ */
+void __rcu_read_lock(void)
+{
+ current->rcu_read_lock_nesting++;
+ barrier(); /* needed if we ever invoke rcu_read_lock in rcutiny.c */
+}
+EXPORT_SYMBOL_GPL(__rcu_read_lock);
+
+/*
+ * Handle special cases during rcu_read_unlock(), such as needing to
+ * notify RCU core processing or task having blocked during the RCU
+ * read-side critical section.
+ */
+static void rcu_read_unlock_special(struct task_struct *t)
+{
+ int empty;
+ int empty_exp;
+ unsigned long flags;
+ struct list_head *np;
+ int special;
+
+ /*
+ * NMI handlers cannot block and cannot safely manipulate state.
+ * They therefore cannot possibly be special, so just leave.
+ */
+ if (in_nmi())
+ return;
+
+ local_irq_save(flags);
+
+ /*
+ * If RCU core is waiting for this CPU to exit critical section,
+ * let it know that we have done so.
+ */
+ special = t->rcu_read_unlock_special;
+ if (special & RCU_READ_UNLOCK_NEED_QS)
+ rcu_preempt_cpu_qs();
+
+ /* Hardware IRQ handlers cannot block. */
+ if (in_irq()) {
+ local_irq_restore(flags);
+ return;
+ }
+
+ /* Clean up if blocked during RCU read-side critical section. */
+ if (special & RCU_READ_UNLOCK_BLOCKED) {
+ t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;
+
+ /*
+ * Remove this task from the ->blkd_tasks list and adjust
+ * any pointers that might have been referencing it.
+ */
+ empty = !rcu_preempt_blocked_readers_cgp();
+ empty_exp = rcu_preempt_ctrlblk.exp_tasks == NULL;
+ np = t->rcu_node_entry.next;
+ if (np == &rcu_preempt_ctrlblk.blkd_tasks)
+ np = NULL;
+ list_del(&t->rcu_node_entry);
+ if (&t->rcu_node_entry == rcu_preempt_ctrlblk.gp_tasks)
+ rcu_preempt_ctrlblk.gp_tasks = np;
+ if (&t->rcu_node_entry == rcu_preempt_ctrlblk.exp_tasks)
+ rcu_preempt_ctrlblk.exp_tasks = np;
+ INIT_LIST_HEAD(&t->rcu_node_entry);
+
+ /*
+ * If this was the last task on the current list, and if
+ * we aren't waiting on the CPU, report the quiescent state
+ * and start a new grace period if needed.
+ */
+ if (!empty && !rcu_preempt_blocked_readers_cgp()) {
+ rcu_preempt_cpu_qs();
+ rcu_preempt_start_gp();
+ }
+
+ /*
+ * If this was the last task on the expedited lists,
+ * then we need to wake up the waiting task.
+ */
+ if (!empty_exp && rcu_preempt_ctrlblk.exp_tasks == NULL)
+ rcu_report_exp_done();
+ }
+ local_irq_restore(flags);
+}
+
+/*
+ * Tiny-preemptible RCU implementation for rcu_read_unlock().
+ * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
+ * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
+ * invoke rcu_read_unlock_special() to clean up after a context switch
+ * in an RCU read-side critical section and other special cases.
+ */
+void __rcu_read_unlock(void)
+{
+ struct task_struct *t = current;
+
+ barrier(); /* needed if we ever invoke rcu_read_unlock in rcutiny.c */
+ --t->rcu_read_lock_nesting;
+ barrier(); /* decrement before load of ->rcu_read_unlock_special */
+ if (t->rcu_read_lock_nesting == 0 &&
+ unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
+ rcu_read_unlock_special(t);
+#ifdef CONFIG_PROVE_LOCKING
+ WARN_ON_ONCE(t->rcu_read_lock_nesting < 0);
+#endif /* #ifdef CONFIG_PROVE_LOCKING */
+}
+EXPORT_SYMBOL_GPL(__rcu_read_unlock);
+
+/*
+ * Check for a quiescent state from the current CPU. When a task blocks,
+ * the task is recorded in the rcu_preempt_ctrlblk structure, which is
+ * checked elsewhere. This is called from the scheduling-clock interrupt.
+ *
+ * Caller must disable hard irqs.
+ */
+static void rcu_preempt_check_callbacks(void)
+{
+ struct task_struct *t = current;
+
+ if (rcu_preempt_gp_in_progress() &&
+ (!rcu_preempt_running_reader() ||
+ !rcu_cpu_blocking_cur_gp()))
+ rcu_preempt_cpu_qs();
+ if (&rcu_preempt_ctrlblk.rcb.rcucblist !=
+ rcu_preempt_ctrlblk.rcb.donetail)
+ raise_softirq(RCU_SOFTIRQ);
+ if (rcu_preempt_gp_in_progress() &&
+ rcu_cpu_blocking_cur_gp() &&
+ rcu_preempt_running_reader())
+ t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
+}
+
+/*
+ * TINY_PREEMPT_RCU has an extra callback-list tail pointer to
+ * update, so this is invoked from __rcu_process_callbacks() to
+ * handle that case. Of course, it is invoked for all flavors of
+ * RCU, but RCU callbacks can appear only on one of the lists, and
+ * neither ->nexttail nor ->donetail can possibly be NULL, so there
+ * is no need for an explicit check.
+ */
+static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
+{
+ if (rcu_preempt_ctrlblk.nexttail == rcp->donetail)
+ rcu_preempt_ctrlblk.nexttail = &rcp->rcucblist;
+}
+
+/*
+ * Process callbacks for preemptible RCU.
+ */
+static void rcu_preempt_process_callbacks(void)
+{
+ __rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb);
+}
+
+/*
+ * Queue a preemptible-RCU callback for invocation after a grace period.
+ */
+void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+{
+ unsigned long flags;
+
+ debug_rcu_head_queue(head);
+ head->func = func;
+ head->next = NULL;
+
+ local_irq_save(flags);
+ *rcu_preempt_ctrlblk.nexttail = head;
+ rcu_preempt_ctrlblk.nexttail = &head->next;
+ rcu_preempt_start_gp(); /* checks to see if GP needed. */
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(call_rcu);
+
+void rcu_barrier(void)
+{
+ struct rcu_synchronize rcu;
+
+ init_rcu_head_on_stack(&rcu.head);
+ init_completion(&rcu.completion);
+ /* Will wake me after RCU finished. */
+ call_rcu(&rcu.head, wakeme_after_rcu);
+ /* Wait for it. */
+ wait_for_completion(&rcu.completion);
+ destroy_rcu_head_on_stack(&rcu.head);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier);
+
+/*
+ * synchronize_rcu - wait until a grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full grace
+ * period has elapsed, in other words after all currently executing RCU
+ * read-side critical sections have completed. RCU read-side critical
+ * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
+ * and may be nested.
+ */
+void synchronize_rcu(void)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ if (!rcu_scheduler_active)
+ return;
+#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+ WARN_ON_ONCE(rcu_preempt_running_reader());
+ if (!rcu_preempt_blocked_readers_any())
+ return;
+
+ /* Once we get past the fastpath checks, same code as rcu_barrier(). */
+ rcu_barrier();
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu);
+
+static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
+static unsigned long sync_rcu_preempt_exp_count;
+static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
+
+/*
+ * Return non-zero if there are any tasks in RCU read-side critical
+ * sections blocking the current preemptible-RCU expedited grace period.
+ * If there is no preemptible-RCU expedited grace period currently in
+ * progress, returns zero unconditionally.
+ */
+static int rcu_preempted_readers_exp(void)
+{
+ return rcu_preempt_ctrlblk.exp_tasks != NULL;
+}
+
+/*
+ * Report the exit from RCU read-side critical section for the last task
+ * that queued itself during or before the current expedited preemptible-RCU
+ * grace period.
+ */
+static void rcu_report_exp_done(void)
+{
+ wake_up(&sync_rcu_preempt_exp_wq);
+}
+
+/*
+ * Wait for an rcu-preempt grace period, but expedite it. The basic idea
+ * is to rely on the fact that there is but one CPU, and that it is
+ * illegal for a task to invoke synchronize_rcu_expedited() while in a
+ * preemptible-RCU read-side critical section. Therefore, any such
+ * critical sections must correspond to blocked tasks, which must therefore
+ * be on the ->blkd_tasks list. So just record the current head of the
+ * list in the ->exp_tasks pointer, and wait for all tasks including and
+ * after the task pointed to by ->exp_tasks to drain.
+ */
+void synchronize_rcu_expedited(void)
+{
+ unsigned long flags;
+ struct rcu_preempt_ctrlblk *rpcp = &rcu_preempt_ctrlblk;
+ unsigned long snap;
+
+ barrier(); /* ensure prior action seen before grace period. */
+
+ WARN_ON_ONCE(rcu_preempt_running_reader());
+
+ /*
+ * Acquire lock so that there is only one preemptible RCU grace
+ * period in flight. Of course, if someone does the expedited
+ * grace period for us while we are acquiring the lock, just leave.
+ */
+ snap = sync_rcu_preempt_exp_count + 1;
+ mutex_lock(&sync_rcu_preempt_exp_mutex);
+ if (ULONG_CMP_LT(snap, sync_rcu_preempt_exp_count))
+ goto unlock_mb_ret; /* Others did our work for us. */
+
+ local_irq_save(flags);
+
+ /*
+ * All RCU readers have to already be on blkd_tasks because
+ * we cannot legally be executing in an RCU read-side critical
+ * section.
+ */
+
+ /* Snapshot current head of ->blkd_tasks list. */
+ rpcp->exp_tasks = rpcp->blkd_tasks.next;
+ if (rpcp->exp_tasks == &rpcp->blkd_tasks)
+ rpcp->exp_tasks = NULL;
+ local_irq_restore(flags);
+
+ /* Wait for tail of ->blkd_tasks list to drain. */
+ if (rcu_preempted_readers_exp())
+ wait_event(sync_rcu_preempt_exp_wq,
+ !rcu_preempted_readers_exp());
+
+ /* Clean up and exit. */
+ barrier(); /* ensure expedited GP seen before counter increment. */
+ sync_rcu_preempt_exp_count++;
+unlock_mb_ret:
+ mutex_unlock(&sync_rcu_preempt_exp_mutex);
+ barrier(); /* ensure subsequent action seen after grace period. */
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
+
+/*
+ * Does preemptible RCU need the CPU to stay out of dynticks mode?
+ */
+int rcu_preempt_needs_cpu(void)
+{
+ if (!rcu_preempt_running_reader())
+ rcu_preempt_cpu_qs();
+ return rcu_preempt_ctrlblk.rcb.rcucblist != NULL;
+}
+
+/*
+ * Check for a task exiting while in a preemptible-RCU read-side
+ * critical section, clean up if so. No need to issue warnings,
+ * as debug_check_no_locks_held() already does this if lockdep
+ * is enabled.
+ */
+void exit_rcu(void)
+{
+ struct task_struct *t = current;
+
+ if (t->rcu_read_lock_nesting == 0)
+ return;
+ t->rcu_read_lock_nesting = 1;
+ rcu_read_unlock();
+}
+
+#else /* #ifdef CONFIG_TINY_PREEMPT_RCU */
+
+/*
+ * Because preemptible RCU does not exist, it never has any callbacks
+ * to check.
+ */
+static void rcu_preempt_check_callbacks(void)
+{
+}
+
+/*
+ * Because preemptible RCU does not exist, it never has any callbacks
+ * to remove.
+ */
+static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
+{
+}
+
+/*
+ * Because preemptible RCU does not exist, it never has any callbacks
+ * to process.
+ */
+static void rcu_preempt_process_callbacks(void)
+{
+}
+
+#endif /* #else #ifdef CONFIG_TINY_PREEMPT_RCU */
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#include <linux/kernel_stat.h>
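
The nesting counter drives the fast path: only the outermost
rcu_read_unlock() inspects ->rcu_read_unlock_special. A hedged
reader-side sketch (gp is a hypothetical RCU-protected pointer):

    rcu_read_lock();        /* ->rcu_read_lock_nesting: 0 -> 1 */
    rcu_read_lock();        /* nested:                  1 -> 2 */
    p = rcu_dereference(gp);
    /* if preempted here, the task is queued on ->blkd_tasks */
    rcu_read_unlock();      /* 2 -> 1: no special processing */
    rcu_read_unlock();      /* 1 -> 0: may call rcu_read_unlock_special() */
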
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 2e2726d..9d8e8fb 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -120,7 +120,7 @@
};
static LIST_HEAD(rcu_torture_freelist);
-static struct rcu_torture *rcu_torture_current;
+static struct rcu_torture __rcu *rcu_torture_current;
static long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
@@ -153,8 +153,10 @@
#define FULLSTOP_SHUTDOWN 1 /* System shutdown with rcutorture running. */
#define FULLSTOP_RMMOD 2 /* Normal rmmod of rcutorture. */
static int fullstop = FULLSTOP_RMMOD;
-DEFINE_MUTEX(fullstop_mutex); /* Protect fullstop transitions and spawning */
- /* of kthreads. */
+/*
+ * Protect fullstop transitions and spawning of kthreads.
+ */
+static DEFINE_MUTEX(fullstop_mutex);
/*
* Detect and respond to a system shutdown.
@@ -303,6 +305,10 @@
mdelay(longdelay_ms);
if (!(rcu_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
udelay(shortdelay_us);
+#ifdef CONFIG_PREEMPT
+ if (!preempt_count() && !(rcu_random(rrsp) % (nrealreaders * 20000)))
+ preempt_schedule(); /* No QS if preempt_disable() in effect */
+#endif
}
static void rcu_torture_read_unlock(int idx) __releases(RCU)
@@ -536,6 +542,8 @@
delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
if (!delay)
schedule_timeout_interruptible(longdelay);
+ else
+ rcu_read_delay(rrsp);
}
static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
@@ -731,7 +739,8 @@
continue;
rp->rtort_pipe_count = 0;
udelay(rcu_random(&rand) & 0x3ff);
- old_rp = rcu_torture_current;
+ old_rp = rcu_dereference_check(rcu_torture_current,
+ current == writer_task);
rp->rtort_mbtest = 1;
rcu_assign_pointer(rcu_torture_current, rp);
smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
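
The new __rcu annotation lets sparse flag raw dereferences of an
RCU-protected pointer. A hedged sketch of the general pattern, with a
hypothetical pointer and lock:

    static struct foo __rcu *global_foo;

    /* updater, holding foo_lock */
    old = rcu_dereference_protected(global_foo,
                                    lockdep_is_held(&foo_lock));
    rcu_assign_pointer(global_foo, new);

    /* reader */
    rcu_read_lock();
    p = rcu_dereference(global_foo);
    rcu_read_unlock();
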
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index d5bc439..ccdc04c 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -143,6 +143,11 @@
module_param(qhimark, int, 0);
module_param(qlowmark, int, 0);
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+int rcu_cpu_stall_suppress __read_mostly = RCU_CPU_STALL_SUPPRESS_INIT;
+module_param(rcu_cpu_stall_suppress, int, 0644);
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+
static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
static int rcu_pending(int cpu);
@@ -450,7 +455,7 @@
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
-int rcu_cpu_stall_panicking __read_mostly;
+int rcu_cpu_stall_suppress __read_mostly;
static void record_gp_stall_check_time(struct rcu_state *rsp)
{
@@ -482,8 +487,11 @@
rcu_print_task_stall(rnp);
raw_spin_unlock_irqrestore(&rnp->lock, flags);
- /* OK, time to rat on our buddy... */
-
+ /*
+ * OK, time to rat on our buddy...
+ * See Documentation/RCU/stallwarn.txt for info on how to debug
+ * RCU CPU stall warnings.
+ */
printk(KERN_ERR "INFO: %s detected stalls on CPUs/tasks: {",
rsp->name);
rcu_for_each_leaf_node(rsp, rnp) {
@@ -512,6 +520,11 @@
unsigned long flags;
struct rcu_node *rnp = rcu_get_root(rsp);
+ /*
+ * OK, time to rat on ourselves...
+ * See Documentation/RCU/stallwarn.txt for info on how to debug
+ * RCU CPU stall warnings.
+ */
printk(KERN_ERR "INFO: %s detected stall on CPU %d (t=%lu jiffies)\n",
rsp->name, smp_processor_id(), jiffies - rsp->gp_start);
trigger_all_cpu_backtrace();
@@ -530,11 +543,11 @@
long delta;
struct rcu_node *rnp;
- if (rcu_cpu_stall_panicking)
+ if (rcu_cpu_stall_suppress)
return;
- delta = jiffies - rsp->jiffies_stall;
+ delta = jiffies - ACCESS_ONCE(rsp->jiffies_stall);
rnp = rdp->mynode;
- if ((rnp->qsmask & rdp->grpmask) && delta >= 0) {
+ if ((ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && delta >= 0) {
/* We haven't checked in, so go dump stack. */
print_cpu_stall(rsp);
@@ -548,10 +561,26 @@
static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
- rcu_cpu_stall_panicking = 1;
+ rcu_cpu_stall_suppress = 1;
return NOTIFY_DONE;
}
+/**
+ * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
+ *
+ * Set the stall-warning timeout way off into the future, thus preventing
+ * any RCU CPU stall-warning messages from appearing in the current set of
+ * RCU grace periods.
+ *
+ * The caller must disable hard irqs.
+ */
+void rcu_cpu_stall_reset(void)
+{
+ rcu_sched_state.jiffies_stall = jiffies + ULONG_MAX / 2;
+ rcu_bh_state.jiffies_stall = jiffies + ULONG_MAX / 2;
+ rcu_preempt_stall_reset();
+}
+
static struct notifier_block rcu_panic_block = {
.notifier_call = rcu_panic,
};
@@ -571,6 +600,10 @@
{
}
+void rcu_cpu_stall_reset(void)
+{
+}
+
static void __init check_cpu_stall_init(void)
{
}
@@ -712,7 +745,7 @@
rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
__releases(rcu_get_root(rsp)->lock)
{
- struct rcu_data *rdp = rsp->rda[smp_processor_id()];
+ struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
struct rcu_node *rnp = rcu_get_root(rsp);
if (!cpu_needs_another_gp(rsp, rdp) || rsp->fqs_active) {
@@ -960,7 +993,7 @@
static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
{
int i;
- struct rcu_data *rdp = rsp->rda[smp_processor_id()];
+ struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
if (rdp->nxtlist == NULL)
return; /* irqs disabled, so comparison is stable. */
@@ -971,6 +1004,7 @@
for (i = 0; i < RCU_NEXT_SIZE; i++)
rdp->nxttail[i] = &rdp->nxtlist;
rsp->orphan_qlen += rdp->qlen;
+ rdp->n_cbs_orphaned += rdp->qlen;
rdp->qlen = 0;
raw_spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
}
@@ -984,7 +1018,7 @@
struct rcu_data *rdp;
raw_spin_lock_irqsave(&rsp->onofflock, flags);
- rdp = rsp->rda[smp_processor_id()];
+ rdp = this_cpu_ptr(rsp->rda);
if (rsp->orphan_cbs_list == NULL) {
raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
return;
@@ -992,6 +1026,7 @@
*rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_list;
rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_tail;
rdp->qlen += rsp->orphan_qlen;
+ rdp->n_cbs_adopted += rsp->orphan_qlen;
rsp->orphan_cbs_list = NULL;
rsp->orphan_cbs_tail = &rsp->orphan_cbs_list;
rsp->orphan_qlen = 0;
@@ -1007,7 +1042,7 @@
unsigned long flags;
unsigned long mask;
int need_report = 0;
- struct rcu_data *rdp = rsp->rda[cpu];
+ struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
struct rcu_node *rnp;
/* Exclude any attempts to start a new grace period. */
@@ -1123,6 +1158,7 @@
/* Update count, and requeue any remaining callbacks. */
rdp->qlen -= count;
+ rdp->n_cbs_invoked += count;
if (list != NULL) {
*tail = rdp->nxtlist;
rdp->nxtlist = list;
@@ -1226,7 +1262,8 @@
cpu = rnp->grplo;
bit = 1;
for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
- if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu]))
+ if ((rnp->qsmask & bit) != 0 &&
+ f(per_cpu_ptr(rsp->rda, cpu)))
mask |= bit;
}
if (mask != 0) {
@@ -1402,7 +1439,7 @@
* a quiescent state betweentimes.
*/
local_irq_save(flags);
- rdp = rsp->rda[smp_processor_id()];
+ rdp = this_cpu_ptr(rsp->rda);
rcu_process_gp_end(rsp, rdp);
check_for_new_grace_period(rsp, rdp);
@@ -1701,7 +1738,7 @@
{
unsigned long flags;
int i;
- struct rcu_data *rdp = rsp->rda[cpu];
+ struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
struct rcu_node *rnp = rcu_get_root(rsp);
/* Set up local state, ensuring consistent view of global state. */
@@ -1729,7 +1766,7 @@
{
unsigned long flags;
unsigned long mask;
- struct rcu_data *rdp = rsp->rda[cpu];
+ struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
struct rcu_node *rnp = rcu_get_root(rsp);
/* Set up local state, ensuring consistent view of global state. */
@@ -1865,7 +1902,8 @@
/*
* Helper function for rcu_init() that initializes one rcu_state structure.
*/
-static void __init rcu_init_one(struct rcu_state *rsp)
+static void __init rcu_init_one(struct rcu_state *rsp,
+ struct rcu_data __percpu *rda)
{
static char *buf[] = { "rcu_node_level_0",
"rcu_node_level_1",
@@ -1918,37 +1956,23 @@
}
}
+ rsp->rda = rda;
rnp = rsp->level[NUM_RCU_LVLS - 1];
for_each_possible_cpu(i) {
while (i > rnp->grphi)
rnp++;
- rsp->rda[i]->mynode = rnp;
+ per_cpu_ptr(rsp->rda, i)->mynode = rnp;
rcu_boot_init_percpu_data(i, rsp);
}
}
-/*
- * Helper macro for __rcu_init() and __rcu_init_preempt(). To be used
- * nowhere else! Assigns leaf node pointers into each CPU's rcu_data
- * structure.
- */
-#define RCU_INIT_FLAVOR(rsp, rcu_data) \
-do { \
- int i; \
- \
- for_each_possible_cpu(i) { \
- (rsp)->rda[i] = &per_cpu(rcu_data, i); \
- } \
- rcu_init_one(rsp); \
-} while (0)
-
void __init rcu_init(void)
{
int cpu;
rcu_bootup_announce();
- RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data);
- RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data);
+ rcu_init_one(&rcu_sched_state, &rcu_sched_data);
+ rcu_init_one(&rcu_bh_state, &rcu_bh_data);
__rcu_init_preempt();
open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
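
The recurring change in this file is the rda conversion: rsp->rda was an
NR_CPUS-sized array of pointers and is now a percpu pointer, so per-CPU
data is reached as follows:

    struct rcu_data *rdp;

    rdp = this_cpu_ptr(rsp->rda);       /* was rsp->rda[smp_processor_id()] */
    rdp = per_cpu_ptr(rsp->rda, cpu);   /* was rsp->rda[cpu] */
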
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 14c040b..91d4170 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -202,6 +202,9 @@
long qlen; /* # of queued callbacks */
long qlen_last_fqs_check;
/* qlen at last check for QS forcing */
+ unsigned long n_cbs_invoked; /* count of RCU cbs invoked. */
+ unsigned long n_cbs_orphaned; /* RCU cbs sent to orphanage. */
+ unsigned long n_cbs_adopted; /* RCU cbs adopted from orphanage. */
unsigned long n_force_qs_snap;
/* did other CPU force QS recently? */
long blimit; /* Upper limit on a processed batch */
@@ -254,19 +257,23 @@
#define RCU_STALL_DELAY_DELTA 0
#endif
-#define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ + RCU_STALL_DELAY_DELTA)
+#define RCU_SECONDS_TILL_STALL_CHECK (CONFIG_RCU_CPU_STALL_TIMEOUT * HZ + \
+ RCU_STALL_DELAY_DELTA)
/* for rsp->jiffies_stall */
-#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ + RCU_STALL_DELAY_DELTA)
+#define RCU_SECONDS_TILL_STALL_RECHECK (3 * RCU_SECONDS_TILL_STALL_CHECK + 30)
/* for rsp->jiffies_stall */
#define RCU_STALL_RAT_DELAY 2 /* Allow other CPUs time */
/* to take at least one */
/* scheduling clock irq */
/* before ratting on them. */
-#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR_RUNNABLE
+#define RCU_CPU_STALL_SUPPRESS_INIT 0
+#else
+#define RCU_CPU_STALL_SUPPRESS_INIT 1
+#endif
-#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
-#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
/*
* RCU global state, including node hierarchy. This hierarchy is
@@ -283,7 +290,7 @@
struct rcu_node *level[NUM_RCU_LVLS]; /* Hierarchy levels. */
u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */
u8 levelspread[NUM_RCU_LVLS]; /* kids/node in each level. */
- struct rcu_data *rda[NR_CPUS]; /* array of rdp pointers. */
+ struct rcu_data __percpu *rda; /* pointer to per-CPU rcu_data. */
/* The following fields are guarded by the root rcu_node's lock. */
@@ -365,6 +372,7 @@
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
static void rcu_print_detail_task_stall(struct rcu_state *rsp);
static void rcu_print_task_stall(struct rcu_node *rnp);
+static void rcu_preempt_stall_reset(void);
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
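
The stall timeouts are now derived from CONFIG_RCU_CPU_STALL_TIMEOUT
rather than hard-coded. Illustrative arithmetic, assuming a configured
timeout of 60 seconds, HZ=1000, and RCU_STALL_DELAY_DELTA=0:

    /* RCU_SECONDS_TILL_STALL_CHECK   = 60 * 1000 + 0  =  60000 jiffies */
    /* RCU_SECONDS_TILL_STALL_RECHECK = 3 * 60000 + 30 = 180030 jiffies */
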
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 0e4f420..71a4147 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -57,7 +57,7 @@
printk(KERN_INFO
"\tRCU-based detection of stalled CPUs is disabled.\n");
#endif
-#ifndef CONFIG_RCU_CPU_STALL_VERBOSE
+#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n");
#endif
#if NUM_RCU_LVL_4 != 0
@@ -154,7 +154,7 @@
(t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
/* Possibly blocking in an RCU read-side critical section. */
- rdp = rcu_preempt_state.rda[cpu];
+ rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
rnp = rdp->mynode;
raw_spin_lock_irqsave(&rnp->lock, flags);
t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
@@ -201,7 +201,7 @@
*/
void __rcu_read_lock(void)
{
- ACCESS_ONCE(current->rcu_read_lock_nesting)++;
+ current->rcu_read_lock_nesting++;
barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);
@@ -344,7 +344,9 @@
struct task_struct *t = current;
barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
- if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
+ --t->rcu_read_lock_nesting;
+ barrier(); /* decrement before load of ->rcu_read_unlock_special */
+ if (t->rcu_read_lock_nesting == 0 &&
unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
rcu_read_unlock_special(t);
#ifdef CONFIG_PROVE_LOCKING
@@ -417,6 +419,16 @@
}
}
+/*
+ * Suppress preemptible RCU's CPU stall warnings by pushing the
+ * time of the next stall-warning message comfortably far into the
+ * future.
+ */
+static void rcu_preempt_stall_reset(void)
+{
+ rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
+}
+
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
/*
@@ -546,9 +558,11 @@
*
* Control will return to the caller some time after a full grace
* period has elapsed, in other words after all currently executing RCU
- * read-side critical sections have completed. RCU read-side critical
- * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
- * and may be nested.
+ * read-side critical sections have completed. Note, however, that
+ * upon return from synchronize_rcu(), the caller might well be executing
+ * concurrently with new RCU read-side critical sections that began while
+ * synchronize_rcu() was waiting. RCU read-side critical sections are
+ * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
*/
void synchronize_rcu(void)
{
@@ -771,7 +785,7 @@
*/
static void __init __rcu_init_preempt(void)
{
- RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data);
+ rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
}
/*
@@ -865,6 +879,14 @@
{
}
+/*
+ * Because preemptible RCU does not exist, there is no need to suppress
+ * its CPU stall warnings.
+ */
+static void rcu_preempt_stall_reset(void)
+{
+}
+
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
/*
@@ -919,15 +941,6 @@
}
/*
- * In classic RCU, call_rcu() is just call_rcu_sched().
- */
-void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
-{
- call_rcu_sched(head, func);
-}
-EXPORT_SYMBOL_GPL(call_rcu);
-
-/*
* Wait for an rcu-preempt grace period, but make it happen quickly.
* But because preemptable RCU does not exist, map to rcu-sched.
*/
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index 36c95b4..d15430b 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -64,7 +64,9 @@
rdp->dynticks_fqs);
#endif /* #ifdef CONFIG_NO_HZ */
seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi);
- seq_printf(m, " ql=%ld b=%ld\n", rdp->qlen, rdp->blimit);
+ seq_printf(m, " ql=%ld b=%ld", rdp->qlen, rdp->blimit);
+ seq_printf(m, " ci=%lu co=%lu ca=%lu\n",
+ rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
}
#define PRINT_RCU_DATA(name, func, m) \
@@ -119,7 +121,9 @@
rdp->dynticks_fqs);
#endif /* #ifdef CONFIG_NO_HZ */
seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi);
- seq_printf(m, ",%ld,%ld\n", rdp->qlen, rdp->blimit);
+ seq_printf(m, ",%ld,%ld", rdp->qlen, rdp->blimit);
+ seq_printf(m, ",%lu,%lu,%lu\n",
+ rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
}
static int show_rcudata_csv(struct seq_file *m, void *unused)
@@ -128,7 +132,7 @@
#ifdef CONFIG_NO_HZ
seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\",");
#endif /* #ifdef CONFIG_NO_HZ */
- seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\"\n");
+ seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\",\"ci\",\"co\",\"ca\"\n");
#ifdef CONFIG_TREE_PREEMPT_RCU
seq_puts(m, "\"rcu_preempt:\"\n");
PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data_csv, m);
@@ -262,7 +266,7 @@
struct rcu_data *rdp;
for_each_possible_cpu(cpu) {
- rdp = rsp->rda[cpu];
+ rdp = per_cpu_ptr(rsp->rda, cpu);
if (rdp->beenonline)
print_one_rcu_pending(m, rdp);
}
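
With the three new counters, each per-CPU line of the rcudata debugfs
output gains ci=/co=/ca= fields matching the seq_printf() format above;
for example (values illustrative only):

    ... of=0 ri=1 ql=3 b=10 ci=1245 co=0 ca=0
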
diff --git a/kernel/sched.c b/kernel/sched.c
index 41541d7..d42992b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -426,9 +426,7 @@
*/
cpumask_var_t rto_mask;
atomic_t rto_count;
-#ifdef CONFIG_SMP
struct cpupri cpupri;
-#endif
};
/*
@@ -437,7 +435,7 @@
*/
static struct root_domain def_root_domain;
-#endif
+#endif /* CONFIG_SMP */
/*
* This is the main, per-CPU runqueue data structure.
@@ -488,11 +486,12 @@
*/
unsigned long nr_uninterruptible;
- struct task_struct *curr, *idle;
+ struct task_struct *curr, *idle, *stop;
unsigned long next_balance;
struct mm_struct *prev_mm;
u64 clock;
+ u64 clock_task;
atomic_t nr_iowait;
@@ -520,6 +519,10 @@
u64 avg_idle;
#endif
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ u64 prev_irq_time;
+#endif
+
/* calc_load related fields */
unsigned long calc_load_update;
long calc_load_active;
@@ -643,10 +646,22 @@
#endif /* CONFIG_CGROUP_SCHED */
+static u64 irq_time_cpu(int cpu);
+static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time);
+
inline void update_rq_clock(struct rq *rq)
{
- if (!rq->skip_clock_update)
- rq->clock = sched_clock_cpu(cpu_of(rq));
+ if (!rq->skip_clock_update) {
+ int cpu = cpu_of(rq);
+ u64 irq_time;
+
+ rq->clock = sched_clock_cpu(cpu);
+ irq_time = irq_time_cpu(cpu);
+ if (rq->clock - irq_time > rq->clock_task)
+ rq->clock_task = rq->clock - irq_time;
+
+ sched_irq_time_avg_update(rq, irq_time);
+ }
}
/*
@@ -723,7 +738,7 @@
size_t cnt, loff_t *ppos)
{
char buf[64];
- char *cmp = buf;
+ char *cmp;
int neg = 0;
int i;
@@ -734,6 +749,7 @@
return -EFAULT;
buf[cnt] = 0;
+ cmp = strstrip(buf);
if (strncmp(buf, "NO_", 3) == 0) {
neg = 1;
@@ -741,9 +757,7 @@
}
for (i = 0; sched_feat_names[i]; i++) {
- int len = strlen(sched_feat_names[i]);
-
- if (strncmp(cmp, sched_feat_names[i], len) == 0) {
+ if (strcmp(cmp, sched_feat_names[i]) == 0) {
if (neg)
sysctl_sched_features &= ~(1UL << i);
else
@@ -1294,6 +1308,10 @@
static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
}
+
+static void sched_avg_update(struct rq *rq)
+{
+}
#endif /* CONFIG_SMP */
#if BITS_PER_LONG == 32
@@ -1836,7 +1854,7 @@
static const struct sched_class rt_sched_class;
-#define sched_class_highest (&rt_sched_class)
+#define sched_class_highest (&stop_sched_class)
#define for_each_class(class) \
for (class = sched_class_highest; class; class = class->next)
@@ -1854,12 +1872,6 @@
static void set_load_weight(struct task_struct *p)
{
- if (task_has_rt_policy(p)) {
- p->se.load.weight = 0;
- p->se.load.inv_weight = WMULT_CONST;
- return;
- }
-
/*
* SCHED_IDLE tasks get minimal weight:
*/
@@ -1913,13 +1925,132 @@
dec_nr_running(rq);
}
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+
+/*
+ * There are no locks covering percpu hardirq/softirq time.
+ * They are only modified in account_system_vtime, on the corresponding
+ * CPU with interrupts disabled, so writes are safe.
+ * They are read and saved off onto struct rq in update_rq_clock().
+ * This may result in another CPU reading this CPU's irq time and can
+ * race with irq/account_system_vtime on this CPU. We would either get the
+ * old or the new value (or a semi-updated value on 32-bit), with the side
+ * effect of accounting a slice of irq time to the wrong task when an irq
+ * is in progress while we read rq->clock. That is a worthy compromise in
+ * place of having locks on each irq in account_system_time.
+ */
+static DEFINE_PER_CPU(u64, cpu_hardirq_time);
+static DEFINE_PER_CPU(u64, cpu_softirq_time);
+
+static DEFINE_PER_CPU(u64, irq_start_time);
+static int sched_clock_irqtime;
+
+void enable_sched_clock_irqtime(void)
+{
+ sched_clock_irqtime = 1;
+}
+
+void disable_sched_clock_irqtime(void)
+{
+ sched_clock_irqtime = 0;
+}
+
+static u64 irq_time_cpu(int cpu)
+{
+ if (!sched_clock_irqtime)
+ return 0;
+
+ return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
+}
+
+void account_system_vtime(struct task_struct *curr)
+{
+ unsigned long flags;
+ int cpu;
+ u64 now, delta;
+
+ if (!sched_clock_irqtime)
+ return;
+
+ local_irq_save(flags);
+
+ cpu = smp_processor_id();
+ now = sched_clock_cpu(cpu);
+ delta = now - per_cpu(irq_start_time, cpu);
+ per_cpu(irq_start_time, cpu) = now;
+ /*
+ * We do not account for softirq time from ksoftirqd here.
+ * We want to continue accounting softirq time to the ksoftirqd thread
+ * in that case, so as not to confuse the scheduler with a special task
+ * that does not consume any time, but still wants to run.
+ */
+ if (hardirq_count())
+ per_cpu(cpu_hardirq_time, cpu) += delta;
+ else if (in_serving_softirq() && !(curr->flags & PF_KSOFTIRQD))
+ per_cpu(cpu_softirq_time, cpu) += delta;
+
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(account_system_vtime);
+
+static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time)
+{
+ if (sched_clock_irqtime && sched_feat(NONIRQ_POWER)) {
+ u64 delta_irq = curr_irq_time - rq->prev_irq_time;
+ rq->prev_irq_time = curr_irq_time;
+ sched_rt_avg_update(rq, delta_irq);
+ }
+}
+
+#else
+
+static u64 irq_time_cpu(int cpu)
+{
+ return 0;
+}
+
+static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time) { }
+
+#endif
+
#include "sched_idletask.c"
#include "sched_fair.c"
#include "sched_rt.c"
+#include "sched_stoptask.c"
#ifdef CONFIG_SCHED_DEBUG
# include "sched_debug.c"
#endif
+void sched_set_stop_task(int cpu, struct task_struct *stop)
+{
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+ struct task_struct *old_stop = cpu_rq(cpu)->stop;
+
+ if (stop) {
+ /*
+ * Make it appear like a SCHED_FIFO task; it's something
+ * userspace knows about and won't get confused about.
+ *
+ * Also, it will make PI more or less work without too
+ * much confusion -- but then, stop work should not
+ * rely on PI working anyway.
+ */
+ sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
+
+ stop->sched_class = &stop_sched_class;
+ }
+
+ cpu_rq(cpu)->stop = stop;
+
+ if (old_stop) {
+ /*
+ * Reset it back to a normal scheduling class so that
+ * it can die in pieces.
+ */
+ old_stop->sched_class = &rt_sched_class;
+ }
+}
+
/*
* __normal_prio - return the priority that is based on the static prio
*/
@@ -1999,6 +2130,9 @@
if (p->sched_class != &fair_sched_class)
return 0;
+ if (unlikely(p->policy == SCHED_IDLE))
+ return 0;
+
/*
* Buddy candidates are cache hot:
*/
@@ -2848,14 +2982,14 @@
*/
arch_start_context_switch(prev);
- if (likely(!mm)) {
+ if (!mm) {
next->active_mm = oldmm;
atomic_inc(&oldmm->mm_count);
enter_lazy_tlb(oldmm, next);
} else
switch_mm(oldmm, mm, next);
- if (likely(!prev->mm)) {
+ if (!prev->mm) {
prev->active_mm = NULL;
rq->prev_mm = oldmm;
}
@@ -3182,6 +3316,8 @@
this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
}
+
+ sched_avg_update(this_rq);
}
static void update_cpu_load_active(struct rq *this_rq)
@@ -3242,7 +3378,7 @@
if (task_current(rq, p)) {
update_rq_clock(rq);
- ns = rq->clock - p->se.exec_start;
+ ns = rq->clock_task - p->se.exec_start;
if ((s64)ns < 0)
ns = 0;
}
@@ -3391,7 +3527,7 @@
tmp = cputime_to_cputime64(cputime);
if (hardirq_count() - hardirq_offset)
cpustat->irq = cputime64_add(cpustat->irq, tmp);
- else if (softirq_count())
+ else if (in_serving_softirq())
cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
else
cpustat->system = cputime64_add(cpustat->system, tmp);
@@ -3507,9 +3643,9 @@
rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
if (total) {
- u64 temp;
+ u64 temp = rtime;
- temp = (u64)(rtime * utime);
+ temp *= utime;
do_div(temp, total);
utime = (cputime_t)temp;
} else
@@ -3540,9 +3676,9 @@
rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
if (total) {
- u64 temp;
+ u64 temp = rtime;
- temp = (u64)(rtime * cputime.utime);
+ temp *= cputime.utime;
do_div(temp, total);
utime = (cputime_t)temp;
} else
@@ -3578,7 +3714,7 @@
curr->sched_class->task_tick(rq, curr, 0);
raw_spin_unlock(&rq->lock);
- perf_event_task_tick(curr);
+ perf_event_task_tick();
#ifdef CONFIG_SMP
rq->idle_at_tick = idle_cpu(cpu);
@@ -3717,17 +3853,13 @@
return p;
}
- class = sched_class_highest;
- for ( ; ; ) {
+ for_each_class(class) {
p = class->pick_next_task(rq);
if (p)
return p;
- /*
- * Will never be NULL as the idle class always
- * returns a non-NULL p:
- */
- class = class->next;
}
+
+ BUG(); /* the idle class will always have a runnable task */
}
/*
@@ -3865,8 +3997,16 @@
/*
* Owner changed, break to re-assess state.
*/
- if (lock->owner != owner)
+ if (lock->owner != owner) {
+ /*
+ * If the lock has switched to a different owner,
+ * we likely have heavy contention. Return 0 to quit
+ * optimistic spinning and not contend further:
+ */
+ if (lock->owner)
+ return 0;
break;
+ }
/*
* Is that owner really running on that cpu?
@@ -4344,6 +4484,7 @@
rq = task_rq_lock(p, &flags);
+ trace_sched_pi_setprio(p, prio);
oldprio = p->prio;
prev_class = p->sched_class;
on_rq = p->se.on_rq;
@@ -4631,7 +4772,7 @@
}
if (user) {
- retval = security_task_setscheduler(p, policy, param);
+ retval = security_task_setscheduler(p);
if (retval)
return retval;
}
@@ -4647,6 +4788,15 @@
*/
rq = __task_rq_lock(p);
+ /*
+ * Changing the policy of the stop thread is a very bad idea.
+ */
+ if (p == rq->stop) {
+ __task_rq_unlock(rq);
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+ return -EINVAL;
+ }
+
#ifdef CONFIG_RT_GROUP_SCHED
if (user) {
/*
@@ -4873,13 +5023,13 @@
if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
goto out_unlock;
- retval = security_task_setscheduler(p, 0, NULL);
+ retval = security_task_setscheduler(p);
if (retval)
goto out_unlock;
cpuset_cpus_allowed(p, cpus_allowed);
cpumask_and(new_mask, in_mask, cpus_allowed);
- again:
+again:
retval = set_cpus_allowed_ptr(p, new_mask);
if (!retval) {
@@ -5323,7 +5473,19 @@
idle->se.exec_start = sched_clock();
cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
+ /*
+ * We're having a chicken and egg problem: even though we are
+ * holding rq->lock, the cpu isn't yet set to this cpu so the
+ * lockdep check in task_group() will fail.
+ *
+ * Similar case to sched_fork(). Alternatively, we could
+ * use task_rq_lock() here and obtain the other rq->lock.
+ *
+ * Silence PROVE_RCU
+ */
+ rcu_read_lock();
__set_task_cpu(idle, cpu);
+ rcu_read_unlock();
rq->curr = rq->idle = idle;
#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
@@ -6500,6 +6662,7 @@
cpumask_var_t nodemask;
cpumask_var_t this_sibling_map;
cpumask_var_t this_core_map;
+ cpumask_var_t this_book_map;
cpumask_var_t send_covered;
cpumask_var_t tmpmask;
struct sched_group **sched_group_nodes;
@@ -6511,6 +6674,7 @@
sa_rootdomain,
sa_tmpmask,
sa_send_covered,
+ sa_this_book_map,
sa_this_core_map,
sa_this_sibling_map,
sa_nodemask,
@@ -6546,31 +6710,48 @@
#ifdef CONFIG_SCHED_MC
static DEFINE_PER_CPU(struct static_sched_domain, core_domains);
static DEFINE_PER_CPU(struct static_sched_group, sched_group_core);
-#endif /* CONFIG_SCHED_MC */
-#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
static int
cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
struct sched_group **sg, struct cpumask *mask)
{
int group;
-
+#ifdef CONFIG_SCHED_SMT
cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
group = cpumask_first(mask);
+#else
+ group = cpu;
+#endif
if (sg)
*sg = &per_cpu(sched_group_core, group).sg;
return group;
}
-#elif defined(CONFIG_SCHED_MC)
+#endif /* CONFIG_SCHED_MC */
+
+/*
+ * book sched-domains:
+ */
+#ifdef CONFIG_SCHED_BOOK
+static DEFINE_PER_CPU(struct static_sched_domain, book_domains);
+static DEFINE_PER_CPU(struct static_sched_group, sched_group_book);
+
static int
-cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
- struct sched_group **sg, struct cpumask *unused)
+cpu_to_book_group(int cpu, const struct cpumask *cpu_map,
+ struct sched_group **sg, struct cpumask *mask)
{
- if (sg)
- *sg = &per_cpu(sched_group_core, cpu).sg;
- return cpu;
-}
+ int group = cpu;
+#ifdef CONFIG_SCHED_MC
+ cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
+ group = cpumask_first(mask);
+#elif defined(CONFIG_SCHED_SMT)
+ cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
+ group = cpumask_first(mask);
#endif
+ if (sg)
+ *sg = &per_cpu(sched_group_book, group).sg;
+ return group;
+}
+#endif /* CONFIG_SCHED_BOOK */
static DEFINE_PER_CPU(struct static_sched_domain, phys_domains);
static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys);
@@ -6580,7 +6761,10 @@
struct sched_group **sg, struct cpumask *mask)
{
int group;
-#ifdef CONFIG_SCHED_MC
+#ifdef CONFIG_SCHED_BOOK
+ cpumask_and(mask, cpu_book_mask(cpu), cpu_map);
+ group = cpumask_first(mask);
+#elif defined(CONFIG_SCHED_MC)
cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
group = cpumask_first(mask);
#elif defined(CONFIG_SCHED_SMT)
@@ -6841,6 +7025,9 @@
#ifdef CONFIG_SCHED_MC
SD_INIT_FUNC(MC)
#endif
+#ifdef CONFIG_SCHED_BOOK
+ SD_INIT_FUNC(BOOK)
+#endif
static int default_relax_domain_level = -1;
@@ -6890,6 +7077,8 @@
free_cpumask_var(d->tmpmask); /* fall through */
case sa_send_covered:
free_cpumask_var(d->send_covered); /* fall through */
+ case sa_this_book_map:
+ free_cpumask_var(d->this_book_map); /* fall through */
case sa_this_core_map:
free_cpumask_var(d->this_core_map); /* fall through */
case sa_this_sibling_map:
@@ -6936,8 +7125,10 @@
return sa_nodemask;
if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL))
return sa_this_sibling_map;
- if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
+ if (!alloc_cpumask_var(&d->this_book_map, GFP_KERNEL))
return sa_this_core_map;
+ if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
+ return sa_this_book_map;
if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL))
return sa_send_covered;
d->rd = alloc_rootdomain();
@@ -6995,6 +7186,23 @@
return sd;
}
+static struct sched_domain *__build_book_sched_domain(struct s_data *d,
+ const struct cpumask *cpu_map, struct sched_domain_attr *attr,
+ struct sched_domain *parent, int i)
+{
+ struct sched_domain *sd = parent;
+#ifdef CONFIG_SCHED_BOOK
+ sd = &per_cpu(book_domains, i).sd;
+ SD_INIT(sd, BOOK);
+ set_domain_attribute(sd, attr);
+ cpumask_and(sched_domain_span(sd), cpu_map, cpu_book_mask(i));
+ sd->parent = parent;
+ parent->child = sd;
+ cpu_to_book_group(i, cpu_map, &sd->groups, d->tmpmask);
+#endif
+ return sd;
+}
+
static struct sched_domain *__build_mc_sched_domain(struct s_data *d,
const struct cpumask *cpu_map, struct sched_domain_attr *attr,
struct sched_domain *parent, int i)
@@ -7052,6 +7260,15 @@
d->send_covered, d->tmpmask);
break;
#endif
+#ifdef CONFIG_SCHED_BOOK
+ case SD_LV_BOOK: /* set up book groups */
+ cpumask_and(d->this_book_map, cpu_map, cpu_book_mask(cpu));
+ if (cpu == cpumask_first(d->this_book_map))
+ init_sched_build_groups(d->this_book_map, cpu_map,
+ &cpu_to_book_group,
+ d->send_covered, d->tmpmask);
+ break;
+#endif
case SD_LV_CPU: /* set up physical groups */
cpumask_and(d->nodemask, cpumask_of_node(cpu), cpu_map);
if (!cpumask_empty(d->nodemask))
@@ -7099,12 +7316,14 @@
sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i);
+ sd = __build_book_sched_domain(&d, cpu_map, attr, sd, i);
sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i);
sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i);
}
for_each_cpu(i, cpu_map) {
build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i);
+ build_sched_groups(&d, SD_LV_BOOK, cpu_map, i);
build_sched_groups(&d, SD_LV_MC, cpu_map, i);
}
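
Taken together, these calls build the per-CPU domain chain with the new book level slotted between the physical and multi-core levels. A summary sketch, assuming CONFIG_SCHED_SMT, CONFIG_SCHED_MC and CONFIG_SCHED_BOOK are all enabled:

    /*
     * NUMA -> CPU (physical) -> BOOK -> MC (cores) -> SMT (threads)
     *
     * Each __build_*_sched_domain() call links its level in as the
     * child of the previous one via sd->parent and parent->child.
     */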
@@ -7135,6 +7354,12 @@
init_sched_groups_power(i, sd);
}
#endif
+#ifdef CONFIG_SCHED_BOOK
+ for_each_cpu(i, cpu_map) {
+ sd = &per_cpu(book_domains, i).sd;
+ init_sched_groups_power(i, sd);
+ }
+#endif
for_each_cpu(i, cpu_map) {
sd = &per_cpu(phys_domains, i).sd;
@@ -7160,6 +7385,8 @@
sd = &per_cpu(cpu_domains, i).sd;
#elif defined(CONFIG_SCHED_MC)
sd = &per_cpu(core_domains, i).sd;
+#elif defined(CONFIG_SCHED_BOOK)
+ sd = &per_cpu(book_domains, i).sd;
#else
sd = &per_cpu(phys_domains, i).sd;
#endif
@@ -8064,9 +8291,9 @@
return 1;
- err_free_rq:
+err_free_rq:
kfree(cfs_rq);
- err:
+err:
return 0;
}
@@ -8154,9 +8381,9 @@
return 1;
- err_free_rq:
+err_free_rq:
kfree(rt_rq);
- err:
+err:
return 0;
}
@@ -8514,7 +8741,7 @@
raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
- unlock:
+unlock:
read_unlock(&tasklist_lock);
mutex_unlock(&rt_constraints_mutex);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 806d1b2..933f3d1 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -25,7 +25,7 @@
/*
* Targeted preemption latency for CPU-bound tasks:
- * (default: 5ms * (1 + ilog(ncpus)), units: nanoseconds)
+ * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
*
* NOTE: this latency value is not the same as the concept of
* 'timeslice length' - timeslices in CFS are of variable length
@@ -52,15 +52,15 @@
/*
* Minimal preemption granularity for CPU-bound tasks:
- * (default: 2 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
*/
-unsigned int sysctl_sched_min_granularity = 2000000ULL;
-unsigned int normalized_sysctl_sched_min_granularity = 2000000ULL;
+unsigned int sysctl_sched_min_granularity = 750000ULL;
+unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
/*
* is kept at sysctl_sched_latency / sysctl_sched_min_granularity
*/
-static unsigned int sched_nr_latency = 3;
+static unsigned int sched_nr_latency = 8;
/*
* After fork, child runs first. If set to 0 (default) then
@@ -519,7 +519,7 @@
static void update_curr(struct cfs_rq *cfs_rq)
{
struct sched_entity *curr = cfs_rq->curr;
- u64 now = rq_of(cfs_rq)->clock;
+ u64 now = rq_of(cfs_rq)->clock_task;
unsigned long delta_exec;
if (unlikely(!curr))
@@ -602,7 +602,7 @@
/*
* We are starting a new run period:
*/
- se->exec_start = rq_of(cfs_rq)->clock;
+ se->exec_start = rq_of(cfs_rq)->clock_task;
}
/**************************************************
@@ -1313,7 +1313,7 @@
find_idlest_group(struct sched_domain *sd, struct task_struct *p,
int this_cpu, int load_idx)
{
- struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
+ struct sched_group *idlest = NULL, *group = sd->groups;
unsigned long min_load = ULONG_MAX, this_load = 0;
int imbalance = 100 + (sd->imbalance_pct-100)/2;
@@ -1348,7 +1348,6 @@
if (local_group) {
this_load = avg_load;
- this = group;
} else if (avg_load < min_load) {
min_load = avg_load;
idlest = group;
@@ -1765,6 +1764,10 @@
set_task_cpu(p, this_cpu);
activate_task(this_rq, p, 0);
check_preempt_curr(this_rq, p, 0);
+
+ /* re-arm NEWIDLE balancing when moving tasks */
+ src_rq->avg_idle = this_rq->avg_idle = 2*sysctl_sched_migration_cost;
+ this_rq->idle_stamp = 0;
}
/*
@@ -1799,7 +1802,7 @@
* 2) too many balance attempts have failed.
*/
- tsk_cache_hot = task_hot(p, rq->clock, sd);
+ tsk_cache_hot = task_hot(p, rq->clock_task, sd);
if (!tsk_cache_hot ||
sd->nr_balance_failed > sd->cache_nice_tries) {
#ifdef CONFIG_SCHEDSTATS
@@ -2031,12 +2034,14 @@
unsigned long this_load;
unsigned long this_load_per_task;
unsigned long this_nr_running;
+ unsigned long this_has_capacity;
/* Statistics of the busiest group */
unsigned long max_load;
unsigned long busiest_load_per_task;
unsigned long busiest_nr_running;
unsigned long busiest_group_capacity;
+ unsigned long busiest_has_capacity;
int group_imb; /* Is there imbalance in this sd */
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
@@ -2059,6 +2064,7 @@
unsigned long sum_weighted_load; /* Weighted load of group's tasks */
unsigned long group_capacity;
int group_imb; /* Is there an imbalance in the group ? */
+ int group_has_capacity; /* Is there extra capacity in the group? */
};
/**
@@ -2268,10 +2274,14 @@
struct rq *rq = cpu_rq(cpu);
u64 total, available;
- sched_avg_update(rq);
-
total = sched_avg_period() + (rq->clock - rq->age_stamp);
- available = total - rq->rt_avg;
+
+ if (unlikely(total < rq->rt_avg)) {
+ /* Ensures that power won't end up being negative */
+ available = 0;
+ } else {
+ available = total - rq->rt_avg;
+ }
if (unlikely((s64)total < SCHED_LOAD_SCALE))
total = SCHED_LOAD_SCALE;
@@ -2381,7 +2391,7 @@
int local_group, const struct cpumask *cpus,
int *balance, struct sg_lb_stats *sgs)
{
- unsigned long load, max_cpu_load, min_cpu_load;
+ unsigned long load, max_cpu_load, min_cpu_load, max_nr_running;
int i;
unsigned int balance_cpu = -1, first_idle_cpu = 0;
unsigned long avg_load_per_task = 0;
@@ -2392,6 +2402,7 @@
/* Tally up the load of all CPUs in the group */
max_cpu_load = 0;
min_cpu_load = ~0UL;
+ max_nr_running = 0;
for_each_cpu_and(i, sched_group_cpus(group), cpus) {
struct rq *rq = cpu_rq(i);
@@ -2409,8 +2420,10 @@
load = target_load(i, load_idx);
} else {
load = source_load(i, load_idx);
- if (load > max_cpu_load)
+ if (load > max_cpu_load) {
max_cpu_load = load;
+ max_nr_running = rq->nr_running;
+ }
if (min_cpu_load > load)
min_cpu_load = load;
}
@@ -2450,13 +2463,15 @@
if (sgs->sum_nr_running)
avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
- if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
+ if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task && max_nr_running > 1)
sgs->group_imb = 1;
- sgs->group_capacity =
- DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
+ sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
if (!sgs->group_capacity)
sgs->group_capacity = fix_small_capacity(sd, group);
+
+ if (sgs->group_capacity > sgs->sum_nr_running)
+ sgs->group_has_capacity = 1;
}
/**
@@ -2545,9 +2560,14 @@
/*
* In case the child domain prefers tasks go to siblings
* first, lower the sg capacity to one so that we'll try
- * and move all the excess tasks away.
+ * and move all the excess tasks away. We lower the capacity
+ * of a group only if the local group has the capacity to fit
+ * these excess tasks, i.e. nr_running < group_capacity. The
+ * extra check prevents the case where you always pull from the
+ * heaviest group when it is already under-utilized (possible
+ * with a large weight task outweighs the tasks on the system).
*/
- if (prefer_sibling)
+ if (prefer_sibling && !local_group && sds->this_has_capacity)
sgs.group_capacity = min(sgs.group_capacity, 1UL);
if (local_group) {
@@ -2555,12 +2575,14 @@
sds->this = sg;
sds->this_nr_running = sgs.sum_nr_running;
sds->this_load_per_task = sgs.sum_weighted_load;
+ sds->this_has_capacity = sgs.group_has_capacity;
} else if (update_sd_pick_busiest(sd, sds, sg, &sgs, this_cpu)) {
sds->max_load = sgs.avg_load;
sds->busiest = sg;
sds->busiest_nr_running = sgs.sum_nr_running;
sds->busiest_group_capacity = sgs.group_capacity;
sds->busiest_load_per_task = sgs.sum_weighted_load;
+ sds->busiest_has_capacity = sgs.group_has_capacity;
sds->group_imb = sgs.group_imb;
}
@@ -2757,6 +2779,7 @@
return fix_small_imbalance(sds, this_cpu, imbalance);
}
+
/******* find_busiest_group() helpers end here *********************/
/**
@@ -2808,6 +2831,11 @@
* 4) This group is busier than the average busyness at this
* sched_domain.
* 5) The imbalance is within the specified limit.
+ *
+ * Note: when doing newidle balance, if the local group has excess
+ * capacity (i.e. nr_running < group_capacity) and the busiest group
+ * does not have any capacity, we force a load balance to pull tasks
+ * to the local group. In this case, we skip past checks 3, 4 and 5.
*/
if (!(*balance))
goto ret;
@@ -2819,6 +2847,11 @@
if (!sds.busiest || sds.busiest_nr_running == 0)
goto out_balanced;
+ /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
+ if (idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
+ !sds.busiest_has_capacity)
+ goto force_balance;
+
if (sds.this_load >= sds.max_load)
goto out_balanced;
@@ -2830,6 +2863,7 @@
if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
goto out_balanced;
+force_balance:
/* Looks like there is an imbalance. Compute it */
calculate_imbalance(&sds, this_cpu, imbalance);
return sds.busiest;
@@ -3034,7 +3068,14 @@
if (!ld_moved) {
schedstat_inc(sd, lb_failed[idle]);
- sd->nr_balance_failed++;
+ /*
+ * Increment the failure counter only on periodic balance.
+	 * We do not want newidle balance, which can be very
+	 * frequent, to pollute the failure counter and cause
+	 * excessive cache_hot migrations and active balances.
+ */
+ if (idle != CPU_NEWLY_IDLE)
+ sd->nr_balance_failed++;
if (need_active_balance(sd, sd_idle, idle, cpu_of(busiest),
this_cpu)) {
@@ -3156,10 +3197,8 @@
interval = msecs_to_jiffies(sd->balance_interval);
if (time_after(next_balance, sd->last_balance + interval))
next_balance = sd->last_balance + interval;
- if (pulled_task) {
- this_rq->idle_stamp = 0;
+ if (pulled_task)
break;
- }
}
raw_spin_lock(&this_rq->lock);
@@ -3633,7 +3672,7 @@
if (time_before(now, nohz.next_balance))
return 0;
- if (!rq->nr_running)
+ if (rq->idle_at_tick)
return 0;
first_pick_cpu = atomic_read(&nohz.first_pick_cpu);
@@ -3752,8 +3791,13 @@
raw_spin_lock_irqsave(&rq->lock, flags);
- if (unlikely(task_cpu(p) != this_cpu))
+ update_rq_clock(rq);
+
+ if (unlikely(task_cpu(p) != this_cpu)) {
+ rcu_read_lock();
__set_task_cpu(p, this_cpu);
+ rcu_read_unlock();
+ }
update_curr(cfs_rq);
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index 83c66e8..185f920 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -61,3 +61,8 @@
* release the lock. Decreases scheduling overhead.
*/
SCHED_FEAT(OWNER_SPIN, 1)
+
+/*
+ * Decrement CPU power based on irq activity
+ */
+SCHED_FEAT(NONIRQ_POWER, 1)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index d10c80e..bea7d79 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -609,7 +609,7 @@
if (!task_has_rt_policy(curr))
return;
- delta_exec = rq->clock - curr->se.exec_start;
+ delta_exec = rq->clock_task - curr->se.exec_start;
if (unlikely((s64)delta_exec < 0))
delta_exec = 0;
@@ -618,7 +618,7 @@
curr->se.sum_exec_runtime += delta_exec;
account_group_exec_runtime(curr, delta_exec);
- curr->se.exec_start = rq->clock;
+ curr->se.exec_start = rq->clock_task;
cpuacct_charge(curr, delta_exec);
sched_rt_avg_update(rq, delta_exec);
@@ -960,18 +960,19 @@
* runqueue. Otherwise simply start this RT task
* on its current runqueue.
*
- * We want to avoid overloading runqueues. Even if
- * the RT task is of higher priority than the current RT task.
- * RT tasks behave differently than other tasks. If
- * one gets preempted, we try to push it off to another queue.
- * So trying to keep a preempting RT task on the same
- * cache hot CPU will force the running RT task to
- * a cold CPU. So we waste all the cache for the lower
- * RT task in hopes of saving some of a RT task
- * that is just being woken and probably will have
- * cold cache anyway.
+ * We want to avoid overloading runqueues. If the woken
+	 * task is of higher priority, then it will stay on this CPU
+	 * and the lower prio task should be moved to another CPU.
+	 * Even though this will probably make the lower prio task
+	 * lose its cache, we do not want to bounce a higher prio task
+ * around just because it gave up its CPU, perhaps for a
+ * lock?
+ *
+ * For equal prio tasks, we just let the scheduler sort it out.
*/
if (unlikely(rt_task(rq->curr)) &&
+ (rq->curr->rt.nr_cpus_allowed < 2 ||
+ rq->curr->prio < p->prio) &&
(p->rt.nr_cpus_allowed > 1)) {
int cpu = find_lowest_rq(p);
@@ -1074,7 +1075,7 @@
} while (rt_rq);
p = rt_task_of(rt_se);
- p->se.exec_start = rq->clock;
+ p->se.exec_start = rq->clock_task;
return p;
}
@@ -1139,7 +1140,7 @@
for_each_leaf_rt_rq(rt_rq, rq) {
array = &rt_rq->active;
idx = sched_find_first_bit(array->bitmap);
- next_idx:
+next_idx:
if (idx >= MAX_RT_PRIO)
continue;
if (next && next->prio < idx)
@@ -1315,7 +1316,7 @@
if (!next_task)
return 0;
- retry:
+retry:
if (unlikely(next_task == rq->curr)) {
WARN_ON(1);
return 0;
@@ -1463,7 +1464,7 @@
* but possible)
*/
}
- skip:
+skip:
double_unlock_balance(this_rq, src_rq);
}
@@ -1491,7 +1492,10 @@
if (!task_running(rq, p) &&
!test_tsk_need_resched(rq->curr) &&
has_pushable_tasks(rq) &&
- p->rt.nr_cpus_allowed > 1)
+ p->rt.nr_cpus_allowed > 1 &&
+ rt_task(rq->curr) &&
+ (rq->curr->rt.nr_cpus_allowed < 2 ||
+ rq->curr->prio < p->prio))
push_rt_tasks(rq);
}
@@ -1709,7 +1713,7 @@
{
struct task_struct *p = rq->curr;
- p->se.exec_start = rq->clock;
+ p->se.exec_start = rq->clock_task;
/* The running task is never eligible for pushing */
dequeue_pushable_task(rq, p);
diff --git a/kernel/sched_stoptask.c b/kernel/sched_stoptask.c
new file mode 100644
index 0000000..45bddc0
--- /dev/null
+++ b/kernel/sched_stoptask.c
@@ -0,0 +1,108 @@
+/*
+ * stop-task scheduling class.
+ *
+ * The stop task is the highest-priority task in the system; it preempts
+ * everything and will be preempted by nothing.
+ *
+ * See kernel/stop_machine.c
+ */
+
+#ifdef CONFIG_SMP
+static int
+select_task_rq_stop(struct rq *rq, struct task_struct *p,
+ int sd_flag, int flags)
+{
+	return task_cpu(p); /* stop tasks never migrate */
+}
+#endif /* CONFIG_SMP */
+
+static void
+check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
+{
+ resched_task(rq->curr); /* we preempt everything */
+}
+
+static struct task_struct *pick_next_task_stop(struct rq *rq)
+{
+ struct task_struct *stop = rq->stop;
+
+ if (stop && stop->state == TASK_RUNNING)
+ return stop;
+
+ return NULL;
+}
+
+static void
+enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
+{
+}
+
+static void
+dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
+{
+}
+
+static void yield_task_stop(struct rq *rq)
+{
+	BUG(); /* the stop task should never yield, it's pointless. */
+}
+
+static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
+{
+}
+
+static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
+{
+}
+
+static void set_curr_task_stop(struct rq *rq)
+{
+}
+
+static void switched_to_stop(struct rq *rq, struct task_struct *p,
+ int running)
+{
+	BUG(); /* it's impossible to change to this class */
+}
+
+static void prio_changed_stop(struct rq *rq, struct task_struct *p,
+ int oldprio, int running)
+{
+	BUG(); /* how?! what priority? */
+}
+
+static unsigned int
+get_rr_interval_stop(struct rq *rq, struct task_struct *task)
+{
+ return 0;
+}
+
+/*
+ * Simple, special scheduling class for the per-CPU stop tasks:
+ */
+static const struct sched_class stop_sched_class = {
+ .next = &rt_sched_class,
+
+ .enqueue_task = enqueue_task_stop,
+ .dequeue_task = dequeue_task_stop,
+ .yield_task = yield_task_stop,
+
+ .check_preempt_curr = check_preempt_curr_stop,
+
+ .pick_next_task = pick_next_task_stop,
+ .put_prev_task = put_prev_task_stop,
+
+#ifdef CONFIG_SMP
+ .select_task_rq = select_task_rq_stop,
+#endif
+
+ .set_curr_task = set_curr_task_stop,
+ .task_tick = task_tick_stop,
+
+ .get_rr_interval = get_rr_interval_stop,
+
+ .prio_changed = prio_changed_stop,
+ .switched_to = switched_to_stop,
+
+ /* no .task_new for stop tasks */
+};
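
This class is attached to a runqueue through sched_set_stop_task(), declared extern in the kernel/stop_machine.c hunk below. A condensed sketch of the kernel/sched.c side, with details assumed from this series rather than shown in the diff:

    void sched_set_stop_task(int cpu, struct task_struct *stop)
    {
    	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
    	struct task_struct *old_stop = cpu_rq(cpu)->stop;

    	if (stop) {
    		/* Appear as SCHED_FIFO to userspace, but actually run
    		 * the stop class, which preempts everything. */
    		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
    		stop->sched_class = &stop_sched_class;
    	}

    	cpu_rq(cpu)->stop = stop;

    	if (old_stop) {
    		/* Demote the old stopper thread so it can exit. */
    		old_stop->sched_class = &rt_sched_class;
    	}
    }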
diff --git a/kernel/signal.c b/kernel/signal.c
index bded651..919562c 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2215,6 +2215,14 @@
#ifdef __ARCH_SI_TRAPNO
err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
+#ifdef BUS_MCEERR_AO
+ /*
+ * Other callers might not initialize the si_lsb field,
+	 * so check explicitly for the right codes here.
+ */
+ if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
+ err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
+#endif
break;
case __SI_CHLD:
err |= __put_user(from->si_pid, &to->si_pid);
diff --git a/kernel/smp.c b/kernel/smp.c
index 75c970c..ed6aacf 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -365,9 +365,10 @@
EXPORT_SYMBOL_GPL(smp_call_function_any);
/**
- * __smp_call_function_single(): Run a function on another CPU
+ * __smp_call_function_single(): Run a function on a specific CPU
* @cpu: The CPU to run on.
* @data: Pre-allocated and setup data structure
+ * @wait: If true, wait until function has completed on specified CPU.
*
* Like smp_call_function_single(), but allow caller to pass in a
* pre-allocated data structure. Useful for embedding @data inside
@@ -376,8 +377,10 @@
void __smp_call_function_single(int cpu, struct call_single_data *data,
int wait)
{
- csd_lock(data);
+ unsigned int this_cpu;
+ unsigned long flags;
+ this_cpu = get_cpu();
/*
* Can deadlock when called with interrupts disabled.
* We allow cpu's that are not yet online though, as no one else can
@@ -387,7 +390,15 @@
WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
&& !oops_in_progress);
- generic_exec_single(cpu, data, wait);
+ if (cpu == this_cpu) {
+ local_irq_save(flags);
+ data->func(data->info);
+ local_irq_restore(flags);
+ } else {
+ csd_lock(data);
+ generic_exec_single(cpu, data, wait);
+ }
+ put_cpu();
}
/**
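
A hypothetical caller of the updated helper, illustrating the new local-CPU short cut; my_ipi_func and target_cpu are illustrative names, and the call_single_data layout is the one this kernel already provides:

    static void my_ipi_func(void *info)
    {
    	/* Runs on the target CPU; after the change above it runs
    	 * immediately under local_irq_save() when the target is
    	 * the calling CPU. */
    }

    static struct call_single_data my_csd = {
    	.func	= my_ipi_func,
    	.info	= NULL,
    };

    	/* fire and forget (wait == 0): */
    	__smp_call_function_single(target_cpu, &my_csd, 0);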
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 07b4f1b..79ee8f1 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -77,11 +77,21 @@
}
/*
+ * preempt_count and SOFTIRQ_OFFSET usage:
+ * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
+ * softirq processing.
+ * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
+ * on local_bh_disable or local_bh_enable.
+ * This lets us distinguish between whether we are currently processing
+ * softirq and whether we just have bh disabled.
+ */
+
+/*
* This one is for softirq.c-internal use,
* where hardirqs are disabled legitimately:
*/
#ifdef CONFIG_TRACE_IRQFLAGS
-static void __local_bh_disable(unsigned long ip)
+static void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
unsigned long flags;
@@ -95,32 +105,43 @@
* We must manually increment preempt_count here and manually
* call the trace_preempt_off later.
*/
- preempt_count() += SOFTIRQ_OFFSET;
+ preempt_count() += cnt;
/*
* Were softirqs turned off above:
*/
- if (softirq_count() == SOFTIRQ_OFFSET)
+ if (softirq_count() == cnt)
trace_softirqs_off(ip);
raw_local_irq_restore(flags);
- if (preempt_count() == SOFTIRQ_OFFSET)
+ if (preempt_count() == cnt)
trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
-static inline void __local_bh_disable(unsigned long ip)
+static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
- add_preempt_count(SOFTIRQ_OFFSET);
+ add_preempt_count(cnt);
barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */
void local_bh_disable(void)
{
- __local_bh_disable((unsigned long)__builtin_return_address(0));
+ __local_bh_disable((unsigned long)__builtin_return_address(0),
+ SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(local_bh_disable);
+static void __local_bh_enable(unsigned int cnt)
+{
+ WARN_ON_ONCE(in_irq());
+ WARN_ON_ONCE(!irqs_disabled());
+
+ if (softirq_count() == cnt)
+ trace_softirqs_on((unsigned long)__builtin_return_address(0));
+ sub_preempt_count(cnt);
+}
+
/*
* Special-case - softirqs can safely be enabled in
* cond_resched_softirq(), or by __do_softirq(),
@@ -128,12 +149,7 @@
*/
void _local_bh_enable(void)
{
- WARN_ON_ONCE(in_irq());
- WARN_ON_ONCE(!irqs_disabled());
-
- if (softirq_count() == SOFTIRQ_OFFSET)
- trace_softirqs_on((unsigned long)__builtin_return_address(0));
- sub_preempt_count(SOFTIRQ_OFFSET);
+ __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);
@@ -147,13 +163,13 @@
/*
* Are softirqs going to be turned on now:
*/
- if (softirq_count() == SOFTIRQ_OFFSET)
+ if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
trace_softirqs_on(ip);
/*
* Keep preemption disabled until we are done with
* softirq processing:
*/
- sub_preempt_count(SOFTIRQ_OFFSET - 1);
+ sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);
if (unlikely(!in_interrupt() && local_softirq_pending()))
do_softirq();
@@ -198,7 +214,8 @@
pending = local_softirq_pending();
account_system_vtime(current);
- __local_bh_disable((unsigned long)__builtin_return_address(0));
+ __local_bh_disable((unsigned long)__builtin_return_address(0),
+ SOFTIRQ_OFFSET);
lockdep_softirq_enter();
cpu = smp_processor_id();
@@ -245,7 +262,7 @@
lockdep_softirq_exit();
account_system_vtime(current);
- _local_bh_enable();
+ __local_bh_enable(SOFTIRQ_OFFSET);
}
#ifndef __ARCH_HAS_DO_SOFTIRQ
@@ -279,10 +296,16 @@
rcu_irq_enter();
if (idle_cpu(cpu) && !in_interrupt()) {
- __irq_enter();
+ /*
+ * Prevent raise_softirq from needlessly waking up ksoftirqd
+ * here, as softirq will be serviced on return from interrupt.
+ */
+ local_bh_disable();
tick_check_idle(cpu);
- } else
- __irq_enter();
+ _local_bh_enable();
+ }
+
+ __irq_enter();
}
#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
@@ -696,6 +719,7 @@
{
set_current_state(TASK_INTERRUPTIBLE);
+ current->flags |= PF_KSOFTIRQD;
while (!kthread_should_stop()) {
preempt_disable();
if (!local_softirq_pending()) {
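
The accounting scheme documented above yields the following predicates; a sketch assuming the include/linux/hardirq.h definitions introduced alongside these softirq.c changes:

    #define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)

    /* bh disabled for any reason, by local_bh_disable() or servicing: */
    #define in_softirq()		(softirq_count())

    /* actually executing softirq handlers, not merely bh-disabled;
     * this is what the kernel/sched.c hunk earlier now tests: */
    #define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)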
diff --git a/kernel/srcu.c b/kernel/srcu.c
index 2980da3..c71e075 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -46,11 +46,9 @@
int __init_srcu_struct(struct srcu_struct *sp, const char *name,
struct lock_class_key *key)
{
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* Don't re-initialize a lock while it is held. */
debug_check_no_locks_freed((void *)sp, sizeof(*sp));
lockdep_init_map(&sp->dep_map, name, key, 0);
-#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 4372ccb..090c288 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -287,11 +287,12 @@
goto repeat;
}
+extern void sched_set_stop_task(int cpu, struct task_struct *stop);
+
/* manage stopper for a cpu, mostly lifted from sched migration thread mgmt */
static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
- struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
unsigned int cpu = (unsigned long)hcpu;
struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
struct task_struct *p;
@@ -304,13 +305,13 @@
cpu);
if (IS_ERR(p))
return NOTIFY_BAD;
-	sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
get_task_struct(p);
+ kthread_bind(p, cpu);
+ sched_set_stop_task(cpu, p);
stopper->thread = p;
break;
case CPU_ONLINE:
- kthread_bind(stopper->thread, cpu);
/* strictly unnecessary, as first user will wake it */
wake_up_process(stopper->thread);
/* mark enabled */
@@ -325,6 +326,7 @@
{
struct cpu_stop_work *work;
+ sched_set_stop_task(cpu, NULL);
/* kill the stopper */
kthread_stop(stopper->thread);
/* drain remaining works */
diff --git a/kernel/sys.c b/kernel/sys.c
index e9ad444..7f5a0cd 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -931,6 +931,7 @@
pgid = pid;
if (pgid < 0)
return -EINVAL;
+ rcu_read_lock();
/* From this point forward we keep holding onto the tasklist lock
* so that our parent does not change from under us. -DaveM
@@ -984,6 +985,7 @@
out:
/* All paths lead to here, thus we are safe. -DaveM */
write_unlock_irq(&tasklist_lock);
+ rcu_read_unlock();
return err;
}
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index ca38e8e..3a45c22 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1713,10 +1713,7 @@
{
sysctl_set_parent(NULL, root_table);
#ifdef CONFIG_SYSCTL_SYSCALL_CHECK
- {
- int err;
- err = sysctl_check_table(current->nsproxy, root_table);
- }
+ sysctl_check_table(current->nsproxy, root_table);
#endif
return 0;
}
@@ -2488,7 +2485,7 @@
kbuf[left] = 0;
}
- for (; left && vleft--; i++, min++, max++, first=0) {
+ for (; left && vleft--; i++, first = 0) {
unsigned long val;
if (write) {
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index 04cdcf7..10b90d8 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -143,15 +143,6 @@
if (!table->maxlen)
set_fail(&fail, table, "No maxlen");
}
- if ((table->proc_handler == proc_doulongvec_minmax) ||
- (table->proc_handler == proc_doulongvec_ms_jiffies_minmax)) {
- if (table->maxlen > sizeof (unsigned long)) {
- if (!table->extra1)
- set_fail(&fail, table, "No min");
- if (!table->extra2)
- set_fail(&fail, table, "No max");
- }
- }
#ifdef CONFIG_PROC_SYSCTL
if (table->procname && !table->proc_handler)
set_fail(&fail, table, "No proc_handler");
diff --git a/kernel/test_kprobes.c b/kernel/test_kprobes.c
index 4f10451..f8b11a2 100644
--- a/kernel/test_kprobes.c
+++ b/kernel/test_kprobes.c
@@ -115,7 +115,9 @@
int ret;
struct kprobe *kps[2] = {&kp, &kp2};
- kp.addr = 0; /* addr should be cleard for reusing kprobe. */
+	/* addr and flags should be cleared for reusing kprobe. */
+ kp.addr = NULL;
+ kp.flags = 0;
ret = register_kprobes(kps, 2);
if (ret < 0) {
printk(KERN_ERR "Kprobe smoke test failed: "
@@ -210,7 +212,9 @@
int ret;
struct jprobe *jps[2] = {&jp, &jp2};
- jp.kp.addr = 0; /* addr should be cleard for reusing kprobe. */
+	/* addr and flags should be cleared for reusing kprobe. */
+ jp.kp.addr = NULL;
+ jp.kp.flags = 0;
ret = register_jprobes(jps, 2);
if (ret < 0) {
printk(KERN_ERR "Kprobe smoke test failed: "
@@ -323,7 +327,9 @@
int ret;
struct kretprobe *rps[2] = {&rp, &rp2};
- rp.kp.addr = 0; /* addr should be cleard for reusing kprobe. */
+	/* addr and flags should be cleared for reusing kprobe. */
+ rp.kp.addr = NULL;
+ rp.kp.flags = 0;
ret = register_kretprobes(rps, 2);
if (ret < 0) {
printk(KERN_ERR "Kprobe smoke test failed: "
diff --git a/kernel/timer.c b/kernel/timer.c
index 97bf05b..68a9ae7 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -37,7 +37,7 @@
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
-#include <linux/perf_event.h>
+#include <linux/irq_work.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -1279,7 +1279,10 @@
run_local_timers();
rcu_check_callbacks(cpu, user_tick);
printk_tick();
- perf_event_do_pending();
+#ifdef CONFIG_IRQ_WORK
+ if (in_irq())
+ irq_work_run();
+#endif
scheduler_tick();
run_posix_cpu_timers(p);
}
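
The tick now drains irq_work callbacks instead of calling into perf directly. A minimal usage sketch, assuming the irq_work API (init_irq_work(), irq_work_queue()) that this merge introduces:

    #include <linux/irq_work.h>

    static void my_work_func(struct irq_work *work)
    {
    	/* Runs from update_process_times(), still in hardirq context. */
    }

    static struct irq_work my_work;

    	init_irq_work(&my_work, my_work_func);
    	irq_work_queue(&my_work);	/* NMI-safe; serviced on a later tick */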
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 538501c..e550d2ed 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -49,6 +49,11 @@
help
See Documentation/trace/ftrace-design.txt
+config HAVE_C_RECORDMCOUNT
+ bool
+ help
+ C version of recordmcount available?
+
config TRACER_MAX_TRACE
bool
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 0d88ce9..ebd80d5 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -381,12 +381,19 @@
{
struct ftrace_profile *rec = v;
char str[KSYM_SYMBOL_LEN];
+ int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- static DEFINE_MUTEX(mutex);
static struct trace_seq s;
unsigned long long avg;
unsigned long long stddev;
#endif
+ mutex_lock(&ftrace_profile_lock);
+
+ /* we raced with function_profile_reset() */
+ if (unlikely(rec->counter == 0)) {
+ ret = -EBUSY;
+ goto out;
+ }
kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
seq_printf(m, " %-30.30s %10lu", str, rec->counter);
@@ -408,7 +415,6 @@
do_div(stddev, (rec->counter - 1) * 1000);
}
- mutex_lock(&mutex);
trace_seq_init(&s);
trace_print_graph_duration(rec->time, &s);
trace_seq_puts(&s, " ");
@@ -416,11 +422,12 @@
trace_seq_puts(&s, " ");
trace_print_graph_duration(stddev, &s);
trace_print_seq(m, &s);
- mutex_unlock(&mutex);
#endif
seq_putc(m, '\n');
+out:
+ mutex_unlock(&ftrace_profile_lock);
- return 0;
+ return ret;
}
static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
@@ -877,10 +884,8 @@
FTRACE_ENABLE_CALLS = (1 << 0),
FTRACE_DISABLE_CALLS = (1 << 1),
FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
- FTRACE_ENABLE_MCOUNT = (1 << 3),
- FTRACE_DISABLE_MCOUNT = (1 << 4),
- FTRACE_START_FUNC_RET = (1 << 5),
- FTRACE_STOP_FUNC_RET = (1 << 6),
+ FTRACE_START_FUNC_RET = (1 << 3),
+ FTRACE_STOP_FUNC_RET = (1 << 4),
};
static int ftrace_filtered;
@@ -1219,8 +1224,6 @@
static void ftrace_startup_sysctl(void)
{
- int command = FTRACE_ENABLE_MCOUNT;
-
if (unlikely(ftrace_disabled))
return;
@@ -1228,23 +1231,17 @@
saved_ftrace_func = NULL;
/* ftrace_start_up is true if we want ftrace running */
if (ftrace_start_up)
- command |= FTRACE_ENABLE_CALLS;
-
- ftrace_run_update_code(command);
+ ftrace_run_update_code(FTRACE_ENABLE_CALLS);
}
static void ftrace_shutdown_sysctl(void)
{
- int command = FTRACE_DISABLE_MCOUNT;
-
if (unlikely(ftrace_disabled))
return;
/* ftrace_start_up is true if ftrace is running */
if (ftrace_start_up)
- command |= FTRACE_DISABLE_CALLS;
-
- ftrace_run_update_code(command);
+ ftrace_run_update_code(FTRACE_DISABLE_CALLS);
}
static cycle_t ftrace_update_time;
@@ -1361,24 +1358,29 @@
#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
struct ftrace_iterator {
- struct ftrace_page *pg;
- int hidx;
- int idx;
- unsigned flags;
- struct trace_parser parser;
+ loff_t pos;
+ loff_t func_pos;
+ struct ftrace_page *pg;
+ struct dyn_ftrace *func;
+ struct ftrace_func_probe *probe;
+ struct trace_parser parser;
+ int hidx;
+ int idx;
+ unsigned flags;
};
static void *
-t_hash_next(struct seq_file *m, void *v, loff_t *pos)
+t_hash_next(struct seq_file *m, loff_t *pos)
{
struct ftrace_iterator *iter = m->private;
- struct hlist_node *hnd = v;
+ struct hlist_node *hnd = NULL;
struct hlist_head *hhd;
- WARN_ON(!(iter->flags & FTRACE_ITER_HASH));
-
(*pos)++;
+ iter->pos = *pos;
+ if (iter->probe)
+ hnd = &iter->probe->node;
retry:
if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
return NULL;
@@ -1401,7 +1403,12 @@
}
}
- return hnd;
+ if (WARN_ON_ONCE(!hnd))
+ return NULL;
+
+ iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
+
+ return iter;
}
static void *t_hash_start(struct seq_file *m, loff_t *pos)
@@ -1410,26 +1417,32 @@
void *p = NULL;
loff_t l;
- if (!(iter->flags & FTRACE_ITER_HASH))
- *pos = 0;
-
- iter->flags |= FTRACE_ITER_HASH;
+ if (iter->func_pos > *pos)
+ return NULL;
iter->hidx = 0;
- for (l = 0; l <= *pos; ) {
- p = t_hash_next(m, p, &l);
+ for (l = 0; l <= (*pos - iter->func_pos); ) {
+ p = t_hash_next(m, &l);
if (!p)
break;
}
- return p;
+ if (!p)
+ return NULL;
+
+ /* Only set this if we have an item */
+ iter->flags |= FTRACE_ITER_HASH;
+
+ return iter;
}
-static int t_hash_show(struct seq_file *m, void *v)
+static int
+t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
{
struct ftrace_func_probe *rec;
- struct hlist_node *hnd = v;
- rec = hlist_entry(hnd, struct ftrace_func_probe, node);
+ rec = iter->probe;
+ if (WARN_ON_ONCE(!rec))
+ return -EIO;
if (rec->ops->print)
return rec->ops->print(m, rec->ip, rec->ops, rec->data);
@@ -1450,12 +1463,13 @@
struct dyn_ftrace *rec = NULL;
if (iter->flags & FTRACE_ITER_HASH)
- return t_hash_next(m, v, pos);
+ return t_hash_next(m, pos);
(*pos)++;
+ iter->pos = *pos;
if (iter->flags & FTRACE_ITER_PRINTALL)
- return NULL;
+ return t_hash_start(m, pos);
retry:
if (iter->idx >= iter->pg->index) {
@@ -1484,7 +1498,20 @@
}
}
- return rec;
+ if (!rec)
+ return t_hash_start(m, pos);
+
+ iter->func_pos = *pos;
+ iter->func = rec;
+
+ return iter;
+}
+
+static void reset_iter_read(struct ftrace_iterator *iter)
+{
+ iter->pos = 0;
+ iter->func_pos = 0;
+ iter->flags &= ~(FTRACE_ITER_PRINTALL & FTRACE_ITER_HASH);
}
static void *t_start(struct seq_file *m, loff_t *pos)
@@ -1495,6 +1522,12 @@
mutex_lock(&ftrace_lock);
/*
+	 * If an lseek was done, then reset and start from the beginning.
+ */
+ if (*pos < iter->pos)
+ reset_iter_read(iter);
+
+ /*
* For set_ftrace_filter reading, if we have the filter
* off, we can short cut and just print out that all
* functions are enabled.
@@ -1503,12 +1536,19 @@
if (*pos > 0)
return t_hash_start(m, pos);
iter->flags |= FTRACE_ITER_PRINTALL;
+ /* reset in case of seek/pread */
+ iter->flags &= ~FTRACE_ITER_HASH;
return iter;
}
if (iter->flags & FTRACE_ITER_HASH)
return t_hash_start(m, pos);
+ /*
+ * Unfortunately, we need to restart at ftrace_pages_start
+ * every time we let go of the ftrace_mutex. This is because
+ * those pointers can change without the lock.
+ */
iter->pg = ftrace_pages_start;
iter->idx = 0;
for (l = 0; l <= *pos; ) {
@@ -1517,10 +1557,14 @@
break;
}
- if (!p && iter->flags & FTRACE_ITER_FILTER)
- return t_hash_start(m, pos);
+ if (!p) {
+ if (iter->flags & FTRACE_ITER_FILTER)
+ return t_hash_start(m, pos);
- return p;
+ return NULL;
+ }
+
+ return iter;
}
static void t_stop(struct seq_file *m, void *p)
@@ -1531,16 +1575,18 @@
static int t_show(struct seq_file *m, void *v)
{
struct ftrace_iterator *iter = m->private;
- struct dyn_ftrace *rec = v;
+ struct dyn_ftrace *rec;
if (iter->flags & FTRACE_ITER_HASH)
- return t_hash_show(m, v);
+ return t_hash_show(m, iter);
if (iter->flags & FTRACE_ITER_PRINTALL) {
seq_printf(m, "#### all functions enabled ####\n");
return 0;
}
+ rec = iter->func;
+
if (!rec)
return 0;
@@ -1592,8 +1638,8 @@
ret = ftrace_avail_open(inode, file);
if (!ret) {
- m = (struct seq_file *)file->private_data;
- iter = (struct ftrace_iterator *)m->private;
+ m = file->private_data;
+ iter = m->private;
iter->flags = FTRACE_ITER_FAILURES;
}
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 19cccc3..c5a632a 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -405,7 +405,7 @@
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
/* Max number of timestamps that can fit on a page */
-#define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_STAMP)
+#define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_EXTEND)
int ring_buffer_print_page_header(struct trace_seq *s)
{
@@ -2606,6 +2606,19 @@
}
EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
+/*
+ * The total number of entries in the ring buffer is the running
+ * counter of entries written into the ring buffer, minus the sum
+ * of the entries read from it and the number of entries that were
+ * overwritten.
+ */
+static inline unsigned long
+rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
+{
+ return local_read(&cpu_buffer->entries) -
+ (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
+}
+
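A worked example of the accounting above: if 100 events have been written, 10 of them overwritten and 30 already consumed, rb_num_of_entries() reports 100 - (10 + 30) = 60 entries still readable.
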
/**
* ring_buffer_entries_cpu - get the number of entries in a cpu buffer
* @buffer: The ring buffer
@@ -2614,16 +2627,13 @@
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
- unsigned long ret;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return 0;
cpu_buffer = buffer->buffers[cpu];
- ret = (local_read(&cpu_buffer->entries) - local_read(&cpu_buffer->overrun))
- - cpu_buffer->read;
- return ret;
+ return rb_num_of_entries(cpu_buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
@@ -2684,8 +2694,7 @@
/* if you care about this being correct, lock the buffer */
for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];
- entries += (local_read(&cpu_buffer->entries) -
- local_read(&cpu_buffer->overrun)) - cpu_buffer->read;
+ entries += rb_num_of_entries(cpu_buffer);
}
return entries;
@@ -2985,13 +2994,11 @@
static void rb_advance_iter(struct ring_buffer_iter *iter)
{
- struct ring_buffer *buffer;
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event;
unsigned length;
cpu_buffer = iter->cpu_buffer;
- buffer = cpu_buffer->buffer;
/*
* Check if we are at the end of the buffer.
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9ec59f5..001bcd2 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2196,7 +2196,7 @@
static int tracing_release(struct inode *inode, struct file *file)
{
- struct seq_file *m = (struct seq_file *)file->private_data;
+ struct seq_file *m = file->private_data;
struct trace_iterator *iter;
int cpu;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index d39b3c5..9021f8c 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -343,6 +343,10 @@
unsigned long ip,
unsigned long parent_ip,
unsigned long flags, int pc);
+void trace_graph_function(struct trace_array *tr,
+ unsigned long ip,
+ unsigned long parent_ip,
+ unsigned long flags, int pc);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
int trace_empty(struct trace_iterator *iter);
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 000e6e8..39c059c 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -9,7 +9,7 @@
#include <linux/kprobes.h>
#include "trace.h"
-static char *perf_trace_buf[4];
+static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];
/*
* Force it to be aligned to unsigned long to avoid misaligned accesses
@@ -24,7 +24,7 @@
static int perf_trace_event_init(struct ftrace_event_call *tp_event,
struct perf_event *p_event)
{
- struct hlist_head *list;
+ struct hlist_head __percpu *list;
int ret = -ENOMEM;
int cpu;
@@ -42,11 +42,11 @@
tp_event->perf_events = list;
if (!total_ref_count) {
- char *buf;
+ char __percpu *buf;
int i;
- for (i = 0; i < 4; i++) {
- buf = (char *)alloc_percpu(perf_trace_t);
+ for (i = 0; i < PERF_NR_CONTEXTS; i++) {
+ buf = (char __percpu *)alloc_percpu(perf_trace_t);
if (!buf)
goto fail;
@@ -65,7 +65,7 @@
if (!total_ref_count) {
int i;
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < PERF_NR_CONTEXTS; i++) {
free_percpu(perf_trace_buf[i]);
perf_trace_buf[i] = NULL;
}
@@ -91,6 +91,8 @@
tp_event->class && tp_event->class->reg &&
try_module_get(tp_event->mod)) {
ret = perf_trace_event_init(tp_event, p_event);
+ if (ret)
+ module_put(tp_event->mod);
break;
}
}
@@ -99,22 +101,26 @@
return ret;
}
-int perf_trace_enable(struct perf_event *p_event)
+int perf_trace_add(struct perf_event *p_event, int flags)
{
struct ftrace_event_call *tp_event = p_event->tp_event;
+ struct hlist_head __percpu *pcpu_list;
struct hlist_head *list;
- list = tp_event->perf_events;
- if (WARN_ON_ONCE(!list))
+ pcpu_list = tp_event->perf_events;
+ if (WARN_ON_ONCE(!pcpu_list))
return -EINVAL;
- list = this_cpu_ptr(list);
+ if (!(flags & PERF_EF_START))
+ p_event->hw.state = PERF_HES_STOPPED;
+
+ list = this_cpu_ptr(pcpu_list);
hlist_add_head_rcu(&p_event->hlist_entry, list);
return 0;
}
-void perf_trace_disable(struct perf_event *p_event)
+void perf_trace_del(struct perf_event *p_event, int flags)
{
hlist_del_rcu(&p_event->hlist_entry);
}
@@ -140,12 +146,13 @@
tp_event->perf_events = NULL;
if (!--total_ref_count) {
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < PERF_NR_CONTEXTS; i++) {
free_percpu(perf_trace_buf[i]);
perf_trace_buf[i] = NULL;
}
}
out:
+ module_put(tp_event->mod);
mutex_unlock(&event_mutex);
}
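
PERF_NR_CONTEXTS replaces the hard-coded 4 above; presumably, matching include/linux/perf_event.h in this merge, it counts one buffer per recursion context:

    #define PERF_NR_CONTEXTS	4	/* task, softirq, hardirq, NMI */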
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 4c758f1..398c0e8 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -600,21 +600,29 @@
enum {
FORMAT_HEADER = 1,
- FORMAT_PRINTFMT = 2,
+ FORMAT_FIELD_SEPERATOR = 2,
+ FORMAT_PRINTFMT = 3,
};
static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
struct ftrace_event_call *call = m->private;
struct ftrace_event_field *field;
- struct list_head *head;
+ struct list_head *common_head = &ftrace_common_fields;
+ struct list_head *head = trace_get_fields(call);
(*pos)++;
switch ((unsigned long)v) {
case FORMAT_HEADER:
- head = &ftrace_common_fields;
+ if (unlikely(list_empty(common_head)))
+ return NULL;
+ field = list_entry(common_head->prev,
+ struct ftrace_event_field, link);
+ return field;
+
+ case FORMAT_FIELD_SEPERATOR:
if (unlikely(list_empty(head)))
return NULL;
@@ -626,31 +634,10 @@
return NULL;
}
- head = trace_get_fields(call);
-
- /*
- * To separate common fields from event fields, the
- * LSB is set on the first event field. Clear it in case.
- */
- v = (void *)((unsigned long)v & ~1L);
-
field = v;
- /*
- * If this is a common field, and at the end of the list, then
- * continue with main list.
- */
- if (field->link.prev == &ftrace_common_fields) {
- if (unlikely(list_empty(head)))
- return NULL;
- field = list_entry(head->prev, struct ftrace_event_field, link);
- /* Set the LSB to notify f_show to print an extra newline */
- field = (struct ftrace_event_field *)
- ((unsigned long)field | 1);
- return field;
- }
-
- /* If we are done tell f_show to print the format */
- if (field->link.prev == head)
+ if (field->link.prev == common_head)
+ return (void *)FORMAT_FIELD_SEPERATOR;
+ else if (field->link.prev == head)
return (void *)FORMAT_PRINTFMT;
field = list_entry(field->link.prev, struct ftrace_event_field, link);
@@ -688,22 +675,16 @@
seq_printf(m, "format:\n");
return 0;
+ case FORMAT_FIELD_SEPERATOR:
+ seq_putc(m, '\n');
+ return 0;
+
case FORMAT_PRINTFMT:
seq_printf(m, "\nprint fmt: %s\n",
call->print_fmt);
return 0;
}
- /*
- * To separate common fields from event fields, the
- * LSB is set on the first event field. Clear it and
- * print a newline if it is set.
- */
- if ((unsigned long)v & 1) {
- seq_putc(m, '\n');
- v = (void *)((unsigned long)v & ~1L);
- }
-
field = v;
/*
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 6f23369..76b0598 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -15,15 +15,19 @@
#include "trace.h"
#include "trace_output.h"
+/* When set, irq functions will be ignored */
+static int ftrace_graph_skip_irqs;
+
struct fgraph_cpu_data {
pid_t last_pid;
int depth;
+ int depth_irq;
int ignore;
unsigned long enter_funcs[FTRACE_RETFUNC_DEPTH];
};
struct fgraph_data {
- struct fgraph_cpu_data *cpu_data;
+ struct fgraph_cpu_data __percpu *cpu_data;
/* Place to preserve last processed entry. */
struct ftrace_graph_ent_entry ent;
@@ -41,6 +45,7 @@
#define TRACE_GRAPH_PRINT_PROC 0x8
#define TRACE_GRAPH_PRINT_DURATION 0x10
#define TRACE_GRAPH_PRINT_ABS_TIME 0x20
+#define TRACE_GRAPH_PRINT_IRQS 0x40
static struct tracer_opt trace_opts[] = {
/* Display overruns? (for self-debug purpose) */
@@ -55,13 +60,15 @@
{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
/* Display absolute time of an entry */
{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
+ /* Display interrupts */
+ { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
{ } /* Empty entry */
};
static struct tracer_flags tracer_flags = {
/* Don't display overruns and proc by default */
.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
- TRACE_GRAPH_PRINT_DURATION,
+ TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
.opts = trace_opts
};
@@ -204,6 +211,14 @@
return 1;
}
+static inline int ftrace_graph_ignore_irqs(void)
+{
+ if (!ftrace_graph_skip_irqs)
+ return 0;
+
+ return in_irq();
+}
+
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
struct trace_array *tr = graph_array;
@@ -218,7 +233,8 @@
return 0;
/* trace it when it is-nested-in or is a function enabled. */
- if (!(trace->depth || ftrace_graph_addr(trace->func)))
+ if (!(trace->depth || ftrace_graph_addr(trace->func)) ||
+ ftrace_graph_ignore_irqs())
return 0;
local_irq_save(flags);
@@ -246,6 +262,34 @@
return trace_graph_entry(trace);
}
+static void
+__trace_graph_function(struct trace_array *tr,
+ unsigned long ip, unsigned long flags, int pc)
+{
+ u64 time = trace_clock_local();
+ struct ftrace_graph_ent ent = {
+ .func = ip,
+ .depth = 0,
+ };
+ struct ftrace_graph_ret ret = {
+ .func = ip,
+ .depth = 0,
+ .calltime = time,
+ .rettime = time,
+ };
+
+ __trace_graph_entry(tr, &ent, flags, pc);
+ __trace_graph_return(tr, &ret, flags, pc);
+}
+
+void
+trace_graph_function(struct trace_array *tr,
+ unsigned long ip, unsigned long parent_ip,
+ unsigned long flags, int pc)
+{
+ __trace_graph_function(tr, ip, flags, pc);
+}
+
void __trace_graph_return(struct trace_array *tr,
struct ftrace_graph_ret *trace,
unsigned long flags,
@@ -649,8 +693,9 @@
/* Print nsecs (we don't want to exceed 7 numbers) */
if (len < 7) {
- snprintf(nsecs_str, min(sizeof(nsecs_str), 8UL - len), "%03lu",
- nsecs_rem);
+ size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);
+
+ snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
ret = trace_seq_printf(s, ".%s", nsecs_str);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
@@ -855,6 +900,108 @@
return 0;
}
+/*
+ * Entry check for irq code
+ *
+ * returns 1 if
+ * - we are inside irq code
+ *  - we just entered irq code
+ *
+ * returns 0 if
+ *  - the funcgraph-irqs option is set
+ * - we are not inside irq code
+ */
+static int
+check_irq_entry(struct trace_iterator *iter, u32 flags,
+ unsigned long addr, int depth)
+{
+ int cpu = iter->cpu;
+ int *depth_irq;
+ struct fgraph_data *data = iter->private;
+
+ /*
+ * If we are either displaying irqs, or we got called as
+ * a graph event and private data does not exist,
+ * then we bypass the irq check.
+ */
+ if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
+ (!data))
+ return 0;
+
+ depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
+
+ /*
+ * We are inside the irq code
+ */
+ if (*depth_irq >= 0)
+ return 1;
+
+ if ((addr < (unsigned long)__irqentry_text_start) ||
+ (addr >= (unsigned long)__irqentry_text_end))
+ return 0;
+
+ /*
+ * We are entering irq code.
+ */
+ *depth_irq = depth;
+ return 1;
+}
+
+/*
+ * Return check for irq code
+ *
+ * returns 1 if
+ * - we are inside irq code
+ * - we just left irq code
+ *
+ * returns 0 if
+ *  - the funcgraph-irqs option is set
+ * - we are not inside irq code
+ */
+static int
+check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
+{
+ int cpu = iter->cpu;
+ int *depth_irq;
+ struct fgraph_data *data = iter->private;
+
+ /*
+ * If we are either displaying irqs, or we got called as
+ * a graph event and private data does not exist,
+ * then we bypass the irq check.
+ */
+ if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
+ (!data))
+ return 0;
+
+ depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
+
+ /*
+ * We are not inside the irq code.
+ */
+ if (*depth_irq == -1)
+ return 0;
+
+ /*
+ * We are inside the irq code, and this is returning entry.
+ * Let's not trace it and clear the entry depth, since
+ * we are out of irq code.
+ *
+	 * This condition ensures that we 'leave the irq code' once
+	 * we are out of the entry depth, protecting us from losing
+	 * the RETURN entry.
+ */
+ if (*depth_irq >= depth) {
+ *depth_irq = -1;
+ return 1;
+ }
+
+ /*
+ * We are inside the irq code, and this is not the entry.
+ */
+ return 1;
+}
+
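A worked example of the depth bookkeeping, with funcgraph-irqs disabled: suppose a handler inside __irqentry_text is entered while the graph is at depth 2. check_irq_entry() records *depth_irq = 2 and returns 1, so that entry and everything nested within it is suppressed; only when a return comes back at a depth of 2 or less does check_irq_return() reset *depth_irq to -1 and let normal graph output resume.
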
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
struct trace_iterator *iter, u32 flags)
@@ -865,6 +1012,9 @@
static enum print_line_t ret;
int cpu = iter->cpu;
+ if (check_irq_entry(iter, flags, call->func, call->depth))
+ return TRACE_TYPE_HANDLED;
+
if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
return TRACE_TYPE_PARTIAL_LINE;
@@ -902,6 +1052,9 @@
int ret;
int i;
+ if (check_irq_return(iter, flags, trace->depth))
+ return TRACE_TYPE_HANDLED;
+
if (data) {
struct fgraph_cpu_data *cpu_data;
int cpu = iter->cpu;
@@ -1054,7 +1207,7 @@
enum print_line_t
-print_graph_function_flags(struct trace_iterator *iter, u32 flags)
+__print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
struct ftrace_graph_ent_entry *field;
struct fgraph_data *data = iter->private;
@@ -1117,7 +1270,18 @@
static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
- return print_graph_function_flags(iter, tracer_flags.val);
+ return __print_graph_function_flags(iter, tracer_flags.val);
+}
+
+enum print_line_t print_graph_function_flags(struct trace_iterator *iter,
+ u32 flags)
+{
+ if (trace_flags & TRACE_ITER_LATENCY_FMT)
+ flags |= TRACE_GRAPH_PRINT_DURATION;
+ else
+ flags |= TRACE_GRAPH_PRINT_ABS_TIME;
+
+ return __print_graph_function_flags(iter, flags);
}
static enum print_line_t
@@ -1149,7 +1313,7 @@
seq_printf(s, "#%.*s|||| / \n", size, spaces);
}
-void print_graph_headers_flags(struct seq_file *s, u32 flags)
+static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
{
int lat = trace_flags & TRACE_ITER_LATENCY_FMT;
@@ -1190,6 +1354,23 @@
print_graph_headers_flags(s, tracer_flags.val);
}
+void print_graph_headers_flags(struct seq_file *s, u32 flags)
+{
+ struct trace_iterator *iter = s->private;
+
+ if (trace_flags & TRACE_ITER_LATENCY_FMT) {
+ /* print nothing if the buffers are empty */
+ if (trace_empty(iter))
+ return;
+
+ print_trace_header(s, iter);
+ flags |= TRACE_GRAPH_PRINT_DURATION;
+ } else
+ flags |= TRACE_GRAPH_PRINT_ABS_TIME;
+
+ __print_graph_headers_flags(s, flags);
+}
+
void graph_trace_open(struct trace_iterator *iter)
{
/* pid and depth on the last trace processed */
@@ -1210,9 +1391,12 @@
pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
+ int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
+
*pid = -1;
*depth = 0;
*ignore = 0;
+ *depth_irq = -1;
}
iter->private = data;
@@ -1235,6 +1419,14 @@
}
}
+static int func_graph_set_flag(u32 old_flags, u32 bit, int set)
+{
+ if (bit == TRACE_GRAPH_PRINT_IRQS)
+ ftrace_graph_skip_irqs = !set;
+
+ return 0;
+}
+
static struct trace_event_functions graph_functions = {
.trace = print_graph_function_event,
};
@@ -1261,6 +1453,7 @@
.print_line = print_graph_function,
.print_header = print_graph_headers,
.flags = &tracer_flags,
+ .set_flag = func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_function_graph,
#endif
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 73a6b06..5cf8c60 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -87,14 +87,22 @@
#ifdef CONFIG_FUNCTION_TRACER
/*
- * irqsoff uses its own tracer function to keep the overhead down:
+ * Prologue for the preempt and irqs off function tracers.
+ *
+ * Returns 1 if it is OK to continue, and data->disabled is
+ * incremented.
+ * 0 if the trace is to be ignored, and data->disabled
+ * is kept the same.
+ *
+ * Note, this function is also used outside this ifdef but
+ * inside the #ifdef of the function graph tracer below.
+ * This is OK, since the function graph tracer is
+ * dependent on the function tracer.
*/
-static void
-irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
+static int func_prolog_dec(struct trace_array *tr,
+ struct trace_array_cpu **data,
+ unsigned long *flags)
{
- struct trace_array *tr = irqsoff_trace;
- struct trace_array_cpu *data;
- unsigned long flags;
long disabled;
int cpu;
@@ -106,18 +114,38 @@
*/
cpu = raw_smp_processor_id();
if (likely(!per_cpu(tracing_cpu, cpu)))
- return;
+ return 0;
- local_save_flags(flags);
+ local_save_flags(*flags);
/* slight chance to get a false positive on tracing_cpu */
- if (!irqs_disabled_flags(flags))
- return;
+ if (!irqs_disabled_flags(*flags))
+ return 0;
- data = tr->data[cpu];
- disabled = atomic_inc_return(&data->disabled);
+ *data = tr->data[cpu];
+ disabled = atomic_inc_return(&(*data)->disabled);
if (likely(disabled == 1))
- trace_function(tr, ip, parent_ip, flags, preempt_count());
+ return 1;
+
+ atomic_dec(&(*data)->disabled);
+
+ return 0;
+}
+
+/*
+ * irqsoff uses its own tracer function to keep the overhead down:
+ */
+static void
+irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
+{
+ struct trace_array *tr = irqsoff_trace;
+ struct trace_array_cpu *data;
+ unsigned long flags;
+
+ if (!func_prolog_dec(tr, &data, &flags))
+ return;
+
+ trace_function(tr, ip, parent_ip, flags, preempt_count());
atomic_dec(&data->disabled);
}
@@ -155,30 +183,16 @@
struct trace_array *tr = irqsoff_trace;
struct trace_array_cpu *data;
unsigned long flags;
- long disabled;
int ret;
- int cpu;
int pc;
- cpu = raw_smp_processor_id();
- if (likely(!per_cpu(tracing_cpu, cpu)))
+ if (!func_prolog_dec(tr, &data, &flags))
return 0;
- local_save_flags(flags);
- /* slight chance to get a false positive on tracing_cpu */
- if (!irqs_disabled_flags(flags))
- return 0;
-
- data = tr->data[cpu];
- disabled = atomic_inc_return(&data->disabled);
-
- if (likely(disabled == 1)) {
- pc = preempt_count();
- ret = __trace_graph_entry(tr, trace, flags, pc);
- } else
- ret = 0;
-
+ pc = preempt_count();
+ ret = __trace_graph_entry(tr, trace, flags, pc);
atomic_dec(&data->disabled);
+
return ret;
}
@@ -187,27 +201,13 @@
struct trace_array *tr = irqsoff_trace;
struct trace_array_cpu *data;
unsigned long flags;
- long disabled;
- int cpu;
int pc;
- cpu = raw_smp_processor_id();
- if (likely(!per_cpu(tracing_cpu, cpu)))
+ if (!func_prolog_dec(tr, &data, &flags))
return;
- local_save_flags(flags);
- /* slight chance to get a false positive on tracing_cpu */
- if (!irqs_disabled_flags(flags))
- return;
-
- data = tr->data[cpu];
- disabled = atomic_inc_return(&data->disabled);
-
- if (likely(disabled == 1)) {
- pc = preempt_count();
- __trace_graph_return(tr, trace, flags, pc);
- }
-
+ pc = preempt_count();
+ __trace_graph_return(tr, trace, flags, pc);
atomic_dec(&data->disabled);
}
@@ -229,75 +229,33 @@
static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
- u32 flags = GRAPH_TRACER_FLAGS;
-
- if (trace_flags & TRACE_ITER_LATENCY_FMT)
- flags |= TRACE_GRAPH_PRINT_DURATION;
- else
- flags |= TRACE_GRAPH_PRINT_ABS_TIME;
-
/*
* In graph mode call the graph tracer output function,
* otherwise go with the TRACE_FN event handler
*/
if (is_graph())
- return print_graph_function_flags(iter, flags);
+ return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);
return TRACE_TYPE_UNHANDLED;
}
static void irqsoff_print_header(struct seq_file *s)
{
- if (is_graph()) {
- struct trace_iterator *iter = s->private;
- u32 flags = GRAPH_TRACER_FLAGS;
-
- if (trace_flags & TRACE_ITER_LATENCY_FMT) {
- /* print nothing if the buffers are empty */
- if (trace_empty(iter))
- return;
-
- print_trace_header(s, iter);
- flags |= TRACE_GRAPH_PRINT_DURATION;
- } else
- flags |= TRACE_GRAPH_PRINT_ABS_TIME;
-
- print_graph_headers_flags(s, flags);
- } else
+ if (is_graph())
+ print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
+ else
trace_default_header(s);
}
static void
-trace_graph_function(struct trace_array *tr,
- unsigned long ip, unsigned long flags, int pc)
-{
- u64 time = trace_clock_local();
- struct ftrace_graph_ent ent = {
- .func = ip,
- .depth = 0,
- };
- struct ftrace_graph_ret ret = {
- .func = ip,
- .depth = 0,
- .calltime = time,
- .rettime = time,
- };
-
- __trace_graph_entry(tr, &ent, flags, pc);
- __trace_graph_return(tr, &ret, flags, pc);
-}
-
-static void
__trace_function(struct trace_array *tr,
unsigned long ip, unsigned long parent_ip,
unsigned long flags, int pc)
{
- if (!is_graph())
+ if (is_graph())
+ trace_graph_function(tr, ip, parent_ip, flags, pc);
+ else
trace_function(tr, ip, parent_ip, flags, pc);
- else {
- trace_graph_function(tr, parent_ip, flags, pc);
- trace_graph_function(tr, ip, flags, pc);
- }
}
#else
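
The conversions above all funnel through func_prolog_dec(), which either takes a reference on data->disabled and reports 1, or leaves the counter untouched and reports 0. A userspace sketch of that take-or-bail pattern, reduced to a single atomic counter (the kernel version also filters on tracing_cpu and irqs_disabled_flags()):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int disabled;	/* models data->disabled */

static int prolog_dec(void)
{
	/* previous value 0 means we are the sole tracer in here */
	if (atomic_fetch_add(&disabled, 1) == 0)
		return 1;	/* OK to trace; counter stays elevated */
	atomic_fetch_sub(&disabled, 1);
	return 0;		/* nested/concurrent entry; ignore */
}

static void tracer_call(void)
{
	if (!prolog_dec())
		return;
	puts("trace_function(...)");	/* tracing body */
	atomic_fetch_sub(&disabled, 1);	/* epilogue, as in the patch */
}

int main(void)
{
	tracer_call();
	return 0;
}
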
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 8b27c98..544301d 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -514,8 +514,8 @@
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
struct pt_regs *regs);
-/* Check the name is good for event/group */
-static int check_event_name(const char *name)
+/* Check that the name is good for event/group/fields */
+static int is_good_name(const char *name)
{
if (!isalpha(*name) && *name != '_')
return 0;
@@ -557,7 +557,7 @@
else
tp->rp.kp.pre_handler = kprobe_dispatcher;
- if (!event || !check_event_name(event)) {
+ if (!event || !is_good_name(event)) {
ret = -EINVAL;
goto error;
}
@@ -567,7 +567,7 @@
if (!tp->call.name)
goto error;
- if (!group || !check_event_name(group)) {
+ if (!group || !is_good_name(group)) {
ret = -EINVAL;
goto error;
}
@@ -883,7 +883,7 @@
int i, ret = 0;
int is_return = 0, is_delete = 0;
char *symbol = NULL, *event = NULL, *group = NULL;
- char *arg, *tmp;
+ char *arg;
unsigned long offset = 0;
void *addr = NULL;
char buf[MAX_EVENT_NAME_LEN];
@@ -992,26 +992,36 @@
/* parse arguments */
ret = 0;
for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
+ /* Increment count for freeing args in error case */
+ tp->nr_args++;
+
/* Parse argument name */
arg = strchr(argv[i], '=');
- if (arg)
+ if (arg) {
*arg++ = '\0';
- else
+ tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
+ } else {
arg = argv[i];
+ /* If argument name is omitted, set "argN" */
+ snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
+ tp->args[i].name = kstrdup(buf, GFP_KERNEL);
+ }
- tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
if (!tp->args[i].name) {
- pr_info("Failed to allocate argument%d name '%s'.\n",
- i, argv[i]);
+ pr_info("Failed to allocate argument[%d] name.\n", i);
ret = -ENOMEM;
goto error;
}
- tmp = strchr(tp->args[i].name, ':');
- if (tmp)
- *tmp = '_'; /* convert : to _ */
+
+ if (!is_good_name(tp->args[i].name)) {
+ pr_info("Invalid argument[%d] name: %s\n",
+ i, tp->args[i].name);
+ ret = -EINVAL;
+ goto error;
+ }
if (conflict_field_name(tp->args[i].name, tp->args, i)) {
- pr_info("Argument%d name '%s' conflicts with "
+ pr_info("Argument[%d] name '%s' conflicts with "
"another field.\n", i, argv[i]);
ret = -EINVAL;
goto error;
@@ -1020,12 +1030,9 @@
/* Parse fetch argument */
ret = parse_probe_arg(arg, tp, &tp->args[i], is_return);
if (ret) {
- pr_info("Parse error at argument%d. (%d)\n", i, ret);
- kfree(tp->args[i].name);
+ pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
goto error;
}
-
- tp->nr_args++;
}
ret = register_trace_probe(tp);
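
is_good_name() above now guards argument names as well as events and groups. The diff shows only its first-character test; a hedged userspace reconstruction, where the loop over the remaining characters is an assumption consistent with the C-identifier-style names the code generates:

#include <ctype.h>
#include <stdio.h>

static int is_good_name(const char *name)
{
	if (!isalpha((unsigned char)*name) && *name != '_')
		return 0;
	while (*++name != '\0') {	/* assumed: rest must be [A-Za-z0-9_] */
		if (!isalnum((unsigned char)*name) && *name != '_')
			return 0;
	}
	return 1;
}

int main(void)
{
	/* prints 1 1 0: "bad:name" would have needed the old ':'->'_' hack */
	printf("%d %d %d\n", is_good_name("arg1"), is_good_name("_x"),
	       is_good_name("bad:name"));
	return 0;
}
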
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 4086eae..7319559 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -31,13 +31,81 @@
static arch_spinlock_t wakeup_lock =
(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
+static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);
+static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
+static void wakeup_graph_return(struct ftrace_graph_ret *trace);
static int save_lat_flag;
+#define TRACE_DISPLAY_GRAPH 1
+
+static struct tracer_opt trace_opts[] = {
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /* display latency trace as call graph */
+ { TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
+#endif
+ { } /* Empty entry */
+};
+
+static struct tracer_flags tracer_flags = {
+ .val = 0,
+ .opts = trace_opts,
+};
+
+#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)
+
#ifdef CONFIG_FUNCTION_TRACER
+
/*
- * irqsoff uses its own tracer function to keep the overhead down:
+ * Prologue for the wakeup function tracers.
+ *
+ * Returns 1 if it is OK to continue, with preemption
+ * disabled and data->disabled incremented; returns 0 if
+ * the trace is to be ignored, with preemption left
+ * enabled and data->disabled kept the same.
+ *
+ * Note, this function is also used outside this ifdef but
+ * inside the #ifdef of the function graph tracer below.
+ * This is OK, since the function graph tracer is
+ * dependent on the function tracer.
+ */
+static int
+func_prolog_preempt_disable(struct trace_array *tr,
+ struct trace_array_cpu **data,
+ int *pc)
+{
+ long disabled;
+ int cpu;
+
+ if (likely(!wakeup_task))
+ return 0;
+
+ *pc = preempt_count();
+ preempt_disable_notrace();
+
+ cpu = raw_smp_processor_id();
+ if (cpu != wakeup_current_cpu)
+ goto out_enable;
+
+ *data = tr->data[cpu];
+ disabled = atomic_inc_return(&(*data)->disabled);
+ if (unlikely(disabled != 1))
+ goto out;
+
+ return 1;
+
+out:
+ atomic_dec(&(*data)->disabled);
+
+out_enable:
+ preempt_enable_notrace();
+ return 0;
+}
+
+/*
+ * wakeup uses its own tracer function to keep the overhead down:
*/
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
@@ -45,34 +113,16 @@
struct trace_array *tr = wakeup_trace;
struct trace_array_cpu *data;
unsigned long flags;
- long disabled;
- int cpu;
int pc;
- if (likely(!wakeup_task))
+ if (!func_prolog_preempt_disable(tr, &data, &pc))
return;
- pc = preempt_count();
- preempt_disable_notrace();
-
- cpu = raw_smp_processor_id();
- if (cpu != wakeup_current_cpu)
- goto out_enable;
-
- data = tr->data[cpu];
- disabled = atomic_inc_return(&data->disabled);
- if (unlikely(disabled != 1))
- goto out;
-
local_irq_save(flags);
-
trace_function(tr, ip, parent_ip, flags, pc);
-
local_irq_restore(flags);
- out:
atomic_dec(&data->disabled);
- out_enable:
preempt_enable_notrace();
}
@@ -82,6 +132,156 @@
};
#endif /* CONFIG_FUNCTION_TRACER */
+static int start_func_tracer(int graph)
+{
+ int ret;
+
+ if (!graph)
+ ret = register_ftrace_function(&trace_ops);
+ else
+ ret = register_ftrace_graph(&wakeup_graph_return,
+ &wakeup_graph_entry);
+
+ if (!ret && tracing_is_enabled())
+ tracer_enabled = 1;
+ else
+ tracer_enabled = 0;
+
+ return ret;
+}
+
+static void stop_func_tracer(int graph)
+{
+ tracer_enabled = 0;
+
+ if (!graph)
+ unregister_ftrace_function(&trace_ops);
+ else
+ unregister_ftrace_graph();
+}
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static int wakeup_set_flag(u32 old_flags, u32 bit, int set)
+{
+
+ if (!(bit & TRACE_DISPLAY_GRAPH))
+ return -EINVAL;
+
+ if (!(is_graph() ^ set))
+ return 0;
+
+ stop_func_tracer(!set);
+
+ wakeup_reset(wakeup_trace);
+ tracing_max_latency = 0;
+
+ return start_func_tracer(set);
+}
+
+static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
+{
+ struct trace_array *tr = wakeup_trace;
+ struct trace_array_cpu *data;
+ unsigned long flags;
+ int pc, ret = 0;
+
+ if (!func_prolog_preempt_disable(tr, &data, &pc))
+ return 0;
+
+ local_save_flags(flags);
+ ret = __trace_graph_entry(tr, trace, flags, pc);
+ atomic_dec(&data->disabled);
+ preempt_enable_notrace();
+
+ return ret;
+}
+
+static void wakeup_graph_return(struct ftrace_graph_ret *trace)
+{
+ struct trace_array *tr = wakeup_trace;
+ struct trace_array_cpu *data;
+ unsigned long flags;
+ int pc;
+
+ if (!func_prolog_preempt_disable(tr, &data, &pc))
+ return;
+
+ local_save_flags(flags);
+ __trace_graph_return(tr, trace, flags, pc);
+ atomic_dec(&data->disabled);
+
+ preempt_enable_notrace();
+ return;
+}
+
+static void wakeup_trace_open(struct trace_iterator *iter)
+{
+ if (is_graph())
+ graph_trace_open(iter);
+}
+
+static void wakeup_trace_close(struct trace_iterator *iter)
+{
+ if (iter->private)
+ graph_trace_close(iter);
+}
+
+#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC)
+
+static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
+{
+ /*
+ * In graph mode call the graph tracer output function,
+ * otherwise go with the TRACE_FN event handler
+ */
+ if (is_graph())
+ return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);
+
+ return TRACE_TYPE_UNHANDLED;
+}
+
+static void wakeup_print_header(struct seq_file *s)
+{
+ if (is_graph())
+ print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
+ else
+ trace_default_header(s);
+}
+
+static void
+__trace_function(struct trace_array *tr,
+ unsigned long ip, unsigned long parent_ip,
+ unsigned long flags, int pc)
+{
+ if (is_graph())
+ trace_graph_function(tr, ip, parent_ip, flags, pc);
+ else
+ trace_function(tr, ip, parent_ip, flags, pc);
+}
+#else
+#define __trace_function trace_function
+
+static int wakeup_set_flag(u32 old_flags, u32 bit, int set)
+{
+ return -EINVAL;
+}
+
+static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
+{
+ return -1;
+}
+
+static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
+{
+ return TRACE_TYPE_UNHANDLED;
+}
+
+static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
+static void wakeup_print_header(struct seq_file *s) { }
+static void wakeup_trace_open(struct trace_iterator *iter) { }
+static void wakeup_trace_close(struct trace_iterator *iter) { }
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
/*
* Should this new latency be reported/recorded?
*/
@@ -152,7 +352,7 @@
/* The task we are waiting for is waking up */
data = wakeup_trace->data[wakeup_cpu];
- trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
+ __trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
T0 = data->preempt_timestamp;
@@ -252,7 +452,7 @@
* is not called by an assembly function (where as schedule is)
* it should be safe to use it here.
*/
- trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
+ __trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
out_locked:
arch_spin_unlock(&wakeup_lock);
@@ -303,12 +503,8 @@
*/
smp_wmb();
- register_ftrace_function(&trace_ops);
-
- if (tracing_is_enabled())
- tracer_enabled = 1;
- else
- tracer_enabled = 0;
+ if (start_func_tracer(is_graph()))
+ printk(KERN_ERR "failed to start wakeup tracer\n");
return;
fail_deprobe_wake_new:
@@ -320,7 +516,7 @@
static void stop_wakeup_tracer(struct trace_array *tr)
{
tracer_enabled = 0;
- unregister_ftrace_function(&trace_ops);
+ stop_func_tracer(is_graph());
unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
unregister_trace_sched_wakeup(probe_wakeup, NULL);
@@ -379,9 +575,15 @@
.start = wakeup_tracer_start,
.stop = wakeup_tracer_stop,
.print_max = 1,
+ .print_header = wakeup_print_header,
+ .print_line = wakeup_print_line,
+ .flags = &tracer_flags,
+ .set_flag = wakeup_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_wakeup,
#endif
+ .open = wakeup_trace_open,
+ .close = wakeup_trace_close,
.use_max_tr = 1,
};
@@ -394,9 +596,15 @@
.stop = wakeup_tracer_stop,
.wait_pipe = poll_wait_pipe,
.print_max = 1,
+ .print_header = wakeup_print_header,
+ .print_line = wakeup_print_line,
+ .flags = &tracer_flags,
+ .set_flag = wakeup_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_wakeup,
#endif
+ .open = wakeup_trace_open,
+ .close = wakeup_trace_close,
.use_max_tr = 1,
};
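
wakeup_set_flag() above treats is_graph() ^ set as the "mode actually changes" test before stopping one tracer flavour and starting the other. A userspace sketch of that toggle, with printf standing in for the register/unregister calls:

#include <stdio.h>

static int graph_mode;	/* models is_graph() */

static int set_display_graph(int set)
{
	if (!(graph_mode ^ set))
		return 0;	/* already in the requested mode, nothing to do */
	printf("stop %s tracer\n", graph_mode ? "graph" : "function");
	graph_mode = set;
	printf("start %s tracer\n", graph_mode ? "graph" : "function");
	return 0;
}

int main(void)
{
	set_display_graph(1);	/* function -> graph */
	set_display_graph(1);	/* no-op */
	set_display_graph(0);	/* graph -> function */
	return 0;
}
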
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 056468e..a6b7e0e 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -249,7 +249,7 @@
{
unsigned long addr = stack_dump_trace[i];
- return seq_printf(m, "%pF\n", (void *)addr);
+ return seq_printf(m, "%pS\n", (void *)addr);
}
static void print_disabled(struct seq_file *m)
diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
index a7cc379..209b379 100644
--- a/kernel/trace/trace_workqueue.c
+++ b/kernel/trace/trace_workqueue.c
@@ -263,6 +263,11 @@
{
int ret, cpu;
+ for_each_possible_cpu(cpu) {
+ spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
+ INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
+ }
+
ret = register_trace_workqueue_insertion(probe_workqueue_insertion, NULL);
if (ret)
goto out;
@@ -279,11 +284,6 @@
if (ret)
goto no_creation;
- for_each_possible_cpu(cpu) {
- spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
- INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
- }
-
return 0;
no_creation:
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index c77f3ec..e95ee7f 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -25,6 +25,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/jump_label.h>
extern struct tracepoint __start___tracepoints[];
extern struct tracepoint __stop___tracepoints[];
@@ -263,7 +264,13 @@
* is used.
*/
rcu_assign_pointer(elem->funcs, (*entry)->funcs);
- elem->state = active;
+ if (!elem->state && active) {
+ jump_label_enable(&elem->state);
+ elem->state = active;
+ } else if (elem->state && !active) {
+ jump_label_disable(&elem->state);
+ elem->state = active;
+ }
}
/*
@@ -277,7 +284,10 @@
if (elem->unregfunc && elem->state)
elem->unregfunc();
- elem->state = 0;
+ if (elem->state) {
+ jump_label_disable(&elem->state);
+ elem->state = 0;
+ }
rcu_assign_pointer(elem->funcs, NULL);
}
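
The tracepoint hunk above flips the jump label only on genuine 0->1 and 1->0 transitions, so enable and disable calls stay balanced. A userspace model of that state machine:

#include <stdio.h>

static int state;	/* models elem->state */

static void jump_label_enable(void)  { puts("patch site: enabled"); }
static void jump_label_disable(void) { puts("patch site: disabled"); }

static void set_active(int active)
{
	if (!state && active) {
		jump_label_enable();
		state = active;
	} else if (state && !active) {
		jump_label_disable();
		state = active;
	}
	/* same-state updates leave the patch site alone */
}

int main(void)
{
	set_active(1);
	set_active(1);	/* no patching */
	set_active(0);
	return 0;
}
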
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 613bc1f..bafba68 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -43,7 +43,6 @@
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif
-static int __read_mostly did_panic;
static int __initdata no_watchdog;
@@ -122,7 +121,7 @@
void touch_softlockup_watchdog(void)
{
- __get_cpu_var(watchdog_touch_ts) = 0;
+ __raw_get_cpu_var(watchdog_touch_ts) = 0;
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
@@ -142,7 +141,14 @@
#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
- __get_cpu_var(watchdog_nmi_touch) = true;
+ if (watchdog_enabled) {
+ unsigned cpu;
+
+ for_each_present_cpu(cpu) {
+ if (per_cpu(watchdog_nmi_touch, cpu) != true)
+ per_cpu(watchdog_nmi_touch, cpu) = true;
+ }
+ }
touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);
@@ -180,18 +186,6 @@
return 0;
}
-static int
-watchdog_panic(struct notifier_block *this, unsigned long event, void *ptr)
-{
- did_panic = 1;
-
- return NOTIFY_DONE;
-}
-
-static struct notifier_block panic_block = {
- .notifier_call = watchdog_panic,
-};
-
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static struct perf_event_attr wd_hw_attr = {
.type = PERF_TYPE_HARDWARE,
@@ -202,10 +196,13 @@
};
/* Callback function for perf event subsystem */
-void watchdog_overflow_callback(struct perf_event *event, int nmi,
+static void watchdog_overflow_callback(struct perf_event *event, int nmi,
struct perf_sample_data *data,
struct pt_regs *regs)
{
+ /* Ensure the watchdog never gets throttled */
+ event->hw.interrupts = 0;
+
if (__get_cpu_var(watchdog_nmi_touch) == true) {
__get_cpu_var(watchdog_nmi_touch) = false;
return;
@@ -361,14 +358,14 @@
/* Try to register using hardware perf events */
wd_attr = &wd_hw_attr;
wd_attr->sample_period = hw_nmi_get_sample_period();
- event = perf_event_create_kernel_counter(wd_attr, cpu, -1, watchdog_overflow_callback);
+ event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback);
if (!IS_ERR(event)) {
printk(KERN_INFO "NMI watchdog enabled, takes one hw-pmu counter.\n");
goto out_save;
}
printk(KERN_ERR "NMI watchdog failed to create perf event on cpu%i: %p\n", cpu, event);
- return -1;
+ return PTR_ERR(event);
/* success path */
out_save:
@@ -412,17 +409,19 @@
static int watchdog_enable(int cpu)
{
struct task_struct *p = per_cpu(softlockup_watchdog, cpu);
+ int err;
/* enable the perf event */
- if (watchdog_nmi_enable(cpu) != 0)
- return -1;
+ err = watchdog_nmi_enable(cpu);
+ if (err)
+ return err;
/* create the watchdog thread */
if (!p) {
p = kthread_create(watchdog, (void *)(unsigned long)cpu, "watchdog/%d", cpu);
if (IS_ERR(p)) {
printk(KERN_ERR "softlockup watchdog for %i failed\n", cpu);
- return -1;
+ return PTR_ERR(p);
}
kthread_bind(p, cpu);
per_cpu(watchdog_touch_ts, cpu) = 0;
@@ -430,6 +429,9 @@
wake_up_process(p);
}
+ /* if any cpu succeeds, watchdog is considered enabled for the system */
+ watchdog_enabled = 1;
+
return 0;
}
@@ -452,9 +454,6 @@
per_cpu(softlockup_watchdog, cpu) = NULL;
kthread_stop(p);
}
-
- /* if any cpu succeeds, watchdog is considered enabled for the system */
- watchdog_enabled = 1;
}
static void watchdog_enable_all_cpus(void)
@@ -474,6 +473,9 @@
{
int cpu;
+ if (no_watchdog)
+ return;
+
for_each_online_cpu(cpu)
watchdog_disable(cpu);
@@ -516,17 +518,16 @@
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
int hotcpu = (unsigned long)hcpu;
+ int err = 0;
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- if (watchdog_prepare_cpu(hotcpu))
- return NOTIFY_BAD;
+ err = watchdog_prepare_cpu(hotcpu);
break;
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
- if (watchdog_enable(hotcpu))
- return NOTIFY_BAD;
+ err = watchdog_enable(hotcpu);
break;
#ifdef CONFIG_HOTPLUG_CPU
case CPU_UP_CANCELED:
@@ -539,7 +540,7 @@
break;
#endif /* CONFIG_HOTPLUG_CPU */
}
- return NOTIFY_OK;
+ return notifier_from_errno(err);
}
static struct notifier_block __cpuinitdata cpu_nfb = {
@@ -555,13 +556,11 @@
return 0;
err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
- WARN_ON(err == NOTIFY_BAD);
+ WARN_ON(notifier_to_errno(err));
cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
register_cpu_notifier(&cpu_nfb);
- atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
-
return 0;
}
early_initcall(spawn_watchdog_task);
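
The notifier conversion above preserves the real error code instead of collapsing it to NOTIFY_BAD. A userspace sketch of the notifier_from_errno()/notifier_to_errno() round trip; the constants are assumed to match the kernel's NOTIFY_OK and NOTIFY_STOP_MASK:

#include <stdio.h>

#define NOTIFY_OK        0x0001
#define NOTIFY_STOP_MASK 0x8000

static int notifier_from_errno(int err)
{
	if (err)
		return NOTIFY_STOP_MASK | (NOTIFY_OK - err);
	return NOTIFY_OK;
}

static int notifier_to_errno(int ret)
{
	ret &= ~NOTIFY_STOP_MASK;
	return ret > NOTIFY_OK ? NOTIFY_OK - ret : 0;
}

int main(void)
{
	int ret = notifier_from_errno(-12);	/* e.g. -ENOMEM */
	printf("recovered err: %d\n", notifier_to_errno(ret));	/* -12 */
	printf("recovered err: %d\n",
	       notifier_to_errno(notifier_from_errno(0)));	/* 0 */
	return 0;
}
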
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8bd600c..f77afd9 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1,19 +1,26 @@
/*
- * linux/kernel/workqueue.c
+ * kernel/workqueue.c - generic async execution with shared worker pool
*
- * Generic mechanism for defining kernel helper threads for running
- * arbitrary tasks in process context.
+ * Copyright (C) 2002 Ingo Molnar
*
- * Started by Ingo Molnar, Copyright (C) 2002
- *
- * Derived from the taskqueue/keventd code by:
- *
- * David Woodhouse <dwmw2@infradead.org>
- * Andrew Morton
- * Kai Petzke <wpp@marie.physik.tu-berlin.de>
- * Theodore Ts'o <tytso@mit.edu>
+ * Derived from the taskqueue/keventd code by:
+ * David Woodhouse <dwmw2@infradead.org>
+ * Andrew Morton
+ * Kai Petzke <wpp@marie.physik.tu-berlin.de>
+ * Theodore Ts'o <tytso@mit.edu>
*
* Made to use alloc_percpu by Christoph Lameter.
+ *
+ * Copyright (C) 2010 SUSE Linux Products GmbH
+ * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
+ *
+ * This is the generic async execution mechanism. Work items are
+ * executed in process context. The worker pool is shared and
+ * automatically managed. There is one worker pool for each CPU and
+ * one extra for work items which are better served by workers not
+ * bound to any specific CPU.
+ *
+ * Please read Documentation/workqueue.txt for details.
*/
#include <linux/module.h>
@@ -90,7 +97,8 @@
/*
* Structure fields follow one of the following exclusion rules.
*
- * I: Set during initialization and read-only afterwards.
+ * I: Modifiable by initialization/destruction paths and read-only for
+ * everyone else.
*
* P: Preemption protected. Disabling preemption is enough and should
* only be modified and accessed from the local cpu.
@@ -198,7 +206,7 @@
cpumask_test_and_set_cpu((cpu), (mask))
#define mayday_clear_cpu(cpu, mask) cpumask_clear_cpu((cpu), (mask))
#define for_each_mayday_cpu(cpu, mask) for_each_cpu((cpu), (mask))
-#define alloc_mayday_mask(maskp, gfp) alloc_cpumask_var((maskp), (gfp))
+#define alloc_mayday_mask(maskp, gfp) zalloc_cpumask_var((maskp), (gfp))
#define free_mayday_mask(mask) free_cpumask_var((mask))
#else
typedef unsigned long mayday_mask_t;
@@ -943,10 +951,14 @@
struct global_cwq *gcwq;
struct cpu_workqueue_struct *cwq;
struct list_head *worklist;
+ unsigned int work_flags;
unsigned long flags;
debug_work_activate(work);
+ if (WARN_ON_ONCE(wq->flags & WQ_DYING))
+ return;
+
/* determine gcwq to use */
if (!(wq->flags & WQ_UNBOUND)) {
struct global_cwq *last_gcwq;
@@ -989,14 +1001,17 @@
BUG_ON(!list_empty(&work->entry));
cwq->nr_in_flight[cwq->work_color]++;
+ work_flags = work_color_to_flags(cwq->work_color);
if (likely(cwq->nr_active < cwq->max_active)) {
cwq->nr_active++;
worklist = gcwq_determine_ins_pos(gcwq, cwq);
- } else
+ } else {
+ work_flags |= WORK_STRUCT_DELAYED;
worklist = &cwq->delayed_works;
+ }
- insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));
+ insert_work(cwq, work, worklist, work_flags);
spin_unlock_irqrestore(&gcwq->lock, flags);
}
@@ -1215,6 +1230,7 @@
* bound), %false if offline.
*/
static bool worker_maybe_bind_and_lock(struct worker *worker)
+__acquires(&gcwq->lock)
{
struct global_cwq *gcwq = worker->gcwq;
struct task_struct *task = worker->task;
@@ -1488,6 +1504,8 @@
* otherwise.
*/
static bool maybe_create_worker(struct global_cwq *gcwq)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
{
if (!need_to_create_worker(gcwq))
return false;
@@ -1662,6 +1680,7 @@
struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
move_linked_works(work, pos, NULL);
+ __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
cwq->nr_active++;
}
@@ -1669,6 +1688,7 @@
* cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
* @cwq: cwq of interest
* @color: color of work which left the queue
+ * @delayed: whether the work was on the delayed list
*
* A work either has completed or is removed from pending queue,
* decrement nr_in_flight of its cwq and handle workqueue flushing.
@@ -1676,19 +1696,22 @@
* CONTEXT:
* spin_lock_irq(gcwq->lock).
*/
-static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
+static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
+ bool delayed)
{
/* ignore uncolored works */
if (color == WORK_NO_COLOR)
return;
cwq->nr_in_flight[color]--;
- cwq->nr_active--;
- if (!list_empty(&cwq->delayed_works)) {
- /* one down, submit a delayed one */
- if (cwq->nr_active < cwq->max_active)
- cwq_activate_first_delayed(cwq);
+ if (!delayed) {
+ cwq->nr_active--;
+ if (!list_empty(&cwq->delayed_works)) {
+ /* one down, submit a delayed one */
+ if (cwq->nr_active < cwq->max_active)
+ cwq_activate_first_delayed(cwq);
+ }
}
/* is flush in progress and are we at the flushing tip? */
@@ -1725,6 +1748,8 @@
* spin_lock_irq(gcwq->lock) which is released and regrabbed.
*/
static void process_one_work(struct worker *worker, struct work_struct *work)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
{
struct cpu_workqueue_struct *cwq = get_work_cwq(work);
struct global_cwq *gcwq = cwq->gcwq;
@@ -1823,7 +1848,7 @@
hlist_del_init(&worker->hentry);
worker->current_work = NULL;
worker->current_cwq = NULL;
- cwq_dec_nr_in_flight(cwq, work_color);
+ cwq_dec_nr_in_flight(cwq, work_color, false);
}
/**
@@ -2388,7 +2413,8 @@
debug_work_deactivate(work);
list_del_init(&work->entry);
cwq_dec_nr_in_flight(get_work_cwq(work),
- get_work_color(work));
+ get_work_color(work),
+ *work_data_bits(work) & WORK_STRUCT_DELAYED);
ret = 1;
}
}
@@ -2791,7 +2817,6 @@
if (IS_ERR(rescuer->task))
goto err;
- wq->rescuer = rescuer;
rescuer->task->flags |= PF_THREAD_BOUND;
wake_up_process(rescuer->task);
}
@@ -2833,6 +2858,7 @@
{
unsigned int cpu;
+ wq->flags |= WQ_DYING;
flush_workqueue(wq);
/*
@@ -2857,6 +2883,7 @@
if (wq->flags & WQ_RESCUER) {
kthread_stop(wq->rescuer->task);
free_mayday_mask(wq->mayday_mask);
+ kfree(wq->rescuer);
}
free_cwqs(wq);
@@ -3239,6 +3266,8 @@
* multiple times. To be used by cpu_callback.
*/
static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
{
if (!(gcwq->trustee_state == state ||
gcwq->trustee_state == TRUSTEE_DONE)) {
@@ -3545,8 +3574,7 @@
spin_lock_init(&gcwq->lock);
INIT_LIST_HEAD(&gcwq->worklist);
gcwq->cpu = cpu;
- if (cpu == WORK_CPU_UNBOUND)
- gcwq->flags |= GCWQ_DISASSOCIATED;
+ gcwq->flags |= GCWQ_DISASSOCIATED;
INIT_LIST_HEAD(&gcwq->idle_list);
for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
@@ -3570,6 +3598,8 @@
struct global_cwq *gcwq = get_gcwq(cpu);
struct worker *worker;
+ if (cpu != WORK_CPU_UNBOUND)
+ gcwq->flags &= ~GCWQ_DISASSOCIATED;
worker = create_worker(gcwq, true);
BUG_ON(!worker);
spin_lock_irq(&gcwq->lock);
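
The WORK_STRUCT_DELAYED bookkeeping above exists so completion knows whether a work ever held an nr_active slot. A heavily simplified userspace model, with the worklists reduced to counters and max_active fixed at 2:

#include <stdio.h>

#define MAX_ACTIVE 2

static int nr_active, nr_delayed;

static int queue(void)
{
	if (nr_active < MAX_ACTIVE) {
		nr_active++;
		return 0;	/* runs immediately */
	}
	nr_delayed++;
	return 1;		/* models setting WORK_STRUCT_DELAYED */
}

static void dec_nr_in_flight(int delayed)
{
	if (delayed)
		return;		/* never occupied an nr_active slot */
	nr_active--;
	if (nr_delayed && nr_active < MAX_ACTIVE) {
		nr_delayed--;	/* models cwq_activate_first_delayed() */
		nr_active++;
	}
}

int main(void)
{
	int d1 = queue();

	queue();
	queue();	/* third work parks as delayed */
	dec_nr_in_flight(d1);
	printf("active=%d delayed=%d\n", nr_active, nr_delayed);	/* 2 0 */
	return 0;
}
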
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 1b4afd2..21ac830 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -482,6 +482,7 @@
select DEBUG_SPINLOCK
select DEBUG_MUTEXES
select DEBUG_LOCK_ALLOC
+ select TRACE_IRQFLAGS
default n
help
This feature enables the kernel to prove that all locking
@@ -539,6 +540,23 @@
disabling, allowing multiple RCU-lockdep warnings to be printed
on a single reboot.
+ Say Y to allow multiple RCU-lockdep warnings per boot.
+
+ Say N if you are unsure.
+
+config SPARSE_RCU_POINTER
+ bool "RCU debugging: sparse-based checks for pointer usage"
+ default n
+ help
+ This feature enables the __rcu sparse annotation for
+ RCU-protected pointers. This annotation will cause sparse
+ to flag any non-RCU use of annotated pointers. This can be
+ helpful when debugging RCU usage. Please note that this feature
+ is not intended to enforce code cleanliness; it is instead merely
+ a debugging aid.
+
+ Say Y to make sparse flag questionable use of RCU-protected pointers.
+
Say N if you are unsure.
config LOCKDEP
@@ -579,11 +597,10 @@
of more runtime overhead.
config TRACE_IRQFLAGS
- depends on DEBUG_KERNEL
bool
- default y
- depends on TRACE_IRQFLAGS_SUPPORT
- depends on PROVE_LOCKING
+ help
+ Enables hooks into the interrupt enabling and disabling paths
+ for either tracing or lock debugging.
config DEBUG_SPINLOCK_SLEEP
bool "Spinlock debugging: sleep-inside-spinlock checking"
@@ -832,6 +849,30 @@
Say Y if you are unsure.
+config RCU_CPU_STALL_TIMEOUT
+ int "RCU CPU stall timeout in seconds"
+ depends on RCU_CPU_STALL_DETECTOR
+ range 3 300
+ default 60
+ help
+ If a given RCU grace period extends beyond the specified
+ number of seconds, a CPU stall warning is printed. If the
+ RCU grace period persists, additional CPU stall warnings are
+ printed at more widely spaced intervals.
+
+config RCU_CPU_STALL_DETECTOR_RUNNABLE
+ bool "RCU CPU stall checking starts automatically at boot"
+ depends on RCU_CPU_STALL_DETECTOR
+ default y
+ help
+ If set, start checking for RCU CPU stalls immediately on
+ boot. Otherwise, RCU CPU stall checking must be manually
+ enabled.
+
+ Say Y if you are unsure.
+
+ Say N if you wish to suppress RCU CPU stall checking during boot.
+
config RCU_CPU_STALL_VERBOSE
bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR"
depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU
diff --git a/lib/bug.c b/lib/bug.c
index 7cdfad8..1955209 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -72,8 +72,8 @@
return NULL;
}
-int module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
- struct module *mod)
+void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+ struct module *mod)
{
char *secstrings;
unsigned int i;
@@ -97,8 +97,6 @@
* could potentially lead to deadlock and thus be counter-productive.
*/
list_add(&mod->bug_list, &module_bug_list);
-
- return 0;
}
void module_bug_cleanup(struct module *mod)
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 02afc25..7bd6df7 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -26,19 +26,11 @@
#include <linux/dynamic_debug.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
+#include <linux/jump_label.h>
extern struct _ddebug __start___verbose[];
extern struct _ddebug __stop___verbose[];
-/* dynamic_debug_enabled, and dynamic_debug_enabled2 are bitmasks in which
- * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
- * use independent hash functions, to reduce the chance of false positives.
- */
-long long dynamic_debug_enabled;
-EXPORT_SYMBOL_GPL(dynamic_debug_enabled);
-long long dynamic_debug_enabled2;
-EXPORT_SYMBOL_GPL(dynamic_debug_enabled2);
-
struct ddebug_table {
struct list_head link;
char *mod_name;
@@ -88,26 +80,6 @@
}
/*
- * must be called with ddebug_lock held
- */
-
-static int disabled_hash(char hash, bool first_table)
-{
- struct ddebug_table *dt;
- char table_hash_value;
-
- list_for_each_entry(dt, &ddebug_tables, link) {
- if (first_table)
- table_hash_value = dt->ddebugs->primary_hash;
- else
- table_hash_value = dt->ddebugs->secondary_hash;
- if (dt->num_enabled && (hash == table_hash_value))
- return 0;
- }
- return 1;
-}
-
-/*
* Search the tables for _ddebug's which match the given
* `query' and apply the `flags' and `mask' to them. Tells
* the user which ddebug's were changed, or whether none
@@ -170,17 +142,9 @@
dt->num_enabled++;
dp->flags = newflags;
if (newflags) {
- dynamic_debug_enabled |=
- (1LL << dp->primary_hash);
- dynamic_debug_enabled2 |=
- (1LL << dp->secondary_hash);
+ jump_label_enable(&dp->enabled);
} else {
- if (disabled_hash(dp->primary_hash, true))
- dynamic_debug_enabled &=
- ~(1LL << dp->primary_hash);
- if (disabled_hash(dp->secondary_hash, false))
- dynamic_debug_enabled2 &=
- ~(1LL << dp->secondary_hash);
+ jump_label_disable(&dp->enabled);
}
if (verbose)
printk(KERN_INFO
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index b935795..70af0a7 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -123,7 +123,7 @@
* @kobj: struct kobject that the action is happening to
* @envp_ext: pointer to environmental data
*
- * Returns 0 if kobject_uevent() is completed with success or the
+ * Returns 0 if kobject_uevent_env() is completed with success or the
* corresponding error when it fails.
*/
int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
@@ -317,7 +317,7 @@
EXPORT_SYMBOL_GPL(kobject_uevent_env);
/**
- * kobject_uevent - notify userspace by ending an uevent
+ * kobject_uevent - notify userspace by sending an uevent
*
* @action: action that is happening
* @kobj: struct kobject that the action is happening to
diff --git a/lib/list_sort.c b/lib/list_sort.c
index 4b5cb79..a7616fa 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -70,7 +70,7 @@
* element comparison is needed, so the client's cmp()
* routine can invoke cond_resched() periodically.
*/
- (*cmp)(priv, tail, tail);
+ (*cmp)(priv, tail->next, tail->next);
tail->next->prev = tail;
tail = tail->next;
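
The list_sort fix matters because, per the surrounding comment, clients may use cmp() invocations as a progress hook for cond_resched(); calling it on tail->next keeps the hook pointed at a live element. A userspace sketch of such a callback (the period of 1024 and the puts() stand-in are illustrative):

#include <stdio.h>

struct item { int key; };

static unsigned long cmp_calls;

static int my_cmp(void *priv, const struct item *a, const struct item *b)
{
	(void)priv;
	if (++cmp_calls % 1024 == 0)
		puts("would cond_resched() here");
	return a->key - b->key;
}

int main(void)
{
	struct item x = { 1 }, y = { 2 };

	for (int i = 0; i < 2048; i++)
		(void)my_cmp(NULL, &x, &y);
	printf("%lu comparisons\n", cmp_calls);
	return 0;
}
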
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 5b7d462..6f412ab4 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -49,7 +49,7 @@
unsigned int height; /* Height from the bottom */
unsigned int count;
struct rcu_head rcu_head;
- void *slots[RADIX_TREE_MAP_SIZE];
+ void __rcu *slots[RADIX_TREE_MAP_SIZE];
unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
};
@@ -174,14 +174,16 @@
{
struct radix_tree_node *node =
container_of(head, struct radix_tree_node, rcu_head);
+ int i;
/*
* must only free zeroed nodes into the slab. radix_tree_shrink
* can leave us with a non-NULL entry in the first slot, so clear
* that here to make sure.
*/
- tag_clear(node, 0, 0);
- tag_clear(node, 1, 0);
+ for (i = 0; i < RADIX_TREE_MAX_TAGS; i++)
+ tag_clear(node, i, 0);
+
node->slots[0] = NULL;
node->count = 0;
@@ -623,6 +625,13 @@
* also settag. The function stops either after tagging nr_to_tag items or
* after reaching last_index.
*
+ * The tags must be set from the leaf level only and propagated back up the
+ * path to the root. We must do this so that we resolve the full path before
+ * setting any tags on intermediate nodes. If we set tags as we descend, then
+ * we can get to the leaf node and find that the index that has the iftag
+ * set is outside the range we are scanning. This results in dangling tags and
+ * can lead to problems with later tag operations (e.g. livelocks on lookups).
+ *
* The function returns number of leaves where the tag was set and sets
* *first_indexp to the first unscanned index.
* WARNING! *first_indexp can wrap if last_index is ULONG_MAX. Caller must
@@ -633,9 +642,13 @@
unsigned long nr_to_tag,
unsigned int iftag, unsigned int settag)
{
- unsigned int height = root->height, shift;
- unsigned long tagged = 0, index = *first_indexp;
- struct radix_tree_node *open_slots[height], *slot;
+ unsigned int height = root->height;
+ struct radix_tree_path path[height];
+ struct radix_tree_path *pathp = path;
+ struct radix_tree_node *slot;
+ unsigned int shift;
+ unsigned long tagged = 0;
+ unsigned long index = *first_indexp;
last_index = min(last_index, radix_tree_maxindex(height));
if (index > last_index)
@@ -655,6 +668,13 @@
shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
slot = radix_tree_indirect_to_ptr(root->rnode);
+ /*
+ * we fill the path from (root->height - 2) to 0, leaving the index at
+ * (root->height - 1) as a terminator. Zero the node in the terminator
+ * so that we can use this to end walk loops back up the path.
+ */
+ path[height - 1].node = NULL;
+
for (;;) {
int offset;
@@ -663,17 +683,30 @@
goto next;
if (!tag_get(slot, iftag, offset))
goto next;
- tag_set(slot, settag, offset);
- if (height == 1) {
- tagged++;
- goto next;
+ if (height > 1) {
+ /* Go down one level */
+ height--;
+ shift -= RADIX_TREE_MAP_SHIFT;
+ path[height - 1].node = slot;
+ path[height - 1].offset = offset;
+ slot = slot->slots[offset];
+ continue;
}
- /* Go down one level */
- height--;
- shift -= RADIX_TREE_MAP_SHIFT;
- open_slots[height] = slot;
- slot = slot->slots[offset];
- continue;
+
+ /* tag the leaf */
+ tagged++;
+ tag_set(slot, settag, offset);
+
+ /* walk back up the path tagging interior nodes */
+ pathp = &path[0];
+ while (pathp->node) {
+ /* stop if we find a node with the tag already set */
+ if (tag_get(pathp->node, settag, pathp->offset))
+ break;
+ tag_set(pathp->node, settag, pathp->offset);
+ pathp++;
+ }
+
next:
/* Go to next item at level determined by 'shift' */
index = ((index >> shift) + 1) << shift;
@@ -688,7 +721,7 @@
* last_index is guaranteed to be in the tree, what
* we do below cannot wander astray.
*/
- slot = open_slots[height];
+ slot = path[height - 1].node;
height++;
shift += RADIX_TREE_MAP_SHIFT;
}
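
The radix-tree rework above saves the descent path, tags the leaf, then walks back toward the root, stopping at the first ancestor that already carries the tag. A userspace model of that upward walk over a NULL-terminated path array:

#include <stdio.h>

struct node { int tagged; };

int main(void)
{
	struct node nodes[4] = { { 0 }, { 0 }, { 1 }, { 0 } };
	struct node *path[5] = {
		&nodes[0], &nodes[1], &nodes[2], &nodes[3], NULL	/* terminator */
	};

	for (struct node **pathp = path; *pathp; pathp++) {
		if ((*pathp)->tagged)
			break;		/* everything above is tagged already */
		(*pathp)->tagged = 1;
		printf("tagged level %d\n", (int)(pathp - path));
	}
	return 0;
}
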
diff --git a/lib/raid6/.gitignore b/lib/raid6/.gitignore
new file mode 100644
index 0000000..162beca
--- /dev/null
+++ b/lib/raid6/.gitignore
@@ -0,0 +1,4 @@
+mktables
+altivec*.c
+int*.c
+tables.c
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index a5ec428..4ceb05d 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -248,8 +248,18 @@
left -= sg_size;
sg = alloc_fn(alloc_size, gfp_mask);
- if (unlikely(!sg))
- return -ENOMEM;
+ if (unlikely(!sg)) {
+ /*
+ * Adjust entry count to reflect that the last
+ * entry of the previous table won't be used for
+ * linkage. Without this, sg_kfree() may get
+ * confused.
+ */
+ if (prv)
+ table->nents = ++table->orig_nents;
+
+ return -ENOMEM;
+ }
sg_init_table(sg, alloc_size);
table->nents = table->orig_nents += sg_size;
diff --git a/mm/Kconfig b/mm/Kconfig
index f4e516e..f0fb912 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -189,7 +189,7 @@
config MIGRATION
bool "Page migration"
def_bool y
- depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE
+ depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION
help
Allows the migration of the physical location of pages of processes
while the virtual addresses are not changed. This is useful in
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index eaa4a5b..65d4204 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -30,6 +30,7 @@
struct backing_dev_info noop_backing_dev_info = {
.name = "noop",
+ .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);
@@ -243,6 +244,7 @@
err = bdi_init(&default_backing_dev_info);
if (!err)
bdi_register(&default_backing_dev_info, NULL, "default");
+ err = bdi_init(&noop_backing_dev_info);
return err;
}
@@ -445,8 +447,8 @@
switch (action) {
case FORK_THREAD:
__set_current_state(TASK_RUNNING);
- task = kthread_run(bdi_writeback_thread, &bdi->wb, "flush-%s",
- dev_name(bdi->dev));
+ task = kthread_create(bdi_writeback_thread, &bdi->wb,
+ "flush-%s", dev_name(bdi->dev));
if (IS_ERR(task)) {
/*
* If thread creation fails, force writeout of
@@ -457,10 +459,13 @@
/*
* The spinlock makes sure we do not lose
* wake-ups when racing with 'bdi_queue_work()'.
+ * And as soon as the bdi thread is visible, we
+ * can start it.
*/
spin_lock_bh(&bdi->wb_lock);
bdi->wb.task = task;
spin_unlock_bh(&bdi->wb_lock);
+ wake_up_process(task);
}
break;
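
Switching from kthread_run() to kthread_create() above closes the window where the flusher could run before bdi->wb.task is published under the lock. A userspace pthread model of the create-publish-wake ordering, with a condition variable standing in for wake_up_process():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int published;	/* models bdi->wb.task being set */

static void *writeback_thread(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!published)	/* created, but parked until "woken" */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	puts("thread runs only after its task pointer is visible");
	return arg;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, writeback_thread, NULL);
	pthread_mutex_lock(&lock);
	published = 1;			/* publish under the lock ... */
	pthread_cond_signal(&cond);	/* ... then wake, as in the patch */
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}
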
diff --git a/mm/bounce.c b/mm/bounce.c
index 13b6dad..1481de6 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -116,8 +116,8 @@
*/
vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;
- flush_dcache_page(tovec->bv_page);
bounce_copy_vec(tovec, vfrom);
+ flush_dcache_page(tovec->bv_page);
}
}
diff --git a/mm/compaction.c b/mm/compaction.c
index 94cce51..4d709ee 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -214,15 +214,16 @@
/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
-
- unsigned long inactive, isolated;
+ unsigned long active, inactive, isolated;
inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
zone_page_state(zone, NR_INACTIVE_ANON);
+ active = zone_page_state(zone, NR_ACTIVE_FILE) +
+ zone_page_state(zone, NR_ACTIVE_ANON);
isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
zone_page_state(zone, NR_ISOLATED_ANON);
- return isolated > inactive;
+ return isolated > (inactive + active) / 2;
}
/*
diff --git a/mm/fremap.c b/mm/fremap.c
index 46f5dac..ec520c7 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -125,7 +125,6 @@
{
struct mm_struct *mm = current->mm;
struct address_space *mapping;
- unsigned long end = start + size;
struct vm_area_struct *vma;
int err = -EINVAL;
int has_write_lock = 0;
@@ -142,6 +141,10 @@
if (start + size <= start)
return err;
+ /* Does pgoff wrap? */
+ if (pgoff + (size >> PAGE_SHIFT) < pgoff)
+ return err;
+
/* Can we represent this offset inside this architecture's pte's? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
@@ -168,7 +171,7 @@
if (!(vma->vm_flags & VM_CAN_NONLINEAR))
goto out;
- if (end <= start || start < vma->vm_start || end > vma->vm_end)
+ if (start < vma->vm_start || start + size > vma->vm_end)
goto out;
/* Must set VM_NONLINEAR before any pages are populated. */
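
The added pgoff test is the standard unsigned wrap-around guard: if the addition overflows, the sum compares less than either operand. A minimal demonstration:

#include <limits.h>
#include <stdio.h>

static int pgoff_wraps(unsigned long pgoff, unsigned long npages)
{
	return pgoff + npages < pgoff;	/* unsigned overflow is well defined */
}

int main(void)
{
	printf("%d\n", pgoff_wraps(10, 20));		/* 0 */
	printf("%d\n", pgoff_wraps(ULONG_MAX - 1, 2));	/* 1: wrapped */
	return 0;
}
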
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cc5be78..c032738 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2324,11 +2324,8 @@
* and just make the page writable */
avoidcopy = (page_mapcount(old_page) == 1);
if (avoidcopy) {
- if (!trylock_page(old_page)) {
- if (PageAnon(old_page))
- page_move_anon_rmap(old_page, vma, address);
- } else
- unlock_page(old_page);
+ if (PageAnon(old_page))
+ page_move_anon_rmap(old_page, vma, address);
set_huge_ptep_writable(vma, address, ptep);
return 0;
}
@@ -2404,7 +2401,7 @@
set_huge_pte_at(mm, address, ptep,
make_huge_pte(vma, new_page, 1));
page_remove_rmap(old_page);
- hugepage_add_anon_rmap(new_page, vma, address);
+ hugepage_add_new_anon_rmap(new_page, vma, address);
/* Make the old page be freed below */
new_page = old_page;
mmu_notifier_invalidate_range_end(mm,
@@ -2631,10 +2628,16 @@
vma, address);
}
- if (!pagecache_page) {
- page = pte_page(entry);
+ /*
+ * hugetlb_cow() requires page locks of pte_page(entry) and
+ * pagecache_page, so here we need to take the former one
+ * when page != pagecache_page or !pagecache_page.
+ * Note that locking order is always pagecache_page -> page,
+ * so no worry about deadlock.
+ */
+ page = pte_page(entry);
+ if (page != pagecache_page)
lock_page(page);
- }
spin_lock(&mm->page_table_lock);
/* Check for a racing update before calling hugetlb_cow */
@@ -2661,9 +2664,8 @@
if (pagecache_page) {
unlock_page(pagecache_page);
put_page(pagecache_page);
- } else {
- unlock_page(page);
}
+ unlock_page(page);
out_mutex:
mutex_unlock(&hugetlb_instantiation_mutex);
diff --git a/mm/ksm.c b/mm/ksm.c
index e2ae004..65ab5c7 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -712,7 +712,7 @@
if (!ptep)
goto out;
- if (pte_write(*ptep)) {
+ if (pte_write(*ptep) || pte_dirty(*ptep)) {
pte_t entry;
swapped = PageSwapCache(page);
@@ -735,7 +735,9 @@
set_pte_at(mm, addr, ptep, entry);
goto out_unlock;
}
- entry = pte_wrprotect(entry);
+ if (pte_dirty(entry))
+ set_page_dirty(page);
+ entry = pte_mkclean(pte_wrprotect(entry));
set_pte_at_notify(mm, addr, ptep, entry);
}
*orig_pte = *ptep;
@@ -1504,8 +1506,6 @@
{
struct page *new_page;
- unlock_page(page); /* any racers will COW it, not modify it */
-
new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
if (new_page) {
copy_user_highpage(new_page, page, address, vma);
@@ -1521,7 +1521,6 @@
add_page_to_unevictable_list(new_page);
}
- page_cache_release(page);
return new_page;
}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 3eed583..9be3cf8 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3587,9 +3587,13 @@
static void mem_cgroup_threshold(struct mem_cgroup *memcg)
{
- __mem_cgroup_threshold(memcg, false);
- if (do_swap_account)
- __mem_cgroup_threshold(memcg, true);
+ while (memcg) {
+ __mem_cgroup_threshold(memcg, false);
+ if (do_swap_account)
+ __mem_cgroup_threshold(memcg, true);
+
+ memcg = parent_mem_cgroup(memcg);
+ }
}
static int compare_thresholds(const void *a, const void *b)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 9c26eec..757f6b0 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -183,7 +183,7 @@
* signal.
*/
static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
- unsigned long pfn)
+ unsigned long pfn, struct page *page)
{
struct siginfo si;
int ret;
@@ -198,7 +198,7 @@
#ifdef __ARCH_SI_TRAPNO
si.si_trapno = trapno;
#endif
- si.si_addr_lsb = PAGE_SHIFT;
+ si.si_addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT;
/*
* Don't use force here, it's convenient if the signal
* can be temporarily blocked.
@@ -235,7 +235,7 @@
int nr;
do {
nr = shrink_slab(1000, GFP_KERNEL, 1000);
- if (page_count(p) == 0)
+ if (page_count(p) == 1)
break;
} while (nr > 10);
}
@@ -327,7 +327,7 @@
* wrong earlier.
*/
static void kill_procs_ao(struct list_head *to_kill, int doit, int trapno,
- int fail, unsigned long pfn)
+ int fail, struct page *page, unsigned long pfn)
{
struct to_kill *tk, *next;
@@ -352,7 +352,7 @@
* process anyways.
*/
else if (kill_proc_ao(tk->tsk, tk->addr, trapno,
- pfn) < 0)
+ pfn, page) < 0)
printk(KERN_ERR
"MCE %#lx: Cannot send advisory machine check signal to %s:%d\n",
pfn, tk->tsk->comm, tk->tsk->pid);
@@ -928,7 +928,7 @@
* any accesses to the poisoned memory.
*/
kill_procs_ao(&tokill, !!PageDirty(hpage), trapno,
- ret != SWAP_SUCCESS, pfn);
+ ret != SWAP_SUCCESS, p, pfn);
return ret;
}
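
Deriving si_addr_lsb from compound_order() above makes the signal describe the real blast radius of a poisoned huge page. A worked example assuming 4KB base pages (PAGE_SHIFT of 12): order 0 yields lsb 12, while a 2MB huge page (order 9) yields 9 + 12 = 21:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4KB base pages, as on x86 */

int main(void)
{
	unsigned int orders[] = { 0, 9 };	/* base page, 2MB huge page */

	for (unsigned int i = 0; i < 2; i++)
		printf("compound order %u -> si_addr_lsb %u\n",
		       orders[i], orders[i] + PAGE_SHIFT);
	return 0;
}
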
diff --git a/mm/memory.c b/mm/memory.c
index 2ed2267..0e18b4d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2623,7 +2623,7 @@
unsigned int flags, pte_t orig_pte)
{
spinlock_t *ptl;
- struct page *page;
+ struct page *page, *swapcache = NULL;
swp_entry_t entry;
pte_t pte;
struct mem_cgroup *ptr = NULL;
@@ -2679,10 +2679,25 @@
lock_page(page);
delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
- page = ksm_might_need_to_copy(page, vma, address);
- if (!page) {
- ret = VM_FAULT_OOM;
- goto out;
+ /*
+ * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
+ * release the swapcache from under us. The page pin and pte_same
+ * test below are not enough to exclude that. Even if it is still
+ * swapcache, we need to check that the page's swap has not changed.
+ */
+ if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val))
+ goto out_page;
+
+ if (ksm_might_need_to_copy(page, vma, address)) {
+ swapcache = page;
+ page = ksm_does_need_to_copy(page, vma, address);
+
+ if (unlikely(!page)) {
+ ret = VM_FAULT_OOM;
+ page = swapcache;
+ swapcache = NULL;
+ goto out_page;
+ }
}
if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
@@ -2735,6 +2750,18 @@
if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
try_to_free_swap(page);
unlock_page(page);
+ if (swapcache) {
+ /*
+ * Hold the lock to prevent the swap entry from being reused
+ * until we take the PT lock for the pte_same() check
+ * (to avoid false positives from pte_same). For
+ * further safety release the lock after the swap_free
+ * so that the swap count won't change under a
+ * parallel locked swapcache.
+ */
+ unlock_page(swapcache);
+ page_cache_release(swapcache);
+ }
if (flags & FAULT_FLAG_WRITE) {
ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
@@ -2756,15 +2783,17 @@
unlock_page(page);
out_release:
page_cache_release(page);
+ if (swapcache) {
+ unlock_page(swapcache);
+ page_cache_release(swapcache);
+ }
return ret;
}
/*
- * This is like a special single-page "expand_downwards()",
- * except we must first make sure that 'address-PAGE_SIZE'
+ * This is like a special single-page "expand_{down|up}wards()",
+ * except we must first make sure that 'address{-|+}PAGE_SIZE'
* doesn't hit another vma.
- *
- * The "find_vma()" will do the right thing even if we wrap
*/
static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
{
@@ -2783,6 +2812,15 @@
expand_stack(vma, address - PAGE_SIZE);
}
+ if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
+ struct vm_area_struct *next = vma->vm_next;
+
+ /* As VM_GROWSDOWN but s/below/above/ */
+ if (next && next->vm_start == address + PAGE_SIZE)
+ return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
+
+ expand_upwards(vma, address + PAGE_SIZE);
+ }
return 0;
}
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index a4cfcdc..dd186c1 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -584,19 +584,19 @@
/* Return the start of the next active pageblock after a given page */
static struct page *next_active_pageblock(struct page *page)
{
- int pageblocks_stride;
-
/* Ensure the starting page is pageblock-aligned */
BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
- /* Move forward by at least 1 * pageblock_nr_pages */
- pageblocks_stride = 1;
-
/* If the entire pageblock is free, move to the end of free page */
- if (pageblock_free(page))
- pageblocks_stride += page_order(page) - pageblock_order;
+ if (pageblock_free(page)) {
+ int order;
+ /* Be careful: we hold no locks, so page_order can change under us. */
+ order = page_order(page);
+ if ((order < MAX_ORDER) && (order >= pageblock_order))
+ return page + (1 << order);
+ }
- return page + (pageblocks_stride * pageblock_nr_pages);
+ return page + pageblock_nr_pages;
}
/* Checks if this range of memory is likely to be hot-removable. */
diff --git a/mm/mlock.c b/mm/mlock.c
index cbae7c5..b70919c 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -135,12 +135,6 @@
}
}
-/* Is the vma a continuation of the stack vma above it? */
-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
-{
- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
-}
-
static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
{
return (vma->vm_flags & VM_GROWSDOWN) &&
diff --git a/mm/mmap.c b/mm/mmap.c
index 331e51a..00161a4 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1716,9 +1716,6 @@
* PA-RISC uses this for its stack; IA64 for its Register Backing Store.
* vma is the last one with address > vma->vm_end. Have to extend vma.
*/
-#ifndef CONFIG_IA64
-static
-#endif
int expand_upwards(struct vm_area_struct *vma, unsigned long address)
{
int error;
@@ -2012,6 +2009,7 @@
removed_exe_file_vma(mm);
fput(new->vm_file);
}
+ unlink_anon_vmas(new);
out_free_mpol:
mpol_put(pol);
out_free_vma:
diff --git a/mm/mmzone.c b/mm/mmzone.c
index f5b7d17..e35bfb8 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -87,3 +87,24 @@
return 1;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
+
+#ifdef CONFIG_SMP
+/* Called when a more accurate view of NR_FREE_PAGES is needed */
+unsigned long zone_nr_free_pages(struct zone *zone)
+{
+ unsigned long nr_free_pages = zone_page_state(zone, NR_FREE_PAGES);
+
+ /*
+ * While kswapd is awake, the zone is considered to be under some
+ * memory pressure. Under pressure, there is a risk that
+ * per-cpu-counter-drift will allow the min watermark to be breached
+ * potentially causing a live-lock. While kswapd is awake and
+ * free pages are low, get a better estimate for free pages
+ */
+ if (nr_free_pages < zone->percpu_drift_mark &&
+ !waitqueue_active(&zone->zone_pgdat->kswapd_wait))
+ return zone_page_state_snapshot(zone, NR_FREE_PAGES);
+
+ return nr_free_pages;
+}
+#endif /* CONFIG_SMP */
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index fc81cb2..4029583 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -121,8 +121,8 @@
}
/* return true if the task is not adequate as candidate victim task. */
-static bool oom_unkillable_task(struct task_struct *p, struct mem_cgroup *mem,
- const nodemask_t *nodemask)
+static bool oom_unkillable_task(struct task_struct *p,
+ const struct mem_cgroup *mem, const nodemask_t *nodemask)
{
if (is_global_init(p))
return true;
@@ -208,8 +208,13 @@
*/
points += p->signal->oom_score_adj;
- if (points < 0)
- return 0;
+ /*
+ * Never return 0 for an eligible task that may be killed since it's
+ * possible that no single user task uses more than 0.1% of memory and
+ * no single admin task uses more than 3.0%.
+ */
+ if (points <= 0)
+ return 1;
return (points < 1000) ? points : 1000;
}
@@ -339,26 +344,24 @@
/**
* dump_tasks - dump current memory state of all system tasks
* @mem: current's memory controller, if constrained
+ * @nodemask: nodemask passed to page allocator for mempolicy ooms
*
- * Dumps the current memory state of all system tasks, excluding kernel threads.
+ * Dumps the current memory state of all eligible tasks. Tasks not in the same
+ * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
+ * are not shown.
* State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj
* value, oom_score_adj value, and name.
*
- * If the actual is non-NULL, only tasks that are a member of the mem_cgroup are
- * shown.
- *
* Call with tasklist_lock read-locked.
*/
-static void dump_tasks(const struct mem_cgroup *mem)
+static void dump_tasks(const struct mem_cgroup *mem, const nodemask_t *nodemask)
{
struct task_struct *p;
struct task_struct *task;
pr_info("[ pid ] uid tgid total_vm rss cpu oom_adj oom_score_adj name\n");
for_each_process(p) {
- if (p->flags & PF_KTHREAD)
- continue;
- if (mem && !task_in_mem_cgroup(p, mem))
+ if (oom_unkillable_task(p, mem, nodemask))
continue;
task = find_lock_task_mm(p);
@@ -381,7 +384,7 @@
}
static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
- struct mem_cgroup *mem)
+ struct mem_cgroup *mem, const nodemask_t *nodemask)
{
task_lock(current);
pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
@@ -394,7 +397,7 @@
mem_cgroup_print_oom_info(mem, p);
show_mem();
if (sysctl_oom_dump_tasks)
- dump_tasks(mem);
+ dump_tasks(mem, nodemask);
}
#define K(x) ((x) << (PAGE_SHIFT-10))
@@ -436,7 +439,7 @@
unsigned int victim_points = 0;
if (printk_ratelimit())
- dump_header(p, gfp_mask, order, mem);
+ dump_header(p, gfp_mask, order, mem, nodemask);
/*
* If the task is already exiting, don't alarm the sysadmin or kill
@@ -482,7 +485,7 @@
* Determines whether the kernel must panic because of the panic_on_oom sysctl.
*/
static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
- int order)
+ int order, const nodemask_t *nodemask)
{
if (likely(!sysctl_panic_on_oom))
return;
@@ -496,7 +499,7 @@
return;
}
read_lock(&tasklist_lock);
- dump_header(NULL, gfp_mask, order, NULL);
+ dump_header(NULL, gfp_mask, order, NULL, nodemask);
read_unlock(&tasklist_lock);
panic("Out of memory: %s panic_on_oom is enabled\n",
sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
@@ -509,7 +512,7 @@
unsigned int points = 0;
struct task_struct *p;
- check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0);
+ check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0, NULL);
limit = mem_cgroup_get_limit(mem) >> PAGE_SHIFT;
read_lock(&tasklist_lock);
retry:
@@ -641,6 +644,7 @@
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
int order, nodemask_t *nodemask)
{
+ const nodemask_t *mpol_mask;
struct task_struct *p;
unsigned long totalpages;
unsigned long freed = 0;
@@ -670,7 +674,8 @@
*/
constraint = constrained_alloc(zonelist, gfp_mask, nodemask,
&totalpages);
- check_panic_on_oom(constraint, gfp_mask, order);
+ mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL;
+ check_panic_on_oom(constraint, gfp_mask, order, mpol_mask);
read_lock(&tasklist_lock);
if (sysctl_oom_kill_allocating_task &&
@@ -688,15 +693,13 @@
}
retry:
- p = select_bad_process(&points, totalpages, NULL,
- constraint == CONSTRAINT_MEMORY_POLICY ? nodemask :
- NULL);
+ p = select_bad_process(&points, totalpages, NULL, mpol_mask);
if (PTR_ERR(p) == -1UL)
goto out;
/* Found nothing?!?! Either we hang forever, or we panic. */
if (!p) {
- dump_header(NULL, gfp_mask, order, NULL);
+ dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
read_unlock(&tasklist_lock);
panic("Out of memory and no killable processes...\n");
}
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index c09ef52..e3bccac 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -985,22 +985,16 @@
}
}
- if (wbc->nr_to_write > 0) {
- if (--wbc->nr_to_write == 0 &&
- wbc->sync_mode == WB_SYNC_NONE) {
- /*
- * We stop writing back only if we are
- * not doing integrity sync. In case of
- * integrity sync we have to keep going
- * because someone may be concurrently
- * dirtying pages, and we might have
- * synced a lot of newly appeared dirty
- * pages, but have not synced all of the
- * old dirty pages.
- */
- done = 1;
- break;
- }
+ /*
+ * We stop writing back only if we are not doing
+ * integrity sync. In case of integrity sync we have to
+ * keep going until we have written all the pages
+ * we tagged for writeback prior to entering this loop.
+ */
+ if (--wbc->nr_to_write <= 0 &&
+ wbc->sync_mode == WB_SYNC_NONE) {
+ done = 1;
+ break;
}
}
pagevec_release(&pvec);
@@ -1132,6 +1126,7 @@
task_io_account_write(PAGE_CACHE_SIZE);
}
}
+EXPORT_SYMBOL(account_page_dirtied);
/*
* For address_spaces which do not use buffers. Just tag the page as dirty in
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a9649f4..f12ad18 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -588,13 +588,13 @@
{
int migratetype = 0;
int batch_free = 0;
+ int to_free = count;
spin_lock(&zone->lock);
zone->all_unreclaimable = 0;
zone->pages_scanned = 0;
- __mod_zone_page_state(zone, NR_FREE_PAGES, count);
- while (count) {
+ while (to_free) {
struct page *page;
struct list_head *list;
@@ -619,8 +619,9 @@
/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
__free_one_page(page, zone, 0, page_private(page));
trace_mm_page_pcpu_drain(page, 0, page_private(page));
- } while (--count && --batch_free && !list_empty(list));
+ } while (--to_free && --batch_free && !list_empty(list));
}
+ __mod_zone_page_state(zone, NR_FREE_PAGES, count);
spin_unlock(&zone->lock);
}
@@ -631,8 +632,8 @@
zone->all_unreclaimable = 0;
zone->pages_scanned = 0;
- __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
__free_one_page(page, zone, order, migratetype);
+ __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
spin_unlock(&zone->lock);
}
@@ -1461,7 +1462,7 @@
{
 	/* free_pages may go negative - that's OK */
long min = mark;
- long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
+ long free_pages = zone_nr_free_pages(z) - (1 << order) + 1;
int o;
if (alloc_flags & ALLOC_HIGH)
@@ -1846,6 +1847,7 @@
struct page *page = NULL;
struct reclaim_state reclaim_state;
struct task_struct *p = current;
+ bool drained = false;
cond_resched();
@@ -1864,14 +1866,25 @@
cond_resched();
- if (order != 0)
- drain_all_pages();
+ if (unlikely(!(*did_some_progress)))
+ return NULL;
- if (likely(*did_some_progress))
- page = get_page_from_freelist(gfp_mask, nodemask, order,
+retry:
+ page = get_page_from_freelist(gfp_mask, nodemask, order,
zonelist, high_zoneidx,
alloc_flags, preferred_zone,
migratetype);
+
+ /*
+ * If an allocation failed after direct reclaim, it could be because
+	 * pages are pinned on the per-cpu lists. Drain them and try again.
+ */
+ if (!page && !drained) {
+ drain_all_pages();
+ drained = true;
+ goto retry;
+ }
+
return page;
}
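The drain-once retry above is a reusable pattern; a minimal self-contained
sketch, with try_allocate() as a hypothetical stand-in for
get_page_from_freelist():

	static struct page *alloc_after_reclaim(gfp_t gfp_mask, unsigned int order)
	{
		struct page *page;
		bool drained = false;

	retry:
		page = try_allocate(gfp_mask, order);	/* hypothetical helper */
		if (!page && !drained) {
			drain_all_pages();	/* flush per-cpu pages to the buddy lists */
			drained = true;
			goto retry;
		}
		return page;
	}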
@@ -2423,7 +2436,7 @@
" all_unreclaimable? %s"
"\n",
zone->name,
- K(zone_page_state(zone, NR_FREE_PAGES)),
+ K(zone_nr_free_pages(zone)),
K(min_wmark_pages(zone)),
K(low_wmark_pages(zone)),
K(high_wmark_pages(zone)),
@@ -5169,9 +5182,9 @@
if (!table)
panic("Failed to allocate %s hash table\n", tablename);
- printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n",
+ printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
tablename,
- (1U << log2qty),
+ (1UL << log2qty),
ilog2(size) - PAGE_SHIFT,
size);
diff --git a/mm/percpu.c b/mm/percpu.c
index e61dc2c..c76ef38 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -393,7 +393,9 @@
goto out_unlock;
old_size = chunk->map_alloc * sizeof(chunk->map[0]);
- memcpy(new, chunk->map, old_size);
+ old = chunk->map;
+
+ memcpy(new, old, old_size);
chunk->map_alloc = new_alloc;
chunk->map = new;
@@ -1162,7 +1164,7 @@
}
/*
- * Don't accept if wastage is over 25%. The
+ * Don't accept if wastage is over 1/3. The
* greater-than comparison ensures upa==1 always
* passes the following check.
*/
@@ -1399,9 +1401,9 @@
if (pcpu_first_unit_cpu == NR_CPUS)
pcpu_first_unit_cpu = cpu;
+ pcpu_last_unit_cpu = cpu;
}
}
- pcpu_last_unit_cpu = cpu;
pcpu_nr_units = unit;
for_each_possible_cpu(cpu)
diff --git a/mm/percpu_up.c b/mm/percpu_up.c
index c4351c7..db884fa 100644
--- a/mm/percpu_up.c
+++ b/mm/percpu_up.c
@@ -14,13 +14,13 @@
* percpu sections on SMP for which this path isn't used.
*/
WARN_ON_ONCE(align > SMP_CACHE_BYTES);
- return kzalloc(size, GFP_KERNEL);
+ return (void __percpu __force *)kzalloc(size, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);
void free_percpu(void __percpu *p)
{
- kfree(p);
+ kfree(this_cpu_ptr(p));
}
EXPORT_SYMBOL_GPL(free_percpu);
diff --git a/mm/rmap.c b/mm/rmap.c
index 87b9e8a..92e6757 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -316,7 +316,7 @@
*/
struct anon_vma *page_lock_anon_vma(struct page *page)
{
- struct anon_vma *anon_vma;
+ struct anon_vma *anon_vma, *root_anon_vma;
unsigned long anon_mapping;
rcu_read_lock();
@@ -327,8 +327,21 @@
goto out;
anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
- anon_vma_lock(anon_vma);
- return anon_vma;
+ root_anon_vma = ACCESS_ONCE(anon_vma->root);
+ spin_lock(&root_anon_vma->lock);
+
+ /*
+	 * If this page is still mapped, then its anon_vma cannot have been
+	 * freed. But if it has been unmapped, we have no guarantee against
+	 * the anon_vma being freed and reused (SLAB_DESTROY_BY_RCU ensures
+	 * it is only ever reused as another anon_vma, so the spin_lock
+	 * above cannot corrupt unrelated memory): anon_vma_prepare() or
+	 * anon_vma_fork() may have redirected anon_vma->root before
+	 * page_unlock_anon_vma() is called to unlock it.
+ */
+ if (page_mapped(page))
+ return anon_vma;
+
+ spin_unlock(&root_anon_vma->lock);
out:
rcu_read_unlock();
return NULL;
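A sketch of the intended caller pairing; page_unlock_anon_vma() drops the root
lock and leaves the RCU read-side critical section:

	struct anon_vma *av = page_lock_anon_vma(page);

	if (!av)
		return;		/* page is no longer mapped anonymous */
	/* ... walk the anon_vma chains under root_anon_vma->lock ... */
	page_unlock_anon_vma(av);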
@@ -368,7 +381,13 @@
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
if (PageAnon(page)) {
- if (vma->anon_vma->root != page_anon_vma(page)->root)
+ struct anon_vma *page__anon_vma = page_anon_vma(page);
+ /*
+ * Note: swapoff's unuse_vma() is more efficient with this
+ * check, and needs it to match anon_vma when KSM is active.
+ */
+ if (!vma->anon_vma || !page__anon_vma ||
+ vma->anon_vma->root != page__anon_vma->root)
return -EFAULT;
} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
if (!vma->vm_file ||
@@ -1551,13 +1570,14 @@
struct vm_area_struct *vma, unsigned long address, int exclusive)
{
struct anon_vma *anon_vma = vma->anon_vma;
+
BUG_ON(!anon_vma);
- if (!exclusive) {
- struct anon_vma_chain *avc;
- avc = list_entry(vma->anon_vma_chain.prev,
- struct anon_vma_chain, same_vma);
- anon_vma = avc->anon_vma;
- }
+
+ if (PageAnon(page))
+ return;
+ if (!exclusive)
+ anon_vma = anon_vma->root;
+
anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
page->mapping = (struct address_space *) anon_vma;
page->index = linear_page_index(vma, address);
@@ -1568,6 +1588,8 @@
{
struct anon_vma *anon_vma = vma->anon_vma;
int first;
+
+ BUG_ON(!PageLocked(page));
BUG_ON(!anon_vma);
BUG_ON(address < vma->vm_start || address >= vma->vm_end);
first = atomic_inc_and_test(&page->_mapcount);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 1f3f9c5..7c703ff 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -47,8 +47,6 @@
long total_swap_pages;
static int least_priority;
-static bool swap_for_hibernation;
-
static const char Bad_file[] = "Bad swap file entry ";
static const char Unused_file[] = "Unused swap file entry ";
static const char Bad_offset[] = "Bad swap offset entry ";
@@ -141,8 +139,7 @@
nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
if (nr_blocks) {
err = blkdev_issue_discard(si->bdev, start_block,
- nr_blocks, GFP_KERNEL,
- BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
+ nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT);
if (err)
return err;
cond_resched();
@@ -153,8 +150,7 @@
nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
err = blkdev_issue_discard(si->bdev, start_block,
- nr_blocks, GFP_KERNEL,
- BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
+ nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT);
if (err)
break;
@@ -193,8 +189,7 @@
start_block <<= PAGE_SHIFT - 9;
nr_blocks <<= PAGE_SHIFT - 9;
if (blkdev_issue_discard(si->bdev, start_block,
- nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT |
- BLKDEV_IFL_BARRIER))
+ nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT))
break;
}
@@ -320,10 +315,8 @@
if (offset > si->highest_bit)
scan_base = offset = si->lowest_bit;
- /* reuse swap entry of cache-only swap if not hibernation. */
- if (vm_swap_full()
- && usage == SWAP_HAS_CACHE
- && si->swap_map[offset] == SWAP_HAS_CACHE) {
+ /* reuse swap entry of cache-only swap if not busy. */
+ if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
int swap_was_freed;
spin_unlock(&swap_lock);
swap_was_freed = __try_to_reclaim_swap(si, offset);
@@ -453,8 +446,6 @@
spin_lock(&swap_lock);
if (nr_swap_pages <= 0)
goto noswap;
- if (swap_for_hibernation)
- goto noswap;
nr_swap_pages--;
for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
@@ -487,6 +478,28 @@
return (swp_entry_t) {0};
}
+/* The only caller of this function is now the suspend routine */
+swp_entry_t get_swap_page_of_type(int type)
+{
+ struct swap_info_struct *si;
+ pgoff_t offset;
+
+ spin_lock(&swap_lock);
+ si = swap_info[type];
+ if (si && (si->flags & SWP_WRITEOK)) {
+ nr_swap_pages--;
+ /* This is called for allocating swap entry, not cache */
+ offset = scan_swap_map(si, 1);
+ if (offset) {
+ spin_unlock(&swap_lock);
+ return swp_entry(type, offset);
+ }
+ nr_swap_pages++;
+ }
+ spin_unlock(&swap_lock);
+ return (swp_entry_t) {0};
+}
+
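A hedged sketch of the suspend-side caller, loosely modeled on
alloc_swapdev_block() in kernel/power/swap.c (the helper name and error
handling here are assumptions):

	/* Allocate one swap slot for the hibernation image. */
	static unsigned long alloc_image_slot(int swap_type)
	{
		swp_entry_t entry = get_swap_page_of_type(swap_type);

		if (!swp_offset(entry))
			return 0;	/* swap device exhausted */
		return swp_offset(entry);
	}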
static struct swap_info_struct *swap_info_get(swp_entry_t entry)
{
struct swap_info_struct *p;
@@ -670,6 +683,24 @@
if (page_swapcount(page))
return 0;
+ /*
+ * Once hibernation has begun to create its image of memory,
+ * there's a danger that one of the calls to try_to_free_swap()
+ * - most probably a call from __try_to_reclaim_swap() while
+ * hibernation is allocating its own swap pages for the image,
+ * but conceivably even a call from memory reclaim - will free
+ * the swap from a page which has already been recorded in the
+ * image as a clean swapcache page, and then reuse its swap for
+ * another page of the image. On waking from hibernation, the
+ * original page might be freed under memory pressure, then
+ * later read back in from swap, now with the wrong data.
+ *
+ * Hibernation clears bits from gfp_allowed_mask to prevent
+ * memory reclaim from writing to disk, so check that here.
+ */
+ if (!(gfp_allowed_mask & __GFP_IO))
+ return 0;
+
delete_from_swap_cache(page);
SetPageDirty(page);
return 1;
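For context, the power-management side is what clears those bits around
writing the image; a sketch of that call sequence as recalled from this era's
kernel/power/hibernate.c (treat the exact names as an assumption):

	gfp_t saved_mask;

	saved_mask = clear_gfp_allowed_mask(GFP_IOFS);	/* drop __GFP_IO|__GFP_FS */
	error = swsusp_write(flags);			/* write the image */
	set_gfp_allowed_mask(saved_mask);		/* restore afterwards */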
@@ -746,74 +777,6 @@
#endif
#ifdef CONFIG_HIBERNATION
-
-static pgoff_t hibernation_offset[MAX_SWAPFILES];
-/*
- * Once hibernation starts to use swap, we freeze swap_map[]. Otherwise,
- * saved swap_map[] image to the disk will be an incomplete because it's
- * changing without synchronization with hibernation snap shot.
- * At resume, we just make swap_for_hibernation=false. We can forget
- * used maps easily.
- */
-void hibernation_freeze_swap(void)
-{
- int i;
-
- spin_lock(&swap_lock);
-
- printk(KERN_INFO "PM: Freeze Swap\n");
- swap_for_hibernation = true;
- for (i = 0; i < MAX_SWAPFILES; i++)
- hibernation_offset[i] = 1;
- spin_unlock(&swap_lock);
-}
-
-void hibernation_thaw_swap(void)
-{
- spin_lock(&swap_lock);
- if (swap_for_hibernation) {
- printk(KERN_INFO "PM: Thaw Swap\n");
- swap_for_hibernation = false;
- }
- spin_unlock(&swap_lock);
-}
-
-/*
- * Because updateing swap_map[] can make not-saved-status-change,
- * we use our own easy allocator.
- * Please see kernel/power/swap.c, Used swaps are recorded into
- * RB-tree.
- */
-swp_entry_t get_swap_for_hibernation(int type)
-{
- pgoff_t off;
- swp_entry_t val = {0};
- struct swap_info_struct *si;
-
- spin_lock(&swap_lock);
-
- si = swap_info[type];
- if (!si || !(si->flags & SWP_WRITEOK))
- goto done;
-
- for (off = hibernation_offset[type]; off < si->max; ++off) {
- if (!si->swap_map[off])
- break;
- }
- if (off < si->max) {
- val = swp_entry(type, off);
- hibernation_offset[type] = off + 1;
- }
-done:
- spin_unlock(&swap_lock);
- return val;
-}
-
-void swap_free_for_hibernation(swp_entry_t ent)
-{
- /* Nothing to do */
-}
-
/*
* Find the swap type that corresponds to given device (if any).
*
@@ -2084,7 +2047,7 @@
p->flags |= SWP_SOLIDSTATE;
p->cluster_next = 1 + (random32() % p->highest_bit);
}
- if (discard_swap(p) == 0)
+ if (discard_swap(p) == 0 && (swap_flags & SWAP_FLAG_DISCARD))
p->flags |= SWP_DISCARDABLE;
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c391c32..c5dfabf 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1804,12 +1804,11 @@
* If a zone is deemed to be full of pinned pages then just give it a light
* scan then give up on it.
*/
-static bool shrink_zones(int priority, struct zonelist *zonelist,
+static void shrink_zones(int priority, struct zonelist *zonelist,
struct scan_control *sc)
{
struct zoneref *z;
struct zone *zone;
- bool all_unreclaimable = true;
for_each_zone_zonelist_nodemask(zone, z, zonelist,
gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -1827,8 +1826,38 @@
}
shrink_zone(priority, zone, sc);
- all_unreclaimable = false;
}
+}
+
+static bool zone_reclaimable(struct zone *zone)
+{
+ return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
+}
+
+/*
+ * While hibernation is in progress, kswapd is frozen, so it can neither
+ * mark a zone all_unreclaimable nor handle OOM. So check whether zones
+ * are unreclaimable in direct reclaim as well as in kswapd.
+ */
+static bool all_unreclaimable(struct zonelist *zonelist,
+ struct scan_control *sc)
+{
+ struct zoneref *z;
+ struct zone *zone;
+ bool all_unreclaimable = true;
+
+ for_each_zone_zonelist_nodemask(zone, z, zonelist,
+ gfp_zone(sc->gfp_mask), sc->nodemask) {
+ if (!populated_zone(zone))
+ continue;
+ if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+ continue;
+ if (zone_reclaimable(zone)) {
+ all_unreclaimable = false;
+ break;
+ }
+ }
+
return all_unreclaimable;
}
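To make the zone_reclaimable() heuristic concrete, a worked example with
hypothetical numbers:

	/*
	 * A zone with 10,000 reclaimable pages counts as reclaimable until
	 * pages_scanned reaches 10,000 * 6 = 60,000 -- six full passes with
	 * no progress -- after which all_unreclaimable() lets direct reclaim
	 * give up and the caller may declare OOM.
	 */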
@@ -1852,7 +1881,6 @@
struct scan_control *sc)
{
int priority;
- bool all_unreclaimable;
unsigned long total_scanned = 0;
struct reclaim_state *reclaim_state = current->reclaim_state;
struct zoneref *z;
@@ -1869,7 +1897,7 @@
sc->nr_scanned = 0;
if (!priority)
disable_swap_token();
- all_unreclaimable = shrink_zones(priority, zonelist, sc);
+ shrink_zones(priority, zonelist, sc);
/*
* Don't shrink slabs when reclaiming memory from
* over limit cgroups
@@ -1931,7 +1959,7 @@
return sc->nr_reclaimed;
/* top priority shrink_zones still had more to do? don't OOM, then */
- if (scanning_global_lru(sc) && !all_unreclaimable)
+ if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc))
return 1;
return 0;
@@ -2197,8 +2225,7 @@
total_scanned += sc.nr_scanned;
if (zone->all_unreclaimable)
continue;
- if (nr_slab == 0 &&
- zone->pages_scanned >= (zone_reclaimable_pages(zone) * 6))
+ if (nr_slab == 0 && !zone_reclaimable(zone))
zone->all_unreclaimable = 1;
/*
* If we've done a decent amount of scanning and
diff --git a/mm/vmstat.c b/mm/vmstat.c
index f389168..355a9e6 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -138,11 +138,24 @@
int threshold;
for_each_populated_zone(zone) {
+ unsigned long max_drift, tolerate_drift;
+
threshold = calculate_threshold(zone);
for_each_online_cpu(cpu)
per_cpu_ptr(zone->pageset, cpu)->stat_threshold
= threshold;
+
+ /*
+	 * Only set percpu_drift_mark if there is a danger that
+	 * NR_FREE_PAGES can report that the low watermark is met when
+	 * in fact the min watermark could be breached by an allocation.
+ */
+ tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
+ max_drift = num_online_cpus() * threshold;
+ if (max_drift > tolerate_drift)
+ zone->percpu_drift_mark = high_wmark_pages(zone) +
+ max_drift;
}
}
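A worked example of the drift calculation, with hypothetical numbers:

	/*
	 * num_online_cpus() = 16, threshold = 64:
	 *   max_drift      = 16 * 64 = 1024 pages
	 *   tolerate_drift = low_wmark - min_wmark = 512 pages
	 * Since 1024 > 512, percpu_drift_mark = high_wmark + 1024; below
	 * that mark, zone_nr_free_pages() takes an exact per-cpu sum rather
	 * than trusting the cached NR_FREE_PAGES counter.
	 */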
@@ -813,7 +826,7 @@
"\n scanned %lu"
"\n spanned %lu"
"\n present %lu",
- zone_page_state(zone, NR_FREE_PAGES),
+ zone_nr_free_pages(zone),
min_wmark_pages(zone),
low_wmark_pages(zone),
high_wmark_pages(zone),
@@ -998,6 +1011,7 @@
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
+ refresh_zone_stat_thresholds();
start_cpu_timer(cpu);
node_set_state(cpu_to_node(cpu), N_CPU);
break;
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 01ddb04..0eb96f7 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -24,8 +24,11 @@
if (vlan_dev)
skb->dev = vlan_dev;
- else if (vlan_id)
- goto drop;
+ else if (vlan_id) {
+ if (!(skb->dev->flags & IFF_PROMISC))
+ goto drop;
+ skb->pkt_type = PACKET_OTHERHOST;
+ }
return (polling ? netif_receive_skb(skb) : netif_rx(skb));
@@ -102,8 +105,11 @@
if (vlan_dev)
skb->dev = vlan_dev;
- else if (vlan_id)
- goto drop;
+ else if (vlan_id) {
+ if (!(skb->dev->flags & IFF_PROMISC))
+ goto drop;
+ skb->pkt_type = PACKET_OTHERHOST;
+ }
for (p = napi->gro_list; p; p = p->next) {
NAPI_GRO_CB(p)->same_flow =
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 3d59c9b..3bccdd1 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -510,7 +510,8 @@
if (vlan->flags & VLAN_FLAG_GVRP)
vlan_gvrp_request_join(dev);
- netif_carrier_on(dev);
+ if (netif_carrier_ok(real_dev))
+ netif_carrier_on(dev);
return 0;
clear_allmulti:
diff --git a/net/9p/client.c b/net/9p/client.c
index dc6f2f2..9eb7250 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -331,8 +331,10 @@
}
}
- if (c->tagpool)
+ if (c->tagpool) {
+ p9_idpool_put(0, c->tagpool); /* free reserved tag 0 */
p9_idpool_destroy(c->tagpool);
+ }
/* free requests associated with tags */
for (row = 0; row < (c->max_tag/P9_ROW_MAXTAG); row++) {
@@ -944,6 +946,7 @@
int16_t nwqids, count;
err = 0;
+ wqids = NULL;
clnt = oldfid->clnt;
if (clone) {
fid = p9_fid_create(clnt);
@@ -994,9 +997,11 @@
else
fid->qid = oldfid->qid;
+ kfree(wqids);
return fid;
clunk_fid:
+ kfree(wqids);
p9_client_clunk(fid);
fid = NULL;
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 0ea20c3..17c5ba7 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -426,8 +426,10 @@
/* Allocate an fcall for the reply */
rpl_context = kmalloc(sizeof *rpl_context, GFP_KERNEL);
- if (!rpl_context)
+ if (!rpl_context) {
+ err = -ENOMEM;
goto err_close;
+ }
/*
* If the request has a buffer, steal it, otherwise
@@ -445,8 +447,8 @@
}
rpl_context->rc = req->rc;
if (!rpl_context->rc) {
- kfree(rpl_context);
- goto err_close;
+ err = -ENOMEM;
+ goto err_free2;
}
/*
@@ -458,11 +460,8 @@
*/
if (atomic_inc_return(&rdma->rq_count) <= rdma->rq_depth) {
err = post_recv(client, rpl_context);
- if (err) {
- kfree(rpl_context->rc);
- kfree(rpl_context);
- goto err_close;
- }
+ if (err)
+ goto err_free1;
} else
atomic_dec(&rdma->rq_count);
@@ -471,8 +470,10 @@
/* Post the request */
c = kmalloc(sizeof *c, GFP_KERNEL);
- if (!c)
- goto err_close;
+ if (!c) {
+ err = -ENOMEM;
+ goto err_free1;
+ }
c->req = req;
c->busa = ib_dma_map_single(rdma->cm_id->device,
@@ -499,9 +500,15 @@
return ib_post_send(rdma->qp, &wr, &bad_wr);
error:
+ kfree(c);
+ kfree(rpl_context->rc);
+ kfree(rpl_context);
P9_DPRINTK(P9_DEBUG_ERROR, "EIO\n");
return -EIO;
-
+ err_free1:
+ kfree(rpl_context->rc);
+ err_free2:
+ kfree(rpl_context);
err_close:
spin_lock_irqsave(&rdma->req_lock, flags);
if (rdma->state < P9_RDMA_CLOSING) {
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index dcfbe99..b885159 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -329,7 +329,8 @@
mutex_lock(&virtio_9p_lock);
list_for_each_entry(chan, &virtio_chan_list, chan_list) {
- if (!strncmp(devname, chan->tag, chan->tag_len)) {
+ if (!strncmp(devname, chan->tag, chan->tag_len) &&
+ strlen(devname) == chan->tag_len) {
if (!chan->inuse) {
chan->inuse = true;
found = 1;
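The old strncmp() alone matched any devname that merely began with the tag;
the added strlen() check makes it an exact match. The same test as a
hypothetical helper:

	static bool tag_matches(const char *devname, const char *tag,
				unsigned int tag_len)
	{
		return strlen(devname) == tag_len &&
		       !strncmp(devname, tag, tag_len);
	}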
diff --git a/net/Kconfig b/net/Kconfig
index e330594..55fd82e 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -217,7 +217,7 @@
config RPS
boolean
- depends on SMP && SYSFS
+ depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
default y
menu "Network testing"
@@ -293,6 +293,7 @@
source "net/rfkill/Kconfig"
source "net/9p/Kconfig"
source "net/caif/Kconfig"
+source "net/ceph/Kconfig"
endif # if NET
diff --git a/net/Makefile b/net/Makefile
index ea60fbc..6b7bfd7 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -68,3 +68,4 @@
endif
obj-$(CONFIG_WIMAX) += wimax/
obj-$(CONFIG_DNS_RESOLVER) += dns_resolver/
+obj-$(CONFIG_CEPH_LIB) += ceph/
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 651babd..ad2b232 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -399,12 +399,6 @@
unregister_netdev(net_dev);
free_netdev(net_dev);
}
- read_lock_irq(&devs_lock);
- if (list_empty(&br2684_devs)) {
- /* last br2684 device */
- unregister_atmdevice_notifier(&atm_dev_notifier);
- }
- read_unlock_irq(&devs_lock);
return;
}
@@ -675,7 +669,6 @@
if (list_empty(&br2684_devs)) {
/* 1st br2684 device */
- register_atmdevice_notifier(&atm_dev_notifier);
brdev->number = 1;
} else
brdev->number = BRPRIV(list_entry_brdev(br2684_devs.prev))->number + 1;
@@ -815,6 +808,7 @@
return -ENOMEM;
#endif
register_atm_ioctl(&br2684_ioctl_ops);
+ register_atmdevice_notifier(&atm_dev_notifier);
return 0;
}
@@ -830,9 +824,7 @@
#endif
- /* if not already empty */
- if (!list_empty(&br2684_devs))
- unregister_atmdevice_notifier(&atm_dev_notifier);
+ unregister_atmdevice_notifier(&atm_dev_notifier);
while (!list_empty(&br2684_devs)) {
net_dev = list_entry_brdev(br2684_devs.next);
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 622b471..74bcc66 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -778,7 +778,7 @@
eg->packets_rcvd++;
mpc->eg_ops->put(eg);
- memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data));
+ memset(ATM_SKB(new_skb), 0, sizeof(struct atm_skb_data));
netif_rx(new_skb);
}
diff --git a/net/ax25/ax25_ds_timer.c b/net/ax25/ax25_ds_timer.c
index 2ce79df..c7d8143 100644
--- a/net/ax25/ax25_ds_timer.c
+++ b/net/ax25/ax25_ds_timer.c
@@ -112,8 +112,8 @@
if (sk) {
sock_hold(sk);
ax25_destroy_socket(ax25);
- sock_put(sk);
bh_unlock_sock(sk);
+ sock_put(sk);
} else
ax25_destroy_socket(ax25);
return;
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index fadf26b..0b54b7d 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -1441,33 +1441,23 @@
static void l2cap_streaming_send(struct sock *sk)
{
- struct sk_buff *skb, *tx_skb;
+ struct sk_buff *skb;
struct l2cap_pinfo *pi = l2cap_pi(sk);
u16 control, fcs;
- while ((skb = sk->sk_send_head)) {
- tx_skb = skb_clone(skb, GFP_ATOMIC);
-
- control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
+ while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
+ control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
- put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
+ put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
if (pi->fcs == L2CAP_FCS_CRC16) {
- fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
- put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
+ fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
+ put_unaligned_le16(fcs, skb->data + skb->len - 2);
}
- l2cap_do_send(sk, tx_skb);
+ l2cap_do_send(sk, skb);
pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
-
- if (skb_queue_is_last(TX_QUEUE(sk), skb))
- sk->sk_send_head = NULL;
- else
- sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
-
- skb = skb_dequeue(TX_QUEUE(sk));
- kfree_skb(skb);
}
}
@@ -1960,6 +1950,11 @@
switch (optname) {
case L2CAP_OPTIONS:
+ if (sk->sk_state == BT_CONNECTED) {
+ err = -EINVAL;
+ break;
+ }
+
opts.imtu = l2cap_pi(sk)->imtu;
opts.omtu = l2cap_pi(sk)->omtu;
opts.flush_to = l2cap_pi(sk)->flush_to;
@@ -2771,10 +2766,10 @@
case L2CAP_CONF_MTU:
if (val < L2CAP_DEFAULT_MIN_MTU) {
*result = L2CAP_CONF_UNACCEPT;
- pi->omtu = L2CAP_DEFAULT_MIN_MTU;
+ pi->imtu = L2CAP_DEFAULT_MIN_MTU;
} else
- pi->omtu = val;
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
+ pi->imtu = val;
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
break;
case L2CAP_CONF_FLUSH_TO:
@@ -3071,6 +3066,17 @@
return 0;
}
+static inline void set_default_fcs(struct l2cap_pinfo *pi)
+{
+ /* FCS is enabled only in ERTM or streaming mode, if one or both
+ * sides request it.
+ */
+ if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
+ pi->fcs = L2CAP_FCS_NONE;
+ else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
+ pi->fcs = L2CAP_FCS_CRC16;
+}
+
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
@@ -3088,14 +3094,8 @@
if (!sk)
return -ENOENT;
- if (sk->sk_state != BT_CONFIG) {
- struct l2cap_cmd_rej rej;
-
- rej.reason = cpu_to_le16(0x0002);
- l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
- sizeof(rej), &rej);
+ if (sk->sk_state == BT_DISCONN)
goto unlock;
- }
/* Reject if config buffer is too small. */
len = cmd_len - sizeof(*req);
@@ -3135,9 +3135,7 @@
goto unlock;
if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
- if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
- l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
- l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
+ set_default_fcs(l2cap_pi(sk));
sk->sk_state = BT_CONNECTED;
@@ -3225,9 +3223,7 @@
l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
- if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
- l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
- l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
+ set_default_fcs(l2cap_pi(sk));
sk->sk_state = BT_CONNECTED;
l2cap_pi(sk)->next_tx_seq = 0;
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 44a6232..194b3a0 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -82,11 +82,14 @@
static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
{
struct sock *sk = d->owner, *parent;
+ unsigned long flags;
+
if (!sk)
return;
BT_DBG("dlc %p state %ld err %d", d, d->state, err);
+ local_irq_save(flags);
bh_lock_sock(sk);
if (err)
@@ -108,6 +111,7 @@
}
bh_unlock_sock(sk);
+ local_irq_restore(flags);
if (parent && sock_flag(sk, SOCK_ZAPPED)) {
/* We have to drop DLC lock here, otherwise
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 2c911c0..137f232 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -162,8 +162,8 @@
if (tmp) {
memcpy(tmp, nf_bridge, sizeof(struct nf_bridge_info));
atomic_set(&tmp->use, 1);
- nf_bridge_put(nf_bridge);
}
+ nf_bridge_put(nf_bridge);
nf_bridge = tmp;
}
return nf_bridge;
@@ -761,9 +761,11 @@
{
if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) &&
skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
- !skb_is_gso(skb))
+ !skb_is_gso(skb)) {
+ /* BUG: Should really parse the IP options here. */
+ memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
return ip_fragment(skb, br_dev_queue_push_xmit);
- else
+ } else
return br_dev_queue_push_xmit(skb);
}
#else
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 8ce9047..4bf28f2 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -827,6 +827,7 @@
long timeo;
int err;
int ifindex, headroom, tailroom;
+ unsigned int mtu;
struct net_device *dev;
lock_sock(sk);
@@ -896,15 +897,23 @@
cf_sk->sk.sk_state = CAIF_DISCONNECTED;
goto out;
}
- dev = dev_get_by_index(sock_net(sk), ifindex);
+
+ err = -ENODEV;
+ rcu_read_lock();
+ dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
+ if (!dev) {
+ rcu_read_unlock();
+ goto out;
+ }
cf_sk->headroom = LL_RESERVED_SPACE_EXTRA(dev, headroom);
+ mtu = dev->mtu;
+ rcu_read_unlock();
+
cf_sk->tailroom = tailroom;
- cf_sk->maxframe = dev->mtu - (headroom + tailroom);
- dev_put(dev);
+ cf_sk->maxframe = mtu - (headroom + tailroom);
if (cf_sk->maxframe < 1) {
- pr_warning("CAIF: %s(): CAIF Interface MTU too small (%d)\n",
- __func__, dev->mtu);
- err = -ENODEV;
+ pr_warning("CAIF: %s(): CAIF Interface MTU too small (%u)\n",
+ __func__, mtu);
goto out;
}
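The conversion above follows the usual RCU device-lookup pattern: copy out the
fields you need inside the read-side section instead of taking a reference. A
minimal sketch:

	unsigned int mtu = 0;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		mtu = dev->mtu;	/* copy while protected */
	rcu_read_unlock();	/* no dev_put(): no reference was taken */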
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index eb16020..9a69924 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -7,7 +7,7 @@
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
-#include <linux/unaligned/le_byteshift.h>
+#include <asm/unaligned.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfsrvl.h>
#include <net/caif/cfpkt.h>
diff --git a/net/ceph/Kconfig b/net/ceph/Kconfig
new file mode 100644
index 0000000..ad42404
--- /dev/null
+++ b/net/ceph/Kconfig
@@ -0,0 +1,28 @@
+config CEPH_LIB
+ tristate "Ceph core library (EXPERIMENTAL)"
+ depends on INET && EXPERIMENTAL
+ select LIBCRC32C
+ select CRYPTO_AES
+ select CRYPTO
+ default n
+ help
+	  Choose Y or M here to include libceph, which provides
+	  functionality common to both the Ceph filesystem and
+	  the rados block device (rbd).
+
+ More information at http://ceph.newdream.net/.
+
+ If unsure, say N.
+
+config CEPH_LIB_PRETTYDEBUG
+ bool "Include file:line in ceph debug output"
+ depends on CEPH_LIB
+ default n
+ help
+ If you say Y here, debug output will include a filename and
+ line to aid debugging. This increases kernel size and slows
+ execution slightly when debug call sites are enabled (e.g.,
+ via CONFIG_DYNAMIC_DEBUG).
+
+ If unsure, say N.
+
diff --git a/net/ceph/Makefile b/net/ceph/Makefile
new file mode 100644
index 0000000..aab1cab
--- /dev/null
+++ b/net/ceph/Makefile
@@ -0,0 +1,37 @@
+#
+# Makefile for the Ceph core library (libceph).
+#
+
+ifneq ($(KERNELRELEASE),)
+
+obj-$(CONFIG_CEPH_LIB) += libceph.o
+
+libceph-objs := ceph_common.o messenger.o msgpool.o buffer.o pagelist.o \
+ mon_client.o \
+ osd_client.o osdmap.o crush/crush.o crush/mapper.o crush/hash.o \
+ debugfs.o \
+ auth.o auth_none.o \
+ crypto.o armor.o \
+ auth_x.o \
+ ceph_fs.o ceph_strings.o ceph_hash.o \
+ pagevec.o
+
+else
+# Otherwise we were called directly from the command
+# line; invoke the kernel build system.
+
+KERNELDIR ?= /lib/modules/$(shell uname -r)/build
+PWD := $(shell pwd)
+
+default: all
+
+all:
+ $(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_LIB=m modules
+
+modules_install:
+ $(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_LIB=m modules_install
+
+clean:
+ $(MAKE) -C $(KERNELDIR) M=$(PWD) clean
+
+endif
diff --git a/fs/ceph/armor.c b/net/ceph/armor.c
similarity index 100%
rename from fs/ceph/armor.c
rename to net/ceph/armor.c
diff --git a/fs/ceph/auth.c b/net/ceph/auth.c
similarity index 96%
rename from fs/ceph/auth.c
rename to net/ceph/auth.c
index 6d2e306..549c1f4 100644
--- a/fs/ceph/auth.c
+++ b/net/ceph/auth.c
@@ -1,16 +1,16 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
-#include "types.h"
+#include <linux/ceph/types.h>
+#include <linux/ceph/decode.h>
+#include <linux/ceph/libceph.h>
+#include <linux/ceph/messenger.h>
#include "auth_none.h"
#include "auth_x.h"
-#include "decode.h"
-#include "super.h"
-#include "messenger.h"
/*
* get protocol handler
diff --git a/fs/ceph/auth_none.c b/net/ceph/auth_none.c
similarity index 96%
rename from fs/ceph/auth_none.c
rename to net/ceph/auth_none.c
index ad1dc21..214c2bb 100644
--- a/fs/ceph/auth_none.c
+++ b/net/ceph/auth_none.c
@@ -1,14 +1,15 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
+#include <linux/ceph/decode.h>
+#include <linux/ceph/auth.h>
+
#include "auth_none.h"
-#include "auth.h"
-#include "decode.h"
static void reset(struct ceph_auth_client *ac)
{
diff --git a/fs/ceph/auth_none.h b/net/ceph/auth_none.h
similarity index 94%
rename from fs/ceph/auth_none.h
rename to net/ceph/auth_none.h
index 8164df1..ed7d088 100644
--- a/fs/ceph/auth_none.h
+++ b/net/ceph/auth_none.h
@@ -2,8 +2,7 @@
#define _FS_CEPH_AUTH_NONE_H
#include <linux/slab.h>
-
-#include "auth.h"
+#include <linux/ceph/auth.h>
/*
* null security mode.
diff --git a/fs/ceph/auth_x.c b/net/ceph/auth_x.c
similarity index 98%
rename from fs/ceph/auth_x.c
rename to net/ceph/auth_x.c
index 582e0b2..7fd5dfc 100644
--- a/fs/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -1,16 +1,17 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
+#include <linux/ceph/decode.h>
+#include <linux/ceph/auth.h>
+
+#include "crypto.h"
#include "auth_x.h"
#include "auth_x_protocol.h"
-#include "crypto.h"
-#include "auth.h"
-#include "decode.h"
#define TEMP_TICKET_BUF_LEN 256
@@ -376,7 +377,7 @@
th = get_ticket_handler(ac, service);
- if (!th) {
+ if (IS_ERR(th)) {
*pneed |= service;
continue;
}
@@ -399,6 +400,9 @@
struct ceph_x_ticket_handler *th =
get_ticket_handler(ac, CEPH_ENTITY_TYPE_AUTH);
+ if (IS_ERR(th))
+ return PTR_ERR(th);
+
ceph_x_validate_tickets(ac, &need);
dout("build_request want %x have %x need %x\n",
@@ -450,7 +454,6 @@
return -ERANGE;
head->op = cpu_to_le16(CEPHX_GET_PRINCIPAL_SESSION_KEY);
- BUG_ON(!th);
ret = ceph_x_build_authorizer(ac, th, &xi->auth_authorizer);
if (ret)
return ret;
@@ -505,7 +508,8 @@
case CEPHX_GET_PRINCIPAL_SESSION_KEY:
th = get_ticket_handler(ac, CEPH_ENTITY_TYPE_AUTH);
- BUG_ON(!th);
+ if (IS_ERR(th))
+ return PTR_ERR(th);
ret = ceph_x_proc_ticket_reply(ac, &th->session_key,
buf + sizeof(*head), end);
break;
@@ -563,8 +567,8 @@
void *end = p + sizeof(au->reply_buf);
th = get_ticket_handler(ac, au->service);
- if (!th)
- return -EIO; /* hrm! */
+ if (IS_ERR(th))
+ return PTR_ERR(th);
ret = ceph_x_decrypt(&th->session_key, &p, end, &reply, sizeof(reply));
if (ret < 0)
return ret;
@@ -626,7 +630,7 @@
struct ceph_x_ticket_handler *th;
th = get_ticket_handler(ac, peer_type);
- if (th && !IS_ERR(th))
+ if (!IS_ERR(th))
remove_ticket_handler(ac, th);
}
diff --git a/fs/ceph/auth_x.h b/net/ceph/auth_x.h
similarity index 96%
rename from fs/ceph/auth_x.h
rename to net/ceph/auth_x.h
index ff6f818..e02da7a 100644
--- a/fs/ceph/auth_x.h
+++ b/net/ceph/auth_x.h
@@ -3,8 +3,9 @@
#include <linux/rbtree.h>
+#include <linux/ceph/auth.h>
+
#include "crypto.h"
-#include "auth.h"
#include "auth_x_protocol.h"
/*
diff --git a/fs/ceph/auth_x_protocol.h b/net/ceph/auth_x_protocol.h
similarity index 100%
rename from fs/ceph/auth_x_protocol.h
rename to net/ceph/auth_x_protocol.h
diff --git a/fs/ceph/buffer.c b/net/ceph/buffer.c
similarity index 85%
rename from fs/ceph/buffer.c
rename to net/ceph/buffer.c
index cd39f17..53d8abf 100644
--- a/fs/ceph/buffer.c
+++ b/net/ceph/buffer.c
@@ -1,10 +1,11 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
+#include <linux/module.h>
#include <linux/slab.h>
-#include "buffer.h"
-#include "decode.h"
+#include <linux/ceph/buffer.h>
+#include <linux/ceph/decode.h>
struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp)
{
@@ -32,6 +33,7 @@
dout("buffer_new %p\n", b);
return b;
}
+EXPORT_SYMBOL(ceph_buffer_new);
void ceph_buffer_release(struct kref *kref)
{
@@ -46,6 +48,7 @@
}
kfree(b);
}
+EXPORT_SYMBOL(ceph_buffer_release);
int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end)
{
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
new file mode 100644
index 0000000..f3e4a13
--- /dev/null
+++ b/net/ceph/ceph_common.c
@@ -0,0 +1,529 @@
+
+#include <linux/ceph/ceph_debug.h>
+#include <linux/backing-dev.h>
+#include <linux/ctype.h>
+#include <linux/fs.h>
+#include <linux/inet.h>
+#include <linux/in6.h>
+#include <linux/module.h>
+#include <linux/mount.h>
+#include <linux/parser.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/statfs.h>
+#include <linux/string.h>
+
+
+#include <linux/ceph/libceph.h>
+#include <linux/ceph/debugfs.h>
+#include <linux/ceph/decode.h>
+#include <linux/ceph/mon_client.h>
+#include <linux/ceph/auth.h>
+
+
+
+/*
+ * find filename portion of a path (/foo/bar/baz -> baz)
+ */
+const char *ceph_file_part(const char *s, int len)
+{
+ const char *e = s + len;
+
+ while (e != s && *(e-1) != '/')
+ e--;
+ return e;
+}
+EXPORT_SYMBOL(ceph_file_part);
+
+const char *ceph_msg_type_name(int type)
+{
+ switch (type) {
+ case CEPH_MSG_SHUTDOWN: return "shutdown";
+ case CEPH_MSG_PING: return "ping";
+ case CEPH_MSG_AUTH: return "auth";
+ case CEPH_MSG_AUTH_REPLY: return "auth_reply";
+ case CEPH_MSG_MON_MAP: return "mon_map";
+ case CEPH_MSG_MON_GET_MAP: return "mon_get_map";
+ case CEPH_MSG_MON_SUBSCRIBE: return "mon_subscribe";
+ case CEPH_MSG_MON_SUBSCRIBE_ACK: return "mon_subscribe_ack";
+ case CEPH_MSG_STATFS: return "statfs";
+ case CEPH_MSG_STATFS_REPLY: return "statfs_reply";
+ case CEPH_MSG_MDS_MAP: return "mds_map";
+ case CEPH_MSG_CLIENT_SESSION: return "client_session";
+ case CEPH_MSG_CLIENT_RECONNECT: return "client_reconnect";
+ case CEPH_MSG_CLIENT_REQUEST: return "client_request";
+ case CEPH_MSG_CLIENT_REQUEST_FORWARD: return "client_request_forward";
+ case CEPH_MSG_CLIENT_REPLY: return "client_reply";
+ case CEPH_MSG_CLIENT_CAPS: return "client_caps";
+ case CEPH_MSG_CLIENT_CAPRELEASE: return "client_cap_release";
+ case CEPH_MSG_CLIENT_SNAP: return "client_snap";
+ case CEPH_MSG_CLIENT_LEASE: return "client_lease";
+ case CEPH_MSG_OSD_MAP: return "osd_map";
+ case CEPH_MSG_OSD_OP: return "osd_op";
+ case CEPH_MSG_OSD_OPREPLY: return "osd_opreply";
+ default: return "unknown";
+ }
+}
+EXPORT_SYMBOL(ceph_msg_type_name);
+
+/*
+ * Initially learn our fsid, or verify an fsid matches.
+ */
+int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid)
+{
+ if (client->have_fsid) {
+ if (ceph_fsid_compare(&client->fsid, fsid)) {
+ pr_err("bad fsid, had %pU got %pU",
+ &client->fsid, fsid);
+ return -1;
+ }
+ } else {
+ pr_info("client%lld fsid %pU\n", ceph_client_id(client), fsid);
+ memcpy(&client->fsid, fsid, sizeof(*fsid));
+ ceph_debugfs_client_init(client);
+ client->have_fsid = true;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ceph_check_fsid);
+
+static int strcmp_null(const char *s1, const char *s2)
+{
+ if (!s1 && !s2)
+ return 0;
+ if (s1 && !s2)
+ return -1;
+ if (!s1 && s2)
+ return 1;
+ return strcmp(s1, s2);
+}
+
+int ceph_compare_options(struct ceph_options *new_opt,
+ struct ceph_client *client)
+{
+ struct ceph_options *opt1 = new_opt;
+ struct ceph_options *opt2 = client->options;
+ int ofs = offsetof(struct ceph_options, mon_addr);
+ int i;
+ int ret;
+
+ ret = memcmp(opt1, opt2, ofs);
+ if (ret)
+ return ret;
+
+ ret = strcmp_null(opt1->name, opt2->name);
+ if (ret)
+ return ret;
+
+ ret = strcmp_null(opt1->secret, opt2->secret);
+ if (ret)
+ return ret;
+
+ /* any matching mon ip implies a match */
+ for (i = 0; i < opt1->num_mon; i++) {
+ if (ceph_monmap_contains(client->monc.monmap,
+ &opt1->mon_addr[i]))
+ return 0;
+ }
+ return -1;
+}
+EXPORT_SYMBOL(ceph_compare_options);
+
+
+static int parse_fsid(const char *str, struct ceph_fsid *fsid)
+{
+ int i = 0;
+ char tmp[3];
+ int err = -EINVAL;
+ int d;
+
+ dout("parse_fsid '%s'\n", str);
+ tmp[2] = 0;
+ while (*str && i < 16) {
+ if (ispunct(*str)) {
+ str++;
+ continue;
+ }
+ if (!isxdigit(str[0]) || !isxdigit(str[1]))
+ break;
+ tmp[0] = str[0];
+ tmp[1] = str[1];
+ if (sscanf(tmp, "%x", &d) < 1)
+ break;
+ fsid->fsid[i] = d & 0xff;
+ i++;
+ str += 2;
+ }
+
+ if (i == 16)
+ err = 0;
+ dout("parse_fsid ret %d got fsid %pU", err, fsid);
+ return err;
+}
+
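parse_fsid() skips punctuation between hex digit pairs, so both calls below
would yield the same 16-byte fsid (the example values are hypothetical):

	struct ceph_fsid fsid;

	parse_fsid("0123456789abcdef0123456789abcdef", &fsid);
	parse_fsid("01234567-89ab-cdef-0123-456789abcdef", &fsid);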
+/*
+ * ceph options
+ */
+enum {
+ Opt_osdtimeout,
+ Opt_osdkeepalivetimeout,
+ Opt_mount_timeout,
+ Opt_osd_idle_ttl,
+ Opt_last_int,
+ /* int args above */
+ Opt_fsid,
+ Opt_name,
+ Opt_secret,
+ Opt_ip,
+ Opt_last_string,
+ /* string args above */
+ Opt_noshare,
+ Opt_nocrc,
+};
+
+static match_table_t opt_tokens = {
+ {Opt_osdtimeout, "osdtimeout=%d"},
+ {Opt_osdkeepalivetimeout, "osdkeepalive=%d"},
+ {Opt_mount_timeout, "mount_timeout=%d"},
+ {Opt_osd_idle_ttl, "osd_idle_ttl=%d"},
+ /* int args above */
+ {Opt_fsid, "fsid=%s"},
+ {Opt_name, "name=%s"},
+ {Opt_secret, "secret=%s"},
+ {Opt_ip, "ip=%s"},
+ /* string args above */
+ {Opt_noshare, "noshare"},
+ {Opt_nocrc, "nocrc"},
+ {-1, NULL}
+};
+
+void ceph_destroy_options(struct ceph_options *opt)
+{
+ dout("destroy_options %p\n", opt);
+ kfree(opt->name);
+ kfree(opt->secret);
+ kfree(opt);
+}
+EXPORT_SYMBOL(ceph_destroy_options);
+
+int ceph_parse_options(struct ceph_options **popt, char *options,
+ const char *dev_name, const char *dev_name_end,
+ int (*parse_extra_token)(char *c, void *private),
+ void *private)
+{
+ struct ceph_options *opt;
+ const char *c;
+ int err = -ENOMEM;
+ substring_t argstr[MAX_OPT_ARGS];
+
+ opt = kzalloc(sizeof(*opt), GFP_KERNEL);
+ if (!opt)
+ return err;
+ opt->mon_addr = kcalloc(CEPH_MAX_MON, sizeof(*opt->mon_addr),
+ GFP_KERNEL);
+ if (!opt->mon_addr)
+ goto out;
+
+ dout("parse_options %p options '%s' dev_name '%s'\n", opt, options,
+ dev_name);
+
+ /* start with defaults */
+ opt->flags = CEPH_OPT_DEFAULT;
+ opt->osd_timeout = CEPH_OSD_TIMEOUT_DEFAULT;
+ opt->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT;
+ opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; /* seconds */
+ opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT; /* seconds */
+
+ /* get mon ip(s) */
+ /* ip1[:port1][,ip2[:port2]...] */
+ err = ceph_parse_ips(dev_name, dev_name_end, opt->mon_addr,
+ CEPH_MAX_MON, &opt->num_mon);
+ if (err < 0)
+ goto out;
+
+ /* parse mount options */
+ while ((c = strsep(&options, ",")) != NULL) {
+ int token, intval, ret;
+ if (!*c)
+ continue;
+ err = -EINVAL;
+ token = match_token((char *)c, opt_tokens, argstr);
+ if (token < 0 && parse_extra_token) {
+ /* extra? */
+ err = parse_extra_token((char *)c, private);
+ if (err < 0) {
+ pr_err("bad option at '%s'\n", c);
+ goto out;
+ }
+ continue;
+ }
+ if (token < Opt_last_int) {
+ ret = match_int(&argstr[0], &intval);
+ if (ret < 0) {
+ pr_err("bad mount option arg (not int) "
+ "at '%s'\n", c);
+ continue;
+ }
+ dout("got int token %d val %d\n", token, intval);
+ } else if (token > Opt_last_int && token < Opt_last_string) {
+ dout("got string token %d val %s\n", token,
+ argstr[0].from);
+ } else {
+ dout("got token %d\n", token);
+ }
+ switch (token) {
+ case Opt_ip:
+ err = ceph_parse_ips(argstr[0].from,
+ argstr[0].to,
+ &opt->my_addr,
+ 1, NULL);
+ if (err < 0)
+ goto out;
+ opt->flags |= CEPH_OPT_MYIP;
+ break;
+
+ case Opt_fsid:
+ err = parse_fsid(argstr[0].from, &opt->fsid);
+ if (err == 0)
+ opt->flags |= CEPH_OPT_FSID;
+ break;
+ case Opt_name:
+ opt->name = kstrndup(argstr[0].from,
+ argstr[0].to-argstr[0].from,
+ GFP_KERNEL);
+ break;
+ case Opt_secret:
+ opt->secret = kstrndup(argstr[0].from,
+ argstr[0].to-argstr[0].from,
+ GFP_KERNEL);
+ break;
+
+ /* misc */
+ case Opt_osdtimeout:
+ opt->osd_timeout = intval;
+ break;
+ case Opt_osdkeepalivetimeout:
+ opt->osd_keepalive_timeout = intval;
+ break;
+ case Opt_osd_idle_ttl:
+ opt->osd_idle_ttl = intval;
+ break;
+ case Opt_mount_timeout:
+ opt->mount_timeout = intval;
+ break;
+
+ case Opt_noshare:
+ opt->flags |= CEPH_OPT_NOSHARE;
+ break;
+
+ case Opt_nocrc:
+ opt->flags |= CEPH_OPT_NOCRC;
+ break;
+
+ default:
+ BUG_ON(token);
+ }
+ }
+
+ /* success */
+ *popt = opt;
+ return 0;
+
+out:
+ ceph_destroy_options(opt);
+ return err;
+}
+EXPORT_SYMBOL(ceph_parse_options);
+
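A hedged sketch of how a consumer such as the fs client or rbd would chain
these exports together; error handling is abbreviated, and on success
ceph_create_client() takes ownership of the options:

	struct ceph_options *opt;
	struct ceph_client *client;
	int err;

	err = ceph_parse_options(&opt, options, dev_name, dev_name_end,
				 NULL, NULL);	/* no extra tokens */
	if (err < 0)
		return err;

	client = ceph_create_client(opt, NULL);
	if (IS_ERR(client)) {
		ceph_destroy_options(opt);	/* still ours on failure */
		return PTR_ERR(client);
	}
	err = ceph_open_session(client);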
+u64 ceph_client_id(struct ceph_client *client)
+{
+ return client->monc.auth->global_id;
+}
+EXPORT_SYMBOL(ceph_client_id);
+
+/*
+ * create a fresh client instance
+ */
+struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private)
+{
+ struct ceph_client *client;
+ int err = -ENOMEM;
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (client == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ client->private = private;
+ client->options = opt;
+
+ mutex_init(&client->mount_mutex);
+ init_waitqueue_head(&client->auth_wq);
+ client->auth_err = 0;
+
+ client->extra_mon_dispatch = NULL;
+ client->supported_features = CEPH_FEATURE_SUPPORTED_DEFAULT;
+ client->required_features = CEPH_FEATURE_REQUIRED_DEFAULT;
+
+ client->msgr = NULL;
+
+ /* subsystems */
+ err = ceph_monc_init(&client->monc, client);
+ if (err < 0)
+ goto fail;
+ err = ceph_osdc_init(&client->osdc, client);
+ if (err < 0)
+ goto fail_monc;
+
+ return client;
+
+fail_monc:
+ ceph_monc_stop(&client->monc);
+fail:
+ kfree(client);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(ceph_create_client);
+
+void ceph_destroy_client(struct ceph_client *client)
+{
+ dout("destroy_client %p\n", client);
+
+ /* unmount */
+ ceph_osdc_stop(&client->osdc);
+
+ /*
+ * make sure mds and osd connections close out before destroying
+ * the auth module, which is needed to free those connections'
+ * ceph_authorizers.
+ */
+ ceph_msgr_flush();
+
+ ceph_monc_stop(&client->monc);
+
+ ceph_debugfs_client_cleanup(client);
+
+ if (client->msgr)
+ ceph_messenger_destroy(client->msgr);
+
+ ceph_destroy_options(client->options);
+
+ kfree(client);
+ dout("destroy_client %p done\n", client);
+}
+EXPORT_SYMBOL(ceph_destroy_client);
+
+/*
+ * true if we have both the mon and osd maps (and have thus joined the cluster)
+ */
+static int have_mon_and_osd_map(struct ceph_client *client)
+{
+ return client->monc.monmap && client->monc.monmap->epoch &&
+ client->osdc.osdmap && client->osdc.osdmap->epoch;
+}
+
+/*
+ * mount: join the ceph cluster and wait for the mon and osd maps
+ */
+int __ceph_open_session(struct ceph_client *client, unsigned long started)
+{
+ struct ceph_entity_addr *myaddr = NULL;
+ int err;
+ unsigned long timeout = client->options->mount_timeout * HZ;
+
+ /* initialize the messenger */
+ if (client->msgr == NULL) {
+ if (ceph_test_opt(client, MYIP))
+ myaddr = &client->options->my_addr;
+ client->msgr = ceph_messenger_create(myaddr,
+ client->supported_features,
+ client->required_features);
+ if (IS_ERR(client->msgr)) {
+ client->msgr = NULL;
+ return PTR_ERR(client->msgr);
+ }
+ client->msgr->nocrc = ceph_test_opt(client, NOCRC);
+ }
+
+ /* open session, and wait for mon and osd maps */
+ err = ceph_monc_open_session(&client->monc);
+ if (err < 0)
+ return err;
+
+ while (!have_mon_and_osd_map(client)) {
+ err = -EIO;
+ if (timeout && time_after_eq(jiffies, started + timeout))
+ return err;
+
+ /* wait */
+ dout("mount waiting for mon_map\n");
+ err = wait_event_interruptible_timeout(client->auth_wq,
+ have_mon_and_osd_map(client) || (client->auth_err < 0),
+ timeout);
+ if (err == -EINTR || err == -ERESTARTSYS)
+ return err;
+ if (client->auth_err < 0)
+ return client->auth_err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(__ceph_open_session);
+
+
+int ceph_open_session(struct ceph_client *client)
+{
+ int ret;
+ unsigned long started = jiffies; /* note the start time */
+
+ dout("open_session start\n");
+ mutex_lock(&client->mount_mutex);
+
+ ret = __ceph_open_session(client, started);
+
+ mutex_unlock(&client->mount_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(ceph_open_session);
+
+
+static int __init init_ceph_lib(void)
+{
+ int ret = 0;
+
+ ret = ceph_debugfs_init();
+ if (ret < 0)
+ goto out;
+
+ ret = ceph_msgr_init();
+ if (ret < 0)
+ goto out_debugfs;
+
+ pr_info("loaded (mon/osd proto %d/%d, osdmap %d/%d %d/%d)\n",
+ CEPH_MONC_PROTOCOL, CEPH_OSDC_PROTOCOL,
+ CEPH_OSDMAP_VERSION, CEPH_OSDMAP_VERSION_EXT,
+ CEPH_OSDMAP_INC_VERSION, CEPH_OSDMAP_INC_VERSION_EXT);
+
+ return 0;
+
+out_debugfs:
+ ceph_debugfs_cleanup();
+out:
+ return ret;
+}
+
+static void __exit exit_ceph_lib(void)
+{
+ dout("exit_ceph_lib\n");
+ ceph_msgr_exit();
+ ceph_debugfs_cleanup();
+}
+
+module_init(init_ceph_lib);
+module_exit(exit_ceph_lib);
+
+MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
+MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
+MODULE_AUTHOR("Patience Warnick <patience@newdream.net>");
+MODULE_DESCRIPTION("Ceph filesystem for Linux");
+MODULE_LICENSE("GPL");
diff --git a/fs/ceph/ceph_fs.c b/net/ceph/ceph_fs.c
similarity index 92%
rename from fs/ceph/ceph_fs.c
rename to net/ceph/ceph_fs.c
index 3ac6cc7..a3a3a31 100644
--- a/fs/ceph/ceph_fs.c
+++ b/net/ceph/ceph_fs.c
@@ -1,7 +1,8 @@
/*
* Some non-inline ceph helpers
*/
-#include "types.h"
+#include <linux/module.h>
+#include <linux/ceph/types.h>
/*
* return true if @layout appears to be valid
@@ -52,6 +53,7 @@
return mode;
}
+EXPORT_SYMBOL(ceph_flags_to_mode);
int ceph_caps_for_mode(int mode)
{
@@ -70,3 +72,4 @@
return caps;
}
+EXPORT_SYMBOL(ceph_caps_for_mode);
diff --git a/fs/ceph/ceph_hash.c b/net/ceph/ceph_hash.c
similarity index 98%
rename from fs/ceph/ceph_hash.c
rename to net/ceph/ceph_hash.c
index bd57001..815ef88 100644
--- a/fs/ceph/ceph_hash.c
+++ b/net/ceph/ceph_hash.c
@@ -1,5 +1,5 @@
-#include "types.h"
+#include <linux/ceph/types.h>
/*
* Robert Jenkin's hash function.
diff --git a/net/ceph/ceph_strings.c b/net/ceph/ceph_strings.c
new file mode 100644
index 0000000..3fbda04
--- /dev/null
+++ b/net/ceph/ceph_strings.c
@@ -0,0 +1,84 @@
+/*
+ * Ceph string constants
+ */
+#include <linux/module.h>
+#include <linux/ceph/types.h>
+
+const char *ceph_entity_type_name(int type)
+{
+ switch (type) {
+ case CEPH_ENTITY_TYPE_MDS: return "mds";
+ case CEPH_ENTITY_TYPE_OSD: return "osd";
+ case CEPH_ENTITY_TYPE_MON: return "mon";
+ case CEPH_ENTITY_TYPE_CLIENT: return "client";
+ case CEPH_ENTITY_TYPE_AUTH: return "auth";
+ default: return "unknown";
+ }
+}
+
+const char *ceph_osd_op_name(int op)
+{
+ switch (op) {
+ case CEPH_OSD_OP_READ: return "read";
+ case CEPH_OSD_OP_STAT: return "stat";
+
+ case CEPH_OSD_OP_MASKTRUNC: return "masktrunc";
+
+ case CEPH_OSD_OP_WRITE: return "write";
+ case CEPH_OSD_OP_DELETE: return "delete";
+ case CEPH_OSD_OP_TRUNCATE: return "truncate";
+ case CEPH_OSD_OP_ZERO: return "zero";
+ case CEPH_OSD_OP_WRITEFULL: return "writefull";
+ case CEPH_OSD_OP_ROLLBACK: return "rollback";
+
+ case CEPH_OSD_OP_APPEND: return "append";
+ case CEPH_OSD_OP_STARTSYNC: return "startsync";
+ case CEPH_OSD_OP_SETTRUNC: return "settrunc";
+ case CEPH_OSD_OP_TRIMTRUNC: return "trimtrunc";
+
+ case CEPH_OSD_OP_TMAPUP: return "tmapup";
+ case CEPH_OSD_OP_TMAPGET: return "tmapget";
+ case CEPH_OSD_OP_TMAPPUT: return "tmapput";
+
+ case CEPH_OSD_OP_GETXATTR: return "getxattr";
+ case CEPH_OSD_OP_GETXATTRS: return "getxattrs";
+ case CEPH_OSD_OP_SETXATTR: return "setxattr";
+ case CEPH_OSD_OP_SETXATTRS: return "setxattrs";
+ case CEPH_OSD_OP_RESETXATTRS: return "resetxattrs";
+ case CEPH_OSD_OP_RMXATTR: return "rmxattr";
+ case CEPH_OSD_OP_CMPXATTR: return "cmpxattr";
+
+ case CEPH_OSD_OP_PULL: return "pull";
+ case CEPH_OSD_OP_PUSH: return "push";
+ case CEPH_OSD_OP_BALANCEREADS: return "balance-reads";
+ case CEPH_OSD_OP_UNBALANCEREADS: return "unbalance-reads";
+ case CEPH_OSD_OP_SCRUB: return "scrub";
+
+ case CEPH_OSD_OP_WRLOCK: return "wrlock";
+ case CEPH_OSD_OP_WRUNLOCK: return "wrunlock";
+ case CEPH_OSD_OP_RDLOCK: return "rdlock";
+ case CEPH_OSD_OP_RDUNLOCK: return "rdunlock";
+ case CEPH_OSD_OP_UPLOCK: return "uplock";
+ case CEPH_OSD_OP_DNLOCK: return "dnlock";
+
+ case CEPH_OSD_OP_CALL: return "call";
+
+ case CEPH_OSD_OP_PGLS: return "pgls";
+ }
+ return "???";
+}
+
+
+const char *ceph_pool_op_name(int op)
+{
+ switch (op) {
+ case POOL_OP_CREATE: return "create";
+ case POOL_OP_DELETE: return "delete";
+ case POOL_OP_AUID_CHANGE: return "auid change";
+ case POOL_OP_CREATE_SNAP: return "create snap";
+ case POOL_OP_DELETE_SNAP: return "delete snap";
+ case POOL_OP_CREATE_UNMANAGED_SNAP: return "create unmanaged snap";
+ case POOL_OP_DELETE_UNMANAGED_SNAP: return "delete unmanaged snap";
+ }
+ return "???";
+}
diff --git a/fs/ceph/crush/crush.c b/net/ceph/crush/crush.c
similarity index 98%
rename from fs/ceph/crush/crush.c
rename to net/ceph/crush/crush.c
index fabd302..d6ebb13 100644
--- a/fs/ceph/crush/crush.c
+++ b/net/ceph/crush/crush.c
@@ -8,7 +8,7 @@
# define BUG_ON(x) assert(!(x))
#endif
-#include "crush.h"
+#include <linux/crush/crush.h>
const char *crush_bucket_alg_name(int alg)
{
diff --git a/fs/ceph/crush/hash.c b/net/ceph/crush/hash.c
similarity index 98%
rename from fs/ceph/crush/hash.c
rename to net/ceph/crush/hash.c
index 5873aed..5bb63e3 100644
--- a/fs/ceph/crush/hash.c
+++ b/net/ceph/crush/hash.c
@@ -1,6 +1,6 @@
#include <linux/types.h>
-#include "hash.h"
+#include <linux/crush/hash.h>
/*
* Robert Jenkins' function for mixing 32-bit values
diff --git a/fs/ceph/crush/mapper.c b/net/ceph/crush/mapper.c
similarity index 99%
rename from fs/ceph/crush/mapper.c
rename to net/ceph/crush/mapper.c
index a4eec13..42599e3 100644
--- a/fs/ceph/crush/mapper.c
+++ b/net/ceph/crush/mapper.c
@@ -18,8 +18,8 @@
# define kfree(x) free(x)
#endif
-#include "crush.h"
-#include "hash.h"
+#include <linux/crush/crush.h>
+#include <linux/crush/hash.h>
/*
* Implement the core CRUSH mapping algorithm.
diff --git a/fs/ceph/crypto.c b/net/ceph/crypto.c
similarity index 99%
rename from fs/ceph/crypto.c
rename to net/ceph/crypto.c
index a3e627f..7b505b0 100644
--- a/fs/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -1,13 +1,13 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <crypto/hash.h>
+#include <linux/ceph/decode.h>
#include "crypto.h"
-#include "decode.h"
int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end)
{
diff --git a/fs/ceph/crypto.h b/net/ceph/crypto.h
similarity index 95%
rename from fs/ceph/crypto.h
rename to net/ceph/crypto.h
index bdf3860..f9eccac 100644
--- a/fs/ceph/crypto.h
+++ b/net/ceph/crypto.h
@@ -1,8 +1,8 @@
#ifndef _FS_CEPH_CRYPTO_H
#define _FS_CEPH_CRYPTO_H
-#include "types.h"
-#include "buffer.h"
+#include <linux/ceph/types.h>
+#include <linux/ceph/buffer.h>
/*
* cryptographic secret
diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c
new file mode 100644
index 0000000..27d4ea31
--- /dev/null
+++ b/net/ceph/debugfs.c
@@ -0,0 +1,267 @@
+#include <linux/ceph/ceph_debug.h>
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <linux/ceph/libceph.h>
+#include <linux/ceph/mon_client.h>
+#include <linux/ceph/auth.h>
+#include <linux/ceph/debugfs.h>
+
+#ifdef CONFIG_DEBUG_FS
+
+/*
+ * Implement /sys/kernel/debug/ceph fun
+ *
+ * /sys/kernel/debug/ceph/client* - an instance of the ceph client
+ * .../osdmap - current osdmap
+ * .../monmap - current monmap
+ * .../osdc - active osd requests
+ * .../monc - mon client state
+ * .../dentry_lru - dump contents of dentry lru
+ * .../caps - expose cap (reservation) stats
+ * .../bdi - symlink to ../../bdi/something
+ */
+
+static struct dentry *ceph_debugfs_dir;
+
+static int monmap_show(struct seq_file *s, void *p)
+{
+ int i;
+ struct ceph_client *client = s->private;
+
+ if (client->monc.monmap == NULL)
+ return 0;
+
+ seq_printf(s, "epoch %d\n", client->monc.monmap->epoch);
+ for (i = 0; i < client->monc.monmap->num_mon; i++) {
+ struct ceph_entity_inst *inst =
+ &client->monc.monmap->mon_inst[i];
+
+ seq_printf(s, "\t%s%lld\t%s\n",
+ ENTITY_NAME(inst->name),
+ ceph_pr_addr(&inst->addr.in_addr));
+ }
+ return 0;
+}
+
+static int osdmap_show(struct seq_file *s, void *p)
+{
+ int i;
+ struct ceph_client *client = s->private;
+ struct rb_node *n;
+
+ if (client->osdc.osdmap == NULL)
+ return 0;
+ seq_printf(s, "epoch %d\n", client->osdc.osdmap->epoch);
+ seq_printf(s, "flags%s%s\n",
+ (client->osdc.osdmap->flags & CEPH_OSDMAP_NEARFULL) ?
+ " NEARFULL" : "",
+ (client->osdc.osdmap->flags & CEPH_OSDMAP_FULL) ?
+ " FULL" : "");
+ for (n = rb_first(&client->osdc.osdmap->pg_pools); n; n = rb_next(n)) {
+ struct ceph_pg_pool_info *pool =
+ rb_entry(n, struct ceph_pg_pool_info, node);
+ seq_printf(s, "pg_pool %d pg_num %d / %d, lpg_num %d / %d\n",
+ pool->id, pool->v.pg_num, pool->pg_num_mask,
+ pool->v.lpg_num, pool->lpg_num_mask);
+ }
+ for (i = 0; i < client->osdc.osdmap->max_osd; i++) {
+ struct ceph_entity_addr *addr =
+ &client->osdc.osdmap->osd_addr[i];
+ int state = client->osdc.osdmap->osd_state[i];
+ char sb[64];
+
+ seq_printf(s, "\tosd%d\t%s\t%3d%%\t(%s)\n",
+ i, ceph_pr_addr(&addr->in_addr),
+ ((client->osdc.osdmap->osd_weight[i]*100) >> 16),
+ ceph_osdmap_state_str(sb, sizeof(sb), state));
+ }
+ return 0;
+}
+
+static int monc_show(struct seq_file *s, void *p)
+{
+ struct ceph_client *client = s->private;
+ struct ceph_mon_generic_request *req;
+ struct ceph_mon_client *monc = &client->monc;
+ struct rb_node *rp;
+
+ mutex_lock(&monc->mutex);
+
+ if (monc->have_mdsmap)
+ seq_printf(s, "have mdsmap %u\n", (unsigned)monc->have_mdsmap);
+ if (monc->have_osdmap)
+ seq_printf(s, "have osdmap %u\n", (unsigned)monc->have_osdmap);
+ if (monc->want_next_osdmap)
+ seq_printf(s, "want next osdmap\n");
+
+ for (rp = rb_first(&monc->generic_request_tree); rp; rp = rb_next(rp)) {
+ __u16 op;
+ req = rb_entry(rp, struct ceph_mon_generic_request, node);
+ op = le16_to_cpu(req->request->hdr.type);
+ if (op == CEPH_MSG_STATFS)
+ seq_printf(s, "%lld statfs\n", req->tid);
+ else
+ seq_printf(s, "%lld unknown\n", req->tid);
+ }
+
+ mutex_unlock(&monc->mutex);
+ return 0;
+}
+
+static int osdc_show(struct seq_file *s, void *pp)
+{
+ struct ceph_client *client = s->private;
+ struct ceph_osd_client *osdc = &client->osdc;
+ struct rb_node *p;
+
+ mutex_lock(&osdc->request_mutex);
+ for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
+ struct ceph_osd_request *req;
+ struct ceph_osd_request_head *head;
+ struct ceph_osd_op *op;
+ int num_ops;
+ int opcode, olen;
+ int i;
+
+ req = rb_entry(p, struct ceph_osd_request, r_node);
+
+ seq_printf(s, "%lld\tosd%d\t%d.%x\t", req->r_tid,
+ req->r_osd ? req->r_osd->o_osd : -1,
+ le32_to_cpu(req->r_pgid.pool),
+ le16_to_cpu(req->r_pgid.ps));
+
+ head = req->r_request->front.iov_base;
+ op = (void *)(head + 1);
+
+ num_ops = le16_to_cpu(head->num_ops);
+ olen = le32_to_cpu(head->object_len);
+ seq_printf(s, "%.*s", olen,
+ (const char *)(head->ops + num_ops));
+
+ if (req->r_reassert_version.epoch)
+ seq_printf(s, "\t%u'%llu",
+ (unsigned)le32_to_cpu(req->r_reassert_version.epoch),
+ le64_to_cpu(req->r_reassert_version.version));
+ else
+ seq_printf(s, "\t");
+
+ for (i = 0; i < num_ops; i++) {
+ opcode = le16_to_cpu(op->op);
+ seq_printf(s, "\t%s", ceph_osd_op_name(opcode));
+ op++;
+ }
+
+ seq_printf(s, "\n");
+ }
+ mutex_unlock(&osdc->request_mutex);
+ return 0;
+}
+
+CEPH_DEFINE_SHOW_FUNC(monmap_show)
+CEPH_DEFINE_SHOW_FUNC(osdmap_show)
+CEPH_DEFINE_SHOW_FUNC(monc_show)
+CEPH_DEFINE_SHOW_FUNC(osdc_show)
+
+int ceph_debugfs_init(void)
+{
+ ceph_debugfs_dir = debugfs_create_dir("ceph", NULL);
+ if (!ceph_debugfs_dir)
+ return -ENOMEM;
+ return 0;
+}
+
+void ceph_debugfs_cleanup(void)
+{
+ debugfs_remove(ceph_debugfs_dir);
+}
+
+int ceph_debugfs_client_init(struct ceph_client *client)
+{
+ int ret = -ENOMEM;
+ char name[80];
+
+ snprintf(name, sizeof(name), "%pU.client%lld", &client->fsid,
+ client->monc.auth->global_id);
+
+ client->debugfs_dir = debugfs_create_dir(name, ceph_debugfs_dir);
+ if (!client->debugfs_dir)
+ goto out;
+
+ client->monc.debugfs_file = debugfs_create_file("monc",
+ 0600,
+ client->debugfs_dir,
+ client,
+ &monc_show_fops);
+ if (!client->monc.debugfs_file)
+ goto out;
+
+ client->osdc.debugfs_file = debugfs_create_file("osdc",
+ 0600,
+ client->debugfs_dir,
+ client,
+ &osdc_show_fops);
+ if (!client->osdc.debugfs_file)
+ goto out;
+
+ client->debugfs_monmap = debugfs_create_file("monmap",
+ 0600,
+ client->debugfs_dir,
+ client,
+ &monmap_show_fops);
+ if (!client->debugfs_monmap)
+ goto out;
+
+ client->debugfs_osdmap = debugfs_create_file("osdmap",
+ 0600,
+ client->debugfs_dir,
+ client,
+ &osdmap_show_fops);
+ if (!client->debugfs_osdmap)
+ goto out;
+
+ return 0;
+
+out:
+ ceph_debugfs_client_cleanup(client);
+ return ret;
+}
+
+void ceph_debugfs_client_cleanup(struct ceph_client *client)
+{
+ debugfs_remove(client->debugfs_osdmap);
+ debugfs_remove(client->debugfs_monmap);
+ debugfs_remove(client->osdc.debugfs_file);
+ debugfs_remove(client->monc.debugfs_file);
+ debugfs_remove(client->debugfs_dir);
+}
+
+#else /* CONFIG_DEBUG_FS */
+
+int ceph_debugfs_init(void)
+{
+ return 0;
+}
+
+void ceph_debugfs_cleanup(void)
+{
+}
+
+int ceph_debugfs_client_init(struct ceph_client *client)
+{
+ return 0;
+}
+
+void ceph_debugfs_client_cleanup(struct ceph_client *client)
+{
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
+EXPORT_SYMBOL(ceph_debugfs_init);
+EXPORT_SYMBOL(ceph_debugfs_cleanup);
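The CEPH_DEFINE_SHOW_FUNC() wrappers above presumably expand to the usual seq_file single_open() boilerplate. For reference, a minimal open-coded equivalent looks like this (the names are illustrative, not from the patch):

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *s, void *p)
{
	/* s->private is the pointer passed to debugfs_create_file() */
	seq_printf(s, "example state\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	/* single_open() stashes inode->i_private as s->private */
	return single_open(file, example_show, inode->i_private);
}

static const struct file_operations example_fops = {
	.open		= example_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

Registration then mirrors ceph_debugfs_client_init() above:
debugfs_create_file("example", 0600, parent, data, &example_fops).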
diff --git a/fs/ceph/messenger.c b/net/ceph/messenger.c
similarity index 89%
rename from fs/ceph/messenger.c
rename to net/ceph/messenger.c
index 2502d76..0e8157e 100644
--- a/fs/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -1,4 +1,4 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
#include <linux/crc32c.h>
#include <linux/ctype.h>
@@ -9,12 +9,14 @@
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
#include <net/tcp.h>
-#include "super.h"
-#include "messenger.h"
-#include "decode.h"
-#include "pagelist.h"
+#include <linux/ceph/libceph.h>
+#include <linux/ceph/messenger.h>
+#include <linux/ceph/decode.h>
+#include <linux/ceph/pagelist.h>
/*
* Ceph uses the messenger to exchange ceph_msg messages with other
@@ -48,7 +50,7 @@
static DEFINE_SPINLOCK(addr_str_lock);
static int last_addr_str;
-const char *pr_addr(const struct sockaddr_storage *ss)
+const char *ceph_pr_addr(const struct sockaddr_storage *ss)
{
int i;
char *s;
@@ -79,6 +81,7 @@
return s;
}
+EXPORT_SYMBOL(ceph_pr_addr);
static void encode_my_addr(struct ceph_messenger *msgr)
{
@@ -91,7 +94,7 @@
*/
struct workqueue_struct *ceph_msgr_wq;
-int __init ceph_msgr_init(void)
+int ceph_msgr_init(void)
{
ceph_msgr_wq = create_workqueue("ceph-msgr");
if (IS_ERR(ceph_msgr_wq)) {
@@ -102,16 +105,19 @@
}
return 0;
}
+EXPORT_SYMBOL(ceph_msgr_init);
void ceph_msgr_exit(void)
{
destroy_workqueue(ceph_msgr_wq);
}
+EXPORT_SYMBOL(ceph_msgr_exit);
void ceph_msgr_flush(void)
{
flush_workqueue(ceph_msgr_wq);
}
+EXPORT_SYMBOL(ceph_msgr_flush);
/*
@@ -221,19 +227,19 @@
set_sock_callbacks(sock, con);
- dout("connect %s\n", pr_addr(&con->peer_addr.in_addr));
+ dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));
ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
O_NONBLOCK);
if (ret == -EINPROGRESS) {
dout("connect %s EINPROGRESS sk_state = %u\n",
- pr_addr(&con->peer_addr.in_addr),
+ ceph_pr_addr(&con->peer_addr.in_addr),
sock->sk->sk_state);
ret = 0;
}
if (ret < 0) {
pr_err("connect %s error %d\n",
- pr_addr(&con->peer_addr.in_addr), ret);
+ ceph_pr_addr(&con->peer_addr.in_addr), ret);
sock_release(sock);
con->sock = NULL;
con->error_msg = "connect error";
@@ -334,7 +340,8 @@
*/
void ceph_con_close(struct ceph_connection *con)
{
- dout("con_close %p peer %s\n", con, pr_addr(&con->peer_addr.in_addr));
+ dout("con_close %p peer %s\n", con,
+ ceph_pr_addr(&con->peer_addr.in_addr));
set_bit(CLOSED, &con->state); /* in case there's queued work */
clear_bit(STANDBY, &con->state); /* avoid connect_seq bump */
clear_bit(LOSSYTX, &con->state); /* so we retry next connect */
@@ -347,19 +354,21 @@
mutex_unlock(&con->mutex);
queue_con(con);
}
+EXPORT_SYMBOL(ceph_con_close);
/*
* Reopen a closed connection, with a new peer address.
*/
void ceph_con_open(struct ceph_connection *con, struct ceph_entity_addr *addr)
{
- dout("con_open %p %s\n", con, pr_addr(&addr->in_addr));
+ dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));
set_bit(OPENING, &con->state);
clear_bit(CLOSED, &con->state);
memcpy(&con->peer_addr, addr, sizeof(*addr));
con->delay = 0; /* reset backoff memory */
queue_con(con);
}
+EXPORT_SYMBOL(ceph_con_open);
/*
* return true if this connection ever successfully opened
@@ -406,6 +415,7 @@
INIT_LIST_HEAD(&con->out_sent);
INIT_DELAYED_WORK(&con->work, con_work);
}
+EXPORT_SYMBOL(ceph_con_init);
/*
@@ -529,8 +539,11 @@
if (le32_to_cpu(m->hdr.data_len) > 0) {
/* initialize page iterator */
con->out_msg_pos.page = 0;
- con->out_msg_pos.page_pos =
- le16_to_cpu(m->hdr.data_off) & ~PAGE_MASK;
+ if (m->pages)
+ con->out_msg_pos.page_pos =
+ le16_to_cpu(m->hdr.data_off) & ~PAGE_MASK;
+ else
+ con->out_msg_pos.page_pos = 0;
con->out_msg_pos.data_pos = 0;
con->out_msg_pos.did_page_crc = 0;
con->out_more = 1; /* data + footer will follow */
@@ -647,7 +660,7 @@
dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
con->connect_seq, global_seq, proto);
- con->out_connect.features = cpu_to_le64(CEPH_FEATURE_SUPPORTED);
+ con->out_connect.features = cpu_to_le64(msgr->supported_features);
con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
con->out_connect.global_seq = cpu_to_le32(global_seq);
@@ -712,6 +725,31 @@
return ret; /* done! */
}
+#ifdef CONFIG_BLOCK
+static void init_bio_iter(struct bio *bio, struct bio **iter, int *seg)
+{
+ if (!bio) {
+ *iter = NULL;
+ *seg = 0;
+ return;
+ }
+ *iter = bio;
+ *seg = bio->bi_idx;
+}
+
+static void iter_bio_next(struct bio **bio_iter, int *seg)
+{
+ if (*bio_iter == NULL)
+ return;
+
+ BUG_ON(*seg >= (*bio_iter)->bi_vcnt);
+
+ (*seg)++;
+ if (*seg == (*bio_iter)->bi_vcnt)
+ init_bio_iter((*bio_iter)->bi_next, bio_iter, seg);
+}
+#endif
+
/*
* Write as much message data payload as we can. If we finish, queue
* up the footer.
@@ -726,21 +764,46 @@
size_t len;
int crc = con->msgr->nocrc;
int ret;
+ int total_max_write;
+ int in_trail = 0;
+ size_t trail_len = (msg->trail ? msg->trail->length : 0);
dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n",
con, con->out_msg, con->out_msg_pos.page, con->out_msg->nr_pages,
con->out_msg_pos.page_pos);
- while (con->out_msg_pos.page < con->out_msg->nr_pages) {
+#ifdef CONFIG_BLOCK
+ if (msg->bio && !msg->bio_iter)
+ init_bio_iter(msg->bio, &msg->bio_iter, &msg->bio_seg);
+#endif
+
+ while (data_len > con->out_msg_pos.data_pos) {
struct page *page = NULL;
void *kaddr = NULL;
+ int max_write = PAGE_SIZE;
+ int page_shift = 0;
+
+ total_max_write = data_len - trail_len -
+ con->out_msg_pos.data_pos;
/*
* if we are calculating the data crc (the default), we need
* to map the page. if our pages[] has been revoked, use the
* zero page.
*/
- if (msg->pages) {
+
+ /* have we reached the trail part of the data? */
+ if (con->out_msg_pos.data_pos >= data_len - trail_len) {
+ in_trail = 1;
+
+ total_max_write = data_len - con->out_msg_pos.data_pos;
+
+ page = list_first_entry(&msg->trail->head,
+ struct page, lru);
+ if (crc)
+ kaddr = kmap(page);
+ max_write = PAGE_SIZE;
+ } else if (msg->pages) {
page = msg->pages[con->out_msg_pos.page];
if (crc)
kaddr = kmap(page);
@@ -749,13 +812,25 @@
struct page, lru);
if (crc)
kaddr = kmap(page);
+#ifdef CONFIG_BLOCK
+ } else if (msg->bio) {
+ struct bio_vec *bv;
+
+ bv = bio_iovec_idx(msg->bio_iter, msg->bio_seg);
+ page = bv->bv_page;
+ page_shift = bv->bv_offset;
+ if (crc)
+ kaddr = kmap(page) + page_shift;
+ max_write = bv->bv_len;
+#endif
} else {
page = con->msgr->zero_page;
if (crc)
kaddr = page_address(con->msgr->zero_page);
}
- len = min((int)(PAGE_SIZE - con->out_msg_pos.page_pos),
- (int)(data_len - con->out_msg_pos.data_pos));
+ len = min_t(int, max_write - con->out_msg_pos.page_pos,
+ total_max_write);
+
if (crc && !con->out_msg_pos.did_page_crc) {
void *base = kaddr + con->out_msg_pos.page_pos;
u32 tmpcrc = le32_to_cpu(con->out_msg->footer.data_crc);
@@ -765,13 +840,14 @@
cpu_to_le32(crc32c(tmpcrc, base, len));
con->out_msg_pos.did_page_crc = 1;
}
-
ret = kernel_sendpage(con->sock, page,
- con->out_msg_pos.page_pos, len,
+ con->out_msg_pos.page_pos + page_shift,
+ len,
MSG_DONTWAIT | MSG_NOSIGNAL |
MSG_MORE);
- if (crc && (msg->pages || msg->pagelist))
+ if (crc &&
+ (msg->pages || msg->pagelist || msg->bio || in_trail))
kunmap(page);
if (ret <= 0)
@@ -783,9 +859,16 @@
con->out_msg_pos.page_pos = 0;
con->out_msg_pos.page++;
con->out_msg_pos.did_page_crc = 0;
- if (msg->pagelist)
+ if (in_trail)
+ list_move_tail(&page->lru,
+ &msg->trail->head);
+ else if (msg->pagelist)
list_move_tail(&page->lru,
&msg->pagelist->head);
+#ifdef CONFIG_BLOCK
+ else if (msg->bio)
+ iter_bio_next(&msg->bio_iter, &msg->bio_seg);
+#endif
}
}
@@ -938,7 +1021,7 @@
{
if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
pr_err("connect to %s got bad banner\n",
- pr_addr(&con->peer_addr.in_addr));
+ ceph_pr_addr(&con->peer_addr.in_addr));
con->error_msg = "protocol error, bad banner";
return -1;
}
@@ -1041,7 +1124,7 @@
addr_set_port(ss, port);
- dout("parse_ips got %s\n", pr_addr(ss));
+ dout("parse_ips got %s\n", ceph_pr_addr(ss));
if (p == end)
break;
@@ -1061,6 +1144,7 @@
pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c);
return -EINVAL;
}
+EXPORT_SYMBOL(ceph_parse_ips);
static int process_banner(struct ceph_connection *con)
{
@@ -1082,9 +1166,9 @@
!(addr_is_blank(&con->actual_peer_addr.in_addr) &&
con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
pr_warning("wrong peer, want %s/%d, got %s/%d\n",
- pr_addr(&con->peer_addr.in_addr),
+ ceph_pr_addr(&con->peer_addr.in_addr),
(int)le32_to_cpu(con->peer_addr.nonce),
- pr_addr(&con->actual_peer_addr.in_addr),
+ ceph_pr_addr(&con->actual_peer_addr.in_addr),
(int)le32_to_cpu(con->actual_peer_addr.nonce));
con->error_msg = "wrong peer at address";
return -1;
@@ -1102,7 +1186,7 @@
addr_set_port(&con->msgr->inst.addr.in_addr, port);
encode_my_addr(con->msgr);
dout("process_banner learned my addr is %s\n",
- pr_addr(&con->msgr->inst.addr.in_addr));
+ ceph_pr_addr(&con->msgr->inst.addr.in_addr));
}
set_bit(NEGOTIATING, &con->state);
@@ -1123,8 +1207,8 @@
static int process_connect(struct ceph_connection *con)
{
- u64 sup_feat = CEPH_FEATURE_SUPPORTED;
- u64 req_feat = CEPH_FEATURE_REQUIRED;
+ u64 sup_feat = con->msgr->supported_features;
+ u64 req_feat = con->msgr->required_features;
u64 server_feat = le64_to_cpu(con->in_reply.features);
dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
@@ -1134,7 +1218,7 @@
pr_err("%s%lld %s feature set mismatch,"
" my %llx < server's %llx, missing %llx\n",
ENTITY_NAME(con->peer_name),
- pr_addr(&con->peer_addr.in_addr),
+ ceph_pr_addr(&con->peer_addr.in_addr),
sup_feat, server_feat, server_feat & ~sup_feat);
con->error_msg = "missing required protocol features";
fail_protocol(con);
@@ -1144,7 +1228,7 @@
pr_err("%s%lld %s protocol version mismatch,"
" my %d != server's %d\n",
ENTITY_NAME(con->peer_name),
- pr_addr(&con->peer_addr.in_addr),
+ ceph_pr_addr(&con->peer_addr.in_addr),
le32_to_cpu(con->out_connect.protocol_version),
le32_to_cpu(con->in_reply.protocol_version));
con->error_msg = "protocol version mismatch";
@@ -1178,7 +1262,7 @@
le32_to_cpu(con->in_connect.connect_seq));
pr_err("%s%lld %s connection reset\n",
ENTITY_NAME(con->peer_name),
- pr_addr(&con->peer_addr.in_addr));
+ ceph_pr_addr(&con->peer_addr.in_addr));
reset_connection(con);
prepare_write_connect(con->msgr, con, 0);
prepare_read_connect(con);
@@ -1223,7 +1307,7 @@
pr_err("%s%lld %s protocol feature mismatch,"
" my required %llx > server's %llx, need %llx\n",
ENTITY_NAME(con->peer_name),
- pr_addr(&con->peer_addr.in_addr),
+ ceph_pr_addr(&con->peer_addr.in_addr),
req_feat, server_feat, req_feat & ~server_feat);
con->error_msg = "missing required protocol features";
fail_protocol(con);
@@ -1305,8 +1389,7 @@
struct kvec *section,
unsigned int sec_len, u32 *crc)
{
- int left;
- int ret;
+ int ret, left;
BUG_ON(!section);
@@ -1329,13 +1412,83 @@
static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
struct ceph_msg_header *hdr,
int *skip);
+
+
+static int read_partial_message_pages(struct ceph_connection *con,
+ struct page **pages,
+ unsigned data_len, int datacrc)
+{
+ void *p;
+ int ret;
+ int left;
+
+ left = min((int)(data_len - con->in_msg_pos.data_pos),
+ (int)(PAGE_SIZE - con->in_msg_pos.page_pos));
+ /* (page) data */
+ BUG_ON(pages == NULL);
+ p = kmap(pages[con->in_msg_pos.page]);
+ ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
+ left);
+ if (ret > 0 && datacrc)
+ con->in_data_crc =
+ crc32c(con->in_data_crc,
+ p + con->in_msg_pos.page_pos, ret);
+ kunmap(pages[con->in_msg_pos.page]);
+ if (ret <= 0)
+ return ret;
+ con->in_msg_pos.data_pos += ret;
+ con->in_msg_pos.page_pos += ret;
+ if (con->in_msg_pos.page_pos == PAGE_SIZE) {
+ con->in_msg_pos.page_pos = 0;
+ con->in_msg_pos.page++;
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_BLOCK
+static int read_partial_message_bio(struct ceph_connection *con,
+ struct bio **bio_iter, int *bio_seg,
+ unsigned data_len, int datacrc)
+{
+ struct bio_vec *bv = bio_iovec_idx(*bio_iter, *bio_seg);
+ void *p;
+ int ret, left;
+
+ if (IS_ERR(bv))
+ return PTR_ERR(bv);
+
+ left = min((int)(data_len - con->in_msg_pos.data_pos),
+ (int)(bv->bv_len - con->in_msg_pos.page_pos));
+
+ p = kmap(bv->bv_page) + bv->bv_offset;
+
+ ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
+ left);
+ if (ret > 0 && datacrc)
+ con->in_data_crc =
+ crc32c(con->in_data_crc,
+ p + con->in_msg_pos.page_pos, ret);
+ kunmap(bv->bv_page);
+ if (ret <= 0)
+ return ret;
+ con->in_msg_pos.data_pos += ret;
+ con->in_msg_pos.page_pos += ret;
+ if (con->in_msg_pos.page_pos == bv->bv_len) {
+ con->in_msg_pos.page_pos = 0;
+ iter_bio_next(bio_iter, bio_seg);
+ }
+
+ return ret;
+}
+#endif
+
/*
* read (part of) a message.
*/
static int read_partial_message(struct ceph_connection *con)
{
struct ceph_msg *m = con->in_msg;
- void *p;
int ret;
int to, left;
unsigned front_len, middle_len, data_len, data_off;
@@ -1381,7 +1534,7 @@
if ((s64)seq - (s64)con->in_seq < 1) {
pr_info("skipping %s%lld %s seq %lld, expected %lld\n",
ENTITY_NAME(con->peer_name),
- pr_addr(&con->peer_addr.in_addr),
+ ceph_pr_addr(&con->peer_addr.in_addr),
seq, con->in_seq + 1);
con->in_base_pos = -front_len - middle_len - data_len -
sizeof(m->footer);
@@ -1422,7 +1575,10 @@
m->middle->vec.iov_len = 0;
con->in_msg_pos.page = 0;
- con->in_msg_pos.page_pos = data_off & ~PAGE_MASK;
+ if (m->pages)
+ con->in_msg_pos.page_pos = data_off & ~PAGE_MASK;
+ else
+ con->in_msg_pos.page_pos = 0;
con->in_msg_pos.data_pos = 0;
}
@@ -1440,27 +1596,29 @@
if (ret <= 0)
return ret;
}
+#ifdef CONFIG_BLOCK
+ if (m->bio && !m->bio_iter)
+ init_bio_iter(m->bio, &m->bio_iter, &m->bio_seg);
+#endif
/* (page) data */
while (con->in_msg_pos.data_pos < data_len) {
- left = min((int)(data_len - con->in_msg_pos.data_pos),
- (int)(PAGE_SIZE - con->in_msg_pos.page_pos));
- BUG_ON(m->pages == NULL);
- p = kmap(m->pages[con->in_msg_pos.page]);
- ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
- left);
- if (ret > 0 && datacrc)
- con->in_data_crc =
- crc32c(con->in_data_crc,
- p + con->in_msg_pos.page_pos, ret);
- kunmap(m->pages[con->in_msg_pos.page]);
- if (ret <= 0)
- return ret;
- con->in_msg_pos.data_pos += ret;
- con->in_msg_pos.page_pos += ret;
- if (con->in_msg_pos.page_pos == PAGE_SIZE) {
- con->in_msg_pos.page_pos = 0;
- con->in_msg_pos.page++;
+ if (m->pages) {
+ ret = read_partial_message_pages(con, m->pages,
+ data_len, datacrc);
+ if (ret <= 0)
+ return ret;
+#ifdef CONFIG_BLOCK
+ } else if (m->bio) {
+
+ ret = read_partial_message_bio(con,
+ &m->bio_iter, &m->bio_seg,
+ data_len, datacrc);
+ if (ret <= 0)
+ return ret;
+#endif
+ } else {
+ BUG_ON(1);
}
}
@@ -1874,9 +2032,9 @@
static void ceph_fault(struct ceph_connection *con)
{
pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
- pr_addr(&con->peer_addr.in_addr), con->error_msg);
+ ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
dout("fault %p state %lu to peer %s\n",
- con, con->state, pr_addr(&con->peer_addr.in_addr));
+ con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));
if (test_bit(LOSSYTX, &con->state)) {
dout("fault on LOSSYTX channel\n");
@@ -1936,7 +2094,9 @@
/*
* create a new messenger instance
*/
-struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr)
+struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr,
+ u32 supported_features,
+ u32 required_features)
{
struct ceph_messenger *msgr;
@@ -1944,6 +2104,9 @@
if (msgr == NULL)
return ERR_PTR(-ENOMEM);
+ msgr->supported_features = supported_features;
+ msgr->required_features = required_features;
+
spin_lock_init(&msgr->global_seq_lock);
/* the zero page is needed if a request is "canceled" while the message
@@ -1966,6 +2129,7 @@
dout("messenger_create %p\n", msgr);
return msgr;
}
+EXPORT_SYMBOL(ceph_messenger_create);
void ceph_messenger_destroy(struct ceph_messenger *msgr)
{
@@ -1975,6 +2139,7 @@
kfree(msgr);
dout("destroyed messenger %p\n", msgr);
}
+EXPORT_SYMBOL(ceph_messenger_destroy);
/*
* Queue up an outgoing message on the given connection.
@@ -2011,6 +2176,7 @@
if (test_and_set_bit(WRITE_PENDING, &con->state) == 0)
queue_con(con);
}
+EXPORT_SYMBOL(ceph_con_send);
/*
* Revoke a message that was previously queued for send
@@ -2076,6 +2242,7 @@
test_and_set_bit(WRITE_PENDING, &con->state) == 0)
queue_con(con);
}
+EXPORT_SYMBOL(ceph_con_keepalive);
/*
@@ -2136,6 +2303,10 @@
m->nr_pages = 0;
m->pages = NULL;
m->pagelist = NULL;
+ m->bio = NULL;
+ m->bio_iter = NULL;
+ m->bio_seg = 0;
+ m->trail = NULL;
dout("ceph_msg_new %p front %d\n", m, front_len);
return m;
@@ -2146,6 +2317,7 @@
pr_err("msg_new can't create type %d front %d\n", type, front_len);
return NULL;
}
+EXPORT_SYMBOL(ceph_msg_new);
/*
* Allocate "middle" portion of a message, if it is needed and wasn't
@@ -2250,11 +2422,14 @@
m->pagelist = NULL;
}
+ m->trail = NULL;
+
if (m->pool)
ceph_msgpool_put(m->pool, m);
else
ceph_msg_kfree(m);
}
+EXPORT_SYMBOL(ceph_msg_last_put);
void ceph_msg_dump(struct ceph_msg *msg)
{
@@ -2275,3 +2450,4 @@
DUMP_PREFIX_OFFSET, 16, 1,
&msg->footer, sizeof(msg->footer), true);
}
+EXPORT_SYMBOL(ceph_msg_dump);
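With CEPH_FEATURE_SUPPORTED/REQUIRED no longer baked in at compile time, each messenger carries its own masks, which are checked during the connect handshake in process_connect(). A sketch of client setup under the new signature (the mask values below are placeholders, not real feature bits):

#include <linux/ceph/messenger.h>

/* Placeholder masks; a real client passes its protocol's feature bits. */
#define EXAMPLE_SUPPORTED_FEATURES	0xffffffff
#define EXAMPLE_REQUIRED_FEATURES	0x0

static struct ceph_messenger *example_messenger(struct ceph_entity_addr *myaddr)
{
	/* supported: advertised to the peer; required: the connection
	 * fails in process_connect() if the server lacks any bit. */
	return ceph_messenger_create(myaddr,
				     EXAMPLE_SUPPORTED_FEATURES,
				     EXAMPLE_REQUIRED_FEATURES);
}

Note that the parameters are u32 even though the wire field (out_connect.features) is encoded as 64-bit.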
diff --git a/fs/ceph/mon_client.c b/net/ceph/mon_client.c
similarity index 94%
rename from fs/ceph/mon_client.c
rename to net/ceph/mon_client.c
index b2a5a3e..8a07939 100644
--- a/fs/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -1,14 +1,16 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
+#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/sched.h>
-#include "mon_client.h"
-#include "super.h"
-#include "auth.h"
-#include "decode.h"
+#include <linux/ceph/mon_client.h>
+#include <linux/ceph/libceph.h>
+#include <linux/ceph/decode.h>
+
+#include <linux/ceph/auth.h>
/*
* Interact with Ceph monitor cluster. Handle requests for new map
@@ -74,7 +76,7 @@
m->num_mon);
for (i = 0; i < m->num_mon; i++)
dout("monmap_decode mon%d is %s\n", i,
- pr_addr(&m->mon_inst[i].addr.in_addr));
+ ceph_pr_addr(&m->mon_inst[i].addr.in_addr));
return m;
bad:
@@ -191,30 +193,33 @@
struct ceph_msg *msg = monc->m_subscribe;
struct ceph_mon_subscribe_item *i;
void *p, *end;
+ int num;
p = msg->front.iov_base;
end = p + msg->front_max;
- dout("__send_subscribe to 'mdsmap' %u+\n",
- (unsigned)monc->have_mdsmap);
+ num = 1 + !!monc->want_next_osdmap + !!monc->want_mdsmap;
+ ceph_encode_32(&p, num);
+
if (monc->want_next_osdmap) {
dout("__send_subscribe to 'osdmap' %u\n",
(unsigned)monc->have_osdmap);
- ceph_encode_32(&p, 3);
ceph_encode_string(&p, end, "osdmap", 6);
i = p;
i->have = cpu_to_le64(monc->have_osdmap);
i->onetime = 1;
p += sizeof(*i);
monc->want_next_osdmap = 2; /* requested */
- } else {
- ceph_encode_32(&p, 2);
}
- ceph_encode_string(&p, end, "mdsmap", 6);
- i = p;
- i->have = cpu_to_le64(monc->have_mdsmap);
- i->onetime = 0;
- p += sizeof(*i);
+ if (monc->want_mdsmap) {
+ dout("__send_subscribe to 'mdsmap' %u+\n",
+ (unsigned)monc->have_mdsmap);
+ ceph_encode_string(&p, end, "mdsmap", 6);
+ i = p;
+ i->have = cpu_to_le64(monc->have_mdsmap);
+ i->onetime = 0;
+ p += sizeof(*i);
+ }
ceph_encode_string(&p, end, "monmap", 6);
i = p;
i->have = 0;
@@ -243,7 +248,8 @@
mutex_lock(&monc->mutex);
if (monc->hunting) {
pr_info("mon%d %s session established\n",
- monc->cur_mon, pr_addr(&monc->con->peer_addr.in_addr));
+ monc->cur_mon,
+ ceph_pr_addr(&monc->con->peer_addr.in_addr));
monc->hunting = false;
}
dout("handle_subscribe_ack after %d seconds\n", seconds);
@@ -266,6 +272,7 @@
mutex_unlock(&monc->mutex);
return 0;
}
+EXPORT_SYMBOL(ceph_monc_got_mdsmap);
int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 got)
{
@@ -310,6 +317,7 @@
mutex_unlock(&monc->mutex);
return 0;
}
+EXPORT_SYMBOL(ceph_monc_open_session);
/*
* The monitor responds with mount ack indicate mount success. The
@@ -540,6 +548,7 @@
kref_put(&req->kref, release_generic_request);
return err;
}
+EXPORT_SYMBOL(ceph_monc_do_statfs);
/*
* pool ops
@@ -651,6 +660,7 @@
pool, 0, (char *)snapid, sizeof(*snapid));
}
+EXPORT_SYMBOL(ceph_monc_create_snapid);
int ceph_monc_delete_snapid(struct ceph_mon_client *monc,
u32 pool, u64 snapid)
@@ -708,9 +718,9 @@
*/
static int build_initial_monmap(struct ceph_mon_client *monc)
{
- struct ceph_mount_args *args = monc->client->mount_args;
- struct ceph_entity_addr *mon_addr = args->mon_addr;
- int num_mon = args->num_mon;
+ struct ceph_options *opt = monc->client->options;
+ struct ceph_entity_addr *mon_addr = opt->mon_addr;
+ int num_mon = opt->num_mon;
int i;
/* build initial monmap */
@@ -728,11 +738,6 @@
}
monc->monmap->num_mon = num_mon;
monc->have_fsid = false;
-
- /* release addr memory */
- kfree(args->mon_addr);
- args->mon_addr = NULL;
- args->num_mon = 0;
return 0;
}
@@ -753,8 +758,8 @@
monc->con = NULL;
/* authentication */
- monc->auth = ceph_auth_init(cl->mount_args->name,
- cl->mount_args->secret);
+ monc->auth = ceph_auth_init(cl->options->name,
+ cl->options->secret);
if (IS_ERR(monc->auth))
return PTR_ERR(monc->auth);
monc->auth->want_keys =
@@ -808,6 +813,7 @@
out:
return err;
}
+EXPORT_SYMBOL(ceph_monc_init);
void ceph_monc_stop(struct ceph_mon_client *monc)
{
@@ -832,6 +838,7 @@
kfree(monc->monmap);
}
+EXPORT_SYMBOL(ceph_monc_stop);
static void handle_auth_reply(struct ceph_mon_client *monc,
struct ceph_msg *msg)
@@ -889,6 +896,7 @@
mutex_unlock(&monc->mutex);
return ret;
}
+EXPORT_SYMBOL(ceph_monc_validate_auth);
/*
* handle incoming message
@@ -922,15 +930,16 @@
ceph_monc_handle_map(monc, msg);
break;
- case CEPH_MSG_MDS_MAP:
- ceph_mdsc_handle_map(&monc->client->mdsc, msg);
- break;
-
case CEPH_MSG_OSD_MAP:
ceph_osdc_handle_map(&monc->client->osdc, msg);
break;
default:
+ /* can the chained handler handle it? */
+ if (monc->client->extra_mon_dispatch &&
+ monc->client->extra_mon_dispatch(monc->client, msg) == 0)
+ break;
+
pr_err("received unknown message type %d %s\n", type,
ceph_msg_type_name(type));
}
@@ -994,7 +1003,7 @@
if (monc->con && !monc->hunting)
pr_info("mon%d %s session lost, "
"hunting for new mon\n", monc->cur_mon,
- pr_addr(&monc->con->peer_addr.in_addr));
+ ceph_pr_addr(&monc->con->peer_addr.in_addr));
__close_session(monc);
if (!monc->hunting) {
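The reworked __send_subscribe() computes the entry count up front (num = 1 + !!want_next_osdmap + !!want_mdsmap, with monmap always included) and then emits one name/item pair per wanted map. The per-entry encoding, isolated as a sketch (struct ceph_mon_subscribe_item comes from the ceph wire headers; buffer bounds remain the caller's responsibility, as in the original):

#include <linux/string.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/ceph_fs.h>	/* struct ceph_mon_subscribe_item */

/* Sketch of one subscribe entry: "<name>", have epoch, onetime flag.
 * Mirrors the loop bodies in __send_subscribe(); not a drop-in helper. */
static void example_encode_sub(void **p, void *end, const char *name,
			       u64 have, u8 onetime)
{
	struct ceph_mon_subscribe_item *i;

	ceph_encode_string(p, end, name, strlen(name));
	i = *p;
	i->have = cpu_to_le64(have);
	i->onetime = onetime;
	*p += sizeof(*i);
}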
diff --git a/fs/ceph/msgpool.c b/net/ceph/msgpool.c
similarity index 95%
rename from fs/ceph/msgpool.c
rename to net/ceph/msgpool.c
index dd65a64..d5f2d97 100644
--- a/fs/ceph/msgpool.c
+++ b/net/ceph/msgpool.c
@@ -1,11 +1,11 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
-#include "msgpool.h"
+#include <linux/ceph/msgpool.h>
static void *alloc_fn(gfp_t gfp_mask, void *arg)
{
diff --git a/fs/ceph/osd_client.c b/net/ceph/osd_client.c
similarity index 84%
rename from fs/ceph/osd_client.c
rename to net/ceph/osd_client.c
index bed6391..7939199 100644
--- a/fs/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1,17 +1,22 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
+#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#ifdef CONFIG_BLOCK
+#include <linux/bio.h>
+#endif
-#include "super.h"
-#include "osd_client.h"
-#include "messenger.h"
-#include "decode.h"
-#include "auth.h"
+#include <linux/ceph/libceph.h>
+#include <linux/ceph/osd_client.h>
+#include <linux/ceph/messenger.h>
+#include <linux/ceph/decode.h>
+#include <linux/ceph/auth.h>
+#include <linux/ceph/pagelist.h>
#define OSD_OP_FRONT_LEN 4096
#define OSD_OPREPLY_FRONT_LEN 512
@@ -22,6 +27,59 @@
static void kick_requests(struct ceph_osd_client *osdc, struct ceph_osd *osd);
+static int op_needs_trail(int op)
+{
+ switch (op) {
+ case CEPH_OSD_OP_GETXATTR:
+ case CEPH_OSD_OP_SETXATTR:
+ case CEPH_OSD_OP_CMPXATTR:
+ case CEPH_OSD_OP_CALL:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static int op_has_extent(int op)
+{
+ return (op == CEPH_OSD_OP_READ ||
+ op == CEPH_OSD_OP_WRITE);
+}
+
+void ceph_calc_raw_layout(struct ceph_osd_client *osdc,
+ struct ceph_file_layout *layout,
+ u64 snapid,
+ u64 off, u64 *plen, u64 *bno,
+ struct ceph_osd_request *req,
+ struct ceph_osd_req_op *op)
+{
+ struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
+ u64 orig_len = *plen;
+ u64 objoff, objlen; /* extent in object */
+
+ reqhead->snapid = cpu_to_le64(snapid);
+
+ /* object extent? */
+ ceph_calc_file_object_mapping(layout, off, plen, bno,
+ &objoff, &objlen);
+ if (*plen < orig_len)
+ dout(" skipping last %llu, final file extent %llu~%llu\n",
+ orig_len - *plen, off, *plen);
+
+ if (op_has_extent(op->op)) {
+ op->extent.offset = objoff;
+ op->extent.length = objlen;
+ }
+ req->r_num_pages = calc_pages_for(off, *plen);
+ if (op->op == CEPH_OSD_OP_WRITE)
+ op->payload_len = *plen;
+
+ dout("calc_layout bno=%llx %llu~%llu (%d pages)\n",
+ *bno, objoff, objlen, req->r_num_pages);
+
+}
+EXPORT_SYMBOL(ceph_calc_raw_layout);
+
/*
* Implement client access to distributed object storage cluster.
*
@@ -48,34 +106,19 @@
* fill osd op in request message.
*/
static void calc_layout(struct ceph_osd_client *osdc,
- struct ceph_vino vino, struct ceph_file_layout *layout,
+ struct ceph_vino vino,
+ struct ceph_file_layout *layout,
u64 off, u64 *plen,
- struct ceph_osd_request *req)
+ struct ceph_osd_request *req,
+ struct ceph_osd_req_op *op)
{
- struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
- struct ceph_osd_op *op = (void *)(reqhead + 1);
- u64 orig_len = *plen;
- u64 objoff, objlen; /* extent in object */
u64 bno;
- reqhead->snapid = cpu_to_le64(vino.snap);
-
- /* object extent? */
- ceph_calc_file_object_mapping(layout, off, plen, &bno,
- &objoff, &objlen);
- if (*plen < orig_len)
- dout(" skipping last %llu, final file extent %llu~%llu\n",
- orig_len - *plen, off, *plen);
+ ceph_calc_raw_layout(osdc, layout, vino.snap, off,
+ plen, &bno, req, op);
sprintf(req->r_oid, "%llx.%08llx", vino.ino, bno);
req->r_oid_len = strlen(req->r_oid);
-
- op->extent.offset = cpu_to_le64(objoff);
- op->extent.length = cpu_to_le64(objlen);
- req->r_num_pages = calc_pages_for(off, *plen);
-
- dout("calc_layout %s (%d) %llu~%llu (%d pages)\n",
- req->r_oid, req->r_oid_len, objoff, objlen, req->r_num_pages);
}
/*
@@ -101,12 +144,259 @@
if (req->r_own_pages)
ceph_release_page_vector(req->r_pages,
req->r_num_pages);
+#ifdef CONFIG_BLOCK
+ if (req->r_bio)
+ bio_put(req->r_bio);
+#endif
ceph_put_snap_context(req->r_snapc);
+ if (req->r_trail) {
+ ceph_pagelist_release(req->r_trail);
+ kfree(req->r_trail);
+ }
if (req->r_mempool)
mempool_free(req, req->r_osdc->req_mempool);
else
kfree(req);
}
+EXPORT_SYMBOL(ceph_osdc_release_request);
+
+static int get_num_ops(struct ceph_osd_req_op *ops, int *needs_trail)
+{
+ int i = 0;
+
+ if (needs_trail)
+ *needs_trail = 0;
+ while (ops[i].op) {
+ if (needs_trail && op_needs_trail(ops[i].op))
+ *needs_trail = 1;
+ i++;
+ }
+
+ return i;
+}
+
+struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
+ int flags,
+ struct ceph_snap_context *snapc,
+ struct ceph_osd_req_op *ops,
+ bool use_mempool,
+ gfp_t gfp_flags,
+ struct page **pages,
+ struct bio *bio)
+{
+ struct ceph_osd_request *req;
+ struct ceph_msg *msg;
+ int needs_trail;
+ int num_op = get_num_ops(ops, &needs_trail);
+ size_t msg_size = sizeof(struct ceph_osd_request_head);
+
+ msg_size += num_op*sizeof(struct ceph_osd_op);
+
+ if (use_mempool) {
+ req = mempool_alloc(osdc->req_mempool, gfp_flags);
+ memset(req, 0, sizeof(*req));
+ } else {
+ req = kzalloc(sizeof(*req), gfp_flags);
+ }
+ if (req == NULL)
+ return NULL;
+
+ req->r_osdc = osdc;
+ req->r_mempool = use_mempool;
+
+ kref_init(&req->r_kref);
+ init_completion(&req->r_completion);
+ init_completion(&req->r_safe_completion);
+ INIT_LIST_HEAD(&req->r_unsafe_item);
+ req->r_flags = flags;
+
+ WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0);
+
+ /* create reply message */
+ if (use_mempool)
+ msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
+ else
+ msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY,
+ OSD_OPREPLY_FRONT_LEN, gfp_flags);
+ if (!msg) {
+ ceph_osdc_put_request(req);
+ return NULL;
+ }
+ req->r_reply = msg;
+
+ /* allocate space for the trailing data */
+ if (needs_trail) {
+ req->r_trail = kmalloc(sizeof(struct ceph_pagelist), gfp_flags);
+ if (!req->r_trail) {
+ ceph_osdc_put_request(req);
+ return NULL;
+ }
+ ceph_pagelist_init(req->r_trail);
+ }
+ /* create request message; allow space for oid */
+ msg_size += 40;
+ if (snapc)
+ msg_size += sizeof(u64) * snapc->num_snaps;
+ if (use_mempool)
+ msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
+ else
+ msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags);
+ if (!msg) {
+ ceph_osdc_put_request(req);
+ return NULL;
+ }
+
+ msg->hdr.type = cpu_to_le16(CEPH_MSG_OSD_OP);
+ memset(msg->front.iov_base, 0, msg->front.iov_len);
+
+ req->r_request = msg;
+ req->r_pages = pages;
+#ifdef CONFIG_BLOCK
+ if (bio) {
+ req->r_bio = bio;
+ bio_get(req->r_bio);
+ }
+#endif
+
+ return req;
+}
+EXPORT_SYMBOL(ceph_osdc_alloc_request);
+
+static void osd_req_encode_op(struct ceph_osd_request *req,
+ struct ceph_osd_op *dst,
+ struct ceph_osd_req_op *src)
+{
+ dst->op = cpu_to_le16(src->op);
+
+ switch (dst->op) {
+ case CEPH_OSD_OP_READ:
+ case CEPH_OSD_OP_WRITE:
+ dst->extent.offset =
+ cpu_to_le64(src->extent.offset);
+ dst->extent.length =
+ cpu_to_le64(src->extent.length);
+ dst->extent.truncate_size =
+ cpu_to_le64(src->extent.truncate_size);
+ dst->extent.truncate_seq =
+ cpu_to_le32(src->extent.truncate_seq);
+ break;
+
+ case CEPH_OSD_OP_GETXATTR:
+ case CEPH_OSD_OP_SETXATTR:
+ case CEPH_OSD_OP_CMPXATTR:
+ BUG_ON(!req->r_trail);
+
+ dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
+ dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
+ dst->xattr.cmp_op = src->xattr.cmp_op;
+ dst->xattr.cmp_mode = src->xattr.cmp_mode;
+ ceph_pagelist_append(req->r_trail, src->xattr.name,
+ src->xattr.name_len);
+ ceph_pagelist_append(req->r_trail, src->xattr.val,
+ src->xattr.value_len);
+ break;
+ case CEPH_OSD_OP_CALL:
+ BUG_ON(!req->r_trail);
+
+ dst->cls.class_len = src->cls.class_len;
+ dst->cls.method_len = src->cls.method_len;
+ dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
+
+ ceph_pagelist_append(req->r_trail, src->cls.class_name,
+ src->cls.class_len);
+ ceph_pagelist_append(req->r_trail, src->cls.method_name,
+ src->cls.method_len);
+ ceph_pagelist_append(req->r_trail, src->cls.indata,
+ src->cls.indata_len);
+ break;
+ case CEPH_OSD_OP_ROLLBACK:
+ dst->snap.snapid = cpu_to_le64(src->snap.snapid);
+ break;
+ case CEPH_OSD_OP_STARTSYNC:
+ break;
+ default:
+ pr_err("unrecognized osd opcode %d\n", dst->op);
+ WARN_ON(1);
+ break;
+ }
+ dst->payload_len = cpu_to_le32(src->payload_len);
+}
+
+/*
+ * build a new request AND message
+ */
+void ceph_osdc_build_request(struct ceph_osd_request *req,
+ u64 off, u64 *plen,
+ struct ceph_osd_req_op *src_ops,
+ struct ceph_snap_context *snapc,
+ struct timespec *mtime,
+ const char *oid,
+ int oid_len)
+{
+ struct ceph_msg *msg = req->r_request;
+ struct ceph_osd_request_head *head;
+ struct ceph_osd_req_op *src_op;
+ struct ceph_osd_op *op;
+ void *p;
+ int num_op = get_num_ops(src_ops, NULL);
+ size_t msg_size = sizeof(*head) + num_op*sizeof(*op);
+ int flags = req->r_flags;
+ u64 data_len = 0;
+ int i;
+
+ head = msg->front.iov_base;
+ op = (void *)(head + 1);
+ p = (void *)(op + num_op);
+
+ req->r_snapc = ceph_get_snap_context(snapc);
+
+ head->client_inc = cpu_to_le32(1); /* always, for now. */
+ head->flags = cpu_to_le32(flags);
+ if (flags & CEPH_OSD_FLAG_WRITE)
+ ceph_encode_timespec(&head->mtime, mtime);
+ head->num_ops = cpu_to_le16(num_op);
+
+
+ /* fill in oid */
+ head->object_len = cpu_to_le32(oid_len);
+ memcpy(p, oid, oid_len);
+ p += oid_len;
+
+ src_op = src_ops;
+ while (src_op->op) {
+ osd_req_encode_op(req, op, src_op);
+ src_op++;
+ op++;
+ }
+
+ if (req->r_trail)
+ data_len += req->r_trail->length;
+
+ if (snapc) {
+ head->snap_seq = cpu_to_le64(snapc->seq);
+ head->num_snaps = cpu_to_le32(snapc->num_snaps);
+ for (i = 0; i < snapc->num_snaps; i++) {
+ put_unaligned_le64(snapc->snaps[i], p);
+ p += sizeof(u64);
+ }
+ }
+
+ if (flags & CEPH_OSD_FLAG_WRITE) {
+ req->r_request->hdr.data_off = cpu_to_le16(off);
+ req->r_request->hdr.data_len = cpu_to_le32(*plen + data_len);
+ } else if (data_len) {
+ req->r_request->hdr.data_off = 0;
+ req->r_request->hdr.data_len = cpu_to_le32(data_len);
+ }
+
+ BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
+ msg_size = p - msg->front.iov_base;
+ msg->front.iov_len = msg_size;
+ msg->hdr.front_len = cpu_to_le32(msg_size);
+ return;
+}
+EXPORT_SYMBOL(ceph_osdc_build_request);
/*
* build new request AND message, calculate layout, and adjust file
@@ -131,110 +421,40 @@
struct timespec *mtime,
bool use_mempool, int num_reply)
{
+ struct ceph_osd_req_op ops[3];
struct ceph_osd_request *req;
- struct ceph_msg *msg;
- struct ceph_osd_request_head *head;
- struct ceph_osd_op *op;
- void *p;
- int num_op = 1 + do_sync;
- size_t msg_size = sizeof(*head) + num_op*sizeof(*op);
- int i;
- if (use_mempool) {
- req = mempool_alloc(osdc->req_mempool, GFP_NOFS);
- memset(req, 0, sizeof(*req));
- } else {
- req = kzalloc(sizeof(*req), GFP_NOFS);
- }
- if (req == NULL)
- return NULL;
-
- req->r_osdc = osdc;
- req->r_mempool = use_mempool;
- kref_init(&req->r_kref);
- init_completion(&req->r_completion);
- init_completion(&req->r_safe_completion);
- INIT_LIST_HEAD(&req->r_unsafe_item);
- req->r_flags = flags;
-
- WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0);
-
- /* create reply message */
- if (use_mempool)
- msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
- else
- msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY,
- OSD_OPREPLY_FRONT_LEN, GFP_NOFS);
- if (!msg) {
- ceph_osdc_put_request(req);
- return NULL;
- }
- req->r_reply = msg;
-
- /* create request message; allow space for oid */
- msg_size += 40;
- if (snapc)
- msg_size += sizeof(u64) * snapc->num_snaps;
- if (use_mempool)
- msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
- else
- msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, GFP_NOFS);
- if (!msg) {
- ceph_osdc_put_request(req);
- return NULL;
- }
- msg->hdr.type = cpu_to_le16(CEPH_MSG_OSD_OP);
- memset(msg->front.iov_base, 0, msg->front.iov_len);
- head = msg->front.iov_base;
- op = (void *)(head + 1);
- p = (void *)(op + num_op);
-
- req->r_request = msg;
- req->r_snapc = ceph_get_snap_context(snapc);
-
- head->client_inc = cpu_to_le32(1); /* always, for now. */
- head->flags = cpu_to_le32(flags);
- if (flags & CEPH_OSD_FLAG_WRITE)
- ceph_encode_timespec(&head->mtime, mtime);
- head->num_ops = cpu_to_le16(num_op);
- op->op = cpu_to_le16(opcode);
-
- /* calculate max write size */
- calc_layout(osdc, vino, layout, off, plen, req);
- req->r_file_layout = *layout; /* keep a copy */
-
- if (flags & CEPH_OSD_FLAG_WRITE) {
- req->r_request->hdr.data_off = cpu_to_le16(off);
- req->r_request->hdr.data_len = cpu_to_le32(*plen);
- op->payload_len = cpu_to_le32(*plen);
- }
- op->extent.truncate_size = cpu_to_le64(truncate_size);
- op->extent.truncate_seq = cpu_to_le32(truncate_seq);
-
- /* fill in oid */
- head->object_len = cpu_to_le32(req->r_oid_len);
- memcpy(p, req->r_oid, req->r_oid_len);
- p += req->r_oid_len;
+ ops[0].op = opcode;
+ ops[0].extent.truncate_seq = truncate_seq;
+ ops[0].extent.truncate_size = truncate_size;
+ ops[0].payload_len = 0;
if (do_sync) {
- op++;
- op->op = cpu_to_le16(CEPH_OSD_OP_STARTSYNC);
- }
- if (snapc) {
- head->snap_seq = cpu_to_le64(snapc->seq);
- head->num_snaps = cpu_to_le32(snapc->num_snaps);
- for (i = 0; i < snapc->num_snaps; i++) {
- put_unaligned_le64(snapc->snaps[i], p);
- p += sizeof(u64);
- }
- }
+ ops[1].op = CEPH_OSD_OP_STARTSYNC;
+ ops[1].payload_len = 0;
+ ops[2].op = 0;
+ } else
+ ops[1].op = 0;
- BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
- msg_size = p - msg->front.iov_base;
- msg->front.iov_len = msg_size;
- msg->hdr.front_len = cpu_to_le32(msg_size);
+ req = ceph_osdc_alloc_request(osdc, flags,
+ snapc, ops,
+ use_mempool,
+ GFP_NOFS, NULL, NULL);
+ if (IS_ERR(req))
+ return req;
+
+ /* calculate max write size */
+ calc_layout(osdc, vino, layout, off, plen, req, ops);
+ req->r_file_layout = *layout; /* keep a copy */
+
+ ceph_osdc_build_request(req, off, plen, ops,
+ snapc,
+ mtime,
+ req->r_oid, req->r_oid_len);
+
return req;
}
+EXPORT_SYMBOL(ceph_osdc_new_request);
/*
* We keep osd requests in an rbtree, sorted by ->r_tid.
@@ -389,7 +609,7 @@
dout("__move_osd_to_lru %p\n", osd);
BUG_ON(!list_empty(&osd->o_osd_lru));
list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
- osd->lru_ttl = jiffies + osdc->client->mount_args->osd_idle_ttl * HZ;
+ osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl * HZ;
}
static void __remove_osd_from_lru(struct ceph_osd *osd)
@@ -483,7 +703,7 @@
static void __schedule_osd_timeout(struct ceph_osd_client *osdc)
{
schedule_delayed_work(&osdc->timeout_work,
- osdc->client->mount_args->osd_keepalive_timeout * HZ);
+ osdc->client->options->osd_keepalive_timeout * HZ);
}
static void __cancel_osd_timeout(struct ceph_osd_client *osdc)
@@ -549,7 +769,7 @@
*/
static void __cancel_request(struct ceph_osd_request *req)
{
- if (req->r_sent) {
+ if (req->r_sent && req->r_osd) {
ceph_con_revoke(&req->r_osd->o_con, req->r_request);
req->r_sent = 0;
}
@@ -661,7 +881,7 @@
reqhead->reassert_version = req->r_reassert_version;
req->r_stamp = jiffies;
- list_move_tail(&osdc->req_lru, &req->r_req_lru_item);
+ list_move_tail(&req->r_req_lru_item, &osdc->req_lru);
ceph_msg_get(req->r_request); /* send consumes a ref */
ceph_con_send(&req->r_osd->o_con, req->r_request);
@@ -684,9 +904,9 @@
container_of(work, struct ceph_osd_client, timeout_work.work);
struct ceph_osd_request *req, *last_req = NULL;
struct ceph_osd *osd;
- unsigned long timeout = osdc->client->mount_args->osd_timeout * HZ;
+ unsigned long timeout = osdc->client->options->osd_timeout * HZ;
unsigned long keepalive =
- osdc->client->mount_args->osd_keepalive_timeout * HZ;
+ osdc->client->options->osd_keepalive_timeout * HZ;
unsigned long last_stamp = 0;
struct rb_node *p;
struct list_head slow_osds;
@@ -773,7 +993,7 @@
container_of(work, struct ceph_osd_client,
osds_timeout_work.work);
unsigned long delay =
- osdc->client->mount_args->osd_idle_ttl * HZ >> 2;
+ osdc->client->options->osd_idle_ttl * HZ >> 2;
dout("osds timeout\n");
down_read(&osdc->map_sem);
@@ -1104,6 +1324,10 @@
req->r_request->pages = req->r_pages;
req->r_request->nr_pages = req->r_num_pages;
+#ifdef CONFIG_BLOCK
+ req->r_request->bio = req->r_bio;
+#endif
+ req->r_request->trail = req->r_trail;
register_request(osdc, req);
@@ -1131,6 +1355,7 @@
up_read(&osdc->map_sem);
return rc;
}
+EXPORT_SYMBOL(ceph_osdc_start_request);
/*
* wait for a request to complete
@@ -1153,6 +1378,7 @@
dout("wait_request tid %llu result %d\n", req->r_tid, req->r_result);
return req->r_result;
}
+EXPORT_SYMBOL(ceph_osdc_wait_request);
/*
* sync - wait for all in-flight requests to flush. avoid starvation.
@@ -1186,6 +1412,7 @@
mutex_unlock(&osdc->request_mutex);
dout("sync done (thru tid %llu)\n", last_tid);
}
+EXPORT_SYMBOL(ceph_osdc_sync);
/*
* init, shutdown
@@ -1211,7 +1438,7 @@
INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
schedule_delayed_work(&osdc->osds_timeout_work,
- round_jiffies_relative(osdc->client->mount_args->osd_idle_ttl * HZ));
+ round_jiffies_relative(osdc->client->options->osd_idle_ttl * HZ));
err = -ENOMEM;
osdc->req_mempool = mempool_create_kmalloc_pool(10,
@@ -1237,6 +1464,7 @@
out:
return err;
}
+EXPORT_SYMBOL(ceph_osdc_init);
void ceph_osdc_stop(struct ceph_osd_client *osdc)
{
@@ -1251,6 +1479,7 @@
ceph_msgpool_destroy(&osdc->msgpool_op);
ceph_msgpool_destroy(&osdc->msgpool_op_reply);
}
+EXPORT_SYMBOL(ceph_osdc_stop);
/*
* Read some contiguous pages. If we cross a stripe boundary, shorten
@@ -1288,6 +1517,7 @@
dout("readpages result %d\n", rc);
return rc;
}
+EXPORT_SYMBOL(ceph_osdc_readpages);
/*
* do a synchronous write on N pages
@@ -1330,6 +1560,7 @@
dout("writepages result %d\n", rc);
return rc;
}
+EXPORT_SYMBOL(ceph_osdc_writepages);
/*
* handle incoming message
@@ -1420,6 +1651,9 @@
}
m->pages = req->r_pages;
m->nr_pages = req->r_num_pages;
+#ifdef CONFIG_BLOCK
+ m->bio = req->r_bio;
+#endif
}
*skip = 0;
req->r_con_filling_msg = ceph_con_get(con);
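ceph_osdc_new_request() now simply demonstrates the split API: fill a zero-terminated array of ceph_osd_req_op, allocate with ceph_osdc_alloc_request(), then serialize with ceph_osdc_build_request(). A minimal single-op read built the same way (the object name, offsets, and the elided page attachment are placeholders):

#include <linux/string.h>
#include <linux/ceph/osd_client.h>

static struct ceph_osd_request *example_read_req(struct ceph_osd_client *osdc,
						 u64 off, u64 len)
{
	struct ceph_osd_req_op ops[2];
	struct ceph_osd_request *req;

	ops[0].op = CEPH_OSD_OP_READ;
	ops[0].extent.offset = off;		/* host order; encoded later */
	ops[0].extent.length = len;
	ops[0].extent.truncate_size = 0;
	ops[0].extent.truncate_seq = 0;
	ops[0].payload_len = 0;
	ops[1].op = 0;				/* terminator for get_num_ops() */

	req = ceph_osdc_alloc_request(osdc, CEPH_OSD_FLAG_READ, NULL, ops,
				      false, GFP_NOFS, NULL, NULL);
	if (!req)
		return NULL;

	ceph_osdc_build_request(req, off, &len, ops, NULL, NULL,
				"example_oid", strlen("example_oid"));
	/* caller attaches req->r_pages before ceph_osdc_start_request() */
	return req;
}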
diff --git a/fs/ceph/osdmap.c b/net/ceph/osdmap.c
similarity index 97%
rename from fs/ceph/osdmap.c
rename to net/ceph/osdmap.c
index e31f118..d73f3f6 100644
--- a/fs/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -1,14 +1,15 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <asm/div64.h>
-#include "super.h"
-#include "osdmap.h"
-#include "crush/hash.h"
-#include "crush/mapper.h"
-#include "decode.h"
+#include <linux/ceph/libceph.h>
+#include <linux/ceph/osdmap.h>
+#include <linux/ceph/decode.h>
+#include <linux/crush/hash.h>
+#include <linux/crush/mapper.h>
char *ceph_osdmap_state_str(char *str, int len, int state)
{
@@ -417,6 +418,20 @@
return NULL;
}
+int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
+{
+ struct rb_node *rbp;
+
+ for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {
+ struct ceph_pg_pool_info *pi =
+ rb_entry(rbp, struct ceph_pg_pool_info, node);
+ if (pi->name && strcmp(pi->name, name) == 0)
+ return pi->id;
+ }
+ return -ENOENT;
+}
+EXPORT_SYMBOL(ceph_pg_poolid_by_name);
+
static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
{
rb_erase(&pi->node, root);
@@ -966,6 +981,7 @@
dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
}
+EXPORT_SYMBOL(ceph_calc_file_object_mapping);
/*
* calculate an object layout (i.e. pgid) from an oid,
@@ -1011,6 +1027,7 @@
ol->ol_stripe_unit = fl->fl_object_stripe_unit;
return 0;
}
+EXPORT_SYMBOL(ceph_calc_object_layout);
/*
* Calculate raw osd vector for the given pgid. Return pointer to osd
@@ -1108,3 +1125,4 @@
return osds[i];
return -1;
}
+EXPORT_SYMBOL(ceph_calc_pg_primary);
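ceph_pg_poolid_by_name() is a linear walk of the pool tree, intended for infrequent name lookups by clients such as rbd. Usage, as a sketch (the pool name is a placeholder):

#include <linux/kernel.h>
#include <linux/ceph/osdmap.h>

static int example_lookup_pool(struct ceph_osdmap *map)
{
	int poolid = ceph_pg_poolid_by_name(map, "rbd");

	if (poolid == -ENOENT)
		pr_err("no pool named 'rbd'\n");
	return poolid;
}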
diff --git a/net/ceph/pagelist.c b/net/ceph/pagelist.c
new file mode 100644
index 0000000..13cb409
--- /dev/null
+++ b/net/ceph/pagelist.c
@@ -0,0 +1,154 @@
+
+#include <linux/module.h>
+#include <linux/gfp.h>
+#include <linux/pagemap.h>
+#include <linux/highmem.h>
+#include <linux/ceph/pagelist.h>
+
+static void ceph_pagelist_unmap_tail(struct ceph_pagelist *pl)
+{
+ if (pl->mapped_tail) {
+ struct page *page = list_entry(pl->head.prev, struct page, lru);
+ kunmap(page);
+ pl->mapped_tail = NULL;
+ }
+}
+
+int ceph_pagelist_release(struct ceph_pagelist *pl)
+{
+ ceph_pagelist_unmap_tail(pl);
+ while (!list_empty(&pl->head)) {
+ struct page *page = list_first_entry(&pl->head, struct page,
+ lru);
+ list_del(&page->lru);
+ __free_page(page);
+ }
+ ceph_pagelist_free_reserve(pl);
+ return 0;
+}
+EXPORT_SYMBOL(ceph_pagelist_release);
+
+static int ceph_pagelist_addpage(struct ceph_pagelist *pl)
+{
+ struct page *page;
+
+ if (!pl->num_pages_free) {
+ page = __page_cache_alloc(GFP_NOFS);
+ } else {
+ page = list_first_entry(&pl->free_list, struct page, lru);
+ list_del(&page->lru);
+ --pl->num_pages_free;
+ }
+ if (!page)
+ return -ENOMEM;
+ pl->room += PAGE_SIZE;
+ ceph_pagelist_unmap_tail(pl);
+ list_add_tail(&page->lru, &pl->head);
+ pl->mapped_tail = kmap(page);
+ return 0;
+}
+
+int ceph_pagelist_append(struct ceph_pagelist *pl, const void *buf, size_t len)
+{
+ while (pl->room < len) {
+ size_t bit = pl->room;
+ int ret;
+
+ memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK),
+ buf, bit);
+ pl->length += bit;
+ pl->room -= bit;
+ buf += bit;
+ len -= bit;
+ ret = ceph_pagelist_addpage(pl);
+ if (ret)
+ return ret;
+ }
+
+ memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK), buf, len);
+ pl->length += len;
+ pl->room -= len;
+ return 0;
+}
+EXPORT_SYMBOL(ceph_pagelist_append);
+
+/**
+ * Allocate enough pages for a pagelist to append the given amount
+ * of data without further allocation.
+ * Returns: 0 on success, -ENOMEM on error.
+ */
+int ceph_pagelist_reserve(struct ceph_pagelist *pl, size_t space)
+{
+ if (space <= pl->room)
+ return 0;
+ space -= pl->room;
+ space = (space + PAGE_SIZE - 1) >> PAGE_SHIFT; /* conv to num pages */
+
+ while (space > pl->num_pages_free) {
+ struct page *page = __page_cache_alloc(GFP_NOFS);
+ if (!page)
+ return -ENOMEM;
+ list_add_tail(&page->lru, &pl->free_list);
+ ++pl->num_pages_free;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ceph_pagelist_reserve);
+
+/**
+ * Free any pages that have been preallocated.
+ */
+int ceph_pagelist_free_reserve(struct ceph_pagelist *pl)
+{
+ while (!list_empty(&pl->free_list)) {
+ struct page *page = list_first_entry(&pl->free_list,
+ struct page, lru);
+ list_del(&page->lru);
+ __free_page(page);
+ --pl->num_pages_free;
+ }
+ BUG_ON(pl->num_pages_free);
+ return 0;
+}
+EXPORT_SYMBOL(ceph_pagelist_free_reserve);
+
+/**
+ * Create a truncation point.
+ */
+void ceph_pagelist_set_cursor(struct ceph_pagelist *pl,
+ struct ceph_pagelist_cursor *c)
+{
+ c->pl = pl;
+ c->page_lru = pl->head.prev;
+ c->room = pl->room;
+}
+EXPORT_SYMBOL(ceph_pagelist_set_cursor);
+
+/**
+ * Truncate a pagelist to the given point. Move extra pages to reserve.
+ * This won't sleep.
+ * Returns: 0 on success,
+ * -EINVAL if the pagelist doesn't match the trunc point pagelist
+ */
+int ceph_pagelist_truncate(struct ceph_pagelist *pl,
+ struct ceph_pagelist_cursor *c)
+{
+ struct page *page;
+
+ if (pl != c->pl)
+ return -EINVAL;
+ ceph_pagelist_unmap_tail(pl);
+ while (pl->head.prev != c->page_lru) {
+ page = list_entry(pl->head.prev, struct page, lru);
+ list_del(&page->lru); /* remove from pagelist */
+ list_add_tail(&page->lru, &pl->free_list); /* add to reserve */
+ ++pl->num_pages_free;
+ }
+ pl->room = c->room;
+ if (!list_empty(&pl->head)) {
+ page = list_entry(pl->head.prev, struct page, lru);
+ pl->mapped_tail = kmap(page);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ceph_pagelist_truncate);
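Reserve, cursor, and truncate combine into a speculative-append pattern: reserve worst-case space up front (which may allocate and fail), mark a cursor, append without further allocation, and roll back if the attempt is abandoned. A sketch against the functions above:

#include <linux/types.h>
#include <linux/ceph/pagelist.h>

static int example_speculative_append(struct ceph_pagelist *pl,
				      const void *buf, size_t len, bool keep)
{
	struct ceph_pagelist_cursor c;
	int ret;

	ret = ceph_pagelist_reserve(pl, len);	/* may allocate; may fail */
	if (ret)
		return ret;

	ceph_pagelist_set_cursor(pl, &c);	/* truncation point */

	ret = ceph_pagelist_append(pl, buf, len); /* draws on the reserve */
	if (ret || !keep)
		ceph_pagelist_truncate(pl, &c);	/* pages go back to reserve */

	return ret;
}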
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c
new file mode 100644
index 0000000..54caf06
--- /dev/null
+++ b/net/ceph/pagevec.c
@@ -0,0 +1,223 @@
+#include <linux/ceph/ceph_debug.h>
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/namei.h>
+#include <linux/writeback.h>
+
+#include <linux/ceph/libceph.h>
+
+/*
+ * build a vector of user pages
+ */
+struct page **ceph_get_direct_page_vector(const char __user *data,
+ int num_pages,
+ loff_t off, size_t len)
+{
+ struct page **pages;
+ int rc;
+
+ pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
+ if (!pages)
+ return ERR_PTR(-ENOMEM);
+
+	down_read(&current->mm->mmap_sem);
+	rc = get_user_pages(current, current->mm, (unsigned long)data,
+			    num_pages, 0, 0, pages, NULL);
+	up_read(&current->mm->mmap_sem);
+ if (rc < 0)
+ goto fail;
+ return pages;
+
+fail:
+ kfree(pages);
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL(ceph_get_direct_page_vector);
+
+void ceph_put_page_vector(struct page **pages, int num_pages)
+{
+ int i;
+
+ for (i = 0; i < num_pages; i++)
+ put_page(pages[i]);
+ kfree(pages);
+}
+EXPORT_SYMBOL(ceph_put_page_vector);
+
+void ceph_release_page_vector(struct page **pages, int num_pages)
+{
+ int i;
+
+ for (i = 0; i < num_pages; i++)
+ __free_pages(pages[i], 0);
+ kfree(pages);
+}
+EXPORT_SYMBOL(ceph_release_page_vector);
+
+/*
+ * allocate a vector of new pages
+ */
+struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)
+{
+ struct page **pages;
+ int i;
+
+ pages = kmalloc(sizeof(*pages) * num_pages, flags);
+ if (!pages)
+ return ERR_PTR(-ENOMEM);
+ for (i = 0; i < num_pages; i++) {
+ pages[i] = __page_cache_alloc(flags);
+ if (pages[i] == NULL) {
+ ceph_release_page_vector(pages, i);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+ return pages;
+}
+EXPORT_SYMBOL(ceph_alloc_page_vector);
+
+/*
+ * copy user data into a page vector
+ */
+int ceph_copy_user_to_page_vector(struct page **pages,
+ const char __user *data,
+ loff_t off, size_t len)
+{
+ int i = 0;
+ int po = off & ~PAGE_CACHE_MASK;
+ int left = len;
+ int l, bad;
+
+ while (left > 0) {
+ l = min_t(int, PAGE_CACHE_SIZE-po, left);
+ bad = copy_from_user(page_address(pages[i]) + po, data, l);
+ if (bad == l)
+ return -EFAULT;
+ data += l - bad;
+ left -= l - bad;
+ po += l - bad;
+ if (po == PAGE_CACHE_SIZE) {
+ po = 0;
+ i++;
+ }
+ }
+ return len;
+}
+EXPORT_SYMBOL(ceph_copy_user_to_page_vector);
+
+int ceph_copy_to_page_vector(struct page **pages,
+ const char *data,
+ loff_t off, size_t len)
+{
+ int i = 0;
+ size_t po = off & ~PAGE_CACHE_MASK;
+ size_t left = len;
+ size_t l;
+
+ while (left > 0) {
+ l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
+ memcpy(page_address(pages[i]) + po, data, l);
+ data += l;
+ left -= l;
+ po += l;
+ if (po == PAGE_CACHE_SIZE) {
+ po = 0;
+ i++;
+ }
+ }
+ return len;
+}
+EXPORT_SYMBOL(ceph_copy_to_page_vector);
+
+int ceph_copy_from_page_vector(struct page **pages,
+ char *data,
+ loff_t off, size_t len)
+{
+ int i = 0;
+ size_t po = off & ~PAGE_CACHE_MASK;
+ size_t left = len;
+ size_t l;
+
+ while (left > 0) {
+ l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
+ memcpy(data, page_address(pages[i]) + po, l);
+ data += l;
+ left -= l;
+ po += l;
+ if (po == PAGE_CACHE_SIZE) {
+ po = 0;
+ i++;
+ }
+ }
+ return len;
+}
+EXPORT_SYMBOL(ceph_copy_from_page_vector);
+
+/*
+ * copy data from a page vector into a user pointer
+ */
+int ceph_copy_page_vector_to_user(struct page **pages,
+ char __user *data,
+ loff_t off, size_t len)
+{
+ int i = 0;
+ int po = off & ~PAGE_CACHE_MASK;
+ int left = len;
+ int l, bad;
+
+ while (left > 0) {
+ l = min_t(int, left, PAGE_CACHE_SIZE-po);
+ bad = copy_to_user(data, page_address(pages[i]) + po, l);
+ if (bad == l)
+ return -EFAULT;
+ data += l - bad;
+ left -= l - bad;
+ if (po) {
+ po += l - bad;
+ if (po == PAGE_CACHE_SIZE)
+ po = 0;
+ }
+ i++;
+ }
+ return len;
+}
+EXPORT_SYMBOL(ceph_copy_page_vector_to_user);
+
+/*
+ * Zero an extent within a page vector. Offset is relative to the
+ * start of the first page.
+ */
+void ceph_zero_page_vector_range(int off, int len, struct page **pages)
+{
+ int i = off >> PAGE_CACHE_SHIFT;
+
+ off &= ~PAGE_CACHE_MASK;
+
+ dout("zero_page_vector_page %u~%u\n", off, len);
+
+ /* leading partial page? */
+ if (off) {
+ int end = min((int)PAGE_CACHE_SIZE, off + len);
+ dout("zeroing %d %p head from %d\n", i, pages[i],
+ (int)off);
+ zero_user_segment(pages[i], off, end);
+ len -= (end - off);
+ i++;
+ }
+ while (len >= PAGE_CACHE_SIZE) {
+ dout("zeroing %d %p len=%d\n", i, pages[i], len);
+ zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
+ len -= PAGE_CACHE_SIZE;
+ i++;
+ }
+ /* trailing partial page? */
+ if (len) {
+ dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len);
+ zero_user_segment(pages[i], 0, len);
+ }
+}
+EXPORT_SYMBOL(ceph_zero_page_vector_range);
+
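Together these helpers stage user data through a kernel page vector. A round-trip sketch (offset 0 for brevity; real callers size the vector with calc_pages_for()):

#include <linux/pagemap.h>
#include <linux/ceph/libceph.h>

static int example_stage_user_data(const char __user *data, size_t len)
{
	int num_pages = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	struct page **pages;
	int ret = 0;

	pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* returns len on success, -EFAULT if the user copy faulted */
	if (ceph_copy_user_to_page_vector(pages, data, 0, len) < 0)
		ret = -EFAULT;

	ceph_release_page_vector(pages, num_pages);
	return ret;
}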
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 251997a..282806b 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -243,6 +243,7 @@
unlock_sock_fast(sk, slow);
/* skb is now orphaned, can be freed outside of locked section */
+ trace_kfree_skb(skb, skb_free_datagram_locked);
__kfree_skb(skb);
}
EXPORT_SYMBOL(skb_free_datagram_locked);
diff --git a/net/core/dev.c b/net/core/dev.c
index 3721fbb..7ec85e2 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -128,6 +128,8 @@
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
+#include <trace/events/net.h>
+#include <trace/events/skb.h>
#include <linux/pci.h>
#include "net-sysfs.h"
@@ -1978,6 +1980,7 @@
}
rc = ops->ndo_start_xmit(skb, dev);
+ trace_net_dev_xmit(skb, rc);
if (rc == NETDEV_TX_OK)
txq_trans_update(txq);
return rc;
@@ -1998,6 +2001,7 @@
skb_dst_drop(nskb);
rc = ops->ndo_start_xmit(nskb, dev);
+ trace_net_dev_xmit(nskb, rc);
if (unlikely(rc != NETDEV_TX_OK)) {
if (rc & ~NETDEV_TX_MASK)
goto out_kfree_gso_skb;
@@ -2058,16 +2062,16 @@
struct sk_buff *skb)
{
int queue_index;
- struct sock *sk = skb->sk;
+ const struct net_device_ops *ops = dev->netdev_ops;
- queue_index = sk_tx_queue_get(sk);
- if (queue_index < 0) {
- const struct net_device_ops *ops = dev->netdev_ops;
+ if (ops->ndo_select_queue) {
+ queue_index = ops->ndo_select_queue(dev, skb);
+ queue_index = dev_cap_txqueue(dev, queue_index);
+ } else {
+ struct sock *sk = skb->sk;
+ queue_index = sk_tx_queue_get(sk);
+ if (queue_index < 0) {
- if (ops->ndo_select_queue) {
- queue_index = ops->ndo_select_queue(dev, skb);
- queue_index = dev_cap_txqueue(dev, queue_index);
- } else {
queue_index = 0;
if (dev->real_num_tx_queues > 1)
queue_index = skb_tx_hash(dev, skb);
@@ -2186,6 +2190,7 @@
#ifdef CONFIG_NET_CLS_ACT
skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
+ trace_net_dev_queue(skb);
if (q->enqueue) {
rc = __dev_xmit_skb(skb, q, dev, txq);
goto out;
@@ -2512,6 +2517,7 @@
if (netdev_tstamp_prequeue)
net_timestamp_check(skb);
+ trace_netif_rx(skb);
#ifdef CONFIG_RPS
{
struct rps_dev_flow voidflow, *rflow = &voidflow;
@@ -2571,6 +2577,7 @@
clist = clist->next;
WARN_ON(atomic_read(&skb->users));
+ trace_kfree_skb(skb, net_tx_action);
__kfree_skb(skb);
}
}
@@ -2828,6 +2835,7 @@
if (!netdev_tstamp_prequeue)
net_timestamp_check(skb);
+ trace_netif_receive_skb(skb);
if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
return NET_RX_SUCCESS;
@@ -4845,7 +4853,7 @@
dev = list_first_entry(head, struct net_device, unreg_list);
call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
- synchronize_net();
+ rcu_barrier();
list_for_each_entry(dev, head, unreg_list)
dev_put(dev);
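The dev_pick_tx() reordering makes a driver's ndo_select_queue() unconditionally authoritative; the socket's cached queue index is consulted only when the driver leaves the hook unset. The driver side, as a sketch (the policy shown is arbitrary):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Consulted first by dev_pick_tx() after this change. */
static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	return skb_tx_hash(dev, skb);	/* arbitrary example policy */
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_select_queue	= example_select_queue,
	/* .ndo_start_xmit and friends elided */
};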
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 7a85367..8451ab4 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -348,7 +348,7 @@
if (info.cmd == ETHTOOL_GRXCLSRLALL) {
if (info.rule_cnt > 0) {
if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32))
- rule_buf = kmalloc(info.rule_cnt * sizeof(u32),
+ rule_buf = kzalloc(info.rule_cnt * sizeof(u32),
GFP_USER);
if (!rule_buf)
return -ENOMEM;
@@ -397,7 +397,7 @@
(KMALLOC_MAX_SIZE - sizeof(*indir)) / sizeof(*indir->ring_index))
return -ENOMEM;
full_size = sizeof(*indir) + sizeof(*indir->ring_index) * table_size;
- indir = kmalloc(full_size, GFP_USER);
+ indir = kzalloc(full_size, GFP_USER);
if (!indir)
return -ENOMEM;
@@ -538,7 +538,7 @@
gstrings.len = ret;
- data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
+ data = kzalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
if (!data)
return -ENOMEM;
@@ -775,7 +775,7 @@
if (regs.len > reglen)
regs.len = reglen;
- regbuf = kmalloc(reglen, GFP_USER);
+ regbuf = kzalloc(reglen, GFP_USER);
if (!regbuf)
return -ENOMEM;
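Each kmalloc-to-kzalloc conversion in ethtool closes the same hole: a driver may fill fewer bytes than were allocated, and the subsequent copy_to_user() would ship stale heap contents to user space. The defensive pattern in general form (a sketch, not the ethtool code itself):

#include <linux/slab.h>
#include <linux/uaccess.h>

static int example_get_blob(void __user *useraddr, size_t len,
			    void (*fill)(void *buf, size_t len))
{
	void *buf = kzalloc(len, GFP_USER);	/* zeroed: no stale heap data */
	int ret = 0;

	if (!buf)
		return -ENOMEM;
	fill(buf, len);				/* may write less than len */
	if (copy_to_user(useraddr, buf, len))
		ret = -EFAULT;
	kfree(buf);
	return ret;
}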
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 9fbe7f7..6743146 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -232,7 +232,7 @@
est->last_packets = bstats->packets;
est->avpps = rate_est->pps<<10;
- spin_lock(&est_tree_lock);
+ spin_lock_bh(&est_tree_lock);
if (!elist[idx].timer.function) {
INIT_LIST_HEAD(&elist[idx].list);
setup_timer(&elist[idx].timer, est_timer, idx);
@@ -243,7 +243,7 @@
list_add_rcu(&est->list, &elist[idx].list);
gen_add_node(est);
- spin_unlock(&est_tree_lock);
+ spin_unlock_bh(&est_tree_lock);
return 0;
}
@@ -270,7 +270,7 @@
{
struct gen_estimator *e;
- spin_lock(&est_tree_lock);
+ spin_lock_bh(&est_tree_lock);
while ((e = gen_find_node(bstats, rate_est))) {
rb_erase(&e->node, &est_root);
@@ -281,7 +281,7 @@
list_del_rcu(&e->list);
call_rcu(&e->e_rcu, __gen_kill_estimator);
}
- spin_unlock(&est_tree_lock);
+ spin_unlock_bh(&est_tree_lock);
}
EXPORT_SYMBOL(gen_kill_estimator);
@@ -320,9 +320,9 @@
ASSERT_RTNL();
- spin_lock(&est_tree_lock);
+ spin_lock_bh(&est_tree_lock);
res = gen_find_node(bstats, rate_est) != NULL;
- spin_unlock(&est_tree_lock);
+ spin_unlock_bh(&est_tree_lock);
return res;
}
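
The est_tree_lock conversions to the _bh variants follow the standard rule for locks that can be contended from softirq context: a process-context holder must disable BHs first, or a softirq arriving on the same CPU can spin forever on a lock that CPU already holds. A sketch of the rule as a kernel-code fragment (illustrative names, not the gen_estimator code itself):

#include <linux/spinlock.h>
#include <linux/timer.h>

static DEFINE_SPINLOCK(shared_lock);	/* also taken from a timer */

static void shared_timer_fn(unsigned long arg)	/* softirq context */
{
	spin_lock(&shared_lock);	/* plain lock is fine here: a
					 * softirq is not preempted by
					 * another softirq on this CPU */
	/* ... walk the shared structure ... */
	spin_unlock(&shared_lock);
}

static void process_context_update(void)
{
	/* _bh disables softirqs on this CPU, so the timer cannot fire
	 * and deadlock on the lock we are about to take. */
	spin_lock_bh(&shared_lock);
	/* ... modify the shared structure ... */
	spin_unlock_bh(&shared_lock);
}
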
diff --git a/net/core/iovec.c b/net/core/iovec.c
index 1cd98df..e6b133b 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -35,9 +35,10 @@
* in any case.
*/
-int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
+long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
{
- int size, err, ct;
+ int size, ct;
+ long err;
if (m->msg_namelen) {
if (mode == VERIFY_READ) {
diff --git a/net/core/net-traces.c b/net/core/net-traces.c
index afa6380..7f1bb2a 100644
--- a/net/core/net-traces.c
+++ b/net/core/net-traces.c
@@ -26,6 +26,7 @@
#define CREATE_TRACE_POINTS
#include <trace/events/skb.h>
+#include <trace/events/net.h>
#include <trace/events/napi.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(kfree_skb);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3a2513f..56ba3c4 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -466,6 +466,7 @@
smp_rmb();
else if (likely(!atomic_dec_and_test(&skb->users)))
return;
+ trace_consume_skb(skb);
__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
@@ -2573,6 +2574,10 @@
__copy_skb_header(nskb, skb);
nskb->mac_len = skb->mac_len;
+ /* nskb and skb might have different headroom */
+ if (nskb->ip_summed == CHECKSUM_PARTIAL)
+ nskb->csum_start += skb_headroom(nskb) - headroom;
+
skb_reset_mac_header(nskb);
skb_set_network_header(nskb, skb->mac_len);
nskb->transport_header = (nskb->network_header +
@@ -2703,7 +2708,7 @@
return -E2BIG;
headroom = skb_headroom(p);
- nskb = netdev_alloc_skb(p->dev, headroom + skb_gro_offset(p));
+ nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
if (unlikely(!nskb))
return -ENOMEM;
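
The csum_start fix above relies on csum_start being an offset from skb->head: the header copied from the original skb encodes a distance that assumed the old headroom, so when the new segment ends up with different headroom the offset must shift by the difference. A tiny model of the arithmetic (illustrative variables, not the skb API):

#include <stdio.h>

int main(void)
{
	unsigned int old_headroom = 64;		/* headroom of source skb  */
	unsigned int new_headroom = 128;	/* headroom of the segment */
	unsigned int csum_start = old_headroom + 34;	/* 34 bytes into
							 * the packet data */

	csum_start += new_headroom - old_headroom;
	printf("csum_start now %u (still 34 bytes past the headroom)\n",
	       csum_start);
	return 0;
}
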
diff --git a/net/core/sock.c b/net/core/sock.c
index b05b9b6..7d99e13 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1078,8 +1078,11 @@
#ifdef CONFIG_CGROUPS
void sock_update_classid(struct sock *sk)
{
- u32 classid = task_cls_classid(current);
+ u32 classid;
+ rcu_read_lock(); /* doing current task, which cannot vanish. */
+ classid = task_cls_classid(current);
+ rcu_read_unlock();
if (classid && classid != sk->sk_classid)
sk->sk_classid = classid;
}
@@ -1351,9 +1354,9 @@
{
int uid;
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
return uid;
}
EXPORT_SYMBOL(sock_i_uid);
@@ -1362,9 +1365,9 @@
{
unsigned long ino;
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
return ino;
}
EXPORT_SYMBOL(sock_i_ino);
diff --git a/net/core/stream.c b/net/core/stream.c
index d959e0f..f5df85d 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -141,10 +141,10 @@
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk->sk_write_pending++;
- sk_wait_event(sk, &current_timeo, !sk->sk_err &&
- !(sk->sk_shutdown & SEND_SHUTDOWN) &&
- sk_stream_memory_free(sk) &&
- vm_wait);
+ sk_wait_event(sk, &current_timeo, sk->sk_err ||
+ (sk->sk_shutdown & SEND_SHUTDOWN) ||
+ (sk_stream_memory_free(sk) &&
+ !vm_wait));
sk->sk_write_pending--;
if (vm_wait) {
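
The sk_wait_event() condition is a wake-up predicate: the macro returns once the expression evaluates true. The old call passed what is essentially the keep-sleeping condition, so a writer could sleep through a pending error or shutdown until the timeout expired. A minimal model of the corrected predicate (illustrative, not the kernel macro):

#include <stdbool.h>
#include <stdio.h>

/* Return true when the waiter should stop sleeping: an error or a
 * shutdown must wake it, and so must memory becoming available (unless
 * we are deliberately throttling on vm_wait). */
static bool should_wake(int err, bool shutdown, bool mem_free, bool vm_wait)
{
	return err || shutdown || (mem_free && !vm_wait);
}

int main(void)
{
	printf("%d\n", should_wake(0, false, true, false));	/* 1: can send */
	printf("%d\n", should_wake(1, false, false, false));	/* 1: report error */
	printf("%d\n", should_wake(0, false, false, false));	/* 0: keep sleeping */
	return 0;
}
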
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 7c3a7d1..7cd7760 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -46,7 +46,7 @@
rp_filter on use:
echo 1 > /proc/sys/net/ipv4/conf/<device>/rp_filter
- and
+ or
echo 1 > /proc/sys/net/ipv4/conf/all/rp_filter
Note that some distributions enable it in startup scripts.
@@ -217,6 +217,7 @@
config NET_IPGRE
tristate "IP: GRE tunnels over IP"
+ depends on IPV6 || IPV6=n
help
Tunneling means encapsulating data of one protocol type within
another protocol and sending it over a channel that understands the
@@ -412,7 +413,7 @@
If unsure, say Y.
config INET_LRO
- bool "Large Receive Offload (ipv4/tcp)"
+ tristate "Large Receive Offload (ipv4/tcp)"
default y
---help---
Support for Large Receive Offload (ipv4/tcp).
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index f055094..721a8a3 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -62,8 +62,11 @@
}
if (!inet->inet_saddr)
inet->inet_saddr = rt->rt_src; /* Update source address */
- if (!inet->inet_rcv_saddr)
+ if (!inet->inet_rcv_saddr) {
inet->inet_rcv_saddr = rt->rt_src;
+ if (sk->sk_prot->rehash)
+ sk->sk_prot->rehash(sk);
+ }
inet->inet_daddr = rt->rt_dst;
inet->inet_dport = usin->sin_port;
sk->sk_state = TCP_ESTABLISHED;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index a439689..7d02a9f 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -246,6 +246,7 @@
struct fib_result res;
int no_addr, rpf, accept_local;
+ bool dev_match;
int ret;
struct net *net;
@@ -273,12 +274,22 @@
}
*spec_dst = FIB_RES_PREFSRC(res);
fib_combine_itag(itag, &res);
+ dev_match = false;
+
#ifdef CONFIG_IP_ROUTE_MULTIPATH
- if (FIB_RES_DEV(res) == dev || res.fi->fib_nhs > 1)
+ for (ret = 0; ret < res.fi->fib_nhs; ret++) {
+ struct fib_nh *nh = &res.fi->fib_nh[ret];
+
+ if (nh->nh_dev == dev) {
+ dev_match = true;
+ break;
+ }
+ }
#else
if (FIB_RES_DEV(res) == dev)
+ dev_match = true;
#endif
- {
+ if (dev_match) {
ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
fib_res_put(&res);
return ret;
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 79d057a..4a8e370 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -186,7 +186,9 @@
{
struct tnode *ret = node_parent(node);
- return rcu_dereference(ret);
+ return rcu_dereference_check(ret,
+ rcu_read_lock_held() ||
+ lockdep_rtnl_is_held());
}
/* Same as rcu_assign_pointer
@@ -1753,7 +1755,9 @@
static struct leaf *trie_firstleaf(struct trie *t)
{
- struct tnode *n = (struct tnode *) rcu_dereference(t->trie);
+ struct tnode *n = (struct tnode *) rcu_dereference_check(t->trie,
+ rcu_read_lock_held() ||
+ lockdep_rtnl_is_held());
if (!n)
return NULL;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index a1ad0e7..2a4bb76 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -856,6 +856,18 @@
igmpv3_clear_delrec(in_dev);
} else if (len < 12) {
return; /* ignore bogus packet; freed by caller */
+ } else if (IGMP_V1_SEEN(in_dev)) {
+ /* This is a v3 query with v1 queriers present */
+ max_delay = IGMP_Query_Response_Interval;
+ group = 0;
+ } else if (IGMP_V2_SEEN(in_dev)) {
+ /* this is a v3 query with v2 queriers present;
+ * Interpretation of the max_delay code is problematic here.
+ * A real v2 host would use ih_code directly, while v3 has a
+ * different encoding. We use the v3 encoding as more likely
+ * to be intended in a v3 query.
+ */
+ max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
} else { /* v3 */
if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
return;
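
IGMPV3_MRC() above decodes the v3 Max Resp Code field (RFC 3376): values below 128 are literal, and values at or above 128 use a 3-bit-exponent/4-bit-mantissa floating-point encoding; either way the unit is 1/10 second, hence the HZ/IGMP_TIMER_SCALE conversion. A stand-alone decoder sketching that encoding (not the kernel macro itself):

#include <stdio.h>

static unsigned int igmpv3_mrc(unsigned int code)
{
	if (code < 128)
		return code;	/* literal, in tenths of a second */
	/* |1|exp(3)|mant(4)| -> (mant | 0x10) << (exp + 3) */
	return ((code & 0x0f) | 0x10) << (((code >> 4) & 0x07) + 3);
}

int main(void)
{
	printf("%u\n", igmpv3_mrc(100));	/* 100 -> 10.0 s */
	printf("%u\n", igmpv3_mrc(0x8f));	/* 0x1f << 3 = 248 -> 24.8 s */
	return 0;
}
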
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 945b20a..35c93e8 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -45,7 +45,7 @@
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
-#ifdef CONFIG_IPV6
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
@@ -699,7 +699,7 @@
if ((dst = rt->rt_gateway) == 0)
goto tx_error_icmp;
}
-#ifdef CONFIG_IPV6
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
else if (skb->protocol == htons(ETH_P_IPV6)) {
struct in6_addr *addr6;
int addr_type;
@@ -774,7 +774,7 @@
goto tx_error;
}
}
-#ifdef CONFIG_IPV6
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
else if (skb->protocol == htons(ETH_P_IPV6)) {
struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
@@ -850,7 +850,7 @@
if ((iph->ttl = tiph->ttl) == 0) {
if (skb->protocol == htons(ETH_P_IP))
iph->ttl = old_iph->ttl;
-#ifdef CONFIG_IPV6
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
else if (skb->protocol == htons(ETH_P_IPV6))
iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit;
#endif
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 04b6989..7649d77 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -488,9 +488,8 @@
* we can switch to copy when see the first bad fragment.
*/
if (skb_has_frags(skb)) {
- struct sk_buff *frag;
+ struct sk_buff *frag, *frag2;
int first_len = skb_pagelen(skb);
- int truesizes = 0;
if (first_len - hlen > mtu ||
((first_len - hlen) & 7) ||
@@ -503,18 +502,18 @@
if (frag->len > mtu ||
((frag->len & 7) && frag->next) ||
skb_headroom(frag) < hlen)
- goto slow_path;
+ goto slow_path_clean;
/* Partially cloned skb? */
if (skb_shared(frag))
- goto slow_path;
+ goto slow_path_clean;
BUG_ON(frag->sk);
if (skb->sk) {
frag->sk = skb->sk;
frag->destructor = sock_wfree;
}
- truesizes += frag->truesize;
+ skb->truesize -= frag->truesize;
}
/* Everything is OK. Generate! */
@@ -524,7 +523,6 @@
frag = skb_shinfo(skb)->frag_list;
skb_frag_list_init(skb);
skb->data_len = first_len - skb_headlen(skb);
- skb->truesize -= truesizes;
skb->len = first_len;
iph->tot_len = htons(first_len);
iph->frag_off = htons(IP_MF);
@@ -576,6 +574,15 @@
}
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
return err;
+
+slow_path_clean:
+ skb_walk_frags(skb, frag2) {
+ if (frag2 == frag)
+ break;
+ frag2->sk = NULL;
+ frag2->destructor = NULL;
+ skb->truesize += frag2->truesize;
+ }
}
slow_path:
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 6c40a8c..64b70ad 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -1129,6 +1129,9 @@
case IP_HDRINCL:
val = inet->hdrincl;
break;
+ case IP_NODEFRAG:
+ val = inet->nodefrag;
+ break;
case IP_MTU_DISCOVER:
val = inet->pmtudisc;
break;
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 51d6c31..e8f4f9a 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -1420,6 +1420,9 @@
if (ret != 0)
break;
++i;
+ if (strcmp(arpt_get_target(iter1)->u.user.name,
+ XT_ERROR_TARGET) == 0)
+ ++newinfo->stacksize;
}
if (ret) {
/*
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 97b64b2..d163f2e 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1751,6 +1751,9 @@
if (ret != 0)
break;
++i;
+ if (strcmp(ipt_get_target(iter1)->u.user.name,
+ XT_ERROR_TARGET) == 0)
+ ++newinfo->stacksize;
}
if (ret) {
/*
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index b254daf..43eec80 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -112,6 +112,7 @@
/* ip_route_me_harder expects skb->dst to be set */
skb_dst_set_noref(nskb, skb_dst(oldskb));
+ nskb->protocol = htons(ETH_P_IP);
if (ip_route_me_harder(nskb, addr_type))
goto free_nskb;
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 244f7cb..37f8adb 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -11,6 +11,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/percpu.h>
+#include <linux/security.h>
#include <net/net_namespace.h>
#include <linux/netfilter.h>
@@ -87,6 +88,29 @@
rcu_read_unlock();
}
+#ifdef CONFIG_NF_CONNTRACK_SECMARK
+static int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
+{
+ int ret;
+ u32 len;
+ char *secctx;
+
+ ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
+ if (ret)
+ return ret;
+
+ ret = seq_printf(s, "secctx=%s ", secctx);
+
+ security_release_secctx(secctx, len);
+ return ret;
+}
+#else
+static inline int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
+{
+ return 0;
+}
+#endif
+
static int ct_seq_show(struct seq_file *s, void *v)
{
struct nf_conntrack_tuple_hash *hash = v;
@@ -148,10 +172,8 @@
goto release;
#endif
-#ifdef CONFIG_NF_CONNTRACK_SECMARK
- if (seq_printf(s, "secmark=%u ", ct->secmark))
+ if (ct_show_secctx(s, ct))
goto release;
-#endif
if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
goto release;
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index eab8de3..f3a9b42 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -66,9 +66,11 @@
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
+ struct sock *sk = skb->sk;
struct inet_sock *inet = inet_sk(skb->sk);
- if (inet && inet->nodefrag)
+ if (sk && (sk->sk_family == PF_INET) &&
+ inet->nodefrag)
return NF_ACCEPT;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 8c8632d..957c924 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -38,7 +38,7 @@
static struct nf_conntrack_l3proto *l3proto __read_mostly;
#define MAX_IP_NAT_PROTO 256
-static const struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO]
+static const struct nf_nat_protocol __rcu *nf_nat_protos[MAX_IP_NAT_PROTO]
__read_mostly;
static inline const struct nf_nat_protocol *
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index 1679e2c..ee5f419 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -893,13 +893,15 @@
unsigned char s[4];
if (offset & 1) {
- s[0] = s[2] = 0;
+ s[0] = ~0;
s[1] = ~*optr;
+ s[2] = 0;
s[3] = *nptr;
} else {
- s[1] = s[3] = 0;
s[0] = ~*optr;
+ s[1] = ~0;
s[2] = *nptr;
+ s[3] = 0;
}
*csum = csum_fold(csum_partial(s, 4, ~csum_unfold(*csum)));
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 3f56b6e..ac6559c 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1231,7 +1231,7 @@
}
if (net_ratelimit())
- printk(KERN_WARNING "Neighbour table overflow.\n");
+ printk(KERN_WARNING "ipv4: Neighbour table overflow.\n");
rt_drop(rt);
return -ENOBUFS;
}
@@ -2738,6 +2738,11 @@
}
EXPORT_SYMBOL_GPL(__ip_route_output_key);
+static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
+{
+ return NULL;
+}
+
static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
{
}
@@ -2746,7 +2751,7 @@
.family = AF_INET,
.protocol = cpu_to_be16(ETH_P_IP),
.destroy = ipv4_dst_destroy,
- .check = ipv4_dst_check,
+ .check = ipv4_blackhole_dst_check,
.update_pmtu = ipv4_rt_blackhole_update_pmtu,
.entries = ATOMIC_INIT(0),
};
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 176e11a..f115ea6 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -386,8 +386,6 @@
*/
mask = 0;
- if (sk->sk_err)
- mask = POLLERR;
/*
* POLLHUP is certainly not done right. But poll() doesn't
@@ -451,11 +449,17 @@
if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
mask |= POLLOUT | POLLWRNORM;
}
- }
+ } else
+ mask |= POLLOUT | POLLWRNORM;
if (tp->urg_data & TCP_URG_VALID)
mask |= POLLPRI;
}
+ /* This barrier is coupled with smp_wmb() in tcp_reset() */
+ smp_rmb();
+ if (sk->sk_err)
+ mask |= POLLERR;
+
return mask;
}
EXPORT_SYMBOL(tcp_poll);
@@ -939,7 +943,7 @@
sg = sk->sk_route_caps & NETIF_F_SG;
while (--iovlen >= 0) {
- int seglen = iov->iov_len;
+ size_t seglen = iov->iov_len;
unsigned char __user *from = iov->iov_base;
iov++;
@@ -2011,11 +2015,8 @@
}
}
if (sk->sk_state != TCP_CLOSE) {
- int orphan_count = percpu_counter_read_positive(
- sk->sk_prot->orphan_count);
-
sk_mem_reclaim(sk);
- if (tcp_too_many_orphans(sk, orphan_count)) {
+ if (tcp_too_many_orphans(sk, 0)) {
if (net_ratelimit())
printk(KERN_INFO "TCP: too many of orphaned "
"sockets\n");
@@ -3212,7 +3213,7 @@
{
struct sk_buff *skb = NULL;
unsigned long nr_pages, limit;
- int order, i, max_share;
+ int i, max_share, cnt;
unsigned long jiffy = jiffies;
BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
@@ -3261,22 +3262,12 @@
INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
}
- /* Try to be a bit smarter and adjust defaults depending
- * on available memory.
- */
- for (order = 0; ((1 << order) << PAGE_SHIFT) <
- (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
- order++)
- ;
- if (order >= 4) {
- tcp_death_row.sysctl_max_tw_buckets = 180000;
- sysctl_tcp_max_orphans = 4096 << (order - 4);
- sysctl_max_syn_backlog = 1024;
- } else if (order < 3) {
- tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
- sysctl_tcp_max_orphans >>= (3 - order);
- sysctl_max_syn_backlog = 128;
- }
+
+ cnt = tcp_hashinfo.ehash_mask + 1;
+
+ tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
+ sysctl_tcp_max_orphans = cnt / 2;
+ sysctl_max_syn_backlog = max(128, cnt / 256);
/* Set the pressure threshold to be a fraction of global memory that
* is up to 1/2 at 256 MB, decreasing toward zero with the amount of
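
The smp_rmb() added to tcp_poll() pairs with the smp_wmb() added to tcp_reset() in the tcp_input.c hunk further down: the writer must make the sk_err store visible before issuing the wakeup, and the poller must compute its event mask before sampling sk_err, or a freshly woken poller can observe the event yet read a stale zero sk_err. A fragment sketching the pairing (function names are illustrative, not a standalone module):

#include <linux/errno.h>
#include <linux/poll.h>
#include <net/sock.h>

/* Writer side, modelled on the tcp_reset() change below. */
static void publish_error(struct sock *sk)
{
	sk->sk_err = ECONNRESET;
	smp_wmb();			/* error store before the wakeup */
	sk->sk_error_report(sk);
}

/* Reader side, modelled on tcp_poll(): mask first, barrier, then error. */
static unsigned int finish_poll_mask(struct sock *sk, unsigned int mask)
{
	smp_rmb();			/* pairs with smp_wmb() above */
	if (sk->sk_err)
		mask |= POLLERR;
	return mask;
}
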
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 0ec9bd0..850c737 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -196,10 +196,10 @@
int tcp_set_allowed_congestion_control(char *val)
{
struct tcp_congestion_ops *ca;
- char *clone, *name;
+ char *saved_clone, *clone, *name;
int ret = 0;
- clone = kstrdup(val, GFP_USER);
+ saved_clone = clone = kstrdup(val, GFP_USER);
if (!clone)
return -ENOMEM;
@@ -226,6 +226,7 @@
}
out:
spin_unlock(&tcp_cong_list_lock);
+ kfree(saved_clone);
return ret;
}
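
The saved_clone fix addresses the classic strsep() pitfall: strsep() advances the pointer it is handed, leaving it NULL at the end of the string, so the iteration pointer can no longer be passed to kfree(). A user-space reproduction of the corrected pattern:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *saved, *clone, *name;

	/* Keep the original allocation in a second variable, exactly as
	 * saved_clone does above; clone is consumed by the loop. */
	saved = clone = strdup("reno cubic vegas");
	if (!clone)
		return 1;

	while ((name = strsep(&clone, " ")) && *name)
		printf("allowed: %s\n", name);

	free(saved);	/* clone is NULL here; freeing it would leak */
	return 0;
}
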
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e663b78..b55f60f 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2545,7 +2545,8 @@
cnt += tcp_skb_pcount(skb);
if (cnt > packets) {
- if (tcp_is_sack(tp) || (oldcnt >= packets))
+ if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) ||
+ (oldcnt >= packets))
break;
mss = skb_shinfo(skb)->gso_size;
@@ -4048,6 +4049,8 @@
default:
sk->sk_err = ECONNRESET;
}
+ /* This barrier is coupled with smp_rmb() in tcp_poll() */
+ smp_wmb();
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_error_report(sk);
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 808bb92..74c54b3 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -66,18 +66,18 @@
static int tcp_out_of_resources(struct sock *sk, int do_reset)
{
struct tcp_sock *tp = tcp_sk(sk);
- int orphans = percpu_counter_read_positive(&tcp_orphan_count);
+ int shift = 0;
/* If peer does not open window for long time, or did not transmit
* anything for long time, penalize it. */
if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
- orphans <<= 1;
+ shift++;
/* If some dubious ICMP arrived, penalize even more. */
if (sk->sk_err_soft)
- orphans <<= 1;
+ shift++;
- if (tcp_too_many_orphans(sk, orphans)) {
+ if (tcp_too_many_orphans(sk, shift)) {
if (net_ratelimit())
printk(KERN_INFO "Out of socket memory\n");
@@ -135,13 +135,16 @@
/* This function calculates a "timeout" which is equivalent to the timeout of a
* TCP connection after "boundary" unsuccessful, exponentially backed-off
- * retransmissions with an initial RTO of TCP_RTO_MIN.
+ * retransmissions with an initial RTO of TCP_RTO_MIN or TCP_TIMEOUT_INIT if
+ * syn_set flag is set.
*/
static bool retransmits_timed_out(struct sock *sk,
- unsigned int boundary)
+ unsigned int boundary,
+ bool syn_set)
{
unsigned int timeout, linear_backoff_thresh;
unsigned int start_ts;
+ unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;
if (!inet_csk(sk)->icsk_retransmits)
return false;
@@ -151,12 +154,12 @@
else
start_ts = tcp_sk(sk)->retrans_stamp;
- linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN);
+ linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);
if (boundary <= linear_backoff_thresh)
- timeout = ((2 << boundary) - 1) * TCP_RTO_MIN;
+ timeout = ((2 << boundary) - 1) * rto_base;
else
- timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN +
+ timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
return (tcp_time_stamp - start_ts) >= timeout;
@@ -167,14 +170,15 @@
{
struct inet_connection_sock *icsk = inet_csk(sk);
int retry_until;
- bool do_reset;
+ bool do_reset, syn_set = 0;
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
if (icsk->icsk_retransmits)
dst_negative_advice(sk);
retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
+ syn_set = 1;
} else {
- if (retransmits_timed_out(sk, sysctl_tcp_retries1)) {
+ if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0)) {
/* Black hole detection */
tcp_mtu_probing(icsk, sk);
@@ -187,14 +191,14 @@
retry_until = tcp_orphan_retries(sk, alive);
do_reset = alive ||
- !retransmits_timed_out(sk, retry_until);
+ !retransmits_timed_out(sk, retry_until, 0);
if (tcp_out_of_resources(sk, do_reset))
return 1;
}
}
- if (retransmits_timed_out(sk, retry_until)) {
+ if (retransmits_timed_out(sk, retry_until, syn_set)) {
/* Has it gone just too far? */
tcp_write_err(sk);
return 1;
@@ -436,7 +440,7 @@
icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
}
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
- if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1))
+ if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0))
__sk_dst_reset(sk);
out:;
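
retransmits_timed_out() converts a retransmission count into elapsed time: boundary exponentially backed-off retransmissions starting at rto_base consume ((2 << boundary) - 1) * rto_base, and once the RTO saturates at TCP_RTO_MAX each further attempt costs TCP_RTO_MAX. A user-space model of that arithmetic (arbitrary time units, illustrative only):

#include <stdio.h>

static unsigned int backoff_timeout(unsigned int boundary,
				    unsigned int rto_base,
				    unsigned int rto_max)
{
	unsigned int thresh = 0;

	/* thresh = ilog2(rto_max / rto_base): last doubling that fits */
	while ((rto_base << (thresh + 1)) <= rto_max)
		thresh++;

	if (boundary <= thresh)
		return ((2u << boundary) - 1) * rto_base;
	return ((2u << thresh) - 1) * rto_base +
	       (boundary - thresh) * rto_max;
}

int main(void)
{
	/* base 1, max 8: three tries cost 1 + 2 + 4 = 7 */
	printf("%u\n", backoff_timeout(2, 1, 8));
	/* past the threshold, each extra try costs rto_max: 15 + 2*8 = 31 */
	printf("%u\n", backoff_timeout(5, 1, 8));
	return 0;
}
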
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 32e0bef..fb23c2e 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1260,6 +1260,49 @@
}
EXPORT_SYMBOL(udp_lib_unhash);
+/*
+ * inet_rcv_saddr was changed, we must rehash the secondary hash
+ */
+void udp_lib_rehash(struct sock *sk, u16 newhash)
+{
+ if (sk_hashed(sk)) {
+ struct udp_table *udptable = sk->sk_prot->h.udp_table;
+ struct udp_hslot *hslot, *hslot2, *nhslot2;
+
+ hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
+ nhslot2 = udp_hashslot2(udptable, newhash);
+ udp_sk(sk)->udp_portaddr_hash = newhash;
+ if (hslot2 != nhslot2) {
+ hslot = udp_hashslot(udptable, sock_net(sk),
+ udp_sk(sk)->udp_port_hash);
+ /* we must lock primary chain too */
+ spin_lock_bh(&hslot->lock);
+
+ spin_lock(&hslot2->lock);
+ hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
+ hslot2->count--;
+ spin_unlock(&hslot2->lock);
+
+ spin_lock(&nhslot2->lock);
+ hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
+ &nhslot2->head);
+ nhslot2->count++;
+ spin_unlock(&nhslot2->lock);
+
+ spin_unlock_bh(&hslot->lock);
+ }
+ }
+}
+EXPORT_SYMBOL(udp_lib_rehash);
+
+static void udp_v4_rehash(struct sock *sk)
+{
+ u16 new_hash = udp4_portaddr_hash(sock_net(sk),
+ inet_sk(sk)->inet_rcv_saddr,
+ inet_sk(sk)->inet_num);
+ udp_lib_rehash(sk, new_hash);
+}
+
static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
int rc;
@@ -1843,6 +1886,7 @@
.backlog_rcv = __udp_queue_rcv_skb,
.hash = udp_lib_hash,
.unhash = udp_lib_unhash,
+ .rehash = udp_v4_rehash,
.get_port = udp_v4_get_port,
.memory_allocated = &udp_memory_allocated,
.sysctl_mem = sysctl_udp_mem,
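
udp_lib_rehash() exists because the secondary hash is keyed on the bound local address: when connect() replaces a wildcard rcv_saddr with a concrete one (see the datagram.c hunks elsewhere in this series), the socket must move to the bucket that future lookups of the new address will scan. A toy model of why the move is mandatory (stand-in hash, not udp4_portaddr_hash):

#include <stdio.h>

static unsigned int bucket(unsigned int saddr, unsigned int port,
			   unsigned int nbuckets)
{
	return (saddr ^ port) % nbuckets;	/* stand-in for the hash */
}

int main(void)
{
	unsigned int port = 5353, nb = 128;
	unsigned int before = bucket(0, port, nb);		/* INADDR_ANY  */
	unsigned int after = bucket(0xc0a80101, port, nb);	/* 192.168.1.1 */

	printf("bucket %u -> %u: a socket left in the old bucket would\n"
	       "never be found by lookups keyed on the new address\n",
	       before, after);
	return 0;
}
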
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 869078d..a580349 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -61,7 +61,7 @@
static int xfrm4_get_tos(struct flowi *fl)
{
- return fl->fl4_tos;
+ return IPTOS_RT_MASK & fl->fl4_tos; /* Strip ECN bits */
}
static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index 1ef1366..4794762 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -21,21 +21,25 @@
}
static void
-__xfrm4_init_tempsel(struct xfrm_state *x, struct flowi *fl,
- struct xfrm_tmpl *tmpl,
- xfrm_address_t *daddr, xfrm_address_t *saddr)
+__xfrm4_init_tempsel(struct xfrm_selector *sel, struct flowi *fl)
{
- x->sel.daddr.a4 = fl->fl4_dst;
- x->sel.saddr.a4 = fl->fl4_src;
- x->sel.dport = xfrm_flowi_dport(fl);
- x->sel.dport_mask = htons(0xffff);
- x->sel.sport = xfrm_flowi_sport(fl);
- x->sel.sport_mask = htons(0xffff);
- x->sel.family = AF_INET;
- x->sel.prefixlen_d = 32;
- x->sel.prefixlen_s = 32;
- x->sel.proto = fl->proto;
- x->sel.ifindex = fl->oif;
+ sel->daddr.a4 = fl->fl4_dst;
+ sel->saddr.a4 = fl->fl4_src;
+ sel->dport = xfrm_flowi_dport(fl);
+ sel->dport_mask = htons(0xffff);
+ sel->sport = xfrm_flowi_sport(fl);
+ sel->sport_mask = htons(0xffff);
+ sel->family = AF_INET;
+ sel->prefixlen_d = 32;
+ sel->prefixlen_s = 32;
+ sel->proto = fl->proto;
+ sel->ifindex = fl->oif;
+}
+
+static void
+xfrm4_init_temprop(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
+ xfrm_address_t *daddr, xfrm_address_t *saddr)
+{
x->id = tmpl->id;
if (x->id.daddr.a4 == 0)
x->id.daddr.a4 = daddr->a4;
@@ -70,6 +74,7 @@
.owner = THIS_MODULE,
.init_flags = xfrm4_init_flags,
.init_tempsel = __xfrm4_init_tempsel,
+ .init_temprop = xfrm4_init_temprop,
.output = xfrm4_output,
.extract_input = xfrm4_extract_input,
.extract_output = xfrm4_extract_output,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index ab70a3f..324fac3 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4637,10 +4637,12 @@
if (err < 0) {
printk(KERN_CRIT "IPv6 Addrconf:"
" cannot initialize default policy table: %d.\n", err);
- return err;
+ goto out;
}
- register_pernet_subsys(&addrconf_ops);
+ err = register_pernet_subsys(&addrconf_ops);
+ if (err < 0)
+ goto out_addrlabel;
/* The addrconf netdev notifier requires that loopback_dev
* has it's ipv6 private information allocated and setup
@@ -4692,7 +4694,9 @@
unregister_netdevice_notifier(&ipv6_dev_notf);
errlo:
unregister_pernet_subsys(&addrconf_ops);
-
+out_addrlabel:
+ ipv6_addr_label_cleanup();
+out:
return err;
}
@@ -4703,6 +4707,7 @@
unregister_netdevice_notifier(&ipv6_dev_notf);
unregister_pernet_subsys(&addrconf_ops);
+ ipv6_addr_label_cleanup();
rtnl_lock();
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index f0e774c..8175f80 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -393,6 +393,11 @@
return register_pernet_subsys(&ipv6_addr_label_ops);
}
+void ipv6_addr_label_cleanup(void)
+{
+ unregister_pernet_subsys(&ipv6_addr_label_ops);
+}
+
static const struct nla_policy ifal_policy[IFAL_MAX+1] = {
[IFAL_ADDRESS] = { .len = sizeof(struct in6_addr), },
[IFAL_LABEL] = { .len = sizeof(u32), },
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 7d929a2..ef371aa 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -105,9 +105,12 @@
if (ipv6_addr_any(&np->saddr))
ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
- if (ipv6_addr_any(&np->rcv_saddr))
+ if (ipv6_addr_any(&np->rcv_saddr)) {
ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
&np->rcv_saddr);
+ if (sk->sk_prot->rehash)
+ sk->sk_prot->rehash(sk);
+ }
goto out;
}
@@ -181,6 +184,8 @@
if (ipv6_addr_any(&np->rcv_saddr)) {
ipv6_addr_copy(&np->rcv_saddr, &fl.fl6_src);
inet->inet_rcv_saddr = LOOPBACK4_IPV6;
+ if (sk->sk_prot->rehash)
+ sk->sk_prot->rehash(sk);
}
ip6_dst_store(sk, dst,
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index d40b330..980912e 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -639,7 +639,7 @@
if (skb_has_frags(skb)) {
int first_len = skb_pagelen(skb);
- int truesizes = 0;
+ struct sk_buff *frag2;
if (first_len - hlen > mtu ||
((first_len - hlen) & 7) ||
@@ -651,18 +651,18 @@
if (frag->len > mtu ||
((frag->len & 7) && frag->next) ||
skb_headroom(frag) < hlen)
- goto slow_path;
+ goto slow_path_clean;
/* Partially cloned skb? */
if (skb_shared(frag))
- goto slow_path;
+ goto slow_path_clean;
BUG_ON(frag->sk);
if (skb->sk) {
frag->sk = skb->sk;
frag->destructor = sock_wfree;
- truesizes += frag->truesize;
}
+ skb->truesize -= frag->truesize;
}
err = 0;
@@ -693,7 +693,6 @@
first_len = skb_pagelen(skb);
skb->data_len = first_len - skb_headlen(skb);
- skb->truesize -= truesizes;
skb->len = first_len;
ipv6_hdr(skb)->payload_len = htons(first_len -
sizeof(struct ipv6hdr));
@@ -756,6 +755,15 @@
IPSTATS_MIB_FRAGFAILS);
dst_release(&rt->dst);
return err;
+
+slow_path_clean:
+ skb_walk_frags(skb, frag2) {
+ if (frag2 == frag)
+ break;
+ frag2->sk = NULL;
+ frag2->destructor = NULL;
+ skb->truesize += frag2->truesize;
+ }
}
slow_path:
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 29a7bca..8e754be 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1766,6 +1766,9 @@
if (ret != 0)
break;
++i;
+ if (strcmp(ip6t_get_target(iter1)->u.user.name,
+ XT_ERROR_TARGET) == 0)
+ ++newinfo->stacksize;
}
if (ret) {
/*
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 13ef5bc..578f3c1 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -113,14 +113,6 @@
kfree_skb(NFCT_FRAG6_CB(skb)->orig);
}
-/* Memory Tracking Functions. */
-static void frag_kfree_skb(struct sk_buff *skb)
-{
- atomic_sub(skb->truesize, &nf_init_frags.mem);
- nf_skb_free(skb);
- kfree_skb(skb);
-}
-
/* Destruction primitives. */
static __inline__ void fq_put(struct nf_ct_frag6_queue *fq)
@@ -282,66 +274,22 @@
}
found:
- /* We found where to put this one. Check for overlap with
- * preceding fragment, and, if needed, align things so that
- * any overlaps are eliminated.
+ /* RFC5722, Section 4:
+ * When reassembling an IPv6 datagram, if
+ * one or more of its constituent fragments is determined to be an
+ * overlapping fragment, the entire datagram (and any constituent
+ * fragments, including those not yet received) MUST be silently
+ * discarded.
*/
- if (prev) {
- int i = (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset;
- if (i > 0) {
- offset += i;
- if (end <= offset) {
- pr_debug("overlap\n");
- goto err;
- }
- if (!pskb_pull(skb, i)) {
- pr_debug("Can't pull\n");
- goto err;
- }
- if (skb->ip_summed != CHECKSUM_UNNECESSARY)
- skb->ip_summed = CHECKSUM_NONE;
- }
- }
+ /* Check for overlap with preceding fragment. */
+ if (prev &&
+ (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset > 0)
+ goto discard_fq;
- /* Look for overlap with succeeding segments.
- * If we can merge fragments, do it.
- */
- while (next && NFCT_FRAG6_CB(next)->offset < end) {
- /* overlap is 'i' bytes */
- int i = end - NFCT_FRAG6_CB(next)->offset;
-
- if (i < next->len) {
- /* Eat head of the next overlapped fragment
- * and leave the loop. The next ones cannot overlap.
- */
- pr_debug("Eat head of the overlapped parts.: %d", i);
- if (!pskb_pull(next, i))
- goto err;
-
- /* next fragment */
- NFCT_FRAG6_CB(next)->offset += i;
- fq->q.meat -= i;
- if (next->ip_summed != CHECKSUM_UNNECESSARY)
- next->ip_summed = CHECKSUM_NONE;
- break;
- } else {
- struct sk_buff *free_it = next;
-
- /* Old fragmnet is completely overridden with
- * new one drop it.
- */
- next = next->next;
-
- if (prev)
- prev->next = next;
- else
- fq->q.fragments = next;
-
- fq->q.meat -= free_it->len;
- frag_kfree_skb(free_it);
- }
- }
+ /* Look for overlap with succeeding segment. */
+ if (next && NFCT_FRAG6_CB(next)->offset < end)
+ goto discard_fq;
NFCT_FRAG6_CB(skb)->offset = offset;
@@ -371,6 +319,8 @@
write_unlock(&nf_frags.lock);
return 0;
+discard_fq:
+ fq_kill(fq);
err:
return -1;
}
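
Per RFC 5722, both IPv6 reassembly paths in this series now drop the whole queue on any overlap instead of trimming fragments, removing the ambiguity that overlapping fragments with differing contents used to create. The test itself reduces to two interval comparisons against the offset-sorted neighbours; a stand-alone model (illustrative, not the kernel structures):

#include <stdbool.h>
#include <stdio.h>

static bool overlaps(bool have_prev, int prev_off, int prev_len,
		     bool have_next, int next_off, int offset, int end)
{
	if (have_prev && prev_off + prev_len > offset)
		return true;	/* tail of the previous fragment overlaps */
	if (have_next && next_off < end)
		return true;	/* head of the next fragment overlaps */
	return false;
}

int main(void)
{
	/* prev covers [0,100), new fragment is [80,180): overlap -> kill */
	printf("%d\n", overlaps(true, 0, 100, true, 200, 80, 180));	/* 1 */
	/* prev covers [0,80), next starts at 180: clean fit */
	printf("%d\n", overlaps(true, 0, 80, true, 180, 80, 180));	/* 0 */
	return 0;
}
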
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 545c414..64cfef1 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -149,13 +149,6 @@
}
EXPORT_SYMBOL(ip6_frag_match);
-/* Memory Tracking Functions. */
-static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb)
-{
- atomic_sub(skb->truesize, &nf->mem);
- kfree_skb(skb);
-}
-
void ip6_frag_init(struct inet_frag_queue *q, void *a)
{
struct frag_queue *fq = container_of(q, struct frag_queue, q);
@@ -346,58 +339,22 @@
}
found:
- /* We found where to put this one. Check for overlap with
- * preceding fragment, and, if needed, align things so that
- * any overlaps are eliminated.
+ /* RFC5722, Section 4:
+ * When reassembling an IPv6 datagram, if
+ * one or more of its constituent fragments is determined to be an
+ * overlapping fragment, the entire datagram (and any constituent
+ * fragments, including those not yet received) MUST be silently
+ * discarded.
*/
- if (prev) {
- int i = (FRAG6_CB(prev)->offset + prev->len) - offset;
- if (i > 0) {
- offset += i;
- if (end <= offset)
- goto err;
- if (!pskb_pull(skb, i))
- goto err;
- if (skb->ip_summed != CHECKSUM_UNNECESSARY)
- skb->ip_summed = CHECKSUM_NONE;
- }
- }
+ /* Check for overlap with preceding fragment. */
+ if (prev &&
+ (FRAG6_CB(prev)->offset + prev->len) - offset > 0)
+ goto discard_fq;
- /* Look for overlap with succeeding segments.
- * If we can merge fragments, do it.
- */
- while (next && FRAG6_CB(next)->offset < end) {
- int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */
-
- if (i < next->len) {
- /* Eat head of the next overlapped fragment
- * and leave the loop. The next ones cannot overlap.
- */
- if (!pskb_pull(next, i))
- goto err;
- FRAG6_CB(next)->offset += i; /* next fragment */
- fq->q.meat -= i;
- if (next->ip_summed != CHECKSUM_UNNECESSARY)
- next->ip_summed = CHECKSUM_NONE;
- break;
- } else {
- struct sk_buff *free_it = next;
-
- /* Old fragment is completely overridden with
- * new one drop it.
- */
- next = next->next;
-
- if (prev)
- prev->next = next;
- else
- fq->q.fragments = next;
-
- fq->q.meat -= free_it->len;
- frag_kfree_skb(fq->q.net, free_it);
- }
- }
+ /* Look for overlap with succeeding segment. */
+ if (next && FRAG6_CB(next)->offset < end)
+ goto discard_fq;
FRAG6_CB(skb)->offset = offset;
@@ -436,6 +393,8 @@
write_unlock(&ip6_frags.lock);
return -1;
+discard_fq:
+ fq_kill(fq);
err:
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_REASMFAILS);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index d126365..a275c6e 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -670,7 +670,7 @@
if (net_ratelimit())
printk(KERN_WARNING
- "Neighbour table overflow.\n");
+ "ipv6: Neighbour table overflow.\n");
dst_free(&rt->dst);
return NULL;
}
@@ -1556,14 +1556,13 @@
* i.e. Path MTU discovery
*/
-void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
- struct net_device *dev, u32 pmtu)
+static void rt6_do_pmtu_disc(struct in6_addr *daddr, struct in6_addr *saddr,
+ struct net *net, u32 pmtu, int ifindex)
{
struct rt6_info *rt, *nrt;
- struct net *net = dev_net(dev);
int allfrag = 0;
- rt = rt6_lookup(net, daddr, saddr, dev->ifindex, 0);
+ rt = rt6_lookup(net, daddr, saddr, ifindex, 0);
if (rt == NULL)
return;
@@ -1631,6 +1630,27 @@
dst_release(&rt->dst);
}
+void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
+ struct net_device *dev, u32 pmtu)
+{
+ struct net *net = dev_net(dev);
+
+ /*
+ * RFC 1981 states that a node "MUST reduce the size of the packets it
+ * is sending along the path" that caused the Packet Too Big message.
+ * Since it's not possible in the general case to determine which
+ * interface was used to send the original packet, we update the MTU
+ * on the interface that will be used to send future packets. We also
+ * update the MTU on the interface that received the Packet Too Big in
+ * case the original packet was forced out that interface with
+ * SO_BINDTODEVICE or similar. This is the next best thing to the
+ * correct behaviour, which would be to update the MTU on all
+ * interfaces.
+ */
+ rt6_do_pmtu_disc(daddr, saddr, net, pmtu, 0);
+ rt6_do_pmtu_disc(daddr, saddr, net, pmtu, dev->ifindex);
+}
+
/*
* Misc support functions
*/
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 1dd1aff..5acb356 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -111,6 +111,15 @@
return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr);
}
+static void udp_v6_rehash(struct sock *sk)
+{
+ u16 new_hash = udp6_portaddr_hash(sock_net(sk),
+ &inet6_sk(sk)->rcv_saddr,
+ inet_sk(sk)->inet_num);
+
+ udp_lib_rehash(sk, new_hash);
+}
+
static inline int compute_score(struct sock *sk, struct net *net,
unsigned short hnum,
struct in6_addr *saddr, __be16 sport,
@@ -1447,6 +1456,7 @@
.backlog_rcv = udpv6_queue_rcv_skb,
.hash = udp_lib_hash,
.unhash = udp_lib_unhash,
+ .rehash = udp_v6_rehash,
.get_port = udp_v6_get_port,
.memory_allocated = &udp_memory_allocated,
.sysctl_mem = sysctl_udp_mem,
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c
index f417b77..a67575d 100644
--- a/net/ipv6/xfrm6_state.c
+++ b/net/ipv6/xfrm6_state.c
@@ -20,23 +20,27 @@
#include <net/addrconf.h>
static void
-__xfrm6_init_tempsel(struct xfrm_state *x, struct flowi *fl,
- struct xfrm_tmpl *tmpl,
- xfrm_address_t *daddr, xfrm_address_t *saddr)
+__xfrm6_init_tempsel(struct xfrm_selector *sel, struct flowi *fl)
{
/* Initialize temporary selector matching only
* to current session. */
- ipv6_addr_copy((struct in6_addr *)&x->sel.daddr, &fl->fl6_dst);
- ipv6_addr_copy((struct in6_addr *)&x->sel.saddr, &fl->fl6_src);
- x->sel.dport = xfrm_flowi_dport(fl);
- x->sel.dport_mask = htons(0xffff);
- x->sel.sport = xfrm_flowi_sport(fl);
- x->sel.sport_mask = htons(0xffff);
- x->sel.family = AF_INET6;
- x->sel.prefixlen_d = 128;
- x->sel.prefixlen_s = 128;
- x->sel.proto = fl->proto;
- x->sel.ifindex = fl->oif;
+ ipv6_addr_copy((struct in6_addr *)&sel->daddr, &fl->fl6_dst);
+ ipv6_addr_copy((struct in6_addr *)&sel->saddr, &fl->fl6_src);
+ sel->dport = xfrm_flowi_dport(fl);
+ sel->dport_mask = htons(0xffff);
+ sel->sport = xfrm_flowi_sport(fl);
+ sel->sport_mask = htons(0xffff);
+ sel->family = AF_INET6;
+ sel->prefixlen_d = 128;
+ sel->prefixlen_s = 128;
+ sel->proto = fl->proto;
+ sel->ifindex = fl->oif;
+}
+
+static void
+xfrm6_init_temprop(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
+ xfrm_address_t *daddr, xfrm_address_t *saddr)
+{
x->id = tmpl->id;
if (ipv6_addr_any((struct in6_addr*)&x->id.daddr))
memcpy(&x->id.daddr, daddr, sizeof(x->sel.daddr));
@@ -168,6 +172,7 @@
.eth_proto = htons(ETH_P_IPV6),
.owner = THIS_MODULE,
.init_tempsel = __xfrm6_init_tempsel,
+ .init_temprop = xfrm6_init_temprop,
.tmpl_sort = __xfrm6_tmpl_sort,
.state_sort = __xfrm6_state_sort,
.output = xfrm6_output,
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 79986a6..fd55b51 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -824,8 +824,8 @@
err = irda_open_tsap(self, addr->sir_lsap_sel, addr->sir_name);
if (err < 0) {
- kfree(self->ias_obj->name);
- kfree(self->ias_obj);
+ irias_delete_object(self->ias_obj);
+ self->ias_obj = NULL;
goto out;
}
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index a788f9e..6130f9d 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -1102,7 +1102,7 @@
memcpy(&val_len, buf+n, 2); /* To avoid alignment problems */
le16_to_cpus(&val_len); n+=2;
- if (val_len > 1016) {
+ if (val_len >= 1016) {
IRDA_DEBUG(2, "%s(), parameter length to long\n", __func__ );
return -RSP_INVALID_COMMAND_FORMAT;
}
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index 9616c32..5bb8353 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -169,6 +169,7 @@
{
struct irlan_cb *self = netdev_priv(dev);
int ret;
+ unsigned int len;
/* skb headroom large enough to contain all IrDA-headers? */
if ((skb_headroom(skb) < self->max_header_size) || (skb_shared(skb))) {
@@ -188,6 +189,7 @@
dev->trans_start = jiffies;
+ len = skb->len;
/* Now queue the packet in the transport layer */
if (self->use_udata)
ret = irttp_udata_request(self->tsap_data, skb);
@@ -209,7 +211,7 @@
self->stats.tx_dropped++;
} else {
self->stats.tx_packets++;
- self->stats.tx_bytes += skb->len;
+ self->stats.tx_bytes += len;
}
return NETDEV_TX_OK;
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 58c6c4c..1ae6976 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -132,7 +132,7 @@
printk("\n");
}
- if (data_len < ETH_HLEN)
+ if (!pskb_may_pull(skb, ETH_HLEN))
goto error;
secpath_reset(skb);
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 023ba82..5826129 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -1024,7 +1024,8 @@
{
struct sock *sk = sock->sk;
struct llc_sock *llc = llc_sk(sk);
- int rc = -EINVAL, opt;
+ unsigned int opt;
+ int rc = -EINVAL;
lock_sock(sk);
if (unlikely(level != SOL_LLC || optlen != sizeof(int)))
diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c
index e4dae02..cf4aea3 100644
--- a/net/llc/llc_station.c
+++ b/net/llc/llc_station.c
@@ -689,7 +689,7 @@
int __init llc_station_init(void)
{
- u16 rc = -ENOBUFS;
+ int rc = -ENOBUFS;
struct sk_buff *skb;
struct llc_station_state_ev *ev;
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index c893f23..8f23401 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -175,6 +175,8 @@
set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
+ del_timer_sync(&tid_tx->addba_resp_timer);
+
/*
* After this packets are no longer handed right through
* to the driver but are put onto tid_tx->pending instead,
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 798a91b..ded5c38 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -732,6 +732,12 @@
rtnl_unlock();
+ /*
+ * Now all work items will be gone, but the
+ * timer might still be armed, so delete it
+ */
+ del_timer_sync(&local->work_timer);
+
cancel_work_sync(&local->reconfig_filter);
ieee80211_clear_tx_pending(local);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index fa0f37e..28624282 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2199,9 +2199,6 @@
struct net_device *prev_dev = NULL;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
- if (status->flag & RX_FLAG_INTERNAL_CMTR)
- goto out_free_skb;
-
if (skb_headroom(skb) < sizeof(*rthdr) &&
pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
goto out_free_skb;
@@ -2260,7 +2257,6 @@
} else
goto out_free_skb;
- status->flag |= RX_FLAG_INTERNAL_CMTR;
return;
out_free_skb:
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 10caec5..34da679 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -377,7 +377,7 @@
skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2) {
skb2->dev = prev_dev;
- netif_receive_skb(skb2);
+ netif_rx(skb2);
}
}
@@ -386,7 +386,7 @@
}
if (prev_dev) {
skb->dev = prev_dev;
- netif_receive_skb(skb);
+ netif_rx(skb);
skb = NULL;
}
rcu_read_unlock();
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 78b505d..fdaec7d 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -27,7 +27,7 @@
static DEFINE_MUTEX(afinfo_mutex);
-const struct nf_afinfo *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
+const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
EXPORT_SYMBOL(nf_afinfo);
int nf_register_afinfo(const struct nf_afinfo *afinfo)
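
The __rcu annotations added here and elsewhere in this series (nf_log, nf_queue, conntrack, NAT) are sparse-only markers: they let "make C=1" warn when an RCU-protected pointer is read or written without the RCU accessors; the generated code is unchanged. A fragment sketching the access pattern being enforced (illustrative types, not the netfilter code):

#include <linux/rcupdate.h>

struct handler { void (*fn)(void); };

static const struct handler __rcu *handlers[16];

static void handler_install(int i, const struct handler *h)
{
	rcu_assign_pointer(handlers[i], h);	/* publish: barrier + store */
}

static const struct handler *handler_lookup(int i)
{
	/* caller must be inside rcu_read_lock()/rcu_read_unlock() */
	return rcu_dereference(handlers[i]);
}
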
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 4f8ddba4..4c2f89d 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -924,6 +924,7 @@
ip_vs_out_stats(cp, skb);
ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp);
+ ip_vs_update_conntrack(skb, cp, 0);
ip_vs_conn_put(cp);
skb->ipvs_property = 1;
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index f228a17..7e9af5b 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -45,6 +45,7 @@
#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#include <linux/gfp.h>
#include <net/protocol.h>
@@ -359,7 +360,7 @@
buf_len = strlen(buf);
ct = nf_ct_get(skb, &ctinfo);
- if (ct && !nf_ct_is_untracked(ct)) {
+ if (ct && !nf_ct_is_untracked(ct) && nfct_nat(ct)) {
/* If mangling fails this function will return 0
* which will cause the packet to be dropped.
* Mangling can only fail under memory pressure,
@@ -409,7 +410,6 @@
union nf_inet_addr to;
__be16 port;
struct ip_vs_conn *n_cp;
- struct nf_conn *ct;
#ifdef CONFIG_IP_VS_IPV6
/* This application helper doesn't work with IPv6 yet,
@@ -496,11 +496,6 @@
ip_vs_control_add(n_cp, cp);
}
- ct = (struct nf_conn *)skb->nfct;
- if (ct && ct != &nf_conntrack_untracked)
- ip_vs_expect_related(skb, ct, n_cp,
- IPPROTO_TCP, &n_cp->dport, 1);
-
/*
* Move tunnel to listen state
*/
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 21e1a5e..49df6be 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -349,8 +349,8 @@
}
#endif
-static void
-ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp)
+void
+ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin)
{
struct nf_conn *ct = (struct nf_conn *)skb->nfct;
struct nf_conntrack_tuple new_tuple;
@@ -365,11 +365,17 @@
* real-server we will see RIP->DIP.
*/
new_tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
- new_tuple.src.u3 = cp->daddr;
+ if (outin)
+ new_tuple.src.u3 = cp->daddr;
+ else
+ new_tuple.dst.u3 = cp->vaddr;
/*
* This will also take care of UDP and other protocols.
*/
- new_tuple.src.u.tcp.port = cp->dport;
+ if (outin)
+ new_tuple.src.u.tcp.port = cp->dport;
+ else
+ new_tuple.dst.u.tcp.port = cp->vport;
nf_conntrack_alter_reply(ct, &new_tuple);
}
@@ -428,7 +434,7 @@
IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT");
- ip_vs_update_conntrack(skb, cp);
+ ip_vs_update_conntrack(skb, cp, 1);
/* FIXME: when application helper enlarges the packet and the length
is larger than the MTU of outgoing device, there will be still
@@ -506,7 +512,7 @@
IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT");
- ip_vs_update_conntrack(skb, cp);
+ ip_vs_update_conntrack(skb, cp, 1);
/* FIXME: when application helper enlarges the packet and the length
is larger than the MTU of outgoing device, there will be still
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index cdcc764..5702de3 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -26,10 +26,10 @@
static DEFINE_MUTEX(nf_ct_ecache_mutex);
-struct nf_ct_event_notifier *nf_conntrack_event_cb __read_mostly;
+struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_event_cb);
-struct nf_exp_event_notifier *nf_expect_event_cb __read_mostly;
+struct nf_exp_event_notifier __rcu *nf_expect_event_cb __read_mostly;
EXPORT_SYMBOL_GPL(nf_expect_event_cb);
/* deliver cached events and clear cache entry - must be called with locally
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 7dcf7a4..bd82450 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -16,7 +16,7 @@
#include <linux/skbuff.h>
#include <net/netfilter/nf_conntrack_extend.h>
-static struct nf_ct_ext_type *nf_ct_ext_types[NF_CT_EXT_NUM];
+static struct nf_ct_ext_type __rcu *nf_ct_ext_types[NF_CT_EXT_NUM];
static DEFINE_MUTEX(nf_ct_ext_type_mutex);
void __nf_ct_ext_destroy(struct nf_conn *ct)
@@ -48,15 +48,17 @@
{
unsigned int off, len;
struct nf_ct_ext_type *t;
+ size_t alloc_size;
rcu_read_lock();
t = rcu_dereference(nf_ct_ext_types[id]);
BUG_ON(t == NULL);
off = ALIGN(sizeof(struct nf_ct_ext), t->align);
len = off + t->len;
+ alloc_size = t->alloc_size;
rcu_read_unlock();
- *ext = kzalloc(t->alloc_size, gfp);
+ *ext = kzalloc(alloc_size, gfp);
if (!*ext)
return NULL;
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 5bae1cd..146476c 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -22,6 +22,7 @@
#include <linux/rculist_nulls.h>
#include <linux/types.h>
#include <linux/timer.h>
+#include <linux/security.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/netlink.h>
@@ -245,16 +246,31 @@
#ifdef CONFIG_NF_CONNTRACK_SECMARK
static inline int
-ctnetlink_dump_secmark(struct sk_buff *skb, const struct nf_conn *ct)
+ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
{
- NLA_PUT_BE32(skb, CTA_SECMARK, htonl(ct->secmark));
- return 0;
+ struct nlattr *nest_secctx;
+ int len, ret;
+ char *secctx;
+ ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
+ if (ret)
+ return ret;
+
+ ret = -1;
+ nest_secctx = nla_nest_start(skb, CTA_SECCTX | NLA_F_NESTED);
+ if (!nest_secctx)
+ goto nla_put_failure;
+
+ NLA_PUT_STRING(skb, CTA_SECCTX_NAME, secctx);
+ nla_nest_end(skb, nest_secctx);
+
+ ret = 0;
nla_put_failure:
- return -1;
+ security_release_secctx(secctx, len);
+ return ret;
}
#else
-#define ctnetlink_dump_secmark(a, b) (0)
+#define ctnetlink_dump_secctx(a, b) (0)
#endif
#define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
@@ -391,7 +407,7 @@
ctnetlink_dump_protoinfo(skb, ct) < 0 ||
ctnetlink_dump_helpinfo(skb, ct) < 0 ||
ctnetlink_dump_mark(skb, ct) < 0 ||
- ctnetlink_dump_secmark(skb, ct) < 0 ||
+ ctnetlink_dump_secctx(skb, ct) < 0 ||
ctnetlink_dump_id(skb, ct) < 0 ||
ctnetlink_dump_use(skb, ct) < 0 ||
ctnetlink_dump_master(skb, ct) < 0 ||
@@ -437,6 +453,17 @@
;
}
+#ifdef CONFIG_NF_CONNTRACK_SECMARK
+static int ctnetlink_nlmsg_secctx_size(const struct nf_conn *ct)
+{
+ int len;
+
+ security_secid_to_secctx(ct->secmark, NULL, &len);
+
+ return sizeof(char) * len;
+}
+#endif
+
static inline size_t
ctnetlink_nlmsg_size(const struct nf_conn *ct)
{
@@ -453,7 +480,8 @@
+ nla_total_size(0) /* CTA_HELP */
+ nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
#ifdef CONFIG_NF_CONNTRACK_SECMARK
- + nla_total_size(sizeof(u_int32_t)) /* CTA_SECMARK */
+ + nla_total_size(0) /* CTA_SECCTX */
+ + nla_total_size(ctnetlink_nlmsg_secctx_size(ct)) /* CTA_SECCTX_NAME */
#endif
#ifdef CONFIG_NF_NAT_NEEDED
+ 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
@@ -556,7 +584,7 @@
#ifdef CONFIG_NF_CONNTRACK_SECMARK
if ((events & (1 << IPCT_SECMARK) || ct->secmark)
- && ctnetlink_dump_secmark(skb, ct) < 0)
+ && ctnetlink_dump_secctx(skb, ct) < 0)
goto nla_put_failure;
#endif
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index 5886ba1..ed6d929 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -28,8 +28,8 @@
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_core.h>
-static struct nf_conntrack_l4proto **nf_ct_protos[PF_MAX] __read_mostly;
-struct nf_conntrack_l3proto *nf_ct_l3protos[AF_MAX] __read_mostly;
+static struct nf_conntrack_l4proto __rcu **nf_ct_protos[PF_MAX] __read_mostly;
+struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[AF_MAX] __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_l3protos);
static DEFINE_MUTEX(nf_ct_proto_mutex);
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 53d8922..f64de95 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1376,7 +1376,7 @@
unsigned int msglen, origlen;
const char *dptr, *end;
s16 diff, tdiff = 0;
- int ret;
+ int ret = NF_ACCEPT;
typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust;
if (ctinfo != IP_CT_ESTABLISHED &&
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index eb973fc..0fb6570 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -15,6 +15,7 @@
#include <linux/seq_file.h>
#include <linux/percpu.h>
#include <linux/netdevice.h>
+#include <linux/security.h>
#include <net/net_namespace.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
@@ -108,6 +109,29 @@
rcu_read_unlock();
}
+#ifdef CONFIG_NF_CONNTRACK_SECMARK
+static int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
+{
+ int ret;
+ u32 len;
+ char *secctx;
+
+ ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
+ if (ret)
+ return ret;
+
+ ret = seq_printf(s, "secctx=%s ", secctx);
+
+ security_release_secctx(secctx, len);
+ return ret;
+}
+#else
+static inline int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
+{
+ return 0;
+}
+#endif
+
/* return 0 on success, 1 in case of error */
static int ct_seq_show(struct seq_file *s, void *v)
{
@@ -168,10 +192,8 @@
goto release;
#endif
-#ifdef CONFIG_NF_CONNTRACK_SECMARK
- if (seq_printf(s, "secmark=%u ", ct->secmark))
+ if (ct_show_secctx(s, ct))
goto release;
-#endif
#ifdef CONFIG_NF_CONNTRACK_ZONES
if (seq_printf(s, "zone=%u ", nf_ct_zone(ct)))
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 7df37fd..b07393e 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -16,7 +16,7 @@
#define NF_LOG_PREFIXLEN 128
#define NFLOGGER_NAME_LEN 64
-static const struct nf_logger *nf_loggers[NFPROTO_NUMPROTO] __read_mostly;
+static const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO] __read_mostly;
static struct list_head nf_loggers_l[NFPROTO_NUMPROTO] __read_mostly;
static DEFINE_MUTEX(nf_log_mutex);
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 78b3cf9c..74aebed 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -18,7 +18,7 @@
* long term mutex. The handler must provide an outfn() to accept packets
* for queueing and must reinject all packets it receives, no matter what.
*/
-static const struct nf_queue_handler *queue_handler[NFPROTO_NUMPROTO] __read_mostly;
+static const struct nf_queue_handler __rcu *queue_handler[NFPROTO_NUMPROTO] __read_mostly;
static DEFINE_MUTEX(queue_handler_mutex);
diff --git a/net/netfilter/nf_tproxy_core.c b/net/netfilter/nf_tproxy_core.c
index 5490fc3..daab8c4 100644
--- a/net/netfilter/nf_tproxy_core.c
+++ b/net/netfilter/nf_tproxy_core.c
@@ -70,7 +70,11 @@
int
nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
{
- if (inet_sk(sk)->transparent) {
+ bool transparent = (sk->sk_state == TCP_TIME_WAIT) ?
+ inet_twsk(sk)->tw_transparent :
+ inet_sk(sk)->transparent;
+
+ if (transparent) {
skb_orphan(skb);
skb->sk = sk;
skb->destructor = nf_tproxy_destructor;
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 0cb6053..782e519 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -9,7 +9,6 @@
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
-#include <linux/selinux.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter/x_tables.h>
diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c
index 23b2d6c..9faf5e0 100644
--- a/net/netfilter/xt_SECMARK.c
+++ b/net/netfilter/xt_SECMARK.c
@@ -14,8 +14,8 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
+#include <linux/security.h>
#include <linux/skbuff.h>
-#include <linux/selinux.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_SECMARK.h>
@@ -39,9 +39,8 @@
switch (mode) {
case SECMARK_MODE_SEL:
- secmark = info->u.sel.selsid;
+ secmark = info->secid;
break;
-
default:
BUG();
}
@@ -50,33 +49,33 @@
return XT_CONTINUE;
}
-static int checkentry_selinux(struct xt_secmark_target_info *info)
+static int checkentry_lsm(struct xt_secmark_target_info *info)
{
int err;
- struct xt_secmark_target_selinux_info *sel = &info->u.sel;
- sel->selctx[SECMARK_SELCTX_MAX - 1] = '\0';
+ info->secctx[SECMARK_SECCTX_MAX - 1] = '\0';
+ info->secid = 0;
- err = selinux_string_to_sid(sel->selctx, &sel->selsid);
+ err = security_secctx_to_secid(info->secctx, strlen(info->secctx),
+ &info->secid);
if (err) {
if (err == -EINVAL)
- pr_info("invalid SELinux context \'%s\'\n",
- sel->selctx);
+ pr_info("invalid security context \'%s\'\n", info->secctx);
return err;
}
- if (!sel->selsid) {
- pr_info("unable to map SELinux context \'%s\'\n", sel->selctx);
+ if (!info->secid) {
+ pr_info("unable to map security context \'%s\'\n", info->secctx);
return -ENOENT;
}
- err = selinux_secmark_relabel_packet_permission(sel->selsid);
+ err = security_secmark_relabel_packet(info->secid);
if (err) {
pr_info("unable to obtain relabeling permission\n");
return err;
}
- selinux_secmark_refcount_inc();
+ security_secmark_refcount_inc();
return 0;
}
@@ -100,16 +99,16 @@
switch (info->mode) {
case SECMARK_MODE_SEL:
- err = checkentry_selinux(info);
- if (err <= 0)
- return err;
break;
-
default:
pr_info("invalid mode: %hu\n", info->mode);
return -EINVAL;
}
+ err = checkentry_lsm(info);
+ if (err)
+ return err;
+
if (!mode)
mode = info->mode;
return 0;
@@ -119,7 +118,7 @@
{
switch (mode) {
case SECMARK_MODE_SEL:
- selinux_secmark_refcount_dec();
+ security_secmark_refcount_dec();
}
}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 8648a99..cd96ed3 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1406,7 +1406,7 @@
struct netlink_sock *nlk = nlk_sk(sk);
int noblock = flags&MSG_DONTWAIT;
size_t copied;
- struct sk_buff *skb, *frag __maybe_unused = NULL;
+ struct sk_buff *skb, *data_skb;
int err;
if (flags&MSG_OOB)
@@ -1418,45 +1418,35 @@
if (skb == NULL)
goto out;
+ data_skb = skb;
+
#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
if (unlikely(skb_shinfo(skb)->frag_list)) {
- bool need_compat = !!(flags & MSG_CMSG_COMPAT);
-
/*
- * If this skb has a frag_list, then here that means that
- * we will have to use the frag_list skb for compat tasks
- * and the regular skb for non-compat tasks.
+ * If this skb has a frag_list, that means that we will
+ * have to use the frag_list skb's data for compat tasks
+ * and the regular skb's data for normal (non-compat) tasks.
*
- * The skb might (and likely will) be cloned, so we can't
- * just reset frag_list and go on with things -- we need to
- * keep that. For the compat case that's easy -- simply get
- * a reference to the compat skb and free the regular one
- * including the frag. For the non-compat case, we need to
- * avoid sending the frag to the user -- so assign NULL but
- * restore it below before freeing the skb.
+ * If we need to send the compat skb, assign it to the
+ * 'data_skb' variable so that it will be used below for data
+ * copying. We keep 'skb' for everything else, including
+ * freeing both later.
*/
- if (need_compat) {
- struct sk_buff *compskb = skb_shinfo(skb)->frag_list;
- skb_get(compskb);
- kfree_skb(skb);
- skb = compskb;
- } else {
- frag = skb_shinfo(skb)->frag_list;
- skb_shinfo(skb)->frag_list = NULL;
- }
+ if (flags & MSG_CMSG_COMPAT)
+ data_skb = skb_shinfo(skb)->frag_list;
}
#endif
msg->msg_namelen = 0;
- copied = skb->len;
+ copied = data_skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
- skb_reset_transport_header(skb);
- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ skb_reset_transport_header(data_skb);
+ err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied);
if (msg->msg_name) {
struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
@@ -1476,11 +1466,7 @@
}
siocb->scm->creds = *NETLINK_CREDS(skb);
if (flags & MSG_TRUNC)
- copied = skb->len;
-
-#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
- skb_shinfo(skb)->frag_list = frag;
-#endif
+ copied = data_skb->len;
skb_free_datagram(sk, skb);
@@ -2116,6 +2102,26 @@
#endif
}
+static void __init netlink_add_usersock_entry(void)
+{
+ unsigned long *listeners;
+ int groups = 32;
+
+ listeners = kzalloc(NLGRPSZ(groups) + sizeof(struct listeners_rcu_head),
+ GFP_KERNEL);
+ if (!listeners)
+ panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
+
+ netlink_table_grab();
+
+ nl_table[NETLINK_USERSOCK].groups = groups;
+ nl_table[NETLINK_USERSOCK].listeners = listeners;
+ nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
+ nl_table[NETLINK_USERSOCK].registered = 1;
+
+ netlink_table_ungrab();
+}
+
static struct pernet_operations __net_initdata netlink_net_ops = {
.init = netlink_net_init,
.exit = netlink_net_exit,
@@ -2164,6 +2170,8 @@
hash->rehash_time = jiffies;
}
+ netlink_add_usersock_entry();
+
sock_register(&netlink_family_ops);
register_pernet_subsys(&netlink_net_ops);
/* The netlink device handler may be needed early. */
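netlink_add_usersock_entry() pre-registers the NETLINK_USERSOCK protocol (32 multicast groups) so plain user-to-user netlink messaging works without any kernel-side module claiming the protocol. From userspace the entry is reached the usual way; a hedged sketch:

#include <sys/socket.h>
#include <linux/netlink.h>

int open_usersock(void)
{
	/* user<->user messaging over the entry registered above */
	return socket(AF_NETLINK, SOCK_RAW, NETLINK_USERSOCK);
}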
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index b2a3ae6..1500302 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -225,12 +225,13 @@
static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
{
struct pep_sock *pn = pep_sk(sk);
- struct pnpipehdr *hdr = pnp_hdr(skb);
+ struct pnpipehdr *hdr;
int wake = 0;
if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
return -EINVAL;
+ hdr = pnp_hdr(skb);
if (hdr->data[0] != PN_PEP_TYPE_COMMON) {
LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n",
(unsigned)hdr->data[0]);
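The pep.c fix above moves pnp_hdr() after pskb_may_pull(): pulling may reallocate skb->head, which would leave an earlier header pointer dangling. The general shape, as a hypothetical parser:

static int parse_status(struct sk_buff *skb)
{
	struct pnpipehdr *hdr;

	if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
		return -EINVAL;
	hdr = pnp_hdr(skb);	/* compute only after the pull succeeded */
	return hdr->data[0];
}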
diff --git a/net/rds/page.c b/net/rds/page.c
index 595a952..1dfbfea 100644
--- a/net/rds/page.c
+++ b/net/rds/page.c
@@ -57,30 +57,17 @@
unsigned long ret;
void *addr;
- if (to_user)
+ addr = kmap(page);
+ if (to_user) {
rds_stats_add(s_copy_to_user, bytes);
- else
+ ret = copy_to_user(ptr, addr + offset, bytes);
+ } else {
rds_stats_add(s_copy_from_user, bytes);
-
- addr = kmap_atomic(page, KM_USER0);
- if (to_user)
- ret = __copy_to_user_inatomic(ptr, addr + offset, bytes);
- else
- ret = __copy_from_user_inatomic(addr + offset, ptr, bytes);
- kunmap_atomic(addr, KM_USER0);
-
- if (ret) {
- addr = kmap(page);
- if (to_user)
- ret = copy_to_user(ptr, addr + offset, bytes);
- else
- ret = copy_from_user(addr + offset, ptr, bytes);
- kunmap(page);
- if (ret)
- return -EFAULT;
+ ret = copy_from_user(addr + offset, ptr, bytes);
}
+ kunmap(page);
- return 0;
+ return ret ? -EFAULT : 0;
}
EXPORT_SYMBOL_GPL(rds_page_copy_user);
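The rds_page_copy_user() rewrite follows one rule: copy_to_user()/copy_from_user() may fault and sleep, which is forbidden under kmap_atomic(). Dropping the atomic fast path leaves the simple, always-legal form; a sketch with hypothetical names:

static int copy_page_to_user(void __user *ubuf, struct page *page,
			     unsigned long offset, size_t bytes)
{
	void *addr = kmap(page);	/* sleeping mapping: faults are fine */
	unsigned long left = copy_to_user(ubuf, addr + offset, bytes);

	kunmap(page);
	return left ? -EFAULT : 0;
}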
diff --git a/net/rds/recv.c b/net/rds/recv.c
index 795a00b..c93588c 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -297,7 +297,7 @@
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
{
struct rds_notifier *notifier;
- struct rds_rdma_notify cmsg;
+ struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */
unsigned int count = 0, max_messages = ~0U;
unsigned long flags;
LIST_HEAD(copy);
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index c397524..c519939 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -43,7 +43,7 @@
struct rds_connection *conn;
struct rds_tcp_connection *tc;
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
conn = sk->sk_user_data;
if (conn == NULL) {
state_change = sk->sk_state_change;
@@ -68,7 +68,7 @@
break;
}
out:
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
state_change(sk);
}
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 975183f..27844f2 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -114,7 +114,7 @@
rdsdebug("listen data ready sk %p\n", sk);
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
ready = sk->sk_user_data;
if (ready == NULL) { /* check for teardown race */
ready = sk->sk_data_ready;
@@ -131,7 +131,7 @@
queue_work(rds_wq, &rds_tcp_listen_work);
out:
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
ready(sk, bytes);
}
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index 1aba687..e437974 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -324,7 +324,7 @@
rdsdebug("data ready sk %p bytes %d\n", sk, bytes);
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
conn = sk->sk_user_data;
if (conn == NULL) { /* check for teardown race */
ready = sk->sk_data_ready;
@@ -338,7 +338,7 @@
if (rds_tcp_read_sock(conn, GFP_ATOMIC, KM_SOFTIRQ0) == -ENOMEM)
queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
out:
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
ready(sk, bytes);
}
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index a28b895..2f012a0 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -224,7 +224,7 @@
struct rds_connection *conn;
struct rds_tcp_connection *tc;
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
conn = sk->sk_user_data;
if (conn == NULL) {
write_space = sk->sk_write_space;
@@ -244,7 +244,7 @@
queue_delayed_work(rds_wq, &conn->c_send_w, 0);
out:
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
/*
* write_space is only called when data leaves tcp's send queue if
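All of the rds tcp_* hunks make the same conversion. These callbacks also run from softirq context with sk_callback_lock held, so a process-context reader must disable bottom halves first; otherwise a softirq arriving on the same CPU can try to re-acquire the lock and deadlock. The safe pattern, sketched:

static void example_state_change(struct sock *sk)
{
	read_lock_bh(&sk->sk_callback_lock);	/* BHs off on this CPU */
	if (sk->sk_user_data) {
		/* ... act on the connection ... */
	}
	read_unlock_bh(&sk->sk_callback_lock);
}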
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 8e45e76..d952e7e 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -679,7 +679,7 @@
if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
return -EINVAL;
- if (addr->srose_ndigis > ROSE_MAX_DIGIS)
+ if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
return -EINVAL;
if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) {
@@ -739,7 +739,7 @@
if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
return -EINVAL;
- if (addr->srose_ndigis > ROSE_MAX_DIGIS)
+ if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
return -EINVAL;
/* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 537a487..7ebf743 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -350,22 +350,19 @@
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_police *police = a->priv;
- struct tc_police opt;
+ struct tc_police opt = {
+ .index = police->tcf_index,
+ .action = police->tcf_action,
+ .mtu = police->tcfp_mtu,
+ .burst = police->tcfp_burst,
+ .refcnt = police->tcf_refcnt - ref,
+ .bindcnt = police->tcf_bindcnt - bind,
+ };
- opt.index = police->tcf_index;
- opt.action = police->tcf_action;
- opt.mtu = police->tcfp_mtu;
- opt.burst = police->tcfp_burst;
- opt.refcnt = police->tcf_refcnt - ref;
- opt.bindcnt = police->tcf_bindcnt - bind;
if (police->tcfp_R_tab)
opt.rate = police->tcfp_R_tab->rate;
- else
- memset(&opt.rate, 0, sizeof(opt.rate));
if (police->tcfp_P_tab)
opt.peakrate = police->tcfp_P_tab->rate;
- else
- memset(&opt.peakrate, 0, sizeof(opt.peakrate));
NLA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
if (police->tcfp_result)
NLA_PUT_U32(skb, TCA_POLICE_RESULT, police->tcfp_result);
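The act_police dump above leans on a C99 guarantee: members omitted from a designated initializer are zero-initialized, so the explicit memset() of opt.rate and opt.peakrate became dead code. Illustrated with a hypothetical struct:

struct example_opt {
	int index;
	int rate;	/* not named below, so implicitly zero */
};

static void fill(void)
{
	struct example_opt opt = {
		.index = 42,	/* .rate == 0 without any memset() */
	};
	(void)opt;
}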
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 78ef2c5..37dff78 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -123,7 +123,7 @@
* calls by looking at the number of nested bh disable calls because
* softirqs always disables bh.
*/
- if (softirq_count() != SOFTIRQ_OFFSET) {
+ if (in_serving_softirq()) {
/* If there is an sk_classid we'll use that. */
if (!skb->sk)
return -1;
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 7416a5c..b0c2a82 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -137,7 +137,7 @@
int toff = off + key->off + (off2 & key->offmask);
__be32 *data, _data;
- if (skb_headroom(skb) + toff < 0)
+ if (skb_headroom(skb) + toff > INT_MAX)
goto out;
data = skb_header_pointer(skb, toff, 4, &_data);
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 34066278..6318e11 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -255,10 +255,6 @@
error = -EINVAL;
goto err_out;
}
- if (!list_empty(&flow->list)) {
- error = -EEXIST;
- goto err_out;
- }
} else {
int i;
unsigned long cl;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index abd904b..4749609 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -761,8 +761,8 @@
if (f != cl->cl_f) {
cl->cl_f = f;
cftree_update(cl);
- update_cfmin(cl->cl_parent);
}
+ update_cfmin(cl->cl_parent);
}
}
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 8636639..ddbbf7c 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -543,16 +543,20 @@
id = ntohs(hmacs->hmac_ids[i]);
/* Check the id is in the supported range */
- if (id > SCTP_AUTH_HMAC_ID_MAX)
+ if (id > SCTP_AUTH_HMAC_ID_MAX) {
+ id = 0;
continue;
+ }
/* See if we support the id. Supported IDs have name and
* length fields set, so that we can allocate and use
* them. We can safely just check for name, for without the
* name, we can't allocate the TFM.
*/
- if (!sctp_hmac_list[id].hmac_name)
+ if (!sctp_hmac_list[id].hmac_name) {
+ id = 0;
continue;
+ }
break;
}
diff --git a/net/sctp/output.c b/net/sctp/output.c
index a646681..bcc4590 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -92,7 +92,6 @@
SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __func__,
packet, vtag);
- sctp_packet_reset(packet);
packet->vtag = vtag;
if (ecn_capable && sctp_packet_empty(packet)) {
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 24b2cd5..d344dc4 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -1232,6 +1232,18 @@
return 0;
}
+static bool list_has_sctp_addr(const struct list_head *list,
+ union sctp_addr *ipaddr)
+{
+ struct sctp_transport *addr;
+
+ list_for_each_entry(addr, list, transports) {
+ if (sctp_cmp_addr_exact(ipaddr, &addr->ipaddr))
+ return true;
+ }
+
+ return false;
+}
/* A restart is occurring, check to make sure no new addresses
* are being added as we may be under a takeover attack.
*/
@@ -1240,10 +1252,10 @@
struct sctp_chunk *init,
sctp_cmd_seq_t *commands)
{
- struct sctp_transport *new_addr, *addr;
- int found;
+ struct sctp_transport *new_addr;
+ int ret = 1;
- /* Implementor's Guide - Sectin 5.2.2
+ /* Implementor's Guide - Section 5.2.2
* ...
* Before responding the endpoint MUST check to see if the
* unexpected INIT adds new addresses to the association. If new
@@ -1254,31 +1266,19 @@
/* Search through all current addresses and make sure
* we aren't adding any new ones.
*/
- new_addr = NULL;
- found = 0;
-
list_for_each_entry(new_addr, &new_asoc->peer.transport_addr_list,
- transports) {
- found = 0;
- list_for_each_entry(addr, &asoc->peer.transport_addr_list,
- transports) {
- if (sctp_cmp_addr_exact(&new_addr->ipaddr,
- &addr->ipaddr)) {
- found = 1;
- break;
- }
- }
- if (!found)
+ transports) {
+ if (!list_has_sctp_addr(&asoc->peer.transport_addr_list,
+ &new_addr->ipaddr)) {
+ sctp_sf_send_restart_abort(&new_addr->ipaddr, init,
+ commands);
+ ret = 0;
break;
- }
-
- /* If a new address was added, ABORT the sender. */
- if (!found && new_addr) {
- sctp_sf_send_restart_abort(&new_addr->ipaddr, init, commands);
+ }
}
/* Return success if all addresses were found. */
- return found;
+ return ret;
}
/* Populate the verification/tie tags based on overlapping INIT
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index ca44917..fbb7077 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -916,6 +916,11 @@
/* Walk through the addrs buffer and count the number of addresses. */
addr_buf = kaddrs;
while (walk_size < addrs_size) {
+ if (walk_size + sizeof(sa_family_t) > addrs_size) {
+ kfree(kaddrs);
+ return -EINVAL;
+ }
+
sa_addr = (struct sockaddr *)addr_buf;
af = sctp_get_af_specific(sa_addr->sa_family);
@@ -1002,9 +1007,13 @@
/* Walk through the addrs buffer and count the number of addresses. */
addr_buf = kaddrs;
while (walk_size < addrs_size) {
+ if (walk_size + sizeof(sa_family_t) > addrs_size) {
+ err = -EINVAL;
+ goto out_free;
+ }
+
sa_addr = (union sctp_addr *)addr_buf;
af = sctp_get_af_specific(sa_addr->sa.sa_family);
- port = ntohs(sa_addr->v4.sin_port);
/* If the address family is not supported or if this address
* causes the address buffer to overflow return EINVAL.
@@ -1014,6 +1023,8 @@
goto out_free;
}
+ port = ntohs(sa_addr->v4.sin_port);
+
/* Save current address so we can work with it */
memcpy(&to, sa_addr, af->sockaddr_len);
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 36cb660..e9eaaf7 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -38,7 +38,7 @@
static LIST_HEAD(cred_unused);
static unsigned long number_cred_unused;
-#define MAX_HASHTABLE_BITS (10)
+#define MAX_HASHTABLE_BITS (14)
static int param_set_hashtbl_sz(const char *val, const struct kernel_param *kp)
{
unsigned long num;
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index dcfc66b..12c4859 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -745,17 +745,18 @@
struct rpc_inode *rpci = RPC_I(inode);
struct gss_upcall_msg *gss_msg;
+restart:
spin_lock(&inode->i_lock);
- while (!list_empty(&rpci->in_downcall)) {
+ list_for_each_entry(gss_msg, &rpci->in_downcall, list) {
- gss_msg = list_entry(rpci->in_downcall.next,
- struct gss_upcall_msg, list);
+ if (!list_empty(&gss_msg->msg.list))
+ continue;
gss_msg->msg.errno = -EPIPE;
atomic_inc(&gss_msg->count);
__gss_unhash_msg(gss_msg);
spin_unlock(&inode->i_lock);
gss_release_msg(gss_msg);
- spin_lock(&inode->i_lock);
+ goto restart;
}
spin_unlock(&inode->i_lock);
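The auth_gss hunk replaces a pop-head loop with a restart: because i_lock is dropped while gss_release_msg() runs (it may sleep), the list can change underneath the iterator, and the only safe continuation is to rescan from the top. A skeleton of the idiom, with hypothetical helpers:

restart:
	spin_lock(&lock);
	list_for_each_entry(msg, &queue, list) {
		if (!should_drop(msg))		/* hypothetical predicate */
			continue;
		unhash(msg);			/* hypothetical: take off the list */
		spin_unlock(&lock);
		release(msg);			/* may sleep */
		goto restart;			/* iterator is now stale */
	}
	spin_unlock(&lock);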
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 0326446..778e5df 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -237,6 +237,7 @@
if (!supported_gss_krb5_enctype(alg)) {
printk(KERN_WARNING "gss_kerberos_mech: unsupported "
"encryption key algorithm %d\n", alg);
+ p = ERR_PTR(-EINVAL);
goto out_err;
}
p = simple_get_netobj(p, end, &key);
@@ -282,15 +283,19 @@
ctx->enctype = ENCTYPE_DES_CBC_RAW;
ctx->gk5e = get_gss_krb5_enctype(ctx->enctype);
- if (ctx->gk5e == NULL)
+ if (ctx->gk5e == NULL) {
+ p = ERR_PTR(-EINVAL);
goto out_err;
+ }
/* The downcall format was designed before we completely understood
* the uses of the context fields; so it includes some stuff we
* just give some minimal sanity-checking, and some we ignore
* completely (like the next twenty bytes): */
- if (unlikely(p + 20 > end || p + 20 < p))
+ if (unlikely(p + 20 > end || p + 20 < p)) {
+ p = ERR_PTR(-EFAULT);
goto out_err;
+ }
p += 20;
p = simple_get_bytes(p, end, &tmp, sizeof(tmp));
if (IS_ERR(p))
@@ -619,6 +624,7 @@
if (ctx->seq_send64 != ctx->seq_send) {
dprintk("%s: seq_send64 %lx, seq_send %x overflow?\n", __func__,
(long unsigned)ctx->seq_send64, ctx->seq_send);
+ p = ERR_PTR(-EINVAL);
goto out_err;
}
p = simple_get_bytes(p, end, &ctx->enctype, sizeof(ctx->enctype));
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c
index dc3f1f5..adade3d 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_mech.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c
@@ -100,6 +100,7 @@
if (version != 1) {
dprintk("RPC: unknown spkm3 token format: "
"obsolete nfs-utils?\n");
+ p = ERR_PTR(-EINVAL);
goto out_err_free_ctx;
}
@@ -135,8 +136,10 @@
if (IS_ERR(p))
goto out_err_free_intg_alg;
- if (p != end)
+ if (p != end) {
+ p = ERR_PTR(-EFAULT);
goto out_err_free_intg_key;
+ }
ctx_id->internal_ctx_id = ctx;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 2388d83..fa55490 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -226,7 +226,7 @@
goto out_no_principal;
}
- kref_init(&clnt->cl_kref);
+ atomic_set(&clnt->cl_count, 1);
err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
if (err < 0)
@@ -390,14 +390,14 @@
if (new->cl_principal == NULL)
goto out_no_principal;
}
- kref_init(&new->cl_kref);
+ atomic_set(&new->cl_count, 1);
err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
if (err != 0)
goto out_no_path;
if (new->cl_auth)
atomic_inc(&new->cl_auth->au_count);
xprt_get(clnt->cl_xprt);
- kref_get(&clnt->cl_kref);
+ atomic_inc(&clnt->cl_count);
rpc_register_client(new);
rpciod_up();
return new;
@@ -465,10 +465,8 @@
* Free an RPC client
*/
static void
-rpc_free_client(struct kref *kref)
+rpc_free_client(struct rpc_clnt *clnt)
{
- struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref);
-
dprintk("RPC: destroying %s client for %s\n",
clnt->cl_protname, clnt->cl_server);
if (!IS_ERR(clnt->cl_path.dentry)) {
@@ -495,12 +493,10 @@
* Free an RPC client
*/
static void
-rpc_free_auth(struct kref *kref)
+rpc_free_auth(struct rpc_clnt *clnt)
{
- struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref);
-
if (clnt->cl_auth == NULL) {
- rpc_free_client(kref);
+ rpc_free_client(clnt);
return;
}
@@ -509,10 +505,11 @@
* release remaining GSS contexts. This mechanism ensures
* that it can do so safely.
*/
- kref_init(kref);
+ atomic_inc(&clnt->cl_count);
rpcauth_release(clnt->cl_auth);
clnt->cl_auth = NULL;
- kref_put(kref, rpc_free_client);
+ if (atomic_dec_and_test(&clnt->cl_count))
+ rpc_free_client(clnt);
}
/*
@@ -525,7 +522,8 @@
if (list_empty(&clnt->cl_tasks))
wake_up(&destroy_wait);
- kref_put(&clnt->cl_kref, rpc_free_auth);
+ if (atomic_dec_and_test(&clnt->cl_count))
+ rpc_free_auth(clnt);
}
/**
@@ -588,7 +586,7 @@
if (clnt != NULL) {
rpc_task_release_client(task);
task->tk_client = clnt;
- kref_get(&clnt->cl_kref);
+ atomic_inc(&clnt->cl_count);
if (clnt->cl_softrtry)
task->tk_flags |= RPC_TASK_SOFT;
/* Add to the client's list of all tasks */
@@ -931,7 +929,7 @@
task->tk_status = 0;
if (status >= 0) {
if (task->tk_rqstp) {
- task->tk_action = call_allocate;
+ task->tk_action = call_refresh;
return;
}
@@ -966,13 +964,54 @@
}
/*
- * 2. Allocate the buffer. For details, see sched.c:rpc_malloc.
+ * 2. Bind and/or refresh the credentials
+ */
+static void
+call_refresh(struct rpc_task *task)
+{
+ dprint_status(task);
+
+ task->tk_action = call_refreshresult;
+ task->tk_status = 0;
+ task->tk_client->cl_stats->rpcauthrefresh++;
+ rpcauth_refreshcred(task);
+}
+
+/*
+ * 2a. Process the results of a credential refresh
+ */
+static void
+call_refreshresult(struct rpc_task *task)
+{
+ int status = task->tk_status;
+
+ dprint_status(task);
+
+ task->tk_status = 0;
+ task->tk_action = call_allocate;
+ if (status >= 0 && rpcauth_uptodatecred(task))
+ return;
+ switch (status) {
+ case -EACCES:
+ rpc_exit(task, -EACCES);
+ return;
+ case -ENOMEM:
+ rpc_exit(task, -ENOMEM);
+ return;
+ case -ETIMEDOUT:
+ rpc_delay(task, 3*HZ);
+ }
+ task->tk_action = call_refresh;
+}
+
+/*
+ * 2b. Allocate the buffer. For details, see sched.c:rpc_malloc.
* (Note: buffer memory is freed in xprt_release).
*/
static void
call_allocate(struct rpc_task *task)
{
- unsigned int slack = task->tk_client->cl_auth->au_cslack;
+ unsigned int slack = task->tk_rqstp->rq_cred->cr_auth->au_cslack;
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_xprt *xprt = task->tk_xprt;
struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
@@ -980,7 +1019,7 @@
dprint_status(task);
task->tk_status = 0;
- task->tk_action = call_refresh;
+ task->tk_action = call_bind;
if (req->rq_buffer)
return;
@@ -1017,47 +1056,6 @@
rpc_exit(task, -ERESTARTSYS);
}
-/*
- * 2a. Bind and/or refresh the credentials
- */
-static void
-call_refresh(struct rpc_task *task)
-{
- dprint_status(task);
-
- task->tk_action = call_refreshresult;
- task->tk_status = 0;
- task->tk_client->cl_stats->rpcauthrefresh++;
- rpcauth_refreshcred(task);
-}
-
-/*
- * 2b. Process the results of a credential refresh
- */
-static void
-call_refreshresult(struct rpc_task *task)
-{
- int status = task->tk_status;
-
- dprint_status(task);
-
- task->tk_status = 0;
- task->tk_action = call_bind;
- if (status >= 0 && rpcauth_uptodatecred(task))
- return;
- switch (status) {
- case -EACCES:
- rpc_exit(task, -EACCES);
- return;
- case -ENOMEM:
- rpc_exit(task, -ENOMEM);
- return;
- case -ETIMEDOUT:
- rpc_delay(task, 3*HZ);
- }
- task->tk_action = call_refresh;
-}
-
static inline int
rpc_task_need_encode(struct rpc_task *task)
{
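The clnt.c conversion from struct kref to a bare atomic_t exists for one caller: rpc_info_open() (in the rpc_pipe.c diff below) must take a reference only if the client is still live, and atomic_inc_not_zero() expresses that directly. The reduced pattern, assuming a hypothetical obj type:

struct obj {
	atomic_t count;		/* starts at 1 via atomic_set() */
};

static struct obj *obj_get_live(struct obj *o)
{
	/* fails once the count has already dropped to zero */
	return atomic_inc_not_zero(&o->count) ? o : NULL;
}

static void obj_put(struct obj *o)
{
	if (atomic_dec_and_test(&o->count))
		kfree(o);
}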
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 95ccbcf..8c8eef2 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -48,7 +48,7 @@
return;
do {
msg = list_entry(head->next, struct rpc_pipe_msg, list);
- list_del(&msg->list);
+ list_del_init(&msg->list);
msg->errno = err;
destroy_msg(msg);
} while (!list_empty(head));
@@ -208,7 +208,7 @@
if (msg != NULL) {
spin_lock(&inode->i_lock);
msg->errno = -EAGAIN;
- list_del(&msg->list);
+ list_del_init(&msg->list);
spin_unlock(&inode->i_lock);
rpci->ops->destroy_msg(msg);
}
@@ -268,7 +268,7 @@
if (res < 0 || msg->len == msg->copied) {
filp->private_data = NULL;
spin_lock(&inode->i_lock);
- list_del(&msg->list);
+ list_del_init(&msg->list);
spin_unlock(&inode->i_lock);
rpci->ops->destroy_msg(msg);
}
@@ -371,21 +371,23 @@
static int
rpc_info_open(struct inode *inode, struct file *file)
{
- struct rpc_clnt *clnt;
+ struct rpc_clnt *clnt = NULL;
int ret = single_open(file, rpc_show_info, NULL);
if (!ret) {
struct seq_file *m = file->private_data;
- mutex_lock(&inode->i_mutex);
- clnt = RPC_I(inode)->private;
- if (clnt) {
- kref_get(&clnt->cl_kref);
+
+ spin_lock(&file->f_path.dentry->d_lock);
+ if (!d_unhashed(file->f_path.dentry))
+ clnt = RPC_I(inode)->private;
+ if (clnt != NULL && atomic_inc_not_zero(&clnt->cl_count)) {
+ spin_unlock(&file->f_path.dentry->d_lock);
m->private = clnt;
} else {
+ spin_unlock(&file->f_path.dentry->d_lock);
single_release(inode, file);
ret = -EINVAL;
}
- mutex_unlock(&inode->i_mutex);
}
return ret;
}
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index b6309db..fe9306b 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -800,7 +800,7 @@
u32 _xid;
__be32 *xp;
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
dprintk("RPC: xs_udp_data_ready...\n");
if (!(xprt = xprt_from_sock(sk)))
goto out;
@@ -852,7 +852,7 @@
dropit:
skb_free_datagram(sk, skb);
out:
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
}
static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
@@ -1229,7 +1229,7 @@
dprintk("RPC: xs_tcp_data_ready...\n");
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
if (!(xprt = xprt_from_sock(sk)))
goto out;
if (xprt->shutdown)
@@ -1248,7 +1248,7 @@
read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
} while (read > 0);
out:
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
}
/*
@@ -1301,7 +1301,7 @@
{
struct rpc_xprt *xprt;
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
if (!(xprt = xprt_from_sock(sk)))
goto out;
dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
@@ -1313,7 +1313,7 @@
switch (sk->sk_state) {
case TCP_ESTABLISHED:
- spin_lock_bh(&xprt->transport_lock);
+ spin_lock(&xprt->transport_lock);
if (!xprt_test_and_set_connected(xprt)) {
struct sock_xprt *transport = container_of(xprt,
struct sock_xprt, xprt);
@@ -1327,7 +1327,7 @@
xprt_wake_pending_tasks(xprt, -EAGAIN);
}
- spin_unlock_bh(&xprt->transport_lock);
+ spin_unlock(&xprt->transport_lock);
break;
case TCP_FIN_WAIT1:
/* The client initiated a shutdown of the socket */
@@ -1365,7 +1365,7 @@
xs_sock_mark_closed(xprt);
}
out:
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
}
/**
@@ -1376,7 +1376,7 @@
{
struct rpc_xprt *xprt;
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
if (!(xprt = xprt_from_sock(sk)))
goto out;
dprintk("RPC: %s client %p...\n"
@@ -1384,7 +1384,7 @@
__func__, xprt, sk->sk_err);
xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
}
static void xs_write_space(struct sock *sk)
@@ -1416,13 +1416,13 @@
*/
static void xs_udp_write_space(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
/* from net/core/sock.c:sock_def_write_space */
if (sock_writeable(sk))
xs_write_space(sk);
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
}
/**
@@ -1437,13 +1437,13 @@
*/
static void xs_tcp_write_space(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
/* from net/core/stream.c:sk_stream_write_space */
if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
xs_write_space(sk);
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
}
static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
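A side effect of the xprtsock _bh conversion: inside read_lock_bh() bottom halves are already disabled, so the inner transport_lock no longer needs its own _bh variant and drops to plain spin_lock(). The nesting, sketched:

static void example(struct sock *sk, struct rpc_xprt *xprt)
{
	read_lock_bh(&sk->sk_callback_lock);	/* disables BHs on this CPU */
	spin_lock(&xprt->transport_lock);	/* plain lock: BHs already off */
	/* ... update connection state ... */
	spin_unlock(&xprt->transport_lock);
	read_unlock_bh(&sk->sk_callback_lock);
}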
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 4414a18..0b39b24 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -692,6 +692,7 @@
static u32 ordernum = 1;
struct unix_address *addr;
int err;
+ unsigned int retries = 0;
mutex_lock(&u->readlock);
@@ -717,9 +718,17 @@
if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
addr->hash)) {
spin_unlock(&unix_table_lock);
- /* Sanity yield. It is unusual case, but yet... */
- if (!(ordernum&0xFF))
- yield();
+ /*
+ * __unix_find_socket_byname() may take long time if many names
+ * are already in use.
+ */
+ cond_resched();
+ /* Give up if all names seem to be in use. */
+ if (retries++ == 0xFFFFF) {
+ err = -ENOSPC;
+ kfree(addr);
+ goto out;
+ }
goto retry;
}
addr->hash ^= sk->sk_type;
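The af_unix autobind fix trades an occasional yield() for cond_resched() on every collision plus a hard retry cap, so a full 20-bit name space returns -ENOSPC instead of looping forever. The control flow in isolation, with a hypothetical probe function:

static int autobind(void)
{
	unsigned int retries;

	for (retries = 0; retries <= 0xFFFFF; retries++) {
		if (try_claim_name())	/* hypothetical: true on success */
			return 0;
		cond_resched();		/* the name scan can be slow */
	}
	return -ENOSPC;
}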
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 541e2ff..d6d046b 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -475,12 +475,10 @@
mutex_lock(&cfg80211_mutex);
res = device_add(&rdev->wiphy.dev);
- if (res)
- goto out_unlock;
-
- res = rfkill_register(rdev->rfkill);
- if (res)
- goto out_rm_dev;
+ if (res) {
+ mutex_unlock(&cfg80211_mutex);
+ return res;
+ }
/* set up regulatory info */
wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE);
@@ -509,13 +507,18 @@
cfg80211_debugfs_rdev_add(rdev);
mutex_unlock(&cfg80211_mutex);
+ /*
+ * due to a locking dependency this has to be outside of the
+ * cfg80211_mutex lock
+ */
+ res = rfkill_register(rdev->rfkill);
+ if (res)
+ goto out_rm_dev;
+
return 0;
out_rm_dev:
device_del(&rdev->wiphy.dev);
-
-out_unlock:
- mutex_unlock(&cfg80211_mutex);
return res;
}
EXPORT_SYMBOL(wiphy_register);
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index bb5e0a5..7e5c3a4 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -1420,6 +1420,9 @@
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
+ data->flags = 0;
+ data->length = 0;
+
switch (wdev->iftype) {
case NL80211_IFTYPE_ADHOC:
return cfg80211_ibss_wext_giwessid(dev, info, data, ssid);
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index 0ef17bc..8f5116f 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -782,6 +782,22 @@
}
}
+ if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
+ /*
+ * If this is a GET, but not NOMAX, it means that the extra
+ * data is not bounded by userspace, but by max_tokens. Thus
+ * set the length to max_tokens. This matches the extra data
+ * allocation.
+ * The driver should fill it with the number of tokens it
+ * provided, and it may check iwp->length rather than having
+ * knowledge of max_tokens. If the driver doesn't change the
+ * iwp->length, this ioctl just copies back max_tokens tokens
+ * filled with zeroes. Hopefully the driver isn't claiming
+ * them to be valid data.
+ */
+ iwp->length = descr->max_tokens;
+ }
+
err = handler(dev, info, (union iwreq_data *) iwp, extra);
iwp->length += essid_compat;
diff --git a/net/wireless/wext-priv.c b/net/wireless/wext-priv.c
index 3feb28e..674d426 100644
--- a/net/wireless/wext-priv.c
+++ b/net/wireless/wext-priv.c
@@ -152,7 +152,7 @@
} else if (!iwp->pointer)
return -EFAULT;
- extra = kmalloc(extra_size, GFP_KERNEL);
+ extra = kzalloc(extra_size, GFP_KERNEL);
if (!extra)
return -ENOMEM;
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index a3cca0a..64f2ae1 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -101,7 +101,7 @@
err = -EHOSTUNREACH;
goto error_nolock;
}
- skb_dst_set_noref(skb, dst);
+ skb_dst_set(skb, dst_clone(dst));
x = dst->xfrm;
} while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL));
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 2b3ed7a..cbab6e1 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1175,9 +1175,8 @@
tmpl->mode == XFRM_MODE_BEET) {
remote = &tmpl->id.daddr;
local = &tmpl->saddr;
- family = tmpl->encap_family;
- if (xfrm_addr_any(local, family)) {
- error = xfrm_get_saddr(net, &tmp, remote, family);
+ if (xfrm_addr_any(local, tmpl->encap_family)) {
+ error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family);
if (error)
goto fail;
local = &tmp;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 5208b12f..eb96ce5 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -656,15 +656,23 @@
EXPORT_SYMBOL(xfrm_sad_getinfo);
static int
-xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
- struct xfrm_tmpl *tmpl,
- xfrm_address_t *daddr, xfrm_address_t *saddr,
- unsigned short family)
+xfrm_init_tempstate(struct xfrm_state *x, struct flowi *fl,
+ struct xfrm_tmpl *tmpl,
+ xfrm_address_t *daddr, xfrm_address_t *saddr,
+ unsigned short family)
{
struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
if (!afinfo)
return -1;
- afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
+ afinfo->init_tempsel(&x->sel, fl);
+
+ if (family != tmpl->encap_family) {
+ xfrm_state_put_afinfo(afinfo);
+ afinfo = xfrm_state_get_afinfo(tmpl->encap_family);
+ if (!afinfo)
+ return -1;
+ }
+ afinfo->init_temprop(x, tmpl, daddr, saddr);
xfrm_state_put_afinfo(afinfo);
return 0;
}
@@ -790,37 +798,38 @@
int error = 0;
struct xfrm_state *best = NULL;
u32 mark = pol->mark.v & pol->mark.m;
+ unsigned short encap_family = tmpl->encap_family;
to_put = NULL;
spin_lock_bh(&xfrm_state_lock);
- h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, family);
+ h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
- if (x->props.family == family &&
+ if (x->props.family == encap_family &&
x->props.reqid == tmpl->reqid &&
(mark & x->mark.m) == x->mark.v &&
!(x->props.flags & XFRM_STATE_WILDRECV) &&
- xfrm_state_addr_check(x, daddr, saddr, family) &&
+ xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
tmpl->mode == x->props.mode &&
tmpl->id.proto == x->id.proto &&
(tmpl->id.spi == x->id.spi || !tmpl->id.spi))
- xfrm_state_look_at(pol, x, fl, family, daddr, saddr,
+ xfrm_state_look_at(pol, x, fl, encap_family, daddr, saddr,
&best, &acquire_in_progress, &error);
}
if (best)
goto found;
- h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, family);
+ h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) {
- if (x->props.family == family &&
+ if (x->props.family == encap_family &&
x->props.reqid == tmpl->reqid &&
(mark & x->mark.m) == x->mark.v &&
!(x->props.flags & XFRM_STATE_WILDRECV) &&
- xfrm_state_addr_check(x, daddr, saddr, family) &&
+ xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
tmpl->mode == x->props.mode &&
tmpl->id.proto == x->id.proto &&
(tmpl->id.spi == x->id.spi || !tmpl->id.spi))
- xfrm_state_look_at(pol, x, fl, family, daddr, saddr,
+ xfrm_state_look_at(pol, x, fl, encap_family, daddr, saddr,
&best, &acquire_in_progress, &error);
}
@@ -829,7 +838,7 @@
if (!x && !error && !acquire_in_progress) {
if (tmpl->id.spi &&
(x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi,
- tmpl->id.proto, family)) != NULL) {
+ tmpl->id.proto, encap_family)) != NULL) {
to_put = x0;
error = -EEXIST;
goto out;
@@ -839,9 +848,9 @@
error = -ENOMEM;
goto out;
}
- /* Initialize temporary selector matching only
+ /* Initialize temporary state matching only
* to current session. */
- xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);
+ xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family);
memcpy(&x->mark, &pol->mark, sizeof(x->mark));
error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
@@ -856,10 +865,10 @@
x->km.state = XFRM_STATE_ACQ;
list_add(&x->km.all, &net->xfrm.state_all);
hlist_add_head(&x->bydst, net->xfrm.state_bydst+h);
- h = xfrm_src_hash(net, daddr, saddr, family);
+ h = xfrm_src_hash(net, daddr, saddr, encap_family);
hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h);
if (x->id.spi) {
- h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, family);
+ h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
}
x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index b14ed4b..8bae6b2 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1801,7 +1801,7 @@
struct xfrm_user_expire *ue = nlmsg_data(nlh);
struct xfrm_usersa_info *p = &ue->state;
struct xfrm_mark m;
- u32 mark = xfrm_mark_get(attrs, &m);;
+ u32 mark = xfrm_mark_get(attrs, &m);
x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family);
diff --git a/samples/kfifo/dma-example.c b/samples/kfifo/dma-example.c
index ee03a4f..0647379 100644
--- a/samples/kfifo/dma-example.c
+++ b/samples/kfifo/dma-example.c
@@ -24,6 +24,7 @@
{
int i;
unsigned int ret;
+ unsigned int nents;
struct scatterlist sg[10];
printk(KERN_INFO "DMA fifo test start\n");
@@ -61,9 +62,9 @@
* byte at the beginning, after the kfifo_skip().
*/
sg_init_table(sg, ARRAY_SIZE(sg));
- ret = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE);
- printk(KERN_INFO "DMA sgl entries: %d\n", ret);
- if (!ret) {
+ nents = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE);
+ printk(KERN_INFO "DMA sgl entries: %d\n", nents);
+ if (!nents) {
/* fifo is full and no sgl was created */
printk(KERN_WARNING "error kfifo_dma_in_prepare\n");
return -EIO;
@@ -71,7 +72,7 @@
/* receive data */
printk(KERN_INFO "scatterlist for receive:\n");
- for (i = 0; i < ARRAY_SIZE(sg); i++) {
+ for (i = 0; i < nents; i++) {
printk(KERN_INFO
"sg[%d] -> "
"page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
@@ -91,16 +92,16 @@
kfifo_dma_in_finish(&fifo, ret);
/* Prepare to transmit data, example: 8 bytes */
- ret = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8);
- printk(KERN_INFO "DMA sgl entries: %d\n", ret);
- if (!ret) {
+ nents = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8);
+ printk(KERN_INFO "DMA sgl entries: %d\n", nents);
+ if (!nents) {
/* no data was available and no sgl was created */
printk(KERN_WARNING "error kfifo_dma_out_prepare\n");
return -EIO;
}
printk(KERN_INFO "scatterlist for transmit:\n");
- for (i = 0; i < ARRAY_SIZE(sg); i++) {
+ for (i = 0; i < nents; i++) {
printk(KERN_INFO
"sg[%d] -> "
"page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
diff --git a/scripts/Makefile b/scripts/Makefile
index 842dbc2..2e08810 100644
--- a/scripts/Makefile
+++ b/scripts/Makefile
@@ -11,6 +11,7 @@
hostprogs-$(CONFIG_LOGO) += pnmtologo
hostprogs-$(CONFIG_VT) += conmakehash
hostprogs-$(CONFIG_IKCONFIG) += bin2c
+hostprogs-$(BUILD_C_RECORDMCOUNT) += recordmcount
always := $(hostprogs-y) $(hostprogs-m)
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index a1a5cf9..843bd4f 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -209,12 +209,22 @@
endif
ifdef CONFIG_FTRACE_MCOUNT_RECORD
+ifdef BUILD_C_RECORDMCOUNT
+# Due to recursion, we must skip empty.o.
+# The empty.o file is created in the make process in order to determine
+# the target endianness and word size. It is made before all other C
+# files, including recordmcount.
+cmd_record_mcount = if [ $(@) != "scripts/mod/empty.o" ]; then \
+ $(objtree)/scripts/recordmcount "$(@)"; \
+ fi;
+else
cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \
"$(if $(CONFIG_CPU_BIG_ENDIAN),big,little)" \
"$(if $(CONFIG_64BIT),64,32)" \
"$(OBJDUMP)" "$(OBJCOPY)" "$(CC)" "$(LD)" "$(NM)" "$(RM)" "$(MV)" \
"$(if $(part-of-module),1,0)" "$(@)";
endif
+endif
define rule_cc_o_c
$(call echo-cmd,checksrc) $(cmd_checksrc) \
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 54fd1b7..7bfcf1a 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -101,14 +101,6 @@
modname_flags = $(if $(filter 1,$(words $(modname))),\
-D"KBUILD_MODNAME=KBUILD_STR($(call name-fix,$(modname)))")
-#hash values
-ifdef CONFIG_DYNAMIC_DEBUG
-debug_flags = -D"DEBUG_HASH=$(shell ./scripts/basic/hash djb2 $(@D)$(modname))"\
- -D"DEBUG_HASH2=$(shell ./scripts/basic/hash r5 $(@D)$(modname))"
-else
-debug_flags =
-endif
-
orig_c_flags = $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(KBUILD_SUBDIR_CCFLAGS) \
$(ccflags-y) $(CFLAGS_$(basetarget).o)
_c_flags = $(filter-out $(CFLAGS_REMOVE_$(basetarget).o), $(orig_c_flags))
@@ -152,8 +144,7 @@
c_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
$(__c_flags) $(modkern_cflags) \
- -D"KBUILD_STR(s)=\#s" $(basename_flags) $(modname_flags) \
- $(debug_flags)
+ -D"KBUILD_STR(s)=\#s" $(basename_flags) $(modname_flags)
a_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
$(__a_flags) $(modkern_aflags)
diff --git a/scripts/basic/Makefile b/scripts/basic/Makefile
index 0955995..4c324a1 100644
--- a/scripts/basic/Makefile
+++ b/scripts/basic/Makefile
@@ -9,7 +9,7 @@
# fixdep: Used to generate dependency information during build process
# docproc: Used in Documentation/DocBook
-hostprogs-y := fixdep docproc hash
+hostprogs-y := fixdep docproc
always := $(hostprogs-y)
# fixdep is needed to compile other host programs
diff --git a/scripts/basic/docproc.c b/scripts/basic/docproc.c
index 79ab973..fc3b18d 100644
--- a/scripts/basic/docproc.c
+++ b/scripts/basic/docproc.c
@@ -34,12 +34,14 @@
*
*/
+#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <unistd.h>
#include <limits.h>
+#include <errno.h>
#include <sys/types.h>
#include <sys/wait.h>
@@ -54,6 +56,7 @@
FILEONLY *internalfunctions;
FILEONLY *externalfunctions;
FILEONLY *symbolsonly;
+FILEONLY *findall;
typedef void FILELINE(char * file, char * line);
FILELINE * singlefunctions;
@@ -65,12 +68,30 @@
#define KERNELDOCPATH "scripts/"
#define KERNELDOC "kernel-doc"
#define DOCBOOK "-docbook"
+#define LIST "-list"
#define FUNCTION "-function"
#define NOFUNCTION "-nofunction"
#define NODOCSECTIONS "-no-doc-sections"
static char *srctree, *kernsrctree;
+static char **all_list = NULL;
+static int all_list_len = 0;
+
+static void consume_symbol(const char *sym)
+{
+ int i;
+
+ for (i = 0; i < all_list_len; i++) {
+ if (!all_list[i])
+ continue;
+ if (strcmp(sym, all_list[i]))
+ continue;
+ all_list[i] = NULL;
+ break;
+ }
+}
+
static void usage (void)
{
fprintf(stderr, "Usage: docproc {doc|depend} file\n");
@@ -248,6 +269,7 @@
struct symfile * sym = &symfilelist[i];
for (j=0; j < sym->symbolcnt; j++) {
vec[idx++] = type;
+ consume_symbol(sym->symbollist[j].name);
vec[idx++] = sym->symbollist[j].name;
}
}
@@ -287,6 +309,11 @@
vec[idx++] = &line[i];
}
}
+ for (i = 0; i < idx; i++) {
+ if (strcmp(vec[i], FUNCTION))
+ continue;
+ consume_symbol(vec[i + 1]);
+ }
vec[idx++] = filename;
vec[idx] = NULL;
exec_kernel_doc(vec);
@@ -306,6 +333,10 @@
if (*s == '\n')
*s = '\0';
+ asprintf(&s, "DOC: %s", line);
+ consume_symbol(s);
+ free(s);
+
vec[0] = KERNELDOC;
vec[1] = DOCBOOK;
vec[2] = FUNCTION;
@@ -315,6 +346,84 @@
exec_kernel_doc(vec);
}
+static void find_all_symbols(char *filename)
+{
+ char *vec[4]; /* kerneldoc -list file NULL */
+ pid_t pid;
+ int ret, i, count, start;
+ char real_filename[PATH_MAX + 1];
+ int pipefd[2];
+ char *data, *str;
+ size_t data_len = 0;
+
+ vec[0] = KERNELDOC;
+ vec[1] = LIST;
+ vec[2] = filename;
+ vec[3] = NULL;
+
+ if (pipe(pipefd)) {
+ perror("pipe");
+ exit(1);
+ }
+
+ switch (pid=fork()) {
+ case -1:
+ perror("fork");
+ exit(1);
+ case 0:
+ close(pipefd[0]);
+ dup2(pipefd[1], 1);
+ memset(real_filename, 0, sizeof(real_filename));
+ strncat(real_filename, kernsrctree, PATH_MAX);
+ strncat(real_filename, "/" KERNELDOCPATH KERNELDOC,
+ PATH_MAX - strlen(real_filename));
+ execvp(real_filename, vec);
+ fprintf(stderr, "exec ");
+ perror(real_filename);
+ exit(1);
+ default:
+ close(pipefd[1]);
+ data = malloc(4096);
+ do {
+ while ((ret = read(pipefd[0],
+ data + data_len,
+ 4096)) > 0) {
+ data_len += ret;
+ data = realloc(data, data_len + 4096);
+ }
+ } while (ret < 0 && errno == EAGAIN);
+ if (ret != 0) {
+ perror("read");
+ exit(1);
+ }
+ waitpid(pid, &ret, 0);
+ }
+ if (WIFEXITED(ret))
+ exitstatus |= WEXITSTATUS(ret);
+ else
+ exitstatus = 0xff;
+
+ count = 0;
+ /* poor man's strtok, but with counting */
+ for (i = 0; i < data_len; i++) {
+ if (data[i] == '\n') {
+ count++;
+ data[i] = '\0';
+ }
+ }
+ start = all_list_len;
+ all_list_len += count;
+ all_list = realloc(all_list, sizeof(char *) * all_list_len);
+ str = data;
+ for (i = 0; i < data_len && start != all_list_len; i++) {
+ if (data[i] == '\0') {
+ all_list[start] = str;
+ str = data + i + 1;
+ start++;
+ }
+ }
+}
+
/*
* Parse file, calling action specific functions for:
* 1) Lines containing !E
@@ -322,7 +431,8 @@
* 3) Lines containing !D
* 4) Lines containing !F
* 5) Lines containing !P
- * 6) Default lines - lines not matching the above
+ * 6) Lines containing !C
+ * 7) Default lines - lines not matching the above
*/
static void parse_file(FILE *infile)
{
@@ -365,6 +475,12 @@
s++;
docsection(line + 2, s);
break;
+ case 'C':
+ while (*s && !isspace(*s)) s++;
+ *s = '\0';
+ if (findall)
+ findall(line+2);
+ break;
default:
defaultline(line);
}
@@ -380,6 +496,7 @@
int main(int argc, char *argv[])
{
FILE * infile;
+ int i;
srctree = getenv("SRCTREE");
if (!srctree)
@@ -415,6 +532,7 @@
symbolsonly = find_export_symbols;
singlefunctions = noaction2;
docsection = noaction2;
+ findall = find_all_symbols;
parse_file(infile);
/* Rewind to start from beginning of file again */
@@ -425,8 +543,16 @@
symbolsonly = printline;
singlefunctions = singfunc;
docsection = docsect;
+ findall = NULL;
parse_file(infile);
+
+ for (i = 0; i < all_list_len; i++) {
+ if (!all_list[i])
+ continue;
+ fprintf(stderr, "Warning: didn't use docs for %s\n",
+ all_list[i]);
+ }
}
else if (strcmp("depend", argv[1]) == 0)
{
@@ -439,6 +565,7 @@
symbolsonly = adddep;
singlefunctions = adddep2;
docsection = adddep2;
+ findall = adddep;
parse_file(infile);
printf("\n");
}
diff --git a/scripts/basic/hash.c b/scripts/basic/hash.c
deleted file mode 100644
index 2ef5d3f..0000000
--- a/scripts/basic/hash.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (C) 2008 Red Hat, Inc., Jason Baron <jbaron@redhat.com>
- *
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#define DYNAMIC_DEBUG_HASH_BITS 6
-
-static const char *program;
-
-static void usage(void)
-{
- printf("Usage: %s <djb2|r5> <modname>\n", program);
- exit(1);
-}
-
-/* djb2 hashing algorithm by Dan Bernstein. From:
- * http://www.cse.yorku.ca/~oz/hash.html
- */
-
-static unsigned int djb2_hash(char *str)
-{
- unsigned long hash = 5381;
- int c;
-
- c = *str;
- while (c) {
- hash = ((hash << 5) + hash) + c;
- c = *++str;
- }
- return (unsigned int)(hash & ((1 << DYNAMIC_DEBUG_HASH_BITS) - 1));
-}
-
-static unsigned int r5_hash(char *str)
-{
- unsigned long hash = 0;
- int c;
-
- c = *str;
- while (c) {
- hash = (hash + (c << 4) + (c >> 4)) * 11;
- c = *++str;
- }
- return (unsigned int)(hash & ((1 << DYNAMIC_DEBUG_HASH_BITS) - 1));
-}
-
-int main(int argc, char *argv[])
-{
- program = argv[0];
-
- if (argc != 3)
- usage();
- if (!strcmp(argv[1], "djb2"))
- printf("%d\n", djb2_hash(argv[2]));
- else if (!strcmp(argv[1], "r5"))
- printf("%d\n", r5_hash(argv[2]));
- else
- usage();
- exit(0);
-}
-
diff --git a/scripts/gcc-goto.sh b/scripts/gcc-goto.sh
new file mode 100644
index 0000000..520d16b
--- /dev/null
+++ b/scripts/gcc-goto.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+# Test for gcc 'asm goto' support
+# Copyright (C) 2010, Jason Baron <jbaron@redhat.com>
+
+echo "int main(void) { entry: asm goto (\"\"::::entry); return 0; }" | $@ -x c - -c -o /dev/null >/dev/null 2>&1 && echo "y"
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
index 5b7c86e..7ef429c 100644
--- a/scripts/kconfig/conf.c
+++ b/scripts/kconfig/conf.c
@@ -427,7 +427,7 @@
if (sym->name && !sym_is_choice_value(sym)) {
printf("CONFIG_%s\n", sym->name);
}
- } else {
+ } else if (input_mode != oldnoconfig) {
if (!conf_cnt++)
printf(_("*\n* Restart config...\n*\n"));
rootEntry = menu_get_parent_menu(menu);
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index c39327e..515253f 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -497,7 +497,9 @@
/*
* If symbol is a choice value and equals to the
* default for a choice - skip.
- * But only if value is bool and equal to "y" .
+ * But only if value is bool and equal to "y" and
+ * choice is not "optional".
+ * (If choice is "optional" then all values can be "n")
*/
if (sym_is_choice_value(sym)) {
struct symbol *cs;
@@ -505,7 +507,7 @@
cs = prop_get_symbol(sym_get_choice_prop(sym));
ds = sym_choice_default(cs);
- if (sym == ds) {
+ if (!sym_is_optional(cs) && sym == ds) {
if ((sym->type == S_BOOLEAN) &&
sym_get_tristate_value(sym) == yes)
goto next_menu;
diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h
index 6ee2e4f..170459c 100644
--- a/scripts/kconfig/expr.h
+++ b/scripts/kconfig/expr.h
@@ -165,7 +165,6 @@
struct symbol *sym;
struct property *prompt;
struct expr *dep;
- struct expr *dir_dep;
unsigned int flags;
char *help;
struct file *file;
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index 4fb5902..edda8b4 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -107,7 +107,6 @@
void menu_add_dep(struct expr *dep)
{
current_entry->dep = expr_alloc_and(current_entry->dep, menu_check_dep(dep));
- current_entry->dir_dep = current_entry->dep;
}
void menu_set_type(int type)
@@ -291,10 +290,6 @@
for (menu = parent->list; menu; menu = menu->next)
menu_finalize(menu);
} else if (sym) {
- /* ignore inherited dependencies for dir_dep */
- sym->dir_dep.expr = expr_transform(expr_copy(parent->dir_dep));
- sym->dir_dep.expr = expr_eliminate_dups(sym->dir_dep.expr);
-
basedep = parent->prompt ? parent->prompt->visible.expr : NULL;
basedep = expr_trans_compare(basedep, E_UNEQUAL, &symbol_no);
basedep = expr_eliminate_dups(expr_transform(basedep));
@@ -325,6 +320,8 @@
parent->next = last_menu->next;
last_menu->next = NULL;
}
+
+ sym->dir_dep.expr = parent->dep;
}
for (menu = parent->list; menu; menu = menu->next) {
if (sym && sym_is_choice(sym) &&
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index e95718f..1f8b305 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -350,6 +350,7 @@
}
}
calc_newval:
+#if 0
if (sym->dir_dep.tri == no && sym->rev_dep.tri != no) {
fprintf(stderr, "warning: (");
expr_fprint(sym->rev_dep.expr, stderr);
@@ -358,6 +359,7 @@
expr_fprint(sym->dir_dep.expr, stderr);
fprintf(stderr, ")\n");
}
+#endif
newval.tri = EXPR_OR(newval.tri, sym->rev_dep.tri);
}
if (newval.tri == mod && sym_get_type(sym) == S_BOOLEAN)
@@ -937,6 +939,8 @@
sym = stack->sym;
next_sym = stack->next ? stack->next->sym : last_sym;
prop = stack->prop;
+ if (prop == NULL)
+ prop = stack->sym->prop;
/* for choice values find the menu entry (used below) */
if (sym_is_choice(sym) || sym_is_choice_value(sym)) {
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 102e123..cdb6dc1 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -44,12 +44,13 @@
# Note: This only supports 'c'.
# usage:
-# kernel-doc [ -docbook | -html | -text | -man ] [ -no-doc-sections ]
+# kernel-doc [ -docbook | -html | -text | -man | -list ] [ -no-doc-sections ]
# [ -function funcname [ -function funcname ...] ] c file(s)s > outputfile
# or
# [ -nofunction funcname [ -function funcname ...] ] c file(s)s > outputfile
#
# Set output format using one of -docbook -html -text or -man. Default is man.
+# The -list format is for internal use by docproc.
#
# -no-doc-sections
# Do not output DOC: sections
@@ -210,9 +211,16 @@
$type_param, "\$1" );
my $blankline_text = "";
+# list mode
+my %highlights_list = ( $type_constant, "\$1",
+ $type_func, "\$1",
+ $type_struct, "\$1",
+ $type_param, "\$1" );
+my $blankline_list = "";
sub usage {
- print "Usage: $0 [ -v ] [ -docbook | -html | -text | -man ] [ -no-doc-sections ]\n";
+ print "Usage: $0 [ -v ] [ -docbook | -html | -text | -man | -list ]\n";
+ print " [ -no-doc-sections ]\n";
print " [ -function funcname [ -function funcname ...] ]\n";
print " [ -nofunction funcname [ -nofunction funcname ...] ]\n";
print " c source file(s) > outputfile\n";
@@ -318,6 +326,10 @@
$output_mode = "xml";
%highlights = %highlights_xml;
$blankline = $blankline_xml;
+ } elsif ($cmd eq "-list") {
+ $output_mode = "list";
+ %highlights = %highlights_list;
+ $blankline = $blankline_list;
} elsif ($cmd eq "-gnome") {
$output_mode = "gnome";
%highlights = %highlights_gnome;
@@ -1361,6 +1373,42 @@
}
}
+## list mode output functions
+
+sub output_function_list(%) {
+ my %args = %{$_[0]};
+
+ print $args{'function'} . "\n";
+}
+
+# output enum in list
+sub output_enum_list(%) {
+ my %args = %{$_[0]};
+ print $args{'enum'} . "\n";
+}
+
+# output typedef in list
+sub output_typedef_list(%) {
+ my %args = %{$_[0]};
+ print $args{'typedef'} . "\n";
+}
+
+# output struct as list
+sub output_struct_list(%) {
+ my %args = %{$_[0]};
+
+ print $args{'struct'} . "\n";
+}
+
+sub output_blockhead_list(%) {
+ my %args = %{$_[0]};
+ my ($parameter, $section);
+
+ foreach $section (@{$args{'sectionlist'}}) {
+ print "DOC: $section\n";
+ }
+}
+
##
# generic output function for all types (function, struct/union, typedef, enum);
# calls the generated, variable output_ function name based on
@@ -1679,7 +1727,7 @@
foreach $px (0 .. $#prms) {
$prm_clean = $prms[$px];
$prm_clean =~ s/\[.*\]//;
- $prm_clean =~ s/__attribute__\s*\(\([a-z,_\*\s\(\)]*\)\)//;
+ $prm_clean =~ s/__attribute__\s*\(\([a-z,_\*\s\(\)]*\)\)//i;
# ignore array size in a parameter string;
# however, the original param string may contain
# spaces, e.g.: addr[6 + 2]
diff --git a/scripts/mkmakefile b/scripts/mkmakefile
index 67d59c7..5325423 100644
--- a/scripts/mkmakefile
+++ b/scripts/mkmakefile
@@ -44,7 +44,9 @@
Makefile:;
-\$(all) %/: all
+\$(all): all
@:
+%/: all
+ @:
EOF
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
new file mode 100644
index 0000000..26e1271
--- /dev/null
+++ b/scripts/recordmcount.c
@@ -0,0 +1,363 @@
+/*
+ * recordmcount.c: construct a table of the locations of calls to 'mcount'
+ * so that ftrace can find them quickly.
+ * Copyright 2009 John F. Reiser <jreiser@BitWagon.com>. All rights reserved.
+ * Licensed under the GNU General Public License, version 2 (GPLv2).
+ *
+ * Restructured to fit Linux format, as well as other updates:
+ * Copyright 2010 Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
+ */
+
+/*
+ * Strategy: alter the .o file in-place.
+ *
+ * Append a new STRTAB that has the new section names, followed by a new array
+ * ElfXX_Shdr[] that has the new section headers, followed by the section
+ * contents for __mcount_loc and its relocations. The old shstrtab strings,
+ * and the old ElfXX_Shdr[] array, remain as "garbage" (commonly, a couple
+ * kilobytes.) Subsequent processing by /bin/ld (or the kernel module loader)
+ * will ignore the garbage regions, because they are not designated by the
+ * new .e_shoff nor the new ElfXX_Shdr[]. [In order to remove the garbage,
+ * then use "ld -r" to create a new file that omits the garbage.]
+ */
+
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <elf.h>
+#include <fcntl.h>
+#include <setjmp.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+static int fd_map; /* File descriptor for file being modified. */
+static int mmap_failed; /* Boolean flag. */
+static void *ehdr_curr; /* current ElfXX_Ehdr * for resource cleanup */
+static char gpfx; /* prefix for global symbol name (sometimes '_') */
+static struct stat sb; /* Remember .st_size, etc. */
+static jmp_buf jmpenv; /* setjmp/longjmp per-file error escape */
+
+/* setjmp() return values */
+enum {
+ SJ_SETJMP = 0, /* hardwired first return */
+ SJ_FAIL,
+ SJ_SUCCEED
+};
+
+/* Per-file resource cleanup when multiple files. */
+static void
+cleanup(void)
+{
+ if (!mmap_failed)
+ munmap(ehdr_curr, sb.st_size);
+ else
+ free(ehdr_curr);
+ close(fd_map);
+}
+
+static void __attribute__((noreturn))
+fail_file(void)
+{
+ cleanup();
+ longjmp(jmpenv, SJ_FAIL);
+}
+
+static void __attribute__((noreturn))
+succeed_file(void)
+{
+ cleanup();
+ longjmp(jmpenv, SJ_SUCCEED);
+}
+
+/* ulseek, uread, ...: Check return value for errors. */
+
+static off_t
+ulseek(int const fd, off_t const offset, int const whence)
+{
+ off_t const w = lseek(fd, offset, whence);
+ if ((off_t)-1 == w) {
+ perror("lseek");
+ fail_file();
+ }
+ return w;
+}
+
+static size_t
+uread(int const fd, void *const buf, size_t const count)
+{
+ size_t const n = read(fd, buf, count);
+ if (n != count) {
+ perror("read");
+ fail_file();
+ }
+ return n;
+}
+
+static size_t
+uwrite(int const fd, void const *const buf, size_t const count)
+{
+ size_t const n = write(fd, buf, count);
+ if (n != count) {
+ perror("write");
+ fail_file();
+ }
+ return n;
+}
+
+static void *
+umalloc(size_t size)
+{
+ void *const addr = malloc(size);
+ if (0 == addr) {
+ fprintf(stderr, "malloc failed: %zu bytes\n", size);
+ fail_file();
+ }
+ return addr;
+}
+
+/*
+ * Get the whole file as a programming convenience in order to avoid
+ * malloc+lseek+read+free of many pieces. If successful, then mmap
+ * avoids copying unused pieces; else just read the whole file.
+ * Open for both read and write; new info will be appended to the file.
+ * Use MAP_PRIVATE so that a few changes to the in-memory ElfXX_Ehdr
+ * do not propagate to the file until an explicit overwrite at the last.
+ * This preserves most aspects of consistency (all except .st_size)
+ * for simultaneous readers of the file while we are appending to it.
+ * However, multiple writers still are bad. We choose not to use
+ * locking because it is expensive and the use case of kernel build
+ * makes multiple writers unlikely.
+ */
+static void *mmap_file(char const *fname)
+{
+ void *addr;
+
+ fd_map = open(fname, O_RDWR);
+ if (0 > fd_map || 0 > fstat(fd_map, &sb)) {
+ perror(fname);
+ fail_file();
+ }
+ if (!S_ISREG(sb.st_mode)) {
+ fprintf(stderr, "not a regular file: %s\n", fname);
+ fail_file();
+ }
+ addr = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_PRIVATE,
+ fd_map, 0);
+ mmap_failed = 0;
+ if (MAP_FAILED == addr) {
+ mmap_failed = 1;
+ addr = umalloc(sb.st_size);
+ uread(fd_map, addr, sb.st_size);
+ }
+ return addr;
+}
+
+/* w8rev, w8nat, ...: Handle endianness. */
+
+static uint64_t w8rev(uint64_t const x)
+{
+ return ((0xff & (x >> (0 * 8))) << (7 * 8))
+ | ((0xff & (x >> (1 * 8))) << (6 * 8))
+ | ((0xff & (x >> (2 * 8))) << (5 * 8))
+ | ((0xff & (x >> (3 * 8))) << (4 * 8))
+ | ((0xff & (x >> (4 * 8))) << (3 * 8))
+ | ((0xff & (x >> (5 * 8))) << (2 * 8))
+ | ((0xff & (x >> (6 * 8))) << (1 * 8))
+ | ((0xff & (x >> (7 * 8))) << (0 * 8));
+}
+
+static uint32_t w4rev(uint32_t const x)
+{
+ return ((0xff & (x >> (0 * 8))) << (3 * 8))
+ | ((0xff & (x >> (1 * 8))) << (2 * 8))
+ | ((0xff & (x >> (2 * 8))) << (1 * 8))
+ | ((0xff & (x >> (3 * 8))) << (0 * 8));
+}
+
+static uint32_t w2rev(uint16_t const x)
+{
+ return ((0xff & (x >> (0 * 8))) << (1 * 8))
+ | ((0xff & (x >> (1 * 8))) << (0 * 8));
+}
+
+static uint64_t w8nat(uint64_t const x)
+{
+ return x;
+}
+
+static uint32_t w4nat(uint32_t const x)
+{
+ return x;
+}
+
+static uint32_t w2nat(uint16_t const x)
+{
+ return x;
+}
+
+static uint64_t (*w8)(uint64_t);
+static uint32_t (*w)(uint32_t);
+static uint32_t (*w2)(uint16_t);
+
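The w*rev/w*nat pairs let the rest of the tool stay endian-agnostic: do_file() below points w, w2 and w8 at either the swapping or the identity variants once per object file. A quick self-contained sanity check of the swapping logic (swap32 is a local copy of w4rev, repeated so the test stands alone):

	#include <assert.h>
	#include <stdint.h>

	/* same algorithm as w4rev above, copied so this test is standalone */
	static uint32_t swap32(uint32_t x)
	{
		return ((0xff & (x >>  0)) << 24)
		     | ((0xff & (x >>  8)) << 16)
		     | ((0xff & (x >> 16)) <<  8)
		     | ((0xff & (x >> 24)) <<  0);
	}

	int main(void)
	{
		assert(swap32(0x11223344u) == 0x44332211u);
		/* byte swapping is an involution: applying it twice is a no-op */
		assert(swap32(swap32(0xdeadbeefu)) == 0xdeadbeefu);
		return 0;
	}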
+/* Names of the sections that could contain calls to mcount. */
+static int
+is_mcounted_section_name(char const *const txtname)
+{
+ return 0 == strcmp(".text", txtname) ||
+ 0 == strcmp(".sched.text", txtname) ||
+ 0 == strcmp(".spinlock.text", txtname) ||
+ 0 == strcmp(".irqentry.text", txtname) ||
+ 0 == strcmp(".text.unlikely", txtname);
+}
+
+/* 32 bit and 64 bit are very similar */
+#include "recordmcount.h"
+#define RECORD_MCOUNT_64
+#include "recordmcount.h"
+
+static void
+do_file(char const *const fname)
+{
+ Elf32_Ehdr *const ehdr = mmap_file(fname);
+ unsigned int reltype = 0;
+
+ ehdr_curr = ehdr;
+ w = w4nat;
+ w2 = w2nat;
+ w8 = w8nat;
+ switch (ehdr->e_ident[EI_DATA]) {
+ static unsigned int const endian = 1;
+ default: {
+ fprintf(stderr, "unrecognized ELF data encoding %d: %s\n",
+ ehdr->e_ident[EI_DATA], fname);
+ fail_file();
+ } break;
+ case ELFDATA2LSB: {
+ if (1 != *(unsigned char const *)&endian) {
+ /* main() is big endian, file.o is little endian. */
+ w = w4rev;
+ w2 = w2rev;
+ w8 = w8rev;
+ }
+ } break;
+ case ELFDATA2MSB: {
+ if (0 != *(unsigned char const *)&endian) {
+ /* main() is little endian, file.o is big endian. */
+ w = w4rev;
+ w2 = w2rev;
+ w8 = w8rev;
+ }
+ } break;
+ } /* end switch */
+ if (0 != memcmp(ELFMAG, ehdr->e_ident, SELFMAG)
+ || ET_REL != w2(ehdr->e_type)
+ || EV_CURRENT != ehdr->e_ident[EI_VERSION]) {
+ fprintf(stderr, "unrecognized ET_REL file %s\n", fname);
+ fail_file();
+ }
+
+ gpfx = 0;
+ switch (w2(ehdr->e_machine)) {
+ default: {
+ fprintf(stderr, "unrecognized e_machine %d %s\n",
+ w2(ehdr->e_machine), fname);
+ fail_file();
+ } break;
+ case EM_386: reltype = R_386_32; break;
+ case EM_ARM: reltype = R_ARM_ABS32; break;
+ case EM_IA_64: reltype = R_IA64_IMM64; gpfx = '_'; break;
+ case EM_PPC: reltype = R_PPC_ADDR32; gpfx = '_'; break;
+ case EM_PPC64: reltype = R_PPC64_ADDR64; gpfx = '_'; break;
+ case EM_S390: /* reltype: e_class */ gpfx = '_'; break;
+ case EM_SH: reltype = R_SH_DIR32; break;
+ case EM_SPARCV9: reltype = R_SPARC_64; gpfx = '_'; break;
+ case EM_X86_64: reltype = R_X86_64_64; break;
+ } /* end switch */
+
+ switch (ehdr->e_ident[EI_CLASS]) {
+ default: {
+ fprintf(stderr, "unrecognized ELF class %d %s\n",
+ ehdr->e_ident[EI_CLASS], fname);
+ fail_file();
+ } break;
+ case ELFCLASS32: {
+ if (sizeof(Elf32_Ehdr) != w2(ehdr->e_ehsize)
+ || sizeof(Elf32_Shdr) != w2(ehdr->e_shentsize)) {
+ fprintf(stderr,
+ "unrecognized ET_REL file: %s\n", fname);
+ fail_file();
+ }
+ if (EM_S390 == w2(ehdr->e_machine))
+ reltype = R_390_32;
+ do32(ehdr, fname, reltype);
+ } break;
+ case ELFCLASS64: {
+ Elf64_Ehdr *const ghdr = (Elf64_Ehdr *)ehdr;
+ if (sizeof(Elf64_Ehdr) != w2(ghdr->e_ehsize)
+ || sizeof(Elf64_Shdr) != w2(ghdr->e_shentsize)) {
+ fprintf(stderr,
+ "unrecognized ET_REL file: %s\n", fname);
+ fail_file();
+ }
+ if (EM_S390 == w2(ghdr->e_machine))
+ reltype = R_390_64;
+ do64(ghdr, fname, reltype);
+ } break;
+ } /* end switch */
+
+ cleanup();
+}
+
+int
+main(int argc, char const *argv[])
+{
+ const char ftrace[] = "kernel/trace/ftrace.o";
+ int ftrace_size = sizeof(ftrace) - 1;
+ int n_error = 0; /* gcc-4.3.0 false positive complaint */
+
+ if (argc <= 1) {
+ fprintf(stderr, "usage: recordmcount file.o...\n");
+ return 0;
+ }
+
+ /* Process each file in turn, allowing deep failure. */
+ for (--argc, ++argv; 0 < argc; --argc, ++argv) {
+ int const sjval = setjmp(jmpenv);
+ int len;
+
+ /*
+ * The file kernel/trace/ftrace.o references the mcount
+ * function but does not call it. Since ftrace.o should
+ * not be traced anyway, we just skip it.
+ */
+ len = strlen(argv[0]);
+ if (len >= ftrace_size &&
+ strcmp(argv[0] + (len - ftrace_size), ftrace) == 0)
+ continue;
+
+ switch (sjval) {
+ default: {
+ fprintf(stderr, "internal error: %s\n", argv[0]);
+ exit(1);
+ } break;
+ case SJ_SETJMP: { /* normal sequence */
+ /* Avoid problems if early cleanup() */
+ fd_map = -1;
+ ehdr_curr = NULL;
+ mmap_failed = 1;
+ do_file(argv[0]);
+ } break;
+ case SJ_FAIL: { /* error in do_file or below */
+ ++n_error;
+ } break;
+ case SJ_SUCCEED: { /* premature success */
+ /* do nothing */
+ } break;
+ } /* end switch */
+ }
+ return !!n_error;
+}
+
+
diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
new file mode 100644
index 0000000..7f39d09
--- /dev/null
+++ b/scripts/recordmcount.h
@@ -0,0 +1,366 @@
+/*
+ * recordmcount.h
+ *
+ * This code was taken out of recordmcount.c, originally written by
+ * John F. Reiser. Copyright 2009 John F. Reiser <jreiser@BitWagon.com>.
+ * All rights reserved.
+ *
+ * The original code used the same algorithms for both 32bit
+ * and 64bit ELF files, but duplicated the code to cope with the
+ * different structure layouts. This file instead defines a macro
+ * for everything that differs between the 64 and 32 bit code, so
+ * that both sets of functions can be generated by including this
+ * header twice: once with RECORD_MCOUNT_64 undefined, and again
+ * with it defined.
+ *
+ * This conversion to macros was done by:
+ * Copyright 2010 Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
+ *
+ * Licensed under the GNU General Public License, version 2 (GPLv2).
+ */
+#undef append_func
+#undef sift_rel_mcount
+#undef find_secsym_ndx
+#undef __has_rel_mcount
+#undef has_rel_mcount
+#undef tot_relsize
+#undef do_func
+#undef Elf_Ehdr
+#undef Elf_Shdr
+#undef Elf_Rel
+#undef Elf_Rela
+#undef Elf_Sym
+#undef ELF_R_SYM
+#undef ELF_R_INFO
+#undef ELF_ST_BIND
+#undef uint_t
+#undef _w
+#undef _align
+#undef _size
+
+#ifdef RECORD_MCOUNT_64
+# define append_func append64
+# define sift_rel_mcount sift64_rel_mcount
+# define find_secsym_ndx find64_secsym_ndx
+# define __has_rel_mcount __has64_rel_mcount
+# define has_rel_mcount has64_rel_mcount
+# define tot_relsize tot64_relsize
+# define do_func do64
+# define Elf_Ehdr Elf64_Ehdr
+# define Elf_Shdr Elf64_Shdr
+# define Elf_Rel Elf64_Rel
+# define Elf_Rela Elf64_Rela
+# define Elf_Sym Elf64_Sym
+# define ELF_R_SYM ELF64_R_SYM
+# define ELF_R_INFO ELF64_R_INFO
+# define ELF_ST_BIND ELF64_ST_BIND
+# define uint_t uint64_t
+# define _w w8
+# define _align 7u
+# define _size 8
+#else
+# define append_func append32
+# define sift_rel_mcount sift32_rel_mcount
+# define find_secsym_ndx find32_secsym_ndx
+# define __has_rel_mcount __has32_rel_mcount
+# define has_rel_mcount has32_rel_mcount
+# define tot_relsize tot32_relsize
+# define do_func do32
+# define Elf_Ehdr Elf32_Ehdr
+# define Elf_Shdr Elf32_Shdr
+# define Elf_Rel Elf32_Rel
+# define Elf_Rela Elf32_Rela
+# define Elf_Sym Elf32_Sym
+# define ELF_R_SYM ELF32_R_SYM
+# define ELF_R_INFO ELF32_R_INFO
+# define ELF_ST_BIND ELF32_ST_BIND
+# define uint_t uint32_t
+# define _w w
+# define _align 3u
+# define _size 4
+#endif
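+
To see the double-include trick in miniature, consider a header that generates a width-specific function from whatever macros are in effect; all names below are invented for illustration:

	/* sum.h -- generates sum32() or sum64() depending on SUM_WIDE */
	#undef sum_func
	#undef val_t
	#ifdef SUM_WIDE
	# define sum_func sum64
	# define val_t    uint64_t
	#else
	# define sum_func sum32
	# define val_t    uint32_t
	#endif

	static val_t sum_func(val_t const *v, int n)
	{
		val_t s = 0;
		while (n--)
			s += *v++;
		return s;
	}

Including it twice from a .c file, exactly as recordmcount.c does with this header, emits both definitions:

	#include <stdint.h>
	#include "sum.h"	/* defines sum32() */
	#define SUM_WIDE
	#include "sum.h"	/* defines sum64() */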
+
+/* Append the new shstrtab, Elf_Shdr[], __mcount_loc and its relocations. */
+static void append_func(Elf_Ehdr *const ehdr,
+ Elf_Shdr *const shstr,
+ uint_t const *const mloc0,
+ uint_t const *const mlocp,
+ Elf_Rel const *const mrel0,
+ Elf_Rel const *const mrelp,
+ unsigned int const rel_entsize,
+ unsigned int const symsec_sh_link)
+{
+ /* Begin constructing output file */
+ Elf_Shdr mcsec;
+ char const *mc_name = (sizeof(Elf_Rela) == rel_entsize)
+ ? ".rela__mcount_loc"
+ : ".rel__mcount_loc";
+ unsigned const old_shnum = w2(ehdr->e_shnum);
+ uint_t const old_shoff = _w(ehdr->e_shoff);
+ uint_t const old_shstr_sh_size = _w(shstr->sh_size);
+ uint_t const old_shstr_sh_offset = _w(shstr->sh_offset);
+ uint_t t = 1 + strlen(mc_name) + _w(shstr->sh_size);
+ uint_t new_e_shoff;
+
+ shstr->sh_size = _w(t);
+ shstr->sh_offset = _w(sb.st_size);
+ t += sb.st_size;
+ t += (_align & -t); /* word-byte align */
+ new_e_shoff = t;
+
+ /* body for new shstrtab */
+ ulseek(fd_map, sb.st_size, SEEK_SET);
+ uwrite(fd_map, old_shstr_sh_offset + (void *)ehdr, old_shstr_sh_size);
+ uwrite(fd_map, mc_name, 1 + strlen(mc_name));
+
+ /* old(modified) Elf_Shdr table, word-byte aligned */
+ ulseek(fd_map, t, SEEK_SET);
+ t += sizeof(Elf_Shdr) * old_shnum;
+ uwrite(fd_map, old_shoff + (void *)ehdr,
+ sizeof(Elf_Shdr) * old_shnum);
+
+ /* new sections __mcount_loc and .rel__mcount_loc */
+ t += 2*sizeof(mcsec);
+ mcsec.sh_name = w((sizeof(Elf_Rela) == rel_entsize) + strlen(".rel")
+ + old_shstr_sh_size);
+ mcsec.sh_type = w(SHT_PROGBITS);
+ mcsec.sh_flags = _w(SHF_ALLOC);
+ mcsec.sh_addr = 0;
+ mcsec.sh_offset = _w(t);
+ mcsec.sh_size = _w((void *)mlocp - (void *)mloc0);
+ mcsec.sh_link = 0;
+ mcsec.sh_info = 0;
+ mcsec.sh_addralign = _w(_size);
+ mcsec.sh_entsize = _w(_size);
+ uwrite(fd_map, &mcsec, sizeof(mcsec));
+
+ mcsec.sh_name = w(old_shstr_sh_size);
+ mcsec.sh_type = (sizeof(Elf_Rela) == rel_entsize)
+ ? w(SHT_RELA)
+ : w(SHT_REL);
+ mcsec.sh_flags = 0;
+ mcsec.sh_addr = 0;
+ mcsec.sh_offset = _w((void *)mlocp - (void *)mloc0 + t);
+ mcsec.sh_size = _w((void *)mrelp - (void *)mrel0);
+ mcsec.sh_link = w(symsec_sh_link);
+ mcsec.sh_info = w(old_shnum);
+ mcsec.sh_addralign = _w(_size);
+ mcsec.sh_entsize = _w(rel_entsize);
+ uwrite(fd_map, &mcsec, sizeof(mcsec));
+
+ uwrite(fd_map, mloc0, (void *)mlocp - (void *)mloc0);
+ uwrite(fd_map, mrel0, (void *)mrelp - (void *)mrel0);
+
+ ehdr->e_shoff = _w(new_e_shoff);
+ ehdr->e_shnum = w2(2 + w2(ehdr->e_shnum)); /* {.rel,}__mcount_loc */
+ ulseek(fd_map, 0, SEEK_SET);
+ uwrite(fd_map, ehdr, sizeof(*ehdr));
+}
+
+
+/*
+ * Look at the relocations in order to find the calls to mcount.
+ * Accumulate the section offsets that are found, and their relocation info,
+ * onto the end of the existing arrays.
+ */
+static uint_t *sift_rel_mcount(uint_t *mlocp,
+ unsigned const offbase,
+ Elf_Rel **const mrelpp,
+ Elf_Shdr const *const relhdr,
+ Elf_Ehdr const *const ehdr,
+ unsigned const recsym,
+ uint_t const recval,
+ unsigned const reltype)
+{
+ uint_t *const mloc0 = mlocp;
+ Elf_Rel *mrelp = *mrelpp;
+ Elf_Shdr *const shdr0 = (Elf_Shdr *)(_w(ehdr->e_shoff)
+ + (void *)ehdr);
+ unsigned const symsec_sh_link = w(relhdr->sh_link);
+ Elf_Shdr const *const symsec = &shdr0[symsec_sh_link];
+ Elf_Sym const *const sym0 = (Elf_Sym const *)(_w(symsec->sh_offset)
+ + (void *)ehdr);
+
+ Elf_Shdr const *const strsec = &shdr0[w(symsec->sh_link)];
+ char const *const str0 = (char const *)(_w(strsec->sh_offset)
+ + (void *)ehdr);
+
+ Elf_Rel const *const rel0 = (Elf_Rel const *)(_w(relhdr->sh_offset)
+ + (void *)ehdr);
+ unsigned rel_entsize = _w(relhdr->sh_entsize);
+ unsigned const nrel = _w(relhdr->sh_size) / rel_entsize;
+ Elf_Rel const *relp = rel0;
+
+ unsigned mcountsym = 0;
+ unsigned t;
+
+ for (t = nrel; t; --t) {
+ if (!mcountsym) {
+ Elf_Sym const *const symp =
+ &sym0[ELF_R_SYM(_w(relp->r_info))];
+ char const *symname = &str0[w(symp->st_name)];
+
+ if ('.' == symname[0])
+ ++symname; /* ppc64 hack */
+ if (0 == strcmp((('_' == gpfx) ? "_mcount" : "mcount"),
+ symname))
+ mcountsym = ELF_R_SYM(_w(relp->r_info));
+ }
+
+ if (mcountsym == ELF_R_SYM(_w(relp->r_info))) {
+ uint_t const addend = _w(_w(relp->r_offset) - recval);
+
+ mrelp->r_offset = _w(offbase
+ + ((void *)mlocp - (void *)mloc0));
+ mrelp->r_info = _w(ELF_R_INFO(recsym, reltype));
+ if (sizeof(Elf_Rela) == rel_entsize) {
+ ((Elf_Rela *)mrelp)->r_addend = addend;
+ *mlocp++ = 0;
+ } else
+ *mlocp++ = addend;
+
+ mrelp = (Elf_Rel *)(rel_entsize + (void *)mrelp);
+ }
+ relp = (Elf_Rel const *)(rel_entsize + (void *)relp);
+ }
+ *mrelpp = mrelp;
+ return mlocp;
+}
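+
The addend computed above is simply the distance from the section's anchor symbol to the mcount call site, so that when the linker later applies the relocation (anchor address plus addend) the __mcount_loc entry resolves to the call site wherever the section lands. A toy calculation with invented values:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t recval   = 0x0;	/* value of the section's anchor symbol */
		uint64_t r_offset = 0x42;	/* where the call to mcount was found */

		/* the table entry records the call site relative to the anchor */
		uint64_t addend = r_offset - recval;
		assert(addend == 0x42);
		return 0;
	}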
+
+
+/*
+ * Find a symbol in the given section, to be used as the base for relocating
+ * the table of offsets of calls to mcount. A local or global symbol suffices,
+ * but avoid a weak symbol because it may be overridden; the change in value
+ * would invalidate the relocations of the offsets of the calls to mcount.
+ * Often the found symbol will be the unnamed local symbol generated by
+ * GNU 'as' for the start of each section. For example:
+ * Num: Value Size Type Bind Vis Ndx Name
+ * 2: 00000000 0 SECTION LOCAL DEFAULT 1
+ */
+static unsigned find_secsym_ndx(unsigned const txtndx,
+ char const *const txtname,
+ uint_t *const recvalp,
+ Elf_Shdr const *const symhdr,
+ Elf_Ehdr const *const ehdr)
+{
+ Elf_Sym const *const sym0 = (Elf_Sym const *)(_w(symhdr->sh_offset)
+ + (void *)ehdr);
+ unsigned const nsym = _w(symhdr->sh_size) / _w(symhdr->sh_entsize);
+ Elf_Sym const *symp;
+ unsigned t;
+
+ for (symp = sym0, t = nsym; t; --t, ++symp) {
+ unsigned int const st_bind = ELF_ST_BIND(symp->st_info);
+
+ if (txtndx == w2(symp->st_shndx)
+ /* avoid STB_WEAK */
+ && (STB_LOCAL == st_bind || STB_GLOBAL == st_bind)) {
+ *recvalp = _w(symp->st_value);
+ return symp - sym0;
+ }
+ }
+ fprintf(stderr, "Cannot find symbol for section %d: %s.\n",
+ txtndx, txtname);
+ fail_file();
+}
+
+
+/* Evade ISO C restriction: no declaration after statement in has_rel_mcount. */
+static char const *
+__has_rel_mcount(Elf_Shdr const *const relhdr, /* is SHT_REL or SHT_RELA */
+ Elf_Shdr const *const shdr0,
+ char const *const shstrtab,
+ char const *const fname)
+{
+ /* .sh_info depends on .sh_type == SHT_REL[,A] */
+ Elf_Shdr const *const txthdr = &shdr0[w(relhdr->sh_info)];
+ char const *const txtname = &shstrtab[w(txthdr->sh_name)];
+
+ if (0 == strcmp("__mcount_loc", txtname)) {
+ fprintf(stderr, "warning: __mcount_loc already exists: %s\n",
+ fname);
+ succeed_file();
+ }
+ if (SHT_PROGBITS != w(txthdr->sh_type) ||
+ !is_mcounted_section_name(txtname))
+ return NULL;
+ return txtname;
+}
+
+static char const *has_rel_mcount(Elf_Shdr const *const relhdr,
+ Elf_Shdr const *const shdr0,
+ char const *const shstrtab,
+ char const *const fname)
+{
+ if (SHT_REL != w(relhdr->sh_type) && SHT_RELA != w(relhdr->sh_type))
+ return NULL;
+ return __has_rel_mcount(relhdr, shdr0, shstrtab, fname);
+}
+
+
+static unsigned tot_relsize(Elf_Shdr const *const shdr0,
+ unsigned nhdr,
+ const char *const shstrtab,
+ const char *const fname)
+{
+ unsigned totrelsz = 0;
+ Elf_Shdr const *shdrp = shdr0;
+
+ for (; nhdr; --nhdr, ++shdrp) {
+ if (has_rel_mcount(shdrp, shdr0, shstrtab, fname))
+ totrelsz += _w(shdrp->sh_size);
+ }
+ return totrelsz;
+}
+
+
+/* Overall supervision for Elf32 ET_REL file. */
+static void
+do_func(Elf_Ehdr *const ehdr, char const *const fname, unsigned const reltype)
+{
+ Elf_Shdr *const shdr0 = (Elf_Shdr *)(_w(ehdr->e_shoff)
+ + (void *)ehdr);
+ unsigned const nhdr = w2(ehdr->e_shnum);
+ Elf_Shdr *const shstr = &shdr0[w2(ehdr->e_shstrndx)];
+ char const *const shstrtab = (char const *)(_w(shstr->sh_offset)
+ + (void *)ehdr);
+
+ Elf_Shdr const *relhdr;
+ unsigned k;
+
+ /* Upper bound on space: assume all relevant relocs are for mcount. */
+ unsigned const totrelsz = tot_relsize(shdr0, nhdr, shstrtab, fname);
+ Elf_Rel *const mrel0 = umalloc(totrelsz);
+ Elf_Rel * mrelp = mrel0;
+
+ /* 2*sizeof(address) <= sizeof(Elf_Rel) */
+ uint_t *const mloc0 = umalloc(totrelsz>>1);
+ uint_t * mlocp = mloc0;
+
+ unsigned rel_entsize = 0;
+ unsigned symsec_sh_link = 0;
+
+ for (relhdr = shdr0, k = nhdr; k; --k, ++relhdr) {
+ char const *const txtname = has_rel_mcount(relhdr, shdr0,
+ shstrtab, fname);
+ if (txtname) {
+ uint_t recval = 0;
+ unsigned const recsym = find_secsym_ndx(
+ w(relhdr->sh_info), txtname, &recval,
+ &shdr0[symsec_sh_link = w(relhdr->sh_link)],
+ ehdr);
+
+ rel_entsize = _w(relhdr->sh_entsize);
+ mlocp = sift_rel_mcount(mlocp,
+ (void *)mlocp - (void *)mloc0, &mrelp,
+ relhdr, ehdr, recsym, recval, reltype);
+ }
+ }
+ if (mloc0 != mlocp) {
+ append_func(ehdr, shstr, mloc0, mlocp, mrel0, mrelp,
+ rel_entsize, symsec_sh_link);
+ }
+ free(mrel0);
+ free(mloc0);
+}
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index e90a91c..057b6b3 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -43,7 +43,7 @@
fi
# Check for git and a git repo.
- if head=`git rev-parse --verify --short HEAD 2>/dev/null`; then
+ if test -d .git && head=`git rev-parse --verify --short HEAD 2>/dev/null`; then
# If we are at a tagged commit (like "v2.6.30-rc6"), we ignore
# it, because this version is defined in the top level Makefile.
@@ -85,7 +85,7 @@
fi
# Check for mercurial and a mercurial repo.
- if hgid=`hg id 2>/dev/null`; then
+ if test -d .hg && hgid=`hg id 2>/dev/null`; then
tag=`printf '%s' "$hgid" | cut -s -d' ' -f2`
# Do we have an untagged version?
diff --git a/security/apparmor/.gitignore b/security/apparmor/.gitignore
index 0a0a99f..4d995ae 100644
--- a/security/apparmor/.gitignore
+++ b/security/apparmor/.gitignore
@@ -3,3 +3,4 @@
#
af_names.h
capability_names.h
+rlim_names.h
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index 7320331..544ff58 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -29,7 +29,7 @@
* aa_simple_write_to_buffer - common routine for getting policy from user
* @op: operation doing the user buffer copy
* @userbuf: user buffer to copy data from (NOT NULL)
- * @alloc_size: size of user buffer
+ * @alloc_size: size of user buffer (REQUIRES: @alloc_size >= @copy_size)
* @copy_size: size of data to copy from user buffer
* @pos: position write is at in the file (NOT NULL)
*
@@ -42,6 +42,8 @@
{
char *data;
+ BUG_ON(copy_size > alloc_size);
+
if (*pos != 0)
/* only writes from pos 0, that is complete writes */
return ERR_PTR(-ESPIPE);
diff --git a/security/apparmor/include/resource.h b/security/apparmor/include/resource.h
index 3c88be9..02baec7 100644
--- a/security/apparmor/include/resource.h
+++ b/security/apparmor/include/resource.h
@@ -33,8 +33,8 @@
};
int aa_map_resource(int resource);
-int aa_task_setrlimit(struct aa_profile *profile, unsigned int resource,
- struct rlimit *new_rlim);
+int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *,
+ unsigned int resource, struct rlimit *new_rlim);
void __aa_transition_rlimits(struct aa_profile *old, struct aa_profile *new);
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
index 6e85cdb..506d2ba 100644
--- a/security/apparmor/lib.c
+++ b/security/apparmor/lib.c
@@ -40,6 +40,7 @@
*ns_name = NULL;
if (name[0] == ':') {
char *split = strchr(&name[1], ':');
+ *ns_name = skip_spaces(&name[1]);
if (split) {
/* overwrite ':' with \0 */
*split = 0;
@@ -47,7 +48,6 @@
} else
/* a ns name without a following profile is allowed */
name = NULL;
- *ns_name = &name[1];
}
if (name && *name == 0)
name = NULL;
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index f73e2c2..cf1de44 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -614,7 +614,7 @@
int error = 0;
if (!unconfined(profile))
- error = aa_task_setrlimit(profile, resource, new_rlim);
+ error = aa_task_setrlimit(profile, task, resource, new_rlim);
return error;
}
diff --git a/security/apparmor/path.c b/security/apparmor/path.c
index 19358dc..8239605 100644
--- a/security/apparmor/path.c
+++ b/security/apparmor/path.c
@@ -59,8 +59,7 @@
{
struct path root, tmp;
char *res;
- int deleted, connected;
- int error = 0;
+ int connected, error = 0;
/* Get the root we want to resolve too, released below */
if (flags & PATH_CHROOT_REL) {
@@ -74,19 +73,8 @@
}
spin_lock(&dcache_lock);
- /* There is a race window between path lookup here and the
- * need to strip the " (deleted) string that __d_path applies
- * Detect the race and relookup the path
- *
- * The stripping of (deleted) is a hack that could be removed
- * with an updated __d_path
- */
- do {
- tmp = root;
- deleted = d_unlinked(path->dentry);
- res = __d_path(path, &tmp, buf, buflen);
-
- } while (deleted != d_unlinked(path->dentry));
+ tmp = root;
+ res = __d_path(path, &tmp, buf, buflen);
spin_unlock(&dcache_lock);
*name = res;
@@ -98,21 +86,17 @@
*name = buf;
goto out;
}
- if (deleted) {
- /* On some filesystems, newly allocated dentries appear to the
- * security_path hooks as a deleted dentry except without an
- * inode allocated.
- *
- * Remove the appended deleted text and return as string for
- * normal mediation, or auditing. The (deleted) string is
- * guaranteed to be added in this case, so just strip it.
- */
- buf[buflen - 11] = 0; /* - (len(" (deleted)") +\0) */
- if (path->dentry->d_inode && !(flags & PATH_MEDIATE_DELETED)) {
+ /* Handle two cases:
+ * 1. A deleted dentry when the profile does not allow mediation of
+ *    deleted objects.
+ * 2. On some filesystems, newly allocated dentries appear to the
+ *    security_path hooks as a deleted dentry but without an inode
+ *    allocated.
+ */
+ if (d_unlinked(path->dentry) && path->dentry->d_inode &&
+ !(flags & PATH_MEDIATE_DELETED)) {
error = -ENOENT;
goto out;
- }
}
/* Determine if the path is connected to the expected root */
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index 3cdc1ad..52cc865 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -1151,12 +1151,14 @@
/* released below */
ns = aa_get_namespace(root);
- write_lock(&ns->lock);
if (!name) {
/* remove namespace - can only happen if fqname[0] == ':' */
+ write_lock(&ns->parent->lock);
__remove_namespace(ns);
+ write_unlock(&ns->parent->lock);
} else {
/* remove profile */
+ write_lock(&ns->lock);
profile = aa_get_profile(__lookup_profile(&ns->base, name));
if (!profile) {
error = -ENOENT;
@@ -1165,8 +1167,8 @@
}
name = profile->base.hname;
__remove_profile(profile);
+ write_unlock(&ns->lock);
}
- write_unlock(&ns->lock);
/* don't fail removal if audit fails */
(void) audit_policy(OP_PROF_RM, GFP_KERNEL, name, info, error);
diff --git a/security/apparmor/resource.c b/security/apparmor/resource.c
index 4a368f1..a4136c1 100644
--- a/security/apparmor/resource.c
+++ b/security/apparmor/resource.c
@@ -72,6 +72,7 @@
/**
* aa_task_setrlimit - test permission to set an rlimit
* @profile - profile confining the task (NOT NULL)
+ * @task - task the resource is being set on
* @resource - the resource being set
* @new_rlim - the new resource limit (NOT NULL)
*
@@ -79,18 +80,21 @@
*
* Returns: 0 or error code if setting resource failed
*/
-int aa_task_setrlimit(struct aa_profile *profile, unsigned int resource,
- struct rlimit *new_rlim)
+int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *task,
+ unsigned int resource, struct rlimit *new_rlim)
{
int error = 0;
- if (profile->rlimits.mask & (1 << resource) &&
- new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max)
+ /* TODO: extend resource control to handle other (non current)
+ * processes. AppArmor rules currently have the implicit assumption
+ * that the task is setting the resource of the current process.
+ */
+ if ((task != current->group_leader) ||
+ (profile->rlimits.mask & (1 << resource) &&
+ new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max))
+ error = -EACCES;
- error = audit_resource(profile, resource, new_rlim->rlim_max,
- -EACCES);
-
- return error;
+ return audit_resource(profile, resource, new_rlim->rlim_max, error);
}
/**
diff --git a/security/capability.c b/security/capability.c
index 95a6599..30ae00f 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -677,7 +677,18 @@
{
}
+static int cap_secmark_relabel_packet(u32 secid)
+{
+ return 0;
+}
+static void cap_secmark_refcount_inc(void)
+{
+}
+
+static void cap_secmark_refcount_dec(void)
+{
+}
static void cap_req_classify_flow(const struct request_sock *req,
struct flowi *fl)
@@ -777,7 +788,8 @@
static int cap_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
{
- return -EOPNOTSUPP;
+ *secid = 0;
+ return 0;
}
static void cap_release_secctx(char *secdata, u32 seclen)
@@ -1018,6 +1030,9 @@
set_to_cap_if_null(ops, inet_conn_request);
set_to_cap_if_null(ops, inet_csk_clone);
set_to_cap_if_null(ops, inet_conn_established);
+ set_to_cap_if_null(ops, secmark_relabel_packet);
+ set_to_cap_if_null(ops, secmark_refcount_inc);
+ set_to_cap_if_null(ops, secmark_refcount_dec);
set_to_cap_if_null(ops, req_classify_flow);
set_to_cap_if_null(ops, tun_dev_create);
set_to_cap_if_null(ops, tun_dev_post_create);
diff --git a/security/commoncap.c b/security/commoncap.c
index 9d172e6..5e632b4 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -719,14 +719,11 @@
/**
* cap_task_setscheduler - Determine if scheduler policy change is permitted
* @p: The task to affect
- * @policy: The policy to effect
- * @lp: The parameters to the scheduling policy
*
* Determine if the requested scheduler policy change is permitted for the
* specified task, returning 0 if permission is granted, -ve if denied.
*/
-int cap_task_setscheduler(struct task_struct *p, int policy,
- struct sched_param *lp)
+int cap_task_setscheduler(struct task_struct *p)
{
return cap_safe_nice(p);
}
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 16d100d..3fbcd1d 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -35,6 +35,7 @@
#define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS)
/* set during initialization */
+extern int iint_initialized;
extern int ima_initialized;
extern int ima_used_chip;
extern char *ima_hash;
diff --git a/security/integrity/ima/ima_iint.c b/security/integrity/ima/ima_iint.c
index 7625b85..afba4ae 100644
--- a/security/integrity/ima/ima_iint.c
+++ b/security/integrity/ima/ima_iint.c
@@ -22,9 +22,10 @@
RADIX_TREE(ima_iint_store, GFP_ATOMIC);
DEFINE_SPINLOCK(ima_iint_lock);
-
static struct kmem_cache *iint_cache __read_mostly;
+int iint_initialized = 0;
+
/* ima_iint_find_get - return the iint associated with an inode
*
* ima_iint_find_get gets a reference to the iint. Caller must
@@ -141,6 +142,7 @@
iint_cache =
kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0,
SLAB_PANIC, init_once);
+ iint_initialized = 1;
return 0;
}
security_initcall(ima_iintcache_init);
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index f936413..e662b89 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -148,12 +148,14 @@
struct ima_iint_cache *iint;
int rc;
- if (!ima_initialized || !S_ISREG(inode->i_mode))
+ if (!iint_initialized || !S_ISREG(inode->i_mode))
return;
iint = ima_iint_find_get(inode);
if (!iint)
return;
mutex_lock(&iint->mutex);
+ if (!ima_initialized)
+ goto out;
rc = ima_must_measure(iint, inode, MAY_READ, FILE_CHECK);
if (rc < 0)
goto out;
@@ -213,7 +215,7 @@
struct inode *inode = file->f_dentry->d_inode;
struct ima_iint_cache *iint;
- if (!ima_initialized || !S_ISREG(inode->i_mode))
+ if (!iint_initialized || !S_ISREG(inode->i_mode))
return;
iint = ima_iint_find_get(inode);
if (!iint)
@@ -230,7 +232,7 @@
{
struct inode *inode = file->f_dentry->d_inode;
struct ima_iint_cache *iint;
- int rc;
+ int rc = 0;
if (!ima_initialized || !S_ISREG(inode->i_mode))
return 0;
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index b2b0998..60924f6 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -1272,6 +1272,7 @@
keyring_r = NULL;
me = current;
+ rcu_read_lock();
write_lock_irq(&tasklist_lock);
parent = me->real_parent;
@@ -1304,7 +1305,8 @@
goto not_permitted;
/* the keyrings must have the same UID */
- if (pcred->tgcred->session_keyring->uid != mycred->euid ||
+ if ((pcred->tgcred->session_keyring &&
+ pcred->tgcred->session_keyring->uid != mycred->euid) ||
mycred->tgcred->session_keyring->uid != mycred->euid)
goto not_permitted;
@@ -1319,6 +1321,7 @@
set_ti_thread_flag(task_thread_info(parent), TIF_NOTIFY_RESUME);
write_unlock_irq(&tasklist_lock);
+ rcu_read_unlock();
if (oldcred)
put_cred(oldcred);
return 0;
@@ -1327,6 +1330,7 @@
ret = 0;
not_permitted:
write_unlock_irq(&tasklist_lock);
+ rcu_read_unlock();
put_cred(cred);
return ret;
diff --git a/security/security.c b/security/security.c
index c53949f..b50f472 100644
--- a/security/security.c
+++ b/security/security.c
@@ -89,20 +89,12 @@
* Return true if:
* -The passed LSM is the one chosen by user at boot time,
* -or the passed LSM is configured as the default and the user did not
- * choose an alternate LSM at boot time,
- * -or there is no default LSM set and the user didn't specify a
- * specific LSM and we're the first to ask for registration permission,
- * -or the passed LSM is currently loaded.
+ * choose an alternate LSM at boot time.
* Otherwise, return false.
*/
int __init security_module_enable(struct security_operations *ops)
{
- if (!*chosen_lsm)
- strncpy(chosen_lsm, ops->name, SECURITY_NAME_MAX);
- else if (strncmp(ops->name, chosen_lsm, SECURITY_NAME_MAX))
- return 0;
-
- return 1;
+ return !strcmp(ops->name, chosen_lsm);
}
/**
@@ -786,10 +778,9 @@
return security_ops->task_setrlimit(p, resource, new_rlim);
}
-int security_task_setscheduler(struct task_struct *p,
- int policy, struct sched_param *lp)
+int security_task_setscheduler(struct task_struct *p)
{
- return security_ops->task_setscheduler(p, policy, lp);
+ return security_ops->task_setscheduler(p);
}
int security_task_getscheduler(struct task_struct *p)
@@ -1145,6 +1136,24 @@
security_ops->inet_conn_established(sk, skb);
}
+int security_secmark_relabel_packet(u32 secid)
+{
+ return security_ops->secmark_relabel_packet(secid);
+}
+EXPORT_SYMBOL(security_secmark_relabel_packet);
+
+void security_secmark_refcount_inc(void)
+{
+ security_ops->secmark_refcount_inc();
+}
+EXPORT_SYMBOL(security_secmark_refcount_inc);
+
+void security_secmark_refcount_dec(void)
+{
+ security_ops->secmark_refcount_dec();
+}
+EXPORT_SYMBOL(security_secmark_refcount_dec);
+
int security_tun_dev_create(void)
{
return security_ops->tun_dev_create();
diff --git a/security/selinux/Makefile b/security/selinux/Makefile
index 58d80f3..ad5cd76 100644
--- a/security/selinux/Makefile
+++ b/security/selinux/Makefile
@@ -2,25 +2,20 @@
# Makefile for building the SELinux module as part of the kernel tree.
#
-obj-$(CONFIG_SECURITY_SELINUX) := selinux.o ss/
+obj-$(CONFIG_SECURITY_SELINUX) := selinux.o
-selinux-y := avc.o \
- hooks.o \
- selinuxfs.o \
- netlink.o \
- nlmsgtab.o \
- netif.o \
- netnode.o \
- netport.o \
- exports.o
+selinux-y := avc.o hooks.o selinuxfs.o netlink.o nlmsgtab.o netif.o \
+ netnode.o netport.o exports.o \
+ ss/ebitmap.o ss/hashtab.o ss/symtab.o ss/sidtab.o ss/avtab.o \
+ ss/policydb.o ss/services.o ss/conditional.o ss/mls.o ss/status.o
selinux-$(CONFIG_SECURITY_NETWORK_XFRM) += xfrm.o
selinux-$(CONFIG_NETLABEL) += netlabel.o
-EXTRA_CFLAGS += -Isecurity/selinux -Isecurity/selinux/include
+ccflags-y := -Isecurity/selinux -Isecurity/selinux/include
-$(obj)/avc.o: $(obj)/flask.h
+$(addprefix $(obj)/,$(selinux-y)): $(obj)/flask.h
quiet_cmd_flask = GEN $(obj)/flask.h $(obj)/av_permissions.h
cmd_flask = scripts/selinux/genheaders/genheaders $(obj)/flask.h $(obj)/av_permissions.h
diff --git a/security/selinux/exports.c b/security/selinux/exports.c
index c0a454a..9066438 100644
--- a/security/selinux/exports.c
+++ b/security/selinux/exports.c
@@ -11,58 +11,9 @@
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*/
-#include <linux/types.h>
-#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/selinux.h>
-#include <linux/fs.h>
-#include <linux/ipc.h>
-#include <asm/atomic.h>
#include "security.h"
-#include "objsec.h"
-
-/* SECMARK reference count */
-extern atomic_t selinux_secmark_refcount;
-
-int selinux_string_to_sid(char *str, u32 *sid)
-{
- if (selinux_enabled)
- return security_context_to_sid(str, strlen(str), sid);
- else {
- *sid = 0;
- return 0;
- }
-}
-EXPORT_SYMBOL_GPL(selinux_string_to_sid);
-
-int selinux_secmark_relabel_packet_permission(u32 sid)
-{
- if (selinux_enabled) {
- const struct task_security_struct *__tsec;
- u32 tsid;
-
- __tsec = current_security();
- tsid = __tsec->sid;
-
- return avc_has_perm(tsid, sid, SECCLASS_PACKET,
- PACKET__RELABELTO, NULL);
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(selinux_secmark_relabel_packet_permission);
-
-void selinux_secmark_refcount_inc(void)
-{
- atomic_inc(&selinux_secmark_refcount);
-}
-EXPORT_SYMBOL_GPL(selinux_secmark_refcount_inc);
-
-void selinux_secmark_refcount_dec(void)
-{
- atomic_dec(&selinux_secmark_refcount);
-}
-EXPORT_SYMBOL_GPL(selinux_secmark_refcount_dec);
bool selinux_is_enabled(void)
{
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 4796ddd..d9154cf 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -3354,11 +3354,11 @@
return 0;
}
-static int selinux_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp)
+static int selinux_task_setscheduler(struct task_struct *p)
{
int rc;
- rc = cap_task_setscheduler(p, policy, lp);
+ rc = cap_task_setscheduler(p);
if (rc)
return rc;
@@ -4279,6 +4279,27 @@
selinux_skb_peerlbl_sid(skb, family, &sksec->peer_sid);
}
+static int selinux_secmark_relabel_packet(u32 sid)
+{
+ const struct task_security_struct *__tsec;
+ u32 tsid;
+
+ __tsec = current_security();
+ tsid = __tsec->sid;
+
+ return avc_has_perm(tsid, sid, SECCLASS_PACKET, PACKET__RELABELTO, NULL);
+}
+
+static void selinux_secmark_refcount_inc(void)
+{
+ atomic_inc(&selinux_secmark_refcount);
+}
+
+static void selinux_secmark_refcount_dec(void)
+{
+ atomic_dec(&selinux_secmark_refcount);
+}
+
static void selinux_req_classify_flow(const struct request_sock *req,
struct flowi *fl)
{
@@ -5533,6 +5554,9 @@
.inet_conn_request = selinux_inet_conn_request,
.inet_csk_clone = selinux_inet_csk_clone,
.inet_conn_established = selinux_inet_conn_established,
+ .secmark_relabel_packet = selinux_secmark_relabel_packet,
+ .secmark_refcount_inc = selinux_secmark_refcount_inc,
+ .secmark_refcount_dec = selinux_secmark_refcount_dec,
.req_classify_flow = selinux_req_classify_flow,
.tun_dev_create = selinux_tun_dev_create,
.tun_dev_post_create = selinux_tun_dev_post_create,
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
index b4c9eb4..8858d2b 100644
--- a/security/selinux/include/classmap.h
+++ b/security/selinux/include/classmap.h
@@ -17,7 +17,7 @@
{ "compute_av", "compute_create", "compute_member",
"check_context", "load_policy", "compute_relabel",
"compute_user", "setenforce", "setbool", "setsecparam",
- "setcheckreqprot", NULL } },
+ "setcheckreqprot", "read_policy", NULL } },
{ "process",
{ "fork", "transition", "sigchld", "sigkill",
"sigstop", "signull", "signal", "ptrace", "getsched", "setsched",
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 1f7c249..671273e 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -9,6 +9,7 @@
#define _SELINUX_SECURITY_H_
#include <linux/magic.h>
+#include <linux/types.h>
#include "flask.h"
#define SECSID_NULL 0x00000000 /* unspecified SID */
@@ -82,6 +83,8 @@
int security_mls_enabled(void);
int security_load_policy(void *data, size_t len);
+int security_read_policy(void **data, ssize_t *len);
+size_t security_policydb_len(void);
int security_policycap_supported(unsigned int req_cap);
@@ -191,5 +194,25 @@
const char *security_get_initial_sid_context(u32 sid);
+/*
+ * status notifier using mmap interface
+ */
+extern struct page *selinux_kernel_status_page(void);
+
+#define SELINUX_KERNEL_STATUS_VERSION 1
+struct selinux_kernel_status {
+ u32 version; /* version number of this structure */
+ u32 sequence; /* sequence number of seqlock logic */
+ u32 enforcing; /* current setting of enforcing mode */
+ u32 policyload; /* number of times the policy was reloaded */
+ u32 deny_unknown; /* current setting of deny_unknown */
+ /*
+ * All versions > 0 of this structure support the members above.
+ */
+} __attribute__((packed));
+
+extern void selinux_status_update_setenforce(int enforcing);
+extern void selinux_status_update_policyload(int seqno);
+
#endif /* _SELINUX_SECURITY_H_ */
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 79a1bb6..87e0556 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -68,6 +68,8 @@
static struct dentry *class_dir;
static unsigned long last_class_ino;
+static char policy_opened;
+
/* global data for policy capabilities */
static struct dentry *policycap_dir;
@@ -110,6 +112,8 @@
SEL_COMPAT_NET, /* whether to use old compat network packet controls */
SEL_REJECT_UNKNOWN, /* export unknown reject handling to userspace */
SEL_DENY_UNKNOWN, /* export unknown deny handling to userspace */
+ SEL_STATUS, /* export current status using mmap() */
+ SEL_POLICY, /* allow userspace to read the in kernel policy */
SEL_INO_NEXT, /* The next inode number to use */
};
@@ -171,6 +175,7 @@
if (selinux_enforcing)
avc_ss_reset(0);
selnl_notify_setenforce(selinux_enforcing);
+ selinux_status_update_setenforce(selinux_enforcing);
}
length = count;
out:
@@ -205,6 +210,59 @@
.llseek = generic_file_llseek,
};
+static int sel_open_handle_status(struct inode *inode, struct file *filp)
+{
+ struct page *status = selinux_kernel_status_page();
+
+ if (!status)
+ return -ENOMEM;
+
+ filp->private_data = status;
+
+ return 0;
+}
+
+static ssize_t sel_read_handle_status(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct page *status = filp->private_data;
+
+ BUG_ON(!status);
+
+ return simple_read_from_buffer(buf, count, ppos,
+ page_address(status),
+ sizeof(struct selinux_kernel_status));
+}
+
+static int sel_mmap_handle_status(struct file *filp,
+ struct vm_area_struct *vma)
+{
+ struct page *status = filp->private_data;
+ unsigned long size = vma->vm_end - vma->vm_start;
+
+ BUG_ON(!status);
+
+ /* only allow mapping the single page at the start of the file */
+ if (vma->vm_pgoff > 0 || size != PAGE_SIZE)
+ return -EIO;
+ /* disallow writable mapping */
+ if (vma->vm_flags & VM_WRITE)
+ return -EPERM;
+ /* disallow mprotect() from later making the mapping writable */
+ vma->vm_flags &= ~VM_MAYWRITE;
+
+ return remap_pfn_range(vma, vma->vm_start,
+ page_to_pfn(status),
+ size, vma->vm_page_prot);
+}
+
+static const struct file_operations sel_handle_status_ops = {
+ .open = sel_open_handle_status,
+ .read = sel_read_handle_status,
+ .mmap = sel_mmap_handle_status,
+ .llseek = generic_file_llseek,
+};
+
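A user-space consumer of the new status node might look roughly like the sketch below; the mount point and the seqlock-style retry loop are assumptions inferred from the structure layout and the sequence field above, not an excerpt from libselinux:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	struct status {	/* mirrors struct selinux_kernel_status */
		uint32_t version, sequence, enforcing, policyload, deny_unknown;
	} __attribute__((packed));

	int main(void)
	{
		volatile struct status *s;
		uint32_t seq, enforcing;
		int fd = open("/selinux/status", O_RDONLY);	/* path may vary */

		if (fd < 0)
			return 1;
		s = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ, MAP_SHARED, fd, 0);
		if (s == MAP_FAILED)
			return 1;
		do {	/* retry while an update is in flight (odd sequence) */
			seq = s->sequence;
			enforcing = s->enforcing;
		} while ((seq & 1) || seq != s->sequence);
		printf("enforcing=%u\n", enforcing);
		return 0;
	}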
#ifdef CONFIG_SECURITY_SELINUX_DISABLE
static ssize_t sel_write_disable(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
@@ -296,6 +354,141 @@
.llseek = generic_file_llseek,
};
+struct policy_load_memory {
+ size_t len;
+ void *data;
+};
+
+static int sel_open_policy(struct inode *inode, struct file *filp)
+{
+ struct policy_load_memory *plm = NULL;
+ int rc;
+
+ BUG_ON(filp->private_data);
+
+ mutex_lock(&sel_mutex);
+
+ rc = task_has_security(current, SECURITY__READ_POLICY);
+ if (rc)
+ goto err;
+
+ rc = -EBUSY;
+ if (policy_opened)
+ goto err;
+
+ rc = -ENOMEM;
+ plm = kzalloc(sizeof(*plm), GFP_KERNEL);
+ if (!plm)
+ goto err;
+
+ if (i_size_read(inode) != security_policydb_len()) {
+ mutex_lock(&inode->i_mutex);
+ i_size_write(inode, security_policydb_len());
+ mutex_unlock(&inode->i_mutex);
+ }
+
+ rc = security_read_policy(&plm->data, &plm->len);
+ if (rc)
+ goto err;
+
+ policy_opened = 1;
+
+ filp->private_data = plm;
+
+ mutex_unlock(&sel_mutex);
+
+ return 0;
+err:
+ mutex_unlock(&sel_mutex);
+
+ if (plm)
+ vfree(plm->data);
+ kfree(plm);
+ return rc;
+}
+
+static int sel_release_policy(struct inode *inode, struct file *filp)
+{
+ struct policy_load_memory *plm = filp->private_data;
+
+ BUG_ON(!plm);
+
+ policy_opened = 0;
+
+ vfree(plm->data);
+ kfree(plm);
+
+ return 0;
+}
+
+static ssize_t sel_read_policy(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct policy_load_memory *plm = filp->private_data;
+ int ret;
+
+ mutex_lock(&sel_mutex);
+
+ ret = task_has_security(current, SECURITY__READ_POLICY);
+ if (ret)
+ goto out;
+
+ ret = simple_read_from_buffer(buf, count, ppos, plm->data, plm->len);
+out:
+ mutex_unlock(&sel_mutex);
+ return ret;
+}
+
+static int sel_mmap_policy_fault(struct vm_area_struct *vma,
+ struct vm_fault *vmf)
+{
+ struct policy_load_memory *plm = vma->vm_file->private_data;
+ unsigned long offset;
+ struct page *page;
+
+ if (vmf->flags & (FAULT_FLAG_MKWRITE | FAULT_FLAG_WRITE))
+ return VM_FAULT_SIGBUS;
+
+ offset = vmf->pgoff << PAGE_SHIFT;
+ if (offset >= roundup(plm->len, PAGE_SIZE))
+ return VM_FAULT_SIGBUS;
+
+ page = vmalloc_to_page(plm->data + offset);
+ get_page(page);
+
+ vmf->page = page;
+
+ return 0;
+}
+
+static struct vm_operations_struct sel_mmap_policy_ops = {
+ .fault = sel_mmap_policy_fault,
+ .page_mkwrite = sel_mmap_policy_fault,
+};
+
+int sel_mmap_policy(struct file *filp, struct vm_area_struct *vma)
+{
+ if (vma->vm_flags & VM_SHARED) {
+ /* do not allow mprotect to make mapping writable */
+ vma->vm_flags &= ~VM_MAYWRITE;
+
+ if (vma->vm_flags & VM_WRITE)
+ return -EACCES;
+ }
+
+ vma->vm_flags |= VM_RESERVED;
+ vma->vm_ops = &sel_mmap_policy_ops;
+
+ return 0;
+}
+
+static const struct file_operations sel_policy_ops = {
+ .open = sel_open_policy,
+ .read = sel_read_policy,
+ .mmap = sel_mmap_policy,
+ .release = sel_release_policy,
+};
+
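With the new node in place, dumping the running policy is just a read of the file; something along these lines should do (the path again depends on where selinuxfs is mounted):

	#include <stdio.h>

	int main(void)
	{
		FILE *in = fopen("/selinux/policy", "rb");
		FILE *out = fopen("policy.bin", "wb");
		char buf[4096];
		size_t n;

		if (!in || !out)
			return 1;
		while ((n = fread(buf, 1, sizeof(buf), in)) > 0)
			fwrite(buf, 1, n, out);
		fclose(out);
		fclose(in);
		return 0;
	}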
static ssize_t sel_write_load(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
@@ -1612,6 +1805,8 @@
[SEL_CHECKREQPROT] = {"checkreqprot", &sel_checkreqprot_ops, S_IRUGO|S_IWUSR},
[SEL_REJECT_UNKNOWN] = {"reject_unknown", &sel_handle_unknown_ops, S_IRUGO},
[SEL_DENY_UNKNOWN] = {"deny_unknown", &sel_handle_unknown_ops, S_IRUGO},
+ [SEL_STATUS] = {"status", &sel_handle_status_ops, S_IRUGO},
+ [SEL_POLICY] = {"policy", &sel_policy_ops, S_IRUSR},
/* last one */ {""}
};
ret = simple_fill_super(sb, SELINUX_MAGIC, selinux_files);
diff --git a/security/selinux/ss/Makefile b/security/selinux/ss/Makefile
deleted file mode 100644
index 15d4e62..0000000
--- a/security/selinux/ss/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# Makefile for building the SELinux security server as part of the kernel tree.
-#
-
-EXTRA_CFLAGS += -Isecurity/selinux -Isecurity/selinux/include
-obj-y := ss.o
-
-ss-y := ebitmap.o hashtab.o symtab.o sidtab.o avtab.o policydb.o services.o conditional.o mls.o
-
diff --git a/security/selinux/ss/avtab.c b/security/selinux/ss/avtab.c
index 929480c..a3dd9fa 100644
--- a/security/selinux/ss/avtab.c
+++ b/security/selinux/ss/avtab.c
@@ -266,8 +266,8 @@
if (shift > 2)
shift = shift - 2;
nslot = 1 << shift;
- if (nslot > MAX_AVTAB_SIZE)
- nslot = MAX_AVTAB_SIZE;
+ if (nslot > MAX_AVTAB_HASH_BUCKETS)
+ nslot = MAX_AVTAB_HASH_BUCKETS;
mask = nslot - 1;
h->htable = kcalloc(nslot, sizeof(*(h->htable)), GFP_KERNEL);
@@ -501,6 +501,48 @@
goto out;
}
+int avtab_write_item(struct policydb *p, struct avtab_node *cur, void *fp)
+{
+ __le16 buf16[4];
+ __le32 buf32[1];
+ int rc;
+
+ buf16[0] = cpu_to_le16(cur->key.source_type);
+ buf16[1] = cpu_to_le16(cur->key.target_type);
+ buf16[2] = cpu_to_le16(cur->key.target_class);
+ buf16[3] = cpu_to_le16(cur->key.specified);
+ rc = put_entry(buf16, sizeof(u16), 4, fp);
+ if (rc)
+ return rc;
+ buf32[0] = cpu_to_le32(cur->datum.data);
+ rc = put_entry(buf32, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ return 0;
+}
+
+int avtab_write(struct policydb *p, struct avtab *a, void *fp)
+{
+ unsigned int i;
+ int rc = 0;
+ struct avtab_node *cur;
+ __le32 buf[1];
+
+ buf[0] = cpu_to_le32(a->nel);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < a->nslot; i++) {
+ for (cur = a->htable[i]; cur; cur = cur->next) {
+ rc = avtab_write_item(p, cur, fp);
+ if (rc)
+ return rc;
+ }
+ }
+
+ return rc;
+}
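+
Every write path in this series funnels through cpu_to_leXX() plus put_entry(), so the on-disk policy image is little-endian regardless of host byte order. The same idea in plain user-space C, as an illustrative helper rather than the kernel's put_entry():

	#include <stdint.h>
	#include <stdio.h>

	/* emit a 32-bit value in little-endian order on any host */
	static int put_le32(uint32_t v, FILE *fp)
	{
		unsigned char b[4] = {
			v & 0xff, (v >> 8) & 0xff,
			(v >> 16) & 0xff, (v >> 24) & 0xff,
		};
		return fwrite(b, 1, sizeof(b), fp) == sizeof(b) ? 0 : -1;
	}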
void avtab_cache_init(void)
{
avtab_node_cachep = kmem_cache_create("avtab_node",
diff --git a/security/selinux/ss/avtab.h b/security/selinux/ss/avtab.h
index cd4f734..dff0c75 100644
--- a/security/selinux/ss/avtab.h
+++ b/security/selinux/ss/avtab.h
@@ -71,6 +71,8 @@
void *p);
int avtab_read(struct avtab *a, void *fp, struct policydb *pol);
+int avtab_write_item(struct policydb *p, struct avtab_node *cur, void *fp);
+int avtab_write(struct policydb *p, struct avtab *a, void *fp);
struct avtab_node *avtab_insert_nonunique(struct avtab *h, struct avtab_key *key,
struct avtab_datum *datum);
@@ -85,7 +87,6 @@
#define MAX_AVTAB_HASH_BITS 11
#define MAX_AVTAB_HASH_BUCKETS (1 << MAX_AVTAB_HASH_BITS)
#define MAX_AVTAB_HASH_MASK (MAX_AVTAB_HASH_BUCKETS-1)
-#define MAX_AVTAB_SIZE MAX_AVTAB_HASH_BUCKETS
#endif /* _SS_AVTAB_H_ */
diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c
index c91e150..655fe1c 100644
--- a/security/selinux/ss/conditional.c
+++ b/security/selinux/ss/conditional.c
@@ -490,6 +490,129 @@
return rc;
}
+int cond_write_bool(void *vkey, void *datum, void *ptr)
+{
+ char *key = vkey;
+ struct cond_bool_datum *booldatum = datum;
+ struct policy_data *pd = ptr;
+ void *fp = pd->fp;
+ __le32 buf[3];
+ u32 len;
+ int rc;
+
+ len = strlen(key);
+ buf[0] = cpu_to_le32(booldatum->value);
+ buf[1] = cpu_to_le32(booldatum->state);
+ buf[2] = cpu_to_le32(len);
+ rc = put_entry(buf, sizeof(u32), 3, fp);
+ if (rc)
+ return rc;
+ rc = put_entry(key, 1, len, fp);
+ if (rc)
+ return rc;
+ return 0;
+}
+
+/*
+ * cond_write_av_list doesn't write out the av_list nodes.
+ * Instead it writes out the key/value pairs from the avtab. This
+ * is necessary because there is no way to uniquely identify rules
+ * in the avtab, so it is not possible to associate individual rules
+ * in the avtab with a conditional without saving them as part of
+ * the conditional. This means that the avtab with the conditional
+ * rules will not be saved but will be rebuilt on policy load.
+ */
+static int cond_write_av_list(struct policydb *p,
+ struct cond_av_list *list, struct policy_file *fp)
+{
+ __le32 buf[1];
+ struct cond_av_list *cur_list;
+ u32 len;
+ int rc;
+
+ len = 0;
+ for (cur_list = list; cur_list != NULL; cur_list = cur_list->next)
+ len++;
+
+ buf[0] = cpu_to_le32(len);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ if (len == 0)
+ return 0;
+
+ for (cur_list = list; cur_list != NULL; cur_list = cur_list->next) {
+ rc = avtab_write_item(p, cur_list->node, fp);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+int cond_write_node(struct policydb *p, struct cond_node *node,
+ struct policy_file *fp)
+{
+ struct cond_expr *cur_expr;
+ __le32 buf[2];
+ int rc;
+ u32 len = 0;
+
+ buf[0] = cpu_to_le32(node->cur_state);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ for (cur_expr = node->expr; cur_expr != NULL; cur_expr = cur_expr->next)
+ len++;
+
+ buf[0] = cpu_to_le32(len);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ for (cur_expr = node->expr; cur_expr != NULL; cur_expr = cur_expr->next) {
+ buf[0] = cpu_to_le32(cur_expr->expr_type);
+ buf[1] = cpu_to_le32(cur_expr->bool);
+ rc = put_entry(buf, sizeof(u32), 2, fp);
+ if (rc)
+ return rc;
+ }
+
+ rc = cond_write_av_list(p, node->true_list, fp);
+ if (rc)
+ return rc;
+ rc = cond_write_av_list(p, node->false_list, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+int cond_write_list(struct policydb *p, struct cond_node *list, void *fp)
+{
+ struct cond_node *cur;
+ u32 len;
+ __le32 buf[1];
+ int rc;
+
+ len = 0;
+ for (cur = list; cur != NULL; cur = cur->next)
+ len++;
+ buf[0] = cpu_to_le32(len);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ for (cur = list; cur != NULL; cur = cur->next) {
+ rc = cond_write_node(p, cur, fp);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
/* Determine whether additional permissions are granted by the conditional
* av table, and if so, add them to the result
*/
diff --git a/security/selinux/ss/conditional.h b/security/selinux/ss/conditional.h
index 53ddb01..3f209c6 100644
--- a/security/selinux/ss/conditional.h
+++ b/security/selinux/ss/conditional.h
@@ -69,6 +69,8 @@
int cond_read_bool(struct policydb *p, struct hashtab *h, void *fp);
int cond_read_list(struct policydb *p, void *fp);
+int cond_write_bool(void *key, void *datum, void *ptr);
+int cond_write_list(struct policydb *p, struct cond_node *list, void *fp);
void cond_compute_av(struct avtab *ctab, struct avtab_key *key, struct av_decision *avd);
diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
index 04b6145..d42951f 100644
--- a/security/selinux/ss/ebitmap.c
+++ b/security/selinux/ss/ebitmap.c
@@ -22,6 +22,8 @@
#include "ebitmap.h"
#include "policydb.h"
+#define BITS_PER_U64 (sizeof(u64) * 8)
+
int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2)
{
struct ebitmap_node *n1, *n2;
@@ -363,10 +365,10 @@
e->highbit = le32_to_cpu(buf[1]);
count = le32_to_cpu(buf[2]);
- if (mapunit != sizeof(u64) * 8) {
+ if (mapunit != BITS_PER_U64) {
printk(KERN_ERR "SELinux: ebitmap: map size %u does not "
"match my size %Zd (high bit was %d)\n",
- mapunit, sizeof(u64) * 8, e->highbit);
+ mapunit, BITS_PER_U64, e->highbit);
goto bad;
}
@@ -446,3 +448,78 @@
ebitmap_destroy(e);
goto out;
}
+
+int ebitmap_write(struct ebitmap *e, void *fp)
+{
+ struct ebitmap_node *n;
+ u32 count;
+ __le32 buf[3];
+ u64 map;
+ int bit, last_bit, last_startbit, rc;
+
+ buf[0] = cpu_to_le32(BITS_PER_U64);
+
+ count = 0;
+ last_bit = 0;
+ last_startbit = -1;
+ ebitmap_for_each_positive_bit(e, n, bit) {
+ if (rounddown(bit, (int)BITS_PER_U64) > last_startbit) {
+ count++;
+ last_startbit = rounddown(bit, BITS_PER_U64);
+ }
+ last_bit = roundup(bit + 1, BITS_PER_U64);
+ }
+ buf[1] = cpu_to_le32(last_bit);
+ buf[2] = cpu_to_le32(count);
+
+ rc = put_entry(buf, sizeof(u32), 3, fp);
+ if (rc)
+ return rc;
+
+ map = 0;
+ last_startbit = INT_MIN;
+ ebitmap_for_each_positive_bit(e, n, bit) {
+ if (rounddown(bit, (int)BITS_PER_U64) > last_startbit) {
+ __le64 buf64[1];
+
+ /* this is the very first bit */
+ if (!map) {
+ last_startbit = rounddown(bit, BITS_PER_U64);
+ map = (u64)1 << (bit - last_startbit);
+ continue;
+ }
+
+ /* write the last node */
+ buf[0] = cpu_to_le32(last_startbit);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ buf64[0] = cpu_to_le64(map);
+ rc = put_entry(buf64, sizeof(u64), 1, fp);
+ if (rc)
+ return rc;
+
+ /* set up for the next node */
+ map = 0;
+ last_startbit = rounddown(bit, BITS_PER_U64);
+ }
+ map |= (u64)1 << (bit - last_startbit);
+ }
+ /* write the last node */
+ if (map) {
+ __le64 buf64[1];
+
+ /* write the last node */
+ buf[0] = cpu_to_le32(last_startbit);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ buf64[0] = cpu_to_le64(map);
+ rc = put_entry(buf64, sizeof(u64), 1, fp);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
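+
The two passes above rely on rounddown()/roundup() to group bit positions into 64-bit map words: a node is emitted whenever a set bit falls past the current word's start. With plain macros standing in for the kernel helpers, the arithmetic looks like this:

	#include <assert.h>

	#define ROUNDDOWN(x, y)	((x) / (y) * (y))
	#define ROUNDUP(x, y)	(((x) + (y) - 1) / (y) * (y))

	int main(void)
	{
		/* bits 0..63 share start 0; bit 64 opens a new map word */
		assert(ROUNDDOWN(63, 64) == 0);
		assert(ROUNDDOWN(64, 64) == 64);
		/* the recorded highbit for bit 70 is the end of its word */
		assert(ROUNDUP(70 + 1, 64) == 128);
		return 0;
	}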
diff --git a/security/selinux/ss/ebitmap.h b/security/selinux/ss/ebitmap.h
index f283b43..1f4e93c 100644
--- a/security/selinux/ss/ebitmap.h
+++ b/security/selinux/ss/ebitmap.h
@@ -123,6 +123,7 @@
int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value);
void ebitmap_destroy(struct ebitmap *e);
int ebitmap_read(struct ebitmap *e, void *fp);
+int ebitmap_write(struct ebitmap *e, void *fp);
#ifdef CONFIG_NETLABEL
int ebitmap_netlbl_export(struct ebitmap *ebmap,
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 3a29704..94f630d 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -37,6 +37,7 @@
#include "policydb.h"
#include "conditional.h"
#include "mls.h"
+#include "services.h"
#define _DEBUG_HASHES
@@ -185,9 +186,19 @@
static int rangetr_cmp(struct hashtab *h, const void *k1, const void *k2)
{
const struct range_trans *key1 = k1, *key2 = k2;
- return (key1->source_type != key2->source_type ||
- key1->target_type != key2->target_type ||
- key1->target_class != key2->target_class);
+ int v;
+
+ v = key1->source_type - key2->source_type;
+ if (v)
+ return v;
+
+ v = key1->target_type - key2->target_type;
+ if (v)
+ return v;
+
+ v = key1->target_class - key2->target_class;
+
+ return v;
}
/*
@@ -1624,11 +1635,11 @@
static int type_bounds_sanity_check(void *key, void *datum, void *datap)
{
- struct type_datum *upper, *type;
+ struct type_datum *upper;
struct policydb *p = datap;
int depth = 0;
- upper = type = datum;
+ upper = datum;
while (upper->bounds) {
if (++depth == POLICYDB_BOUNDS_MAXDEPTH) {
printk(KERN_ERR "SELinux: type %s: "
@@ -2306,3 +2317,843 @@
policydb_destroy(p);
goto out;
}
+
+/*
+ * Write an MLS level structure to a policydb binary
+ * representation file.
+ */
+static int mls_write_level(struct mls_level *l, void *fp)
+{
+ __le32 buf[1];
+ int rc;
+
+ buf[0] = cpu_to_le32(l->sens);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ rc = ebitmap_write(&l->cat, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+/*
+ * Write an MLS range structure to a policydb binary
+ * representation file.
+ */
+static int mls_write_range_helper(struct mls_range *r, void *fp)
+{
+ __le32 buf[3];
+ size_t items;
+ int rc, eq;
+
+ eq = mls_level_eq(&r->level[1], &r->level[0]);
+
+ if (eq)
+ items = 2;
+ else
+ items = 3;
+ buf[0] = cpu_to_le32(items-1);
+ buf[1] = cpu_to_le32(r->level[0].sens);
+ if (!eq)
+ buf[2] = cpu_to_le32(r->level[1].sens);
+
+ BUG_ON(items > (sizeof(buf)/sizeof(buf[0])));
+
+ rc = put_entry(buf, sizeof(u32), items, fp);
+ if (rc)
+ return rc;
+
+ rc = ebitmap_write(&r->level[0].cat, fp);
+ if (rc)
+ return rc;
+ if (!eq) {
+ rc = ebitmap_write(&r->level[1].cat, fp);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int sens_write(void *vkey, void *datum, void *ptr)
+{
+ char *key = vkey;
+ struct level_datum *levdatum = datum;
+ struct policy_data *pd = ptr;
+ void *fp = pd->fp;
+ __le32 buf[2];
+ size_t len;
+ int rc;
+
+ len = strlen(key);
+ buf[0] = cpu_to_le32(len);
+ buf[1] = cpu_to_le32(levdatum->isalias);
+ rc = put_entry(buf, sizeof(u32), 2, fp);
+ if (rc)
+ return rc;
+
+ rc = put_entry(key, 1, len, fp);
+ if (rc)
+ return rc;
+
+ rc = mls_write_level(levdatum->level, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int cat_write(void *vkey, void *datum, void *ptr)
+{
+ char *key = vkey;
+ struct cat_datum *catdatum = datum;
+ struct policy_data *pd = ptr;
+ void *fp = pd->fp;
+ __le32 buf[3];
+ size_t len;
+ int rc;
+
+ len = strlen(key);
+ buf[0] = cpu_to_le32(len);
+ buf[1] = cpu_to_le32(catdatum->value);
+ buf[2] = cpu_to_le32(catdatum->isalias);
+ rc = put_entry(buf, sizeof(u32), 3, fp);
+ if (rc)
+ return rc;
+
+ rc = put_entry(key, 1, len, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int role_trans_write(struct role_trans *r, void *fp)
+{
+ struct role_trans *tr;
+ u32 buf[3];
+ size_t nel;
+ int rc;
+
+ nel = 0;
+ for (tr = r; tr; tr = tr->next)
+ nel++;
+ buf[0] = cpu_to_le32(nel);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ for (tr = r; tr; tr = tr->next) {
+ buf[0] = cpu_to_le32(tr->role);
+ buf[1] = cpu_to_le32(tr->type);
+ buf[2] = cpu_to_le32(tr->new_role);
+ rc = put_entry(buf, sizeof(u32), 3, fp);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int role_allow_write(struct role_allow *r, void *fp)
+{
+ struct role_allow *ra;
+ u32 buf[2];
+ size_t nel;
+ int rc;
+
+ nel = 0;
+ for (ra = r; ra; ra = ra->next)
+ nel++;
+ buf[0] = cpu_to_le32(nel);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ for (ra = r; ra; ra = ra->next) {
+ buf[0] = cpu_to_le32(ra->role);
+ buf[1] = cpu_to_le32(ra->new_role);
+ rc = put_entry(buf, sizeof(u32), 2, fp);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+/*
+ * Write a security context structure
+ * to a policydb binary representation file.
+ */
+static int context_write(struct policydb *p, struct context *c,
+ void *fp)
+{
+ int rc;
+ __le32 buf[3];
+
+ buf[0] = cpu_to_le32(c->user);
+ buf[1] = cpu_to_le32(c->role);
+ buf[2] = cpu_to_le32(c->type);
+
+ rc = put_entry(buf, sizeof(u32), 3, fp);
+ if (rc)
+ return rc;
+
+ rc = mls_write_range_helper(&c->range, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+/*
+ * The following *_write functions are used to
+ * write the symbol data to a policy database
+ * binary representation file.
+ */
+
+static int perm_write(void *vkey, void *datum, void *fp)
+{
+ char *key = vkey;
+ struct perm_datum *perdatum = datum;
+ __le32 buf[2];
+ size_t len;
+ int rc;
+
+ len = strlen(key);
+ buf[0] = cpu_to_le32(len);
+ buf[1] = cpu_to_le32(perdatum->value);
+ rc = put_entry(buf, sizeof(u32), 2, fp);
+ if (rc)
+ return rc;
+
+ rc = put_entry(key, 1, len, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int common_write(void *vkey, void *datum, void *ptr)
+{
+ char *key = vkey;
+ struct common_datum *comdatum = datum;
+ struct policy_data *pd = ptr;
+ void *fp = pd->fp;
+ __le32 buf[4];
+ size_t len;
+ int rc;
+
+ len = strlen(key);
+ buf[0] = cpu_to_le32(len);
+ buf[1] = cpu_to_le32(comdatum->value);
+ buf[2] = cpu_to_le32(comdatum->permissions.nprim);
+ buf[3] = cpu_to_le32(comdatum->permissions.table->nel);
+ rc = put_entry(buf, sizeof(u32), 4, fp);
+ if (rc)
+ return rc;
+
+ rc = put_entry(key, 1, len, fp);
+ if (rc)
+ return rc;
+
+ rc = hashtab_map(comdatum->permissions.table, perm_write, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int write_cons_helper(struct policydb *p, struct constraint_node *node,
+ void *fp)
+{
+ struct constraint_node *c;
+ struct constraint_expr *e;
+ __le32 buf[3];
+ u32 nel;
+ int rc;
+
+ for (c = node; c; c = c->next) {
+ nel = 0;
+ for (e = c->expr; e; e = e->next)
+ nel++;
+ buf[0] = cpu_to_le32(c->permissions);
+ buf[1] = cpu_to_le32(nel);
+ rc = put_entry(buf, sizeof(u32), 2, fp);
+ if (rc)
+ return rc;
+ for (e = c->expr; e; e = e->next) {
+ buf[0] = cpu_to_le32(e->expr_type);
+ buf[1] = cpu_to_le32(e->attr);
+ buf[2] = cpu_to_le32(e->op);
+ rc = put_entry(buf, sizeof(u32), 3, fp);
+ if (rc)
+ return rc;
+
+ switch (e->expr_type) {
+ case CEXPR_NAMES:
+ rc = ebitmap_write(&e->names, fp);
+ if (rc)
+ return rc;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int class_write(void *vkey, void *datum, void *ptr)
+{
+ char *key = vkey;
+ struct class_datum *cladatum = datum;
+ struct policy_data *pd = ptr;
+ void *fp = pd->fp;
+ struct policydb *p = pd->p;
+ struct constraint_node *c;
+ __le32 buf[6];
+ u32 ncons;
+ size_t len, len2;
+ int rc;
+
+ len = strlen(key);
+ if (cladatum->comkey)
+ len2 = strlen(cladatum->comkey);
+ else
+ len2 = 0;
+
+ ncons = 0;
+ for (c = cladatum->constraints; c; c = c->next)
+ ncons++;
+
+ buf[0] = cpu_to_le32(len);
+ buf[1] = cpu_to_le32(len2);
+ buf[2] = cpu_to_le32(cladatum->value);
+ buf[3] = cpu_to_le32(cladatum->permissions.nprim);
+ if (cladatum->permissions.table)
+ buf[4] = cpu_to_le32(cladatum->permissions.table->nel);
+ else
+ buf[4] = 0;
+ buf[5] = cpu_to_le32(ncons);
+ rc = put_entry(buf, sizeof(u32), 6, fp);
+ if (rc)
+ return rc;
+
+ rc = put_entry(key, 1, len, fp);
+ if (rc)
+ return rc;
+
+ if (cladatum->comkey) {
+ rc = put_entry(cladatum->comkey, 1, len2, fp);
+ if (rc)
+ return rc;
+ }
+
+ rc = hashtab_map(cladatum->permissions.table, perm_write, fp);
+ if (rc)
+ return rc;
+
+ rc = write_cons_helper(p, cladatum->constraints, fp);
+ if (rc)
+ return rc;
+
+ /* write out the validatetrans rule */
+ ncons = 0;
+ for (c = cladatum->validatetrans; c; c = c->next)
+ ncons++;
+
+ buf[0] = cpu_to_le32(ncons);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ rc = write_cons_helper(p, cladatum->validatetrans, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int role_write(void *vkey, void *datum, void *ptr)
+{
+ char *key = vkey;
+ struct role_datum *role = datum;
+ struct policy_data *pd = ptr;
+ void *fp = pd->fp;
+ struct policydb *p = pd->p;
+ __le32 buf[3];
+ size_t items, len;
+ int rc;
+
+ len = strlen(key);
+ items = 0;
+ buf[items++] = cpu_to_le32(len);
+ buf[items++] = cpu_to_le32(role->value);
+ if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
+ buf[items++] = cpu_to_le32(role->bounds);
+
+ BUG_ON(items > (sizeof(buf)/sizeof(buf[0])));
+
+ rc = put_entry(buf, sizeof(u32), items, fp);
+ if (rc)
+ return rc;
+
+ rc = put_entry(key, 1, len, fp);
+ if (rc)
+ return rc;
+
+ rc = ebitmap_write(&role->dominates, fp);
+ if (rc)
+ return rc;
+
+ rc = ebitmap_write(&role->types, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int type_write(void *vkey, void *datum, void *ptr)
+{
+ char *key = vkey;
+ struct type_datum *typdatum = datum;
+ struct policy_data *pd = ptr;
+ struct policydb *p = pd->p;
+ void *fp = pd->fp;
+ __le32 buf[4];
+ int rc;
+ size_t items, len;
+
+ len = strlen(key);
+ items = 0;
+ buf[items++] = cpu_to_le32(len);
+ buf[items++] = cpu_to_le32(typdatum->value);
+ if (p->policyvers >= POLICYDB_VERSION_BOUNDARY) {
+ u32 properties = 0;
+
+ if (typdatum->primary)
+ properties |= TYPEDATUM_PROPERTY_PRIMARY;
+
+ if (typdatum->attribute)
+ properties |= TYPEDATUM_PROPERTY_ATTRIBUTE;
+
+ buf[items++] = cpu_to_le32(properties);
+ buf[items++] = cpu_to_le32(typdatum->bounds);
+ } else {
+ buf[items++] = cpu_to_le32(typdatum->primary);
+ }
+ BUG_ON(items > (sizeof(buf) / sizeof(buf[0])));
+ rc = put_entry(buf, sizeof(u32), items, fp);
+ if (rc)
+ return rc;
+
+ rc = put_entry(key, 1, len, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int user_write(void *vkey, void *datum, void *ptr)
+{
+ char *key = vkey;
+ struct user_datum *usrdatum = datum;
+ struct policy_data *pd = ptr;
+ struct policydb *p = pd->p;
+ void *fp = pd->fp;
+ __le32 buf[3];
+ size_t items, len;
+ int rc;
+
+ len = strlen(key);
+ items = 0;
+ buf[items++] = cpu_to_le32(len);
+ buf[items++] = cpu_to_le32(usrdatum->value);
+ if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
+ buf[items++] = cpu_to_le32(usrdatum->bounds);
+ BUG_ON(items > (sizeof(buf) / sizeof(buf[0])));
+ rc = put_entry(buf, sizeof(u32), items, fp);
+ if (rc)
+ return rc;
+
+ rc = put_entry(key, 1, len, fp);
+ if (rc)
+ return rc;
+
+ rc = ebitmap_write(&usrdatum->roles, fp);
+ if (rc)
+ return rc;
+
+ rc = mls_write_range_helper(&usrdatum->range, fp);
+ if (rc)
+ return rc;
+
+ rc = mls_write_level(&usrdatum->dfltlevel, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
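+/* Per-symbol-table writers, indexed by symbol number; the order must
+ * mirror the read_f table used by policydb_read(). */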
+static int (*write_f[SYM_NUM]) (void *key, void *datum,
+ void *datap) =
+{
+ common_write,
+ class_write,
+ role_write,
+ type_write,
+ user_write,
+ cond_write_bool,
+ sens_write,
+ cat_write,
+};
+
+static int ocontext_write(struct policydb *p, struct policydb_compat_info *info,
+ void *fp)
+{
+ unsigned int i, j, rc;
+ size_t nel, len;
+ __le32 buf[3];
+ u32 nodebuf[8];
+ struct ocontext *c;
+ for (i = 0; i < info->ocon_num; i++) {
+ nel = 0;
+ for (c = p->ocontexts[i]; c; c = c->next)
+ nel++;
+ buf[0] = cpu_to_le32(nel);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ for (c = p->ocontexts[i]; c; c = c->next) {
+ switch (i) {
+ case OCON_ISID:
+ buf[0] = cpu_to_le32(c->sid[0]);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ rc = context_write(p, &c->context[0], fp);
+ if (rc)
+ return rc;
+ break;
+ case OCON_FS:
+ case OCON_NETIF:
+ len = strlen(c->u.name);
+ buf[0] = cpu_to_le32(len);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ rc = put_entry(c->u.name, 1, len, fp);
+ if (rc)
+ return rc;
+ rc = context_write(p, &c->context[0], fp);
+ if (rc)
+ return rc;
+ rc = context_write(p, &c->context[1], fp);
+ if (rc)
+ return rc;
+ break;
+ case OCON_PORT:
+ buf[0] = cpu_to_le32(c->u.port.protocol);
+ buf[1] = cpu_to_le32(c->u.port.low_port);
+ buf[2] = cpu_to_le32(c->u.port.high_port);
+ rc = put_entry(buf, sizeof(u32), 3, fp);
+ if (rc)
+ return rc;
+ rc = context_write(p, &c->context[0], fp);
+ if (rc)
+ return rc;
+ break;
+ case OCON_NODE:
+ nodebuf[0] = c->u.node.addr; /* network order */
+ nodebuf[1] = c->u.node.mask; /* network order */
+ rc = put_entry(nodebuf, sizeof(u32), 2, fp);
+ if (rc)
+ return rc;
+ rc = context_write(p, &c->context[0], fp);
+ if (rc)
+ return rc;
+ break;
+ case OCON_FSUSE:
+ buf[0] = cpu_to_le32(c->v.behavior);
+ len = strlen(c->u.name);
+ buf[1] = cpu_to_le32(len);
+ rc = put_entry(buf, sizeof(u32), 2, fp);
+ if (rc)
+ return rc;
+ rc = put_entry(c->u.name, 1, len, fp);
+ if (rc)
+ return rc;
+ rc = context_write(p, &c->context[0], fp);
+ if (rc)
+ return rc;
+ break;
+ case OCON_NODE6:
+ for (j = 0; j < 4; j++)
+ nodebuf[j] = c->u.node6.addr[j]; /* network order */
+ for (j = 0; j < 4; j++)
+ nodebuf[j + 4] = c->u.node6.mask[j]; /* network order */
+ rc = put_entry(nodebuf, sizeof(u32), 8, fp);
+ if (rc)
+ return rc;
+ rc = context_write(p, &c->context[0], fp);
+ if (rc)
+ return rc;
+ break;
+ }
+ }
+ }
+ return 0;
+}
+
+static int genfs_write(struct policydb *p, void *fp)
+{
+ struct genfs *genfs;
+ struct ocontext *c;
+ size_t len;
+ __le32 buf[1];
+ int rc;
+
+ len = 0;
+ for (genfs = p->genfs; genfs; genfs = genfs->next)
+ len++;
+ buf[0] = cpu_to_le32(len);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ for (genfs = p->genfs; genfs; genfs = genfs->next) {
+ len = strlen(genfs->fstype);
+ buf[0] = cpu_to_le32(len);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ rc = put_entry(genfs->fstype, 1, len, fp);
+ if (rc)
+ return rc;
+ len = 0;
+ for (c = genfs->head; c; c = c->next)
+ len++;
+ buf[0] = cpu_to_le32(len);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ for (c = genfs->head; c; c = c->next) {
+ len = strlen(c->u.name);
+ buf[0] = cpu_to_le32(len);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ rc = put_entry(c->u.name, 1, len, fp);
+ if (rc)
+ return rc;
+ buf[0] = cpu_to_le32(c->v.sclass);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ rc = context_write(p, &c->context[0], fp);
+ if (rc)
+ return rc;
+ }
+ }
+ return 0;
+}
+
+static int range_count(void *key, void *data, void *ptr)
+{
+ int *cnt = ptr;
+ *cnt = *cnt + 1;
+
+ return 0;
+}
+
+static int range_write_helper(void *key, void *data, void *ptr)
+{
+ __le32 buf[2];
+ struct range_trans *rt = key;
+ struct mls_range *r = data;
+ struct policy_data *pd = ptr;
+ void *fp = pd->fp;
+ struct policydb *p = pd->p;
+ int rc;
+
+ buf[0] = cpu_to_le32(rt->source_type);
+ buf[1] = cpu_to_le32(rt->target_type);
+ rc = put_entry(buf, sizeof(u32), 2, fp);
+ if (rc)
+ return rc;
+ if (p->policyvers >= POLICYDB_VERSION_RANGETRANS) {
+ buf[0] = cpu_to_le32(rt->target_class);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ }
+ rc = mls_write_range_helper(r, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int range_write(struct policydb *p, void *fp)
+{
+ size_t nel;
+ __le32 buf[1];
+ int rc;
+ struct policy_data pd;
+
+ pd.p = p;
+ pd.fp = fp;
+
+ /* count the number of entries in the hashtab */
+ nel = 0;
+ rc = hashtab_map(p->range_tr, range_count, &nel);
+ if (rc)
+ return rc;
+
+ buf[0] = cpu_to_le32(nel);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ /* actually write all of the entries */
+ rc = hashtab_map(p->range_tr, range_write_helper, &pd);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+/*
+ * Write the configuration data in a policy database
+ * structure to a policy database binary representation
+ * file.
+ */
+int policydb_write(struct policydb *p, void *fp)
+{
+ unsigned int i, num_syms;
+ int rc;
+ __le32 buf[4];
+ u32 config;
+ size_t len;
+ struct policydb_compat_info *info;
+
+ /*
+ * Refuse to write policies older than the compressed avtab format,
+ * to keep the writer simple. Several other version checks have been
+ * dropped on the assumption that this holds throughout the writer
+ * code, so be careful if you ever try to remove this restriction.
+ */
+ if (p->policyvers < POLICYDB_VERSION_AVTAB) {
+ printk(KERN_ERR "SELinux: refusing to write policy version %d."
+ " Because it is less than version %d\n", p->policyvers,
+ POLICYDB_VERSION_AVTAB);
+ return -EINVAL;
+ }
+
+ config = 0;
+ if (p->mls_enabled)
+ config |= POLICYDB_CONFIG_MLS;
+
+ if (p->reject_unknown)
+ config |= REJECT_UNKNOWN;
+ if (p->allow_unknown)
+ config |= ALLOW_UNKNOWN;
+
+ /* Write the magic number and string identifiers. */
+ buf[0] = cpu_to_le32(POLICYDB_MAGIC);
+ len = strlen(POLICYDB_STRING);
+ buf[1] = cpu_to_le32(len);
+ rc = put_entry(buf, sizeof(u32), 2, fp);
+ if (rc)
+ return rc;
+ rc = put_entry(POLICYDB_STRING, 1, len, fp);
+ if (rc)
+ return rc;
+
+ /* Write the version, config, and table sizes. */
+ info = policydb_lookup_compat(p->policyvers);
+ if (!info) {
+ printk(KERN_ERR "SELinux: compatibility lookup failed for policy "
+ "version %d", p->policyvers);
+ return rc;
+ }
+
+ buf[0] = cpu_to_le32(p->policyvers);
+ buf[1] = cpu_to_le32(config);
+ buf[2] = cpu_to_le32(info->sym_num);
+ buf[3] = cpu_to_le32(info->ocon_num);
+
+ rc = put_entry(buf, sizeof(u32), 4, fp);
+ if (rc)
+ return rc;
+
+ if (p->policyvers >= POLICYDB_VERSION_POLCAP) {
+ rc = ebitmap_write(&p->policycaps, fp);
+ if (rc)
+ return rc;
+ }
+
+ if (p->policyvers >= POLICYDB_VERSION_PERMISSIVE) {
+ rc = ebitmap_write(&p->permissive_map, fp);
+ if (rc)
+ return rc;
+ }
+
+ num_syms = info->sym_num;
+ for (i = 0; i < num_syms; i++) {
+ struct policy_data pd;
+
+ pd.fp = fp;
+ pd.p = p;
+
+ buf[0] = cpu_to_le32(p->symtab[i].nprim);
+ buf[1] = cpu_to_le32(p->symtab[i].table->nel);
+
+ rc = put_entry(buf, sizeof(u32), 2, fp);
+ if (rc)
+ return rc;
+ rc = hashtab_map(p->symtab[i].table, write_f[i], &pd);
+ if (rc)
+ return rc;
+ }
+
+ rc = avtab_write(p, &p->te_avtab, fp);
+ if (rc)
+ return rc;
+
+ rc = cond_write_list(p, p->cond_list, fp);
+ if (rc)
+ return rc;
+
+ rc = role_trans_write(p->role_tr, fp);
+ if (rc)
+ return rc;
+
+ rc = role_allow_write(p->role_allow, fp);
+ if (rc)
+ return rc;
+
+ rc = ocontext_write(p, info, fp);
+ if (rc)
+ return rc;
+
+ rc = genfs_write(p, fp);
+ if (rc)
+ return rc;
+
+ rc = range_write(p, fp);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < p->p_types.nprim; i++) {
+ struct ebitmap *e = flex_array_get(p->type_attr_map_array, i);
+
+ BUG_ON(!e);
+ rc = ebitmap_write(e, fp);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
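
For orientation, the header that policydb_write() emits can be summarized
as below. This is a reader's-eye sketch of the on-disk layout implied by
the function above, not a structure that exists in the kernel source; all
integers are written as 32-bit little-endian values via cpu_to_le32().

	/* hypothetical sketch of the policy image header */
	struct policy_image_header {
		u32 magic;	/* POLICYDB_MAGIC */
		u32 strlen;	/* length of the identifier string */
		/* POLICYDB_STRING itself follows: strlen bytes, no NUL */
		u32 policyvers;	/* p->policyvers */
		u32 config;	/* MLS / REJECT_UNKNOWN / ALLOW_UNKNOWN bits */
		u32 sym_num;	/* number of symbol tables that follow */
		u32 ocon_num;	/* number of ocontext sections that follow */
	};
	/* then, in the order of the calls above: the policycaps and
	 * permissive ebitmaps (version-dependent), the symbol tables,
	 * the avtab, the conditional list, role transitions and allows,
	 * ocontexts, genfs entries, range transitions, and the per-type
	 * attribute bitmaps. */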
diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h
index 310e944..95d3d7d 100644
--- a/security/selinux/ss/policydb.h
+++ b/security/selinux/ss/policydb.h
@@ -254,6 +254,9 @@
struct ebitmap permissive_map;
+ /* length of this policy when it was loaded */
+ size_t len;
+
unsigned int policyvers;
unsigned int reject_unknown : 1;
@@ -270,6 +273,7 @@
extern int policydb_type_isvalid(struct policydb *p, unsigned int type);
extern int policydb_role_isvalid(struct policydb *p, unsigned int role);
extern int policydb_read(struct policydb *p, void *fp);
+extern int policydb_write(struct policydb *p, void *fp);
#define PERM_SYMTAB_SIZE 32
@@ -290,6 +294,11 @@
size_t len;
};
+struct policy_data {
+ struct policydb *p;
+ void *fp;
+};
+
static inline int next_entry(void *buf, struct policy_file *fp, size_t bytes)
{
if (bytes > fp->len)
@@ -301,6 +310,17 @@
return 0;
}
+static inline int put_entry(void *buf, size_t bytes, int num, struct policy_file *fp)
+{
+ size_t len = bytes * num;
+
+ memcpy(fp->data, buf, len);
+ fp->data += len;
+ fp->len -= len;
+
+ return 0;
+}
+
extern u16 string_to_security_class(struct policydb *p, const char *name);
extern u32 string_to_av_perm(struct policydb *p, u16 tclass, const char *name);
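
put_entry() above is the write-side mirror of next_entry(): both walk a
memory-backed struct policy_file. Note that, unlike next_entry(), it does
no bounds check; callers size the buffer from security_policydb_len()
first. As a minimal illustration, a length-prefixed record of the kind
perm_write() emits round-trips like this (a standalone user-space sketch
with toy stand-ins, not kernel code; it also assumes a little-endian host,
whereas the kernel converts explicitly with cpu_to_le32()):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* toy stand-ins for struct policy_file and put_entry() */
	struct buf { char *data; size_t len; };

	static int put(struct buf *fp, const void *p, size_t n)
	{
		if (n > fp->len)
			return -1;	/* the kernel helper omits this check */
		memcpy(fp->data, p, n);
		fp->data += n;
		fp->len -= n;
		return 0;
	}

	int main(void)
	{
		char mem[64], *base = mem;
		struct buf fp = { mem, sizeof(mem) };
		const char *key = "read";
		uint32_t rec[2] = { (uint32_t)strlen(key), 7 };	/* len, value */

		put(&fp, rec, sizeof(rec));	/* like put_entry(buf, sizeof(u32), 2, fp) */
		put(&fp, key, rec[0]);		/* like put_entry(key, 1, len, fp) */
		printf("wrote %zu bytes\n", (size_t)(fp.data - base));	/* 12 */
		return 0;
	}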
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 9ea2fec..223c1ff 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -51,6 +51,7 @@
#include <linux/mutex.h>
#include <linux/selinux.h>
#include <linux/flex_array.h>
+#include <linux/vmalloc.h>
#include <net/netlabel.h>
#include "flask.h"
@@ -991,7 +992,8 @@
{
char *scontextp;
- *scontext = NULL;
+ if (scontext)
+ *scontext = NULL;
*scontext_len = 0;
if (context->len) {
@@ -1008,6 +1010,9 @@
*scontext_len += strlen(policydb.p_type_val_to_name[context->type - 1]) + 1;
*scontext_len += mls_compute_context_len(context);
+ if (!scontext)
+ return 0;
+
/* Allocate space for the context; caller must free this space. */
scontextp = kmalloc(*scontext_len, GFP_ATOMIC);
if (!scontextp)
@@ -1047,7 +1052,8 @@
struct context *context;
int rc = 0;
- *scontext = NULL;
+ if (scontext)
+ *scontext = NULL;
*scontext_len = 0;
if (!ss_initialized) {
@@ -1055,6 +1061,8 @@
char *scontextp;
*scontext_len = strlen(initial_sid_to_string[sid]) + 1;
+ if (!scontext)
+ goto out;
scontextp = kmalloc(*scontext_len, GFP_ATOMIC);
if (!scontextp) {
rc = -ENOMEM;
@@ -1769,6 +1777,7 @@
return rc;
}
+ policydb.len = len;
rc = selinux_set_mapping(&policydb, secclass_map,
&current_mapping,
&current_mapping_size);
@@ -1791,6 +1800,7 @@
selinux_complete_init();
avc_ss_reset(seqno);
selnl_notify_policyload(seqno);
+ selinux_status_update_policyload(seqno);
selinux_netlbl_cache_invalidate();
selinux_xfrm_notify_policyload();
return 0;
@@ -1804,6 +1814,7 @@
if (rc)
return rc;
+ newpolicydb.len = len;
/* If switching between different policy types, log MLS status */
if (policydb.mls_enabled && !newpolicydb.mls_enabled)
printk(KERN_INFO "SELinux: Disabling MLS support...\n");
@@ -1870,6 +1881,7 @@
avc_ss_reset(seqno);
selnl_notify_policyload(seqno);
+ selinux_status_update_policyload(seqno);
selinux_netlbl_cache_invalidate();
selinux_xfrm_notify_policyload();
@@ -1883,6 +1895,17 @@
}
+size_t security_policydb_len(void)
+{
+ size_t len;
+
+ read_lock(&policy_rwlock);
+ len = policydb.len;
+ read_unlock(&policy_rwlock);
+
+ return len;
+}
+
/**
* security_port_sid - Obtain the SID for a port.
* @protocol: protocol number
@@ -2374,6 +2397,7 @@
if (!rc) {
avc_ss_reset(seqno);
selnl_notify_policyload(seqno);
+ selinux_status_update_policyload(seqno);
selinux_xfrm_notify_policyload();
}
return rc;
@@ -3129,3 +3153,38 @@
return rc;
}
#endif /* CONFIG_NETLABEL */
+
+/**
+ * security_read_policy - read the policy.
+ * @data: binary policy data
+ * @len: length of data in bytes
+ *
+ */
+int security_read_policy(void **data, ssize_t *len)
+{
+ int rc;
+ struct policy_file fp;
+
+ if (!ss_initialized)
+ return -EINVAL;
+
+ *len = security_policydb_len();
+
+ *data = vmalloc_user(*len);
+ if (!*data)
+ return -ENOMEM;
+
+ fp.data = *data;
+ fp.len = *len;
+
+ read_lock(&policy_rwlock);
+ rc = policydb_write(&policydb, &fp);
+ read_unlock(&policy_rwlock);
+
+ if (rc)
+ return rc;
+
+ *len = (unsigned long)fp.data - (unsigned long)*data;
+ return 0;
+
+}
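
security_read_policy() serializes into a vmalloc_user() buffer, i.e.
zeroed, page-aligned memory that can safely be mapped into user space;
elsewhere in this series that buffer backs a policy node on selinuxfs.
Assuming such a node exists at /selinux/policy and reports its size via
fstat() (both are assumptions about the surrounding plumbing, not shown
in this hunk), a user-space consumer could fetch the running policy
roughly as follows:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <sys/stat.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/selinux/policy", O_RDONLY);	/* path assumed */
		struct stat st;
		void *img;

		if (fd < 0 || fstat(fd, &st) < 0)
			return 1;
		img = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
		if (img == MAP_FAILED)
			return 1;
		/* the first word of the image is POLICYDB_MAGIC, as written
		 * by policydb_write() */
		printf("policy image: %ld bytes, magic 0x%x\n",
		       (long)st.st_size, *(unsigned int *)img);
		munmap(img, st.st_size);
		close(fd);
		return 0;
	}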
diff --git a/security/selinux/ss/status.c b/security/selinux/ss/status.c
new file mode 100644
index 0000000..d982365
--- /dev/null
+++ b/security/selinux/ss/status.c
@@ -0,0 +1,126 @@
+/*
+ * mmap based event notifications for SELinux
+ *
+ * Author: KaiGai Kohei <kaigai@ak.jp.nec.com>
+ *
+ * Copyright (C) 2010 NEC corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2,
+ * as published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include "avc.h"
+#include "services.h"
+
+/*
+ * The selinux_status_page is exposed to userspace applications
+ * through the mmap interface on /selinux/status.
+ * It lets the kernel notify applications of the few events that
+ * should invalidate their userspace access vector caches, without
+ * requiring a context switch.
+ *
+ * The selinux_kernel_status structure at the head of the status page
+ * is protected against concurrent access with seqlock logic, so
+ * userspace applications must read the status page following the
+ * same seqlock protocol.
+ *
+ * Typically, an application checks status->sequence at the start of
+ * its access control routine. If the value is odd, the kernel is
+ * updating the status and the reader should retry shortly. If it has
+ * changed since the last observed sequence number, something happened
+ * and the application should reset its userspace avc, if needed.
+ * In the common case, the application can confirm that the kernel
+ * status is unchanged without making any system calls.
+ */
+static struct page *selinux_status_page;
+static DEFINE_MUTEX(selinux_status_lock);
+
+/*
+ * selinux_kernel_status_page
+ *
+ * Returns a reference to selinux_status_page, allocating it on
+ * first use if it has not been allocated yet.
+ */
+struct page *selinux_kernel_status_page(void)
+{
+ struct selinux_kernel_status *status;
+ struct page *result = NULL;
+
+ mutex_lock(&selinux_status_lock);
+ if (!selinux_status_page) {
+ selinux_status_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
+
+ if (selinux_status_page) {
+ status = page_address(selinux_status_page);
+
+ status->version = SELINUX_KERNEL_STATUS_VERSION;
+ status->sequence = 0;
+ status->enforcing = selinux_enforcing;
+ /*
+ * NOTE: the next policyload event will set
+ * status->policyload to a positive value
+ * (not necessarily 1, but never zero), so
+ * applications can tell it was updated.
+ */
+ status->policyload = 0;
+ status->deny_unknown = !security_get_allow_unknown();
+ }
+ }
+ result = selinux_status_page;
+ mutex_unlock(&selinux_status_lock);
+
+ return result;
+}
+
+/*
+ * selinux_status_update_setenforce
+ *
+ * Updates the status page with the current enforcing/permissive mode.
+ */
+void selinux_status_update_setenforce(int enforcing)
+{
+ struct selinux_kernel_status *status;
+
+ mutex_lock(&selinux_status_lock);
+ if (selinux_status_page) {
+ status = page_address(selinux_status_page);
+
+ status->sequence++;
+ smp_wmb();
+
+ status->enforcing = enforcing;
+
+ smp_wmb();
+ status->sequence++;
+ }
+ mutex_unlock(&selinux_status_lock);
+}
+
+/*
+ * selinux_status_update_policyload
+ *
+ * Updates the status page with the policy reload count and the
+ * current deny_unknown setting.
+ */
+void selinux_status_update_policyload(int seqno)
+{
+ struct selinux_kernel_status *status;
+
+ mutex_lock(&selinux_status_lock);
+ if (selinux_status_page) {
+ status = page_address(selinux_status_page);
+
+ status->sequence++;
+ smp_wmb();
+
+ status->policyload = seqno;
+ status->deny_unknown = !security_get_allow_unknown();
+
+ smp_wmb();
+ status->sequence++;
+ }
+ mutex_unlock(&selinux_status_lock);
+}
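
The block comment at the top of status.c prescribes the user-space side
of this seqlock: retry whenever the sequence number is odd or changes
across the read. A minimal reader of the mmap'ed page might look like
the sketch below; the field layout mirrors struct selinux_kernel_status
from this series, and the /selinux/status path is an assumption about
where selinuxfs is mounted.

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	/* assumed mirror of struct selinux_kernel_status */
	struct selinux_kernel_status {
		uint32_t version;
		uint32_t sequence;	/* even: stable, odd: update in flight */
		uint32_t enforcing;
		uint32_t policyload;
		uint32_t deny_unknown;
	};

	int main(void)
	{
		volatile struct selinux_kernel_status *st;
		uint32_t seq, enforcing, policyload;
		int fd = open("/selinux/status", O_RDONLY);

		if (fd < 0)
			return 1;
		st = mmap(NULL, getpagesize(), PROT_READ, MAP_SHARED, fd, 0);
		if (st == MAP_FAILED)
			return 1;
		do {	/* seqlock read side; a real consumer would also
			 * insert read barriers between the loads */
			seq = st->sequence;
			enforcing = st->enforcing;
			policyload = st->policyload;
		} while ((seq & 1) || seq != st->sequence);
		printf("enforcing=%u policyload=%u\n", enforcing, policyload);
		return 0;
	}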
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index c448d57..bc39f40 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -1281,12 +1281,11 @@
*
* Return 0 if read access is permitted
*/
-static int smack_task_setscheduler(struct task_struct *p, int policy,
- struct sched_param *lp)
+static int smack_task_setscheduler(struct task_struct *p)
{
int rc;
- rc = cap_task_setscheduler(p, policy, lp);
+ rc = cap_task_setscheduler(p);
if (rc == 0)
rc = smk_curacc_on_task(p, MAY_WRITE);
return rc;
@@ -3005,7 +3004,8 @@
{
char *sp = smack_from_secid(secid);
- *secdata = sp;
+ if (secdata)
+ *secdata = sp;
*seclen = strlen(sp);
return 0;
}
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index ef43995..7556315 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -768,8 +768,10 @@
return true; /* Do nothing if open(O_WRONLY). */
memset(&head->r, 0, sizeof(head->r));
head->r.print_this_domain_only = true;
- head->r.eof = !domain;
- head->r.domain = &domain->list;
+ if (domain)
+ head->r.domain = &domain->list;
+ else
+ head->r.eof = 1;
tomoyo_io_printf(head, "# select %s\n", data);
if (domain && domain->is_deleted)
tomoyo_io_printf(head, "# This is a deleted domain.\n");
@@ -1416,15 +1418,19 @@
const pid_t gpid = task_pid_nr(current);
static const int tomoyo_buffer_len = 4096;
char *buffer = kmalloc(tomoyo_buffer_len, GFP_NOFS);
+ pid_t ppid;
if (!buffer)
return NULL;
do_gettimeofday(&tv);
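+ /* current->real_parent must be dereferenced under rcu_read_lock() */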
+ rcu_read_lock();
+ ppid = task_tgid_vnr(current->real_parent);
+ rcu_read_unlock();
snprintf(buffer, tomoyo_buffer_len - 1,
"#timestamp=%lu profile=%u mode=%s (global-pid=%u)"
" task={ pid=%u ppid=%u uid=%u gid=%u euid=%u"
" egid=%u suid=%u sgid=%u fsuid=%u fsgid=%u }",
tv.tv_sec, r->profile, tomoyo_mode[r->mode], gpid,
- (pid_t) sys_getpid(), (pid_t) sys_getppid(),
+ task_tgid_vnr(current), ppid,
current_uid(), current_gid(), current_euid(),
current_egid(), current_suid(), current_sgid(),
current_fsuid(), current_fsgid());
@@ -2047,13 +2053,22 @@
const u8 profile = domain->profile;
if (tomoyo_profile_ptr[profile])
continue;
+ printk(KERN_ERR "You need to define profile %u before using it.\n",
+ profile);
+ printk(KERN_ERR "Please see http://tomoyo.sourceforge.jp/2.3/ "
+ "for more information.\n");
panic("Profile %u (used by '%s') not defined.\n",
profile, domain->domainname->name);
}
tomoyo_read_unlock(idx);
- if (tomoyo_profile_version != 20090903)
+ if (tomoyo_profile_version != 20090903) {
+ printk(KERN_ERR "You need to install userland programs for "
+ "TOMOYO 2.3 and initialize policy configuration.\n");
+ printk(KERN_ERR "Please see http://tomoyo.sourceforge.jp/2.3/ "
+ "for more information.\n");
panic("Profile version %u is not supported.\n",
tomoyo_profile_version);
+ }
printk(KERN_INFO "TOMOYO: 2.3.0\n");
printk(KERN_INFO "Mandatory Access Control activated.\n");
}
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index 04454cb..7c66bd8 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -689,9 +689,6 @@
/********** Function prototypes. **********/
-extern asmlinkage long sys_getpid(void);
-extern asmlinkage long sys_getppid(void);
-
/* Check whether the given string starts with the given keyword. */
bool tomoyo_str_starts(char **src, const char *find);
/* Get tomoyo_realpath() of current process. */
diff --git a/sound/core/control.c b/sound/core/control.c
index 070aab4..45a8180 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -31,6 +31,7 @@
/* max number of user-defined controls */
#define MAX_USER_CONTROLS 32
+#define MAX_CONTROL_COUNT 1028
struct snd_kctl_ioctl {
struct list_head list; /* list of all ioctls */
@@ -195,6 +196,10 @@
if (snd_BUG_ON(!control || !control->count))
return NULL;
+
+ if (control->count > MAX_CONTROL_COUNT)
+ return NULL;
+
kctl = kzalloc(sizeof(*kctl) + sizeof(struct snd_kcontrol_volatile) * control->count, GFP_KERNEL);
if (kctl == NULL) {
snd_printk(KERN_ERR "Cannot allocate control instance\n");
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index cbe815d..ac242a3 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -203,10 +203,16 @@
FORMAT(S18_3BE),
FORMAT(U18_3LE),
FORMAT(U18_3BE),
+ FORMAT(G723_24),
+ FORMAT(G723_24_1B),
+ FORMAT(G723_40),
+ FORMAT(G723_40_1B),
};
const char *snd_pcm_format_name(snd_pcm_format_t format)
{
+ if (format >= ARRAY_SIZE(snd_pcm_format_names))
+ return "Unknown";
return snd_pcm_format_names[format];
}
EXPORT_SYMBOL_GPL(snd_pcm_format_name);
@@ -366,14 +372,17 @@
struct snd_info_buffer *buffer)
{
struct snd_pcm_substream *substream = entry->private_data;
- struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_pcm_runtime *runtime;
+
+ mutex_lock(&substream->pcm->open_mutex);
+ runtime = substream->runtime;
if (!runtime) {
snd_iprintf(buffer, "closed\n");
- return;
+ goto unlock;
}
if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
snd_iprintf(buffer, "no setup\n");
- return;
+ goto unlock;
}
snd_iprintf(buffer, "access: %s\n", snd_pcm_access_name(runtime->access));
snd_iprintf(buffer, "format: %s\n", snd_pcm_format_name(runtime->format));
@@ -392,20 +401,25 @@
snd_iprintf(buffer, "OSS period frames: %lu\n", (unsigned long)runtime->oss.period_frames);
}
#endif
+ unlock:
+ mutex_unlock(&substream->pcm->open_mutex);
}
static void snd_pcm_substream_proc_sw_params_read(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_pcm_substream *substream = entry->private_data;
- struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_pcm_runtime *runtime;
+
+ mutex_lock(&substream->pcm->open_mutex);
+ runtime = substream->runtime;
if (!runtime) {
snd_iprintf(buffer, "closed\n");
- return;
+ goto unlock;
}
if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
snd_iprintf(buffer, "no setup\n");
- return;
+ goto unlock;
}
snd_iprintf(buffer, "tstamp_mode: %s\n", snd_pcm_tstamp_mode_name(runtime->tstamp_mode));
snd_iprintf(buffer, "period_step: %u\n", runtime->period_step);
@@ -415,24 +429,29 @@
snd_iprintf(buffer, "silence_threshold: %lu\n", runtime->silence_threshold);
snd_iprintf(buffer, "silence_size: %lu\n", runtime->silence_size);
snd_iprintf(buffer, "boundary: %lu\n", runtime->boundary);
+ unlock:
+ mutex_unlock(&substream->pcm->open_mutex);
}
static void snd_pcm_substream_proc_status_read(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_pcm_substream *substream = entry->private_data;
- struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_pcm_runtime *runtime;
struct snd_pcm_status status;
int err;
+
+ mutex_lock(&substream->pcm->open_mutex);
+ runtime = substream->runtime;
if (!runtime) {
snd_iprintf(buffer, "closed\n");
- return;
+ goto unlock;
}
memset(&status, 0, sizeof(status));
err = snd_pcm_status(substream, &status);
if (err < 0) {
snd_iprintf(buffer, "error %d\n", err);
- return;
+ goto unlock;
}
snd_iprintf(buffer, "state: %s\n", snd_pcm_state_name(status.state));
snd_iprintf(buffer, "owner_pid : %d\n", pid_vnr(substream->pid));
@@ -446,6 +465,8 @@
snd_iprintf(buffer, "-----\n");
snd_iprintf(buffer, "hw_ptr : %ld\n", runtime->status->hw_ptr);
snd_iprintf(buffer, "appl_ptr : %ld\n", runtime->control->appl_ptr);
+ unlock:
+ mutex_unlock(&substream->pcm->open_mutex);
}
#ifdef CONFIG_SND_PCM_XRUN_DEBUG
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 134fc6c..d4eb2ef 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -1992,6 +1992,8 @@
substream->ops->close(substream);
substream->hw_opened = 0;
}
+ if (pm_qos_request_active(&substream->latency_pm_qos_req))
+ pm_qos_remove_request(&substream->latency_pm_qos_req);
if (substream->pcm_release) {
substream->pcm_release(substream);
substream->pcm_release = NULL;
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index eb68326..cbbed0d 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -535,13 +535,15 @@
{
struct snd_rawmidi_file *rfile;
struct snd_rawmidi *rmidi;
+ struct module *module;
rfile = file->private_data;
rmidi = rfile->rmidi;
rawmidi_release_priv(rfile);
kfree(rfile);
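+ /* snd_card_file_remove() may free the card; cache the module pointer first */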
+ module = rmidi->card->module;
snd_card_file_remove(rmidi->card, file);
- module_put(rmidi->card->module);
+ module_put(module);
return 0;
}
@@ -829,6 +831,8 @@
if (get_user(device, (int __user *)argp))
return -EFAULT;
+ if (device >= SNDRV_RAWMIDI_DEVICES) /* next device is -1 */
+ device = SNDRV_RAWMIDI_DEVICES - 1;
mutex_lock(&register_mutex);
device = device < 0 ? 0 : device + 1;
while (device < SNDRV_RAWMIDI_DEVICES) {
diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c
index 6857122..69cd7b3 100644
--- a/sound/core/seq/oss/seq_oss_init.c
+++ b/sound/core/seq/oss/seq_oss_init.c
@@ -281,13 +281,10 @@
return 0;
_error:
- snd_seq_oss_writeq_delete(dp->writeq);
- snd_seq_oss_readq_delete(dp->readq);
snd_seq_oss_synth_cleanup(dp);
snd_seq_oss_midi_cleanup(dp);
- delete_port(dp);
delete_seq_queue(dp->queue);
- kfree(dp);
+ delete_port(dp);
return rc;
}
@@ -350,8 +347,10 @@
static int
delete_port(struct seq_oss_devinfo *dp)
{
- if (dp->port < 0)
+ if (dp->port < 0) {
+ kfree(dp);
return 0;
+ }
debug_printk(("delete_port %i\n", dp->port));
return snd_seq_event_port_detach(dp->cseq, dp->port);
diff --git a/sound/i2c/other/ak4xxx-adda.c b/sound/i2c/other/ak4xxx-adda.c
index 1adb8a3..42d7844 100644
--- a/sound/i2c/other/ak4xxx-adda.c
+++ b/sound/i2c/other/ak4xxx-adda.c
@@ -900,7 +900,7 @@
return 0;
}
#else /* !CONFIG_PROC_FS */
-static int proc_init(struct snd_akm4xxx *ak) {}
+static int proc_init(struct snd_akm4xxx *ak) { return 0; }
#endif
int snd_akm4xxx_build_controls(struct snd_akm4xxx *ak)
diff --git a/sound/isa/msnd/msnd_pinnacle.c b/sound/isa/msnd/msnd_pinnacle.c
index 5f3e684..91d6023 100644
--- a/sound/isa/msnd/msnd_pinnacle.c
+++ b/sound/isa/msnd/msnd_pinnacle.c
@@ -764,9 +764,9 @@
static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ;
static long mem[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
+#ifndef MSND_CLASSIC
static long cfg[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
-#ifndef MSND_CLASSIC
/* Extra Peripheral Configuration (Default: Disable) */
static long ide_io0[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
static long ide_io1[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
@@ -894,7 +894,11 @@
struct snd_card *card;
struct snd_msnd *chip;
- if (has_isapnp(idx) || cfg[idx] == SNDRV_AUTO_PORT) {
+ if (has_isapnp(idx)
+#ifndef MSND_CLASSIC
+ || cfg[idx] == SNDRV_AUTO_PORT
+#endif
+ ) {
printk(KERN_INFO LOGNAME ": Assuming PnP mode\n");
return -ENODEV;
}
diff --git a/sound/oss/sound_timer.c b/sound/oss/sound_timer.c
index f0f0c19..48cda6c 100644
--- a/sound/oss/sound_timer.c
+++ b/sound/oss/sound_timer.c
@@ -26,7 +26,7 @@
static volatile unsigned long usecs_per_tmr; /* Length of the current interval */
static struct sound_lowlev_timer *tmr;
-static spinlock_t lock;
+static DEFINE_SPINLOCK(lock);
static unsigned long tmr2ticks(int tmr_value)
{
diff --git a/sound/oss/soundcard.c b/sound/oss/soundcard.c
index 92aa762..07f803e 100644
--- a/sound/oss/soundcard.c
+++ b/sound/oss/soundcard.c
@@ -391,11 +391,11 @@
case SND_DEV_DSP:
case SND_DEV_DSP16:
case SND_DEV_AUDIO:
- return audio_ioctl(dev, file, cmd, p);
+ ret = audio_ioctl(dev, file, cmd, p);
break;
case SND_DEV_MIDIN:
- return MIDIbuf_ioctl(dev, file, cmd, p);
+ ret = MIDIbuf_ioctl(dev, file, cmd, p);
break;
}
diff --git a/sound/pci/asihpi/hpi6205.c b/sound/pci/asihpi/hpi6205.c
index 3b44134..22c5fc6 100644
--- a/sound/pci/asihpi/hpi6205.c
+++ b/sound/pci/asihpi/hpi6205.c
@@ -941,8 +941,7 @@
}
-static u32 outstream_get_space_available(struct hpi_hostbuffer_status
- *status)
+static u32 outstream_get_space_available(struct hpi_hostbuffer_status *status)
{
return status->size_in_bytes - (status->host_index -
status->dSP_index);
@@ -987,6 +986,10 @@
/* write it */
phm->function = HPI_OSTREAM_WRITE;
hw_message(pao, phm, phr);
+
+ if (phr->error)
+ return;
+
/* update status information that the DSP would typically
* update (and will update next time the DSP
* buffer update task reads data from the host BBM buffer)
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index dd8fb86..1482921 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -589,6 +589,7 @@
bus->ops = temp->ops;
mutex_init(&bus->cmd_mutex);
+ mutex_init(&bus->prepare_mutex);
INIT_LIST_HEAD(&bus->codec_list);
snprintf(bus->workq_name, sizeof(bus->workq_name),
@@ -1068,7 +1069,6 @@
codec->addr = codec_addr;
mutex_init(&codec->spdif_mutex);
mutex_init(&codec->control_mutex);
- mutex_init(&codec->prepare_mutex);
init_hda_cache(&codec->amp_cache, sizeof(struct hda_amp_info));
init_hda_cache(&codec->cmd_cache, sizeof(struct hda_cache_head));
snd_array_init(&codec->mixers, sizeof(struct hda_nid_item), 32);
@@ -1213,6 +1213,7 @@
u32 stream_tag,
int channel_id, int format)
{
+ struct hda_codec *c;
struct hda_cvt_setup *p;
unsigned int oldval, newval;
int i;
@@ -1253,10 +1254,12 @@
p->dirty = 0;
/* make other inactive cvts with the same stream-tag dirty */
- for (i = 0; i < codec->cvt_setups.used; i++) {
- p = snd_array_elem(&codec->cvt_setups, i);
- if (!p->active && p->stream_tag == stream_tag)
- p->dirty = 1;
+ list_for_each_entry(c, &codec->bus->codec_list, list) {
+ for (i = 0; i < c->cvt_setups.used; i++) {
+ p = snd_array_elem(&c->cvt_setups, i);
+ if (!p->active && p->stream_tag == stream_tag)
+ p->dirty = 1;
+ }
}
}
EXPORT_SYMBOL_HDA(snd_hda_codec_setup_stream);
@@ -1306,12 +1309,16 @@
/* clean up the all conflicting obsolete streams */
static void purify_inactive_streams(struct hda_codec *codec)
{
+ struct hda_codec *c;
int i;
- for (i = 0; i < codec->cvt_setups.used; i++) {
- struct hda_cvt_setup *p = snd_array_elem(&codec->cvt_setups, i);
- if (p->dirty)
- really_cleanup_stream(codec, p);
+ list_for_each_entry(c, &codec->bus->codec_list, list) {
+ for (i = 0; i < c->cvt_setups.used; i++) {
+ struct hda_cvt_setup *p;
+ p = snd_array_elem(&c->cvt_setups, i);
+ if (p->dirty)
+ really_cleanup_stream(c, p);
+ }
}
}
@@ -3502,11 +3509,11 @@
struct snd_pcm_substream *substream)
{
int ret;
- mutex_lock(&codec->prepare_mutex);
+ mutex_lock(&codec->bus->prepare_mutex);
ret = hinfo->ops.prepare(hinfo, codec, stream, format, substream);
if (ret >= 0)
purify_inactive_streams(codec);
- mutex_unlock(&codec->prepare_mutex);
+ mutex_unlock(&codec->bus->prepare_mutex);
return ret;
}
EXPORT_SYMBOL_HDA(snd_hda_codec_prepare);
@@ -3515,9 +3522,9 @@
struct hda_pcm_stream *hinfo,
struct snd_pcm_substream *substream)
{
- mutex_lock(&codec->prepare_mutex);
+ mutex_lock(&codec->bus->prepare_mutex);
hinfo->ops.cleanup(hinfo, codec, substream);
- mutex_unlock(&codec->prepare_mutex);
+ mutex_unlock(&codec->bus->prepare_mutex);
}
EXPORT_SYMBOL_HDA(snd_hda_codec_cleanup);
@@ -4529,7 +4536,7 @@
cfg->hp_outs--;
memmove(cfg->hp_pins + i, cfg->hp_pins + i + 1,
sizeof(cfg->hp_pins[0]) * (cfg->hp_outs - i));
- memmove(sequences_hp + i - 1, sequences_hp + i,
+ memmove(sequences_hp + i, sequences_hp + i + 1,
sizeof(sequences_hp[0]) * (cfg->hp_outs - i));
}
}
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 4303353..62c7022 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -648,6 +648,7 @@
struct hda_codec *caddr_tbl[HDA_MAX_CODEC_ADDRESS + 1];
struct mutex cmd_mutex;
+ struct mutex prepare_mutex;
/* unsolicited event queue */
struct hda_bus_unsolicited *unsol;
@@ -826,7 +827,6 @@
struct mutex spdif_mutex;
struct mutex control_mutex;
- struct mutex prepare_mutex;
unsigned int spdif_status; /* IEC958 status bits */
unsigned short spdif_ctls; /* SPDIF control bits */
unsigned int spdif_in_enable; /* SPDIF input enable? */
diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
index 803b298..26c3ade 100644
--- a/sound/pci/hda/hda_eld.c
+++ b/sound/pci/hda/hda_eld.c
@@ -596,6 +596,8 @@
}
EXPORT_SYMBOL_HDA(snd_hda_eld_proc_free);
+#endif /* CONFIG_PROC_FS */
+
/* update PCM info based on ELD */
void hdmi_eld_update_pcm_info(struct hdmi_eld *eld, struct hda_pcm_stream *pcm,
struct hda_pcm_stream *codec_pars)
@@ -644,5 +646,3 @@
pcm->maxbps = min(pcm->maxbps, codec_pars->maxbps);
}
EXPORT_SYMBOL_HDA(hdmi_eld_update_pcm_info);
-
-#endif /* CONFIG_PROC_FS */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 1053fff..34940a0 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -126,6 +126,7 @@
"{Intel, ICH10},"
"{Intel, PCH},"
"{Intel, CPT},"
+ "{Intel, PBG},"
"{Intel, SCH},"
"{ATI, SB450},"
"{ATI, SB600},"
@@ -2749,6 +2750,8 @@
{ PCI_DEVICE(0x8086, 0x3b57), .driver_data = AZX_DRIVER_ICH },
/* CPT */
{ PCI_DEVICE(0x8086, 0x1c20), .driver_data = AZX_DRIVER_PCH },
+ /* PBG */
+ { PCI_DEVICE(0x8086, 0x1d20), .driver_data = AZX_DRIVER_PCH },
/* SCH */
{ PCI_DEVICE(0x8086, 0x811b), .driver_data = AZX_DRIVER_SCH },
/* ATI SB 450/600 */
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index b697fd2..10bbbaf 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -3641,6 +3641,7 @@
/* Lenovo Thinkpad T61/X61 */
SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1984_THINKPAD),
SND_PCI_QUIRK(0x1028, 0x0214, "Dell T3400", AD1984_DELL_DESKTOP),
+ SND_PCI_QUIRK(0x1028, 0x0233, "Dell Latitude E6400", AD1984_DELL_DESKTOP),
{}
};
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 4ef5efa..488fd9a 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -972,6 +972,53 @@
{} /* terminator */
};
+/* Errata: CS4207 rev C0/C1/C2 Silicon
+ *
+ * http://www.cirrus.com/en/pubs/errata/ER880C3.pdf
+ *
+ * 6. At high temperature (TA > +85°C), the digital supply current (IVD)
+ * may be excessive (up to an additional 200 μA), which is most easily
+ * observed while the part is being held in reset (RESET# active low).
+ *
+ * Root Cause: At initial powerup of the device, the logic that drives
+ * the clock and write enable to the S/PDIF SRC RAMs is not properly
+ * initialized.
+ * Certain random patterns will cause a steady leakage current in those
+ * RAM cells. The issue will resolve once the SRCs are used (turned on).
+ *
+ * Workaround: The following verb sequence briefly turns on the S/PDIF SRC
+ * blocks, which will alleviate the issue.
+ */
+
+static struct hda_verb cs_errata_init_verbs[] = {
+ {0x01, AC_VERB_SET_POWER_STATE, 0x00}, /* AFG: D0 */
+ {0x11, AC_VERB_SET_PROC_STATE, 0x01}, /* VPW: processing on */
+
+ {0x11, AC_VERB_SET_COEF_INDEX, 0x0008},
+ {0x11, AC_VERB_SET_PROC_COEF, 0x9999},
+ {0x11, AC_VERB_SET_COEF_INDEX, 0x0017},
+ {0x11, AC_VERB_SET_PROC_COEF, 0xa412},
+ {0x11, AC_VERB_SET_COEF_INDEX, 0x0001},
+ {0x11, AC_VERB_SET_PROC_COEF, 0x0009},
+
+ {0x07, AC_VERB_SET_POWER_STATE, 0x00}, /* S/PDIF Rx: D0 */
+ {0x08, AC_VERB_SET_POWER_STATE, 0x00}, /* S/PDIF Tx: D0 */
+
+ {0x11, AC_VERB_SET_COEF_INDEX, 0x0017},
+ {0x11, AC_VERB_SET_PROC_COEF, 0x2412},
+ {0x11, AC_VERB_SET_COEF_INDEX, 0x0008},
+ {0x11, AC_VERB_SET_PROC_COEF, 0x0000},
+ {0x11, AC_VERB_SET_COEF_INDEX, 0x0001},
+ {0x11, AC_VERB_SET_PROC_COEF, 0x0008},
+ {0x11, AC_VERB_SET_PROC_STATE, 0x00},
+
+ {0x07, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Rx: D3 */
+ {0x08, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Tx: D3 */
+ /*{0x01, AC_VERB_SET_POWER_STATE, 0x03},*/ /* AFG: D3 (already handled elsewhere) */
+
+ {} /* terminator */
+};
+
/* SPDIF setup */
static void init_digital(struct hda_codec *codec)
{
@@ -991,6 +1038,9 @@
{
struct cs_spec *spec = codec->spec;
+ /* init_verb sequence for C0/C1/C2 errata */
+ snd_hda_sequence_write(codec, cs_errata_init_verbs);
+
snd_hda_sequence_write(codec, cs_coef_init_verbs);
if (spec->gpio_mask) {
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index c424952..972e7c4 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -116,6 +116,7 @@
unsigned int dell_vostro:1;
unsigned int ideapad:1;
unsigned int thinkpad:1;
+ unsigned int hp_laptop:1;
unsigned int ext_mic_present;
unsigned int recording;
@@ -2299,6 +2300,18 @@
}
}
+/* toggle input of built-in digital mic and mic jack appropriately */
+static void cxt5066_hp_laptop_automic(struct hda_codec *codec)
+{
+ unsigned int present;
+
+ present = snd_hda_jack_detect(codec, 0x1b);
+ snd_printdd("CXT5066: external microphone present=%d\n", present);
+ snd_hda_codec_write(codec, 0x17, 0, AC_VERB_SET_CONNECT_SEL,
+ present ? 1 : 3);
+}
+
+
/* toggle input of built-in digital mic and mic jack appropriately
order is: external mic -> dock mic -> internal mic */
static void cxt5066_thinkpad_automic(struct hda_codec *codec)
@@ -2408,6 +2421,20 @@
}
/* unsolicited event for jack sensing */
+static void cxt5066_hp_laptop_event(struct hda_codec *codec, unsigned int res)
+{
+ snd_printdd("CXT5066_hp_laptop: unsol event %x (%x)\n", res, res >> 26);
+ switch (res >> 26) {
+ case CONEXANT_HP_EVENT:
+ cxt5066_hp_automute(codec);
+ break;
+ case CONEXANT_MIC_EVENT:
+ cxt5066_hp_laptop_automic(codec);
+ break;
+ }
+}
+
+/* unsolicited event for jack sensing */
static void cxt5066_thinkpad_event(struct hda_codec *codec, unsigned int res)
{
snd_printdd("CXT5066_thinkpad: unsol event %x (%x)\n", res, res >> 26);
@@ -2989,6 +3016,14 @@
{ } /* end */
};
+
+static struct hda_verb cxt5066_init_verbs_hp_laptop[] = {
+ {0x14, AC_VERB_SET_CONNECT_SEL, 0x0},
+ {0x19, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_HP_EVENT},
+ {0x1b, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_MIC_EVENT},
+ { } /* end */
+};
+
/* initialize jack-sensing, too */
static int cxt5066_init(struct hda_codec *codec)
{
@@ -3004,6 +3039,8 @@
cxt5066_ideapad_automic(codec);
else if (spec->thinkpad)
cxt5066_thinkpad_automic(codec);
+ else if (spec->hp_laptop)
+ cxt5066_hp_laptop_automic(codec);
}
cxt5066_set_mic_boost(codec);
return 0;
@@ -3031,6 +3068,7 @@
CXT5066_DELL_VOSTO, /* Dell Vostro 1015i */
CXT5066_IDEAPAD, /* Lenovo IdeaPad U150 */
CXT5066_THINKPAD, /* Lenovo ThinkPad T410s, others? */
+ CXT5066_HP_LAPTOP, /* HP Laptop */
CXT5066_MODELS
};
@@ -3041,6 +3079,7 @@
[CXT5066_DELL_VOSTO] = "dell-vostro",
[CXT5066_IDEAPAD] = "ideapad",
[CXT5066_THINKPAD] = "thinkpad",
+ [CXT5066_HP_LAPTOP] = "hp-laptop",
};
static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
@@ -3052,13 +3091,17 @@
SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTO),
SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTO),
SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
+ SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP),
+ SND_PCI_QUIRK(0x1179, 0xff1e, "Toshiba Satellite C650D", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5),
+ SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400s", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x21b2, "Thinkpad X100e", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x17aa, 0x21b3, "Thinkpad Edge 13 (197)", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G series", CXT5066_IDEAPAD),
+ SND_PCI_QUIRK(0x17aa, 0x390a, "Lenovo S10-3t", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x17aa, 0x3938, "Lenovo G series (AMD)", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x17aa, 0x3a0d, "ideapad", CXT5066_IDEAPAD),
{}
@@ -3115,6 +3158,23 @@
spec->num_init_verbs++;
spec->dell_automute = 1;
break;
+ case CXT5066_HP_LAPTOP:
+ codec->patch_ops.init = cxt5066_init;
+ codec->patch_ops.unsol_event = cxt5066_hp_laptop_event;
+ spec->init_verbs[spec->num_init_verbs] =
+ cxt5066_init_verbs_hp_laptop;
+ spec->num_init_verbs++;
+ spec->hp_laptop = 1;
+ spec->mixers[spec->num_mixers++] = cxt5066_mixer_master;
+ spec->mixers[spec->num_mixers++] = cxt5066_mixers;
+ /* no S/PDIF out */
+ spec->multiout.dig_out_nid = 0;
+ /* input source automatically selected */
+ spec->input_mux = NULL;
+ spec->port_d_mode = 0;
+ spec->mic_boost = 3; /* default 30dB gain */
+ break;
+
case CXT5066_OLPC_XO_1_5:
codec->patch_ops.init = cxt5066_olpc_init;
codec->patch_ops.unsol_event = cxt5066_olpc_unsol_event;
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 2bc0f07..afd6022 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -707,8 +707,6 @@
u32 stream_tag, int format)
{
struct hdmi_spec *spec = codec->spec;
- int tag;
- int fmt;
int pinctl;
int new_pinctl = 0;
int i;
@@ -745,24 +743,7 @@
return -EINVAL;
}
- tag = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0) >> 4;
- fmt = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_STREAM_FORMAT, 0);
-
- snd_printdd("hdmi_setup_stream: "
- "NID=0x%x, %sstream=0x%x, %sformat=0x%x\n",
- nid,
- tag == stream_tag ? "" : "new-",
- stream_tag,
- fmt == format ? "" : "new-",
- format);
-
- if (tag != stream_tag)
- snd_hda_codec_write(codec, nid, 0,
- AC_VERB_SET_CHANNEL_STREAMID,
- stream_tag << 4);
- if (fmt != format)
- snd_hda_codec_write(codec, nid, 0,
- AC_VERB_SET_STREAM_FORMAT, format);
+ snd_hda_codec_setup_stream(codec, nid, stream_tag, 0, format);
return 0;
}
diff --git a/sound/pci/hda/patch_intelhdmi.c b/sound/pci/hda/patch_intelhdmi.c
index d382d3c..36a9b83 100644
--- a/sound/pci/hda/patch_intelhdmi.c
+++ b/sound/pci/hda/patch_intelhdmi.c
@@ -69,20 +69,12 @@
return hdmi_setup_stream(codec, hinfo->nid, stream_tag, format);
}
-static int intel_hdmi_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
- struct hda_codec *codec,
- struct snd_pcm_substream *substream)
-{
- return 0;
-}
-
static struct hda_pcm_stream intel_hdmi_pcm_playback = {
.substreams = 1,
.channels_min = 2,
.ops = {
.open = hdmi_pcm_open,
.prepare = intel_hdmi_playback_pcm_prepare,
- .cleanup = intel_hdmi_playback_pcm_cleanup,
},
};
diff --git a/sound/pci/hda/patch_nvhdmi.c b/sound/pci/hda/patch_nvhdmi.c
index f636870..baa108b 100644
--- a/sound/pci/hda/patch_nvhdmi.c
+++ b/sound/pci/hda/patch_nvhdmi.c
@@ -84,7 +84,7 @@
#else
/* support all rates and formats */
#define SUPPORTED_RATES \
- (SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |\
+ (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |\
SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |\
SNDRV_PCM_RATE_192000)
#define SUPPORTED_MAXBPS 24
@@ -326,13 +326,6 @@
return 0;
}
-static int nvhdmi_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
- struct hda_codec *codec,
- struct snd_pcm_substream *substream)
-{
- return 0;
-}
-
static int nvhdmi_dig_playback_pcm_prepare_2ch(struct hda_pcm_stream *hinfo,
struct hda_codec *codec,
unsigned int stream_tag,
@@ -350,7 +343,6 @@
.ops = {
.open = hdmi_pcm_open,
.prepare = nvhdmi_dig_playback_pcm_prepare_8ch_89,
- .cleanup = nvhdmi_playback_pcm_cleanup,
},
};
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index a4dd045..a432e6e 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1594,12 +1594,22 @@
}
if (spec->autocfg.dig_in_pin) {
- hda_nid_t dig_nid;
- err = snd_hda_get_connections(codec,
- spec->autocfg.dig_in_pin,
- &dig_nid, 1);
- if (err > 0)
- spec->dig_in_nid = dig_nid;
+ dig_nid = codec->start_nid;
+ for (i = 0; i < codec->num_nodes; i++, dig_nid++) {
+ unsigned int wcaps = get_wcaps(codec, dig_nid);
+ if (get_wcaps_type(wcaps) != AC_WID_AUD_IN)
+ continue;
+ if (!(wcaps & AC_WCAP_DIGITAL))
+ continue;
+ if (!(wcaps & AC_WCAP_CONN_LIST))
+ continue;
+ err = get_connection_index(codec, dig_nid,
+ spec->autocfg.dig_in_pin);
+ if (err >= 0) {
+ spec->dig_in_nid = dig_nid;
+ break;
+ }
+ }
}
}
@@ -5334,6 +5344,7 @@
static struct snd_pci_quirk beep_white_list[] = {
SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1),
+ SND_PCI_QUIRK(0x1043, 0x83ce, "EeePC", 1),
SND_PCI_QUIRK(0x8086, 0xd613, "Intel", 1),
{}
};
@@ -14452,6 +14463,7 @@
enum {
ALC269_FIXUP_SONY_VAIO,
+ ALC269_FIXUP_DELL_M101Z,
};
static const struct hda_verb alc269_sony_vaio_fixup_verbs[] = {
@@ -14463,10 +14475,20 @@
[ALC269_FIXUP_SONY_VAIO] = {
.verbs = alc269_sony_vaio_fixup_verbs
},
+ [ALC269_FIXUP_DELL_M101Z] = {
+ .verbs = (const struct hda_verb[]) {
+ /* Enables internal speaker */
+ {0x20, AC_VERB_SET_COEF_INDEX, 13},
+ {0x20, AC_VERB_SET_PROC_COEF, 0x4040},
+ {}
+ }
+ },
};
static struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x104d, 0x9071, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
+ SND_PCI_QUIRK(0x104d, 0x9077, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
+ SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
{}
};
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index f3f861b..c16c5ba 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -1747,6 +1747,8 @@
"HP dv6", STAC_HP_DV5),
SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3061,
"HP dv6", STAC_HP_DV5), /* HP dv6-1110ax */
+ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x363e,
+ "HP DV6", STAC_HP_DV5),
SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x7010,
"HP", STAC_HP_DV5),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0233,
@@ -6303,6 +6305,21 @@
{ .id = 0x111d76b5, .name = "92HD71B6X", .patch = patch_stac92hd71bxx },
{ .id = 0x111d76b6, .name = "92HD71B5X", .patch = patch_stac92hd71bxx },
{ .id = 0x111d76b7, .name = "92HD71B5X", .patch = patch_stac92hd71bxx },
+ { .id = 0x111d76c0, .name = "92HD89C3", .patch = patch_stac92hd73xx },
+ { .id = 0x111d76c1, .name = "92HD89C2", .patch = patch_stac92hd73xx },
+ { .id = 0x111d76c2, .name = "92HD89C1", .patch = patch_stac92hd73xx },
+ { .id = 0x111d76c3, .name = "92HD89B3", .patch = patch_stac92hd73xx },
+ { .id = 0x111d76c4, .name = "92HD89B2", .patch = patch_stac92hd73xx },
+ { .id = 0x111d76c5, .name = "92HD89B1", .patch = patch_stac92hd73xx },
+ { .id = 0x111d76c6, .name = "92HD89E3", .patch = patch_stac92hd73xx },
+ { .id = 0x111d76c7, .name = "92HD89E2", .patch = patch_stac92hd73xx },
+ { .id = 0x111d76c8, .name = "92HD89E1", .patch = patch_stac92hd73xx },
+ { .id = 0x111d76c9, .name = "92HD89D3", .patch = patch_stac92hd73xx },
+ { .id = 0x111d76ca, .name = "92HD89D2", .patch = patch_stac92hd73xx },
+ { .id = 0x111d76cb, .name = "92HD89D1", .patch = patch_stac92hd73xx },
+ { .id = 0x111d76cc, .name = "92HD89F3", .patch = patch_stac92hd73xx },
+ { .id = 0x111d76cd, .name = "92HD89F2", .patch = patch_stac92hd73xx },
+ { .id = 0x111d76ce, .name = "92HD89F1", .patch = patch_stac92hd73xx },
{} /* terminator */
};
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index 6433e65..4677492 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -1776,6 +1776,12 @@
},
{
.subvendor = 0x1014,
+ .subdevice = 0x0534,
+ .name = "ThinkPad X31",
+ .type = AC97_TUNE_INV_EAPD
+ },
+ {
+ .subvendor = 0x1014,
.subdevice = 0x1f00,
.name = "MS-9128",
.type = AC97_TUNE_ALC_JACK
diff --git a/sound/pci/oxygen/oxygen.c b/sound/pci/oxygen/oxygen.c
index 289cb4d..6c0a11a 100644
--- a/sound/pci/oxygen/oxygen.c
+++ b/sound/pci/oxygen/oxygen.c
@@ -543,6 +543,10 @@
chip->model.suspend = claro_suspend;
chip->model.resume = claro_resume;
chip->model.set_adc_params = set_ak5385_params;
+ chip->model.device_config = PLAYBACK_0_TO_I2S |
+ PLAYBACK_1_TO_SPDIF |
+ CAPTURE_0_FROM_I2S_2 |
+ CAPTURE_1_FROM_SPDIF;
break;
}
if (id->driver_data == MODEL_MERIDIAN ||
diff --git a/sound/pci/oxygen/oxygen.h b/sound/pci/oxygen/oxygen.h
index 6147216..a3409ed 100644
--- a/sound/pci/oxygen/oxygen.h
+++ b/sound/pci/oxygen/oxygen.h
@@ -155,6 +155,7 @@
int oxygen_pci_suspend(struct pci_dev *pci, pm_message_t state);
int oxygen_pci_resume(struct pci_dev *pci);
#endif
+void oxygen_pci_shutdown(struct pci_dev *pci);
/* oxygen_mixer.c */
diff --git a/sound/pci/oxygen/oxygen_lib.c b/sound/pci/oxygen/oxygen_lib.c
index fad03d6..7e93cf8 100644
--- a/sound/pci/oxygen/oxygen_lib.c
+++ b/sound/pci/oxygen/oxygen_lib.c
@@ -519,16 +519,21 @@
}
}
-static void oxygen_card_free(struct snd_card *card)
+static void oxygen_shutdown(struct oxygen *chip)
{
- struct oxygen *chip = card->private_data;
-
spin_lock_irq(&chip->reg_lock);
chip->interrupt_mask = 0;
chip->pcm_running = 0;
oxygen_write16(chip, OXYGEN_DMA_STATUS, 0);
oxygen_write16(chip, OXYGEN_INTERRUPT_MASK, 0);
spin_unlock_irq(&chip->reg_lock);
+}
+
+static void oxygen_card_free(struct snd_card *card)
+{
+ struct oxygen *chip = card->private_data;
+
+ oxygen_shutdown(chip);
if (chip->irq >= 0)
free_irq(chip->irq, chip);
flush_scheduled_work();
@@ -778,3 +783,13 @@
}
EXPORT_SYMBOL(oxygen_pci_resume);
#endif /* CONFIG_PM */
+
+void oxygen_pci_shutdown(struct pci_dev *pci)
+{
+ struct snd_card *card = pci_get_drvdata(pci);
+ struct oxygen *chip = card->private_data;
+
+ oxygen_shutdown(chip);
+ chip->model.cleanup(chip);
+}
+EXPORT_SYMBOL(oxygen_pci_shutdown);
diff --git a/sound/pci/oxygen/virtuoso.c b/sound/pci/oxygen/virtuoso.c
index f03a2f2..06c863e 100644
--- a/sound/pci/oxygen/virtuoso.c
+++ b/sound/pci/oxygen/virtuoso.c
@@ -95,6 +95,7 @@
.suspend = oxygen_pci_suspend,
.resume = oxygen_pci_resume,
#endif
+ .shutdown = oxygen_pci_shutdown,
};
static int __init alsa_card_xonar_init(void)
diff --git a/sound/pci/oxygen/xonar_wm87x6.c b/sound/pci/oxygen/xonar_wm87x6.c
index dbc4b89..b82c1cf 100644
--- a/sound/pci/oxygen/xonar_wm87x6.c
+++ b/sound/pci/oxygen/xonar_wm87x6.c
@@ -53,6 +53,8 @@
struct xonar_generic generic;
u16 wm8776_regs[0x17];
u16 wm8766_regs[0x10];
+ struct snd_kcontrol *line_adcmux_control;
+ struct snd_kcontrol *mic_adcmux_control;
struct snd_kcontrol *lc_controls[13];
};
@@ -193,6 +195,7 @@
static void xonar_ds_cleanup(struct oxygen *chip)
{
xonar_disable_output(chip);
+ wm8776_write(chip, WM8776_RESET, 0);
}
static void xonar_ds_suspend(struct oxygen *chip)
@@ -603,6 +606,7 @@
{
struct oxygen *chip = ctl->private_data;
struct xonar_wm87x6 *data = chip->model_data;
+ struct snd_kcontrol *other_ctl;
unsigned int mux_bit = ctl->private_value;
u16 reg;
int changed;
@@ -610,8 +614,18 @@
mutex_lock(&chip->mutex);
reg = data->wm8776_regs[WM8776_ADCMUX];
if (value->value.integer.value[0]) {
- reg &= ~0x003;
reg |= mux_bit;
+ /* line-in and mic-in are exclusive */
+ mux_bit ^= 3;
+ if (reg & mux_bit) {
+ reg &= ~mux_bit;
+ if (mux_bit == 1)
+ other_ctl = data->line_adcmux_control;
+ else
+ other_ctl = data->mic_adcmux_control;
+ snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
+ &other_ctl->id);
+ }
} else
reg &= ~mux_bit;
changed = reg != data->wm8776_regs[WM8776_ADCMUX];
@@ -963,7 +977,13 @@
err = snd_ctl_add(chip->card, ctl);
if (err < 0)
return err;
+ if (!strcmp(ctl->id.name, "Line Capture Switch"))
+ data->line_adcmux_control = ctl;
+ else if (!strcmp(ctl->id.name, "Mic Capture Switch"))
+ data->mic_adcmux_control = ctl;
}
+ if (!data->line_adcmux_control || !data->mic_adcmux_control)
+ return -ENXIO;
BUILD_BUG_ON(ARRAY_SIZE(lc_controls) != ARRAY_SIZE(data->lc_controls));
for (i = 0; i < ARRAY_SIZE(lc_controls); ++i) {
ctl = snd_ctl_new1(&lc_controls[i], chip);
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index b92adef..d6fa7bf 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -4609,6 +4609,7 @@
if (err < 0)
return err;
+ memset(&info, 0, sizeof(info));
spin_lock_irqsave(&hdsp->lock, flags);
info.pref_sync_ref = (unsigned char)hdsp_pref_sync_ref(hdsp);
info.wordclock_sync_check = (unsigned char)hdsp_wc_sync_check(hdsp);
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 547b713..0c98ef9 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -4127,6 +4127,7 @@
case SNDRV_HDSPM_IOCTL_GET_CONFIG_INFO:
+ memset(&info, 0, sizeof(info));
spin_lock_irq(&hdspm->lock);
info.pref_sync_ref = hdspm_pref_sync_ref(hdspm);
info.wordclock_sync_check = hdspm_wc_sync_check(hdspm);
diff --git a/sound/ppc/snd_ps3.c b/sound/ppc/snd_ps3.c
index 2f12da4..581a670 100644
--- a/sound/ppc/snd_ps3.c
+++ b/sound/ppc/snd_ps3.c
@@ -579,7 +579,7 @@
rate * delay_ms / 1000)
* substream->runtime->channels;
- pr_debug(KERN_ERR "%s: time=%d rate=%d bytes=%ld, frames=%d, ret=%d\n",
+ pr_debug("%s: time=%d rate=%d bytes=%ld, frames=%d, ret=%d\n",
__func__,
delay_ms,
rate,
diff --git a/sound/soc/imx/imx-ssi.c b/sound/soc/imx/imx-ssi.c
index a11daa1..c81da05 100644
--- a/sound/soc/imx/imx-ssi.c
+++ b/sound/soc/imx/imx-ssi.c
@@ -254,6 +254,9 @@
dma_data = &ssi->dma_params_rx;
}
+ if (ssi->flags & IMX_SSI_SYN)
+ reg = SSI_STCCR;
+
snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data);
sccr = readl(ssi->base + reg) & ~SSI_STCCR_WL_MASK;
diff --git a/sound/soc/s3c24xx/s3c-dma.c b/sound/soc/s3c24xx/s3c-dma.c
index 1b61c23..f1b1bc4 100644
--- a/sound/soc/s3c24xx/s3c-dma.c
+++ b/sound/soc/s3c24xx/s3c-dma.c
@@ -94,8 +94,7 @@
if ((pos + len) > prtd->dma_end) {
len = prtd->dma_end - pos;
- pr_debug(KERN_DEBUG "%s: corrected dma len %ld\n",
- __func__, len);
+ pr_debug("%s: corrected dma len %ld\n", __func__, len);
}
ret = s3c2410_dma_enqueue(prtd->params->channel,
diff --git a/sound/soc/sh/migor.c b/sound/soc/sh/migor.c
index b823a5c..87e2b7fc 100644
--- a/sound/soc/sh/migor.c
+++ b/sound/soc/sh/migor.c
@@ -12,6 +12,7 @@
#include <linux/firmware.h>
#include <linux/module.h>
+#include <asm/clkdev.h>
#include <asm/clock.h>
#include <cpu/sh7722.h>
@@ -40,12 +41,12 @@
};
static struct clk siumckb_clk = {
- .name = "siumckb_clk",
- .id = -1,
.ops = &siumckb_clk_ops,
.rate = 0, /* initialised at run-time */
};
+static struct clk_lookup *siumckb_lookup;
+
static int migor_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
@@ -180,6 +181,13 @@
if (ret < 0)
return ret;
+ siumckb_lookup = clkdev_alloc(&siumckb_clk, "siumckb_clk", NULL);
+ if (!siumckb_lookup) {
+ ret = -ENOMEM;
+ goto eclkdevalloc;
+ }
+ clkdev_add(siumckb_lookup);
+
/* Port number used on this machine: port B */
migor_snd_device = platform_device_alloc("soc-audio", 1);
if (!migor_snd_device) {
@@ -200,12 +208,15 @@
epdevadd:
platform_device_put(migor_snd_device);
epdevalloc:
+ clkdev_drop(siumckb_lookup);
+eclkdevalloc:
clk_unregister(&siumckb_clk);
return ret;
}
static void __exit migor_exit(void)
{
+ clkdev_drop(siumckb_lookup);
clk_unregister(&siumckb_clk);
platform_device_unregister(migor_snd_device);
}
diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c
index adbc68c..f6b0d28 100644
--- a/sound/soc/soc-cache.c
+++ b/sound/soc/soc-cache.c
@@ -203,8 +203,9 @@
data[1] = (value >> 8) & 0xff;
data[2] = value & 0xff;
- if (!snd_soc_codec_volatile_register(codec, reg))
- reg_cache[reg] = value;
+ if (!snd_soc_codec_volatile_register(codec, reg)
+ && reg < codec->reg_cache_size)
+ reg_cache[reg] = value;
if (codec->cache_only) {
codec->cache_sync = 1;
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 844ae82..acc91da 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -251,7 +251,7 @@
printk(KERN_WARNING
"ASoC: Failed to create codec register debugfs file\n");
- codec->debugfs_pop_time = debugfs_create_u32("dapm_pop_time", 0744,
+ codec->debugfs_pop_time = debugfs_create_u32("dapm_pop_time", 0644,
codec->debugfs_codec_root,
&codec->pop_time);
if (!codec->debugfs_pop_time)
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 9feb00c..4eabafa 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -126,7 +126,7 @@
for (idx = 0; idx < 2; idx++) {
subs = &as->substream[idx];
if (!subs->num_formats)
- return;
+ continue;
snd_usb_release_substream_urbs(subs, 1);
subs->interface = -1;
}
@@ -216,6 +216,11 @@
}
switch (protocol) {
+ default:
+ snd_printdd(KERN_WARNING "unknown interface protocol %#02x, assuming v1\n",
+ protocol);
+ /* fall through */
+
case UAC_VERSION_1: {
struct uac1_ac_header_descriptor *h1 = control_header;
@@ -253,10 +258,6 @@
break;
}
-
- default:
- snd_printk(KERN_ERR "unknown protocol version 0x%02x\n", protocol);
- return -EINVAL;
}
return 0;
@@ -465,7 +466,13 @@
goto __error;
}
- chip->ctrl_intf = alts;
+ /*
+ * For devices with more than one control interface, we assume the
+ * first contains the audio controls. We might need a more specific
+ * check here in the future.
+ */
+ if (!chip->ctrl_intf)
+ chip->ctrl_intf = alts;
if (err > 0) {
/* create normal USB audio interfaces */
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index b853f8d..7754a10 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -295,12 +295,11 @@
switch (altsd->bInterfaceProtocol) {
case UAC_VERSION_1:
+ default:
return set_sample_rate_v1(chip, iface, alts, fmt, rate);
case UAC_VERSION_2:
return set_sample_rate_v2(chip, iface, alts, fmt, rate);
}
-
- return -EINVAL;
}
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 1a701f1..ef0a07e 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -275,6 +275,12 @@
/* get audio formats */
switch (protocol) {
+ default:
+ snd_printdd(KERN_WARNING "%d:%u:%d: unknown interface protocol %#02x, assuming v1\n",
+ dev->devnum, iface_no, altno, protocol);
+ protocol = UAC_VERSION_1;
+ /* fall through */
+
case UAC_VERSION_1: {
struct uac1_as_header_descriptor *as =
snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_AS_GENERAL);
@@ -336,11 +342,6 @@
dev->devnum, iface_no, altno, as->bTerminalLink);
continue;
}
-
- default:
- snd_printk(KERN_ERR "%d:%u:%d : unknown interface protocol %04x\n",
- dev->devnum, iface_no, altno, protocol);
- continue;
}
/* get format type */
diff --git a/sound/usb/format.c b/sound/usb/format.c
index 3a13754..6914821 100644
--- a/sound/usb/format.c
+++ b/sound/usb/format.c
@@ -49,7 +49,8 @@
u64 pcm_formats;
switch (protocol) {
- case UAC_VERSION_1: {
+ case UAC_VERSION_1:
+ default: {
struct uac_format_type_i_discrete_descriptor *fmt = _fmt;
sample_width = fmt->bBitResolution;
sample_bytes = fmt->bSubframeSize;
@@ -64,9 +65,6 @@
format <<= 1;
break;
}
-
- default:
- return -EINVAL;
}
pcm_formats = 0;
@@ -384,6 +382,10 @@
* audio class v2 uses class specific EP0 range requests for that.
*/
switch (protocol) {
+ default:
+ snd_printdd(KERN_WARNING "%d:%u:%d : invalid protocol version %d, assuming v1\n",
+ chip->dev->devnum, fp->iface, fp->altsetting, protocol);
+ /* fall through */
case UAC_VERSION_1:
fp->channels = fmt->bNrChannels;
ret = parse_audio_format_rates_v1(chip, fp, (unsigned char *) fmt, 7);
@@ -392,10 +394,6 @@
/* fp->channels is already set in this case */
ret = parse_audio_format_rates_v2(chip, fp);
break;
- default:
- snd_printk(KERN_ERR "%d:%u:%d : invalid protocol version %d\n",
- chip->dev->devnum, fp->iface, fp->altsetting, protocol);
- return -EINVAL;
}
if (fp->channels < 1) {
@@ -438,6 +436,10 @@
fp->channels = 1;
switch (protocol) {
+ default:
+ snd_printdd(KERN_WARNING "%d:%u:%d : invalid protocol version %d, assuming v1\n",
+ chip->dev->devnum, fp->iface, fp->altsetting, protocol);
+ /* fall through */
case UAC_VERSION_1: {
struct uac_format_type_ii_discrete_descriptor *fmt = _fmt;
brate = le16_to_cpu(fmt->wMaxBitRate);
@@ -456,10 +458,6 @@
ret = parse_audio_format_rates_v2(chip, fp);
break;
}
- default:
- snd_printk(KERN_ERR "%d:%u:%d : invalid protocol version %d\n",
- chip->dev->devnum, fp->iface, fp->altsetting, protocol);
- return -EINVAL;
}
return ret;
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index c166db0..3ed3901 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -2175,7 +2175,15 @@
}
host_iface = &usb_ifnum_to_if(chip->dev, ctrlif)->altsetting[0];
- mixer->protocol = get_iface_desc(host_iface)->bInterfaceProtocol;
+ switch (get_iface_desc(host_iface)->bInterfaceProtocol) {
+ case UAC_VERSION_1:
+ default:
+ mixer->protocol = UAC_VERSION_1;
+ break;
+ case UAC_VERSION_2:
+ mixer->protocol = UAC_VERSION_2;
+ break;
+ }
if ((err = snd_usb_mixer_controls(mixer)) < 0 ||
(err = snd_usb_mixer_status_create(mixer)) < 0)
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 3634ced..3b5135c 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -173,13 +173,12 @@
switch (altsd->bInterfaceProtocol) {
case UAC_VERSION_1:
+ default:
return init_pitch_v1(chip, iface, alts, fmt);
case UAC_VERSION_2:
return init_pitch_v2(chip, iface, alts, fmt);
}
-
- return -EINVAL;
}
/*
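
The USB-audio hunks above (card.c, clock.c, endpoint.c, format.c, mixer.c,
pcm.c) all replace "unknown bInterfaceProtocol => -EINVAL" with "warn and
treat it as UAC v1", since devices that report bogus protocol values are
almost always v1-compatible. Condensed, the recurring shape is:

    switch (protocol) {
    default:
            /* warn, then assume the overwhelmingly common case */
            protocol = UAC_VERSION_1;
            /* fall through */
    case UAC_VERSION_1:
            /* ... v1 parsing ... */
            break;
    case UAC_VERSION_2:
            /* ... v2 parsing ... */
            break;
    }

Listing default: first and falling through into the v1 case keeps the v1
code in one place instead of duplicating it for the unknown-protocol path.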
diff --git a/tools/perf/Documentation/perf-annotate.txt b/tools/perf/Documentation/perf-annotate.txt
index 5164a65..b2c6330 100644
--- a/tools/perf/Documentation/perf-annotate.txt
+++ b/tools/perf/Documentation/perf-annotate.txt
@@ -8,7 +8,7 @@
SYNOPSIS
--------
[verse]
-'perf annotate' [-i <file> | --input=file] symbol_name
+'perf annotate' [-i <file> | --input=file] [symbol_name]
DESCRIPTION
-----------
@@ -24,6 +24,13 @@
--input=::
Input file name. (default: perf.data)
+--stdio:: Use the stdio interface.
+
+--tui:: Use the TUI interface. Use of --tui requires a tty; if one is not
+        present, as when piping to other commands, the stdio interface is
+        used. This interface starts by centering on the line with the most
+        samples; TAB/UNTAB cycles through the lines with the most samples.
+
SEE ALSO
--------
-linkperf:perf-record[1]
+linkperf:perf-record[1], linkperf:perf-report[1]
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index abfabe9..12052c9 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -65,6 +65,13 @@
the tree is considered as a new profiled object. +
Default: fractal,0.5.
+--stdio:: Use the stdio interface.
+
+--tui:: Use the TUI interface, which is integrated with annotate and allows
+        zooming into DSOs or threads, among other features. Use of --tui
+        requires a tty; if one is not present, as when piping to other
+        commands, the stdio interface is used.
+
SEE ALSO
--------
linkperf:perf-stat[1]
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 4f1fa77..d1db0f6 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -313,6 +313,9 @@
SCRIPT_SH += perf-archive.sh
+grep-libs = $(filter -l%,$(1))
+strip-libs = $(filter-out -l%,$(1))
+
#
# No Perl scripts right now:
#
@@ -588,14 +591,17 @@
ifdef NO_LIBPERL
BASIC_CFLAGS += -DNO_LIBPERL
else
- PERL_EMBED_LDOPTS = `perl -MExtUtils::Embed -e ldopts 2>/dev/null`
+ PERL_EMBED_LDOPTS = $(shell perl -MExtUtils::Embed -e ldopts 2>/dev/null)
+ PERL_EMBED_LDFLAGS = $(call strip-libs,$(PERL_EMBED_LDOPTS))
+ PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS))
PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null`
FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS)
ifneq ($(call try-cc,$(SOURCE_PERL_EMBED),$(FLAGS_PERL_EMBED)),y)
BASIC_CFLAGS += -DNO_LIBPERL
else
- ALL_LDFLAGS += $(PERL_EMBED_LDOPTS)
+ ALL_LDFLAGS += $(PERL_EMBED_LDFLAGS)
+ EXTLIBS += $(PERL_EMBED_LIBADD)
LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-perl.o
LIB_OBJS += $(OUTPUT)scripts/perl/Perf-Trace-Util/Context.o
endif
@@ -604,13 +610,16 @@
ifdef NO_LIBPYTHON
BASIC_CFLAGS += -DNO_LIBPYTHON
else
- PYTHON_EMBED_LDOPTS = `python-config --ldflags 2>/dev/null`
+ PYTHON_EMBED_LDOPTS = $(shell python-config --ldflags 2>/dev/null)
+ PYTHON_EMBED_LDFLAGS = $(call strip-libs,$(PYTHON_EMBED_LDOPTS))
+ PYTHON_EMBED_LIBADD = $(call grep-libs,$(PYTHON_EMBED_LDOPTS))
PYTHON_EMBED_CCOPTS = `python-config --cflags 2>/dev/null`
FLAGS_PYTHON_EMBED=$(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
ifneq ($(call try-cc,$(SOURCE_PYTHON_EMBED),$(FLAGS_PYTHON_EMBED)),y)
BASIC_CFLAGS += -DNO_LIBPYTHON
else
- ALL_LDFLAGS += $(PYTHON_EMBED_LDOPTS)
+ ALL_LDFLAGS += $(PYTHON_EMBED_LDFLAGS)
+ EXTLIBS += $(PYTHON_EMBED_LIBADD)
LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-python.o
LIB_OBJS += $(OUTPUT)scripts/python/Perf-Trace-Util/Context.o
endif
@@ -653,6 +662,15 @@
endif
endif
+
+ifdef NO_STRLCPY
+ BASIC_CFLAGS += -DNO_STRLCPY
+else
+ ifneq ($(call try-cc,$(SOURCE_STRLCPY),),y)
+ BASIC_CFLAGS += -DNO_STRLCPY
+ endif
+endif
+
ifndef CC_LD_DYNPATH
ifdef NO_R_TO_GCC_LINKER
# Some gcc does not accept and pass -R to the linker to specify
@@ -910,8 +928,8 @@
$(ALL_CFLAGS) -c $(filter %.c,$^) -o $@
$(OUTPUT)perf$X: $(OUTPUT)perf.o $(BUILTIN_OBJS) $(PERFLIBS)
- $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(OUTPUT)perf.o \
- $(BUILTIN_OBJS) $(ALL_LDFLAGS) $(LIBS)
+ $(QUIET_LINK)$(CC) $(ALL_CFLAGS) $(ALL_LDFLAGS) $(OUTPUT)perf.o \
+ $(BUILTIN_OBJS) $(LIBS) -o $@
$(OUTPUT)builtin-help.o: builtin-help.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) \
@@ -1017,7 +1035,7 @@
# we compile into subdirectories. If the target directory is not the source directory, they might not exist. So
# we depend the various files onto their directories.
DIRECTORY_DEPS = $(LIB_OBJS) $(BUILTIN_OBJS) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h
-$(DIRECTORY_DEPS): $(sort $(dir $(DIRECTORY_DEPS)))
+$(DIRECTORY_DEPS): | $(sort $(dir $(DIRECTORY_DEPS)))
# In the second step, we make a rule to actually create these directories
$(sort $(dir $(DIRECTORY_DEPS))):
$(QUIET_MKDIR)$(MKDIR) -p $@ 2>/dev/null
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 1478dc6..6d5604d8d 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -28,7 +28,7 @@
static char const *input_name = "perf.data";
-static bool force;
+static bool force, use_tui, use_stdio;
static bool full_paths;
@@ -321,7 +321,7 @@
static void hists__find_annotations(struct hists *self)
{
- struct rb_node *first = rb_first(&self->entries), *nd = first;
+ struct rb_node *nd = rb_first(&self->entries), *next;
int key = KEY_RIGHT;
while (nd) {
@@ -343,20 +343,19 @@
if (use_browser > 0) {
key = hist_entry__tui_annotate(he);
- if (is_exit_key(key))
- break;
switch (key) {
case KEY_RIGHT:
- case '\t':
- nd = rb_next(nd);
+ next = rb_next(nd);
break;
case KEY_LEFT:
- if (nd == first)
- continue;
- nd = rb_prev(nd);
- default:
+ next = rb_prev(nd);
break;
+ default:
+ return;
}
+
+ if (next != NULL)
+ nd = next;
} else {
hist_entry__tty_annotate(he);
nd = rb_next(nd);
@@ -428,6 +427,8 @@
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
+ OPT_BOOLEAN(0, "tui", &use_tui, "Use the TUI interface"),
+ OPT_BOOLEAN(0, "stdio", &use_stdio, "Use the stdio interface"),
OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
"file", "vmlinux pathname"),
OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
@@ -443,6 +444,11 @@
{
argc = parse_options(argc, argv, options, annotate_usage, 0);
+ if (use_stdio)
+ use_browser = 0;
+ else if (use_tui)
+ use_browser = 1;
+
setup_browser();
symbol_conf.priv_size = sizeof(struct sym_priv);
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 55fc1f4..5de405d 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -32,7 +32,7 @@
static char const *input_name = "perf.data";
-static bool force;
+static bool force, use_tui, use_stdio;
static bool hide_unresolved;
static bool dont_use_callchains;
@@ -107,7 +107,8 @@
goto out_free_syms;
err = 0;
if (symbol_conf.use_callchain) {
- err = append_chain(he->callchain, data->callchain, syms, data->period);
+ err = callchain_append(he->callchain, data->callchain, syms,
+ data->period);
if (err)
goto out_free_syms;
}
@@ -450,6 +451,8 @@
"Show per-thread event counters"),
OPT_STRING(0, "pretty", &pretty_printing_style, "key",
"pretty printing style key: normal raw"),
+ OPT_BOOLEAN(0, "tui", &use_tui, "Use the TUI interface"),
+ OPT_BOOLEAN(0, "stdio", &use_stdio, "Use the stdio interface"),
OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
"sort by key(s): pid, comm, dso, symbol, parent"),
OPT_BOOLEAN(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
@@ -482,8 +485,15 @@
{
argc = parse_options(argc, argv, options, report_usage, 0);
+ if (use_stdio)
+ use_browser = 0;
+ else if (use_tui)
+ use_browser = 1;
+
if (strcmp(input_name, "-") != 0)
setup_browser();
+ else
+ use_browser = 0;
/*
* Only in the newt browser we are doing integrated annotation,
* so don't allocate extra space that won't be used in the stdio
diff --git a/tools/perf/feature-tests.mak b/tools/perf/feature-tests.mak
index 7a7b608..b253db6 100644
--- a/tools/perf/feature-tests.mak
+++ b/tools/perf/feature-tests.mak
@@ -110,6 +110,17 @@
}
endef
+define SOURCE_STRLCPY
+#include <stdlib.h>
+extern size_t strlcpy(char *dest, const char *src, size_t size);
+
+int main(void)
+{
+ strlcpy(NULL, NULL, 0);
+ return 0;
+}
+endef
+
# try-cc
# Usage: option = $(call try-cc, source-to-build, cc-options)
try-cc = $(shell sh -c \
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index ef7aa0a..95aaf56 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -73,6 +73,18 @@
#define cpu_relax() asm volatile("":::"memory")
#endif
+#ifdef __mips__
+#include "../../arch/mips/include/asm/unistd.h"
+#define rmb() asm volatile( \
+ ".set mips2\n\t" \
+ "sync\n\t" \
+ ".set mips0" \
+ : /* no output */ \
+ : /* no input */ \
+ : "memory")
+#define cpu_relax() asm volatile("" ::: "memory")
+#endif
+
#include <time.h>
#include <unistd.h>
#include <sys/types.h>
diff --git a/tools/perf/scripts/python/bin/netdev-times-record b/tools/perf/scripts/python/bin/netdev-times-record
new file mode 100644
index 0000000..d931a82
--- /dev/null
+++ b/tools/perf/scripts/python/bin/netdev-times-record
@@ -0,0 +1,8 @@
+#!/bin/bash
+perf record -a -e net:net_dev_xmit -e net:net_dev_queue \
+ -e net:netif_receive_skb -e net:netif_rx \
+ -e skb:consume_skb -e skb:kfree_skb \
+ -e skb:skb_copy_datagram_iovec -e napi:napi_poll \
+ -e irq:irq_handler_entry -e irq:irq_handler_exit \
+ -e irq:softirq_entry -e irq:softirq_exit \
+ -e irq:softirq_raise $@
diff --git a/tools/perf/scripts/python/bin/netdev-times-report b/tools/perf/scripts/python/bin/netdev-times-report
new file mode 100644
index 0000000..c3d0a63
--- /dev/null
+++ b/tools/perf/scripts/python/bin/netdev-times-report
@@ -0,0 +1,5 @@
+#!/bin/bash
+# description: display the flow of packets and their processing time
+# args: [tx] [rx] [dev=] [debug]
+
+perf trace -s ~/libexec/perf-core/scripts/python/netdev-times.py $@
diff --git a/tools/perf/scripts/python/netdev-times.py b/tools/perf/scripts/python/netdev-times.py
new file mode 100644
index 0000000..9aa0a32
--- /dev/null
+++ b/tools/perf/scripts/python/netdev-times.py
@@ -0,0 +1,464 @@
+# Display the processing flow of packets and the time each step takes.
+# It helps to investigate networking and network device behaviour.
+#
+# options
+# tx: show only tx chart
+# rx: show only rx chart
+# dev=: show only things related to the specified device
+# debug: run in debug mode; it shows buffer status.
+
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+from Util import *
+
+all_event_list = []; # insert all tracepoint events related to this script
+irq_dic = {}; # key is cpu and value is a list which stacks irqs
+              # which raise the NET_RX softirq
+net_rx_dic = {}; # key is cpu and value includes the time of the NET_RX
+                 # softirq entry and a list which stacks receive events
+receive_hunk_list = []; # a list which includes sequences of receive events
+rx_skb_list = []; # received packet list for matching
+                  # skb_copy_datagram_iovec
+
+buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
+ # tx_xmit_list
+of_count_rx_skb_list = 0; # overflow count
+
+tx_queue_list = []; # list of packets which pass through dev_queue_xmit
+of_count_tx_queue_list = 0; # overflow count
+
+tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
+of_count_tx_xmit_list = 0; # overflow count
+
+tx_free_list = []; # list of packets which are freed
+
+# options
+show_tx = 0;
+show_rx = 0;
+dev = 0; # stores the name of the device specified by the "dev=" option
+debug = 0;
+
+# indices of event_info tuple
+EINFO_IDX_NAME= 0
+EINFO_IDX_CONTEXT=1
+EINFO_IDX_CPU= 2
+EINFO_IDX_TIME= 3
+EINFO_IDX_PID= 4
+EINFO_IDX_COMM= 5
+
+# Calculate a time interval (msec) from src (nsec) to dst (nsec)
+def diff_msec(src, dst):
+ return (dst - src) / 1000000.0
+
+# Display the process of transmitting a packet
+def print_transmit(hunk):
+ if dev != 0 and hunk['dev'].find(dev) < 0:
+ return
+ print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
+ (hunk['dev'], hunk['len'],
+ nsecs_secs(hunk['queue_t']),
+ nsecs_nsecs(hunk['queue_t'])/1000,
+ diff_msec(hunk['queue_t'], hunk['xmit_t']),
+ diff_msec(hunk['xmit_t'], hunk['free_t']))
+
+# Format for displaying rx packet processing
+PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
+PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
+PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
+PF_JOINT= " |"
+PF_WJOINT= " | |"
+PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
+PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
+PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
+PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
+PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
+
+# Display the process of received packets and the interrupts associated
+# with a NET_RX softirq
+def print_receive(hunk):
+ show_hunk = 0
+ irq_list = hunk['irq_list']
+ cpu = irq_list[0]['cpu']
+ base_t = irq_list[0]['irq_ent_t']
+	# check if this hunk should be shown
+ if dev != 0:
+ for i in range(len(irq_list)):
+ if irq_list[i]['name'].find(dev) >= 0:
+ show_hunk = 1
+ break
+ else:
+ show_hunk = 1
+ if show_hunk == 0:
+ return
+
+ print "%d.%06dsec cpu=%d" % \
+ (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
+ for i in range(len(irq_list)):
+ print PF_IRQ_ENTRY % \
+ (diff_msec(base_t, irq_list[i]['irq_ent_t']),
+ irq_list[i]['irq'], irq_list[i]['name'])
+ print PF_JOINT
+ irq_event_list = irq_list[i]['event_list']
+ for j in range(len(irq_event_list)):
+ irq_event = irq_event_list[j]
+ if irq_event['event'] == 'netif_rx':
+ print PF_NET_RX % \
+ (diff_msec(base_t, irq_event['time']),
+ irq_event['skbaddr'])
+ print PF_JOINT
+ print PF_SOFT_ENTRY % \
+ diff_msec(base_t, hunk['sirq_ent_t'])
+ print PF_JOINT
+ event_list = hunk['event_list']
+ for i in range(len(event_list)):
+ event = event_list[i]
+ if event['event_name'] == 'napi_poll':
+ print PF_NAPI_POLL % \
+ (diff_msec(base_t, event['event_t']), event['dev'])
+ if i == len(event_list) - 1:
+ print ""
+ else:
+ print PF_JOINT
+ else:
+ print PF_NET_RECV % \
+ (diff_msec(base_t, event['event_t']), event['skbaddr'],
+ event['len'])
+ if 'comm' in event.keys():
+ print PF_WJOINT
+ print PF_CPY_DGRAM % \
+ (diff_msec(base_t, event['comm_t']),
+ event['pid'], event['comm'])
+ elif 'handle' in event.keys():
+ print PF_WJOINT
+ if event['handle'] == "kfree_skb":
+ print PF_KFREE_SKB % \
+ (diff_msec(base_t,
+ event['comm_t']),
+ event['location'])
+ elif event['handle'] == "consume_skb":
+ print PF_CONS_SKB % \
+ diff_msec(base_t,
+ event['comm_t'])
+ print PF_JOINT
+
+def trace_begin():
+ global show_tx
+ global show_rx
+ global dev
+ global debug
+
+ for i in range(len(sys.argv)):
+ if i == 0:
+ continue
+ arg = sys.argv[i]
+ if arg == 'tx':
+ show_tx = 1
+ elif arg =='rx':
+ show_rx = 1
+ elif arg.find('dev=',0, 4) >= 0:
+ dev = arg[4:]
+ elif arg == 'debug':
+ debug = 1
+ if show_tx == 0 and show_rx == 0:
+ show_tx = 1
+ show_rx = 1
+
+def trace_end():
+ # order all events in time
+ all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
+ b[EINFO_IDX_TIME]))
+ # process all events
+ for i in range(len(all_event_list)):
+ event_info = all_event_list[i]
+ name = event_info[EINFO_IDX_NAME]
+ if name == 'irq__softirq_exit':
+ handle_irq_softirq_exit(event_info)
+ elif name == 'irq__softirq_entry':
+ handle_irq_softirq_entry(event_info)
+ elif name == 'irq__softirq_raise':
+ handle_irq_softirq_raise(event_info)
+ elif name == 'irq__irq_handler_entry':
+ handle_irq_handler_entry(event_info)
+ elif name == 'irq__irq_handler_exit':
+ handle_irq_handler_exit(event_info)
+ elif name == 'napi__napi_poll':
+ handle_napi_poll(event_info)
+ elif name == 'net__netif_receive_skb':
+ handle_netif_receive_skb(event_info)
+ elif name == 'net__netif_rx':
+ handle_netif_rx(event_info)
+ elif name == 'skb__skb_copy_datagram_iovec':
+ handle_skb_copy_datagram_iovec(event_info)
+ elif name == 'net__net_dev_queue':
+ handle_net_dev_queue(event_info)
+ elif name == 'net__net_dev_xmit':
+ handle_net_dev_xmit(event_info)
+ elif name == 'skb__kfree_skb':
+ handle_kfree_skb(event_info)
+ elif name == 'skb__consume_skb':
+ handle_consume_skb(event_info)
+ # display receive hunks
+ if show_rx:
+ for i in range(len(receive_hunk_list)):
+ print_receive(receive_hunk_list[i])
+ # display transmit hunks
+ if show_tx:
+ print " dev len Qdisc " \
+ " netdevice free"
+ for i in range(len(tx_free_list)):
+ print_transmit(tx_free_list[i])
+ if debug:
+ print "debug buffer status"
+ print "----------------------------"
+ print "xmit Qdisc:remain:%d overflow:%d" % \
+ (len(tx_queue_list), of_count_tx_queue_list)
+ print "xmit netdevice:remain:%d overflow:%d" % \
+ (len(tx_xmit_list), of_count_tx_xmit_list)
+ print "receive:remain:%d overflow:%d" % \
+ (len(rx_skb_list), of_count_rx_skb_list)
+
+# called from perf, when it finds a corresponding event
+def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
+ if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
+ return
+ event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
+ all_event_list.append(event_info)
+
+def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
+ if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
+ return
+ event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
+ all_event_list.append(event_info)
+
+def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
+ if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
+ return
+ event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
+ all_event_list.append(event_info)
+
+def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
+ irq, irq_name):
+ event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+ irq, irq_name)
+ all_event_list.append(event_info)
+
+def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
+ event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
+ all_event_list.append(event_info)
+
+def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
+ event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+ napi, dev_name)
+ all_event_list.append(event_info)
+
+def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
+ skblen, dev_name):
+ event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+ skbaddr, skblen, dev_name)
+ all_event_list.append(event_info)
+
+def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
+ skblen, dev_name):
+ event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+ skbaddr, skblen, dev_name)
+ all_event_list.append(event_info)
+
+def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
+ skbaddr, skblen, dev_name):
+ event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+ skbaddr, skblen, dev_name)
+ all_event_list.append(event_info)
+
+def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
+ skbaddr, skblen, rc, dev_name):
+ event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+ skbaddr, skblen, rc ,dev_name)
+ all_event_list.append(event_info)
+
+def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
+ skbaddr, protocol, location):
+ event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+ skbaddr, protocol, location)
+ all_event_list.append(event_info)
+
+def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
+ event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+ skbaddr)
+ all_event_list.append(event_info)
+
+def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
+ skbaddr, skblen):
+ event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
+ skbaddr, skblen)
+ all_event_list.append(event_info)
+
+def handle_irq_handler_entry(event_info):
+ (name, context, cpu, time, pid, comm, irq, irq_name) = event_info
+ if cpu not in irq_dic.keys():
+ irq_dic[cpu] = []
+ irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
+ irq_dic[cpu].append(irq_record)
+
+def handle_irq_handler_exit(event_info):
+ (name, context, cpu, time, pid, comm, irq, ret) = event_info
+ if cpu not in irq_dic.keys():
+ return
+ irq_record = irq_dic[cpu].pop()
+ if irq != irq_record['irq']:
+ return
+ irq_record.update({'irq_ext_t':time})
+	# if an irq doesn't include a NET_RX softirq, drop it.
+ if 'event_list' in irq_record.keys():
+ irq_dic[cpu].append(irq_record)
+
+def handle_irq_softirq_raise(event_info):
+ (name, context, cpu, time, pid, comm, vec) = event_info
+ if cpu not in irq_dic.keys() \
+ or len(irq_dic[cpu]) == 0:
+ return
+ irq_record = irq_dic[cpu].pop()
+ if 'event_list' in irq_record.keys():
+ irq_event_list = irq_record['event_list']
+ else:
+ irq_event_list = []
+ irq_event_list.append({'time':time, 'event':'sirq_raise'})
+ irq_record.update({'event_list':irq_event_list})
+ irq_dic[cpu].append(irq_record)
+
+def handle_irq_softirq_entry(event_info):
+ (name, context, cpu, time, pid, comm, vec) = event_info
+ net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
+
+def handle_irq_softirq_exit(event_info):
+ (name, context, cpu, time, pid, comm, vec) = event_info
+ irq_list = []
+ event_list = 0
+ if cpu in irq_dic.keys():
+ irq_list = irq_dic[cpu]
+ del irq_dic[cpu]
+ if cpu in net_rx_dic.keys():
+ sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
+ event_list = net_rx_dic[cpu]['event_list']
+ del net_rx_dic[cpu]
+ if irq_list == [] or event_list == 0:
+ return
+ rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
+ 'irq_list':irq_list, 'event_list':event_list}
+	# merge information related to a NET_RX softirq
+ receive_hunk_list.append(rec_data)
+
+def handle_napi_poll(event_info):
+ (name, context, cpu, time, pid, comm, napi, dev_name) = event_info
+ if cpu in net_rx_dic.keys():
+ event_list = net_rx_dic[cpu]['event_list']
+ rec_data = {'event_name':'napi_poll',
+ 'dev':dev_name, 'event_t':time}
+ event_list.append(rec_data)
+
+def handle_netif_rx(event_info):
+ (name, context, cpu, time, pid, comm,
+ skbaddr, skblen, dev_name) = event_info
+ if cpu not in irq_dic.keys() \
+ or len(irq_dic[cpu]) == 0:
+ return
+ irq_record = irq_dic[cpu].pop()
+ if 'event_list' in irq_record.keys():
+ irq_event_list = irq_record['event_list']
+ else:
+ irq_event_list = []
+ irq_event_list.append({'time':time, 'event':'netif_rx',
+ 'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
+ irq_record.update({'event_list':irq_event_list})
+ irq_dic[cpu].append(irq_record)
+
+def handle_netif_receive_skb(event_info):
+ global of_count_rx_skb_list
+
+ (name, context, cpu, time, pid, comm,
+ skbaddr, skblen, dev_name) = event_info
+ if cpu in net_rx_dic.keys():
+ rec_data = {'event_name':'netif_receive_skb',
+ 'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
+ event_list = net_rx_dic[cpu]['event_list']
+ event_list.append(rec_data)
+ rx_skb_list.insert(0, rec_data)
+ if len(rx_skb_list) > buffer_budget:
+ rx_skb_list.pop()
+ of_count_rx_skb_list += 1
+
+def handle_net_dev_queue(event_info):
+ global of_count_tx_queue_list
+
+ (name, context, cpu, time, pid, comm,
+ skbaddr, skblen, dev_name) = event_info
+ skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
+ tx_queue_list.insert(0, skb)
+ if len(tx_queue_list) > buffer_budget:
+ tx_queue_list.pop()
+ of_count_tx_queue_list += 1
+
+def handle_net_dev_xmit(event_info):
+ global of_count_tx_xmit_list
+
+ (name, context, cpu, time, pid, comm,
+ skbaddr, skblen, rc, dev_name) = event_info
+ if rc == 0: # NETDEV_TX_OK
+ for i in range(len(tx_queue_list)):
+ skb = tx_queue_list[i]
+ if skb['skbaddr'] == skbaddr:
+ skb['xmit_t'] = time
+ tx_xmit_list.insert(0, skb)
+ del tx_queue_list[i]
+ if len(tx_xmit_list) > buffer_budget:
+ tx_xmit_list.pop()
+ of_count_tx_xmit_list += 1
+ return
+
+def handle_kfree_skb(event_info):
+ (name, context, cpu, time, pid, comm,
+ skbaddr, protocol, location) = event_info
+ for i in range(len(tx_queue_list)):
+ skb = tx_queue_list[i]
+ if skb['skbaddr'] == skbaddr:
+ del tx_queue_list[i]
+ return
+ for i in range(len(tx_xmit_list)):
+ skb = tx_xmit_list[i]
+ if skb['skbaddr'] == skbaddr:
+ skb['free_t'] = time
+ tx_free_list.append(skb)
+ del tx_xmit_list[i]
+ return
+ for i in range(len(rx_skb_list)):
+ rec_data = rx_skb_list[i]
+ if rec_data['skbaddr'] == skbaddr:
+ rec_data.update({'handle':"kfree_skb",
+ 'comm':comm, 'pid':pid, 'comm_t':time})
+ del rx_skb_list[i]
+ return
+
+def handle_consume_skb(event_info):
+ (name, context, cpu, time, pid, comm, skbaddr) = event_info
+ for i in range(len(tx_xmit_list)):
+ skb = tx_xmit_list[i]
+ if skb['skbaddr'] == skbaddr:
+ skb['free_t'] = time
+ tx_free_list.append(skb)
+ del tx_xmit_list[i]
+ return
+
+def handle_skb_copy_datagram_iovec(event_info):
+ (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
+ for i in range(len(rx_skb_list)):
+ rec_data = rx_skb_list[i]
+ if skbaddr == rec_data['skbaddr']:
+ rec_data.update({'handle':"skb_copy_datagram_iovec",
+ 'comm':comm, 'pid':pid, 'comm_t':time})
+ del rx_skb_list[i]
+ return
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h
index 27e9ebe..a772979 100644
--- a/tools/perf/util/cache.h
+++ b/tools/perf/util/cache.h
@@ -82,6 +82,8 @@
extern char *perf_pathdup(const char *fmt, ...)
__attribute__((format (printf, 1, 2)));
+#ifdef NO_STRLCPY
extern size_t strlcpy(char *dest, const char *src, size_t size);
+#endif
#endif /* __PERF_CACHE_H */
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index f231f43..e12d539 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -28,6 +28,9 @@
#define chain_for_each_child(child, parent) \
list_for_each_entry(child, &parent->children, brothers)
+#define chain_for_each_child_safe(child, next, parent) \
+ list_for_each_entry_safe(child, next, &parent->children, brothers)
+
static void
rb_insert_callchain(struct rb_root *root, struct callchain_node *chain,
enum chain_mode mode)
@@ -86,10 +89,10 @@
* sort them by hit
*/
static void
-sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node,
+sort_chain_flat(struct rb_root *rb_root, struct callchain_root *root,
u64 min_hit, struct callchain_param *param __used)
{
- __sort_chain_flat(rb_root, node, min_hit);
+ __sort_chain_flat(rb_root, &root->node, min_hit);
}
static void __sort_chain_graph_abs(struct callchain_node *node,
@@ -108,11 +111,11 @@
}
static void
-sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_node *chain_root,
+sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_root *chain_root,
u64 min_hit, struct callchain_param *param __used)
{
- __sort_chain_graph_abs(chain_root, min_hit);
- rb_root->rb_node = chain_root->rb_root.rb_node;
+ __sort_chain_graph_abs(&chain_root->node, min_hit);
+ rb_root->rb_node = chain_root->node.rb_root.rb_node;
}
static void __sort_chain_graph_rel(struct callchain_node *node,
@@ -133,11 +136,11 @@
}
static void
-sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_node *chain_root,
+sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_root *chain_root,
u64 min_hit __used, struct callchain_param *param)
{
- __sort_chain_graph_rel(chain_root, param->min_percent / 100.0);
- rb_root->rb_node = chain_root->rb_root.rb_node;
+ __sort_chain_graph_rel(&chain_root->node, param->min_percent / 100.0);
+ rb_root->rb_node = chain_root->node.rb_root.rb_node;
}
int register_callchain_param(struct callchain_param *param)
@@ -284,19 +287,18 @@
}
static int
-__append_chain(struct callchain_node *root, struct resolved_chain *chain,
- unsigned int start, u64 period);
+append_chain(struct callchain_node *root, struct resolved_chain *chain,
+ unsigned int start, u64 period);
static void
-__append_chain_children(struct callchain_node *root,
- struct resolved_chain *chain,
- unsigned int start, u64 period)
+append_chain_children(struct callchain_node *root, struct resolved_chain *chain,
+ unsigned int start, u64 period)
{
struct callchain_node *rnode;
/* lookup in childrens */
chain_for_each_child(rnode, root) {
- unsigned int ret = __append_chain(rnode, chain, start, period);
+ unsigned int ret = append_chain(rnode, chain, start, period);
if (!ret)
goto inc_children_hit;
@@ -309,8 +311,8 @@
}
static int
-__append_chain(struct callchain_node *root, struct resolved_chain *chain,
- unsigned int start, u64 period)
+append_chain(struct callchain_node *root, struct resolved_chain *chain,
+ unsigned int start, u64 period)
{
struct callchain_list *cnode;
unsigned int i = start;
@@ -357,7 +359,7 @@
}
/* We match the node and still have a part remaining */
- __append_chain_children(root, chain, i, period);
+ append_chain_children(root, chain, i, period);
return 0;
}
@@ -380,8 +382,8 @@
}
-int append_chain(struct callchain_node *root, struct ip_callchain *chain,
- struct map_symbol *syms, u64 period)
+int callchain_append(struct callchain_root *root, struct ip_callchain *chain,
+ struct map_symbol *syms, u64 period)
{
struct resolved_chain *filtered;
@@ -398,9 +400,65 @@
if (!filtered->nr)
goto end;
- __append_chain_children(root, filtered, 0, period);
+ append_chain_children(&root->node, filtered, 0, period);
+
+ if (filtered->nr > root->max_depth)
+ root->max_depth = filtered->nr;
end:
free(filtered);
return 0;
}
+
+static int
+merge_chain_branch(struct callchain_node *dst, struct callchain_node *src,
+ struct resolved_chain *chain)
+{
+ struct callchain_node *child, *next_child;
+ struct callchain_list *list, *next_list;
+ int old_pos = chain->nr;
+ int err = 0;
+
+ list_for_each_entry_safe(list, next_list, &src->val, list) {
+ chain->ips[chain->nr].ip = list->ip;
+ chain->ips[chain->nr].ms = list->ms;
+ chain->nr++;
+ list_del(&list->list);
+ free(list);
+ }
+
+ if (src->hit)
+ append_chain_children(dst, chain, 0, src->hit);
+
+ chain_for_each_child_safe(child, next_child, src) {
+ err = merge_chain_branch(dst, child, chain);
+ if (err)
+ break;
+
+ list_del(&child->brothers);
+ free(child);
+ }
+
+ chain->nr = old_pos;
+
+ return err;
+}
+
+int callchain_merge(struct callchain_root *dst, struct callchain_root *src)
+{
+ struct resolved_chain *chain;
+ int err;
+
+ chain = malloc(sizeof(*chain) +
+ src->max_depth * sizeof(struct resolved_ip));
+ if (!chain)
+ return -ENOMEM;
+
+ chain->nr = 0;
+
+ err = merge_chain_branch(&dst->node, &src->node, chain);
+
+ free(chain);
+
+ return err;
+}
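
callchain.c now hangs every chain off a struct callchain_root that records
max_depth alongside the root node. That depth is what lets callchain_merge()
size a scratch resolved_chain buffer big enough to replay one tree into
another when two hist entries are collapsed. A hedged usage sketch
(sample-processing details elided):

    struct callchain_root a, b;

    callchain_init(&a);
    callchain_init(&b);
    /* ... callchain_append(&a, chain, syms, period) per sample ... */
    /* ... callchain_append(&b, chain, syms, period) per sample ... */

    /* when the two hist entries compare equal, fold b into a: */
    if (callchain_merge(&a, &b) < 0)
            return -ENOMEM; /* the scratch buffer allocation failed */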
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 624a96c..c15fb8c 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -26,9 +26,14 @@
u64 children_hit;
};
+struct callchain_root {
+ u64 max_depth;
+ struct callchain_node node;
+};
+
struct callchain_param;
-typedef void (*sort_chain_func_t)(struct rb_root *, struct callchain_node *,
+typedef void (*sort_chain_func_t)(struct rb_root *, struct callchain_root *,
u64, struct callchain_param *);
struct callchain_param {
@@ -44,14 +49,16 @@
struct list_head list;
};
-static inline void callchain_init(struct callchain_node *node)
+static inline void callchain_init(struct callchain_root *root)
{
- INIT_LIST_HEAD(&node->brothers);
- INIT_LIST_HEAD(&node->children);
- INIT_LIST_HEAD(&node->val);
+ INIT_LIST_HEAD(&root->node.brothers);
+ INIT_LIST_HEAD(&root->node.children);
+ INIT_LIST_HEAD(&root->node.val);
- node->parent = NULL;
- node->hit = 0;
+ root->node.parent = NULL;
+ root->node.hit = 0;
+ root->node.children_hit = 0;
+ root->max_depth = 0;
}
static inline u64 cumul_hits(struct callchain_node *node)
@@ -60,8 +67,9 @@
}
int register_callchain_param(struct callchain_param *param);
-int append_chain(struct callchain_node *root, struct ip_callchain *chain,
- struct map_symbol *syms, u64 period);
+int callchain_append(struct callchain_root *root, struct ip_callchain *chain,
+ struct map_symbol *syms, u64 period);
+int callchain_merge(struct callchain_root *dst, struct callchain_root *src);
bool ip_callchain__valid(struct ip_callchain *chain, const event_t *event);
#endif /* __PERF_CALLCHAIN_H */
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index be22ae6..2022e87 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -87,7 +87,7 @@
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
- size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_node) : 0;
+ size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
struct hist_entry *self = malloc(sizeof(*self) + callchain_size);
if (self != NULL) {
@@ -226,6 +226,8 @@
if (!cmp) {
iter->period += he->period;
+ if (symbol_conf.use_callchain)
+ callchain_merge(iter->callchain, he->callchain);
hist_entry__free(he);
return false;
}
diff --git a/tools/perf/util/path.c b/tools/perf/util/path.c
index 58a470d..bd74977 100644
--- a/tools/perf/util/path.c
+++ b/tools/perf/util/path.c
@@ -22,6 +22,7 @@
return ".";
}
+#ifdef NO_STRLCPY
size_t strlcpy(char *dest, const char *src, size_t size)
{
size_t ret = strlen(src);
@@ -33,7 +34,7 @@
}
return ret;
}
-
+#endif
static char *get_pathname(void)
{
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index e72f05c..fcc16e4 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -1539,6 +1539,7 @@
goto error;
}
tev->point.offset = pev->point.offset;
+ tev->point.retprobe = pev->point.retprobe;
tev->nargs = pev->nargs;
if (tev->nargs) {
tev->args = zalloc(sizeof(struct probe_trace_arg)
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 5251366..32b81f7 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -686,6 +686,25 @@
char buf[32], *ptr;
int ret, nscopes;
+ if (!is_c_varname(pf->pvar->var)) {
+ /* Copy raw parameters */
+ pf->tvar->value = strdup(pf->pvar->var);
+ if (pf->tvar->value == NULL)
+ return -ENOMEM;
+ if (pf->pvar->type) {
+ pf->tvar->type = strdup(pf->pvar->type);
+ if (pf->tvar->type == NULL)
+ return -ENOMEM;
+ }
+ if (pf->pvar->name) {
+ pf->tvar->name = strdup(pf->pvar->name);
+ if (pf->tvar->name == NULL)
+ return -ENOMEM;
+ } else
+ pf->tvar->name = NULL;
+ return 0;
+ }
+
if (pf->pvar->name)
pf->tvar->name = strdup(pf->pvar->name);
else {
@@ -700,19 +719,6 @@
if (pf->tvar->name == NULL)
return -ENOMEM;
- if (!is_c_varname(pf->pvar->var)) {
- /* Copy raw parameters */
- pf->tvar->value = strdup(pf->pvar->var);
- if (pf->tvar->value == NULL)
- return -ENOMEM;
- if (pf->pvar->type) {
- pf->tvar->type = strdup(pf->pvar->type);
- if (pf->tvar->type == NULL)
- return -ENOMEM;
- }
- return 0;
- }
-
pr_debug("Searching '%s' variable in context.\n",
pf->pvar->var);
/* Search child die for local variables and parameters. */
@@ -783,6 +789,16 @@
/* This function has no name. */
tev->point.offset = (unsigned long)pf->addr;
+ /* Return probe must be on the head of a subprogram */
+ if (pf->pev->point.retprobe) {
+ if (tev->point.offset != 0) {
+ pr_warning("Return probe must be on the head of"
+ " a real function\n");
+ return -EINVAL;
+ }
+ tev->point.retprobe = true;
+ }
+
pr_debug("Probe point found: %s+%lu\n", tev->point.symbol,
tev->point.offset);
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 46e531d..0b91053 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -70,7 +70,7 @@
struct hist_entry *pair;
struct rb_root sorted_chain;
};
- struct callchain_node callchain[0];
+ struct callchain_root callchain[0];
};
enum sort_type {
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 1a36773..b39f499 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -388,6 +388,20 @@
return fprintf(fp, "%s", sbuild_id);
}
+size_t dso__fprintf_symbols_by_name(struct dso *self, enum map_type type, FILE *fp)
+{
+ size_t ret = 0;
+ struct rb_node *nd;
+ struct symbol_name_rb_node *pos;
+
+ for (nd = rb_first(&self->symbol_names[type]); nd; nd = rb_next(nd)) {
+ pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
+ fprintf(fp, "%s\n", pos->sym.name);
+ }
+
+ return ret;
+}
+
size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp)
{
struct rb_node *nd;
@@ -2268,6 +2282,9 @@
int symbol__init(void)
{
+ if (symbol_conf.initialized)
+ return 0;
+
elf_version(EV_CURRENT);
if (symbol_conf.sort_by_name)
symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
@@ -2293,6 +2310,7 @@
symbol_conf.sym_list_str, "symbol") < 0)
goto out_free_comm_list;
+ symbol_conf.initialized = true;
return 0;
out_free_dso_list:
@@ -2304,11 +2322,14 @@
void symbol__exit(void)
{
+ if (!symbol_conf.initialized)
+ return;
strlist__delete(symbol_conf.sym_list);
strlist__delete(symbol_conf.dso_list);
strlist__delete(symbol_conf.comm_list);
vmlinux_path__exit();
symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
+ symbol_conf.initialized = false;
}
int machines__create_kernel_maps(struct rb_root *self, pid_t pid)
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index b7a8da4..038f220 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -69,7 +69,8 @@
show_nr_samples,
use_callchain,
exclude_other,
- show_cpu_utilization;
+ show_cpu_utilization,
+ initialized;
const char *vmlinux_name,
*source_prefix,
*field_sep;
@@ -181,6 +182,7 @@
size_t machines__fprintf_dsos_buildid(struct rb_root *self, FILE *fp, bool with_hits);
size_t dso__fprintf_buildid(struct dso *self, FILE *fp);
+size_t dso__fprintf_symbols_by_name(struct dso *self, enum map_type type, FILE *fp);
size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp);
enum dso_origin {
diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c
index 7ea983a..f7af2fc 100644
--- a/tools/perf/util/trace-event-scripting.c
+++ b/tools/perf/util/trace-event-scripting.c
@@ -97,7 +97,7 @@
register_python_scripting(&python_scripting_unsupported_ops);
}
#else
-struct scripting_ops python_scripting_ops;
+extern struct scripting_ops python_scripting_ops;
void setup_python_scripting(void)
{
@@ -158,7 +158,7 @@
register_perl_scripting(&perl_scripting_unsupported_ops);
}
#else
-struct scripting_ops perl_scripting_ops;
+extern struct scripting_ops perl_scripting_ops;
void setup_perl_scripting(void)
{
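
The one-word change in both stubs above matters because, in C, a file-scope
"struct scripting_ops python_scripting_ops;" without extern is a tentative
definition: it allocates the object in this translation unit and can collide
with, or silently shadow, the real definition in trace-event-python.c.
Illustratively, the two forms compared (generic "ops" name):

    struct scripting_ops ops;        /* tentative definition: this file
                                        now owns (a zeroed) 'ops' */
    extern struct scripting_ops ops; /* declaration only: refers to the
                                        single definition elsewhere */

With extern, the stub files merely declare the symbol and the linker binds
every reference to the one real definition.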
diff --git a/tools/perf/util/ui/browser.c b/tools/perf/util/ui/browser.c
index 66f2d58..6d0df80 100644
--- a/tools/perf/util/ui/browser.c
+++ b/tools/perf/util/ui/browser.c
@@ -1,16 +1,6 @@
-#define _GNU_SOURCE
-#include <stdio.h>
-#undef _GNU_SOURCE
-/*
- * slang versions <= 2.0.6 have a "#if HAVE_LONG_LONG" that breaks
- * the build if it isn't defined. Use the equivalent one that glibc
- * has on features.h.
- */
-#include <features.h>
-#ifndef HAVE_LONG_LONG
-#define HAVE_LONG_LONG __GLIBC_HAVE_LONG_LONG
-#endif
#include <slang.h>
+#include "libslang.h"
+#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <stdlib.h>
@@ -19,17 +9,9 @@
#include "helpline.h"
#include "../color.h"
#include "../util.h"
+#include <stdio.h>
-#if SLANG_VERSION < 20104
-#define sltt_set_color(obj, name, fg, bg) \
- SLtt_set_color(obj,(char *)name, (char *)fg, (char *)bg)
-#else
-#define sltt_set_color SLtt_set_color
-#endif
-
-newtComponent newt_form__new(void);
-
-int ui_browser__percent_color(double percent, bool current)
+static int ui_browser__percent_color(double percent, bool current)
{
if (current)
return HE_COLORSET_SELECTED;
@@ -40,6 +22,23 @@
return HE_COLORSET_NORMAL;
}
+void ui_browser__set_color(struct ui_browser *self __used, int color)
+{
+ SLsmg_set_color(color);
+}
+
+void ui_browser__set_percent_color(struct ui_browser *self,
+ double percent, bool current)
+{
+ int color = ui_browser__percent_color(percent, current);
+ ui_browser__set_color(self, color);
+}
+
+void ui_browser__gotorc(struct ui_browser *self, int y, int x)
+{
+ SLsmg_gotorc(self->y + y, self->x + x);
+}
+
void ui_browser__list_head_seek(struct ui_browser *self, off_t offset, int whence)
{
struct list_head *head = self->entries;
@@ -111,7 +110,7 @@
nd = self->top;
while (nd != NULL) {
- SLsmg_gotorc(self->y + row, self->x);
+ ui_browser__gotorc(self, row, 0);
self->write(self, nd, row);
if (++row == self->height)
break;
@@ -131,13 +130,10 @@
int cols, rows;
newtGetScreenSize(&cols, &rows);
- if (self->width > cols - 4)
- self->width = cols - 4;
- self->height = rows - 5;
- if (self->height > self->nr_entries)
- self->height = self->nr_entries;
- self->y = (rows - self->height) / 2;
- self->x = (cols - self->width) / 2;
+ self->width = cols - 1;
+ self->height = rows - 2;
+ self->y = 1;
+ self->x = 0;
}
void ui_browser__reset_index(struct ui_browser *self)
@@ -146,34 +142,48 @@
self->seek(self, 0, SEEK_SET);
}
+void ui_browser__add_exit_key(struct ui_browser *self, int key)
+{
+ newtFormAddHotKey(self->form, key);
+}
+
+void ui_browser__add_exit_keys(struct ui_browser *self, int keys[])
+{
+ int i = 0;
+
+ while (keys[i] && i < 64) {
+ ui_browser__add_exit_key(self, keys[i]);
+ ++i;
+ }
+}
+
int ui_browser__show(struct ui_browser *self, const char *title,
const char *helpline, ...)
{
va_list ap;
+ int keys[] = { NEWT_KEY_UP, NEWT_KEY_DOWN, NEWT_KEY_PGUP,
+ NEWT_KEY_PGDN, NEWT_KEY_HOME, NEWT_KEY_END, ' ',
+ NEWT_KEY_LEFT, NEWT_KEY_ESCAPE, 'q', CTRL('c'), 0 };
- if (self->form != NULL) {
+ if (self->form != NULL)
newtFormDestroy(self->form);
- newtPopWindow();
- }
+
ui_browser__refresh_dimensions(self);
- newtCenteredWindow(self->width, self->height, title);
- self->form = newt_form__new();
+ self->form = newtForm(NULL, NULL, 0);
if (self->form == NULL)
return -1;
- self->sb = newtVerticalScrollbar(self->width, 0, self->height,
+ self->sb = newtVerticalScrollbar(self->width, 1, self->height,
HE_COLORSET_NORMAL,
HE_COLORSET_SELECTED);
if (self->sb == NULL)
return -1;
- newtFormAddHotKey(self->form, NEWT_KEY_UP);
- newtFormAddHotKey(self->form, NEWT_KEY_DOWN);
- newtFormAddHotKey(self->form, NEWT_KEY_PGUP);
- newtFormAddHotKey(self->form, NEWT_KEY_PGDN);
- newtFormAddHotKey(self->form, NEWT_KEY_HOME);
- newtFormAddHotKey(self->form, NEWT_KEY_END);
- newtFormAddHotKey(self->form, ' ');
+ SLsmg_gotorc(0, 0);
+ ui_browser__set_color(self, NEWT_COLORSET_ROOT);
+ slsmg_write_nstring(title, self->width);
+
+ ui_browser__add_exit_keys(self, keys);
newtFormAddComponent(self->form, self->sb);
va_start(ap, helpline);
@@ -185,7 +195,6 @@
void ui_browser__hide(struct ui_browser *self)
{
newtFormDestroy(self->form);
- newtPopWindow();
self->form = NULL;
ui_helpline__pop();
}
@@ -196,28 +205,28 @@
newtScrollbarSet(self->sb, self->index, self->nr_entries - 1);
row = self->refresh(self);
- SLsmg_set_color(HE_COLORSET_NORMAL);
+ ui_browser__set_color(self, HE_COLORSET_NORMAL);
SLsmg_fill_region(self->y + row, self->x,
self->height - row, self->width, ' ');
return 0;
}
-int ui_browser__run(struct ui_browser *self, struct newtExitStruct *es)
+int ui_browser__run(struct ui_browser *self)
{
+ struct newtExitStruct es;
+
if (ui_browser__refresh(self) < 0)
return -1;
while (1) {
off_t offset;
- newtFormRun(self->form, es);
+ newtFormRun(self->form, &es);
- if (es->reason != NEWT_EXIT_HOTKEY)
+ if (es.reason != NEWT_EXIT_HOTKEY)
break;
- if (is_exit_key(es->u.key))
- return es->u.key;
- switch (es->u.key) {
+ switch (es.u.key) {
case NEWT_KEY_DOWN:
if (self->index == self->nr_entries - 1)
break;
@@ -274,12 +283,12 @@
self->seek(self, -offset, SEEK_END);
break;
default:
- return es->u.key;
+ return es.u.key;
}
if (ui_browser__refresh(self) < 0)
return -1;
}
- return 0;
+ return -1;
}
unsigned int ui_browser__list_head_refresh(struct ui_browser *self)
@@ -294,7 +303,7 @@
pos = self->top;
list_for_each_from(pos, head) {
- SLsmg_gotorc(self->y + row, self->x);
+ ui_browser__gotorc(self, row, 0);
self->write(self, pos, row);
if (++row == self->height)
break;
diff --git a/tools/perf/util/ui/browser.h b/tools/perf/util/ui/browser.h
index 0b9f829..0dc7e4d 100644
--- a/tools/perf/util/ui/browser.h
+++ b/tools/perf/util/ui/browser.h
@@ -25,16 +25,21 @@
};
-int ui_browser__percent_color(double percent, bool current);
+void ui_browser__set_color(struct ui_browser *self, int color);
+void ui_browser__set_percent_color(struct ui_browser *self,
+ double percent, bool current);
bool ui_browser__is_current_entry(struct ui_browser *self, unsigned row);
void ui_browser__refresh_dimensions(struct ui_browser *self);
void ui_browser__reset_index(struct ui_browser *self);
+void ui_browser__gotorc(struct ui_browser *self, int y, int x);
+void ui_browser__add_exit_key(struct ui_browser *self, int key);
+void ui_browser__add_exit_keys(struct ui_browser *self, int keys[]);
int ui_browser__show(struct ui_browser *self, const char *title,
const char *helpline, ...);
void ui_browser__hide(struct ui_browser *self);
int ui_browser__refresh(struct ui_browser *self);
-int ui_browser__run(struct ui_browser *self, struct newtExitStruct *es);
+int ui_browser__run(struct ui_browser *self);
void ui_browser__rb_tree_seek(struct ui_browser *self, off_t offset, int whence);
unsigned int ui_browser__rb_tree_refresh(struct ui_browser *self);
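
browser.c above replaces the long runs of newtFormAddHotKey() calls with
ui_browser__add_exit_keys(), which walks a zero-terminated int array
(bounded at 64 entries). Callers now describe their key bindings as data;
a usage sketch:

    int keys[] = { NEWT_KEY_TAB, NEWT_KEY_UNTAB, 'q', 0 };

    ui_browser__add_exit_keys(&browser->b, keys);

The terminating 0 is required: the loop stops at the first zero entry.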
diff --git a/tools/perf/util/ui/browsers/annotate.c b/tools/perf/util/ui/browsers/annotate.c
index a90273e..82b78f9 100644
--- a/tools/perf/util/ui/browsers/annotate.c
+++ b/tools/perf/util/ui/browsers/annotate.c
@@ -40,14 +40,12 @@
if (ol->offset != -1) {
struct objdump_line_rb_node *olrb = objdump_line__rb(ol);
- int color = ui_browser__percent_color(olrb->percent, current_entry);
- SLsmg_set_color(color);
+ ui_browser__set_percent_color(self, olrb->percent, current_entry);
slsmg_printf(" %7.2f ", olrb->percent);
if (!current_entry)
- SLsmg_set_color(HE_COLORSET_CODE);
+ ui_browser__set_color(self, HE_COLORSET_CODE);
} else {
- int color = ui_browser__percent_color(0, current_entry);
- SLsmg_set_color(color);
+ ui_browser__set_percent_color(self, 0, current_entry);
slsmg_write_nstring(" ", 9);
}
@@ -135,32 +133,31 @@
self->curr_hot = nd;
}
-static int annotate_browser__run(struct annotate_browser *self,
- struct newtExitStruct *es)
+static int annotate_browser__run(struct annotate_browser *self)
{
struct rb_node *nd;
struct hist_entry *he = self->b.priv;
+ int key;
if (ui_browser__show(&self->b, he->ms.sym->name,
- "<- or ESC: exit, TAB/shift+TAB: cycle thru samples") < 0)
+ "<-, -> or ESC: exit, TAB/shift+TAB: cycle thru samples") < 0)
return -1;
-
- newtFormAddHotKey(self->b.form, NEWT_KEY_LEFT);
- newtFormAddHotKey(self->b.form, NEWT_KEY_RIGHT);
+	/*
+	 * Add NEWT_KEY_RIGHT as an exit key so that builtin-annotate can
+	 * cycle through multiple symbols by examining the key this
+	 * function exits with.
+	 */
+ ui_browser__add_exit_key(&self->b, NEWT_KEY_RIGHT);
nd = self->curr_hot;
if (nd) {
- newtFormAddHotKey(self->b.form, NEWT_KEY_TAB);
- newtFormAddHotKey(self->b.form, NEWT_KEY_UNTAB);
+ int tabs[] = { NEWT_KEY_TAB, NEWT_KEY_UNTAB, 0 };
+ ui_browser__add_exit_keys(&self->b, tabs);
}
while (1) {
- ui_browser__run(&self->b, es);
+ key = ui_browser__run(&self->b);
- if (es->reason != NEWT_EXIT_HOTKEY)
- break;
-
- switch (es->u.key) {
+ switch (key) {
case NEWT_KEY_TAB:
nd = rb_prev(nd);
if (nd == NULL)
@@ -179,12 +176,11 @@
}
out:
ui_browser__hide(&self->b);
- return es->u.key;
+ return key;
}
int hist_entry__tui_annotate(struct hist_entry *self)
{
- struct newtExitStruct es;
struct objdump_line *pos, *n;
struct objdump_line_rb_node *rbpos;
LIST_HEAD(head);
@@ -232,7 +228,7 @@
annotate_browser__set_top(&browser, browser.curr_hot);
browser.b.width += 18; /* Percentage */
- ret = annotate_browser__run(&browser, &es);
+ ret = annotate_browser__run(&browser);
list_for_each_entry_safe(pos, n, &head, node) {
list_del(&pos->node);
objdump_line__free(pos);
diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/util/ui/browsers/hists.c
index dafdf67..ebda8c3 100644
--- a/tools/perf/util/ui/browsers/hists.c
+++ b/tools/perf/util/ui/browsers/hists.c
@@ -58,6 +58,11 @@
return map_symbol__folded(&self->ms);
}
+static void map_symbol__set_folding(struct map_symbol *self, bool unfold)
+{
+ self->unfolded = unfold ? self->has_children : false;
+}
+
static int callchain_node__count_rows_rb_tree(struct callchain_node *self)
{
int n = 0;
@@ -129,16 +134,16 @@
for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
struct callchain_list *chain;
- int first = true;
+ bool first = true;
list_for_each_entry(chain, &child->val, list) {
if (first) {
first = false;
chain->ms.has_children = chain->list.next != &child->val ||
- rb_first(&child->rb_root) != NULL;
+ !RB_EMPTY_ROOT(&child->rb_root);
} else
chain->ms.has_children = chain->list.next == &child->val &&
- rb_first(&child->rb_root) != NULL;
+ !RB_EMPTY_ROOT(&child->rb_root);
}
callchain_node__init_have_children_rb_tree(child);
@@ -150,7 +155,7 @@
struct callchain_list *chain;
list_for_each_entry(chain, &self->val, list)
- chain->ms.has_children = rb_first(&self->rb_root) != NULL;
+ chain->ms.has_children = !RB_EMPTY_ROOT(&self->rb_root);
callchain_node__init_have_children_rb_tree(self);
}
@@ -168,6 +173,7 @@
static void hist_entry__init_have_children(struct hist_entry *self)
{
if (!self->init_have_children) {
+ self->ms.has_children = !RB_EMPTY_ROOT(&self->sorted_chain);
callchain__init_have_children(&self->sorted_chain);
self->init_have_children = true;
}
@@ -195,43 +201,114 @@
return false;
}
-static int hist_browser__run(struct hist_browser *self, const char *title,
- struct newtExitStruct *es)
+static int callchain_node__set_folding_rb_tree(struct callchain_node *self, bool unfold)
{
- char str[256], unit;
- unsigned long nr_events = self->hists->stats.nr_events[PERF_RECORD_SAMPLE];
+ int n = 0;
+ struct rb_node *nd;
+
+ for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
+ struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
+ struct callchain_list *chain;
+ bool has_children = false;
+
+ list_for_each_entry(chain, &child->val, list) {
+ ++n;
+ map_symbol__set_folding(&chain->ms, unfold);
+ has_children = chain->ms.has_children;
+ }
+
+ if (has_children)
+ n += callchain_node__set_folding_rb_tree(child, unfold);
+ }
+
+ return n;
+}
+
+static int callchain_node__set_folding(struct callchain_node *node, bool unfold)
+{
+ struct callchain_list *chain;
+ bool has_children = false;
+ int n = 0;
+
+ list_for_each_entry(chain, &node->val, list) {
+ ++n;
+ map_symbol__set_folding(&chain->ms, unfold);
+ has_children = chain->ms.has_children;
+ }
+
+ if (has_children)
+ n += callchain_node__set_folding_rb_tree(node, unfold);
+
+ return n;
+}
+
+static int callchain__set_folding(struct rb_root *chain, bool unfold)
+{
+ struct rb_node *nd;
+ int n = 0;
+
+ for (nd = rb_first(chain); nd; nd = rb_next(nd)) {
+ struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
+ n += callchain_node__set_folding(node, unfold);
+ }
+
+ return n;
+}
+
+static void hist_entry__set_folding(struct hist_entry *self, bool unfold)
+{
+ hist_entry__init_have_children(self);
+ map_symbol__set_folding(&self->ms, unfold);
+
+ if (self->ms.has_children) {
+ int n = callchain__set_folding(&self->sorted_chain, unfold);
+ self->nr_rows = unfold ? n : 0;
+ } else
+ self->nr_rows = 0;
+}
+
+static void hists__set_folding(struct hists *self, bool unfold)
+{
+ struct rb_node *nd;
+
+ self->nr_entries = 0;
+
+ for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
+ struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
+ hist_entry__set_folding(he, unfold);
+ self->nr_entries += 1 + he->nr_rows;
+ }
+}
+
+static void hist_browser__set_folding(struct hist_browser *self, bool unfold)
+{
+ hists__set_folding(self->hists, unfold);
+ self->b.nr_entries = self->hists->nr_entries;
+ /* Go to the start: after a collapse we may be way past the valid entries */
+ ui_browser__reset_index(&self->b);
+}
+
+static int hist_browser__run(struct hist_browser *self, const char *title)
+{
+ int key;
+ int exit_keys[] = { 'a', '?', 'h', 'C', 'd', 'D', 'E', 't',
+ NEWT_KEY_ENTER, NEWT_KEY_RIGHT, NEWT_KEY_LEFT, 0, };
self->b.entries = &self->hists->entries;
self->b.nr_entries = self->hists->nr_entries;
hist_browser__refresh_dimensions(self);
- nr_events = convert_unit(nr_events, &unit);
- snprintf(str, sizeof(str), "Events: %lu%c ",
- nr_events, unit);
- newtDrawRootText(0, 0, str);
-
if (ui_browser__show(&self->b, title,
"Press '?' for help on key bindings") < 0)
return -1;
- newtFormAddHotKey(self->b.form, 'a');
- newtFormAddHotKey(self->b.form, '?');
- newtFormAddHotKey(self->b.form, 'h');
- newtFormAddHotKey(self->b.form, 'd');
- newtFormAddHotKey(self->b.form, 'D');
- newtFormAddHotKey(self->b.form, 't');
-
- newtFormAddHotKey(self->b.form, NEWT_KEY_LEFT);
- newtFormAddHotKey(self->b.form, NEWT_KEY_RIGHT);
- newtFormAddHotKey(self->b.form, NEWT_KEY_ENTER);
+ ui_browser__add_exit_keys(&self->b, exit_keys);
while (1) {
- ui_browser__run(&self->b, es);
- if (es->reason != NEWT_EXIT_HOTKEY)
- break;
- switch (es->u.key) {
+ key = ui_browser__run(&self->b);
+ switch (key) {
case 'D': { /* Debug */
static int seq;
struct hist_entry *h = rb_entry(self->b.top,
@@ -245,18 +322,26 @@
self->b.top_idx,
h->row_offset, h->nr_rows);
}
- continue;
+ break;
+ case 'C':
+ /* Collapse the whole world. */
+ hist_browser__set_folding(self, false);
+ break;
+ case 'E':
+ /* Expand the whole world. */
+ hist_browser__set_folding(self, true);
+ break;
case NEWT_KEY_ENTER:
if (hist_browser__toggle_fold(self))
break;
/* fall thru */
default:
- return 0;
+ goto out;
}
}
-
+out:
ui_browser__hide(&self->b);
- return 0;
+ return key;
}
static char *callchain_list__sym_name(struct callchain_list *self,
@@ -306,15 +391,10 @@
int color;
bool was_first = first;
- if (first) {
+ if (first)
first = false;
- chain->ms.has_children = chain->list.next != &child->val ||
- rb_first(&child->rb_root) != NULL;
- } else {
+ else
extra_offset = LEVEL_OFFSET_STEP;
- chain->ms.has_children = chain->list.next == &child->val &&
- rb_first(&child->rb_root) != NULL;
- }
folded_sign = callchain_list__folded(chain);
if (*row_offset != 0) {
@@ -341,8 +421,8 @@
*is_current_entry = true;
}
- SLsmg_set_color(color);
- SLsmg_gotorc(self->b.y + row, self->b.x);
+ ui_browser__set_color(&self->b, color);
+ ui_browser__gotorc(&self->b, row, 0);
slsmg_write_nstring(" ", offset + extra_offset);
slsmg_printf("%c ", folded_sign);
slsmg_write_nstring(str, width);
@@ -384,12 +464,7 @@
list_for_each_entry(chain, &node->val, list) {
char ipstr[BITS_PER_LONG / 4 + 1], *s;
int color;
- /*
- * FIXME: This should be moved to somewhere else,
- * probably when the callchain is created, so as not to
- * traverse it all over again
- */
- chain->ms.has_children = rb_first(&node->rb_root) != NULL;
+
folded_sign = callchain_list__folded(chain);
if (*row_offset != 0) {
@@ -405,8 +480,8 @@
}
s = callchain_list__sym_name(chain, ipstr, sizeof(ipstr));
- SLsmg_gotorc(self->b.y + row, self->b.x);
- SLsmg_set_color(color);
+ ui_browser__gotorc(&self->b, row, 0);
+ ui_browser__set_color(&self->b, color);
slsmg_write_nstring(" ", offset);
slsmg_printf("%c ", folded_sign);
slsmg_write_nstring(s, width - 2);
@@ -465,7 +540,7 @@
}
if (symbol_conf.use_callchain) {
- entry->ms.has_children = !RB_EMPTY_ROOT(&entry->sorted_chain);
+ hist_entry__init_have_children(entry);
folded_sign = hist_entry__folded(entry);
}
@@ -484,8 +559,8 @@
color = HE_COLORSET_NORMAL;
}
- SLsmg_set_color(color);
- SLsmg_gotorc(self->b.y + row, self->b.x);
+ ui_browser__set_color(&self->b, color);
+ ui_browser__gotorc(&self->b, row, 0);
if (symbol_conf.use_callchain) {
slsmg_printf("%c ", folded_sign);
width -= 2;
@@ -687,8 +762,6 @@
static void hist_browser__delete(struct hist_browser *self)
{
- newtFormDestroy(self->b.form);
- newtPopWindow();
free(self);
}
@@ -702,21 +775,26 @@
return self->he_selection->thread;
}
-static int hist_browser__title(char *bf, size_t size, const char *ev_name,
- const struct dso *dso, const struct thread *thread)
+static int hists__browser_title(struct hists *self, char *bf, size_t size,
+ const char *ev_name, const struct dso *dso,
+ const struct thread *thread)
{
- int printed = 0;
+ char unit;
+ int printed;
+ unsigned long nr_events = self->stats.nr_events[PERF_RECORD_SAMPLE];
+
+ nr_events = convert_unit(nr_events, &unit);
+ printed = snprintf(bf, size, "Events: %lu%c %s", nr_events, unit, ev_name);
if (thread)
printed += snprintf(bf + printed, size - printed,
- "Thread: %s(%d)",
- (thread->comm_set ? thread->comm : ""),
+ ", Thread: %s(%d)",
+ (thread->comm_set ? thread->comm : ""),
thread->pid);
if (dso)
printed += snprintf(bf + printed, size - printed,
- "%sDSO: %s", thread ? " " : "",
- dso->short_name);
- return printed ?: snprintf(bf, size, "Event: %s", ev_name);
+ ", DSO: %s", dso->short_name);
+ return printed;
}
int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
@@ -725,7 +803,6 @@
struct pstack *fstack;
const struct thread *thread_filter = NULL;
const struct dso *dso_filter = NULL;
- struct newtExitStruct es;
char msg[160];
int key = -1;
@@ -738,9 +815,8 @@
ui_helpline__push(helpline);
- hist_browser__title(msg, sizeof(msg), ev_name,
- dso_filter, thread_filter);
-
+ hists__browser_title(self, msg, sizeof(msg), ev_name,
+ dso_filter, thread_filter);
while (1) {
const struct thread *thread;
const struct dso *dso;
@@ -749,70 +825,63 @@
annotate = -2, zoom_dso = -2, zoom_thread = -2,
browse_map = -2;
- if (hist_browser__run(browser, msg, &es))
- break;
+ key = hist_browser__run(browser, msg);
thread = hist_browser__selected_thread(browser);
dso = browser->selection->map ? browser->selection->map->dso : NULL;
- if (es.reason == NEWT_EXIT_HOTKEY) {
- key = es.u.key;
-
- switch (key) {
- case NEWT_KEY_F1:
- goto do_help;
- case NEWT_KEY_TAB:
- case NEWT_KEY_UNTAB:
- /*
- * Exit the browser, let hists__browser_tree
- * go to the next or previous
- */
- goto out_free_stack;
- default:;
- }
-
- switch (key) {
- case 'a':
- if (browser->selection->map == NULL &&
- browser->selection->map->dso->annotate_warned)
- continue;
- goto do_annotate;
- case 'd':
- goto zoom_dso;
- case 't':
- goto zoom_thread;
- case 'h':
- case '?':
-do_help:
- ui__help_window("-> Zoom into DSO/Threads & Annotate current symbol\n"
- "<- Zoom out\n"
- "a Annotate current symbol\n"
- "h/?/F1 Show this window\n"
- "d Zoom into current DSO\n"
- "t Zoom into current Thread\n"
- "q/CTRL+C Exit browser");
+ switch (key) {
+ case NEWT_KEY_TAB:
+ case NEWT_KEY_UNTAB:
+ /*
+ * Exit the browser, let hists__browser_tree
+ * go to the next or previous
+ */
+ goto out_free_stack;
+ case 'a':
+ if (browser->selection->map == NULL ||
+ browser->selection->map->dso->annotate_warned)
continue;
- default:;
- }
- if (is_exit_key(key)) {
- if (key == NEWT_KEY_ESCAPE &&
- !ui__dialog_yesno("Do you really want to exit?"))
- continue;
- break;
- }
+ goto do_annotate;
+ case 'd':
+ goto zoom_dso;
+ case 't':
+ goto zoom_thread;
+ case NEWT_KEY_F1:
+ case 'h':
+ case '?':
+ ui__help_window("-> Zoom into DSO/Threads & Annotate current symbol\n"
+ "<- Zoom out\n"
+ "a Annotate current symbol\n"
+ "h/?/F1 Show this window\n"
+ "C Collapse all callchains\n"
+ "E Expand all callchains\n"
+ "d Zoom into current DSO\n"
+ "t Zoom into current Thread\n"
+ "q/CTRL+C Exit browser");
+ continue;
+ case NEWT_KEY_ENTER:
+ case NEWT_KEY_RIGHT:
+ /* menu */
+ break;
+ case NEWT_KEY_LEFT: {
+ const void *top;
- if (es.u.key == NEWT_KEY_LEFT) {
- const void *top;
-
- if (pstack__empty(fstack))
- continue;
- top = pstack__pop(fstack);
- if (top == &dso_filter)
- goto zoom_out_dso;
- if (top == &thread_filter)
- goto zoom_out_thread;
+ if (pstack__empty(fstack))
continue;
- }
+ top = pstack__pop(fstack);
+ if (top == &dso_filter)
+ goto zoom_out_dso;
+ if (top == &thread_filter)
+ goto zoom_out_thread;
+ continue;
+ }
+ case NEWT_KEY_ESCAPE:
+ if (!ui__dialog_yesno("Do you really want to exit?"))
+ continue;
+ /* Fall thru */
+ default:
+ goto out_free_stack;
}
if (browser->selection->sym != NULL &&
@@ -885,8 +954,8 @@
pstack__push(fstack, &dso_filter);
}
hists__filter_by_dso(self, dso_filter);
- hist_browser__title(msg, sizeof(msg), ev_name,
- dso_filter, thread_filter);
+ hists__browser_title(self, msg, sizeof(msg), ev_name,
+ dso_filter, thread_filter);
hist_browser__reset(browser);
} else if (choice == zoom_thread) {
zoom_thread:
@@ -903,8 +972,8 @@
pstack__push(fstack, &thread_filter);
}
hists__filter_by_thread(self, thread_filter);
- hist_browser__title(msg, sizeof(msg), ev_name,
- dso_filter, thread_filter);
+ hists__browser_title(self, msg, sizeof(msg), ev_name,
+ dso_filter, thread_filter);
hist_browser__reset(browser);
}
}
@@ -925,10 +994,6 @@
const char *ev_name = __event_name(hists->type, hists->config);
key = hists__browse(hists, help, ev_name);
-
- if (is_exit_key(key))
- break;
-
switch (key) {
case NEWT_KEY_TAB:
next = rb_next(nd);
@@ -940,7 +1005,7 @@
continue;
nd = rb_prev(nd);
default:
- break;
+ return key;
}
}
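
The row accounting is the subtle part of the new 'C'/'E' bindings: each hist_entry occupies one row for itself plus, when unfolded, one row per visible callchain line (he->nr_rows), and the browser's nr_entries must be recomputed whenever folding changes. A hypothetical helper, sketched from the hists__set_folding() loop above, that derives the total:

static u32 visible_rows(struct hists *hists)
{
	struct rb_node *nd;
	u32 rows = 0;

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);

		/* one row for the entry itself + its unfolded callchain lines */
		rows += 1 + he->nr_rows;
	}

	return rows;
}
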
diff --git a/tools/perf/util/ui/browsers/map.c b/tools/perf/util/ui/browsers/map.c
index 142b825..e35437d 100644
--- a/tools/perf/util/ui/browsers/map.c
+++ b/tools/perf/util/ui/browsers/map.c
@@ -1,6 +1,5 @@
#include "../libslang.h"
#include <elf.h>
-#include <newt.h>
#include <sys/ttydefaults.h>
#include <ctype.h>
#include <string.h>
@@ -47,7 +46,6 @@
struct map_browser {
struct ui_browser b;
struct map *map;
- u16 namelen;
u8 addrlen;
};
@@ -56,14 +54,16 @@
struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
struct map_browser *mb = container_of(self, struct map_browser, b);
bool current_entry = ui_browser__is_current_entry(self, row);
- int color = ui_browser__percent_color(0, current_entry);
+ int width;
- SLsmg_set_color(color);
+ ui_browser__set_percent_color(self, 0, current_entry);
slsmg_printf("%*llx %*llx %c ",
mb->addrlen, sym->start, mb->addrlen, sym->end,
sym->binding == STB_GLOBAL ? 'g' :
sym->binding == STB_LOCAL ? 'l' : 'w');
- slsmg_write_nstring(sym->name, mb->namelen);
+ width = self->width - ((mb->addrlen * 2) + 4);
+ if (width > 0)
+ slsmg_write_nstring(sym->name, width);
}
/* FIXME uber-kludgy, see comment on cmd_report... */
@@ -98,31 +98,29 @@
return 0;
}
-static int map_browser__run(struct map_browser *self, struct newtExitStruct *es)
+static int map_browser__run(struct map_browser *self)
{
+ int key;
+
if (ui_browser__show(&self->b, self->map->dso->long_name,
"Press <- or ESC to exit, %s / to search",
verbose ? "" : "restart with -v to use") < 0)
return -1;
- newtFormAddHotKey(self->b.form, NEWT_KEY_LEFT);
- newtFormAddHotKey(self->b.form, NEWT_KEY_ENTER);
if (verbose)
- newtFormAddHotKey(self->b.form, '/');
+ ui_browser__add_exit_key(&self->b, '/');
while (1) {
- ui_browser__run(&self->b, es);
- if (es->reason != NEWT_EXIT_HOTKEY)
- break;
- if (verbose && es->u.key == '/')
+ key = ui_browser__run(&self->b);
+ if (verbose && key == '/')
map_browser__search(self);
else
break;
}
ui_browser__hide(&self->b);
- return 0;
+ return key;
}
int map__browse(struct map *self)
@@ -136,7 +134,6 @@
},
.map = self,
};
- struct newtExitStruct es;
struct rb_node *nd;
char tmp[BITS_PER_LONG / 4];
u64 maxaddr = 0;
@@ -144,8 +141,6 @@
for (nd = rb_first(mb.b.entries); nd; nd = rb_next(nd)) {
struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
- if (mb.namelen < pos->namelen)
- mb.namelen = pos->namelen;
if (maxaddr < pos->end)
maxaddr = pos->end;
if (verbose) {
@@ -156,6 +151,5 @@
}
mb.addrlen = snprintf(tmp, sizeof(tmp), "%llx", maxaddr);
- mb.b.width += mb.addrlen * 2 + 4 + mb.namelen;
- return map_browser__run(&mb, &es);
+ return map_browser__run(&mb);
}
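
Note the column arithmetic in map_browser__write() above: the format "%*llx %*llx %c " consumes addrlen + 1 + addrlen + 1 + 1 + 1 = addrlen * 2 + 4 columns, which is exactly what is subtracted from self->width, so the symbol name gets whatever space remains instead of a precomputed namelen.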
diff --git a/tools/perf/util/ui/util.c b/tools/perf/util/ui/util.c
index 04600e2..9706d9d 100644
--- a/tools/perf/util/ui/util.c
+++ b/tools/perf/util/ui/util.c
@@ -11,8 +11,6 @@
#include "helpline.h"
#include "util.h"
-newtComponent newt_form__new(void);
-
static void newt_form__set_exit_keys(newtComponent self)
{
newtFormAddHotKey(self, NEWT_KEY_LEFT);
@@ -22,7 +20,7 @@
newtFormAddHotKey(self, CTRL('c'));
}
-newtComponent newt_form__new(void)
+static newtComponent newt_form__new(void)
{
newtComponent self = newtForm(NULL, NULL, 0);
if (self)
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index f380fed..7562707 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -266,19 +266,6 @@
bool strlazymatch(const char *str, const char *pat);
unsigned long convert_unit(unsigned long value, char *unit);
-#ifndef ESC
-#define ESC 27
-#endif
-
-static inline bool is_exit_key(int key)
-{
- char up;
- if (key == CTRL('c') || key == ESC)
- return true;
- up = toupper(key);
- return up == 'Q';
-}
-
#define _STR(x) #x
#define STR(x) _STR(x)
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 66cf65b..c1f1e3c 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -218,7 +218,6 @@
events = file->f_op->poll(file, &irqfd->pt);
list_add_tail(&irqfd->list, &kvm->irqfds.items);
- spin_unlock_irq(&kvm->irqfds.lock);
/*
* Check if there was an event already pending on the eventfd
@@ -227,6 +226,8 @@
if (events & POLLIN)
schedule_work(&irqfd->inject);
+ spin_unlock_irq(&kvm->irqfds.lock);
+
/*
* do not drop the file until the irqfd is fully initialized, otherwise
* we might race against the POLLHUP
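
The eventfd change is an ordering fix: by scheduling the injection work before irqfds.lock is dropped, a deassign running on another CPU can no longer shut down and flush the workqueue between the POLLIN check and the schedule_work() call, a window that could otherwise let the inject work run on freed memory. The critical section after the fix, condensed from the hunk above:

	spin_lock_irq(&kvm->irqfds.lock);

	/* register the wait queue and publish the irqfd */
	events = file->f_op->poll(file, &irqfd->pt);
	list_add_tail(&irqfd->list, &kvm->irqfds.items);

	/* an event may have fired before we registered: inject it now */
	if (events & POLLIN)
		schedule_work(&irqfd->inject);

	spin_unlock_irq(&kvm->irqfds.lock);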
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index b78b794..5186e72 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1958,10 +1958,10 @@
cpu);
hardware_disable(NULL);
break;
- case CPU_ONLINE:
+ case CPU_STARTING:
printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
cpu);
- smp_call_function_single(cpu, hardware_enable, NULL, 1);
+ hardware_enable(NULL);
break;
}
return NOTIFY_OK;
@@ -1970,10 +1970,12 @@
asmlinkage void kvm_handle_fault_on_reboot(void)
{
- if (kvm_rebooting)
+ if (kvm_rebooting) {
/* spin while reset goes on */
+ local_irq_enable();
while (true)
;
+ }
/* Fault while not rebooting. We want the trace. */
BUG();
}
@@ -2096,7 +2098,6 @@
static struct notifier_block kvm_cpu_notifier = {
.notifier_call = kvm_cpu_hotplug,
- .priority = 20, /* must be > scheduler priority */
};
static int vm_stat_get(void *_offset, u64 *val)
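
The hotplug change relies on CPU_STARTING semantics: unlike CPU_ONLINE, it is delivered on the CPU that is coming up, with interrupts disabled, so hardware_enable() can run in place and the notifier no longer needs a priority above the scheduler's. A minimal sketch of the resulting notifier shape, assuming the hardware_enable()/hardware_disable() callbacks shown above (example_cpu_hotplug() is a hypothetical name, not code from this patch):

static int example_cpu_hotplug(struct notifier_block *notifier,
			       unsigned long val, void *v)
{
	int cpu = (long)v;

	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "disabling virtualization on CPU%d\n", cpu);
		hardware_disable(NULL);	/* runs on the dying CPU */
		break;
	case CPU_STARTING:
		printk(KERN_INFO "enabling virtualization on CPU%d\n", cpu);
		hardware_enable(NULL);	/* runs on the starting CPU */
		break;
	}

	return NOTIFY_OK;
}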