-rw-r--r--  Documentation/RCU/00-INDEX | 2
-rw-r--r--  Documentation/RCU/trace.txt | 413
-rw-r--r--  Documentation/block/biodoc.txt | 6
-rw-r--r--  Documentation/lockstat.txt | 51
-rw-r--r--  arch/alpha/include/asm/io.h | 3
-rw-r--r--  arch/powerpc/platforms/pseries/rtasd.c | 4
-rw-r--r--  arch/s390/mm/pgtable.c | 4
-rw-r--r--  arch/um/include/asm/system.h | 14
-rw-r--r--  arch/x86/Kconfig | 24
-rw-r--r--  arch/x86/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/x86/include/asm/io_apic.h | 9
-rw-r--r--  arch/x86/include/asm/iommu.h | 2
-rw-r--r--  arch/x86/include/asm/irq_vectors.h | 11
-rw-r--r--  arch/x86/include/asm/pci.h | 2
-rw-r--r--  arch/x86/include/asm/pci_64.h | 1
-rw-r--r--  arch/x86/include/asm/uaccess.h | 2
-rw-r--r--  arch/x86/include/asm/uaccess_32.h | 8
-rw-r--r--  arch/x86/include/asm/uaccess_64.h | 6
-rw-r--r--  arch/x86/kernel/Makefile | 3
-rw-r--r--  arch/x86/kernel/hpet.c | 7
-rw-r--r--  arch/x86/kernel/io_apic.c | 761
-rw-r--r--  arch/x86/kernel/irq.c | 3
-rw-r--r--  arch/x86/kernel/irq_32.c | 2
-rw-r--r--  arch/x86/kernel/irq_64.c | 2
-rw-r--r--  arch/x86/kernel/irqinit_32.c | 3
-rw-r--r--  arch/x86/kernel/irqinit_64.c | 3
-rw-r--r--  arch/x86/kernel/pci-dma.c | 13
-rw-r--r--  arch/x86/kernel/pci-swiotlb_64.c | 29
-rw-r--r--  arch/x86/kernel/quirks.c | 2
-rw-r--r--  arch/x86/kernel/setup.c | 2
-rw-r--r--  arch/x86/lib/usercopy_32.c | 8
-rw-r--r--  arch/x86/lib/usercopy_64.c | 4
-rw-r--r--  arch/x86/mm/init_32.c | 3
-rw-r--r--  block/Kconfig | 23
-rw-r--r--  block/as-iosched.c | 10
-rw-r--r--  block/blk-barrier.c | 120
-rw-r--r--  block/blk-core.c | 63
-rw-r--r--  block/blk-settings.c | 6
-rw-r--r--  block/blk-softirq.c | 2
-rw-r--r--  block/blk-sysfs.c | 7
-rw-r--r--  block/blk-tag.c | 1
-rw-r--r--  block/blk-timeout.c | 21
-rw-r--r--  block/cfq-iosched.c | 26
-rw-r--r--  block/compat_ioctl.c | 2
-rw-r--r--  block/deadline-iosched.c | 6
-rw-r--r--  block/elevator.c | 73
-rw-r--r--  block/genhd.c | 23
-rw-r--r--  block/ioctl.c | 2
-rw-r--r--  block/noop-iosched.c | 2
-rw-r--r--  block/scsi_ioctl.c | 4
-rw-r--r--  drivers/block/cciss.c | 88
-rw-r--r--  drivers/block/cciss.h | 4
-rw-r--r--  drivers/block/cciss_cmd.h | 3
-rw-r--r--  drivers/block/loop.c | 39
-rw-r--r--  drivers/block/nbd.c | 10
-rw-r--r--  drivers/block/virtio_blk.c | 2
-rw-r--r--  drivers/block/xen-blkfront.c | 8
-rw-r--r--  drivers/cdrom/cdrom.c | 703
-rw-r--r--  drivers/char/hpet.c | 2
-rw-r--r--  drivers/char/random.c | 22
-rw-r--r--  drivers/clocksource/acpi_pm.c | 10
-rw-r--r--  drivers/input/touchscreen/ads7846.c | 4
-rw-r--r--  drivers/md/dm-crypt.c | 2
-rw-r--r--  drivers/md/dm-io.c | 2
-rw-r--r--  drivers/md/dm.c | 2
-rw-r--r--  drivers/pci/intr_remapping.c | 77
-rw-r--r--  drivers/pci/msi.c | 55
-rw-r--r--  drivers/xen/events.c | 17
-rw-r--r--  fs/aio.c | 100
-rw-r--r--  fs/bio-integrity.c | 2
-rw-r--r--  fs/bio.c | 320
-rw-r--r--  fs/buffer.c | 19
-rw-r--r--  fs/exec.c | 8
-rw-r--r--  fs/ext4/super.c | 8
-rw-r--r--  fs/proc/stat.c | 16
-rw-r--r--  include/asm-generic/bug.h | 7
-rw-r--r--  include/linux/Kbuild | 1
-rw-r--r--  include/linux/aio.h | 5
-rw-r--r--  include/linux/bio.h | 26
-rw-r--r--  include/linux/blkdev.h | 52
-rw-r--r--  include/linux/bottom_half.h | 1
-rw-r--r--  include/linux/buffer_head.h | 1
-rw-r--r--  include/linux/debug_locks.h | 2
-rw-r--r--  include/linux/elevator.h | 8
-rw-r--r--  include/linux/futex.h | 5
-rw-r--r--  include/linux/genhd.h | 1
-rw-r--r--  include/linux/hardirq.h | 13
-rw-r--r--  include/linux/hrtimer.h | 34
-rw-r--r--  include/linux/interrupt.h | 5
-rw-r--r--  include/linux/irq.h | 62
-rw-r--r--  include/linux/irqnr.h | 26
-rw-r--r--  include/linux/kernel.h | 11
-rw-r--r--  include/linux/kernel_stat.h | 14
-rw-r--r--  include/linux/lockdep.h | 45
-rw-r--r--  include/linux/mm_types.h | 5
-rw-r--r--  include/linux/msi.h | 3
-rw-r--r--  include/linux/mutex.h | 2
-rw-r--r--  include/linux/posix-timers.h | 6
-rw-r--r--  include/linux/random.h | 51
-rw-r--r--  include/linux/rcuclassic.h | 2
-rw-r--r--  include/linux/rcupdate.h | 10
-rw-r--r--  include/linux/rcutree.h | 329
-rw-r--r--  include/linux/swiotlb.h | 22
-rw-r--r--  include/linux/timex.h | 73
-rw-r--r--  include/linux/types.h | 11
-rw-r--r--  include/linux/uaccess.h | 2
-rw-r--r--  init/Kconfig | 86
-rw-r--r--  init/main.c | 11
-rw-r--r--  kernel/Kconfig.preempt | 25
-rw-r--r--  kernel/Makefile | 6
-rw-r--r--  kernel/exit.c | 4
-rw-r--r--  kernel/extable.c | 16
-rw-r--r--  kernel/fork.c | 4
-rw-r--r--  kernel/futex.c | 351
-rw-r--r--  kernel/hrtimer.c | 331
-rw-r--r--  kernel/irq/Makefile | 1
-rw-r--r--  kernel/irq/autoprobe.c | 15
-rw-r--r--  kernel/irq/chip.c | 16
-rw-r--r--  kernel/irq/handle.c | 189
-rw-r--r--  kernel/irq/internals.h | 5
-rw-r--r--  kernel/irq/manage.c | 27
-rw-r--r--  kernel/irq/numa_migrate.c | 122
-rw-r--r--  kernel/irq/proc.c | 6
-rw-r--r--  kernel/irq/spurious.c | 5
-rw-r--r--  kernel/lockdep.c | 60
-rw-r--r--  kernel/lockdep_proc.c | 28
-rw-r--r--  kernel/mutex.c | 10
-rw-r--r--  kernel/notifier.c | 8
-rw-r--r--  kernel/panic.c | 32
-rw-r--r--  kernel/posix-cpu-timers.c | 10
-rw-r--r--  kernel/posix-timers.c | 40
-rw-r--r--  kernel/printk.c | 2
-rw-r--r--  kernel/rcuclassic.c | 4
-rw-r--r--  kernel/rcupreempt.c | 10
-rw-r--r--  kernel/rcupreempt_trace.c | 10
-rw-r--r--  kernel/rcutorture.c | 66
-rw-r--r--  kernel/rcutree.c | 1535
-rw-r--r--  kernel/rcutree_trace.c | 271
-rw-r--r--  kernel/resource.c | 9
-rw-r--r--  kernel/sched.c | 5
-rw-r--r--  kernel/softirq.c | 19
-rw-r--r--  kernel/softlockup.c | 2
-rw-r--r--  kernel/stacktrace.c | 11
-rw-r--r--  kernel/sys.c | 2
-rw-r--r--  kernel/time/ntp.c | 4
-rw-r--r--  kernel/time/tick-sched.c | 44
-rw-r--r--  kernel/trace/trace_sysprof.c | 1
-rw-r--r--  lib/Kconfig.debug | 31
-rw-r--r--  lib/debugobjects.c | 4
-rw-r--r--  lib/swiotlb.c | 255
-rw-r--r--  mm/bounce.c | 9
-rw-r--r--  mm/memory.c | 15
-rw-r--r--  security/keys/keyctl.c | 6
-rw-r--r--  sound/core/hrtimer.c | 1
-rw-r--r--  sound/drivers/pcsp/pcsp.c | 1
155 files changed, 5906 insertions, 1977 deletions
diff --git a/Documentation/RCU/00-INDEX b/Documentation/RCU/00-INDEX
index 461481dfb7c3..7dc0695a8f90 100644
--- a/Documentation/RCU/00-INDEX
+++ b/Documentation/RCU/00-INDEX
@@ -16,6 +16,8 @@ RTFP.txt
- List of RCU papers (bibliography) going back to 1980.
torture.txt
- RCU Torture Test Operation (CONFIG_RCU_TORTURE_TEST)
+trace.txt
+ - CONFIG_RCU_TRACE debugfs files and formats
UP.txt
- RCU on Uniprocessor Systems
whatisRCU.txt
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt
new file mode 100644
index 000000000000..068848240a8b
--- /dev/null
+++ b/Documentation/RCU/trace.txt
@@ -0,0 +1,413 @@
+CONFIG_RCU_TRACE debugfs Files and Formats
+
+
+The rcupreempt and rcutree implementations of RCU provide debugfs trace
+output that summarizes counters and state. This information is useful for
+debugging RCU itself, and can sometimes also help to debug abuses of RCU.
+Note that the rcuclassic implementation of RCU does not provide debugfs
+trace output.
+
+The following sections describe the debugfs files and formats for
+preemptable RCU (rcupreempt) and hierarchical RCU (rcutree).
+
+
+Preemptable RCU debugfs Files and Formats
+
+This implementation of RCU provides three debugfs files under the
+top-level directory RCU: rcu/rcuctrs (which displays the per-CPU
+counters used by preemptable RCU), rcu/rcugp (which displays
+grace-period counters), and rcu/rcustats (which displays internal
+counters useful for debugging RCU itself).
+
+The output of "cat rcu/rcuctrs" looks as follows:
+
+CPU last cur F M
+ 0 5 -5 0 0
+ 1 -1 0 0 0
+ 2 0 1 0 0
+ 3 0 1 0 0
+ 4 0 1 0 0
+ 5 0 1 0 0
+ 6 0 2 0 0
+ 7 0 -1 0 0
+ 8 0 1 0 0
+ggp = 26226, state = waitzero
+
+The per-CPU fields are as follows:
+
+o "CPU" gives the CPU number. Offline CPUs are not displayed.
+
+o "last" gives the value of the counter that is being decremented
+ for the current grace period phase. In the example above,
+ the counters sum to 4, indicating that there are still four
+ RCU read-side critical sections still running that started
+ before the last counter flip.
+
+o "cur" gives the value of the counter that is currently being
+ both incremented (by rcu_read_lock()) and decremented (by
+ rcu_read_unlock()). In the example above, the counters sum to
+ 1, indicating that there is only one RCU read-side critical section
+ still running that started after the last counter flip.
+
+o "F" indicates whether RCU is waiting for this CPU to acknowledge
+ a counter flip. In the above example, RCU is not waiting on any,
+ which is consistent with the state being "waitzero" rather than
+ "waitack".
+
+o "M" indicates whether RCU is waiting for this CPU to execute a
+ memory barrier. In the above example, RCU is not waiting on any,
+ which is consistent with the state being "waitzero" rather than
+ "waitmb".
+
+o "ggp" is the global grace-period counter.
+
+o "state" is the RCU state, which can be one of the following:
+
+ o "idle": there is no grace period in progress.
+
+ o "waitack": RCU just incremented the global grace-period
+ counter, which has the effect of reversing the roles of
+ the "last" and "cur" counters above, and is waiting for
+ all the CPUs to acknowledge the flip. Once the flip has
+ been acknowledged, CPUs will no longer be incrementing
+ what are now the "last" counters, so that their sum will
+ decrease monotonically down to zero.
+
+ o "waitzero": RCU is waiting for the sum of the "last" counters
+ to decrease to zero.
+
+ o "waitmb": RCU is waiting for each CPU to execute a memory
+ barrier, which ensures that instructions from a given CPU's
+ last RCU read-side critical section cannot be reordered
+ with instructions following the memory-barrier instruction.
+
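+The behavior of the "last" and "cur" counters can be sketched in a
+few lines of code. The fragment below is NOT the kernel's rcupreempt
+implementation (the real code uses per-CPU variables and memory
+barriers, which are omitted here, and the NR_CPUS value is
+illustrative); it only shows why a reader that started before a
+counter flip ends up decrementing a "last" counter:
+
+	#define NR_CPUS 8
+
+	static int ctr[NR_CPUS][2];	/* [cpu][flip & 1] counter pairs */
+	static int flip;		/* low bit selects the "cur" side */
+
+	static int sketch_rcu_read_lock(int cpu)
+	{
+		int idx = flip & 1;	/* which counter is "cur" right now */
+
+		ctr[cpu][idx]++;	/* shows up in the "cur" column */
+		return idx;		/* caller remembers it for unlock */
+	}
+
+	static void sketch_rcu_read_unlock(int cpu, int idx)
+	{
+		ctr[cpu][idx]--;	/* decrements a "last" counter if a
+					 * flip happened while reading */
+	}
+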
+The output of "cat rcu/rcugp" looks as follows:
+
+oldggp=48870 newggp=48873
+
+Note that reading from this file provokes a synchronize_rcu(). The
+"oldggp" value is that of "ggp" from rcu/rcuctrs above, taken before
+executing the synchronize_rcu(), and the "newggp" value is also the
+"ggp" value, but taken after the synchronize_rcu() command returns.
+
+
+The output of "cat rcu/rcugp" looks as follows:
+
+na=1337955 nl=40 wa=1337915 wl=44 da=1337871 dl=0 dr=1337871 di=1337871
+1=50989 e1=6138 i1=49722 ie1=82 g1=49640 a1=315203 ae1=265563 a2=49640
+z1=1401244 ze1=1351605 z2=49639 m1=5661253 me1=5611614 m2=49639
+
+These counters track internal preemptable-RCU events; however, some
+of them may be useful for debugging algorithms that use RCU. In
+particular, the "nl", "wl", and "dl" values track the number of RCU
+callbacks in various states. The fields are as follows:
+
+o "na" is the total number of RCU callbacks that have been enqueued
+ since boot.
+
+o "nl" is the number of RCU callbacks waiting for the previous
+ grace period to end so that they can start waiting on the next
+ grace period.
+
+o "wa" is the total number of RCU callbacks that have started waiting
+ for a grace period since boot. "na" should be roughly equal to
+ "nl" plus "wa".
+
+o "wl" is the number of RCU callbacks currently waiting for their
+ grace period to end.
+
+o "da" is the total number of RCU callbacks whose grace periods
+ have completed since boot. "wa" should be roughly equal to
+ "wl" plus "da".
+
+o "dr" is the total number of RCU callbacks that have been removed
+ from the list of callbacks ready to invoke. "dr" should be roughly
+ equal to "da".
+
+o "di" is the total number of RCU callbacks that have been invoked
+ since boot. "di" should be roughly equal to "da", though some
+ early versions of preemptable RCU had a bug so that only the
+ last CPU's count of invocations was displayed, rather than the
+ sum of all CPUs' counts.
+
+o "1" is the number of calls to rcu_try_flip(). This should be
+ roughly equal to the sum of "e1", "i1", "a1", "z1", and "m1"
+ described below. In other words, the number of times that
+ the state machine is visited should be equal to the sum of the
+ number of times that each state is visited plus the number of
+ times that the state-machine lock acquisition failed.
+
+o "e1" is the number of times that rcu_try_flip() was unable to
+ acquire the fliplock.
+
+o "i1" is the number of calls to rcu_try_flip_idle().
+
+o "ie1" is the number of times rcu_try_flip_idle() exited early
+ due to the calling CPU having no work for RCU.
+
+o "g1" is the number of times that rcu_try_flip_idle() decided
+ to start a new grace period. "i1" should be roughly equal to
+ "ie1" plus "g1".
+
+o "a1" is the number of calls to rcu_try_flip_waitack().
+
+o "ae1" is the number of times that rcu_try_flip_waitack() found
+ that at least one CPU had not yet acknowledged the new grace period
+ (AKA "counter flip").
+
+o "a2" is the number of time rcu_try_flip_waitack() found that
+ all CPUs had acknowledged. "a1" should be roughly equal to
+ "ae1" plus "a2". (This particular output was collected on
+ a 128-CPU machine, hence the smaller-than-usual fraction of
+ calls to rcu_try_flip_waitack() finding all CPUs having already
+ acknowledged.)
+
+o "z1" is the number of calls to rcu_try_flip_waitzero().
+
+o "ze1" is the number of times that rcu_try_flip_waitzero() found
+ that not all of the old RCU read-side critical sections had
+ completed.
+
+o "z2" is the number of times that rcu_try_flip_waitzero() finds
+ the sum of the counters equal to zero, in other words, that
+ all of the old RCU read-side critical sections had completed.
+ The value of "z1" should be roughly equal to "ze1" plus
+ "z2".
+
+o "m1" is the number of calls to rcu_try_flip_waitmb().
+
+o "me1" is the number of times that rcu_try_flip_waitmb() finds
+ that at least one CPU has not yet executed a memory barrier.
+
+o "m2" is the number of times that rcu_try_flip_waitmb() finds that
+ all CPUs have executed a memory barrier.
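+
+The relationship among the "1", "e1", "i1", "a1", "z1", and "m1"
+counters can be made concrete with a short sketch. The following
+fragment is NOT the kernel's rcupreempt implementation; the lock,
+state variable, and function names are stand-ins, and the real state
+handlers also advance the state machine and bump the "ie1"/"g1",
+"ae1"/"a2", "ze1"/"z2", and "me1"/"m2" counters. Every call bumps
+"1"; a failed trylock bumps "e1"; otherwise exactly one per-state
+counter is bumped, which is why "1" should roughly equal the sum of
+"e1", "i1", "a1", "z1", and "m1":
+
+	enum flip_state { IDLE, WAITACK, WAITZERO, WAITMB };
+
+	/* stand-ins for the "1", "e1", "i1", "a1", "z1", "m1" counters */
+	static long n_1, n_e1, n_i1, n_a1, n_z1, n_m1;
+	static enum flip_state state = IDLE;
+	static int fliplock;			/* stand-in for ->fliplock */
+
+	static int trylock(int *l) { return *l ? 0 : (*l = 1); }
+	static void unlock(int *l) { *l = 0; }
+
+	static void sketch_rcu_try_flip(void)
+	{
+		n_1++;				/* "1": every invocation */
+		if (!trylock(&fliplock)) {
+			n_e1++;			/* "e1": lock was busy */
+			return;
+		}
+		switch (state) {		/* one state handler per call */
+		case IDLE:	n_i1++; break;	/* rcu_try_flip_idle() */
+		case WAITACK:	n_a1++; break;	/* rcu_try_flip_waitack() */
+		case WAITZERO:	n_z1++; break;	/* rcu_try_flip_waitzero() */
+		case WAITMB:	n_m1++; break;	/* rcu_try_flip_waitmb() */
+		}
+		unlock(&fliplock);
+	}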
+
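+As a cross-check, several of the relationships described above hold
+exactly for the sample rcu/rcustats output shown earlier:
+
+	na = nl + wa:	1337955 =      40 + 1337915
+	wa = wl + da:	1337915 =      44 + 1337871
+	da = dr = di:	1337871 = 1337871 = 1337871
+	i1 = ie1 + g1:	  49722 =      82 +   49640
+	a1 = ae1 + a2:	 315203 =  265563 +   49640
+	z1 = ze1 + z2:	1401244 = 1351605 +   49639
+	m1 = me1 + m2:	5661253 = 5611614 +   49639
+
+Note also that "g1", "a2", "z2", and "m2" all sit near 49640, the
+number of grace periods started since boot; each grace period passes
+through each wait state exactly once.
+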
+
+Hierarchical RCU debugfs Files and Formats
+
+This implementation of RCU provides three debugfs files under the
+top-level directory RCU: rcu/rcudata (which displays fields in struct
+rcu_data), rcu/rcugp (which displays grace-period counters), and
+rcu/rcuhier (which displays the struct rcu_node hierarchy).
+
+The output of "cat rcu/rcudata" looks as follows:
+
+rcu:
+ 0 c=4011 g=4012 pq=1 pqc=4011 qp=0 rpfq=1 rp=3c2a dt=23301/73 dn=2 df=1882 of=0 ri=2126 ql=2 b=10
+ 1 c=4011 g=4012 pq=1 pqc=4011 qp=0 rpfq=3 rp=39a6 dt=78073/1 dn=2 df=1402 of=0 ri=1875 ql=46 b=10
+ 2 c=4010 g=4010 pq=1 pqc=4010 qp=0 rpfq=-5 rp=1d12 dt=16646/0 dn=2 df=3140 of=0 ri=2080 ql=0 b=10
+ 3 c=4012 g=4013 pq=1 pqc=4012 qp=1 rpfq=3 rp=2b50 dt=21159/1 dn=2 df=2230 of=0 ri=1923 ql=72 b=10
+ 4 c=4012 g=4013 pq=1 pqc=4012 qp=1 rpfq=3 rp=1644 dt=5783/1 dn=2 df=3348 of=0 ri=2805 ql=7 b=10
+ 5 c=4012 g=4013 pq=0 pqc=4011 qp=1 rpfq=3 rp=1aac dt=5879/1 dn=2 df=3140 of=0 ri=2066 ql=10 b=10
+ 6 c=4012 g=4013 pq=1 pqc=4012 qp=1 rpfq=3 rp=ed8 dt=5847/1 dn=2 df=3797 of=0 ri=1266 ql=10 b=10
+ 7 c=4012 g=4013 pq=1 pqc=4012 qp=1 rpfq=3 rp=1fa2 dt=6199/1 dn=2 df=2795 of=0 ri=2162 ql=28 b=10
+rcu_bh:
+ 0 c=-268 g=-268 pq=1 pqc=-268 qp=0 rpfq=-145 rp=21d6 dt=23301/73 dn=2 df=0 of=0 ri=0 ql=0 b=10
+ 1 c=-268 g=-268 pq=1 pqc=-268 qp=1 rpfq=-170 rp=20ce dt=78073/1 dn=2 df=26 of=0 ri=5 ql=0 b=10
+ 2 c=-268 g=-268 pq=1 pqc=-268 qp=1 rpfq=-83 rp=fbd dt=16646/0 dn=2 df=28 of=0 ri=4 ql=0 b=10
+ 3 c=-268 g=-268 pq=1 pqc=-268 qp=0 rpfq=-105 rp=178c dt=21159/1 dn=2 df=28 of=0 ri=2 ql=0 b=10
+ 4 c=-268 g=-268 pq=1 pqc=-268 qp=1 rpfq=-30 rp=b54 dt=5783/1 dn=2 df=32 of=0 ri=0 ql=0 b=10
+ 5 c=-268 g=-268 pq=1 pqc=-268 qp=1 rpfq=-29 rp=df5 dt=5879/1 dn=2 df=30 of=0 ri=3 ql=0 b=10
+ 6 c=-268 g=-268 pq=1 pqc=-268 qp=1 rpfq=-28 rp=788 dt=5847/1 dn=2 df=32 of=0 ri=0 ql=0 b=10
+ 7 c=-268 g=-268 pq=1 pqc=-268 qp=1 rpfq=-53 rp=1098 dt=6199/1 dn=2 df=30 of=0 ri=3 ql=0 b=10
+
+The first section lists the rcu_data structures for rcu, the second for
+rcu_bh. Each section has one line per CPU, or eight for this 8-CPU system.
+The fields are as follows:
+
+o The number at the beginning of each line is the CPU number.
+ CPU numbers followed by an exclamation mark are offline,
+ but have been online at least once since boot. There will be
+ no output for CPUs that have never been online, which can be
+ a good thing in the surprisingly common case where NR_CPUS is
+ substantially larger than the number of actual CPUs.
+
+o "c" is the count of grace periods that this CPU believes have
+ completed. CPUs in dynticks idle mode may lag quite a ways
+ behind, for example, CPU 4 under "rcu" above, which has slept
+ through the past 25 RCU grace periods. It is not unusual to
+ see CPUs lagging by thousands of grace periods.
+
+o "g" is the count of grace periods that this CPU believes have
+ started. Again, CPUs in dynticks idle mode may lag behind.
+ If the "c" and "g" values are equal, this CPU has already
+ reported a quiescent state for the last RCU grace period that
+ it is aware of; otherwise, the CPU believes that it owes RCU a
+ quiescent state.
+
+o "pq" indicates that this CPU has passed through a quiescent state
+ for the current grace period. It is possible for "pq" to be
+ "1" and "c" different than "g", which indicates that although
+ the CPU has passed through a quiescent state, either (1) this
+ CPU has not yet reported that fact, (2) some other CPU has not
+ yet reported for this grace period, or (3) both.
+
+o "pqc" indicates which grace period the last-observed quiescent
+ state for this CPU corresponds to. This is important for handling
+ the race between CPU 0 reporting an extended dynticks-idle
+ quiescent state for CPU 1 and CPU 1 suddenly waking up and
+ reporting its own quiescent state. If CPU 1 was the last CPU
+ for the current grace period, then the CPU that loses this race
+ will attempt to incorrectly mark CPU 1 as having checked in for
+ the next grace period!
+
+o "qp" indicates that RCU still expects a quiescent state from
+ this CPU.
+
+o "rpfq" is the number of rcu_pending() calls on this CPU required
+ to induce this CPU to invoke force_quiescent_state().
+
+o "rp" is low-order four hex digits of the count of how many times
+ rcu_pending() has been invoked on this CPU.
+
+o "dt" is the current value of the dyntick counter that is incremented
+ when entering or leaving dynticks idle state, either by the
+ scheduler or by irq. The number after the "/" is the interrupt
+ nesting depth when in dyntick-idle state, or one greater than
+ the interrupt-nesting depth otherwise.
+
+ This field is displayed only for CONFIG_NO_HZ kernels.
+
+o "dn" is the current value of the dyntick counter that is incremented
+ when entering or leaving dynticks idle state via NMI. If both
+ the "dt" and "dn" values are even, then this CPU is in dynticks
+ idle mode and may be ignored by RCU. If either of these two
+ counters is odd, then RCU must be alert to the possibility of
+ an RCU read-side critical section running on this CPU.
+
+ This field is displayed only for CONFIG_NO_HZ kernels.
+
+o "df" is the number of times that some other CPU has forced a
+ quiescent state on behalf of this CPU due to this CPU being in
+ dynticks-idle state.
+
+ This field is displayed only for CONFIG_NO_HZ kernels.
+
+o "of" is the number of times that some other CPU has forced a
+ quiescent state on behalf of this CPU due to this CPU being
+ offline. In a perfect world, this might never happen, but it
+ turns out that offlining and onlining a CPU can take several grace
+ periods, and so there is likely to be an extended period of time
+ when RCU believes that the CPU is online when it really is not.
+ Please note that erring in the other direction (RCU believing a
+ CPU is offline when it is really alive and kicking) is a fatal
+ error, so it makes sense to err conservatively.
+
+o "ri" is the number of times that RCU has seen fit to send a
+ reschedule IPI to this CPU in order to get it to report a
+ quiescent state.
+
+o "ql" is the number of RCU callbacks currently residing on
+ this CPU. This is the total number of callbacks, regardless
+ of what state they are in (new, waiting for grace period to
+ start, waiting for grace period to end, ready to invoke).
+
+o "b" is the batch limit for this CPU. If more than this number
+ of RCU callbacks is ready to invoke, then the remainder will
+ be deferred.
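+
+The "dt" and "dn" descriptions above amount to a simple rule that can
+be written down directly. The following fragment is only an
+illustration of that rule, not the kernel's implementation:
+
+	/* A CPU may be ignored by RCU only if both counters are even. */
+	static int cpu_in_dynticks_idle(long dt, long dn)
+	{
+		return ((dt & 1) == 0) && ((dn & 1) == 0);
+	}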
+
+
+The output of "cat rcu/rcugp" looks as follows:
+
+rcu: completed=33062 gpnum=33063
+rcu_bh: completed=464 gpnum=464
+
+Again, this output is for both "rcu" and "rcu_bh". The fields are
+taken from the rcu_state structure, and are as follows:
+
+o "completed" is the number of grace periods that have completed.
+ It is comparable to the "c" field from rcu/rcudata in that a
+ CPU whose "c" field matches the value of "completed" is aware
+ that the corresponding RCU grace period has completed.
+
+o "gpnum" is the number of grace periods that have started. It is
+ comparable to the "g" field from rcu/rcudata in that a CPU
+ whose "g" field matches the value of "gpnum" is aware that the
+ corresponding RCU grace period has started.
+
+ If these two fields are equal (as they are for "rcu_bh" above),
+ then there is no grace period in progress, in other words, RCU
+ is idle. On the other hand, if the two fields differ (as they
+ do for "rcu" above), then an RCU grace period is in progress.
+
+
+The output of "cat rcu/rcuhier" looks as follows, with very long lines:
+
+c=6902 g=6903 s=2 jfq=3 j=72c7 nfqs=13142/nfqsng=0(13142) fqlh=6
+1/1 0:127 ^0
+3/3 0:35 ^0 0/0 36:71 ^1 0/0 72:107 ^2 0/0 108:127 ^3
+3/3f 0:5 ^0 2/3 6:11 ^1 0/0 12:17 ^2 0/0 18:23 ^3 0/0 24:29 ^4 0/0 30:35 ^5 0/0 36:41 ^0 0/0 42:47 ^1 0/0 48:53 ^2 0/0 54:59 ^3 0/0 60:65 ^4 0/0 66:71 ^5 0/0 72:77 ^0 0/0 78:83 ^1 0/0 84:89 ^2 0/0 90:95 ^3 0/0 96:101 ^4 0/0 102:107 ^5 0/0 108:113 ^0 0/0 114:119 ^1 0/0 120:125 ^2 0/0 126:127 ^3
+rcu_bh:
+c=-226 g=-226 s=1 jfq=-5701 j=72c7 nfqs=88/nfqsng=0(88) fqlh=0
+0/1 0:127 ^0
+0/3 0:35 ^0 0/0 36:71 ^1 0/0 72:107 ^2 0/0 108:127 ^3
+0/3f 0:5 ^0 0/3 6:11 ^1 0/0 12:17 ^2 0/0 18:23 ^3 0/0 24:29 ^4 0/0 30:35 ^5 0/0 36:41 ^0 0/0 42:47 ^1 0/0 48:53 ^2 0/0 54:59 ^3 0/0 60:65 ^4 0/0 66:71 ^5 0/0 72:77 ^0 0/0 78:83 ^1 0/0 84:89 ^2 0/0 90:95 ^3 0/0 96:101 ^4 0/0 102:107 ^5 0/0 108:113 ^0 0/0 114:119 ^1 0/0 120:125 ^2 0/0 126:127 ^3
+
+This is once again split into "rcu" and "rcu_bh" portions. The fields are
+as follows:
+
+o "c" is exactly the same as "completed" under rcu/rcugp.
+
+o "g" is exactly the same as "gpnum" under rcu/rcugp.
+
+o "s" is the "signaled" state that drives force_quiescent_state()'s
+ state machine.
+
+o "jfq" is the number of jiffies remaining for this grace period
+ before force_quiescent_state() is invoked to help push things
+ along. Note that CPUs in dyntick-idle mode throughout the grace
+ period will not report on their own, but rather must be checked by
+ some other CPU via force_quiescent_state().
+
+o "j" is the low-order four hex digits of the jiffies counter.
+ Yes, Paul did run into a number of problems that turned out to
+ be due to the jiffies counter no longer counting. Why do you ask?
+
+o "nfqs" is the number of calls to force_quiescent_state() since
+ boot.
+
+o "nfqsng" is the number of useless calls to force_quiescent_state(),
+ where there wasn't actually a grace period active. This can
+ happen due to races. The number in parentheses is the difference
+ between "nfqs" and "nfqsng", or the number of times that
+ force_quiescent_state() actually did some real work.
+
+o "fqlh" is the number of calls to force_quiescent_state() that
+ exited immediately (without even being counted in nfqs above)
+ due to contention on ->fqslock.
+
+o Each element of the form "1/1 0:127 ^0" represents one struct
+ rcu_node. Each line represents one level of the hierarchy, from
+ root to leaves. It is best to think of the rcu_data structures
+ as forming yet another level after the leaves. Note that there
+ might be either one, two, or three levels of rcu_node structures,
+ depending on the relationship between CONFIG_RCU_FANOUT and
+ CONFIG_NR_CPUS.
+
+ o The numbers separated by the "/" are the qsmask followed
+ by the qsmaskinit. The qsmask will have one bit
+ set for each entity in the next lower level that
+ has not yet checked in for the current grace period.
+ The qsmaskinit will have one bit for each entity that is
+ currently expected to check in during each grace period.
+ The qsmask is initialized from the qsmaskinit at the
+ beginning of each grace period.
+
+ For example, for "rcu", the qsmask of the first entry
+ of the lowest level is 0x14, meaning that we are still
+ waiting for CPUs 2 and 4 to check in for the current
+ grace period.
+
+ o The numbers separated by the ":" are the range of CPUs
+ served by this struct rcu_node. This can be helpful
+ in working out how the hierarchy is wired together.
+
+ For example, the first entry at the lowest level shows
+ "0:5", indicating that it covers CPUs 0 through 5.
+
+ o The number after the "^" indicates the bit in the
+ next higher level rcu_node structure that this
+ rcu_node structure corresponds to.
+
+ For example, the first entry at the lowest level shows
+ "^0", indicating that it corresponds to bit zero in
+ the first entry at the middle level.
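+
+A final note on the shape of the hierarchy: the number of rcu_node
+levels follows from CONFIG_RCU_FANOUT and CONFIG_NR_CPUS. In the
+sample above, the root covers "0:127", so this kernel was built with
+CONFIG_NR_CPUS=128, and each leaf covers at most six CPUs ("0:5"),
+suggesting CONFIG_RCU_FANOUT=6. Three levels are then needed
+because:
+
+	6^1 =   6 < 128
+	6^2 =  36 < 128
+	6^3 = 216 >= 128
+
+that is, the smallest n for which FANOUT^n >= NR_CPUS is n = 3,
+matching the three lines of rcu_node output shown above for each of
+"rcu" and "rcu_bh".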
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 4dbb8be1c991..3c5434c83daf 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -914,7 +914,7 @@ I/O scheduler, a.k.a. elevator, is implemented in two layers. Generic dispatch
queue and specific I/O schedulers. Unless stated otherwise, elevator is used
to refer to both parts and I/O scheduler to specific I/O schedulers.
-Block layer implements generic dispatch queue in ll_rw_blk.c and elevator.c.
+Block layer implements generic dispatch queue in block/*.c.
The generic dispatch queue is responsible for properly ordering barrier
requests, requeueing, handling non-fs requests and all other subtleties.
@@ -926,8 +926,8 @@ be built inside the kernel. Each queue can choose different one and can also
change to another one dynamically.
A block layer call to the i/o scheduler follows the convention elv_xxx(). This
-calls elevator_xxx_fn in the elevator switch (drivers/block/elevator.c). Oh,
-xxx and xxx might not match exactly, but use your imagination. If an elevator
+calls elevator_xxx_fn in the elevator switch (block/elevator.c). Oh, xxx
+and xxx might not match exactly, but use your imagination. If an elevator
doesn't implement a function, the switch does nothing or some minimal house
keeping work.
diff --git a/Documentation/lockstat.txt b/Documentation/lockstat.txt
index 4ba4664ce5c3..9cb9138f7a79 100644
--- a/Documentation/lockstat.txt
+++ b/Documentation/lockstat.txt
@@ -71,35 +71,50 @@ Look at the current lock statistics:
# less /proc/lock_stat
-01 lock_stat version 0.2
+01 lock_stat version 0.3
02 -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
03 class name con-bounces contentions waittime-min waittime-max waittime-total acq-bounces acquisitions holdtime-min holdtime-max holdtime-total
04 -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
05
-06 &inode->i_data.tree_lock-W: 15 21657 0.18 1093295.30 11547131054.85 58 10415 0.16 87.51 6387.60
-07 &inode->i_data.tree_lock-R: 0 0 0.00 0.00 0.00 23302 231198 0.25 8.45 98023.38
-08 --------------------------
-09 &inode->i_data.tree_lock 0 [<ffffffff8027c08f>] add_to_page_cache+0x5f/0x190
-10
-11 ...............................................................................................................................................................................................
-12
-13 dcache_lock: 1037 1161 0.38 45.32 774.51 6611 243371 0.15 306.48 77387.24
-14 -----------
-15 dcache_lock 180 [<ffffffff802c0d7e>] sys_getcwd+0x11e/0x230
-16 dcache_lock 165 [<ffffffff802c002a>] d_alloc+0x15a/0x210
-17 dcache_lock 33 [<ffffffff8035818d>] _atomic_dec_and_lock+0x4d/0x70
-18 dcache_lock 1 [<ffffffff802beef8>] shrink_dcache_parent+0x18/0x130
+06 &mm->mmap_sem-W: 233 538 18446744073708 22924.27 607243.51 1342 45806 1.71 8595.89 1180582.34
+07 &mm->mmap_sem-R: 205 587 18446744073708 28403.36 731975.00 1940 412426 0.58 187825.45 6307502.88
+08 ---------------
+09 &mm->mmap_sem 487 [<ffffffff8053491f>] do_page_fault+0x466/0x928
+10 &mm->mmap_sem 179 [<ffffffff802a6200>] sys_mprotect+0xcd/0x21d
+11 &mm->mmap_sem 279 [<ffffffff80210a57>] sys_mmap+0x75/0xce
+12 &mm->mmap_sem 76 [<ffffffff802a490b>] sys_munmap+0x32/0x59
+13 ---------------
+14 &mm->mmap_sem 270 [<ffffffff80210a57>] sys_mmap+0x75/0xce
+15 &mm->mmap_sem 431 [<ffffffff8053491f>] do_page_fault+0x466/0x928
+16 &mm->mmap_sem 138 [<ffffffff802a490b>] sys_munmap+0x32/0x59
+17 &mm->mmap_sem 145 [<ffffffff802a6200>] sys_mprotect+0xcd/0x21d
+18
+19 ...............................................................................................................................................................................................
+20
+21 dcache_lock: 621 623 0.52 118.26 1053.02 6745 91930 0.29 316.29 118423.41
+22 -----------
+23 dcache_lock 179 [<ffffffff80378274>] _atomic_dec_and_lock+0x34/0x54
+24 dcache_lock 113 [<ffffffff802cc17b>] d_alloc+0x19a/0x1eb
+25 dcache_lock 99 [<ffffffff802ca0dc>] d_rehash+0x1b/0x44
+26 dcache_lock 104 [<ffffffff802cbca0>] d_instantiate+0x36/0x8a
+27 -----------
+28 dcache_lock 192 [<ffffffff80378274>] _atomic_dec_and_lock+0x34/0x54
+29 dcache_lock 98 [<ffffffff802ca0dc>] d_rehash+0x1b/0x44
+30 dcache_lock 72 [<ffffffff802cc17b>] d_alloc+0x19a/0x1eb
+31 dcache_lock 112 [<ffffffff802cbca0>] d_instantiate+0x36/0x8a
This excerpt shows the first two lock class statistics. Line 01 shows the
output version - each time the format changes this will be updated. Line 02-04
-show the header with column descriptions. Lines 05-10 and 13-18 show the actual
+show the header with column descriptions. Lines 05-18 and 20-31 show the actual
statistics. These statistics come in two parts; the actual stats separated by a
-short separator (line 08, 14) from the contention points.
+short separator (line 08, 13) from the contention points.
-The first lock (05-10) is a read/write lock, and shows two lines above the
+The first lock (05-18) is a read/write lock, and shows two lines above the
short separator. The contention points don't match the column descriptors,
-they have two: contentions and [<IP>] symbol.
+they have two: contentions and [<IP>] symbol. The second set of contention
+points are the points we're contending with.
+The integer part of the time values is in us.
View the top contending locks:
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
index e971ab000f95..eda9b909aa05 100644
--- a/arch/alpha/include/asm/io.h
+++ b/arch/alpha/include/asm/io.h
@@ -96,9 +96,6 @@ static inline dma_addr_t __deprecated isa_page_to_bus(struct page *page)
return page_to_phys(page);
}
-/* This depends on working iommu. */
-#define BIO_VMERGE_BOUNDARY (alpha_mv.mv_pci_tbi ? PAGE_SIZE : 0)
-
/* Maximum PIO space address supported? */
#define IO_SPACE_LIMIT 0xffff
diff --git a/arch/powerpc/platforms/pseries/rtasd.c b/arch/powerpc/platforms/pseries/rtasd.c
index f4e55be2eea9..afad9f5ac0ac 100644
--- a/arch/powerpc/platforms/pseries/rtasd.c
+++ b/arch/powerpc/platforms/pseries/rtasd.c
@@ -208,6 +208,7 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal)
break;
case ERR_TYPE_KERNEL_PANIC:
default:
+ WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */
spin_unlock_irqrestore(&rtasd_log_lock, s);
return;
}
@@ -227,6 +228,7 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal)
/* Check to see if we need to or have stopped logging */
if (fatal || !logging_enabled) {
logging_enabled = 0;
+ WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */
spin_unlock_irqrestore(&rtasd_log_lock, s);
return;
}
@@ -249,11 +251,13 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal)
else
rtas_log_start += 1;
+ WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */
spin_unlock_irqrestore(&rtasd_log_lock, s);
wake_up_interruptible(&rtas_log_wait);
break;
case ERR_TYPE_KERNEL_PANIC:
default:
+ WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */
spin_unlock_irqrestore(&rtasd_log_lock, s);
return;
}
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index ef3635b52fc0..0767827540b1 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -263,7 +263,7 @@ int s390_enable_sie(void)
/* lets check if we are allowed to replace the mm */
task_lock(tsk);
if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
- tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
+ tsk->mm != tsk->active_mm || !hlist_empty(&tsk->mm->ioctx_list)) {
task_unlock(tsk);
return -EINVAL;
}
@@ -279,7 +279,7 @@ int s390_enable_sie(void)
/* Now lets check again if something happened */
task_lock(tsk);
if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
- tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
+ tsk->mm != tsk->active_mm || !hlist_empty(&tsk->mm->ioctx_list)) {
mmput(mm);
task_unlock(tsk);
return -EINVAL;
diff --git a/arch/um/include/asm/system.h b/arch/um/include/asm/system.h
index 753346e2cdfd..ae5f94d6317d 100644
--- a/arch/um/include/asm/system.h
+++ b/arch/um/include/asm/system.h
@@ -11,21 +11,21 @@ extern int get_signals(void);
extern void block_signals(void);
extern void unblock_signals(void);
-#define local_save_flags(flags) do { typecheck(unsigned long, flags); \
+#define raw_local_save_flags(flags) do { typecheck(unsigned long, flags); \
(flags) = get_signals(); } while(0)
-#define local_irq_restore(flags) do { typecheck(unsigned long, flags); \
+#define raw_local_irq_restore(flags) do { typecheck(unsigned long, flags); \
set_signals(flags); } while(0)
-#define local_irq_save(flags) do { local_save_flags(flags); \
- local_irq_disable(); } while(0)
+#define raw_local_irq_save(flags) do { raw_local_save_flags(flags); \
+ raw_local_irq_disable(); } while(0)
-#define local_irq_enable() unblock_signals()
-#define local_irq_disable() block_signals()
+#define raw_local_irq_enable() unblock_signals()
+#define raw_local_irq_disable() block_signals()
#define irqs_disabled() \
({ \
unsigned long flags; \
- local_save_flags(flags); \
+ raw_local_save_flags(flags); \
(flags == 0); \
})
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 98a0ed52b5c3..0f44add3e0b7 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -247,6 +247,28 @@ config X86_HAS_BOOT_CPU_ID
def_bool y
depends on X86_VOYAGER
+config SPARSE_IRQ
+ bool "Support sparse irq numbering"
+ depends on PCI_MSI || HT_IRQ
+ help
+ This enables support for sparse irqs. This is useful for distro
+ kernels that want to define a high CONFIG_NR_CPUS value but still
+ want to have low kernel memory footprint on smaller machines.
+
+ ( Sparse IRQs can also be beneficial on NUMA boxes, as they spread
+ out the irq_desc[] array in a more NUMA-friendly way. )
+
+ If you don't know what to do here, say N.
+
+config NUMA_MIGRATE_IRQ_DESC
+ bool "Move irq desc when changing irq smp_affinity"
+ depends on SPARSE_IRQ && NUMA
+ default n
+ help
+ This enables moving an irq's irq_desc to the cpu/node on which
+ that irq will be handled.
+
+ If you don't know what to do here, say N.
+
config X86_FIND_SMP_CONFIG
def_bool y
depends on X86_MPPARSE || X86_VOYAGER
@@ -479,7 +501,7 @@ config HPET_TIMER
The HPET provides a stable time base on SMP
systems, unlike the TSC, but it is more expensive to access,
as it is off-chip. You can find the HPET spec at
- <http://www.intel.com/hardwaredesign/hpetspec.htm>.
+ <http://www.intel.com/hardwaredesign/hpetspec_1.pdf>.
You can safely choose Y here. However, HPET will only be
activated if the platform and the BIOS support this feature.
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index dc22c0733282..4035357f5b9d 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -65,7 +65,7 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
return dma_ops;
else
return dev->archdata.dma_ops;
-#endif /* _ASM_X86_DMA_MAPPING_H */
+#endif
}
/* Make sure we keep the same behaviour */
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index e475e009ae5d..7a1f44ac1f17 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -198,17 +198,14 @@ extern void restore_IO_APIC_setup(void);
extern void reinit_intr_remapped_IO_APIC(int);
#endif
-extern int probe_nr_irqs(void);
+extern void probe_nr_irqs_gsi(void);
#else /* !CONFIG_X86_IO_APIC */
#define io_apic_assign_pci_irqs 0
static const int timer_through_8259 = 0;
-static inline void ioapic_init_mappings(void) { }
+static inline void ioapic_init_mappings(void) { }
-static inline int probe_nr_irqs(void)
-{
- return NR_IRQS;
-}
+static inline void probe_nr_irqs_gsi(void) { }
#endif
#endif /* _ASM_X86_IO_APIC_H */
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index 295b13193f4d..a6ee9e6f530f 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -7,8 +7,6 @@ extern struct dma_mapping_ops nommu_dma_ops;
extern int force_iommu, no_iommu;
extern int iommu_detected;
-extern unsigned long iommu_nr_pages(unsigned long addr, unsigned long len);
-
/* 10 seconds */
#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 0005adb0f941..f7ff65032b9d 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -101,12 +101,23 @@
#define LAST_VM86_IRQ 15
#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
+#define NR_IRQS_LEGACY 16
+
#if defined(CONFIG_X86_IO_APIC) && !defined(CONFIG_X86_VOYAGER)
+
+#ifndef CONFIG_SPARSE_IRQ
# if NR_CPUS < MAX_IO_APICS
# define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
# else
# define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
# endif
+#else
+# if (8 * NR_CPUS) > (32 * MAX_IO_APICS)
+# define NR_IRQS (NR_VECTORS + (8 * NR_CPUS))
+# else
+# define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
+# endif
+#endif
#elif defined(CONFIG_X86_VOYAGER)
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index 647781298e7e..66834c41c049 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -84,6 +84,8 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
static inline void early_quirks(void) { }
#endif
+extern void pci_iommu_alloc(void);
+
#endif /* __KERNEL__ */
#ifdef CONFIG_X86_32
diff --git a/arch/x86/include/asm/pci_64.h b/arch/x86/include/asm/pci_64.h
index d02d936840a3..4da207982777 100644
--- a/arch/x86/include/asm/pci_64.h
+++ b/arch/x86/include/asm/pci_64.h
@@ -23,7 +23,6 @@ extern int (*pci_config_write)(int seg, int bus, int dev, int fn,
int reg, int len, u32 value);
extern void dma32_reserve_bootmem(void);
-extern void pci_iommu_alloc(void);
/* The PCI address space does equal the physical memory
* address space. The networking and block device layers use
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 580c3ee6c58c..4340055b7559 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -157,6 +157,7 @@ extern int __get_user_bad(void);
int __ret_gu; \
unsigned long __val_gu; \
__chk_user_ptr(ptr); \
+ might_fault(); \
switch (sizeof(*(ptr))) { \
case 1: \
__get_user_x(1, __ret_gu, __val_gu, ptr); \
@@ -241,6 +242,7 @@ extern void __put_user_8(void);
int __ret_pu; \
__typeof__(*(ptr)) __pu_val; \
__chk_user_ptr(ptr); \
+ might_fault(); \
__pu_val = x; \
switch (sizeof(*(ptr))) { \
case 1: \
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index d095a3aeea1b..5e06259e90e5 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -82,8 +82,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
- might_sleep();
- return __copy_to_user_inatomic(to, from, n);
+ might_fault();
+ return __copy_to_user_inatomic(to, from, n);
}
static __always_inline unsigned long
@@ -137,7 +137,7 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
- might_sleep();
+ might_fault();
if (__builtin_constant_p(n)) {
unsigned long ret;
@@ -159,7 +159,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
static __always_inline unsigned long __copy_from_user_nocache(void *to,
const void __user *from, unsigned long n)
{
- might_sleep();
+ might_fault();
if (__builtin_constant_p(n)) {
unsigned long ret;
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index f8cfd00db450..84210c479fca 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -29,6 +29,8 @@ static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
int ret = 0;
+
+ might_fault();
if (!__builtin_constant_p(size))
return copy_user_generic(dst, (__force void *)src, size);
switch (size) {
@@ -71,6 +73,8 @@ static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
int ret = 0;
+
+ might_fault();
if (!__builtin_constant_p(size))
return copy_user_generic((__force void *)dst, src, size);
switch (size) {
@@ -113,6 +117,8 @@ static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
int ret = 0;
+
+ might_fault();
if (!__builtin_constant_p(size))
return copy_user_generic((__force void *)dst,
(__force void *)src, size);
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 88dd768eab6d..d364df03c1d6 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -109,6 +109,8 @@ obj-$(CONFIG_MICROCODE) += microcode.o
obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o
+obj-$(CONFIG_SWIOTLB) += pci-swiotlb_64.o # NB rename without _64
+
###
# 64 bit specific files
ifeq ($(CONFIG_X86_64),y)
@@ -122,7 +124,6 @@ ifeq ($(CONFIG_X86_64),y)
obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o
obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu_init.o amd_iommu.o
- obj-$(CONFIG_SWIOTLB) += pci-swiotlb_64.o
obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o
endif
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 3f0a3edf0a57..845ea097383e 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -813,7 +813,7 @@ int __init hpet_enable(void)
out_nohpet:
hpet_clear_mapping();
- boot_hpet_disable = 1;
+ hpet_address = 0;
return 0;
}
@@ -836,10 +836,11 @@ static __init int hpet_late_init(void)
hpet_address = force_hpet_address;
hpet_enable();
- if (!hpet_virt_address)
- return -ENODEV;
}
+ if (!hpet_virt_address)
+ return -ENODEV;
+
hpet_reserve_platform_timers(hpet_readl(HPET_ID));
for_each_online_cpu(cpu) {
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index 679e7bbbbcd6..f6ea94b74da1 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -108,93 +108,252 @@ static int __init parse_noapic(char *str)
early_param("noapic", parse_noapic);
struct irq_pin_list;
+
+/*
+ * This is performance-critical, we want to do it O(1)
+ *
+ * the indexing order of this array favors 1:1 mappings
+ * between pins and IRQs.
+ */
+
+struct irq_pin_list {
+ int apic, pin;
+ struct irq_pin_list *next;
+};
+
+static struct irq_pin_list *get_one_free_irq_2_pin(int cpu)
+{
+ struct irq_pin_list *pin;
+ int node;
+
+ node = cpu_to_node(cpu);
+
+ pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node);
+ printk(KERN_DEBUG " alloc irq_2_pin on cpu %d node %d\n", cpu, node);
+
+ return pin;
+}
+
struct irq_cfg {
- unsigned int irq;
struct irq_pin_list *irq_2_pin;
cpumask_t domain;
cpumask_t old_domain;
unsigned move_cleanup_count;
u8 vector;
u8 move_in_progress : 1;
+#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
+ u8 move_desc_pending : 1;
+#endif
};
/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
+#ifdef CONFIG_SPARSE_IRQ
+static struct irq_cfg irq_cfgx[] = {
+#else
static struct irq_cfg irq_cfgx[NR_IRQS] = {
- [0] = { .irq = 0, .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, },
- [1] = { .irq = 1, .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, },
- [2] = { .irq = 2, .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, },
- [3] = { .irq = 3, .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR, },
- [4] = { .irq = 4, .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR, },
- [5] = { .irq = 5, .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR, },
- [6] = { .irq = 6, .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR, },
- [7] = { .irq = 7, .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR, },
- [8] = { .irq = 8, .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR, },
- [9] = { .irq = 9, .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR, },
- [10] = { .irq = 10, .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
- [11] = { .irq = 11, .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
- [12] = { .irq = 12, .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
- [13] = { .irq = 13, .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
- [14] = { .irq = 14, .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
- [15] = { .irq = 15, .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
+#endif
+ [0] = { .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, },
+ [1] = { .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, },
+ [2] = { .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, },
+ [3] = { .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR, },
+ [4] = { .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR, },
+ [5] = { .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR, },
+ [6] = { .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR, },
+ [7] = { .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR, },
+ [8] = { .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR, },
+ [9] = { .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR, },
+ [10] = { .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
+ [11] = { .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
+ [12] = { .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
+ [13] = { .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
+ [14] = { .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
+ [15] = { .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
};
-#define for_each_irq_cfg(irq, cfg) \
- for (irq = 0, cfg = irq_cfgx; irq < nr_irqs; irq++, cfg++)
+void __init arch_early_irq_init(void)
+{
+ struct irq_cfg *cfg;
+ struct irq_desc *desc;
+ int count;
+ int i;
+
+ cfg = irq_cfgx;
+ count = ARRAY_SIZE(irq_cfgx);
+
+ for (i = 0; i < count; i++) {
+ desc = irq_to_desc(i);
+ desc->chip_data = &cfg[i];
+ }
+}
+#ifdef CONFIG_SPARSE_IRQ
static struct irq_cfg *irq_cfg(unsigned int irq)
{
- return irq < nr_irqs ? irq_cfgx + irq : NULL;
+ struct irq_cfg *cfg = NULL;
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+ if (desc)
+ cfg = desc->chip_data;
+
+ return cfg;
}
-static struct irq_cfg *irq_cfg_alloc(unsigned int irq)
+static struct irq_cfg *get_one_free_irq_cfg(int cpu)
{
- return irq_cfg(irq);
+ struct irq_cfg *cfg;
+ int node;
+
+ node = cpu_to_node(cpu);
+
+ cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
+ printk(KERN_DEBUG " alloc irq_cfg on cpu %d node %d\n", cpu, node);
+
+ return cfg;
}
-/*
- * Rough estimation of how many shared IRQs there are, can be changed
- * anytime.
- */
-#define MAX_PLUS_SHARED_IRQS NR_IRQS
-#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
+void arch_init_chip_data(struct irq_desc *desc, int cpu)
+{
+ struct irq_cfg *cfg;
-/*
- * This is performance-critical, we want to do it O(1)
- *
- * the indexing order of this array favors 1:1 mappings
- * between pins and IRQs.
- */
+ cfg = desc->chip_data;
+ if (!cfg) {
+ desc->chip_data = get_one_free_irq_cfg(cpu);
+ if (!desc->chip_data) {
+ printk(KERN_ERR "can not alloc irq_cfg\n");
+ BUG_ON(1);
+ }
+ }
+}
-struct irq_pin_list {
- int apic, pin;
- struct irq_pin_list *next;
-};
+#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
+
+static void
+init_copy_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg, int cpu)
+{
+ struct irq_pin_list *old_entry, *head, *tail, *entry;
+
+ cfg->irq_2_pin = NULL;
+ old_entry = old_cfg->irq_2_pin;
+ if (!old_entry)
+ return;
-static struct irq_pin_list irq_2_pin_head[PIN_MAP_SIZE];
-static struct irq_pin_list *irq_2_pin_ptr;
+ entry = get_one_free_irq_2_pin(cpu);
+ if (!entry)
+ return;
-static void __init irq_2_pin_init(void)
+ entry->apic = old_entry->apic;
+ entry->pin = old_entry->pin;
+ head = entry;
+ tail = entry;
+ old_entry = old_entry->next;
+ while (old_entry) {
+ entry = get_one_free_irq_2_pin(cpu);
+ if (!entry) {
+ entry = head;
+ while (entry) {
+ head = entry->next;
+ kfree(entry);
+ entry = head;
+ }
+ /* still use the old one */
+ return;
+ }
+ entry->apic = old_entry->apic;
+ entry->pin = old_entry->pin;
+ tail->next = entry;
+ tail = entry;
+ old_entry = old_entry->next;
+ }
+
+ tail->next = NULL;
+ cfg->irq_2_pin = head;
+}
+
+static void free_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg)
{
- struct irq_pin_list *pin = irq_2_pin_head;
- int i;
+ struct irq_pin_list *entry, *next;
- for (i = 1; i < PIN_MAP_SIZE; i++)
- pin[i-1].next = &pin[i];
+ if (old_cfg->irq_2_pin == cfg->irq_2_pin)
+ return;
+
+ entry = old_cfg->irq_2_pin;
- irq_2_pin_ptr = &pin[0];
+ while (entry) {
+ next = entry->next;
+ kfree(entry);
+ entry = next;
+ }
+ old_cfg->irq_2_pin = NULL;
}
-static struct irq_pin_list *get_one_free_irq_2_pin(void)
+void arch_init_copy_chip_data(struct irq_desc *old_desc,
+ struct irq_desc *desc, int cpu)
{
- struct irq_pin_list *pin = irq_2_pin_ptr;
+ struct irq_cfg *cfg;
+ struct irq_cfg *old_cfg;
- if (!pin)
- panic("can not get more irq_2_pin\n");
+ cfg = get_one_free_irq_cfg(cpu);
- irq_2_pin_ptr = pin->next;
- pin->next = NULL;
- return pin;
+ if (!cfg)
+ return;
+
+ desc->chip_data = cfg;
+
+ old_cfg = old_desc->chip_data;
+
+ memcpy(cfg, old_cfg, sizeof(struct irq_cfg));
+
+ init_copy_irq_2_pin(old_cfg, cfg, cpu);
+}
+
+static void free_irq_cfg(struct irq_cfg *old_cfg)
+{
+ kfree(old_cfg);
+}
+
+void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
+{
+ struct irq_cfg *old_cfg, *cfg;
+
+ old_cfg = old_desc->chip_data;
+ cfg = desc->chip_data;
+
+ if (old_cfg == cfg)
+ return;
+
+ if (old_cfg) {
+ free_irq_2_pin(old_cfg, cfg);
+ free_irq_cfg(old_cfg);
+ old_desc->chip_data = NULL;
+ }
+}
+
+static void set_extra_move_desc(struct irq_desc *desc, cpumask_t mask)
+{
+ struct irq_cfg *cfg = desc->chip_data;
+
+ if (!cfg->move_in_progress) {
+ /* it means that domain is not changed */
+ if (!cpus_intersects(desc->affinity, mask))
+ cfg->move_desc_pending = 1;
+ }
}
+#endif
+
+#else
+static struct irq_cfg *irq_cfg(unsigned int irq)
+{
+ return irq < nr_irqs ? irq_cfgx + irq : NULL;
+}
+
+#endif
+
+#ifndef CONFIG_NUMA_MIGRATE_IRQ_DESC
+static inline void set_extra_move_desc(struct irq_desc *desc, cpumask_t mask)
+{
+}
+#endif
struct io_apic {
unsigned int index;
@@ -237,11 +396,10 @@ static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned
writel(value, &io_apic->data);
}
-static bool io_apic_level_ack_pending(unsigned int irq)
+static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
{
struct irq_pin_list *entry;
unsigned long flags;
- struct irq_cfg *cfg = irq_cfg(irq);
spin_lock_irqsave(&ioapic_lock, flags);
entry = cfg->irq_2_pin;
@@ -323,13 +481,12 @@ static void ioapic_mask_entry(int apic, int pin)
}
#ifdef CONFIG_SMP
-static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
+static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
{
int apic, pin;
- struct irq_cfg *cfg;
struct irq_pin_list *entry;
+ u8 vector = cfg->vector;
- cfg = irq_cfg(irq);
entry = cfg->irq_2_pin;
for (;;) {
unsigned int reg;
@@ -359,24 +516,27 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
}
}
-static int assign_irq_vector(int irq, cpumask_t mask);
+static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask);
-static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+static void set_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask)
{
struct irq_cfg *cfg;
unsigned long flags;
unsigned int dest;
cpumask_t tmp;
- struct irq_desc *desc;
+ unsigned int irq;
cpus_and(tmp, mask, cpu_online_map);
if (cpus_empty(tmp))
return;
- cfg = irq_cfg(irq);
- if (assign_irq_vector(irq, mask))
+ irq = desc->irq;
+ cfg = desc->chip_data;
+ if (assign_irq_vector(irq, cfg, mask))
return;
+ set_extra_move_desc(desc, mask);
+
cpus_and(tmp, cfg->domain, mask);
dest = cpu_mask_to_apicid(tmp);
/*
@@ -384,12 +544,20 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
*/
dest = SET_APIC_LOGICAL_ID(dest);
- desc = irq_to_desc(irq);
spin_lock_irqsave(&ioapic_lock, flags);
- __target_IO_APIC_irq(irq, dest, cfg->vector);
+ __target_IO_APIC_irq(irq, dest, cfg);
desc->affinity = mask;
spin_unlock_irqrestore(&ioapic_lock, flags);
}
+
+static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+{
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+
+ set_ioapic_affinity_irq_desc(desc, mask);
+}
#endif /* CONFIG_SMP */
/*
@@ -397,16 +565,18 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
* shared ISA-space IRQs, so we have to support them. We are super
* fast in the common case, and fast for shared ISA-space IRQs.
*/
-static void add_pin_to_irq(unsigned int irq, int apic, int pin)
+static void add_pin_to_irq_cpu(struct irq_cfg *cfg, int cpu, int apic, int pin)
{
- struct irq_cfg *cfg;
struct irq_pin_list *entry;
- /* first time to refer irq_cfg, so with new */
- cfg = irq_cfg_alloc(irq);
entry = cfg->irq_2_pin;
if (!entry) {
- entry = get_one_free_irq_2_pin();
+ entry = get_one_free_irq_2_pin(cpu);
+ if (!entry) {
+ printk(KERN_ERR "can not alloc irq_2_pin to add %d - %d\n",
+ apic, pin);
+ return;
+ }
cfg->irq_2_pin = entry;
entry->apic = apic;
entry->pin = pin;
@@ -421,7 +591,7 @@ static void add_pin_to_irq(unsigned int irq, int apic, int pin)
entry = entry->next;
}
- entry->next = get_one_free_irq_2_pin();
+ entry->next = get_one_free_irq_2_pin(cpu);
entry = entry->next;
entry->apic = apic;
entry->pin = pin;
@@ -430,11 +600,10 @@ static void add_pin_to_irq(unsigned int irq, int apic, int pin)
/*
* Reroute an IRQ to a different pin.
*/
-static void __init replace_pin_at_irq(unsigned int irq,
+static void __init replace_pin_at_irq_cpu(struct irq_cfg *cfg, int cpu,
int oldapic, int oldpin,
int newapic, int newpin)
{
- struct irq_cfg *cfg = irq_cfg(irq);
struct irq_pin_list *entry = cfg->irq_2_pin;
int replaced = 0;
@@ -451,18 +620,16 @@ static void __init replace_pin_at_irq(unsigned int irq,
/* why? call replace before add? */
if (!replaced)
- add_pin_to_irq(irq, newapic, newpin);
+ add_pin_to_irq_cpu(cfg, cpu, newapic, newpin);
}
-static inline void io_apic_modify_irq(unsigned int irq,
+static inline void io_apic_modify_irq(struct irq_cfg *cfg,
int mask_and, int mask_or,
void (*final)(struct irq_pin_list *entry))
{
int pin;
- struct irq_cfg *cfg;
struct irq_pin_list *entry;
- cfg = irq_cfg(irq);
for (entry = cfg->irq_2_pin; entry != NULL; entry = entry->next) {
unsigned int reg;
pin = entry->pin;
@@ -475,9 +642,9 @@ static inline void io_apic_modify_irq(unsigned int irq,
}
}
-static void __unmask_IO_APIC_irq(unsigned int irq)
+static void __unmask_IO_APIC_irq(struct irq_cfg *cfg)
{
- io_apic_modify_irq(irq, ~IO_APIC_REDIR_MASKED, 0, NULL);
+ io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
}
#ifdef CONFIG_X86_64
@@ -492,47 +659,64 @@ void io_apic_sync(struct irq_pin_list *entry)
readl(&io_apic->data);
}
-static void __mask_IO_APIC_irq(unsigned int irq)
+static void __mask_IO_APIC_irq(struct irq_cfg *cfg)
{
- io_apic_modify_irq(irq, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
+ io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
}
#else /* CONFIG_X86_32 */
-static void __mask_IO_APIC_irq(unsigned int irq)
+static void __mask_IO_APIC_irq(struct irq_cfg *cfg)
{
- io_apic_modify_irq(irq, ~0, IO_APIC_REDIR_MASKED, NULL);
+ io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, NULL);
}
-static void __mask_and_edge_IO_APIC_irq(unsigned int irq)
+static void __mask_and_edge_IO_APIC_irq(struct irq_cfg *cfg)
{
- io_apic_modify_irq(irq, ~IO_APIC_REDIR_LEVEL_TRIGGER,
+ io_apic_modify_irq(cfg, ~IO_APIC_REDIR_LEVEL_TRIGGER,
IO_APIC_REDIR_MASKED, NULL);
}
-static void __unmask_and_level_IO_APIC_irq(unsigned int irq)
+static void __unmask_and_level_IO_APIC_irq(struct irq_cfg *cfg)
{
- io_apic_modify_irq(irq, ~IO_APIC_REDIR_MASKED,
+ io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED,
IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
}
#endif /* CONFIG_X86_32 */
-static void mask_IO_APIC_irq (unsigned int irq)
+static void mask_IO_APIC_irq_desc(struct irq_desc *desc)
{
+ struct irq_cfg *cfg = desc->chip_data;
unsigned long flags;
+ BUG_ON(!cfg);
+
spin_lock_irqsave(&ioapic_lock, flags);
- __mask_IO_APIC_irq(irq);
+ __mask_IO_APIC_irq(cfg);
spin_unlock_irqrestore(&ioapic_lock, flags);
}
-static void unmask_IO_APIC_irq (unsigned int irq)
+static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)
{
+ struct irq_cfg *cfg = desc->chip_data;
unsigned long flags;
spin_lock_irqsave(&ioapic_lock, flags);
- __unmask_IO_APIC_irq(irq);
+ __unmask_IO_APIC_irq(cfg);
spin_unlock_irqrestore(&ioapic_lock, flags);
}
+static void mask_IO_APIC_irq(unsigned int irq)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ mask_IO_APIC_irq_desc(desc);
+}
+static void unmask_IO_APIC_irq(unsigned int irq)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ unmask_IO_APIC_irq_desc(desc);
+}
+
static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
struct IO_APIC_route_entry entry;
@@ -809,7 +993,7 @@ EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
*/
static int EISA_ELCR(unsigned int irq)
{
- if (irq < 16) {
+ if (irq < NR_IRQS_LEGACY) {
unsigned int port = 0x4d0 + (irq >> 3);
return (inb(port) >> (irq & 7)) & 1;
}
@@ -1034,7 +1218,7 @@ void unlock_vector_lock(void)
spin_unlock(&vector_lock);
}
-static int __assign_irq_vector(int irq, cpumask_t mask)
+static int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
{
/*
* NOTE! The local APIC isn't very good at handling
@@ -1050,16 +1234,13 @@ static int __assign_irq_vector(int irq, cpumask_t mask)
static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
unsigned int old_vector;
int cpu;
- struct irq_cfg *cfg;
- cfg = irq_cfg(irq);
+ if ((cfg->move_in_progress) || cfg->move_cleanup_count)
+ return -EBUSY;
/* Only try and allocate irqs on cpus that are present */
cpus_and(mask, mask, cpu_online_map);
- if ((cfg->move_in_progress) || cfg->move_cleanup_count)
- return -EBUSY;
-
old_vector = cfg->vector;
if (old_vector) {
cpumask_t tmp;
@@ -1113,24 +1294,22 @@ next:
return -ENOSPC;
}
-static int assign_irq_vector(int irq, cpumask_t mask)
+static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
{
int err;
unsigned long flags;
spin_lock_irqsave(&vector_lock, flags);
- err = __assign_irq_vector(irq, mask);
+ err = __assign_irq_vector(irq, cfg, mask);
spin_unlock_irqrestore(&vector_lock, flags);
return err;
}
-static void __clear_irq_vector(int irq)
+static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
{
- struct irq_cfg *cfg;
cpumask_t mask;
int cpu, vector;
- cfg = irq_cfg(irq);
BUG_ON(!cfg->vector);
vector = cfg->vector;
@@ -1162,9 +1341,13 @@ void __setup_vector_irq(int cpu)
/* This function must be called with vector_lock held */
int irq, vector;
struct irq_cfg *cfg;
+ struct irq_desc *desc;
/* Mark the inuse vectors */
- for_each_irq_cfg(irq, cfg) {
+ for_each_irq_desc(irq, desc) {
+ if (!desc)
+ continue;
+ cfg = desc->chip_data;
if (!cpu_isset(cpu, cfg->domain))
continue;
vector = cfg->vector;
@@ -1215,11 +1398,8 @@ static inline int IO_APIC_irq_trigger(int irq)
}
#endif
-static void ioapic_register_intr(int irq, unsigned long trigger)
+static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long trigger)
{
- struct irq_desc *desc;
-
- desc = irq_to_desc(irq);
if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
trigger == IOAPIC_LEVEL)
@@ -1311,7 +1491,7 @@ static int setup_ioapic_entry(int apic, int irq,
return 0;
}
-static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
+static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_desc *desc,
int trigger, int polarity)
{
struct irq_cfg *cfg;
@@ -1321,10 +1501,10 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
if (!IO_APIC_IRQ(irq))
return;
- cfg = irq_cfg(irq);
+ cfg = desc->chip_data;
mask = TARGET_CPUS;
- if (assign_irq_vector(irq, mask))
+ if (assign_irq_vector(irq, cfg, mask))
return;
cpus_and(mask, cfg->domain, mask);
@@ -1341,12 +1521,12 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
cfg->vector)) {
printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
mp_ioapics[apic].mp_apicid, pin);
- __clear_irq_vector(irq);
+ __clear_irq_vector(irq, cfg);
return;
}
- ioapic_register_intr(irq, trigger);
- if (irq < 16)
+ ioapic_register_intr(irq, desc, trigger);
+ if (irq < NR_IRQS_LEGACY)
disable_8259A_irq(irq);
ioapic_write_entry(apic, pin, entry);
@@ -1356,6 +1536,9 @@ static void __init setup_IO_APIC_irqs(void)
{
int apic, pin, idx, irq;
int notcon = 0;
+ struct irq_desc *desc;
+ struct irq_cfg *cfg;
+ int cpu = boot_cpu_id;
apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
@@ -1387,9 +1570,15 @@ static void __init setup_IO_APIC_irqs(void)
if (multi_timer_check(apic, irq))
continue;
#endif
- add_pin_to_irq(irq, apic, pin);
+ desc = irq_to_desc_alloc_cpu(irq, cpu);
+ if (!desc) {
+ printk(KERN_INFO "cannot get irq_desc for %d\n", irq);
+ continue;
+ }
+ cfg = desc->chip_data;
+ add_pin_to_irq_cpu(cfg, cpu, apic, pin);
- setup_IO_APIC_irq(apic, pin, irq,
+ setup_IO_APIC_irq(apic, pin, irq, desc,
irq_trigger(idx), irq_polarity(idx));
}
}
@@ -1448,6 +1637,7 @@ __apicdebuginit(void) print_IO_APIC(void)
union IO_APIC_reg_03 reg_03;
unsigned long flags;
struct irq_cfg *cfg;
+ struct irq_desc *desc;
unsigned int irq;
if (apic_verbosity == APIC_QUIET)
@@ -1537,8 +1727,13 @@ __apicdebuginit(void) print_IO_APIC(void)
}
}
printk(KERN_DEBUG "IRQ to pin mappings:\n");
- for_each_irq_cfg(irq, cfg) {
- struct irq_pin_list *entry = cfg->irq_2_pin;
+ for_each_irq_desc(irq, desc) {
+ struct irq_pin_list *entry;
+
+ if (!desc)
+ continue;
+ cfg = desc->chip_data;
+ entry = cfg->irq_2_pin;
if (!entry)
continue;
printk(KERN_DEBUG "IRQ%d ", irq);
@@ -2022,14 +2217,16 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
{
int was_pending = 0;
unsigned long flags;
+ struct irq_cfg *cfg;
spin_lock_irqsave(&ioapic_lock, flags);
- if (irq < 16) {
+ if (irq < NR_IRQS_LEGACY) {
disable_8259A_irq(irq);
if (i8259A_irq_pending(irq))
was_pending = 1;
}
- __unmask_IO_APIC_irq(irq);
+ cfg = irq_cfg(irq);
+ __unmask_IO_APIC_irq(cfg);
spin_unlock_irqrestore(&ioapic_lock, flags);
return was_pending;
@@ -2092,35 +2289,37 @@ static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);
* as simple as edge triggered migration and we can do the irq migration
* with a simple atomic update to IO-APIC RTE.
*/
-static void migrate_ioapic_irq(int irq, cpumask_t mask)
+static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask)
{
struct irq_cfg *cfg;
- struct irq_desc *desc;
cpumask_t tmp, cleanup_mask;
struct irte irte;
int modify_ioapic_rte;
unsigned int dest;
unsigned long flags;
+ unsigned int irq;
cpus_and(tmp, mask, cpu_online_map);
if (cpus_empty(tmp))
return;
+ irq = desc->irq;
if (get_irte(irq, &irte))
return;
- if (assign_irq_vector(irq, mask))
+ cfg = desc->chip_data;
+ if (assign_irq_vector(irq, cfg, mask))
return;
- cfg = irq_cfg(irq);
+ set_extra_move_desc(desc, mask);
+
cpus_and(tmp, cfg->domain, mask);
dest = cpu_mask_to_apicid(tmp);
- desc = irq_to_desc(irq);
modify_ioapic_rte = desc->status & IRQ_LEVEL;
if (modify_ioapic_rte) {
spin_lock_irqsave(&ioapic_lock, flags);
- __target_IO_APIC_irq(irq, dest, cfg->vector);
+ __target_IO_APIC_irq(irq, dest, cfg);
spin_unlock_irqrestore(&ioapic_lock, flags);
}
@@ -2142,14 +2341,14 @@ static void migrate_ioapic_irq(int irq, cpumask_t mask)
desc->affinity = mask;
}
-static int migrate_irq_remapped_level(int irq)
+static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
{
int ret = -1;
- struct irq_desc *desc = irq_to_desc(irq);
+ struct irq_cfg *cfg = desc->chip_data;
- mask_IO_APIC_irq(irq);
+ mask_IO_APIC_irq_desc(desc);
- if (io_apic_level_ack_pending(irq)) {
+ if (io_apic_level_ack_pending(cfg)) {
/*
* Interrupt in progress. Migrating irq now will change the
* vector information in the IO-APIC RTE and that will confuse
@@ -2161,14 +2360,15 @@ static int migrate_irq_remapped_level(int irq)
}
/* everything is clear. we have right of way */
- migrate_ioapic_irq(irq, desc->pending_mask);
+ migrate_ioapic_irq_desc(desc, desc->pending_mask);
ret = 0;
desc->status &= ~IRQ_MOVE_PENDING;
cpus_clear(desc->pending_mask);
unmask:
- unmask_IO_APIC_irq(irq);
+ unmask_IO_APIC_irq_desc(desc);
+
return ret;
}
@@ -2178,6 +2378,9 @@ static void ir_irq_migration(struct work_struct *work)
struct irq_desc *desc;
for_each_irq_desc(irq, desc) {
+ if (!desc)
+ continue;
+
if (desc->status & IRQ_MOVE_PENDING) {
unsigned long flags;
@@ -2198,18 +2401,22 @@ static void ir_irq_migration(struct work_struct *work)
/*
* Migrates the IRQ destination in the process context.
*/
-static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask)
{
- struct irq_desc *desc = irq_to_desc(irq);
-
if (desc->status & IRQ_LEVEL) {
desc->status |= IRQ_MOVE_PENDING;
desc->pending_mask = mask;
- migrate_irq_remapped_level(irq);
+ migrate_irq_remapped_level_desc(desc);
return;
}
- migrate_ioapic_irq(irq, mask);
+ migrate_ioapic_irq_desc(desc, mask);
+}
+static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ set_ir_ioapic_affinity_irq_desc(desc, mask);
}
#endif
@@ -2228,6 +2435,9 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
struct irq_cfg *cfg;
irq = __get_cpu_var(vector_irq)[vector];
+ if (irq == -1)
+ continue;
+
desc = irq_to_desc(irq);
if (!desc)
continue;
@@ -2249,19 +2459,40 @@ unlock:
irq_exit();
}
-static void irq_complete_move(unsigned int irq)
+static void irq_complete_move(struct irq_desc **descp)
{
- struct irq_cfg *cfg = irq_cfg(irq);
+ struct irq_desc *desc = *descp;
+ struct irq_cfg *cfg = desc->chip_data;
unsigned vector, me;
- if (likely(!cfg->move_in_progress))
+ if (likely(!cfg->move_in_progress)) {
+#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
+ if (likely(!cfg->move_desc_pending))
+ return;
+
+ /* domain has not changed, but affinity did */
+ me = smp_processor_id();
+ if (cpu_isset(me, desc->affinity)) {
+ *descp = desc = move_irq_desc(desc, me);
+ /* get the new one */
+ cfg = desc->chip_data;
+ cfg->move_desc_pending = 0;
+ }
+#endif
return;
+ }
vector = ~get_irq_regs()->orig_ax;
me = smp_processor_id();
if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
cpumask_t cleanup_mask;
+#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
+ *descp = desc = move_irq_desc(desc, me);
+ /* get the new one */
+ cfg = desc->chip_data;
+#endif
+
cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
cfg->move_cleanup_count = cpus_weight(cleanup_mask);
send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
@@ -2269,8 +2500,9 @@ static void irq_complete_move(unsigned int irq)
}
}
#else
-static inline void irq_complete_move(unsigned int irq) {}
+static inline void irq_complete_move(struct irq_desc **descp) {}
#endif
+
#ifdef CONFIG_INTR_REMAP
static void ack_x2apic_level(unsigned int irq)
{
@@ -2281,11 +2513,14 @@ static void ack_x2apic_edge(unsigned int irq)
{
ack_x2APIC_irq();
}
+
#endif
static void ack_apic_edge(unsigned int irq)
{
- irq_complete_move(irq);
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ irq_complete_move(&desc);
move_native_irq(irq);
ack_APIC_irq();
}
@@ -2294,18 +2529,21 @@ atomic_t irq_mis_count;
static void ack_apic_level(unsigned int irq)
{
+ struct irq_desc *desc = irq_to_desc(irq);
+
#ifdef CONFIG_X86_32
unsigned long v;
int i;
#endif
+ struct irq_cfg *cfg;
int do_unmask_irq = 0;
- irq_complete_move(irq);
+ irq_complete_move(&desc);
#ifdef CONFIG_GENERIC_PENDING_IRQ
/* If we are moving the irq we need to mask it */
- if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) {
+ if (unlikely(desc->status & IRQ_MOVE_PENDING)) {
do_unmask_irq = 1;
- mask_IO_APIC_irq(irq);
+ mask_IO_APIC_irq_desc(desc);
}
#endif
@@ -2329,7 +2567,8 @@ static void ack_apic_level(unsigned int irq)
* operation to prevent an edge-triggered interrupt escaping meanwhile.
* The idea is from Manfred Spraul. --macro
*/
- i = irq_cfg(irq)->vector;
+ cfg = desc->chip_data;
+ i = cfg->vector;
v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
#endif
@@ -2368,17 +2607,18 @@ static void ack_apic_level(unsigned int irq)
* accurate and is causing problems then it is a hardware bug
* and you can go talk to the chipset vendor about it.
*/
- if (!io_apic_level_ack_pending(irq))
+ cfg = desc->chip_data;
+ if (!io_apic_level_ack_pending(cfg))
move_masked_irq(irq);
- unmask_IO_APIC_irq(irq);
+ unmask_IO_APIC_irq_desc(desc);
}
#ifdef CONFIG_X86_32
if (!(v & (1 << (i & 0x1f)))) {
atomic_inc(&irq_mis_count);
spin_lock(&ioapic_lock);
- __mask_and_edge_IO_APIC_irq(irq);
- __unmask_and_level_IO_APIC_irq(irq);
+ __mask_and_edge_IO_APIC_irq(cfg);
+ __unmask_and_level_IO_APIC_irq(cfg);
spin_unlock(&ioapic_lock);
}
#endif
@@ -2429,20 +2669,22 @@ static inline void init_IO_APIC_traps(void)
* Also, we've got to be careful not to trash gate
* 0x80, because int 0x80 is hm, kind of importantish. ;)
*/
- for_each_irq_cfg(irq, cfg) {
- if (IO_APIC_IRQ(irq) && !cfg->vector) {
+ for_each_irq_desc(irq, desc) {
+ if (!desc)
+ continue;
+
+ cfg = desc->chip_data;
+ if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
/*
* Hmm.. We don't have an entry for this,
* so default to an old-fashioned 8259
* interrupt if we can..
*/
- if (irq < 16)
+ if (irq < NR_IRQS_LEGACY)
make_8259A_irq(irq);
- else {
- desc = irq_to_desc(irq);
+ else
/* Strange. Oh, well.. */
desc->chip = &no_irq_chip;
- }
}
}
}
@@ -2467,7 +2709,7 @@ static void unmask_lapic_irq(unsigned int irq)
apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
}
-static void ack_lapic_irq (unsigned int irq)
+static void ack_lapic_irq(unsigned int irq)
{
ack_APIC_irq();
}
@@ -2479,11 +2721,8 @@ static struct irq_chip lapic_chip __read_mostly = {
.ack = ack_lapic_irq,
};
-static void lapic_register_intr(int irq)
+static void lapic_register_intr(int irq, struct irq_desc *desc)
{
- struct irq_desc *desc;
-
- desc = irq_to_desc(irq);
desc->status &= ~IRQ_LEVEL;
set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
"edge");
@@ -2587,7 +2826,9 @@ int timer_through_8259 __initdata;
*/
static inline void __init check_timer(void)
{
- struct irq_cfg *cfg = irq_cfg(0);
+ struct irq_desc *desc = irq_to_desc(0);
+ struct irq_cfg *cfg = desc->chip_data;
+ int cpu = boot_cpu_id;
int apic1, pin1, apic2, pin2;
unsigned long flags;
unsigned int ver;
@@ -2602,7 +2843,7 @@ static inline void __init check_timer(void)
* get/set the timer IRQ vector:
*/
disable_8259A_irq(0);
- assign_irq_vector(0, TARGET_CPUS);
+ assign_irq_vector(0, cfg, TARGET_CPUS);
/*
* As IRQ0 is to be enabled in the 8259A, the virtual
@@ -2653,10 +2894,10 @@ static inline void __init check_timer(void)
* Ok, does IRQ0 through the IOAPIC work?
*/
if (no_pin1) {
- add_pin_to_irq(0, apic1, pin1);
+ add_pin_to_irq_cpu(cfg, cpu, apic1, pin1);
setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
}
- unmask_IO_APIC_irq(0);
+ unmask_IO_APIC_irq_desc(desc);
if (timer_irq_works()) {
if (nmi_watchdog == NMI_IO_APIC) {
setup_nmi();
@@ -2682,9 +2923,9 @@ static inline void __init check_timer(void)
/*
* legacy devices should be connected to IO APIC #0
*/
- replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
+ replace_pin_at_irq_cpu(cfg, cpu, apic1, pin1, apic2, pin2);
setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
- unmask_IO_APIC_irq(0);
+ unmask_IO_APIC_irq_desc(desc);
enable_8259A_irq(0);
if (timer_irq_works()) {
apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
@@ -2716,7 +2957,7 @@ static inline void __init check_timer(void)
apic_printk(APIC_QUIET, KERN_INFO
"...trying to set up timer as Virtual Wire IRQ...\n");
- lapic_register_intr(0);
+ lapic_register_intr(0, desc);
apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */
enable_8259A_irq(0);
@@ -2901,22 +3142,26 @@ unsigned int create_irq_nr(unsigned int irq_want)
unsigned int irq;
unsigned int new;
unsigned long flags;
- struct irq_cfg *cfg_new;
-
- irq_want = nr_irqs - 1;
+ struct irq_cfg *cfg_new = NULL;
+ int cpu = boot_cpu_id;
+ struct irq_desc *desc_new = NULL;
irq = 0;
spin_lock_irqsave(&vector_lock, flags);
- for (new = irq_want; new > 0; new--) {
+ for (new = irq_want; new < NR_IRQS; new++) {
if (platform_legacy_irq(new))
continue;
- cfg_new = irq_cfg(new);
- if (cfg_new && cfg_new->vector != 0)
+
+ desc_new = irq_to_desc_alloc_cpu(new, cpu);
+ if (!desc_new) {
+ printk(KERN_INFO "cannot get irq_desc for %d\n", new);
continue;
- /* check if need to create one */
- if (!cfg_new)
- cfg_new = irq_cfg_alloc(new);
- if (__assign_irq_vector(new, TARGET_CPUS) == 0)
+ }
+ cfg_new = desc_new->chip_data;
+
+ if (cfg_new->vector != 0)
+ continue;
+ if (__assign_irq_vector(new, cfg_new, TARGET_CPUS) == 0)
irq = new;
break;
}
@@ -2924,15 +3169,21 @@ unsigned int create_irq_nr(unsigned int irq_want)
if (irq > 0) {
dynamic_irq_init(irq);
+ /* restore it, in case dynamic_irq_init clears it */
+ if (desc_new)
+ desc_new->chip_data = cfg_new;
}
return irq;
}
+static int nr_irqs_gsi = NR_IRQS_LEGACY;
int create_irq(void)
{
+ unsigned int irq_want;
int irq;
- irq = create_irq_nr(nr_irqs - 1);
+ irq_want = nr_irqs_gsi;
+ irq = create_irq_nr(irq_want);
if (irq == 0)
irq = -1;
@@ -2943,14 +3194,22 @@ int create_irq(void)
void destroy_irq(unsigned int irq)
{
unsigned long flags;
+ struct irq_cfg *cfg;
+ struct irq_desc *desc;
+ /* store it, in case dynamic_irq_cleanup clears it */
+ desc = irq_to_desc(irq);
+ cfg = desc->chip_data;
dynamic_irq_cleanup(irq);
+ /* connect back irq_cfg */
+ if (desc)
+ desc->chip_data = cfg;
#ifdef CONFIG_INTR_REMAP
free_irte(irq);
#endif
spin_lock_irqsave(&vector_lock, flags);
- __clear_irq_vector(irq);
+ __clear_irq_vector(irq, cfg);
spin_unlock_irqrestore(&vector_lock, flags);
}
@@ -2965,12 +3224,12 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
unsigned dest;
cpumask_t tmp;
+ cfg = irq_cfg(irq);
tmp = TARGET_CPUS;
- err = assign_irq_vector(irq, tmp);
+ err = assign_irq_vector(irq, cfg, tmp);
if (err)
return err;
- cfg = irq_cfg(irq);
cpus_and(tmp, cfg->domain, tmp);
dest = cpu_mask_to_apicid(tmp);
@@ -3028,35 +3287,35 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
#ifdef CONFIG_SMP
static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
{
+ struct irq_desc *desc = irq_to_desc(irq);
struct irq_cfg *cfg;
struct msi_msg msg;
unsigned int dest;
cpumask_t tmp;
- struct irq_desc *desc;
cpus_and(tmp, mask, cpu_online_map);
if (cpus_empty(tmp))
return;
- if (assign_irq_vector(irq, mask))
+ cfg = desc->chip_data;
+ if (assign_irq_vector(irq, cfg, mask))
return;
- cfg = irq_cfg(irq);
+ set_extra_move_desc(desc, mask);
+
cpus_and(tmp, cfg->domain, mask);
dest = cpu_mask_to_apicid(tmp);
- read_msi_msg(irq, &msg);
+ read_msi_msg_desc(desc, &msg);
msg.data &= ~MSI_DATA_VECTOR_MASK;
msg.data |= MSI_DATA_VECTOR(cfg->vector);
msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
msg.address_lo |= MSI_ADDR_DEST_ID(dest);
- write_msi_msg(irq, &msg);
- desc = irq_to_desc(irq);
+ write_msi_msg_desc(desc, &msg);
desc->affinity = mask;
}
-
#ifdef CONFIG_INTR_REMAP
/*
* Migrate the MSI irq to another cpumask. This migration is
@@ -3064,11 +3323,11 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
*/
static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
{
+ struct irq_desc *desc = irq_to_desc(irq);
struct irq_cfg *cfg;
unsigned int dest;
cpumask_t tmp, cleanup_mask;
struct irte irte;
- struct irq_desc *desc;
cpus_and(tmp, mask, cpu_online_map);
if (cpus_empty(tmp))
@@ -3077,10 +3336,12 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
if (get_irte(irq, &irte))
return;
- if (assign_irq_vector(irq, mask))
+ cfg = desc->chip_data;
+ if (assign_irq_vector(irq, cfg, mask))
return;
- cfg = irq_cfg(irq);
+ set_extra_move_desc(desc, mask);
+
cpus_and(tmp, cfg->domain, mask);
dest = cpu_mask_to_apicid(tmp);
@@ -3104,9 +3365,9 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
cfg->move_in_progress = 0;
}
- desc = irq_to_desc(irq);
desc->affinity = mask;
}
+
#endif
#endif /* CONFIG_SMP */
@@ -3165,7 +3426,7 @@ static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
}
#endif
-static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
+static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
{
int ret;
struct msi_msg msg;
@@ -3174,7 +3435,7 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
if (ret < 0)
return ret;
- set_irq_msi(irq, desc);
+ set_irq_msi(irq, msidesc);
write_msi_msg(irq, &msg);
#ifdef CONFIG_INTR_REMAP
@@ -3194,26 +3455,13 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
return 0;
}
-static unsigned int build_irq_for_pci_dev(struct pci_dev *dev)
-{
- unsigned int irq;
-
- irq = dev->bus->number;
- irq <<= 8;
- irq |= dev->devfn;
- irq <<= 12;
-
- return irq;
-}
-
-int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
+int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc)
{
unsigned int irq;
int ret;
unsigned int irq_want;
- irq_want = build_irq_for_pci_dev(dev) + 0x100;
-
+ irq_want = nr_irqs_gsi;
irq = create_irq_nr(irq_want);
if (irq == 0)
return -1;
@@ -3227,7 +3475,7 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
goto error;
no_ir:
#endif
- ret = setup_msi_irq(dev, desc, irq);
+ ret = setup_msi_irq(dev, msidesc, irq);
if (ret < 0) {
destroy_irq(irq);
return ret;
@@ -3245,7 +3493,7 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
unsigned int irq;
int ret, sub_handle;
- struct msi_desc *desc;
+ struct msi_desc *msidesc;
unsigned int irq_want;
#ifdef CONFIG_INTR_REMAP
@@ -3253,10 +3501,11 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
int index = 0;
#endif
- irq_want = build_irq_for_pci_dev(dev) + 0x100;
+ irq_want = nr_irqs_gsi;
sub_handle = 0;
- list_for_each_entry(desc, &dev->msi_list, list) {
- irq = create_irq_nr(irq_want--);
+ list_for_each_entry(msidesc, &dev->msi_list, list) {
+ irq = create_irq_nr(irq_want);
+ irq_want++;
if (irq == 0)
return -1;
#ifdef CONFIG_INTR_REMAP
@@ -3288,7 +3537,7 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
}
no_ir:
#endif
- ret = setup_msi_irq(dev, desc, irq);
+ ret = setup_msi_irq(dev, msidesc, irq);
if (ret < 0)
goto error;
sub_handle++;
@@ -3309,20 +3558,22 @@ void arch_teardown_msi_irq(unsigned int irq)
#ifdef CONFIG_SMP
static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
{
+ struct irq_desc *desc = irq_to_desc(irq);
struct irq_cfg *cfg;
struct msi_msg msg;
unsigned int dest;
cpumask_t tmp;
- struct irq_desc *desc;
cpus_and(tmp, mask, cpu_online_map);
if (cpus_empty(tmp))
return;
- if (assign_irq_vector(irq, mask))
+ cfg = desc->chip_data;
+ if (assign_irq_vector(irq, cfg, mask))
return;
- cfg = irq_cfg(irq);
+ set_extra_move_desc(desc, mask);
+
cpus_and(tmp, cfg->domain, mask);
dest = cpu_mask_to_apicid(tmp);
@@ -3334,9 +3585,9 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
msg.address_lo |= MSI_ADDR_DEST_ID(dest);
dmar_msi_write(irq, &msg);
- desc = irq_to_desc(irq);
desc->affinity = mask;
}
+
#endif /* CONFIG_SMP */
struct irq_chip dmar_msi_type = {
@@ -3370,8 +3621,8 @@ int arch_setup_dmar_msi(unsigned int irq)
#ifdef CONFIG_SMP
static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
{
+ struct irq_desc *desc = irq_to_desc(irq);
struct irq_cfg *cfg;
- struct irq_desc *desc;
struct msi_msg msg;
unsigned int dest;
cpumask_t tmp;
@@ -3380,10 +3631,12 @@ static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
if (cpus_empty(tmp))
return;
- if (assign_irq_vector(irq, mask))
+ cfg = desc->chip_data;
+ if (assign_irq_vector(irq, cfg, mask))
return;
- cfg = irq_cfg(irq);
+ set_extra_move_desc(desc, mask);
+
cpus_and(tmp, cfg->domain, mask);
dest = cpu_mask_to_apicid(tmp);
@@ -3395,9 +3648,9 @@ static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
msg.address_lo |= MSI_ADDR_DEST_ID(dest);
hpet_msi_write(irq, &msg);
- desc = irq_to_desc(irq);
desc->affinity = mask;
}
+
#endif /* CONFIG_SMP */
struct irq_chip hpet_msi_type = {
@@ -3452,26 +3705,28 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
{
+ struct irq_desc *desc = irq_to_desc(irq);
struct irq_cfg *cfg;
unsigned int dest;
cpumask_t tmp;
- struct irq_desc *desc;
cpus_and(tmp, mask, cpu_online_map);
if (cpus_empty(tmp))
return;
- if (assign_irq_vector(irq, mask))
+ cfg = desc->chip_data;
+ if (assign_irq_vector(irq, cfg, mask))
return;
- cfg = irq_cfg(irq);
+ set_extra_move_desc(desc, mask);
+
cpus_and(tmp, cfg->domain, mask);
dest = cpu_mask_to_apicid(tmp);
target_ht_irq(irq, dest, cfg->vector);
- desc = irq_to_desc(irq);
desc->affinity = mask;
}
+
#endif
static struct irq_chip ht_irq_chip = {
@@ -3491,13 +3746,13 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
int err;
cpumask_t tmp;
+ cfg = irq_cfg(irq);
tmp = TARGET_CPUS;
- err = assign_irq_vector(irq, tmp);
+ err = assign_irq_vector(irq, cfg, tmp);
if (!err) {
struct ht_irq_msg msg;
unsigned dest;
- cfg = irq_cfg(irq);
cpus_and(tmp, cfg->domain, tmp);
dest = cpu_mask_to_apicid(tmp);
@@ -3543,7 +3798,9 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
unsigned long flags;
int err;
- err = assign_irq_vector(irq, *eligible_cpu);
+ cfg = irq_cfg(irq);
+
+ err = assign_irq_vector(irq, cfg, *eligible_cpu);
if (err != 0)
return err;
@@ -3552,8 +3809,6 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
irq_name);
spin_unlock_irqrestore(&vector_lock, flags);
- cfg = irq_cfg(irq);
-
mmr_value = 0;
entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
@@ -3605,9 +3860,16 @@ int __init io_apic_get_redir_entries (int ioapic)
return reg_01.bits.entries;
}
-int __init probe_nr_irqs(void)
+void __init probe_nr_irqs_gsi(void)
{
- return NR_IRQS;
+ int idx;
+ int nr = 0;
+
+ for (idx = 0; idx < nr_ioapics; idx++)
+ nr += io_apic_get_redir_entries(idx) + 1;
+
+ if (nr > nr_irqs_gsi)
+ nr_irqs_gsi = nr;
}
/* --------------------------------------------------------------------------
@@ -3706,19 +3968,31 @@ int __init io_apic_get_version(int ioapic)
int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
{
+ struct irq_desc *desc;
+ struct irq_cfg *cfg;
+ int cpu = boot_cpu_id;
+
if (!IO_APIC_IRQ(irq)) {
apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
ioapic);
return -EINVAL;
}
+ desc = irq_to_desc_alloc_cpu(irq, cpu);
+ if (!desc) {
+ printk(KERN_INFO "cannot get irq_desc %d\n", irq);
+ return 0;
+ }
+
/*
* IRQs < 16 are already in the irq_2_pin[] map
*/
- if (irq >= 16)
- add_pin_to_irq(irq, ioapic, pin);
+ if (irq >= NR_IRQS_LEGACY) {
+ cfg = desc->chip_data;
+ add_pin_to_irq_cpu(cfg, cpu, ioapic, pin);
+ }
- setup_IO_APIC_irq(ioapic, pin, irq, triggering, polarity);
+ setup_IO_APIC_irq(ioapic, pin, irq, desc, triggering, polarity);
return 0;
}
@@ -3772,9 +4046,10 @@ void __init setup_ioapic_dest(void)
* when you have too many devices, because at that time only boot
* cpu is online.
*/
- cfg = irq_cfg(irq);
+ desc = irq_to_desc(irq);
+ cfg = desc->chip_data;
if (!cfg->vector) {
- setup_IO_APIC_irq(ioapic, pin, irq,
+ setup_IO_APIC_irq(ioapic, pin, irq, desc,
irq_trigger(irq_entry),
irq_polarity(irq_entry));
continue;
@@ -3784,7 +4059,6 @@ void __init setup_ioapic_dest(void)
/*
* Honour affinities which have been set in early boot
*/
- desc = irq_to_desc(irq);
if (desc->status &
(IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
mask = desc->affinity;
@@ -3793,10 +4067,10 @@ void __init setup_ioapic_dest(void)
#ifdef CONFIG_INTR_REMAP
if (intr_remapping_enabled)
- set_ir_ioapic_affinity_irq(irq, mask);
+ set_ir_ioapic_affinity_irq_desc(desc, mask);
else
#endif
- set_ioapic_affinity_irq(irq, mask);
+ set_ioapic_affinity_irq_desc(desc, mask);
}
}
@@ -3845,7 +4119,6 @@ void __init ioapic_init_mappings(void)
struct resource *ioapic_res;
int i;
- irq_2_pin_init();
ioapic_res = ioapic_setup_resources();
for (i = 0; i < nr_ioapics; i++) {
if (smp_found_config) {
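The io_apic.c conversion above consistently replaces per-IRQ lookups with a descriptor-first flow: the struct irq_desc is resolved once (allocated on the right CPU's node with irq_to_desc_alloc_cpu() where needed) and its chip_data now carries the struct irq_cfg that the low-level helpers operate on. A minimal sketch of that access pattern, for orientation only (example_mask is a hypothetical name, not part of the patch):

	static void example_mask(unsigned int irq)
	{
		struct irq_desc *desc = irq_to_desc(irq);	/* may be NULL with sparse IRQs */
		struct irq_cfg *cfg;

		if (!desc)
			return;

		cfg = desc->chip_data;	/* per-IRQ vector/domain/pin state */
		/* operate on cfg->vector, cfg->domain, cfg->irq_2_pin, ... */
	}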
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index d1d4dc52f649..3f1d9d18df67 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -118,6 +118,9 @@ int show_interrupts(struct seq_file *p, void *v)
}
desc = irq_to_desc(i);
+ if (!desc)
+ return 0;
+
spin_lock_irqsave(&desc->lock, flags);
#ifndef CONFIG_SMP
any_count = kstat_irqs(i);
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index a51382672de0..119fc9c8ff7f 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -242,6 +242,8 @@ void fixup_irqs(cpumask_t map)
for_each_irq_desc(irq, desc) {
cpumask_t mask;
+ if (!desc)
+ continue;
if (irq == 2)
continue;
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 1df869e5bd0b..a174a217eb1a 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -91,6 +91,8 @@ void fixup_irqs(cpumask_t map)
int break_affinity = 0;
int set_affinity = 1;
+ if (!desc)
+ continue;
if (irq == 2)
continue;
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c
index 607db63044a5..203384ed2b5d 100644
--- a/arch/x86/kernel/irqinit_32.c
+++ b/arch/x86/kernel/irqinit_32.c
@@ -68,8 +68,7 @@ void __init init_ISA_irqs (void)
/*
* 16 old-style INTA-cycle interrupts:
*/
- for (i = 0; i < 16; i++) {
- /* first time call this irq_desc */
+ for (i = 0; i < NR_IRQS_LEGACY; i++) {
struct irq_desc *desc = irq_to_desc(i);
desc->status = IRQ_DISABLED;
diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c
index 8670b3ce626e..6190e6ef546c 100644
--- a/arch/x86/kernel/irqinit_64.c
+++ b/arch/x86/kernel/irqinit_64.c
@@ -76,8 +76,7 @@ void __init init_ISA_irqs(void)
init_bsp_APIC();
init_8259A(0);
- for (i = 0; i < 16; i++) {
- /* first time call this irq_desc */
+ for (i = 0; i < NR_IRQS_LEGACY; i++) {
struct irq_desc *desc = irq_to_desc(i);
desc->status = IRQ_DISABLED;
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 7a3dfceb90e4..19a1044a0cd9 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -101,11 +101,15 @@ static void __init dma32_free_bootmem(void)
dma32_bootmem_ptr = NULL;
dma32_bootmem_size = 0;
}
+#endif
void __init pci_iommu_alloc(void)
{
+#ifdef CONFIG_X86_64
/* free the range so iommu could get some range less than 4G */
dma32_free_bootmem();
+#endif
+
/*
* The order of these functions is important for
* fall-back/fail-over reasons
@@ -121,15 +125,6 @@ void __init pci_iommu_alloc(void)
pci_swiotlb_init();
}
-unsigned long iommu_nr_pages(unsigned long addr, unsigned long len)
-{
- unsigned long size = roundup((addr & ~PAGE_MASK) + len, PAGE_SIZE);
-
- return size >> PAGE_SHIFT;
-}
-EXPORT_SYMBOL(iommu_nr_pages);
-#endif
-
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag)
{
diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb_64.c
index 3c539d111abb..242c3440687f 100644
--- a/arch/x86/kernel/pci-swiotlb_64.c
+++ b/arch/x86/kernel/pci-swiotlb_64.c
@@ -3,6 +3,8 @@
#include <linux/pci.h>
#include <linux/cache.h>
#include <linux/module.h>
+#include <linux/swiotlb.h>
+#include <linux/bootmem.h>
#include <linux/dma-mapping.h>
#include <asm/iommu.h>
@@ -11,6 +13,31 @@
int swiotlb __read_mostly;
+void *swiotlb_alloc_boot(size_t size, unsigned long nslabs)
+{
+ return alloc_bootmem_low_pages(size);
+}
+
+void *swiotlb_alloc(unsigned order, unsigned long nslabs)
+{
+ return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
+}
+
+dma_addr_t swiotlb_phys_to_bus(phys_addr_t paddr)
+{
+ return paddr;
+}
+
+phys_addr_t swiotlb_bus_to_phys(dma_addr_t baddr)
+{
+ return baddr;
+}
+
+int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size)
+{
+ return 0;
+}
+
static dma_addr_t
swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size,
int direction)
@@ -50,8 +77,10 @@ struct dma_mapping_ops swiotlb_dma_ops = {
void __init pci_swiotlb_init(void)
{
/* don't initialize swiotlb if iommu=off (no_iommu=1) */
+#ifdef CONFIG_X86_64
if (!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN)
swiotlb = 1;
+#endif
if (swiotlb_force)
swiotlb = 1;
if (swiotlb) {
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 67465ed89310..309949e9e1c1 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -168,6 +168,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31,
ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1,
ich_force_enable_hpet);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4,
+ ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7,
ich_force_enable_hpet);
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 08e02e8453c9..ae0d8042cf69 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -953,7 +953,7 @@ void __init setup_arch(char **cmdline_p)
ioapic_init_mappings();
/* need to wait for io_apic is mapped */
- nr_irqs = probe_nr_irqs();
+ probe_nr_irqs_gsi();
kvm_guest_init();
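probe_nr_irqs_gsi(), called above in place of the old probe_nr_irqs(), only sums the IO-APIC redirection-table sizes. As a worked example (assuming two IO-APICs whose version registers report 24 redirection entries each): io_apic_get_redir_entries() returns 23 for each, so nr = (23 + 1) + (23 + 1) = 48, nr_irqs_gsi is raised to 48, and create_irq_nr() plus the MSI setup paths hand out dynamically created IRQs starting at that boundary.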
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 9e68075544f6..4a20b2f9a381 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -39,7 +39,7 @@ static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned lon
#define __do_strncpy_from_user(dst, src, count, res) \
do { \
int __d0, __d1, __d2; \
- might_sleep(); \
+ might_fault(); \
__asm__ __volatile__( \
" testl %1,%1\n" \
" jz 2f\n" \
@@ -126,7 +126,7 @@ EXPORT_SYMBOL(strncpy_from_user);
#define __do_clear_user(addr,size) \
do { \
int __d0; \
- might_sleep(); \
+ might_fault(); \
__asm__ __volatile__( \
"0: rep; stosl\n" \
" movl %2,%0\n" \
@@ -155,7 +155,7 @@ do { \
unsigned long
clear_user(void __user *to, unsigned long n)
{
- might_sleep();
+ might_fault();
if (access_ok(VERIFY_WRITE, to, n))
__do_clear_user(to, n);
return n;
@@ -197,7 +197,7 @@ long strnlen_user(const char __user *s, long n)
unsigned long mask = -__addr_ok(s);
unsigned long res, tmp;
- might_sleep();
+ might_fault();
__asm__ __volatile__(
" testl %0, %0\n"
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index f4df6e7c718b..64d6c84e6353 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -15,7 +15,7 @@
#define __do_strncpy_from_user(dst,src,count,res) \
do { \
long __d0, __d1, __d2; \
- might_sleep(); \
+ might_fault(); \
__asm__ __volatile__( \
" testq %1,%1\n" \
" jz 2f\n" \
@@ -64,7 +64,7 @@ EXPORT_SYMBOL(strncpy_from_user);
unsigned long __clear_user(void __user *addr, unsigned long size)
{
long __d0;
- might_sleep();
+ might_fault();
/* no memory constraint because it doesn't change any memory gcc knows
about */
asm volatile(
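The might_sleep() -> might_fault() substitutions in both usercopy files mark the spots where user memory is touched, i.e. where a page fault (and hence a sleep) may occur. A sketch of the class of bug the annotation helps catch (hypothetical function, not from this patch; spin_lock() and copy_from_user() are the stock primitives):

	/* Hypothetical example: touching user memory while atomic is
	 * exactly what a might_fault() annotation is meant to flag. */
	static int example_copy_while_atomic(void __user *src, spinlock_t *lock)
	{
		char buf[8];
		unsigned long left;

		spin_lock(lock);				/* atomic context ... */
		left = copy_from_user(buf, src, sizeof(buf));	/* ... so this copy would warn */
		spin_unlock(lock);

		return left ? -EFAULT : 0;
	}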
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 800e1d94c1b5..8655b5bb0963 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -21,6 +21,7 @@
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
+#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
@@ -967,6 +968,8 @@ void __init mem_init(void)
int codesize, reservedpages, datasize, initsize;
int tmp;
+ pci_iommu_alloc();
+
#ifdef CONFIG_FLATMEM
BUG_ON(!mem_map);
#endif
diff --git a/block/Kconfig b/block/Kconfig
index 290b219fad9c..ac0956f77785 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -24,21 +24,17 @@ menuconfig BLOCK
if BLOCK
config LBD
- bool "Support for Large Block Devices"
+ bool "Support for large block devices and files"
depends on !64BIT
help
- Enable block devices of size 2TB and larger.
+ Enable block devices or files of size 2TB and larger.
This option is required to support the full capacity of large
(2TB+) block devices, including RAID, disk, Network Block Device,
Logical Volume Manager (LVM) and loopback.
-
- For example, RAID devices are frequently bigger than the capacity
- of the largest individual hard drive.
-
- This option is not required if you have individual disk drives
- which total 2TB+ and you are not aggregating the capacity into
- a large block device (e.g. using RAID or LVM).
+
+ This option also enables support for single files larger than
+ 2TB.
If unsure, say N.
@@ -58,15 +54,6 @@ config BLK_DEV_IO_TRACE
If unsure, say N.
-config LSF
- bool "Support for Large Single Files"
- depends on !64BIT
- help
- Say Y here if you want to be able to handle very large files (2TB
- and larger), otherwise say N.
-
- If unsure, say Y.
-
config BLK_DEV_BSG
bool "Block layer SG support v4 (EXPERIMENTAL)"
depends on EXPERIMENTAL
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 71f0abb219ee..631f6f44460a 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1339,12 +1339,12 @@ static int as_may_queue(struct request_queue *q, int rw)
return ret;
}
-static void as_exit_queue(elevator_t *e)
+static void as_exit_queue(struct elevator_queue *e)
{
struct as_data *ad = e->elevator_data;
del_timer_sync(&ad->antic_timer);
- kblockd_flush_work(&ad->antic_work);
+ cancel_work_sync(&ad->antic_work);
BUG_ON(!list_empty(&ad->fifo_list[REQ_SYNC]));
BUG_ON(!list_empty(&ad->fifo_list[REQ_ASYNC]));
@@ -1409,7 +1409,7 @@ as_var_store(unsigned long *var, const char *page, size_t count)
return count;
}
-static ssize_t est_time_show(elevator_t *e, char *page)
+static ssize_t est_time_show(struct elevator_queue *e, char *page)
{
struct as_data *ad = e->elevator_data;
int pos = 0;
@@ -1427,7 +1427,7 @@ static ssize_t est_time_show(elevator_t *e, char *page)
}
#define SHOW_FUNCTION(__FUNC, __VAR) \
-static ssize_t __FUNC(elevator_t *e, char *page) \
+static ssize_t __FUNC(struct elevator_queue *e, char *page) \
{ \
struct as_data *ad = e->elevator_data; \
return as_var_show(jiffies_to_msecs((__VAR)), (page)); \
@@ -1440,7 +1440,7 @@ SHOW_FUNCTION(as_write_batch_expire_show, ad->batch_expire[REQ_ASYNC]);
#undef SHOW_FUNCTION
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
-static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \
+static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{ \
struct as_data *ad = e->elevator_data; \
int ret = as_var_store(__PTR, (page), count); \
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 6e72d661ae42..8eba4e43bb0c 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -24,8 +24,8 @@
int blk_queue_ordered(struct request_queue *q, unsigned ordered,
prepare_flush_fn *prepare_flush_fn)
{
- if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
- prepare_flush_fn == NULL) {
+ if (!prepare_flush_fn && (ordered & (QUEUE_ORDERED_DO_PREFLUSH |
+ QUEUE_ORDERED_DO_POSTFLUSH))) {
printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__);
return -EINVAL;
}
@@ -88,7 +88,7 @@ unsigned blk_ordered_req_seq(struct request *rq)
return QUEUE_ORDSEQ_DONE;
}
-void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
+bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
{
struct request *rq;
@@ -99,7 +99,7 @@ void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
q->ordseq |= seq;
if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
- return;
+ return false;
/*
* Okay, sequence complete.
@@ -109,6 +109,8 @@ void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
BUG();
+
+ return true;
}
static void pre_flush_end_io(struct request *rq, int error)
@@ -134,7 +136,7 @@ static void queue_flush(struct request_queue *q, unsigned which)
struct request *rq;
rq_end_io_fn *end_io;
- if (which == QUEUE_ORDERED_PREFLUSH) {
+ if (which == QUEUE_ORDERED_DO_PREFLUSH) {
rq = &q->pre_flush_rq;
end_io = pre_flush_end_io;
} else {
@@ -151,80 +153,110 @@ static void queue_flush(struct request_queue *q, unsigned which)
elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}
-static inline struct request *start_ordered(struct request_queue *q,
- struct request *rq)
+static inline bool start_ordered(struct request_queue *q, struct request **rqp)
{
+ struct request *rq = *rqp;
+ unsigned skip = 0;
+
q->orderr = 0;
q->ordered = q->next_ordered;
q->ordseq |= QUEUE_ORDSEQ_STARTED;
/*
- * Prep proxy barrier request.
+ * For an empty barrier, there's no actual BAR request, which
+ * in turn makes POSTFLUSH unnecessary. Mask them off.
*/
+ if (!rq->hard_nr_sectors) {
+ q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
+ QUEUE_ORDERED_DO_POSTFLUSH);
+ /*
+ * Empty barrier on a write-through device w/ ordered
+ * tag has no command to issue and without any command
+ * to issue, ordering by tag can't be used. Drain
+ * instead.
+ */
+ if ((q->ordered & QUEUE_ORDERED_BY_TAG) &&
+ !(q->ordered & QUEUE_ORDERED_DO_PREFLUSH)) {
+ q->ordered &= ~QUEUE_ORDERED_BY_TAG;
+ q->ordered |= QUEUE_ORDERED_BY_DRAIN;
+ }
+ }
+
+ /* stash away the original request */
elv_dequeue_request(q, rq);
q->orig_bar_rq = rq;
- rq = &q->bar_rq;
- blk_rq_init(q, rq);
- if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
- rq->cmd_flags |= REQ_RW;
- if (q->ordered & QUEUE_ORDERED_FUA)
- rq->cmd_flags |= REQ_FUA;
- init_request_from_bio(rq, q->orig_bar_rq->bio);
- rq->end_io = bar_end_io;
+ rq = NULL;
/*
* Queue ordered sequence. As we stack them at the head, we
* need to queue in reverse order. Note that we rely on that
* no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
- * request gets inbetween ordered sequence. If this request is
- * an empty barrier, we don't need to do a postflush ever since
- * there will be no data written between the pre and post flush.
- * Hence a single flush will suffice.
+ * request gets in between the ordered sequence.
*/
- if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq))
- queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
- else
- q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
+ if (q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) {
+ queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
+ rq = &q->post_flush_rq;
+ } else
+ skip |= QUEUE_ORDSEQ_POSTFLUSH;
- elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
+ if (q->ordered & QUEUE_ORDERED_DO_BAR) {
+ rq = &q->bar_rq;
+
+ /* initialize proxy request and queue it */
+ blk_rq_init(q, rq);
+ if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
+ rq->cmd_flags |= REQ_RW;
+ if (q->ordered & QUEUE_ORDERED_DO_FUA)
+ rq->cmd_flags |= REQ_FUA;
+ init_request_from_bio(rq, q->orig_bar_rq->bio);
+ rq->end_io = bar_end_io;
- if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
- queue_flush(q, QUEUE_ORDERED_PREFLUSH);
+ elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
+ } else
+ skip |= QUEUE_ORDSEQ_BAR;
+
+ if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
+ queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
rq = &q->pre_flush_rq;
} else
- q->ordseq |= QUEUE_ORDSEQ_PREFLUSH;
+ skip |= QUEUE_ORDSEQ_PREFLUSH;
- if ((q->ordered & QUEUE_ORDERED_TAG) || q->in_flight == 0)
- q->ordseq |= QUEUE_ORDSEQ_DRAIN;
- else
+ if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && q->in_flight)
rq = NULL;
+ else
+ skip |= QUEUE_ORDSEQ_DRAIN;
+
+ *rqp = rq;
- return rq;
+ /*
+ * Complete skipped sequences. If the whole sequence is complete,
+ * return false to tell elevator that this request is gone.
+ */
+ return !blk_ordered_complete_seq(q, skip, 0);
}
-int blk_do_ordered(struct request_queue *q, struct request **rqp)
+bool blk_do_ordered(struct request_queue *q, struct request **rqp)
{
struct request *rq = *rqp;
const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
if (!q->ordseq) {
if (!is_barrier)
- return 1;
+ return true;
- if (q->next_ordered != QUEUE_ORDERED_NONE) {
- *rqp = start_ordered(q, rq);
- return 1;
- } else {
+ if (q->next_ordered != QUEUE_ORDERED_NONE)
+ return start_ordered(q, rqp);
+ else {
/*
- * This can happen when the queue switches to
- * ORDERED_NONE while this request is on it.
+ * Queue ordering not supported. Terminate
+ * with prejudice.
*/
elv_dequeue_request(q, rq);
if (__blk_end_request(rq, -EOPNOTSUPP,
blk_rq_bytes(rq)))
BUG();
*rqp = NULL;
- return 0;
+ return false;
}
}
@@ -235,9 +267,9 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
/* Special requests are not subject to ordering rules. */
if (!blk_fs_request(rq) &&
rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
- return 1;
+ return true;
- if (q->ordered & QUEUE_ORDERED_TAG) {
+ if (q->ordered & QUEUE_ORDERED_BY_TAG) {
/* Ordered by tag. Blocking the next barrier is enough. */
if (is_barrier && rq != &q->bar_rq)
*rqp = NULL;
@@ -248,7 +280,7 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
*rqp = NULL;
}
- return 1;
+ return true;
}
static void bio_end_empty_barrier(struct bio *bio, int err)
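The reworked start_ordered() above replaces the old mark-as-you-go logic with a skip mask: every stage that this queue's ordering mode does not need is collected and completed in one call, and the boolean result tells the elevator whether the barrier request still exists. A condensed sketch of that control flow (example_collect_skips is a hypothetical helper, not the full function):

	static bool example_collect_skips(struct request_queue *q)
	{
		unsigned skip = 0;

		if (!(q->ordered & QUEUE_ORDERED_DO_POSTFLUSH))
			skip |= QUEUE_ORDSEQ_POSTFLUSH;
		if (!(q->ordered & QUEUE_ORDERED_DO_BAR))
			skip |= QUEUE_ORDSEQ_BAR;
		if (!(q->ordered & QUEUE_ORDERED_DO_PREFLUSH))
			skip |= QUEUE_ORDSEQ_PREFLUSH;
		if (!((q->ordered & QUEUE_ORDERED_BY_DRAIN) && q->in_flight))
			skip |= QUEUE_ORDSEQ_DRAIN;

		/* blk_ordered_complete_seq() returns true once the whole
		 * sequence is done; invert it to report "request still alive". */
		return !blk_ordered_complete_seq(q, skip, 0);
	}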
diff --git a/block/blk-core.c b/block/blk-core.c
index 561e8a1b43a4..a824e49c0d0a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -153,6 +153,9 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
nbytes = bio->bi_size;
}
+ if (unlikely(rq->cmd_flags & REQ_QUIET))
+ set_bit(BIO_QUIET, &bio->bi_flags);
+
bio->bi_size -= nbytes;
bio->bi_sector += (nbytes >> 9);
@@ -265,8 +268,7 @@ void __generic_unplug_device(struct request_queue *q)
{
if (unlikely(blk_queue_stopped(q)))
return;
-
- if (!blk_remove_plug(q))
+ if (!blk_remove_plug(q) && !blk_queue_nonrot(q))
return;
q->request_fn(q);
@@ -404,7 +406,8 @@ EXPORT_SYMBOL(blk_stop_queue);
void blk_sync_queue(struct request_queue *q)
{
del_timer_sync(&q->unplug_timer);
- kblockd_flush_work(&q->unplug_work);
+ del_timer_sync(&q->timeout);
+ cancel_work_sync(&q->unplug_work);
}
EXPORT_SYMBOL(blk_sync_queue);
@@ -1135,7 +1138,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
static int __make_request(struct request_queue *q, struct bio *bio)
{
struct request *req;
- int el_ret, nr_sectors, barrier, discard, err;
+ int el_ret, nr_sectors;
const unsigned short prio = bio_prio(bio);
const int sync = bio_sync(bio);
int rw_flags;
@@ -1149,22 +1152,9 @@ static int __make_request(struct request_queue *q, struct bio *bio)
*/
blk_queue_bounce(q, &bio);
- barrier = bio_barrier(bio);
- if (unlikely(barrier) && bio_has_data(bio) &&
- (q->next_ordered == QUEUE_ORDERED_NONE)) {
- err = -EOPNOTSUPP;
- goto end_io;
- }
-
- discard = bio_discard(bio);
- if (unlikely(discard) && !q->prepare_discard_fn) {
- err = -EOPNOTSUPP;
- goto end_io;
- }
-
spin_lock_irq(q->queue_lock);
- if (unlikely(barrier) || elv_queue_empty(q))
+ if (unlikely(bio_barrier(bio)) || elv_queue_empty(q))
goto get_rq;
el_ret = elv_merge(q, &req, bio);
@@ -1250,18 +1240,14 @@ get_rq:
if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
bio_flagged(bio, BIO_CPU_AFFINE))
req->cpu = blk_cpu_to_group(smp_processor_id());
- if (elv_queue_empty(q))
+ if (!blk_queue_nonrot(q) && elv_queue_empty(q))
blk_plug_device(q);
add_request(q, req);
out:
- if (sync)
+ if (sync || blk_queue_nonrot(q))
__generic_unplug_device(q);
spin_unlock_irq(q->queue_lock);
return 0;
-
-end_io:
- bio_endio(bio, err);
- return 0;
}
/*
@@ -1414,15 +1400,13 @@ static inline void __generic_make_request(struct bio *bio)
char b[BDEVNAME_SIZE];
q = bdev_get_queue(bio->bi_bdev);
- if (!q) {
+ if (unlikely(!q)) {
printk(KERN_ERR
"generic_make_request: Trying to access "
"nonexistent block-device %s (%Lu)\n",
bdevname(bio->bi_bdev, b),
(long long) bio->bi_sector);
-end_io:
- bio_endio(bio, err);
- break;
+ goto end_io;
}
if (unlikely(nr_sectors > q->max_hw_sectors)) {
@@ -1459,14 +1443,19 @@ end_io:
if (bio_check_eod(bio, nr_sectors))
goto end_io;
- if ((bio_empty_barrier(bio) && !q->prepare_flush_fn) ||
- (bio_discard(bio) && !q->prepare_discard_fn)) {
+
+ if (bio_discard(bio) && !q->prepare_discard_fn) {
err = -EOPNOTSUPP;
goto end_io;
}
ret = q->make_request_fn(q, bio);
} while (ret);
+
+ return;
+
+end_io:
+ bio_endio(bio, err);
}
/*
@@ -1716,14 +1705,6 @@ static int __end_that_request_first(struct request *req, int error,
while ((bio = req->bio) != NULL) {
int nbytes;
- /*
- * For an empty barrier request, the low level driver must
- * store a potential error location in ->sector. We pass
- * that back up in ->bi_sector.
- */
- if (blk_empty_barrier(req))
- bio->bi_sector = req->sector;
-
if (nr_bytes >= bio->bi_size) {
req->bio = bio->bi_next;
nbytes = bio->bi_size;
@@ -2143,12 +2124,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
}
EXPORT_SYMBOL(kblockd_schedule_work);
-void kblockd_flush_work(struct work_struct *work)
-{
- cancel_work_sync(work);
-}
-EXPORT_SYMBOL(kblockd_flush_work);
-
int __init blk_dev_init(void)
{
kblockd_workqueue = create_workqueue("kblockd");
diff --git a/block/blk-settings.c b/block/blk-settings.c
index afa55e14e278..59fd05d9f1d5 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -319,9 +319,9 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, b->seg_boundary_mask);
- t->max_phys_segments = min(t->max_phys_segments, b->max_phys_segments);
- t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments);
- t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
+ t->max_phys_segments = min_not_zero(t->max_phys_segments, b->max_phys_segments);
+ t->max_hw_segments = min_not_zero(t->max_hw_segments, b->max_hw_segments);
+ t->max_segment_size = min_not_zero(t->max_segment_size, b->max_segment_size);
t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
if (!t->queue_lock)
WARN_ON_ONCE(1);
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index e660d26ca656..ce0efc6b26dc 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -161,7 +161,7 @@ void blk_complete_request(struct request *req)
}
EXPORT_SYMBOL(blk_complete_request);
-__init int blk_softirq_init(void)
+static __init int blk_softirq_init(void)
{
int i;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 21e275d7eed9..a29cb788e408 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -88,9 +88,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
unsigned long ra_kb;
ssize_t ret = queue_var_store(&ra_kb, page, count);
- spin_lock_irq(q->queue_lock);
q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
- spin_unlock_irq(q->queue_lock);
return ret;
}
@@ -117,10 +115,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
return -EINVAL;
- /*
- * Take the queue lock to update the readahead and max_sectors
- * values synchronously:
- */
+
spin_lock_irq(q->queue_lock);
q->max_sectors = max_sectors_kb << 1;
spin_unlock_irq(q->queue_lock);
diff --git a/block/blk-tag.c b/block/blk-tag.c
index c0d419e84ce7..3c518e3303ae 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -158,7 +158,6 @@ fail:
/**
* blk_init_tags - initialize the tag info for an external tag map
* @depth: the maximum queue depth supported
- * @tags: the tag to use
**/
struct blk_queue_tag *blk_init_tags(int depth)
{
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 69185ea9fae2..a09535377a94 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -73,11 +73,7 @@ ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
*/
void blk_delete_timer(struct request *req)
{
- struct request_queue *q = req->q;
-
list_del_init(&req->timeout_list);
- if (list_empty(&q->timeout_list))
- del_timer(&q->timeout);
}
static void blk_rq_timed_out(struct request *req)
@@ -111,7 +107,7 @@ static void blk_rq_timed_out(struct request *req)
void blk_rq_timed_out_timer(unsigned long data)
{
struct request_queue *q = (struct request_queue *) data;
- unsigned long flags, uninitialized_var(next), next_set = 0;
+ unsigned long flags, next = 0;
struct request *rq, *tmp;
spin_lock_irqsave(q->queue_lock, flags);
@@ -126,15 +122,18 @@ void blk_rq_timed_out_timer(unsigned long data)
if (blk_mark_rq_complete(rq))
continue;
blk_rq_timed_out(rq);
+ } else {
+ if (!next || time_after(next, rq->deadline))
+ next = rq->deadline;
}
- if (!next_set) {
- next = rq->deadline;
- next_set = 1;
- } else if (time_after(next, rq->deadline))
- next = rq->deadline;
}
- if (next_set && !list_empty(&q->timeout_list))
+ /*
+ * next can never be 0 here with the list non-empty, since we always
+ * bump ->deadline to 1 so we can detect if the timer was ever added
+ * or not. See comment in blk_add_timer()
+ */
+ if (next)
mod_timer(&q->timeout, round_jiffies_up(next));
spin_unlock_irqrestore(q->queue_lock, flags);
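The rewritten timeout handler above uses next == 0 to mean "no request still pending", which is only safe if a request's deadline can never legitimately be 0. The added comment defers to blk_add_timer() for that guarantee; a hedged sketch of what such a guard looks like (assumed shape only, example_add_timer is a hypothetical name):

	static void example_add_timer(struct request *req)
	{
		req->deadline = jiffies + req->timeout;
		if (!req->deadline)
			req->deadline = 1;	/* keep 0 free to mean "not armed" */

		mod_timer(&req->q->timeout, round_jiffies_up(req->deadline));
	}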
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 6a062eebbd15..e8525fa72823 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1136,12 +1136,8 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
if (cfq_class_idle(cfqq))
max_dispatch = 1;
- if (cfqq->dispatched >= max_dispatch) {
- if (cfqd->busy_queues > 1)
- break;
- if (cfqq->dispatched >= 4 * max_dispatch)
- break;
- }
+ if (cfqq->dispatched >= max_dispatch && cfqd->busy_queues > 1)
+ break;
if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
break;
@@ -1318,7 +1314,15 @@ static void cfq_exit_single_io_context(struct io_context *ioc,
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
- __cfq_exit_single_io_context(cfqd, cic);
+
+ /*
+ * Ensure we get a fresh copy of the ->key to prevent
+ * a race between the exiting task and the queue
+ */
+ smp_read_barrier_depends();
+ if (cic->key)
+ __cfq_exit_single_io_context(cfqd, cic);
+
spin_unlock_irqrestore(q->queue_lock, flags);
}
}
@@ -2160,7 +2164,7 @@ out_cont:
static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
del_timer_sync(&cfqd->idle_slice_timer);
- kblockd_flush_work(&cfqd->unplug_work);
+ cancel_work_sync(&cfqd->unplug_work);
}
static void cfq_put_async_queues(struct cfq_data *cfqd)
@@ -2178,7 +2182,7 @@ static void cfq_put_async_queues(struct cfq_data *cfqd)
cfq_put_queue(cfqd->async_idle_cfqq);
}
-static void cfq_exit_queue(elevator_t *e)
+static void cfq_exit_queue(struct elevator_queue *e)
{
struct cfq_data *cfqd = e->elevator_data;
struct request_queue *q = cfqd->queue;
@@ -2288,7 +2292,7 @@ cfq_var_store(unsigned int *var, const char *page, size_t count)
}
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
-static ssize_t __FUNC(elevator_t *e, char *page) \
+static ssize_t __FUNC(struct elevator_queue *e, char *page) \
{ \
struct cfq_data *cfqd = e->elevator_data; \
unsigned int __data = __VAR; \
@@ -2308,7 +2312,7 @@ SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
#undef SHOW_FUNCTION
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
-static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \
+static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{ \
struct cfq_data *cfqd = e->elevator_data; \
unsigned int __data; \
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 67eb93cff699..f87615dea46b 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -774,9 +774,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
bdi = blk_get_backing_dev_info(bdev);
if (bdi == NULL)
return -ENOTTY;
- lock_kernel();
bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
- unlock_kernel();
return 0;
case BLKGETSIZE:
size = bdev->bd_inode->i_size;
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index fd311179f44c..c4d991d4adef 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -334,7 +334,7 @@ static int deadline_queue_empty(struct request_queue *q)
&& list_empty(&dd->fifo_list[READ]);
}
-static void deadline_exit_queue(elevator_t *e)
+static void deadline_exit_queue(struct elevator_queue *e)
{
struct deadline_data *dd = e->elevator_data;
@@ -387,7 +387,7 @@ deadline_var_store(int *var, const char *page, size_t count)
}
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
-static ssize_t __FUNC(elevator_t *e, char *page) \
+static ssize_t __FUNC(struct elevator_queue *e, char *page) \
{ \
struct deadline_data *dd = e->elevator_data; \
int __data = __VAR; \
@@ -403,7 +403,7 @@ SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
-static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \
+static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{ \
struct deadline_data *dd = e->elevator_data; \
int __data; \
diff --git a/block/elevator.c b/block/elevator.c
index 86836dd179c0..98259eda0ef6 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -65,7 +65,7 @@ DEFINE_TRACE(block_rq_issue);
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
struct request_queue *q = rq->q;
- elevator_t *e = q->elevator;
+ struct elevator_queue *e = q->elevator;
if (e->ops->elevator_allow_merge_fn)
return e->ops->elevator_allow_merge_fn(q, rq, bio);
@@ -208,13 +208,13 @@ __setup("elevator=", elevator_setup);
static struct kobj_type elv_ktype;
-static elevator_t *elevator_alloc(struct request_queue *q,
+static struct elevator_queue *elevator_alloc(struct request_queue *q,
struct elevator_type *e)
{
- elevator_t *eq;
+ struct elevator_queue *eq;
int i;
- eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL | __GFP_ZERO, q->node);
+ eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
if (unlikely(!eq))
goto err;
@@ -240,8 +240,9 @@ err:
static void elevator_release(struct kobject *kobj)
{
- elevator_t *e = container_of(kobj, elevator_t, kobj);
+ struct elevator_queue *e;
+ e = container_of(kobj, struct elevator_queue, kobj);
elevator_put(e->elevator_type);
kfree(e->hash);
kfree(e);
@@ -297,7 +298,7 @@ int elevator_init(struct request_queue *q, char *name)
}
EXPORT_SYMBOL(elevator_init);
-void elevator_exit(elevator_t *e)
+void elevator_exit(struct elevator_queue *e)
{
mutex_lock(&e->sysfs_lock);
if (e->ops->elevator_exit_fn)
@@ -311,7 +312,7 @@ EXPORT_SYMBOL(elevator_exit);
static void elv_activate_rq(struct request_queue *q, struct request *rq)
{
- elevator_t *e = q->elevator;
+ struct elevator_queue *e = q->elevator;
if (e->ops->elevator_activate_req_fn)
e->ops->elevator_activate_req_fn(q, rq);
@@ -319,7 +320,7 @@ static void elv_activate_rq(struct request_queue *q, struct request *rq)
static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
- elevator_t *e = q->elevator;
+ struct elevator_queue *e = q->elevator;
if (e->ops->elevator_deactivate_req_fn)
e->ops->elevator_deactivate_req_fn(q, rq);
@@ -338,7 +339,7 @@ static void elv_rqhash_del(struct request_queue *q, struct request *rq)
static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
- elevator_t *e = q->elevator;
+ struct elevator_queue *e = q->elevator;
BUG_ON(ELV_ON_HASH(rq));
hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
@@ -352,7 +353,7 @@ static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
- elevator_t *e = q->elevator;
+ struct elevator_queue *e = q->elevator;
struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
struct hlist_node *entry, *next;
struct request *rq;
@@ -494,7 +495,7 @@ EXPORT_SYMBOL(elv_dispatch_add_tail);
int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
- elevator_t *e = q->elevator;
+ struct elevator_queue *e = q->elevator;
struct request *__rq;
int ret;
@@ -529,7 +530,7 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
- elevator_t *e = q->elevator;
+ struct elevator_queue *e = q->elevator;
if (e->ops->elevator_merged_fn)
e->ops->elevator_merged_fn(q, rq, type);
@@ -543,7 +544,7 @@ void elv_merged_request(struct request_queue *q, struct request *rq, int type)
void elv_merge_requests(struct request_queue *q, struct request *rq,
struct request *next)
{
- elevator_t *e = q->elevator;
+ struct elevator_queue *e = q->elevator;
if (e->ops->elevator_merge_req_fn)
e->ops->elevator_merge_req_fn(q, rq, next);
@@ -755,14 +756,6 @@ struct request *elv_next_request(struct request_queue *q)
int ret;
while ((rq = __elv_next_request(q)) != NULL) {
- /*
- * Kill the empty barrier place holder, the driver must
- * not ever see it.
- */
- if (blk_empty_barrier(rq)) {
- __blk_end_request(rq, 0, blk_rq_bytes(rq));
- continue;
- }
if (!(rq->cmd_flags & REQ_STARTED)) {
/*
* This is the first time the device driver
@@ -854,7 +847,7 @@ void elv_dequeue_request(struct request_queue *q, struct request *rq)
int elv_queue_empty(struct request_queue *q)
{
- elevator_t *e = q->elevator;
+ struct elevator_queue *e = q->elevator;
if (!list_empty(&q->queue_head))
return 0;
@@ -868,7 +861,7 @@ EXPORT_SYMBOL(elv_queue_empty);
struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
- elevator_t *e = q->elevator;
+ struct elevator_queue *e = q->elevator;
if (e->ops->elevator_latter_req_fn)
return e->ops->elevator_latter_req_fn(q, rq);
@@ -877,7 +870,7 @@ struct request *elv_latter_request(struct request_queue *q, struct request *rq)
struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
- elevator_t *e = q->elevator;
+ struct elevator_queue *e = q->elevator;
if (e->ops->elevator_former_req_fn)
return e->ops->elevator_former_req_fn(q, rq);
@@ -886,7 +879,7 @@ struct request *elv_former_request(struct request_queue *q, struct request *rq)
int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
- elevator_t *e = q->elevator;
+ struct elevator_queue *e = q->elevator;
if (e->ops->elevator_set_req_fn)
return e->ops->elevator_set_req_fn(q, rq, gfp_mask);
@@ -897,7 +890,7 @@ int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
void elv_put_request(struct request_queue *q, struct request *rq)
{
- elevator_t *e = q->elevator;
+ struct elevator_queue *e = q->elevator;
if (e->ops->elevator_put_req_fn)
e->ops->elevator_put_req_fn(rq);
@@ -905,7 +898,7 @@ void elv_put_request(struct request_queue *q, struct request *rq)
int elv_may_queue(struct request_queue *q, int rw)
{
- elevator_t *e = q->elevator;
+ struct elevator_queue *e = q->elevator;
if (e->ops->elevator_may_queue_fn)
return e->ops->elevator_may_queue_fn(q, rw);
@@ -928,7 +921,7 @@ EXPORT_SYMBOL(elv_abort_queue);
void elv_completed_request(struct request_queue *q, struct request *rq)
{
- elevator_t *e = q->elevator;
+ struct elevator_queue *e = q->elevator;
/*
* request is released from the driver, io must be done
@@ -944,10 +937,14 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
* drained for flush sequence.
*/
if (unlikely(q->ordseq)) {
- struct request *first_rq = list_entry_rq(q->queue_head.next);
- if (q->in_flight == 0 &&
+ struct request *next = NULL;
+
+ if (!list_empty(&q->queue_head))
+ next = list_entry_rq(q->queue_head.next);
+
+ if (!q->in_flight &&
blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
- blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
+ (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
blk_start_queueing(q);
}
@@ -959,13 +956,14 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
- elevator_t *e = container_of(kobj, elevator_t, kobj);
struct elv_fs_entry *entry = to_elv(attr);
+ struct elevator_queue *e;
ssize_t error;
if (!entry->show)
return -EIO;
+ e = container_of(kobj, struct elevator_queue, kobj);
mutex_lock(&e->sysfs_lock);
error = e->ops ? entry->show(e, page) : -ENOENT;
mutex_unlock(&e->sysfs_lock);
@@ -976,13 +974,14 @@ static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
const char *page, size_t length)
{
- elevator_t *e = container_of(kobj, elevator_t, kobj);
struct elv_fs_entry *entry = to_elv(attr);
+ struct elevator_queue *e;
ssize_t error;
if (!entry->store)
return -EIO;
+ e = container_of(kobj, struct elevator_queue, kobj);
mutex_lock(&e->sysfs_lock);
error = e->ops ? entry->store(e, page, length) : -ENOENT;
mutex_unlock(&e->sysfs_lock);
@@ -1001,7 +1000,7 @@ static struct kobj_type elv_ktype = {
int elv_register_queue(struct request_queue *q)
{
- elevator_t *e = q->elevator;
+ struct elevator_queue *e = q->elevator;
int error;
error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
@@ -1019,7 +1018,7 @@ int elv_register_queue(struct request_queue *q)
return error;
}
-static void __elv_unregister_queue(elevator_t *e)
+static void __elv_unregister_queue(struct elevator_queue *e)
{
kobject_uevent(&e->kobj, KOBJ_REMOVE);
kobject_del(&e->kobj);
@@ -1082,7 +1081,7 @@ EXPORT_SYMBOL_GPL(elv_unregister);
*/
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
- elevator_t *old_elevator, *e;
+ struct elevator_queue *old_elevator, *e;
void *data;
/*
@@ -1188,7 +1187,7 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
- elevator_t *e = q->elevator;
+ struct elevator_queue *e = q->elevator;
struct elevator_type *elv = e->elevator_type;
struct elevator_type *__e;
int len = 0;
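
The elv_completed_request() hunk above exists because taking "the first request" of an empty queue_head is not a harmless no-op: on an empty list, head->next points back at the head itself, so list_entry_rq() manufactures a bogus request pointer. A minimal user-space sketch of that failure mode and of the guarded pattern the patch switches to (the list/request types here are stand-ins, not the kernel's):

        #include <stddef.h>
        #include <stdio.h>

        struct list_head { struct list_head *next, *prev; };

        #define LIST_HEAD_INIT(name) { &(name), &(name) }
        #define container_of(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))
        #define list_entry(ptr, type, member) container_of(ptr, type, member)

        static int list_empty(const struct list_head *head)
        {
                return head->next == head;
        }

        struct request { int tag; struct list_head queuelist; };

        int main(void)
        {
                struct list_head queue_head = LIST_HEAD_INIT(queue_head);
                struct request *first = NULL;

                /* buggy pattern: unconditionally take the "first" request */
                struct request *bogus =
                        list_entry(queue_head.next, struct request, queuelist);
                printf("bogus entry on empty list: %p (head is %p)\n",
                       (void *)bogus, (void *)&queue_head);

                /* fixed pattern: only dereference when the list is non-empty */
                if (!list_empty(&queue_head))
                        first = list_entry(queue_head.next,
                                           struct request, queuelist);
                printf("guarded entry on empty list: %p\n", (void *)first);
                return 0;
        }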
diff --git a/block/genhd.c b/block/genhd.c
index 2f7feda61e35..d84a7df1e2a0 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -181,6 +181,12 @@ void disk_part_iter_exit(struct disk_part_iter *piter)
}
EXPORT_SYMBOL_GPL(disk_part_iter_exit);
+static inline int sector_in_part(struct hd_struct *part, sector_t sector)
+{
+ return part->start_sect <= sector &&
+ sector < part->start_sect + part->nr_sects;
+}
+
/**
* disk_map_sector_rcu - map sector to partition
* @disk: gendisk of interest
@@ -199,16 +205,22 @@ EXPORT_SYMBOL_GPL(disk_part_iter_exit);
struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector)
{
struct disk_part_tbl *ptbl;
+ struct hd_struct *part;
int i;
ptbl = rcu_dereference(disk->part_tbl);
+ part = rcu_dereference(ptbl->last_lookup);
+ if (part && sector_in_part(part, sector))
+ return part;
+
for (i = 1; i < ptbl->len; i++) {
- struct hd_struct *part = rcu_dereference(ptbl->part[i]);
+ part = rcu_dereference(ptbl->part[i]);
- if (part && part->start_sect <= sector &&
- sector < part->start_sect + part->nr_sects)
+ if (part && sector_in_part(part, sector)) {
+ rcu_assign_pointer(ptbl->last_lookup, part);
return part;
+ }
}
return &disk->part0;
}
@@ -888,8 +900,11 @@ static void disk_replace_part_tbl(struct gendisk *disk,
struct disk_part_tbl *old_ptbl = disk->part_tbl;
rcu_assign_pointer(disk->part_tbl, new_ptbl);
- if (old_ptbl)
+
+ if (old_ptbl) {
+ rcu_assign_pointer(old_ptbl->last_lookup, NULL);
call_rcu(&old_ptbl->rcu_head, disk_free_ptbl_rcu_cb);
+ }
}
/**
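
The genhd.c hunks above add a one-entry cache (last_lookup) in front of the linear partition scan, and clear it when the partition table is replaced. A rough single-threaded user-space sketch of the same idea; the kernel publishes the cached pointer with rcu_assign_pointer()/rcu_dereference(), which this demo replaces with plain loads and stores, and the partition layout is invented:

        #include <stdio.h>

        struct part { unsigned long start, len; };

        static struct part parts[] = {
                { 0, 0 },                       /* slot 0 unused, like part0 */
                { 2048, 1 << 20 },
                { 2048 + (1 << 20), 1 << 21 },
        };
        static struct part *last_lookup;

        static int sector_in_part(struct part *p, unsigned long sector)
        {
                return p->start <= sector && sector < p->start + p->len;
        }

        static struct part *map_sector(unsigned long sector)
        {
                struct part *p = last_lookup;
                unsigned int i;

                if (p && sector_in_part(p, sector))
                        return p;                       /* cache hit */

                for (i = 1; i < sizeof(parts) / sizeof(parts[0]); i++) {
                        p = &parts[i];
                        if (sector_in_part(p, sector)) {
                                last_lookup = p;        /* remember for next time */
                                return p;
                        }
                }
                return &parts[0];                       /* whole-disk fallback */
        }

        int main(void)
        {
                printf("sector 4096 -> partition %ld\n",
                       (long)(map_sector(4096) - parts));
                printf("sector 5000 -> partition %ld\n",
                       (long)(map_sector(5000) - parts));
                return 0;
        }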
diff --git a/block/ioctl.c b/block/ioctl.c
index d03985b04d67..0f22e629b13c 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -323,9 +323,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
bdi = blk_get_backing_dev_info(bdev);
if (bdi == NULL)
return -ENOTTY;
- lock_kernel();
bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
- unlock_kernel();
return 0;
case BLKBSZSET:
/* set the logical block size */
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index c23e02969650..3a0d369d08c7 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -76,7 +76,7 @@ static void *noop_init_queue(struct request_queue *q)
return nd;
}
-static void noop_exit_queue(elevator_t *e)
+static void noop_exit_queue(struct elevator_queue *e)
{
struct noop_data *nd = e->elevator_data;
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index d0bb92cbefb9..ee9c67d7e1be 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -60,7 +60,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
static int sg_get_timeout(struct request_queue *q)
{
- return q->sg_timeout / (HZ / USER_HZ);
+ return jiffies_to_clock_t(q->sg_timeout);
}
static int sg_set_timeout(struct request_queue *q, int __user *p)
@@ -68,7 +68,7 @@ static int sg_set_timeout(struct request_queue *q, int __user *p)
int timeout, err = get_user(timeout, p);
if (!err)
- q->sg_timeout = timeout * (HZ / USER_HZ);
+ q->sg_timeout = clock_t_to_jiffies(timeout);
return err;
}
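
The sg timeout change above matters because HZ need not be an integer multiple of USER_HZ. A quick stand-alone arithmetic check, assuming HZ=250 and USER_HZ=100 for illustration: the old "timeout * (HZ / USER_HZ)" truncates the ratio to 2 and silently shortens the timeout, while a scaled multiply/divide (roughly what clock_t_to_jiffies()/jiffies_to_clock_t() do in this range) keeps it exact:

        #include <stdio.h>

        #define HZ      250
        #define USER_HZ 100

        int main(void)
        {
                long user_ticks  = 100;                  /* 1 second at USER_HZ */
                long old_jiffies = user_ticks * (HZ / USER_HZ);
                long new_jiffies = user_ticks * HZ / USER_HZ;

                printf("old: %ld jiffies (%.2fs)\n", old_jiffies,
                       (double)old_jiffies / HZ);
                printf("new: %ld jiffies (%.2fs)\n", new_jiffies,
                       (double)new_jiffies / HZ);
                return 0;
        }

With these values the old conversion yields 200 jiffies (0.80s) for a requested 1.00s, the new one 250 jiffies.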
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 9f7c543cc04b..01e69383d9c0 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -164,7 +164,7 @@ static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
static int cciss_revalidate(struct gendisk *disk);
static int rebuild_lun_table(ctlr_info_t *h, int first_time);
-static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
+static int deregister_disk(ctlr_info_t *h, int drv_index,
int clear_all);
static void cciss_read_capacity(int ctlr, int logvol, int withirq,
@@ -215,31 +215,17 @@ static struct block_device_operations cciss_fops = {
/*
* Enqueuing and dequeuing functions for cmdlists.
*/
-static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
+static inline void addQ(struct hlist_head *list, CommandList_struct *c)
{
- if (*Qptr == NULL) {
- *Qptr = c;
- c->next = c->prev = c;
- } else {
- c->prev = (*Qptr)->prev;
- c->next = (*Qptr);
- (*Qptr)->prev->next = c;
- (*Qptr)->prev = c;
- }
+ hlist_add_head(&c->list, list);
}
-static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
- CommandList_struct *c)
+static inline void removeQ(CommandList_struct *c)
{
- if (c && c->next != c) {
- if (*Qptr == c)
- *Qptr = c->next;
- c->prev->next = c->next;
- c->next->prev = c->prev;
- } else {
- *Qptr = NULL;
- }
- return c;
+ if (WARN_ON(hlist_unhashed(&c->list)))
+ return;
+
+ hlist_del_init(&c->list);
}
#include "cciss_scsi.c" /* For SCSI tape support */
@@ -506,6 +492,7 @@ static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
c->cmdindex = i;
}
+ INIT_HLIST_NODE(&c->list);
c->busaddr = (__u32) cmd_dma_handle;
temp64.val = (__u64) err_dma_handle;
c->ErrDesc.Addr.lower = temp64.val32.lower;
@@ -1492,8 +1479,7 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
* which keeps the interrupt handler from starting
* the queue.
*/
- ret = deregister_disk(h->gendisk[drv_index],
- &h->drv[drv_index], 0);
+ ret = deregister_disk(h, drv_index, 0);
h->drv[drv_index].busy_configuring = 0;
}
@@ -1711,8 +1697,7 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
h->drv[i].busy_configuring = 1;
spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
- return_code = deregister_disk(h->gendisk[i],
- &h->drv[i], 1);
+ return_code = deregister_disk(h, i, 1);
h->drv[i].busy_configuring = 0;
}
}
@@ -1782,15 +1767,19 @@ mem_msg:
* the highest_lun should be left unchanged and the LunID
* should not be cleared.
*/
-static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
+static int deregister_disk(ctlr_info_t *h, int drv_index,
int clear_all)
{
int i;
- ctlr_info_t *h = get_host(disk);
+ struct gendisk *disk;
+ drive_info_struct *drv;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
+ drv = &h->drv[drv_index];
+ disk = h->gendisk[drv_index];
+
/* make sure logical volume is NOT in use */
if (clear_all || (h->gendisk[0] == disk)) {
if (drv->usage_count > 1)
@@ -2548,7 +2537,8 @@ static void start_io(ctlr_info_t *h)
{
CommandList_struct *c;
- while ((c = h->reqQ) != NULL) {
+ while (!hlist_empty(&h->reqQ)) {
+ c = hlist_entry(h->reqQ.first, CommandList_struct, list);
/* can't do anything if fifo is full */
if ((h->access.fifo_full(h))) {
printk(KERN_WARNING "cciss: fifo full\n");
@@ -2556,14 +2546,14 @@ static void start_io(ctlr_info_t *h)
}
/* Get the first entry from the Request Q */
- removeQ(&(h->reqQ), c);
+ removeQ(c);
h->Qdepth--;
/* Tell the controller execute command */
h->access.submit_command(h, c);
/* Put job onto the completed Q */
- addQ(&(h->cmpQ), c);
+ addQ(&h->cmpQ, c);
}
}
@@ -2576,7 +2566,7 @@ static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
memset(c->err_info, 0, sizeof(ErrorInfo_struct));
/* add it to software queue and then send it to the controller */
- addQ(&(h->reqQ), c);
+ addQ(&h->reqQ, c);
h->Qdepth++;
if (h->Qdepth > h->maxQsinceinit)
h->maxQsinceinit = h->Qdepth;
@@ -2897,7 +2887,7 @@ static void do_cciss_request(struct request_queue *q)
spin_lock_irq(q->queue_lock);
- addQ(&(h->reqQ), c);
+ addQ(&h->reqQ, c);
h->Qdepth++;
if (h->Qdepth > h->maxQsinceinit)
h->maxQsinceinit = h->Qdepth;
@@ -2985,16 +2975,12 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id)
a = c->busaddr;
} else {
+ struct hlist_node *tmp;
+
a &= ~3;
- if ((c = h->cmpQ) == NULL) {
- printk(KERN_WARNING
- "cciss: Completion of %08x ignored\n",
- a1);
- continue;
- }
- while (c->busaddr != a) {
- c = c->next;
- if (c == h->cmpQ)
+ c = NULL;
+ hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
+ if (c->busaddr == a)
break;
}
}
@@ -3002,8 +2988,8 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id)
* If we've found the command, take it off the
* completion Q and free it
*/
- if (c->busaddr == a) {
- removeQ(&h->cmpQ, c);
+ if (c && c->busaddr == a) {
+ removeQ(c);
if (c->cmd_type == CMD_RWREQ) {
complete_command(h, c, 0);
} else if (c->cmd_type == CMD_IOCTL_PEND) {
@@ -3423,6 +3409,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
return -1;
hba[i]->busy_initializing = 1;
+ INIT_HLIST_HEAD(&hba[i]->cmpQ);
+ INIT_HLIST_HEAD(&hba[i]->reqQ);
if (cciss_pci_init(hba[i], pdev) != 0)
goto clean1;
@@ -3730,15 +3718,17 @@ static void fail_all_cmds(unsigned long ctlr)
pci_disable_device(h->pdev); /* Make sure it is really dead. */
/* move everything off the request queue onto the completed queue */
- while ((c = h->reqQ) != NULL) {
- removeQ(&(h->reqQ), c);
+ while (!hlist_empty(&h->reqQ)) {
+ c = hlist_entry(h->reqQ.first, CommandList_struct, list);
+ removeQ(c);
h->Qdepth--;
- addQ(&(h->cmpQ), c);
+ addQ(&h->cmpQ, c);
}
/* Now, fail everything on the completed queue with a HW error */
- while ((c = h->cmpQ) != NULL) {
- removeQ(&h->cmpQ, c);
+ while (!hlist_empty(&h->cmpQ)) {
+ c = hlist_entry(h->cmpQ.first, CommandList_struct, list);
+ removeQ(c);
c->err_info->CommandStatus = CMD_HARDWARE_ERR;
if (c->cmd_type == CMD_RWREQ) {
complete_command(h, c, 0);
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index 24a7efa993ab..15e2b84734e3 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -89,8 +89,8 @@ struct ctlr_info
struct access_method access;
/* queue and queue Info */
- CommandList_struct *reqQ;
- CommandList_struct *cmpQ;
+ struct hlist_head reqQ;
+ struct hlist_head cmpQ;
unsigned int Qdepth;
unsigned int maxQsinceinit;
unsigned int maxSG;
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
index 43bf5593b59b..24e22dea1a99 100644
--- a/drivers/block/cciss_cmd.h
+++ b/drivers/block/cciss_cmd.h
@@ -265,8 +265,7 @@ typedef struct _CommandList_struct {
int ctlr;
int cmd_type;
long cmdindex;
- struct _CommandList_struct *prev;
- struct _CommandList_struct *next;
+ struct hlist_node list;
struct request * rq;
struct completion *waiting;
int retry_count;
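
The cciss hunks above replace an open-coded circular list of CommandList_struct with a struct hlist_head per queue and an hlist_node per command, which is why addQ()/removeQ() collapse to one-liners and removeQ() no longer needs the queue pointer. A trimmed user-space sketch of that shape; the hlist helpers below are cut-down stand-ins for the <linux/list.h> ones, not the real implementations:

        #include <stddef.h>
        #include <stdio.h>

        struct hlist_node { struct hlist_node *next, **pprev; };
        struct hlist_head { struct hlist_node *first; };

        #define container_of(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))
        #define hlist_entry(ptr, type, member) container_of(ptr, type, member)

        static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
        {
                n->next = h->first;
                if (h->first)
                        h->first->pprev = &n->next;
                h->first = n;
                n->pprev = &h->first;
        }

        static void hlist_del_init(struct hlist_node *n)
        {
                *n->pprev = n->next;
                if (n->next)
                        n->next->pprev = n->pprev;
                n->next = NULL;
                n->pprev = &n->next;
        }

        struct cmd { int tag; struct hlist_node list; };

        static void addQ(struct hlist_head *q, struct cmd *c)
        {
                hlist_add_head(&c->list, q);
        }

        static void removeQ(struct cmd *c)
        {
                hlist_del_init(&c->list);       /* no queue pointer needed */
        }

        int main(void)
        {
                struct hlist_head reqQ = { NULL };
                struct cmd a = { 1 }, b = { 2 };

                addQ(&reqQ, &a);
                addQ(&reqQ, &b);
                removeQ(&a);                    /* delete without knowing the head */

                printf("first queued tag: %d\n",
                       hlist_entry(reqQ.first, struct cmd, list)->tag);
                return 0;
        }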
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index fb06ed659212..edbaac6c0573 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -624,20 +624,38 @@ static int loop_switch(struct loop_device *lo, struct file *file)
}
/*
+ * Helper to flush the IOs in loop while keeping the loop thread running
+ */
+static int loop_flush(struct loop_device *lo)
+{
+ /* loop not yet configured, no running thread, nothing to flush */
+ if (!lo->lo_thread)
+ return 0;
+
+ return loop_switch(lo, NULL);
+}
+
+/*
* Do the actual switch; called from the BIO completion routine
*/
static void do_loop_switch(struct loop_device *lo, struct switch_request *p)
{
struct file *file = p->file;
struct file *old_file = lo->lo_backing_file;
- struct address_space *mapping = file->f_mapping;
+ struct address_space *mapping;
+
+ /* if no new file, only flush of queued bios requested */
+ if (!file)
+ goto out;
+ mapping = file->f_mapping;
mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
lo->lo_backing_file = file;
lo->lo_blocksize = S_ISBLK(mapping->host->i_mode) ?
mapping->host->i_bdev->bd_block_size : PAGE_SIZE;
lo->old_gfp_mask = mapping_gfp_mask(mapping);
mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
+out:
complete(&p->wait);
}
@@ -901,6 +919,7 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
kthread_stop(lo->lo_thread);
+ lo->lo_queue->unplug_fn = NULL;
lo->lo_backing_file = NULL;
loop_release_xfer(lo);
@@ -1345,11 +1364,25 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
struct loop_device *lo = disk->private_data;
mutex_lock(&lo->lo_ctl_mutex);
- --lo->lo_refcnt;
- if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) && !lo->lo_refcnt)
+ if (--lo->lo_refcnt)
+ goto out;
+
+ if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
+ /*
+ * In autoclear mode, stop the loop thread
+ * and remove configuration after last close.
+ */
loop_clr_fd(lo, NULL);
+ } else {
+ /*
+ * Otherwise keep thread (if running) and config,
+ * but flush possible ongoing bios in thread.
+ */
+ loop_flush(lo);
+ }
+out:
mutex_unlock(&lo->lo_ctl_mutex);
return 0;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index d3a91cacee8c..7bcc1d8bc967 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -722,7 +722,6 @@ static int __init nbd_init(void)
for (i = 0; i < nbds_max; i++) {
struct gendisk *disk = alloc_disk(1 << part_shift);
- elevator_t *old_e;
if (!disk)
goto out;
nbd_dev[i].disk = disk;
@@ -736,11 +735,10 @@ static int __init nbd_init(void)
put_disk(disk);
goto out;
}
- old_e = disk->queue->elevator;
- if (elevator_init(disk->queue, "deadline") == 0 ||
- elevator_init(disk->queue, "noop") == 0) {
- elevator_exit(old_e);
- }
+ /*
+ * Tell the block layer that we are not a rotational device
+ */
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
}
if (register_blkdev(NBD_MAJOR, "nbd")) {
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 85d79a02d487..f151592ecf73 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -237,6 +237,8 @@ static int virtblk_probe(struct virtio_device *vdev)
goto out_put_disk;
}
+ queue_flag_set_unlocked(QUEUE_FLAG_VIRT, vblk->disk->queue);
+
if (index < 26) {
sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26);
} else if (index < (26 + 1) * 26) {
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 2d19f0cc47f2..918ef725de41 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -338,18 +338,12 @@ wait:
static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
{
struct request_queue *rq;
- elevator_t *old_e;
rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
if (rq == NULL)
return -1;
- old_e = rq->elevator;
- if (IS_ERR_VALUE(elevator_init(rq, "noop")))
- printk(KERN_WARNING
- "blkfront: Switch elevator failed, use default\n");
- else
- elevator_exit(old_e);
+ queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
/* Hard sector size and max sectors impersonate the equiv. hardware. */
blk_queue_hardsect_size(rq, sector_size);
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 7d2e91cccb13..cceace61ef28 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -1712,29 +1712,30 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
return 0;
}
-static int dvd_read_physical(struct cdrom_device_info *cdi, dvd_struct *s)
+static int dvd_read_physical(struct cdrom_device_info *cdi, dvd_struct *s,
+ struct packet_command *cgc)
{
unsigned char buf[21], *base;
struct dvd_layer *layer;
- struct packet_command cgc;
struct cdrom_device_ops *cdo = cdi->ops;
int ret, layer_num = s->physical.layer_num;
if (layer_num >= DVD_LAYERS)
return -EINVAL;
- init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
- cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE;
- cgc.cmd[6] = layer_num;
- cgc.cmd[7] = s->type;
- cgc.cmd[9] = cgc.buflen & 0xff;
+ init_cdrom_command(cgc, buf, sizeof(buf), CGC_DATA_READ);
+ cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
+ cgc->cmd[6] = layer_num;
+ cgc->cmd[7] = s->type;
+ cgc->cmd[9] = cgc->buflen & 0xff;
/*
* refrain from reporting errors on non-existing layers (mainly)
*/
- cgc.quiet = 1;
+ cgc->quiet = 1;
- if ((ret = cdo->generic_packet(cdi, &cgc)))
+ ret = cdo->generic_packet(cdi, cgc);
+ if (ret)
return ret;
base = &buf[4];
@@ -1762,21 +1763,22 @@ static int dvd_read_physical(struct cdrom_device_info *cdi, dvd_struct *s)
return 0;
}
-static int dvd_read_copyright(struct cdrom_device_info *cdi, dvd_struct *s)
+static int dvd_read_copyright(struct cdrom_device_info *cdi, dvd_struct *s,
+ struct packet_command *cgc)
{
int ret;
u_char buf[8];
- struct packet_command cgc;
struct cdrom_device_ops *cdo = cdi->ops;
- init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
- cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE;
- cgc.cmd[6] = s->copyright.layer_num;
- cgc.cmd[7] = s->type;
- cgc.cmd[8] = cgc.buflen >> 8;
- cgc.cmd[9] = cgc.buflen & 0xff;
+ init_cdrom_command(cgc, buf, sizeof(buf), CGC_DATA_READ);
+ cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
+ cgc->cmd[6] = s->copyright.layer_num;
+ cgc->cmd[7] = s->type;
+ cgc->cmd[8] = cgc->buflen >> 8;
+ cgc->cmd[9] = cgc->buflen & 0xff;
- if ((ret = cdo->generic_packet(cdi, &cgc)))
+ ret = cdo->generic_packet(cdi, cgc);
+ if (ret)
return ret;
s->copyright.cpst = buf[4];
@@ -1785,79 +1787,89 @@ static int dvd_read_copyright(struct cdrom_device_info *cdi, dvd_struct *s)
return 0;
}
-static int dvd_read_disckey(struct cdrom_device_info *cdi, dvd_struct *s)
+static int dvd_read_disckey(struct cdrom_device_info *cdi, dvd_struct *s,
+ struct packet_command *cgc)
{
int ret, size;
u_char *buf;
- struct packet_command cgc;
struct cdrom_device_ops *cdo = cdi->ops;
size = sizeof(s->disckey.value) + 4;
- if ((buf = kmalloc(size, GFP_KERNEL)) == NULL)
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
return -ENOMEM;
- init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
- cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE;
- cgc.cmd[7] = s->type;
- cgc.cmd[8] = size >> 8;
- cgc.cmd[9] = size & 0xff;
- cgc.cmd[10] = s->disckey.agid << 6;
+ init_cdrom_command(cgc, buf, size, CGC_DATA_READ);
+ cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
+ cgc->cmd[7] = s->type;
+ cgc->cmd[8] = size >> 8;
+ cgc->cmd[9] = size & 0xff;
+ cgc->cmd[10] = s->disckey.agid << 6;
- if (!(ret = cdo->generic_packet(cdi, &cgc)))
+ ret = cdo->generic_packet(cdi, cgc);
+ if (!ret)
memcpy(s->disckey.value, &buf[4], sizeof(s->disckey.value));
kfree(buf);
return ret;
}
-static int dvd_read_bca(struct cdrom_device_info *cdi, dvd_struct *s)
+static int dvd_read_bca(struct cdrom_device_info *cdi, dvd_struct *s,
+ struct packet_command *cgc)
{
- int ret;
- u_char buf[4 + 188];
- struct packet_command cgc;
+ int ret, size = 4 + 188;
+ u_char *buf;
struct cdrom_device_ops *cdo = cdi->ops;
- init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
- cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE;
- cgc.cmd[7] = s->type;
- cgc.cmd[9] = cgc.buflen & 0xff;
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- if ((ret = cdo->generic_packet(cdi, &cgc)))
- return ret;
+ init_cdrom_command(cgc, buf, size, CGC_DATA_READ);
+ cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
+ cgc->cmd[7] = s->type;
+ cgc->cmd[9] = cgc->buflen & 0xff;
+
+ ret = cdo->generic_packet(cdi, cgc);
+ if (ret)
+ goto out;
s->bca.len = buf[0] << 8 | buf[1];
if (s->bca.len < 12 || s->bca.len > 188) {
cdinfo(CD_WARNING, "Received invalid BCA length (%d)\n", s->bca.len);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
memcpy(s->bca.value, &buf[4], s->bca.len);
-
- return 0;
+ ret = 0;
+out:
+ kfree(buf);
+ return ret;
}
-static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s)
+static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s,
+ struct packet_command *cgc)
{
int ret = 0, size;
u_char *buf;
- struct packet_command cgc;
struct cdrom_device_ops *cdo = cdi->ops;
size = sizeof(s->manufact.value) + 4;
- if ((buf = kmalloc(size, GFP_KERNEL)) == NULL)
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
return -ENOMEM;
- init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
- cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE;
- cgc.cmd[7] = s->type;
- cgc.cmd[8] = size >> 8;
- cgc.cmd[9] = size & 0xff;
+ init_cdrom_command(cgc, buf, size, CGC_DATA_READ);
+ cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
+ cgc->cmd[7] = s->type;
+ cgc->cmd[8] = size >> 8;
+ cgc->cmd[9] = size & 0xff;
- if ((ret = cdo->generic_packet(cdi, &cgc))) {
- kfree(buf);
- return ret;
- }
+ ret = cdo->generic_packet(cdi, cgc);
+ if (ret)
+ goto out;
s->manufact.len = buf[0] << 8 | buf[1];
if (s->manufact.len < 0 || s->manufact.len > 2048) {
@@ -1868,27 +1880,29 @@ static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s)
memcpy(s->manufact.value, &buf[4], s->manufact.len);
}
+out:
kfree(buf);
return ret;
}
-static int dvd_read_struct(struct cdrom_device_info *cdi, dvd_struct *s)
+static int dvd_read_struct(struct cdrom_device_info *cdi, dvd_struct *s,
+ struct packet_command *cgc)
{
switch (s->type) {
case DVD_STRUCT_PHYSICAL:
- return dvd_read_physical(cdi, s);
+ return dvd_read_physical(cdi, s, cgc);
case DVD_STRUCT_COPYRIGHT:
- return dvd_read_copyright(cdi, s);
+ return dvd_read_copyright(cdi, s, cgc);
case DVD_STRUCT_DISCKEY:
- return dvd_read_disckey(cdi, s);
+ return dvd_read_disckey(cdi, s, cgc);
case DVD_STRUCT_BCA:
- return dvd_read_bca(cdi, s);
+ return dvd_read_bca(cdi, s, cgc);
case DVD_STRUCT_MANUFACT:
- return dvd_read_manufact(cdi, s);
+ return dvd_read_manufact(cdi, s, cgc);
default:
cdinfo(CD_WARNING, ": Invalid DVD structure read requested (%d)\n",
@@ -2787,271 +2801,360 @@ static int cdrom_switch_blocksize(struct cdrom_device_info *cdi, int size)
return cdo->generic_packet(cdi, &cgc);
}
-static int mmc_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
- unsigned long arg)
-{
- struct cdrom_device_ops *cdo = cdi->ops;
- struct packet_command cgc;
+static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi,
+ void __user *arg,
+ struct packet_command *cgc,
+ int cmd)
+{
struct request_sense sense;
- unsigned char buffer[32];
- int ret = 0;
-
- memset(&cgc, 0, sizeof(cgc));
+ struct cdrom_msf msf;
+ int blocksize = 0, format = 0, lba;
+ int ret;
- /* build a unified command and queue it through
- cdo->generic_packet() */
switch (cmd) {
case CDROMREADRAW:
+ blocksize = CD_FRAMESIZE_RAW;
+ break;
case CDROMREADMODE1:
- case CDROMREADMODE2: {
- struct cdrom_msf msf;
- int blocksize = 0, format = 0, lba;
-
- switch (cmd) {
- case CDROMREADRAW:
- blocksize = CD_FRAMESIZE_RAW;
- break;
- case CDROMREADMODE1:
- blocksize = CD_FRAMESIZE;
- format = 2;
- break;
- case CDROMREADMODE2:
- blocksize = CD_FRAMESIZE_RAW0;
- break;
- }
- IOCTL_IN(arg, struct cdrom_msf, msf);
- lba = msf_to_lba(msf.cdmsf_min0,msf.cdmsf_sec0,msf.cdmsf_frame0);
- /* FIXME: we need upper bound checking, too!! */
- if (lba < 0)
- return -EINVAL;
- cgc.buffer = kmalloc(blocksize, GFP_KERNEL);
- if (cgc.buffer == NULL)
- return -ENOMEM;
- memset(&sense, 0, sizeof(sense));
- cgc.sense = &sense;
- cgc.data_direction = CGC_DATA_READ;
- ret = cdrom_read_block(cdi, &cgc, lba, 1, format, blocksize);
- if (ret && sense.sense_key==0x05 && sense.asc==0x20 && sense.ascq==0x00) {
- /*
- * SCSI-II devices are not required to support
- * READ_CD, so let's try switching block size
- */
- /* FIXME: switch back again... */
- if ((ret = cdrom_switch_blocksize(cdi, blocksize))) {
- kfree(cgc.buffer);
- return ret;
- }
- cgc.sense = NULL;
- ret = cdrom_read_cd(cdi, &cgc, lba, blocksize, 1);
- ret |= cdrom_switch_blocksize(cdi, blocksize);
- }
- if (!ret && copy_to_user((char __user *)arg, cgc.buffer, blocksize))
- ret = -EFAULT;
- kfree(cgc.buffer);
+ blocksize = CD_FRAMESIZE;
+ format = 2;
+ break;
+ case CDROMREADMODE2:
+ blocksize = CD_FRAMESIZE_RAW0;
+ break;
+ }
+ IOCTL_IN(arg, struct cdrom_msf, msf);
+ lba = msf_to_lba(msf.cdmsf_min0, msf.cdmsf_sec0, msf.cdmsf_frame0);
+ /* FIXME: we need upper bound checking, too!! */
+ if (lba < 0)
+ return -EINVAL;
+
+ cgc->buffer = kmalloc(blocksize, GFP_KERNEL);
+ if (cgc->buffer == NULL)
+ return -ENOMEM;
+
+ memset(&sense, 0, sizeof(sense));
+ cgc->sense = &sense;
+ cgc->data_direction = CGC_DATA_READ;
+ ret = cdrom_read_block(cdi, cgc, lba, 1, format, blocksize);
+ if (ret && sense.sense_key == 0x05 &&
+ sense.asc == 0x20 &&
+ sense.ascq == 0x00) {
+ /*
+ * SCSI-II devices are not required to support
+ * READ_CD, so let's try switching block size
+ */
+ /* FIXME: switch back again... */
+ ret = cdrom_switch_blocksize(cdi, blocksize);
+ if (ret)
+ goto out;
+ cgc->sense = NULL;
+ ret = cdrom_read_cd(cdi, cgc, lba, blocksize, 1);
+ ret |= cdrom_switch_blocksize(cdi, blocksize);
+ }
+ if (!ret && copy_to_user(arg, cgc->buffer, blocksize))
+ ret = -EFAULT;
+out:
+ kfree(cgc->buffer);
+ return ret;
+}
+
+static noinline int mmc_ioctl_cdrom_read_audio(struct cdrom_device_info *cdi,
+ void __user *arg)
+{
+ struct cdrom_read_audio ra;
+ int lba;
+
+ IOCTL_IN(arg, struct cdrom_read_audio, ra);
+
+ if (ra.addr_format == CDROM_MSF)
+ lba = msf_to_lba(ra.addr.msf.minute,
+ ra.addr.msf.second,
+ ra.addr.msf.frame);
+ else if (ra.addr_format == CDROM_LBA)
+ lba = ra.addr.lba;
+ else
+ return -EINVAL;
+
+ /* FIXME: we need upper bound checking, too!! */
+ if (lba < 0 || ra.nframes <= 0 || ra.nframes > CD_FRAMES)
+ return -EINVAL;
+
+ return cdrom_read_cdda(cdi, ra.buf, lba, ra.nframes);
+}
+
+static noinline int mmc_ioctl_cdrom_subchannel(struct cdrom_device_info *cdi,
+ void __user *arg)
+{
+ int ret;
+ struct cdrom_subchnl q;
+ u_char requested, back;
+ IOCTL_IN(arg, struct cdrom_subchnl, q);
+ requested = q.cdsc_format;
+ if (!((requested == CDROM_MSF) ||
+ (requested == CDROM_LBA)))
+ return -EINVAL;
+ q.cdsc_format = CDROM_MSF;
+ ret = cdrom_read_subchannel(cdi, &q, 0);
+ if (ret)
return ret;
- }
- case CDROMREADAUDIO: {
- struct cdrom_read_audio ra;
- int lba;
-
- IOCTL_IN(arg, struct cdrom_read_audio, ra);
-
- if (ra.addr_format == CDROM_MSF)
- lba = msf_to_lba(ra.addr.msf.minute,
- ra.addr.msf.second,
- ra.addr.msf.frame);
- else if (ra.addr_format == CDROM_LBA)
- lba = ra.addr.lba;
- else
- return -EINVAL;
+ back = q.cdsc_format; /* local copy */
+ sanitize_format(&q.cdsc_absaddr, &back, requested);
+ sanitize_format(&q.cdsc_reladdr, &q.cdsc_format, requested);
+ IOCTL_OUT(arg, struct cdrom_subchnl, q);
+ /* cdinfo(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */
+ return 0;
+}
- /* FIXME: we need upper bound checking, too!! */
- if (lba < 0 || ra.nframes <= 0 || ra.nframes > CD_FRAMES)
- return -EINVAL;
+static noinline int mmc_ioctl_cdrom_play_msf(struct cdrom_device_info *cdi,
+ void __user *arg,
+ struct packet_command *cgc)
+{
+ struct cdrom_device_ops *cdo = cdi->ops;
+ struct cdrom_msf msf;
+ cdinfo(CD_DO_IOCTL, "entering CDROMPLAYMSF\n");
+ IOCTL_IN(arg, struct cdrom_msf, msf);
+ cgc->cmd[0] = GPCMD_PLAY_AUDIO_MSF;
+ cgc->cmd[3] = msf.cdmsf_min0;
+ cgc->cmd[4] = msf.cdmsf_sec0;
+ cgc->cmd[5] = msf.cdmsf_frame0;
+ cgc->cmd[6] = msf.cdmsf_min1;
+ cgc->cmd[7] = msf.cdmsf_sec1;
+ cgc->cmd[8] = msf.cdmsf_frame1;
+ cgc->data_direction = CGC_DATA_NONE;
+ return cdo->generic_packet(cdi, cgc);
+}
- return cdrom_read_cdda(cdi, ra.buf, lba, ra.nframes);
- }
- case CDROMSUBCHNL: {
- struct cdrom_subchnl q;
- u_char requested, back;
- IOCTL_IN(arg, struct cdrom_subchnl, q);
- requested = q.cdsc_format;
- if (!((requested == CDROM_MSF) ||
- (requested == CDROM_LBA)))
- return -EINVAL;
- q.cdsc_format = CDROM_MSF;
- if ((ret = cdrom_read_subchannel(cdi, &q, 0)))
- return ret;
- back = q.cdsc_format; /* local copy */
- sanitize_format(&q.cdsc_absaddr, &back, requested);
- sanitize_format(&q.cdsc_reladdr, &q.cdsc_format, requested);
- IOCTL_OUT(arg, struct cdrom_subchnl, q);
- /* cdinfo(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */
- return 0;
- }
- case CDROMPLAYMSF: {
- struct cdrom_msf msf;
- cdinfo(CD_DO_IOCTL, "entering CDROMPLAYMSF\n");
- IOCTL_IN(arg, struct cdrom_msf, msf);
- cgc.cmd[0] = GPCMD_PLAY_AUDIO_MSF;
- cgc.cmd[3] = msf.cdmsf_min0;
- cgc.cmd[4] = msf.cdmsf_sec0;
- cgc.cmd[5] = msf.cdmsf_frame0;
- cgc.cmd[6] = msf.cdmsf_min1;
- cgc.cmd[7] = msf.cdmsf_sec1;
- cgc.cmd[8] = msf.cdmsf_frame1;
- cgc.data_direction = CGC_DATA_NONE;
- return cdo->generic_packet(cdi, &cgc);
- }
- case CDROMPLAYBLK: {
- struct cdrom_blk blk;
- cdinfo(CD_DO_IOCTL, "entering CDROMPLAYBLK\n");
- IOCTL_IN(arg, struct cdrom_blk, blk);
- cgc.cmd[0] = GPCMD_PLAY_AUDIO_10;
- cgc.cmd[2] = (blk.from >> 24) & 0xff;
- cgc.cmd[3] = (blk.from >> 16) & 0xff;
- cgc.cmd[4] = (blk.from >> 8) & 0xff;
- cgc.cmd[5] = blk.from & 0xff;
- cgc.cmd[7] = (blk.len >> 8) & 0xff;
- cgc.cmd[8] = blk.len & 0xff;
- cgc.data_direction = CGC_DATA_NONE;
- return cdo->generic_packet(cdi, &cgc);
- }
- case CDROMVOLCTRL:
- case CDROMVOLREAD: {
- struct cdrom_volctrl volctrl;
- char mask[sizeof(buffer)];
- unsigned short offset;
+static noinline int mmc_ioctl_cdrom_play_blk(struct cdrom_device_info *cdi,
+ void __user *arg,
+ struct packet_command *cgc)
+{
+ struct cdrom_device_ops *cdo = cdi->ops;
+ struct cdrom_blk blk;
+ cdinfo(CD_DO_IOCTL, "entering CDROMPLAYBLK\n");
+ IOCTL_IN(arg, struct cdrom_blk, blk);
+ cgc->cmd[0] = GPCMD_PLAY_AUDIO_10;
+ cgc->cmd[2] = (blk.from >> 24) & 0xff;
+ cgc->cmd[3] = (blk.from >> 16) & 0xff;
+ cgc->cmd[4] = (blk.from >> 8) & 0xff;
+ cgc->cmd[5] = blk.from & 0xff;
+ cgc->cmd[7] = (blk.len >> 8) & 0xff;
+ cgc->cmd[8] = blk.len & 0xff;
+ cgc->data_direction = CGC_DATA_NONE;
+ return cdo->generic_packet(cdi, cgc);
+}
+
+static noinline int mmc_ioctl_cdrom_volume(struct cdrom_device_info *cdi,
+ void __user *arg,
+ struct packet_command *cgc,
+ unsigned int cmd)
+{
+ struct cdrom_volctrl volctrl;
+ unsigned char buffer[32];
+ char mask[sizeof(buffer)];
+ unsigned short offset;
+ int ret;
- cdinfo(CD_DO_IOCTL, "entering CDROMVOLUME\n");
+ cdinfo(CD_DO_IOCTL, "entering CDROMVOLUME\n");
- IOCTL_IN(arg, struct cdrom_volctrl, volctrl);
+ IOCTL_IN(arg, struct cdrom_volctrl, volctrl);
- cgc.buffer = buffer;
- cgc.buflen = 24;
- if ((ret = cdrom_mode_sense(cdi, &cgc, GPMODE_AUDIO_CTL_PAGE, 0)))
- return ret;
+ cgc->buffer = buffer;
+ cgc->buflen = 24;
+ ret = cdrom_mode_sense(cdi, cgc, GPMODE_AUDIO_CTL_PAGE, 0);
+ if (ret)
+ return ret;
- /* originally the code depended on buffer[1] to determine
- how much data is available for transfer. buffer[1] is
- unfortunately ambigious and the only reliable way seem
- to be to simply skip over the block descriptor... */
- offset = 8 + be16_to_cpu(*(__be16 *)(buffer+6));
-
- if (offset + 16 > sizeof(buffer))
- return -E2BIG;
-
- if (offset + 16 > cgc.buflen) {
- cgc.buflen = offset+16;
- ret = cdrom_mode_sense(cdi, &cgc,
- GPMODE_AUDIO_CTL_PAGE, 0);
- if (ret)
- return ret;
- }
+ /* originally the code depended on buffer[1] to determine
+ how much data is available for transfer. buffer[1] is
+ unfortunately ambiguous and the only reliable way seems
+ to be to simply skip over the block descriptor... */
+ offset = 8 + be16_to_cpu(*(__be16 *)(buffer + 6));
+
+ if (offset + 16 > sizeof(buffer))
+ return -E2BIG;
+
+ if (offset + 16 > cgc->buflen) {
+ cgc->buflen = offset + 16;
+ ret = cdrom_mode_sense(cdi, cgc,
+ GPMODE_AUDIO_CTL_PAGE, 0);
+ if (ret)
+ return ret;
+ }
- /* sanity check */
- if ((buffer[offset] & 0x3f) != GPMODE_AUDIO_CTL_PAGE ||
- buffer[offset+1] < 14)
- return -EINVAL;
+ /* sanity check */
+ if ((buffer[offset] & 0x3f) != GPMODE_AUDIO_CTL_PAGE ||
+ buffer[offset + 1] < 14)
+ return -EINVAL;
- /* now we have the current volume settings. if it was only
- a CDROMVOLREAD, return these values */
- if (cmd == CDROMVOLREAD) {
- volctrl.channel0 = buffer[offset+9];
- volctrl.channel1 = buffer[offset+11];
- volctrl.channel2 = buffer[offset+13];
- volctrl.channel3 = buffer[offset+15];
- IOCTL_OUT(arg, struct cdrom_volctrl, volctrl);
- return 0;
- }
+ /* now we have the current volume settings. if it was only
+ a CDROMVOLREAD, return these values */
+ if (cmd == CDROMVOLREAD) {
+ volctrl.channel0 = buffer[offset+9];
+ volctrl.channel1 = buffer[offset+11];
+ volctrl.channel2 = buffer[offset+13];
+ volctrl.channel3 = buffer[offset+15];
+ IOCTL_OUT(arg, struct cdrom_volctrl, volctrl);
+ return 0;
+ }
- /* get the volume mask */
- cgc.buffer = mask;
- if ((ret = cdrom_mode_sense(cdi, &cgc,
- GPMODE_AUDIO_CTL_PAGE, 1)))
- return ret;
+ /* get the volume mask */
+ cgc->buffer = mask;
+ ret = cdrom_mode_sense(cdi, cgc, GPMODE_AUDIO_CTL_PAGE, 1);
+ if (ret)
+ return ret;
- buffer[offset+9] = volctrl.channel0 & mask[offset+9];
- buffer[offset+11] = volctrl.channel1 & mask[offset+11];
- buffer[offset+13] = volctrl.channel2 & mask[offset+13];
- buffer[offset+15] = volctrl.channel3 & mask[offset+15];
+ buffer[offset + 9] = volctrl.channel0 & mask[offset + 9];
+ buffer[offset + 11] = volctrl.channel1 & mask[offset + 11];
+ buffer[offset + 13] = volctrl.channel2 & mask[offset + 13];
+ buffer[offset + 15] = volctrl.channel3 & mask[offset + 15];
- /* set volume */
- cgc.buffer = buffer + offset - 8;
- memset(cgc.buffer, 0, 8);
- return cdrom_mode_select(cdi, &cgc);
- }
+ /* set volume */
+ cgc->buffer = buffer + offset - 8;
+ memset(cgc->buffer, 0, 8);
+ return cdrom_mode_select(cdi, cgc);
+}
- case CDROMSTART:
- case CDROMSTOP: {
- cdinfo(CD_DO_IOCTL, "entering CDROMSTART/CDROMSTOP\n");
- cgc.cmd[0] = GPCMD_START_STOP_UNIT;
- cgc.cmd[1] = 1;
- cgc.cmd[4] = (cmd == CDROMSTART) ? 1 : 0;
- cgc.data_direction = CGC_DATA_NONE;
- return cdo->generic_packet(cdi, &cgc);
- }
+static noinline int mmc_ioctl_cdrom_start_stop(struct cdrom_device_info *cdi,
+ struct packet_command *cgc,
+ int cmd)
+{
+ struct cdrom_device_ops *cdo = cdi->ops;
+ cdinfo(CD_DO_IOCTL, "entering CDROMSTART/CDROMSTOP\n");
+ cgc->cmd[0] = GPCMD_START_STOP_UNIT;
+ cgc->cmd[1] = 1;
+ cgc->cmd[4] = (cmd == CDROMSTART) ? 1 : 0;
+ cgc->data_direction = CGC_DATA_NONE;
+ return cdo->generic_packet(cdi, cgc);
+}
- case CDROMPAUSE:
- case CDROMRESUME: {
- cdinfo(CD_DO_IOCTL, "entering CDROMPAUSE/CDROMRESUME\n");
- cgc.cmd[0] = GPCMD_PAUSE_RESUME;
- cgc.cmd[8] = (cmd == CDROMRESUME) ? 1 : 0;
- cgc.data_direction = CGC_DATA_NONE;
- return cdo->generic_packet(cdi, &cgc);
- }
+static noinline int mmc_ioctl_cdrom_pause_resume(struct cdrom_device_info *cdi,
+ struct packet_command *cgc,
+ int cmd)
+{
+ struct cdrom_device_ops *cdo = cdi->ops;
+ cdinfo(CD_DO_IOCTL, "entering CDROMPAUSE/CDROMRESUME\n");
+ cgc->cmd[0] = GPCMD_PAUSE_RESUME;
+ cgc->cmd[8] = (cmd == CDROMRESUME) ? 1 : 0;
+ cgc->data_direction = CGC_DATA_NONE;
+ return cdo->generic_packet(cdi, cgc);
+}
- case DVD_READ_STRUCT: {
- dvd_struct *s;
- int size = sizeof(dvd_struct);
- if (!CDROM_CAN(CDC_DVD))
- return -ENOSYS;
- if ((s = kmalloc(size, GFP_KERNEL)) == NULL)
- return -ENOMEM;
- cdinfo(CD_DO_IOCTL, "entering DVD_READ_STRUCT\n");
- if (copy_from_user(s, (dvd_struct __user *)arg, size)) {
- kfree(s);
- return -EFAULT;
- }
- if ((ret = dvd_read_struct(cdi, s))) {
- kfree(s);
- return ret;
- }
- if (copy_to_user((dvd_struct __user *)arg, s, size))
- ret = -EFAULT;
+static noinline int mmc_ioctl_dvd_read_struct(struct cdrom_device_info *cdi,
+ void __user *arg,
+ struct packet_command *cgc)
+{
+ int ret;
+ dvd_struct *s;
+ int size = sizeof(dvd_struct);
+
+ if (!CDROM_CAN(CDC_DVD))
+ return -ENOSYS;
+
+ s = kmalloc(size, GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ cdinfo(CD_DO_IOCTL, "entering DVD_READ_STRUCT\n");
+ if (copy_from_user(s, arg, size)) {
kfree(s);
+ return -EFAULT;
+ }
+
+ ret = dvd_read_struct(cdi, s, cgc);
+ if (ret)
+ goto out;
+
+ if (copy_to_user(arg, s, size))
+ ret = -EFAULT;
+out:
+ kfree(s);
+ return ret;
+}
+
+static noinline int mmc_ioctl_dvd_auth(struct cdrom_device_info *cdi,
+ void __user *arg)
+{
+ int ret;
+ dvd_authinfo ai;
+ if (!CDROM_CAN(CDC_DVD))
+ return -ENOSYS;
+ cdinfo(CD_DO_IOCTL, "entering DVD_AUTH\n");
+ IOCTL_IN(arg, dvd_authinfo, ai);
+ ret = dvd_do_auth(cdi, &ai);
+ if (ret)
return ret;
- }
+ IOCTL_OUT(arg, dvd_authinfo, ai);
+ return 0;
+}
- case DVD_AUTH: {
- dvd_authinfo ai;
- if (!CDROM_CAN(CDC_DVD))
- return -ENOSYS;
- cdinfo(CD_DO_IOCTL, "entering DVD_AUTH\n");
- IOCTL_IN(arg, dvd_authinfo, ai);
- if ((ret = dvd_do_auth (cdi, &ai)))
- return ret;
- IOCTL_OUT(arg, dvd_authinfo, ai);
- return 0;
- }
+static noinline int mmc_ioctl_cdrom_next_writable(struct cdrom_device_info *cdi,
+ void __user *arg)
+{
+ int ret;
+ long next = 0;
+ cdinfo(CD_DO_IOCTL, "entering CDROM_NEXT_WRITABLE\n");
+ ret = cdrom_get_next_writable(cdi, &next);
+ if (ret)
+ return ret;
+ IOCTL_OUT(arg, long, next);
+ return 0;
+}
- case CDROM_NEXT_WRITABLE: {
- long next = 0;
- cdinfo(CD_DO_IOCTL, "entering CDROM_NEXT_WRITABLE\n");
- if ((ret = cdrom_get_next_writable(cdi, &next)))
- return ret;
- IOCTL_OUT(arg, long, next);
- return 0;
- }
- case CDROM_LAST_WRITTEN: {
- long last = 0;
- cdinfo(CD_DO_IOCTL, "entering CDROM_LAST_WRITTEN\n");
- if ((ret = cdrom_get_last_written(cdi, &last)))
- return ret;
- IOCTL_OUT(arg, long, last);
- return 0;
- }
- } /* switch */
+static noinline int mmc_ioctl_cdrom_last_written(struct cdrom_device_info *cdi,
+ void __user *arg)
+{
+ int ret;
+ long last = 0;
+ cdinfo(CD_DO_IOCTL, "entering CDROM_LAST_WRITTEN\n");
+ ret = cdrom_get_last_written(cdi, &last);
+ if (ret)
+ return ret;
+ IOCTL_OUT(arg, long, last);
+ return 0;
+}
+
+static int mmc_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
+ unsigned long arg)
+{
+ struct packet_command cgc;
+ void __user *userptr = (void __user *)arg;
+
+ memset(&cgc, 0, sizeof(cgc));
+
+ /* build a unified command and queue it through
+ cdo->generic_packet() */
+ switch (cmd) {
+ case CDROMREADRAW:
+ case CDROMREADMODE1:
+ case CDROMREADMODE2:
+ return mmc_ioctl_cdrom_read_data(cdi, userptr, &cgc, cmd);
+ case CDROMREADAUDIO:
+ return mmc_ioctl_cdrom_read_audio(cdi, userptr);
+ case CDROMSUBCHNL:
+ return mmc_ioctl_cdrom_subchannel(cdi, userptr);
+ case CDROMPLAYMSF:
+ return mmc_ioctl_cdrom_play_msf(cdi, userptr, &cgc);
+ case CDROMPLAYBLK:
+ return mmc_ioctl_cdrom_play_blk(cdi, userptr, &cgc);
+ case CDROMVOLCTRL:
+ case CDROMVOLREAD:
+ return mmc_ioctl_cdrom_volume(cdi, userptr, &cgc, cmd);
+ case CDROMSTART:
+ case CDROMSTOP:
+ return mmc_ioctl_cdrom_start_stop(cdi, &cgc, cmd);
+ case CDROMPAUSE:
+ case CDROMRESUME:
+ return mmc_ioctl_cdrom_pause_resume(cdi, &cgc, cmd);
+ case DVD_READ_STRUCT:
+ return mmc_ioctl_dvd_read_struct(cdi, userptr, &cgc);
+ case DVD_AUTH:
+ return mmc_ioctl_dvd_auth(cdi, userptr);
+ case CDROM_NEXT_WRITABLE:
+ return mmc_ioctl_cdrom_next_writable(cdi, userptr);
+ case CDROM_LAST_WRITTEN:
+ return mmc_ioctl_cdrom_last_written(cdi, userptr);
+ }
return -ENOTTY;
}
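
The cdrom.c rework above turns one huge switch with large on-stack buffers into a thin dispatcher plus a noinline helper per command, each receiving the shared packet_command so its buffers only live while that helper runs. A stand-alone sketch of the same shape; the command numbers and struct layout here are made up for the demo and are not the driver's:

        #include <stdio.h>
        #include <string.h>

        struct packet_command { unsigned char cmd[12]; int buflen; };

        #define DEMO_START 1
        #define DEMO_STOP  2

        static __attribute__((noinline))
        int demo_start_stop(struct packet_command *cgc, unsigned int cmd)
        {
                cgc->cmd[0] = 0x1b;                     /* START STOP UNIT */
                cgc->cmd[4] = (cmd == DEMO_START) ? 1 : 0;
                printf("start/stop: cmd[4]=%d\n", cgc->cmd[4]);
                return 0;
        }

        static int demo_ioctl(unsigned int cmd)
        {
                struct packet_command cgc;

                memset(&cgc, 0, sizeof(cgc));
                switch (cmd) {
                case DEMO_START:
                case DEMO_STOP:
                        return demo_start_stop(&cgc, cmd);
                }
                return -1;                              /* -ENOTTY in the driver */
        }

        int main(void)
        {
                return demo_ioctl(DEMO_STOP);
        }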
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 53fdc7ff3870..32b8bbf5003e 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -46,7 +46,7 @@
/*
* The High Precision Event Timer driver.
* This driver is closely modelled after the rtc.c driver.
- * http://www.intel.com/hardwaredesign/hpetspec.htm
+ * http://www.intel.com/hardwaredesign/hpetspec_1.pdf
*/
#define HPET_USER_FREQ (64)
#define HPET_DRIFT (500)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 675076f5fca8..d26891bfcd41 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -558,23 +558,9 @@ struct timer_rand_state {
unsigned dont_count_entropy:1;
};
-static struct timer_rand_state *irq_timer_state[NR_IRQS];
-
-static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
-{
- if (irq >= nr_irqs)
- return NULL;
-
- return irq_timer_state[irq];
-}
-
-static void set_timer_rand_state(unsigned int irq, struct timer_rand_state *state)
-{
- if (irq >= nr_irqs)
- return;
-
- irq_timer_state[irq] = state;
-}
+#ifndef CONFIG_SPARSE_IRQ
+struct timer_rand_state *irq_timer_state[NR_IRQS];
+#endif
static struct timer_rand_state input_timer_state;
@@ -933,8 +919,10 @@ void rand_initialize_irq(int irq)
{
struct timer_rand_state *state;
+#ifndef CONFIG_SPARSE_IRQ
if (irq >= nr_irqs)
return;
+#endif
state = get_timer_rand_state(irq);
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index c20171078d1d..e1129fad96dd 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -57,11 +57,6 @@ u32 acpi_pm_read_verified(void)
return v2;
}
-static cycle_t acpi_pm_read_slow(void)
-{
- return (cycle_t)acpi_pm_read_verified();
-}
-
static cycle_t acpi_pm_read(void)
{
return (cycle_t)read_pmtmr();
@@ -88,6 +83,11 @@ static int __init acpi_pm_good_setup(char *__str)
}
__setup("acpi_pm_good", acpi_pm_good_setup);
+static cycle_t acpi_pm_read_slow(void)
+{
+ return (cycle_t)acpi_pm_read_verified();
+}
+
static inline void acpi_pm_need_workaround(void)
{
clocksource_acpi_pm.read = acpi_pm_read_slow;
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index b9b7fc6ff1eb..e1ece89fe922 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -697,7 +697,7 @@ static enum hrtimer_restart ads7846_timer(struct hrtimer *handle)
struct ads7846 *ts = container_of(handle, struct ads7846, timer);
int status = 0;
- spin_lock_irq(&ts->lock);
+ spin_lock(&ts->lock);
if (unlikely(!get_pendown_state(ts) ||
device_suspended(&ts->spi->dev))) {
@@ -728,7 +728,7 @@ static enum hrtimer_restart ads7846_timer(struct hrtimer *handle)
dev_err(&ts->spi->dev, "spi_async --> %d\n", status);
}
- spin_unlock_irq(&ts->lock);
+ spin_unlock(&ts->lock);
return HRTIMER_NORESTART;
}
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index ce26c84af064..3326750ec02c 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1060,7 +1060,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_page_pool;
}
- cc->bs = bioset_create(MIN_IOS, MIN_IOS);
+ cc->bs = bioset_create(MIN_IOS, 0);
if (!cc->bs) {
ti->error = "Cannot allocate crypt bioset";
goto bad_bs;
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 2fd6d4450637..a34338567a2a 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -56,7 +56,7 @@ struct dm_io_client *dm_io_client_create(unsigned num_pages)
if (!client->pool)
goto bad;
- client->bios = bioset_create(16, 16);
+ client->bios = bioset_create(16, 0);
if (!client->bios)
goto bad;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 343094c3feeb..421c9f02d8ca 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1093,7 +1093,7 @@ static struct mapped_device *alloc_dev(int minor)
if (!md->tio_pool)
goto bad_tio_pool;
- md->bs = bioset_create(16, 16);
+ md->bs = bioset_create(16, 0);
if (!md->bs)
goto bad_no_bioset;
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 2de5a3238c94..f78371b22529 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -5,6 +5,7 @@
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
+#include <asm/smp.h>
#include <linux/intel-iommu.h>
#include "intr_remapping.h"
@@ -19,17 +20,75 @@ struct irq_2_iommu {
u8 irte_mask;
};
-static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
+#ifdef CONFIG_SPARSE_IRQ
+static struct irq_2_iommu *get_one_free_irq_2_iommu(int cpu)
+{
+ struct irq_2_iommu *iommu;
+ int node;
+
+ node = cpu_to_node(cpu);
+
+ iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
+ printk(KERN_DEBUG "alloc irq_2_iommu on cpu %d node %d\n", cpu, node);
+
+ return iommu;
+}
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
- return (irq < nr_irqs) ? irq_2_iommuX + irq : NULL;
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+
+ if (WARN_ON_ONCE(!desc))
+ return NULL;
+
+ return desc->irq_2_iommu;
+}
+
+static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu)
+{
+ struct irq_desc *desc;
+ struct irq_2_iommu *irq_iommu;
+
+ /*
+ * alloc irq desc if not allocated already.
+ */
+ desc = irq_to_desc_alloc_cpu(irq, cpu);
+ if (!desc) {
+ printk(KERN_INFO "can not get irq_desc for %d\n", irq);
+ return NULL;
+ }
+
+ irq_iommu = desc->irq_2_iommu;
+
+ if (!irq_iommu)
+ desc->irq_2_iommu = get_one_free_irq_2_iommu(cpu);
+
+ return desc->irq_2_iommu;
}
static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
+ return irq_2_iommu_alloc_cpu(irq, boot_cpu_id);
+}
+
+#else /* !CONFIG_SPARSE_IRQ */
+
+static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
+
+static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
+{
+ if (irq < nr_irqs)
+ return &irq_2_iommuX[irq];
+
+ return NULL;
+}
+static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
+{
return irq_2_iommu(irq);
}
+#endif
static DEFINE_SPINLOCK(irq_2_ir_lock);
@@ -86,9 +145,11 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
if (!count)
return -1;
+#ifndef CONFIG_SPARSE_IRQ
/* protect irq_2_iommu_alloc later */
if (irq >= nr_irqs)
return -1;
+#endif
/*
* start the IRTE search from index 0.
@@ -130,6 +191,12 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
table->base[i].present = 1;
irq_iommu = irq_2_iommu_alloc(irq);
+ if (!irq_iommu) {
+ spin_unlock(&irq_2_ir_lock);
+ printk(KERN_ERR "can't allocate irq_2_iommu\n");
+ return -1;
+ }
+
irq_iommu->iommu = iommu;
irq_iommu->irte_index = index;
irq_iommu->sub_handle = 0;
@@ -177,6 +244,12 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
irq_iommu = irq_2_iommu_alloc(irq);
+ if (!irq_iommu) {
+ spin_unlock(&irq_2_ir_lock);
+ printk(KERN_ERR "can't allocate irq_2_iommu\n");
+ return -1;
+ }
+
irq_iommu->iommu = iommu;
irq_iommu->irte_index = index;
irq_iommu->sub_handle = subhandle;
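
The CONFIG_SPARSE_IRQ half of the intr_remapping.c change replaces the static NR_IRQS-sized array with per-IRQ data allocated on first use and hung off the descriptor. A rough user-space stand-in for that lazy-allocation pattern, using a plain pointer table and calloc where the kernel uses irq_desc and kzalloc_node(); the size and names are invented:

        #include <stdio.h>
        #include <stdlib.h>

        struct irq_cookie { int data; };

        #define MAX_IRQS 1024
        static struct irq_cookie *cookies[MAX_IRQS];

        static struct irq_cookie *irq_cookie(unsigned int irq)
        {
                return irq < MAX_IRQS ? cookies[irq] : NULL;
        }

        static struct irq_cookie *irq_cookie_alloc(unsigned int irq)
        {
                if (irq >= MAX_IRQS)
                        return NULL;
                if (!cookies[irq])
                        cookies[irq] = calloc(1, sizeof(*cookies[irq]));
                return cookies[irq];
        }

        int main(void)
        {
                struct irq_cookie *c = irq_cookie_alloc(513);

                if (!c)
                        return 1;
                c->data = 42;
                printf("irq 513 cookie data: %d (lookup %s)\n",
                       c->data, irq_cookie(513) ? "hit" : "miss");
                return 0;
        }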
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 74801f7df9c9..11a51f8ed3b3 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -103,11 +103,11 @@ static void msix_set_enable(struct pci_dev *dev, int enable)
}
}
-static void msix_flush_writes(unsigned int irq)
+static void msix_flush_writes(struct irq_desc *desc)
{
struct msi_desc *entry;
- entry = get_irq_msi(irq);
+ entry = get_irq_desc_msi(desc);
BUG_ON(!entry || !entry->dev);
switch (entry->msi_attrib.type) {
case PCI_CAP_ID_MSI:
@@ -135,11 +135,11 @@ static void msix_flush_writes(unsigned int irq)
* Returns 1 if it succeeded in masking the interrupt and 0 if the device
* doesn't support MSI masking.
*/
-static int msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag)
+static int msi_set_mask_bits(struct irq_desc *desc, u32 mask, u32 flag)
{
struct msi_desc *entry;
- entry = get_irq_msi(irq);
+ entry = get_irq_desc_msi(desc);
BUG_ON(!entry || !entry->dev);
switch (entry->msi_attrib.type) {
case PCI_CAP_ID_MSI:
@@ -172,9 +172,9 @@ static int msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag)
return 1;
}
-void read_msi_msg(unsigned int irq, struct msi_msg *msg)
+void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
{
- struct msi_desc *entry = get_irq_msi(irq);
+ struct msi_desc *entry = get_irq_desc_msi(desc);
switch(entry->msi_attrib.type) {
case PCI_CAP_ID_MSI:
{
@@ -211,9 +211,16 @@ void read_msi_msg(unsigned int irq, struct msi_msg *msg)
}
}
-void write_msi_msg(unsigned int irq, struct msi_msg *msg)
+void read_msi_msg(unsigned int irq, struct msi_msg *msg)
{
- struct msi_desc *entry = get_irq_msi(irq);
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ read_msi_msg_desc(desc, msg);
+}
+
+void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
+{
+ struct msi_desc *entry = get_irq_desc_msi(desc);
switch (entry->msi_attrib.type) {
case PCI_CAP_ID_MSI:
{
@@ -252,21 +259,31 @@ void write_msi_msg(unsigned int irq, struct msi_msg *msg)
entry->msg = *msg;
}
+void write_msi_msg(unsigned int irq, struct msi_msg *msg)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ write_msi_msg_desc(desc, msg);
+}
+
void mask_msi_irq(unsigned int irq)
{
- msi_set_mask_bits(irq, 1, 1);
- msix_flush_writes(irq);
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ msi_set_mask_bits(desc, 1, 1);
+ msix_flush_writes(desc);
}
void unmask_msi_irq(unsigned int irq)
{
- msi_set_mask_bits(irq, 1, 0);
- msix_flush_writes(irq);
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ msi_set_mask_bits(desc, 1, 0);
+ msix_flush_writes(desc);
}
static int msi_free_irqs(struct pci_dev* dev);
-
static struct msi_desc* alloc_msi_entry(void)
{
struct msi_desc *entry;
@@ -303,9 +320,11 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
pci_intx_for_msi(dev, 0);
msi_set_enable(dev, 0);
write_msi_msg(dev->irq, &entry->msg);
- if (entry->msi_attrib.maskbit)
- msi_set_mask_bits(dev->irq, entry->msi_attrib.maskbits_mask,
+ if (entry->msi_attrib.maskbit) {
+ struct irq_desc *desc = irq_to_desc(dev->irq);
+ msi_set_mask_bits(desc, entry->msi_attrib.maskbits_mask,
entry->msi_attrib.masked);
+ }
pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
control &= ~PCI_MSI_FLAGS_QSIZE;
@@ -327,8 +346,9 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
msix_set_enable(dev, 0);
list_for_each_entry(entry, &dev->msi_list, list) {
+ struct irq_desc *desc = irq_to_desc(entry->irq);
write_msi_msg(entry->irq, &entry->msg);
- msi_set_mask_bits(entry->irq, 1, entry->msi_attrib.masked);
+ msi_set_mask_bits(desc, 1, entry->msi_attrib.masked);
}
BUG_ON(list_empty(&dev->msi_list));
@@ -596,7 +616,8 @@ void pci_msi_shutdown(struct pci_dev* dev)
/* Return the pci reset with msi irqs unmasked */
if (entry->msi_attrib.maskbit) {
u32 mask = entry->msi_attrib.maskbits_mask;
- msi_set_mask_bits(dev->irq, mask, ~mask);
+ struct irq_desc *desc = irq_to_desc(dev->irq);
+ msi_set_mask_bits(desc, mask, ~mask);
}
if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI)
return;
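
The msi.c hunks follow one recurring pattern: the real work moves into a *_desc() variant that takes the already-looked-up irq_desc, and the old irq-number entry points become thin wrappers, so callers that already hold the descriptor avoid repeated lookups. A toy stand-alone illustration of that wrapper layering (the helper names and lookup table here are invented, not the kernel's):

        #include <stdio.h>

        struct irq_desc { int masked; };

        static struct irq_desc descs[16];

        static struct irq_desc *irq_to_desc(unsigned int irq)
        {
                return irq < 16 ? &descs[irq] : NULL;
        }

        /* desc-based helper: callers in a loop resolve the desc only once */
        static void demo_mask_desc(struct irq_desc *desc)
        {
                desc->masked = 1;
        }

        /* legacy irq-number wrapper */
        static void demo_mask_irq(unsigned int irq)
        {
                demo_mask_desc(irq_to_desc(irq));
        }

        int main(void)
        {
                demo_mask_irq(3);
                printf("irq 3 masked: %d\n", descs[3].masked);
                return 0;
        }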
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 1e3b934a4cf7..46625cd38743 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -141,8 +141,12 @@ static void init_evtchn_cpu_bindings(void)
int i;
/* By default all event channels notify CPU#0. */
- for_each_irq_desc(i, desc)
+ for_each_irq_desc(i, desc) {
+ if (!desc)
+ continue;
+
desc->affinity = cpumask_of_cpu(0);
+ }
#endif
memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
@@ -229,15 +233,20 @@ static void unmask_evtchn(int port)
static int find_unbound_irq(void)
{
int irq;
+ struct irq_desc *desc;
/* Only allocate from dynirq range */
- for_each_irq_nr(irq)
+ for (irq = 0; irq < nr_irqs; irq++)
if (irq_bindcount[irq] == 0)
break;
if (irq == nr_irqs)
panic("No available IRQ to bind to: increase nr_irqs!\n");
+ desc = irq_to_desc_alloc_cpu(irq, 0);
+ if (WARN_ON(desc == NULL))
+ return -1;
+
return irq;
}
@@ -792,7 +801,7 @@ void xen_irq_resume(void)
mask_evtchn(evtchn);
/* No IRQ <-> event-channel mappings. */
- for_each_irq_nr(irq)
+ for (irq = 0; irq < nr_irqs; irq++)
irq_info[irq].evtchn = 0; /* zap event-channel binding */
for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
@@ -824,7 +833,7 @@ void __init xen_init_IRQ(void)
mask_evtchn(i);
/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
- for_each_irq_nr(i)
+ for (i = 0; i < nr_irqs; i++)
irq_bindcount[i] = 0;
irq_ctx_init(smp_processor_id());
diff --git a/fs/aio.c b/fs/aio.c
index f658441d5666..d6f89d3c15e8 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -191,6 +191,20 @@ static int aio_setup_ring(struct kioctx *ctx)
kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
} while(0)
+static void ctx_rcu_free(struct rcu_head *head)
+{
+ struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
+ unsigned nr_events = ctx->max_reqs;
+
+ kmem_cache_free(kioctx_cachep, ctx);
+
+ if (nr_events) {
+ spin_lock(&aio_nr_lock);
+ BUG_ON(aio_nr - nr_events > aio_nr);
+ aio_nr -= nr_events;
+ spin_unlock(&aio_nr_lock);
+ }
+}
/* __put_ioctx
* Called when the last user of an aio context has gone away,
@@ -198,8 +212,6 @@ static int aio_setup_ring(struct kioctx *ctx)
*/
static void __put_ioctx(struct kioctx *ctx)
{
- unsigned nr_events = ctx->max_reqs;
-
BUG_ON(ctx->reqs_active);
cancel_delayed_work(&ctx->wq);
@@ -208,14 +220,7 @@ static void __put_ioctx(struct kioctx *ctx)
mmdrop(ctx->mm);
ctx->mm = NULL;
pr_debug("__put_ioctx: freeing %p\n", ctx);
- kmem_cache_free(kioctx_cachep, ctx);
-
- if (nr_events) {
- spin_lock(&aio_nr_lock);
- BUG_ON(aio_nr - nr_events > aio_nr);
- aio_nr -= nr_events;
- spin_unlock(&aio_nr_lock);
- }
+ call_rcu(&ctx->rcu_head, ctx_rcu_free);
}
#define get_ioctx(kioctx) do { \
@@ -235,6 +240,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
{
struct mm_struct *mm;
struct kioctx *ctx;
+ int did_sync = 0;
/* Prevent overflows */
if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
@@ -267,21 +273,30 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
goto out_freectx;
/* limit the number of system wide aios */
- spin_lock(&aio_nr_lock);
- if (aio_nr + ctx->max_reqs > aio_max_nr ||
- aio_nr + ctx->max_reqs < aio_nr)
- ctx->max_reqs = 0;
- else
- aio_nr += ctx->max_reqs;
- spin_unlock(&aio_nr_lock);
+ do {
+ spin_lock_bh(&aio_nr_lock);
+ if (aio_nr + nr_events > aio_max_nr ||
+ aio_nr + nr_events < aio_nr)
+ ctx->max_reqs = 0;
+ else
+ aio_nr += ctx->max_reqs;
+ spin_unlock_bh(&aio_nr_lock);
+ if (ctx->max_reqs || did_sync)
+ break;
+
+ /* wait for rcu callbacks to have completed before giving up */
+ synchronize_rcu();
+ did_sync = 1;
+ ctx->max_reqs = nr_events;
+ } while (1);
+
if (ctx->max_reqs == 0)
goto out_cleanup;
/* now link into global list. */
- write_lock(&mm->ioctx_list_lock);
- ctx->next = mm->ioctx_list;
- mm->ioctx_list = ctx;
- write_unlock(&mm->ioctx_list_lock);
+ spin_lock(&mm->ioctx_lock);
+ hlist_add_head_rcu(&ctx->list, &mm->ioctx_list);
+ spin_unlock(&mm->ioctx_lock);
dprintk("aio: allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
ctx, ctx->user_id, current->mm, ctx->ring_info.nr);
@@ -375,11 +390,12 @@ ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
*/
void exit_aio(struct mm_struct *mm)
{
- struct kioctx *ctx = mm->ioctx_list;
- mm->ioctx_list = NULL;
- while (ctx) {
- struct kioctx *next = ctx->next;
- ctx->next = NULL;
+ struct kioctx *ctx;
+
+ while (!hlist_empty(&mm->ioctx_list)) {
+ ctx = hlist_entry(mm->ioctx_list.first, struct kioctx, list);
+ hlist_del_rcu(&ctx->list);
+
aio_cancel_all(ctx);
wait_for_all_aios(ctx);
@@ -394,7 +410,6 @@ void exit_aio(struct mm_struct *mm)
atomic_read(&ctx->users), ctx->dead,
ctx->reqs_active);
put_ioctx(ctx);
- ctx = next;
}
}
@@ -555,19 +570,21 @@ int aio_put_req(struct kiocb *req)
static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
- struct kioctx *ioctx;
- struct mm_struct *mm;
+ struct mm_struct *mm = current->mm;
+ struct kioctx *ctx = NULL;
+ struct hlist_node *n;
- mm = current->mm;
- read_lock(&mm->ioctx_list_lock);
- for (ioctx = mm->ioctx_list; ioctx; ioctx = ioctx->next)
- if (likely(ioctx->user_id == ctx_id && !ioctx->dead)) {
- get_ioctx(ioctx);
+ rcu_read_lock();
+
+ hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) {
+ if (ctx->user_id == ctx_id && !ctx->dead) {
+ get_ioctx(ctx);
break;
}
- read_unlock(&mm->ioctx_list_lock);
+ }
- return ioctx;
+ rcu_read_unlock();
+ return ctx;
}
/*
@@ -1215,19 +1232,14 @@ out:
static void io_destroy(struct kioctx *ioctx)
{
struct mm_struct *mm = current->mm;
- struct kioctx **tmp;
int was_dead;
/* delete the entry from the list if someone else hasn't already */
- write_lock(&mm->ioctx_list_lock);
+ spin_lock(&mm->ioctx_lock);
was_dead = ioctx->dead;
ioctx->dead = 1;
- for (tmp = &mm->ioctx_list; *tmp && *tmp != ioctx;
- tmp = &(*tmp)->next)
- ;
- if (*tmp)
- *tmp = ioctx->next;
- write_unlock(&mm->ioctx_list_lock);
+ hlist_del_rcu(&ioctx->list);
+ spin_unlock(&mm->ioctx_lock);
dprintk("aio_release(%p)\n", ioctx);
if (likely(!was_dead))
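
The fs/aio.c conversion above replaces the rwlock-protected singly linked kioctx list with an RCU hlist: lookup_ioctx() walks mm->ioctx_list under rcu_read_lock() only, while writers take mm->ioctx_lock, unlink with hlist_del_rcu(), and defer the kmem_cache_free() through call_rcu() so in-flight readers never touch freed memory. A condensed sketch of the same pattern with invented names (the four-argument hlist_for_each_entry_rcu() matches the kernel this series targets):

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
	unsigned long id;
	struct hlist_node list;
	struct rcu_head rcu;
};

/* Lock-free reader: safe against concurrent removal thanks to RCU. */
static struct item *item_lookup(struct hlist_head *head, unsigned long id)
{
	struct item *it, *found = NULL;
	struct hlist_node *n;

	rcu_read_lock();
	hlist_for_each_entry_rcu(it, n, head, list) {
		if (it->id == id) {
			found = it;	/* a real user would take a reference here */
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

static void item_rcu_free(struct rcu_head *head)
{
	kfree(container_of(head, struct item, rcu));
}

/* Writer: unlink under the spinlock, free only after a grace period. */
static void item_remove(spinlock_t *lock, struct item *it)
{
	spin_lock(lock);
	hlist_del_rcu(&it->list);
	spin_unlock(lock);
	call_rcu(&it->rcu, item_rcu_free);
}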
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 19caf7c962ac..77ebc3c263d6 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -111,7 +111,7 @@ void bio_integrity_free(struct bio *bio, struct bio_set *bs)
&& bip->bip_buf != NULL)
kfree(bip->bip_buf);
- mempool_free(bip->bip_vec, bs->bvec_pools[bip->bip_pool]);
+ bvec_free_bs(bs, bip->bip_vec, bip->bip_pool);
mempool_free(bip, bs->bio_integrity_pool);
bio->bi_integrity = NULL;
diff --git a/fs/bio.c b/fs/bio.c
index df99c882b807..711cee103602 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -31,7 +31,11 @@
DEFINE_TRACE(block_split);
-static struct kmem_cache *bio_slab __read_mostly;
+/*
+ * Test patch to inline a certain number of bi_io_vec's inside the bio
+ * itself, to shrink a bio data allocation from two mempool calls to one
+ */
+#define BIO_INLINE_VECS 4
static mempool_t *bio_split_pool __read_mostly;
@@ -40,9 +44,8 @@ static mempool_t *bio_split_pool __read_mostly;
* break badly! cannot be bigger than what you can fit into an
* unsigned short
*/
-
#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
-static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
+struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
};
#undef BV
@@ -53,12 +56,121 @@ static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
*/
struct bio_set *fs_bio_set;
+/*
+ * Our slab pool management
+ */
+struct bio_slab {
+ struct kmem_cache *slab;
+ unsigned int slab_ref;
+ unsigned int slab_size;
+ char name[8];
+};
+static DEFINE_MUTEX(bio_slab_lock);
+static struct bio_slab *bio_slabs;
+static unsigned int bio_slab_nr, bio_slab_max;
+
+static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
+{
+ unsigned int sz = sizeof(struct bio) + extra_size;
+ struct kmem_cache *slab = NULL;
+ struct bio_slab *bslab;
+ unsigned int i, entry = -1;
+
+ mutex_lock(&bio_slab_lock);
+
+ i = 0;
+ while (i < bio_slab_nr) {
+ struct bio_slab *bslab = &bio_slabs[i];
+
+ if (!bslab->slab && entry == -1)
+ entry = i;
+ else if (bslab->slab_size == sz) {
+ slab = bslab->slab;
+ bslab->slab_ref++;
+ break;
+ }
+ i++;
+ }
+
+ if (slab)
+ goto out_unlock;
+
+ if (bio_slab_nr == bio_slab_max && entry == -1) {
+ bio_slab_max <<= 1;
+ bio_slabs = krealloc(bio_slabs,
+ bio_slab_max * sizeof(struct bio_slab),
+ GFP_KERNEL);
+ if (!bio_slabs)
+ goto out_unlock;
+ }
+ if (entry == -1)
+ entry = bio_slab_nr++;
+
+ bslab = &bio_slabs[entry];
+
+ snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
+ slab = kmem_cache_create(bslab->name, sz, 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!slab)
+ goto out_unlock;
+
+ printk("bio: create slab <%s> at %d\n", bslab->name, entry);
+ bslab->slab = slab;
+ bslab->slab_ref = 1;
+ bslab->slab_size = sz;
+out_unlock:
+ mutex_unlock(&bio_slab_lock);
+ return slab;
+}
+
+static void bio_put_slab(struct bio_set *bs)
+{
+ struct bio_slab *bslab = NULL;
+ unsigned int i;
+
+ mutex_lock(&bio_slab_lock);
+
+ for (i = 0; i < bio_slab_nr; i++) {
+ if (bs->bio_slab == bio_slabs[i].slab) {
+ bslab = &bio_slabs[i];
+ break;
+ }
+ }
+
+ if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
+ goto out;
+
+ WARN_ON(!bslab->slab_ref);
+
+ if (--bslab->slab_ref)
+ goto out;
+
+ kmem_cache_destroy(bslab->slab);
+ bslab->slab = NULL;
+
+out:
+ mutex_unlock(&bio_slab_lock);
+}
+
unsigned int bvec_nr_vecs(unsigned short idx)
{
return bvec_slabs[idx].nr_vecs;
}
-struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct bio_set *bs)
+void bvec_free_bs(struct bio_set *bs, struct bio_vec *bv, unsigned int idx)
+{
+ BIO_BUG_ON(idx >= BIOVEC_NR_POOLS);
+
+ if (idx == BIOVEC_MAX_IDX)
+ mempool_free(bv, bs->bvec_pool);
+ else {
+ struct biovec_slab *bvs = bvec_slabs + idx;
+
+ kmem_cache_free(bvs->slab, bv);
+ }
+}
+
+struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx,
+ struct bio_set *bs)
{
struct bio_vec *bvl;
@@ -67,60 +179,85 @@ struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct
* If not, this is a bio_kmalloc() allocation and just do a
* kzalloc() for the exact number of vecs right away.
*/
- if (bs) {
+ if (!bs)
+ bvl = kmalloc(nr * sizeof(struct bio_vec), gfp_mask);
+
+ /*
+ * see comment near bvec_array define!
+ */
+ switch (nr) {
+ case 1:
+ *idx = 0;
+ break;
+ case 2 ... 4:
+ *idx = 1;
+ break;
+ case 5 ... 16:
+ *idx = 2;
+ break;
+ case 17 ... 64:
+ *idx = 3;
+ break;
+ case 65 ... 128:
+ *idx = 4;
+ break;
+ case 129 ... BIO_MAX_PAGES:
+ *idx = 5;
+ break;
+ default:
+ return NULL;
+ }
+
+ /*
+ * idx now points to the pool we want to allocate from. only the
+ * 1-vec entry pool is mempool backed.
+ */
+ if (*idx == BIOVEC_MAX_IDX) {
+fallback:
+ bvl = mempool_alloc(bs->bvec_pool, gfp_mask);
+ } else {
+ struct biovec_slab *bvs = bvec_slabs + *idx;
+ gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
+
/*
- * see comment near bvec_array define!
+ * Make this allocation restricted and don't dump info on
+ * allocation failures, since we'll fall back to the mempool
+ * in case of failure.
*/
- switch (nr) {
- case 1:
- *idx = 0;
- break;
- case 2 ... 4:
- *idx = 1;
- break;
- case 5 ... 16:
- *idx = 2;
- break;
- case 17 ... 64:
- *idx = 3;
- break;
- case 65 ... 128:
- *idx = 4;
- break;
- case 129 ... BIO_MAX_PAGES:
- *idx = 5;
- break;
- default:
- return NULL;
- }
+ __gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
/*
- * idx now points to the pool we want to allocate from
+ * Try a slab allocation. If this fails and __GFP_WAIT
+ * is set, retry with the 1-entry mempool
*/
- bvl = mempool_alloc(bs->bvec_pools[*idx], gfp_mask);
- if (bvl)
- memset(bvl, 0,
- bvec_nr_vecs(*idx) * sizeof(struct bio_vec));
- } else
- bvl = kzalloc(nr * sizeof(struct bio_vec), gfp_mask);
+ bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
+ if (unlikely(!bvl && (gfp_mask & __GFP_WAIT))) {
+ *idx = BIOVEC_MAX_IDX;
+ goto fallback;
+ }
+ }
return bvl;
}
-void bio_free(struct bio *bio, struct bio_set *bio_set)
+void bio_free(struct bio *bio, struct bio_set *bs)
{
- if (bio->bi_io_vec) {
- const int pool_idx = BIO_POOL_IDX(bio);
+ void *p;
- BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);
-
- mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]);
- }
+ if (bio_has_allocated_vec(bio))
+ bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));
if (bio_integrity(bio))
- bio_integrity_free(bio, bio_set);
+ bio_integrity_free(bio, bs);
+
+ /*
+ * If we have front padding, adjust the bio pointer before freeing
+ */
+ p = bio;
+ if (bs->front_pad)
+ p -= bs->front_pad;
- mempool_free(bio, bio_set->bio_pool);
+ mempool_free(p, bs->bio_pool);
}
/*
@@ -133,7 +270,8 @@ static void bio_fs_destructor(struct bio *bio)
static void bio_kmalloc_destructor(struct bio *bio)
{
- kfree(bio->bi_io_vec);
+ if (bio_has_allocated_vec(bio))
+ kfree(bio->bi_io_vec);
kfree(bio);
}
@@ -157,16 +295,20 @@ void bio_init(struct bio *bio)
* for a &struct bio to become free. If a %NULL @bs is passed in, we will
* fall back to just using @kmalloc to allocate the required memory.
*
- * allocate bio and iovecs from the memory pools specified by the
- * bio_set structure, or @kmalloc if none given.
+ * Note that the caller must set ->bi_destructor on successful return
+ * of a bio, to do the appropriate freeing of the bio once the reference
+ * count drops to zero.
**/
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
- struct bio *bio;
+ struct bio *bio = NULL;
+
+ if (bs) {
+ void *p = mempool_alloc(bs->bio_pool, gfp_mask);
- if (bs)
- bio = mempool_alloc(bs->bio_pool, gfp_mask);
- else
+ if (p)
+ bio = p + bs->front_pad;
+ } else
bio = kmalloc(sizeof(*bio), gfp_mask);
if (likely(bio)) {
@@ -176,7 +318,15 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
if (likely(nr_iovecs)) {
unsigned long uninitialized_var(idx);
- bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
+ if (nr_iovecs <= BIO_INLINE_VECS) {
+ idx = 0;
+ bvl = bio->bi_inline_vecs;
+ nr_iovecs = BIO_INLINE_VECS;
+ } else {
+ bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx,
+ bs);
+ nr_iovecs = bvec_nr_vecs(idx);
+ }
if (unlikely(!bvl)) {
if (bs)
mempool_free(bio, bs->bio_pool);
@@ -186,7 +336,7 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
goto out;
}
bio->bi_flags |= idx << BIO_POOL_OFFSET;
- bio->bi_max_vecs = bvec_nr_vecs(idx);
+ bio->bi_max_vecs = nr_iovecs;
}
bio->bi_io_vec = bvl;
}
@@ -1346,30 +1496,18 @@ EXPORT_SYMBOL(bio_sector_offset);
*/
static int biovec_create_pools(struct bio_set *bs, int pool_entries)
{
- int i;
+ struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX;
- for (i = 0; i < BIOVEC_NR_POOLS; i++) {
- struct biovec_slab *bp = bvec_slabs + i;
- mempool_t **bvp = bs->bvec_pools + i;
+ bs->bvec_pool = mempool_create_slab_pool(pool_entries, bp->slab);
+ if (!bs->bvec_pool)
+ return -ENOMEM;
- *bvp = mempool_create_slab_pool(pool_entries, bp->slab);
- if (!*bvp)
- return -ENOMEM;
- }
return 0;
}
static void biovec_free_pools(struct bio_set *bs)
{
- int i;
-
- for (i = 0; i < BIOVEC_NR_POOLS; i++) {
- mempool_t *bvp = bs->bvec_pools[i];
-
- if (bvp)
- mempool_destroy(bvp);
- }
-
+ mempool_destroy(bs->bvec_pool);
}
void bioset_free(struct bio_set *bs)
@@ -1379,25 +1517,49 @@ void bioset_free(struct bio_set *bs)
bioset_integrity_free(bs);
biovec_free_pools(bs);
+ bio_put_slab(bs);
kfree(bs);
}
-struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size)
+/**
+ * bioset_create - Create a bio_set
+ * @pool_size: Number of bio and bio_vecs to cache in the mempool
+ * @front_pad: Number of bytes to allocate in front of the returned bio
+ *
+ * Description:
+ * Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
+ * to ask for a number of bytes to be allocated in front of the bio.
+ * Front pad allocation is useful for embedding the bio inside
+ * another structure, to avoid allocating extra data to go with the bio.
+ * Note that the bio must be embedded at the END of that structure always,
+ * or things will break badly.
+ */
+struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
{
- struct bio_set *bs = kzalloc(sizeof(*bs), GFP_KERNEL);
+ unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
+ struct bio_set *bs;
+ bs = kzalloc(sizeof(*bs), GFP_KERNEL);
if (!bs)
return NULL;
- bs->bio_pool = mempool_create_slab_pool(bio_pool_size, bio_slab);
+ bs->front_pad = front_pad;
+
+ bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
+ if (!bs->bio_slab) {
+ kfree(bs);
+ return NULL;
+ }
+
+ bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
if (!bs->bio_pool)
goto bad;
- if (bioset_integrity_create(bs, bio_pool_size))
+ if (bioset_integrity_create(bs, pool_size))
goto bad;
- if (!biovec_create_pools(bs, bvec_pool_size))
+ if (!biovec_create_pools(bs, pool_size))
return bs;
bad:
@@ -1421,12 +1583,16 @@ static void __init biovec_init_slabs(void)
static int __init init_bio(void)
{
- bio_slab = KMEM_CACHE(bio, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
+ bio_slab_max = 2;
+ bio_slab_nr = 0;
+ bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
+ if (!bio_slabs)
+ panic("bio: can't allocate bios\n");
bio_integrity_init_slab();
biovec_init_slabs();
- fs_bio_set = bioset_create(BIO_POOL_SIZE, 2);
+ fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
if (!fs_bio_set)
panic("bio: can't allocate bios\n");
diff --git a/fs/buffer.c b/fs/buffer.c
index 10179cfa1152..776ae091d3b0 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -99,10 +99,18 @@ __clear_page_buffers(struct page *page)
page_cache_release(page);
}
+
+static int quiet_error(struct buffer_head *bh)
+{
+ if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
+ return 0;
+ return 1;
+}
+
+
static void buffer_io_error(struct buffer_head *bh)
{
char b[BDEVNAME_SIZE];
-
printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
bdevname(bh->b_bdev, b),
(unsigned long long)bh->b_blocknr);
@@ -144,7 +152,7 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
if (uptodate) {
set_buffer_uptodate(bh);
} else {
- if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
+ if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
buffer_io_error(bh);
printk(KERN_WARNING "lost page write due to "
"I/O error on %s\n",
@@ -394,7 +402,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
set_buffer_uptodate(bh);
} else {
clear_buffer_uptodate(bh);
- if (printk_ratelimit())
+ if (!quiet_error(bh))
buffer_io_error(bh);
SetPageError(page);
}
@@ -455,7 +463,7 @@ static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
if (uptodate) {
set_buffer_uptodate(bh);
} else {
- if (printk_ratelimit()) {
+ if (!quiet_error(bh)) {
buffer_io_error(bh);
printk(KERN_WARNING "lost page write due to "
"I/O error on %s\n",
@@ -2913,6 +2921,9 @@ static void end_bio_bh_io_sync(struct bio *bio, int err)
set_bit(BH_Eopnotsupp, &bh->b_state);
}
+ if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
+ set_bit(BH_Quiet, &bh->b_state);
+
bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
bio_put(bio);
}
diff --git a/fs/exec.c b/fs/exec.c
index 1f59ea079cbb..02d2e120542d 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -773,7 +773,6 @@ static int de_thread(struct task_struct *tsk)
struct signal_struct *sig = tsk->signal;
struct sighand_struct *oldsighand = tsk->sighand;
spinlock_t *lock = &oldsighand->siglock;
- struct task_struct *leader = NULL;
int count;
if (thread_group_empty(tsk))
@@ -811,7 +810,7 @@ static int de_thread(struct task_struct *tsk)
* and to assume its PID:
*/
if (!thread_group_leader(tsk)) {
- leader = tsk->group_leader;
+ struct task_struct *leader = tsk->group_leader;
sig->notify_count = -1; /* for exit_notify() */
for (;;) {
@@ -863,8 +862,9 @@ static int de_thread(struct task_struct *tsk)
BUG_ON(leader->exit_state != EXIT_ZOMBIE);
leader->exit_state = EXIT_DEAD;
-
write_unlock_irq(&tasklist_lock);
+
+ release_task(leader);
}
sig->group_exit_task = NULL;
@@ -873,8 +873,6 @@ static int de_thread(struct task_struct *tsk)
no_thread_group:
exit_itimers(sig);
flush_itimer_signals();
- if (leader)
- release_task(leader);
if (atomic_read(&oldsighand->count) != 1) {
struct sighand_struct *newsighand;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index e4a241c65dbe..04158ad74dbb 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1721,7 +1721,7 @@ static loff_t ext4_max_size(int blkbits, int has_huge_files)
/* small i_blocks in vfs inode? */
if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
/*
- * CONFIG_LSF is not enabled implies the inode
+ * CONFIG_LBD is not enabled implies the inode
* i_block represent total blocks in 512 bytes
* 32 == size of vfs inode i_blocks * 8
*/
@@ -1764,7 +1764,7 @@ static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
/*
- * !has_huge_files or CONFIG_LSF is not enabled
+ * !has_huge_files or CONFIG_LBD is not enabled
* implies the inode i_block represent total blocks in
* 512 bytes 32 == size of vfs inode i_blocks * 8
*/
@@ -2021,13 +2021,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (has_huge_files) {
/*
* Large file size enabled file system can only be
- * mount if kernel is build with CONFIG_LSF
+ * mount if kernel is build with CONFIG_LBD
*/
if (sizeof(root->i_blocks) < sizeof(u64) &&
!(sb->s_flags & MS_RDONLY)) {
printk(KERN_ERR "EXT4-fs: %s: Filesystem with huge "
"files cannot be mounted read-write "
- "without CONFIG_LSF.\n", sb->s_id);
+ "without CONFIG_LBD.\n", sb->s_id);
goto failed_mount;
}
}
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 81904f07679d..3bb1cf1e7425 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -44,10 +44,13 @@ static int show_stat(struct seq_file *p, void *v)
softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
-
- for_each_irq_nr(j)
+ for_each_irq_nr(j) {
+#ifdef CONFIG_SPARSE_IRQ
+ if (!irq_to_desc(j))
+ continue;
+#endif
sum += kstat_irqs_cpu(j, i);
-
+ }
sum += arch_irq_stat_cpu(i);
}
sum += arch_irq_stat();
@@ -92,7 +95,12 @@ static int show_stat(struct seq_file *p, void *v)
/* sum again ? it could be updated? */
for_each_irq_nr(j) {
per_irq_sum = 0;
-
+#ifdef CONFIG_SPARSE_IRQ
+ if (!irq_to_desc(j)) {
+ seq_printf(p, " %u", per_irq_sum);
+ continue;
+ }
+#endif
for_each_possible_cpu(i)
per_irq_sum += kstat_irqs_cpu(j, i);
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 4c794d73fb84..8af276361bf2 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -41,15 +41,14 @@ struct bug_entry {
#ifndef __WARN
#ifndef __ASSEMBLY__
-extern void warn_on_slowpath(const char *file, const int line);
extern void warn_slowpath(const char *file, const int line,
const char *fmt, ...) __attribute__((format(printf, 3, 4)));
#define WANT_WARN_ON_SLOWPATH
#endif
-#define __WARN() warn_on_slowpath(__FILE__, __LINE__)
-#define __WARN_printf(arg...) warn_slowpath(__FILE__, __LINE__, arg)
+#define __WARN() warn_slowpath(__FILE__, __LINE__, NULL)
+#define __WARN_printf(arg...) warn_slowpath(__FILE__, __LINE__, arg)
#else
-#define __WARN_printf(arg...) do { printk(arg); __WARN(); } while (0)
+#define __WARN_printf(arg...) do { printk(arg); __WARN(); } while (0)
#endif
#ifndef WARN_ON
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index e531783e5d78..95ac82340c3b 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -313,6 +313,7 @@ unifdef-y += ptrace.h
unifdef-y += qnx4_fs.h
unifdef-y += quota.h
unifdef-y += random.h
+unifdef-y += irqnr.h
unifdef-y += reboot.h
unifdef-y += reiserfs_fs.h
unifdef-y += reiserfs_xattr.h
diff --git a/include/linux/aio.h b/include/linux/aio.h
index f6b8cf99b596..b16a957030f8 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -5,6 +5,7 @@
#include <linux/workqueue.h>
#include <linux/aio_abi.h>
#include <linux/uio.h>
+#include <linux/rcupdate.h>
#include <asm/atomic.h>
@@ -183,7 +184,7 @@ struct kioctx {
/* This needs improving */
unsigned long user_id;
- struct kioctx *next;
+ struct hlist_node list;
wait_queue_head_t wait;
@@ -199,6 +200,8 @@ struct kioctx {
struct aio_ring_info ring_info;
struct delayed_work wq;
+
+ struct rcu_head rcu_head;
};
/* prototypes */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 6a642098e5c3..18462c5b8fff 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -90,10 +90,11 @@ struct bio {
unsigned int bi_comp_cpu; /* completion CPU */
+ atomic_t bi_cnt; /* pin count */
+
struct bio_vec *bi_io_vec; /* the actual vec list */
bio_end_io_t *bi_end_io;
- atomic_t bi_cnt; /* pin count */
void *bi_private;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
@@ -101,6 +102,13 @@ struct bio {
#endif
bio_destructor_t *bi_destructor; /* destructor */
+
+ /*
+ * We can inline a number of vecs at the end of the bio, to avoid
+ * double allocations for a small number of bio_vecs. This member
+ * MUST obviously be kept at the very end of the bio.
+ */
+ struct bio_vec bi_inline_vecs[0];
};
/*
@@ -117,6 +125,7 @@ struct bio {
#define BIO_CPU_AFFINE 8 /* complete bio on same CPU as submitted */
#define BIO_NULL_MAPPED 9 /* contains invalid user pages */
#define BIO_FS_INTEGRITY 10 /* fs owns integrity data, not block layer */
+#define BIO_QUIET 11 /* Make BIO Quiet */
#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
/*
@@ -211,6 +220,11 @@ static inline void *bio_data(struct bio *bio)
return NULL;
}
+static inline int bio_has_allocated_vec(struct bio *bio)
+{
+ return bio->bi_io_vec && bio->bi_io_vec != bio->bi_inline_vecs;
+}
+
/*
* will die
*/
@@ -332,7 +346,7 @@ struct bio_pair {
extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
extern void bio_pair_release(struct bio_pair *dbio);
-extern struct bio_set *bioset_create(int, int);
+extern struct bio_set *bioset_create(unsigned int, unsigned int);
extern void bioset_free(struct bio_set *);
extern struct bio *bio_alloc(gfp_t, int);
@@ -377,6 +391,7 @@ extern struct bio *bio_copy_user_iov(struct request_queue *,
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *);
+extern void bvec_free_bs(struct bio_set *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);
/*
@@ -395,13 +410,17 @@ static inline void bio_set_completion_cpu(struct bio *bio, unsigned int cpu)
*/
#define BIO_POOL_SIZE 2
#define BIOVEC_NR_POOLS 6
+#define BIOVEC_MAX_IDX (BIOVEC_NR_POOLS - 1)
struct bio_set {
+ struct kmem_cache *bio_slab;
+ unsigned int front_pad;
+
mempool_t *bio_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
mempool_t *bio_integrity_pool;
#endif
- mempool_t *bvec_pools[BIOVEC_NR_POOLS];
+ mempool_t *bvec_pool;
};
struct biovec_slab {
@@ -411,6 +430,7 @@ struct biovec_slab {
};
extern struct bio_set *fs_bio_set;
+extern struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly;
/*
* a small number of entries is fine, not going to be performance critical.
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 031a315c0509..7035cec583b6 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -26,7 +26,6 @@ struct scsi_ioctl_command;
struct request_queue;
struct elevator_queue;
-typedef struct elevator_queue elevator_t;
struct request_pm_state;
struct blk_trace;
struct request;
@@ -313,7 +312,7 @@ struct request_queue
*/
struct list_head queue_head;
struct request *last_merge;
- elevator_t *elevator;
+ struct elevator_queue *elevator;
/*
* the queue request freelist, one for reads and one for writes
@@ -449,6 +448,7 @@ struct request_queue
#define QUEUE_FLAG_FAIL_IO 12 /* fake timeout */
#define QUEUE_FLAG_STACKABLE 13 /* supports request stacking */
#define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */
+#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
static inline int queue_is_locked(struct request_queue *q)
{
@@ -522,22 +522,32 @@ enum {
* TAG_FLUSH : ordering by tag w/ pre and post flushes
* TAG_FUA : ordering by tag w/ pre flush and FUA write
*/
- QUEUE_ORDERED_NONE = 0x00,
- QUEUE_ORDERED_DRAIN = 0x01,
- QUEUE_ORDERED_TAG = 0x02,
-
- QUEUE_ORDERED_PREFLUSH = 0x10,
- QUEUE_ORDERED_POSTFLUSH = 0x20,
- QUEUE_ORDERED_FUA = 0x40,
-
- QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
- QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
- QUEUE_ORDERED_DRAIN_FUA = QUEUE_ORDERED_DRAIN |
- QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
- QUEUE_ORDERED_TAG_FLUSH = QUEUE_ORDERED_TAG |
- QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
- QUEUE_ORDERED_TAG_FUA = QUEUE_ORDERED_TAG |
- QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
+ QUEUE_ORDERED_BY_DRAIN = 0x01,
+ QUEUE_ORDERED_BY_TAG = 0x02,
+ QUEUE_ORDERED_DO_PREFLUSH = 0x10,
+ QUEUE_ORDERED_DO_BAR = 0x20,
+ QUEUE_ORDERED_DO_POSTFLUSH = 0x40,
+ QUEUE_ORDERED_DO_FUA = 0x80,
+
+ QUEUE_ORDERED_NONE = 0x00,
+
+ QUEUE_ORDERED_DRAIN = QUEUE_ORDERED_BY_DRAIN |
+ QUEUE_ORDERED_DO_BAR,
+ QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
+ QUEUE_ORDERED_DO_PREFLUSH |
+ QUEUE_ORDERED_DO_POSTFLUSH,
+ QUEUE_ORDERED_DRAIN_FUA = QUEUE_ORDERED_DRAIN |
+ QUEUE_ORDERED_DO_PREFLUSH |
+ QUEUE_ORDERED_DO_FUA,
+
+ QUEUE_ORDERED_TAG = QUEUE_ORDERED_BY_TAG |
+ QUEUE_ORDERED_DO_BAR,
+ QUEUE_ORDERED_TAG_FLUSH = QUEUE_ORDERED_TAG |
+ QUEUE_ORDERED_DO_PREFLUSH |
+ QUEUE_ORDERED_DO_POSTFLUSH,
+ QUEUE_ORDERED_TAG_FUA = QUEUE_ORDERED_TAG |
+ QUEUE_ORDERED_DO_PREFLUSH |
+ QUEUE_ORDERED_DO_FUA,
/*
* Ordered operation sequence
@@ -585,7 +595,6 @@ enum {
#define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA)
#define blk_discard_rq(rq) ((rq)->cmd_flags & REQ_DISCARD)
#define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
-#define blk_empty_barrier(rq) (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist))
@@ -855,10 +864,10 @@ extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
-extern int blk_do_ordered(struct request_queue *, struct request **);
+extern bool blk_do_ordered(struct request_queue *, struct request **);
extern unsigned blk_ordered_cur_seq(struct request_queue *);
extern unsigned blk_ordered_req_seq(struct request *);
-extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int);
+extern bool blk_ordered_complete_seq(struct request_queue *, unsigned, int);
extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
@@ -977,7 +986,6 @@ static inline void put_dev_sector(Sector p)
struct work_struct;
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
-void kblockd_flush_work(struct work_struct *work);
#define MODULE_ALIAS_BLOCKDEV(major,minor) \
MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
index 777dbf695d44..27b1bcffe408 100644
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -2,7 +2,6 @@
#define _LINUX_BH_H
extern void local_bh_disable(void);
-extern void __local_bh_enable(void);
extern void _local_bh_enable(void);
extern void local_bh_enable(void);
extern void local_bh_enable_ip(unsigned long ip);
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 3ce64b90118c..8605f8a74df9 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -35,6 +35,7 @@ enum bh_state_bits {
BH_Ordered, /* ordered write */
BH_Eopnotsupp, /* operation not supported (barrier) */
BH_Unwritten, /* Buffer is allocated on disk but not written */
+ BH_Quiet, /* Buffer error printks to be quiet */
BH_PrivateStart,/* not a state bit, but the first bit available
* for private allocation by other entities
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 4aaa4afb1cb9..096476f1fb35 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -17,7 +17,7 @@ extern int debug_locks_off(void);
({ \
int __ret = 0; \
\
- if (unlikely(c)) { \
+ if (!oops_in_progress && unlikely(c)) { \
if (debug_locks_off() && !debug_locks_silent) \
WARN_ON(1); \
__ret = 1; \
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 92f6f634e3e6..7a204256b155 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -28,7 +28,7 @@ typedef void (elevator_activate_req_fn) (struct request_queue *, struct request
typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *);
typedef void *(elevator_init_fn) (struct request_queue *);
-typedef void (elevator_exit_fn) (elevator_t *);
+typedef void (elevator_exit_fn) (struct elevator_queue *);
struct elevator_ops
{
@@ -62,8 +62,8 @@ struct elevator_ops
struct elv_fs_entry {
struct attribute attr;
- ssize_t (*show)(elevator_t *, char *);
- ssize_t (*store)(elevator_t *, const char *, size_t);
+ ssize_t (*show)(struct elevator_queue *, char *);
+ ssize_t (*store)(struct elevator_queue *, const char *, size_t);
};
/*
@@ -130,7 +130,7 @@ extern ssize_t elv_iosched_show(struct request_queue *, char *);
extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
extern int elevator_init(struct request_queue *, char *);
-extern void elevator_exit(elevator_t *);
+extern void elevator_exit(struct elevator_queue *);
extern int elv_rq_merge_ok(struct request *, struct bio *);
/*
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 586ab56a3ec3..3bf5bb5a34f9 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -25,7 +25,8 @@ union ktime;
#define FUTEX_WAKE_BITSET 10
#define FUTEX_PRIVATE_FLAG 128
-#define FUTEX_CMD_MASK ~FUTEX_PRIVATE_FLAG
+#define FUTEX_CLOCK_REALTIME 256
+#define FUTEX_CMD_MASK ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
#define FUTEX_WAIT_PRIVATE (FUTEX_WAIT | FUTEX_PRIVATE_FLAG)
#define FUTEX_WAKE_PRIVATE (FUTEX_WAKE | FUTEX_PRIVATE_FLAG)
@@ -164,6 +165,8 @@ union futex_key {
} both;
};
+#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } }
+
#ifdef CONFIG_FUTEX
extern void exit_robust_list(struct task_struct *curr);
extern void exit_pi_state_list(struct task_struct *curr);
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 3df7742ce246..16948eaecae3 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -126,6 +126,7 @@ struct blk_scsi_cmd_filter {
struct disk_part_tbl {
struct rcu_head rcu_head;
int len;
+ struct hd_struct *last_lookup;
struct hd_struct *part[];
};
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 89a56d79e4c6..f83288347dda 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -119,13 +119,17 @@ static inline void account_system_vtime(struct task_struct *tsk)
}
#endif
-#if defined(CONFIG_PREEMPT_RCU) && defined(CONFIG_NO_HZ)
+#if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU)
extern void rcu_irq_enter(void);
extern void rcu_irq_exit(void);
+extern void rcu_nmi_enter(void);
+extern void rcu_nmi_exit(void);
#else
# define rcu_irq_enter() do { } while (0)
# define rcu_irq_exit() do { } while (0)
-#endif /* CONFIG_PREEMPT_RCU */
+# define rcu_nmi_enter() do { } while (0)
+# define rcu_nmi_exit() do { } while (0)
+#endif /* #if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU) */
/*
* It is safe to do non-atomic ops on ->hardirq_context,
@@ -135,7 +139,6 @@ extern void rcu_irq_exit(void);
*/
#define __irq_enter() \
do { \
- rcu_irq_enter(); \
account_system_vtime(current); \
add_preempt_count(HARDIRQ_OFFSET); \
trace_hardirq_enter(); \
@@ -154,7 +157,6 @@ extern void irq_enter(void);
trace_hardirq_exit(); \
account_system_vtime(current); \
sub_preempt_count(HARDIRQ_OFFSET); \
- rcu_irq_exit(); \
} while (0)
/*
@@ -166,11 +168,14 @@ extern void irq_exit(void);
do { \
ftrace_nmi_enter(); \
lockdep_off(); \
+ rcu_nmi_enter(); \
__irq_enter(); \
} while (0)
+
#define nmi_exit() \
do { \
__irq_exit(); \
+ rcu_nmi_exit(); \
lockdep_on(); \
ftrace_nmi_exit(); \
} while (0)
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 3eba43878dcb..bd37078c2d7d 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -43,26 +43,6 @@ enum hrtimer_restart {
};
/*
- * hrtimer callback modes:
- *
- * HRTIMER_CB_SOFTIRQ: Callback must run in softirq context
- * HRTIMER_CB_IRQSAFE_PERCPU: Callback must run in hardirq context
- * Special mode for tick emulation and
- * scheduler timer. Such timers are per
- * cpu and not allowed to be migrated on
- * cpu unplug.
- * HRTIMER_CB_IRQSAFE_UNLOCKED: Callback should run in hardirq context
- * with timer->base lock unlocked
- * used for timers which call wakeup to
- * avoid lock order problems with rq->lock
- */
-enum hrtimer_cb_mode {
- HRTIMER_CB_SOFTIRQ,
- HRTIMER_CB_IRQSAFE_PERCPU,
- HRTIMER_CB_IRQSAFE_UNLOCKED,
-};
-
-/*
* Values to track state of the timer
*
* Possible states:
@@ -70,7 +50,6 @@ enum hrtimer_cb_mode {
* 0x00 inactive
* 0x01 enqueued into rbtree
* 0x02 callback function running
- * 0x04 callback pending (high resolution mode)
*
* Special cases:
* 0x03 callback function running and enqueued
@@ -92,8 +71,7 @@ enum hrtimer_cb_mode {
#define HRTIMER_STATE_INACTIVE 0x00
#define HRTIMER_STATE_ENQUEUED 0x01
#define HRTIMER_STATE_CALLBACK 0x02
-#define HRTIMER_STATE_PENDING 0x04
-#define HRTIMER_STATE_MIGRATE 0x08
+#define HRTIMER_STATE_MIGRATE 0x04
/**
* struct hrtimer - the basic hrtimer structure
@@ -109,8 +87,6 @@ enum hrtimer_cb_mode {
* @function: timer expiry callback function
* @base: pointer to the timer base (per cpu and per clock)
* @state: state information (See bit values above)
- * @cb_mode: high resolution timer feature to select the callback execution
- * mode
* @cb_entry: list head to enqueue an expired timer into the callback list
* @start_site: timer statistics field to store the site where the timer
* was started
@@ -129,7 +105,6 @@ struct hrtimer {
struct hrtimer_clock_base *base;
unsigned long state;
struct list_head cb_entry;
- enum hrtimer_cb_mode cb_mode;
#ifdef CONFIG_TIMER_STATS
int start_pid;
void *start_site;
@@ -188,15 +163,11 @@ struct hrtimer_clock_base {
* @check_clocks: Indicator, when set evaluate time source and clock
* event devices whether high resolution mode can be
* activated.
- * @cb_pending: Expired timers are moved from the rbtree to this
- * list in the timer interrupt. The list is processed
- * in the softirq.
* @nr_events: Total number of timer interrupt events
*/
struct hrtimer_cpu_base {
spinlock_t lock;
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
- struct list_head cb_pending;
#ifdef CONFIG_HIGH_RES_TIMERS
ktime_t expires_next;
int hres_active;
@@ -404,8 +375,7 @@ static inline int hrtimer_active(const struct hrtimer *timer)
*/
static inline int hrtimer_is_queued(struct hrtimer *timer)
{
- return timer->state &
- (HRTIMER_STATE_ENQUEUED | HRTIMER_STATE_PENDING);
+ return timer->state & HRTIMER_STATE_ENQUEUED;
}
/*
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index f58a0cf8929a..be3c484b5242 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -14,6 +14,8 @@
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <linux/percpu.h>
+#include <linux/irqnr.h>
+
#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>
@@ -251,9 +253,6 @@ enum
BLOCK_SOFTIRQ,
TASKLET_SOFTIRQ,
SCHED_SOFTIRQ,
-#ifdef CONFIG_HIGH_RES_TIMERS
- HRTIMER_SOFTIRQ,
-#endif
RCU_SOFTIRQ, /* Preferably RCU should always be the last softirq */
NR_SOFTIRQS
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 3dddfa703ebd..98564dc64476 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -129,9 +129,14 @@ struct irq_chip {
const char *typename;
};
+struct timer_rand_state;
+struct irq_2_iommu;
/**
* struct irq_desc - interrupt descriptor
* @irq: interrupt number for this descriptor
+ * @timer_rand_state: pointer to timer rand state struct
+ * @kstat_irqs: irq stats per cpu
+ * @irq_2_iommu: iommu with this irq
* @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()]
* @chip: low level interrupt hardware access
* @msi_desc: MSI descriptor
@@ -143,8 +148,8 @@ struct irq_chip {
* @depth: disable-depth, for nested irq_disable() calls
* @wake_depth: enable depth, for multiple set_irq_wake() callers
* @irq_count: stats field to detect stalled irqs
- * @irqs_unhandled: stats field for spurious unhandled interrupts
* @last_unhandled: aging timer for unhandled count
+ * @irqs_unhandled: stats field for spurious unhandled interrupts
* @lock: locking for SMP
* @affinity: IRQ affinity on SMP
* @cpu: cpu index useful for balancing
@@ -154,6 +159,13 @@ struct irq_chip {
*/
struct irq_desc {
unsigned int irq;
+#ifdef CONFIG_SPARSE_IRQ
+ struct timer_rand_state *timer_rand_state;
+ unsigned int *kstat_irqs;
+# ifdef CONFIG_INTR_REMAP
+ struct irq_2_iommu *irq_2_iommu;
+# endif
+#endif
irq_flow_handler_t handle_irq;
struct irq_chip *chip;
struct msi_desc *msi_desc;
@@ -165,8 +177,8 @@ struct irq_desc {
unsigned int depth; /* nested irq disables */
unsigned int wake_depth; /* nested wake enables */
unsigned int irq_count; /* For detecting broken IRQs */
- unsigned int irqs_unhandled;
unsigned long last_unhandled; /* Aging timer for unhandled count */
+ unsigned int irqs_unhandled;
spinlock_t lock;
#ifdef CONFIG_SMP
cpumask_t affinity;
@@ -181,12 +193,51 @@ struct irq_desc {
const char *name;
} ____cacheline_internodealigned_in_smp;
+extern void early_irq_init(void);
+extern void arch_early_irq_init(void);
+extern void arch_init_chip_data(struct irq_desc *desc, int cpu);
+extern void arch_init_copy_chip_data(struct irq_desc *old_desc,
+ struct irq_desc *desc, int cpu);
+extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc);
+#ifndef CONFIG_SPARSE_IRQ
extern struct irq_desc irq_desc[NR_IRQS];
static inline struct irq_desc *irq_to_desc(unsigned int irq)
{
- return (irq < nr_irqs) ? irq_desc + irq : NULL;
+ return (irq < NR_IRQS) ? irq_desc + irq : NULL;
+}
+static inline struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
+{
+ return irq_to_desc(irq);
+}
+
+#else
+
+extern struct irq_desc *irq_to_desc(unsigned int irq);
+extern struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu);
+extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int cpu);
+
+# define for_each_irq_desc(irq, desc) \
+ for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; irq++, desc = irq_to_desc(irq))
+# define for_each_irq_desc_reverse(irq, desc) \
+ for (irq = nr_irqs - 1, desc = irq_to_desc(irq); irq >= 0; irq--, desc = irq_to_desc(irq))
+
+#define kstat_irqs_this_cpu(DESC) \
+ ((DESC)->kstat_irqs[smp_processor_id()])
+#define kstat_incr_irqs_this_cpu(irqno, DESC) \
+ ((DESC)->kstat_irqs[smp_processor_id()]++)
+
+#endif
+
+static inline struct irq_desc *
+irq_remap_to_desc(unsigned int irq, struct irq_desc *desc)
+{
+#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
+ return irq_to_desc(irq);
+#else
+ return desc;
+#endif
}
/*
@@ -380,6 +431,11 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
#define get_irq_data(irq) (irq_to_desc(irq)->handler_data)
#define get_irq_msi(irq) (irq_to_desc(irq)->msi_desc)
+#define get_irq_desc_chip(desc) ((desc)->chip)
+#define get_irq_desc_chip_data(desc) ((desc)->chip_data)
+#define get_irq_desc_data(desc) ((desc)->handler_data)
+#define get_irq_desc_msi(desc) ((desc)->msi_desc)
+
#endif /* CONFIG_GENERIC_HARDIRQS */
#endif /* !CONFIG_S390 */
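
Under CONFIG_SPARSE_IRQ the additions above move per-irq statistics, the entropy timer state and the iommu mapping into the descriptor, and the new get_irq_desc_*() accessors let hot paths reuse a descriptor they already have instead of repeating irq_to_desc(). A small hedged fragment written against those helpers (the function itself is invented):

#include <linux/irq.h>
#include <linux/kernel_stat.h>

/* Hypothetical piece of a flow handler: all state comes from the descriptor. */
static void example_ack_and_count(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = get_irq_desc_chip(desc);

	kstat_incr_irqs_this_cpu(irq, desc);	/* per-descriptor stats with SPARSE_IRQ */
	if (chip->ack)
		chip->ack(irq);
}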
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h
index 452c280c8115..95d2b74641f5 100644
--- a/include/linux/irqnr.h
+++ b/include/linux/irqnr.h
@@ -1,24 +1,38 @@
#ifndef _LINUX_IRQNR_H
#define _LINUX_IRQNR_H
+/*
+ * Generic irq_desc iterators:
+ */
+#ifdef __KERNEL__
+
#ifndef CONFIG_GENERIC_HARDIRQS
#include <asm/irq.h>
# define nr_irqs NR_IRQS
# define for_each_irq_desc(irq, desc) \
for (irq = 0; irq < nr_irqs; irq++)
+
+# define for_each_irq_desc_reverse(irq, desc) \
+ for (irq = nr_irqs - 1; irq >= 0; irq--)
#else
+
extern int nr_irqs;
+#ifndef CONFIG_SPARSE_IRQ
+
+struct irq_desc;
# define for_each_irq_desc(irq, desc) \
for (irq = 0, desc = irq_desc; irq < nr_irqs; irq++, desc++)
-
-# define for_each_irq_desc_reverse(irq, desc) \
- for (irq = nr_irqs - 1, desc = irq_desc + (nr_irqs - 1); \
- irq >= 0; irq--, desc--)
+# define for_each_irq_desc_reverse(irq, desc) \
+ for (irq = nr_irqs - 1, desc = irq_desc + (nr_irqs - 1); \
+ irq >= 0; irq--, desc--)
+#endif
#endif
-#define for_each_irq_nr(irq) \
- for (irq = 0; irq < nr_irqs; irq++)
+#define for_each_irq_nr(irq) \
+ for (irq = 0; irq < nr_irqs; irq++)
+
+#endif /* __KERNEL__ */
#endif
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 6002ae76785c..ca9ff6411dfa 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -141,6 +141,15 @@ extern int _cond_resched(void);
(__x < 0) ? -__x : __x; \
})
+#ifdef CONFIG_PROVE_LOCKING
+void might_fault(void);
+#else
+static inline void might_fault(void)
+{
+ might_sleep();
+}
+#endif
+
extern struct atomic_notifier_head panic_notifier_list;
extern long (*panic_blink)(long time);
NORET_TYPE void panic(const char * fmt, ...)
@@ -188,6 +197,8 @@ extern unsigned long long memparse(const char *ptr, char **retptr);
extern int core_kernel_text(unsigned long addr);
extern int __kernel_text_address(unsigned long addr);
extern int kernel_text_address(unsigned long addr);
+extern int func_ptr_is_kernel_text(void *ptr);
+
struct pid;
extern struct pid *session_of_pgrp(struct pid *pgrp);
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 4a145caeee07..4ee4b3d2316f 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -28,7 +28,9 @@ struct cpu_usage_stat {
struct kernel_stat {
struct cpu_usage_stat cpustat;
- unsigned int irqs[NR_IRQS];
+#ifndef CONFIG_SPARSE_IRQ
+ unsigned int irqs[NR_IRQS];
+#endif
};
DECLARE_PER_CPU(struct kernel_stat, kstat);
@@ -39,6 +41,10 @@ DECLARE_PER_CPU(struct kernel_stat, kstat);
extern unsigned long long nr_context_switches(void);
+#ifndef CONFIG_SPARSE_IRQ
+#define kstat_irqs_this_cpu(irq) \
+ (kstat_this_cpu.irqs[irq])
+
struct irq_desc;
static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
@@ -46,11 +52,17 @@ static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
{
kstat_this_cpu.irqs[irq]++;
}
+#endif
+
+#ifndef CONFIG_SPARSE_IRQ
static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
return kstat_cpu(cpu).irqs[irq];
}
+#else
+extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
+#endif
/*
* Number of interrupts per specific IRQ source, since bootup
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 29aec6e10020..23bf02fb124f 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -73,6 +73,8 @@ struct lock_class_key {
struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
};
+#define LOCKSTAT_POINTS 4
+
/*
* The lock-class itself:
*/
@@ -119,7 +121,8 @@ struct lock_class {
int name_version;
#ifdef CONFIG_LOCK_STAT
- unsigned long contention_point[4];
+ unsigned long contention_point[LOCKSTAT_POINTS];
+ unsigned long contending_point[LOCKSTAT_POINTS];
#endif
};
@@ -144,6 +147,7 @@ enum bounce_type {
struct lock_class_stats {
unsigned long contention_point[4];
+ unsigned long contending_point[4];
struct lock_time read_waittime;
struct lock_time write_waittime;
struct lock_time read_holdtime;
@@ -165,6 +169,7 @@ struct lockdep_map {
const char *name;
#ifdef CONFIG_LOCK_STAT
int cpu;
+ unsigned long ip;
#endif
};
@@ -309,8 +314,15 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
extern void lock_release(struct lockdep_map *lock, int nested,
unsigned long ip);
-extern void lock_set_subclass(struct lockdep_map *lock, unsigned int subclass,
- unsigned long ip);
+extern void lock_set_class(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, unsigned int subclass,
+ unsigned long ip);
+
+static inline void lock_set_subclass(struct lockdep_map *lock,
+ unsigned int subclass, unsigned long ip)
+{
+ lock_set_class(lock, lock->name, lock->key, subclass, ip);
+}
# define INIT_LOCKDEP .lockdep_recursion = 0,
@@ -328,6 +340,7 @@ static inline void lockdep_on(void)
# define lock_acquire(l, s, t, r, c, n, i) do { } while (0)
# define lock_release(l, n, i) do { } while (0)
+# define lock_set_class(l, n, k, s, i) do { } while (0)
# define lock_set_subclass(l, s, i) do { } while (0)
# define lockdep_init() do { } while (0)
# define lockdep_info() do { } while (0)
@@ -356,7 +369,7 @@ struct lock_class_key { };
#ifdef CONFIG_LOCK_STAT
extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
-extern void lock_acquired(struct lockdep_map *lock);
+extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);
#define LOCK_CONTENDED(_lock, try, lock) \
do { \
@@ -364,20 +377,20 @@ do { \
lock_contended(&(_lock)->dep_map, _RET_IP_); \
lock(_lock); \
} \
- lock_acquired(&(_lock)->dep_map); \
+ lock_acquired(&(_lock)->dep_map, _RET_IP_); \
} while (0)
#else /* CONFIG_LOCK_STAT */
#define lock_contended(lockdep_map, ip) do {} while (0)
-#define lock_acquired(lockdep_map) do {} while (0)
+#define lock_acquired(lockdep_map, ip) do {} while (0)
#define LOCK_CONTENDED(_lock, try, lock) \
lock(_lock)
#endif /* CONFIG_LOCK_STAT */
-#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
+#ifdef CONFIG_GENERIC_HARDIRQS
extern void early_init_irq_lock_class(void);
#else
static inline void early_init_irq_lock_class(void)
@@ -481,4 +494,22 @@ static inline void print_irqtrace_events(struct task_struct *curr)
# define lock_map_release(l) do { } while (0)
#endif
+#ifdef CONFIG_PROVE_LOCKING
+# define might_lock(lock) \
+do { \
+ typecheck(struct lockdep_map *, &(lock)->dep_map); \
+ lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_); \
+ lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
+} while (0)
+# define might_lock_read(lock) \
+do { \
+ typecheck(struct lockdep_map *, &(lock)->dep_map); \
+ lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_); \
+ lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
+} while (0)
+#else
+# define might_lock(lock) do { } while (0)
+# define might_lock_read(lock) do { } while (0)
+#endif
+
#endif /* __LINUX_LOCKDEP_H */
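
might_lock() and might_lock_read(), added above, let a function that only occasionally takes a lock still exercise lockdep's dependency checking on every call, via a zero-length acquire/release under CONFIG_PROVE_LOCKING and a no-op otherwise; might_fault() in the kernel.h hunk is built on the same idea for user-copy paths. A small hedged example (the cache and its fast path are invented):

#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(cache_lock);
static void *cache_slot;

/* The fast path avoids the mutex, but lockdep should still see the potential acquire. */
static void *cache_get(void)
{
	void *obj;

	might_lock(&cache_lock);	/* records the dependency even on the lockless path */

	obj = ACCESS_ONCE(cache_slot);
	if (obj)
		return obj;

	mutex_lock(&cache_lock);
	obj = cache_slot;
	mutex_unlock(&cache_lock);
	return obj;
}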
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index fe825471d5aa..9cfc9b627fdd 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -232,8 +232,9 @@ struct mm_struct {
struct core_state *core_state; /* coredumping support */
/* aio bits */
- rwlock_t ioctx_list_lock; /* aio lock */
- struct kioctx *ioctx_list;
+ spinlock_t ioctx_lock;
+ struct hlist_head ioctx_list;
+
#ifdef CONFIG_MM_OWNER
/*
* "owner" points to a task that is regarded as the canonical
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 8f2939227207..d2b8a1e8ca11 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -10,8 +10,11 @@ struct msi_msg {
};
/* Helper functions */
+struct irq_desc;
extern void mask_msi_irq(unsigned int irq);
extern void unmask_msi_irq(unsigned int irq);
+extern void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
+extern void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
extern void read_msi_msg(unsigned int irq, struct msi_msg *msg);
extern void write_msi_msg(unsigned int irq, struct msi_msg *msg);
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index bc6da10ceee0..7a0e5c4f8072 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -144,6 +144,8 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
/*
* NOTE: mutex_trylock() follows the spin_trylock() convention,
* not the down_trylock() convention!
+ *
+ * Returns 1 if the mutex has been acquired successfully, and 0 on contention.
*/
extern int mutex_trylock(struct mutex *lock);
extern void mutex_unlock(struct mutex *lock);
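
Because mutex_trylock() follows the spin_trylock() convention documented above (non-zero on success, 0 if the lock is already held) rather than down_trylock()'s inverted return, the usual pattern is the one sketched below; the lock and counter are invented for illustration:

#include <linux/mutex.h>

static DEFINE_MUTEX(stats_lock);
static unsigned long stats_counter;

/* Opportunistic update: skip it rather than block if someone else holds the lock. */
static void stats_try_bump(void)
{
	if (mutex_trylock(&stats_lock)) {	/* non-zero means we now own the mutex */
		stats_counter++;
		mutex_unlock(&stats_lock);
	}
}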
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index a7c721355549..4f71bf4e628c 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -45,7 +45,11 @@ struct k_itimer {
int it_requeue_pending; /* waiting to requeue this timer */
#define REQUEUE_PENDING 1
int it_sigev_notify; /* notify word of sigevent struct */
- struct task_struct *it_process; /* process to send signal to */
+ struct signal_struct *it_signal;
+ union {
+ struct pid *it_pid; /* pid of process to send signal to */
+ struct task_struct *it_process; /* for clock_nanosleep */
+ };
struct sigqueue *sigq; /* signal queue entry. */
union {
struct {
diff --git a/include/linux/random.h b/include/linux/random.h
index 36f125c0c603..adbf3bd3c6b3 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -8,6 +8,7 @@
#define _LINUX_RANDOM_H
#include <linux/ioctl.h>
+#include <linux/irqnr.h>
/* ioctl()'s for the random number generator */
@@ -44,6 +45,56 @@ struct rand_pool_info {
extern void rand_initialize_irq(int irq);
+struct timer_rand_state;
+#ifndef CONFIG_SPARSE_IRQ
+
+extern struct timer_rand_state *irq_timer_state[];
+
+static inline struct timer_rand_state *get_timer_rand_state(unsigned int irq)
+{
+ if (irq >= nr_irqs)
+ return NULL;
+
+ return irq_timer_state[irq];
+}
+
+static inline void set_timer_rand_state(unsigned int irq, struct timer_rand_state *state)
+{
+ if (irq >= nr_irqs)
+ return;
+
+ irq_timer_state[irq] = state;
+}
+
+#else
+
+#include <linux/irq.h>
+static inline struct timer_rand_state *get_timer_rand_state(unsigned int irq)
+{
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+
+ if (!desc)
+ return NULL;
+
+ return desc->timer_rand_state;
+}
+
+static inline void set_timer_rand_state(unsigned int irq, struct timer_rand_state *state)
+{
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+
+ if (!desc)
+ return;
+
+ desc->timer_rand_state = state;
+}
+#endif
+
+
extern void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value);
extern void add_interrupt_randomness(int irq);
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
index 5f89b62e6983..301dda829e37 100644
--- a/include/linux/rcuclassic.h
+++ b/include/linux/rcuclassic.h
@@ -41,7 +41,7 @@
#include <linux/seqlock.h>
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
-#define RCU_SECONDS_TILL_STALL_CHECK ( 3 * HZ) /* for rcp->jiffies_stall */
+#define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ) /* for rcp->jiffies_stall */
#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rcp->jiffies_stall */
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 895dc9c1088c..1168fbcea8d4 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -52,11 +52,15 @@ struct rcu_head {
void (*func)(struct rcu_head *head);
};
-#ifdef CONFIG_CLASSIC_RCU
+#if defined(CONFIG_CLASSIC_RCU)
#include <linux/rcuclassic.h>
-#else /* #ifdef CONFIG_CLASSIC_RCU */
+#elif defined(CONFIG_TREE_RCU)
+#include <linux/rcutree.h>
+#elif defined(CONFIG_PREEMPT_RCU)
#include <linux/rcupreempt.h>
-#endif /* #else #ifdef CONFIG_CLASSIC_RCU */
+#else
+#error "Unknown RCU implementation specified to kernel configuration"
+#endif /* #else #if defined(CONFIG_CLASSIC_RCU) */
#define RCU_HEAD_INIT { .next = NULL, .func = NULL }
#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
new file mode 100644
index 000000000000..d4368b7975c3
--- /dev/null
+++ b/include/linux/rcutree.h
@@ -0,0 +1,329 @@
+/*
+ * Read-Copy Update mechanism for mutual exclusion (tree-based version)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2008
+ *
+ * Author: Dipankar Sarma <dipankar@in.ibm.com>
+ * Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical algorithm
+ *
+ * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
+ * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ * Documentation/RCU
+ */
+
+#ifndef __LINUX_RCUTREE_H
+#define __LINUX_RCUTREE_H
+
+#include <linux/cache.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+#include <linux/percpu.h>
+#include <linux/cpumask.h>
+#include <linux/seqlock.h>
+
+/*
+ * Define shape of hierarchy based on NR_CPUS and CONFIG_RCU_FANOUT.
+ * In theory, it should be possible to add more levels straightforwardly.
+ * In practice, this has not been tested, so there is probably some
+ * bug somewhere.
+ */
+#define MAX_RCU_LVLS 3
+#define RCU_FANOUT (CONFIG_RCU_FANOUT)
+#define RCU_FANOUT_SQ (RCU_FANOUT * RCU_FANOUT)
+#define RCU_FANOUT_CUBE (RCU_FANOUT_SQ * RCU_FANOUT)
+
+#if NR_CPUS <= RCU_FANOUT
+# define NUM_RCU_LVLS 1
+# define NUM_RCU_LVL_0 1
+# define NUM_RCU_LVL_1 (NR_CPUS)
+# define NUM_RCU_LVL_2 0
+# define NUM_RCU_LVL_3 0
+#elif NR_CPUS <= RCU_FANOUT_SQ
+# define NUM_RCU_LVLS 2
+# define NUM_RCU_LVL_0 1
+# define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT - 1) / RCU_FANOUT)
+# define NUM_RCU_LVL_2 (NR_CPUS)
+# define NUM_RCU_LVL_3 0
+#elif NR_CPUS <= RCU_FANOUT_CUBE
+# define NUM_RCU_LVLS 3
+# define NUM_RCU_LVL_0 1
+# define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT_SQ - 1) / RCU_FANOUT_SQ)
+# define NUM_RCU_LVL_2 (((NR_CPUS) + (RCU_FANOUT) - 1) / (RCU_FANOUT))
+# define NUM_RCU_LVL_3 NR_CPUS
+#else
+# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
+#endif /* #if (NR_CPUS) <= RCU_FANOUT */
+
+#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
+#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)
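To make the geometry above concrete, here is a minimal host-side sketch (illustration only, not kernel code) that mirrors the macro arithmetic for an assumed NR_CPUS of 64 and CONFIG_RCU_FANOUT of 16: two levels, four leaf rcu_node structures plus one root, so NUM_RCU_NODES works out to 5.

#include <stdio.h>

/* Recompute, at run time, what the macros above derive at compile time. */
int main(void)
{
	const int nr_cpus = 64, fanout = 16;		/* assumed values */
	int leaves = (nr_cpus + fanout - 1) / fanout;	/* NUM_RCU_LVL_1 */
	int rcu_sum = 1 + leaves + nr_cpus;		/* RCU_SUM for two levels */

	printf("levels=2 leaves=%d nodes=%d\n", leaves, rcu_sum - nr_cpus);
	return 0;	/* prints: levels=2 leaves=4 nodes=5 */
}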
+
+/*
+ * Dynticks per-CPU state.
+ */
+struct rcu_dynticks {
+ int dynticks_nesting; /* Track nesting level, sort of. */
+ int dynticks; /* Even value for dynticks-idle, else odd. */
+ int dynticks_nmi; /* Even value for either dynticks-idle or */
+ /* not in nmi handler, else odd. So this */
+ /* remains even for nmi from irq handler. */
+};
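A hedged reading of the even/odd convention documented above (a sketch of the invariant, not the kernel's actual accessor): an even ->dynticks value means the CPU is in dynticks-idle mode, an odd value means it is not, and ->dynticks_nmi follows the same rule for NMI context.

/* Illustration only: interpret the parity convention described above. */
static inline int rcu_dynticks_cpu_idle(const struct rcu_dynticks *rdtp)
{
	return !(rdtp->dynticks & 0x1);		/* even => dynticks-idle */
}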
+
+/*
+ * Definition for node within the RCU grace-period-detection hierarchy.
+ */
+struct rcu_node {
+ spinlock_t lock;
+ unsigned long qsmask; /* CPUs or groups that need to switch in */
+ /* order for current grace period to proceed.*/
+ unsigned long qsmaskinit;
+ /* Per-GP initialization for qsmask. */
+ unsigned long grpmask; /* Mask to apply to parent qsmask. */
+ int grplo; /* lowest-numbered CPU or group here. */
+ int grphi; /* highest-numbered CPU or group here. */
+ u8 grpnum; /* CPU/group number for next level up. */
+ u8 level; /* root is at level 0. */
+ struct rcu_node *parent;
+} ____cacheline_internodealigned_in_smp;
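As a rough sketch of how ->qsmask and ->grpmask are meant to interact (simplified; the real grace-period code also propagates the report to the parent node and detects grace-period completion): a CPU or group reports a quiescent state by clearing its ->grpmask bit in its parent's ->qsmask, and a zero ->qsmask means everything below that node has reported.

/*
 * Illustration only: clear one CPU's/group's bit in an rcu_node and
 * report whether that node now has no outstanding quiescent states.
 */
static inline int rcu_node_sketch_report_qs(struct rcu_node *rnp,
					    unsigned long grpmask)
{
	unsigned long flags;
	int all_reported;

	spin_lock_irqsave(&rnp->lock, flags);
	rnp->qsmask &= ~grpmask;
	all_reported = (rnp->qsmask == 0);
	spin_unlock_irqrestore(&rnp->lock, flags);
	return all_reported;
}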
+
+/* Index values for nxttail array in struct rcu_data. */
+#define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */
+#define RCU_WAIT_TAIL 1 /* Also RCU_NEXT_READY head. */
+#define RCU_NEXT_READY_TAIL 2 /* Also RCU_NEXT head. */
+#define RCU_NEXT_TAIL 3
+#define RCU_NEXT_SIZE 4
+
+/* Per-CPU data for read-copy update. */
+struct rcu_data {
+ /* 1) quiescent-state and grace-period handling : */
+ long completed; /* Track rsp->completed gp number */
+ /* in order to detect GP end. */
+ long gpnum; /* Highest gp number that this CPU */
+ /* is aware of having started. */
+ long passed_quiesc_completed;
+ /* Value of completed at time of qs. */
+ bool passed_quiesc; /* User-mode/idle loop etc. */
+ bool qs_pending; /* Core waits for quiesc state. */
+ bool beenonline; /* CPU online at least once. */
+ struct rcu_node *mynode; /* This CPU's leaf of hierarchy */
+ unsigned long grpmask; /* Mask to apply to leaf qsmask. */
+
+ /* 2) batch handling */
+ /*
+ * If nxtlist is not NULL, it is partitioned as follows.
+ * Any of the partitions might be empty, in which case the
+ * pointer to that partition will be equal to the pointer for
+ * the following partition. When the list is empty, all of
+ * the nxttail elements point to nxtlist, which is NULL.
+ *
+ * [*nxttail[RCU_NEXT_READY_TAIL], NULL = *nxttail[RCU_NEXT_TAIL]):
+ * Entries that might have arrived after current GP ended
+ * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
+ * Entries known to have arrived before current GP ended
+ * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
+ * Entries that batch # <= ->completed - 1: waiting for current GP
+ * [nxtlist, *nxttail[RCU_DONE_TAIL]):
+ * Entries that batch # <= ->completed
+ * The grace period for these entries has completed, and
+ * the other grace-period-completed entries may be moved
+ * here temporarily in rcu_process_callbacks().
+ */
+ struct rcu_head *nxtlist;
+ struct rcu_head **nxttail[RCU_NEXT_SIZE];
+ long qlen; /* # of queued callbacks */
+ long blimit; /* Upper limit on a processed batch */
+
+#ifdef CONFIG_NO_HZ
+ /* 3) dynticks interface. */
+ struct rcu_dynticks *dynticks; /* Shared per-CPU dynticks state. */
+ int dynticks_snap; /* Per-GP tracking for dynticks. */
+ int dynticks_nmi_snap; /* Per-GP tracking for dynticks_nmi. */
+#endif /* #ifdef CONFIG_NO_HZ */
+
+ /* 4) reasons this CPU needed to be kicked by force_quiescent_state */
+#ifdef CONFIG_NO_HZ
+ unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */
+#endif /* #ifdef CONFIG_NO_HZ */
+ unsigned long offline_fqs; /* Kicked due to being offline. */
+ unsigned long resched_ipi; /* Sent a resched IPI. */
+
+ /* 5) state to allow this CPU to force_quiescent_state on others */
+ long n_rcu_pending; /* rcu_pending() calls since boot. */
+ long n_rcu_pending_force_qs; /* when to force quiescent states. */
+
+ int cpu;
+};
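The segmented callback list described above is easier to see with a small sketch (not the kernel's actual call_rcu() path) of how a new callback is appended to the RCU_NEXT segment while keeping the tail-pointer invariants intact.

/* Illustration only: append a callback to the RCU_NEXT segment. */
static inline void rcu_sketch_enqueue(struct rcu_data *rdp,
				      struct rcu_head *head,
				      void (*func)(struct rcu_head *))
{
	head->func = func;
	head->next = NULL;
	/* *nxttail[RCU_NEXT_TAIL] is the NULL that terminates the list. */
	*rdp->nxttail[RCU_NEXT_TAIL] = head;
	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
	rdp->qlen++;
}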
+
+/* Values for signaled field in struct rcu_state. */
+#define RCU_GP_INIT 0 /* Grace period being initialized. */
+#define RCU_SAVE_DYNTICK 1 /* Need to scan dyntick state. */
+#define RCU_FORCE_QS 2 /* Need to force quiescent state. */
+#ifdef CONFIG_NO_HZ
+#define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK
+#else /* #ifdef CONFIG_NO_HZ */
+#define RCU_SIGNAL_INIT RCU_FORCE_QS
+#endif /* #else #ifdef CONFIG_NO_HZ */
+
+#define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+#define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ) /* for rsp->jiffies_stall */
+#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rsp->jiffies_stall */
+#define RCU_STALL_RAT_DELAY 2 /* Allow other CPUs time */
+ /* to take at least one */
+ /* scheduling clock irq */
+ /* before ratting on them. */
+
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+
+/*
+ * RCU global state, including node hierarchy. This hierarchy is
+ * represented in "heap" form in a dense array. The root (first level)
+ * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
+ * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
+ * and the third level in ->node[m+1] and following (->node[m+1] referenced
+ * by ->level[2]). The number of levels is determined by the number of
+ * CPUs and by CONFIG_RCU_FANOUT. Small systems will have a "hierarchy"
+ * consisting of a single rcu_node.
+ */
+struct rcu_state {
+ struct rcu_node node[NUM_RCU_NODES]; /* Hierarchy. */
+ struct rcu_node *level[NUM_RCU_LVLS]; /* Hierarchy levels. */
+ u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */
+ u8 levelspread[NUM_RCU_LVLS]; /* kids/node in each level. */
+ struct rcu_data *rda[NR_CPUS]; /* array of rdp pointers. */
+
+ /* The following fields are guarded by the root rcu_node's lock. */
+
+ u8 signaled ____cacheline_internodealigned_in_smp;
+ /* Force QS state. */
+ long gpnum; /* Current gp number. */
+ long completed; /* # of last completed gp. */
+ spinlock_t onofflock; /* exclude on/offline and */
+ /* starting new GP. */
+ spinlock_t fqslock; /* Only one task forcing */
+ /* quiescent states. */
+ unsigned long jiffies_force_qs; /* Time at which to invoke */
+ /* force_quiescent_state(). */
+ unsigned long n_force_qs; /* Number of calls to */
+ /* force_quiescent_state(). */
+ unsigned long n_force_qs_lh; /* ~Number of calls leaving */
+ /* due to lock unavailable. */
+ unsigned long n_force_qs_ngp; /* Number of calls leaving */
+ /* due to no GP active. */
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+ unsigned long gp_start; /* Time at which GP started, */
+ /* but in jiffies. */
+ unsigned long jiffies_stall; /* Time at which to check */
+ /* for CPU stalls. */
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+#ifdef CONFIG_NO_HZ
+ long dynticks_completed; /* Value of completed @ snap. */
+#endif /* #ifdef CONFIG_NO_HZ */
+};
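One way to picture the dense "heap" layout described above: ->level[0] points at node[0], and each subsequent level starts immediately after the previous one. A hedged sketch of just the pointer setup (the real initialization also fills in levelcnt, levelspread, parent pointers and per-node masks):

/* Illustration only: lay the ->level[] pointers over the dense node[] array. */
static inline void rcu_sketch_init_levels(struct rcu_state *rsp,
					  const int levelcnt[NUM_RCU_LVLS])
{
	struct rcu_node *rnp = rsp->node;
	int i;

	for (i = 0; i < NUM_RCU_LVLS; i++) {
		rsp->level[i] = rnp;
		rnp += levelcnt[i];
	}
}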
+
+extern struct rcu_state rcu_state;
+DECLARE_PER_CPU(struct rcu_data, rcu_data);
+
+extern struct rcu_state rcu_bh_state;
+DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
+
+/*
+ * Increment the quiescent state counter.
+ * The counter is a bit degenerated: We do not need to know
+ * how many quiescent states passed, just if there was at least
+ * one since the start of the grace period. Thus just a flag.
+ */
+static inline void rcu_qsctr_inc(int cpu)
+{
+ struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+ rdp->passed_quiesc = 1;
+ rdp->passed_quiesc_completed = rdp->completed;
+}
+static inline void rcu_bh_qsctr_inc(int cpu)
+{
+ struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
+ rdp->passed_quiesc = 1;
+ rdp->passed_quiesc_completed = rdp->completed;
+}
+
+extern int rcu_pending(int cpu);
+extern int rcu_needs_cpu(int cpu);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern struct lockdep_map rcu_lock_map;
+# define rcu_read_acquire() \
+ lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
+# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_)
+#else
+# define rcu_read_acquire() do { } while (0)
+# define rcu_read_release() do { } while (0)
+#endif
+
+static inline void __rcu_read_lock(void)
+{
+ preempt_disable();
+ __acquire(RCU);
+ rcu_read_acquire();
+}
+static inline void __rcu_read_unlock(void)
+{
+ rcu_read_release();
+ __release(RCU);
+ preempt_enable();
+}
+static inline void __rcu_read_lock_bh(void)
+{
+ local_bh_disable();
+ __acquire(RCU_BH);
+ rcu_read_acquire();
+}
+static inline void __rcu_read_unlock_bh(void)
+{
+ rcu_read_release();
+ __release(RCU_BH);
+ local_bh_enable();
+}
+
+#define __synchronize_sched() synchronize_rcu()
+
+#define call_rcu_sched(head, func) call_rcu(head, func)
+
+static inline void rcu_init_sched(void)
+{
+}
+
+extern void __rcu_init(void);
+extern void rcu_check_callbacks(int cpu, int user);
+extern void rcu_restart_cpu(int cpu);
+
+extern long rcu_batches_completed(void);
+extern long rcu_batches_completed_bh(void);
+
+#ifdef CONFIG_NO_HZ
+void rcu_enter_nohz(void);
+void rcu_exit_nohz(void);
+#else /* CONFIG_NO_HZ */
+static inline void rcu_enter_nohz(void)
+{
+}
+static inline void rcu_exit_nohz(void)
+{
+}
+#endif /* CONFIG_NO_HZ */
+
+#endif /* __LINUX_RCUTREE_H */
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index b18ec5533e8c..325af1de0351 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -7,9 +7,31 @@ struct device;
struct dma_attrs;
struct scatterlist;
+/*
+ * Maximum allowable number of contiguous slabs to map,
+ * must be a power of 2. What is the appropriate value?
+ * The complexity of {map,unmap}_single is linearly dependent on this value.
+ */
+#define IO_TLB_SEGSIZE 128
+
+
+/*
+ * log of the size of each IO TLB slab. The number of slabs is command line
+ * controllable.
+ */
+#define IO_TLB_SHIFT 11
+
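For scale, a short worked example of the two constants above (illustration only; the total bounce-buffer size still depends on the command-line slab count):

/*
 * Worked example:
 *   slab size          = 1 << IO_TLB_SHIFT      = 2048 bytes
 *   max contiguous map = IO_TLB_SEGSIZE * 2048  = 262144 bytes (256 KiB)
 */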
extern void
swiotlb_init(void);
+extern void *swiotlb_alloc_boot(size_t bytes, unsigned long nslabs);
+extern void *swiotlb_alloc(unsigned order, unsigned long nslabs);
+
+extern dma_addr_t swiotlb_phys_to_bus(phys_addr_t address);
+extern phys_addr_t swiotlb_bus_to_phys(dma_addr_t address);
+
+extern int swiotlb_arch_range_needs_mapping(void *ptr, size_t size);
+
extern void
*swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags);
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 9007313b5b71..998a55d80acf 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -53,47 +53,11 @@
#ifndef _LINUX_TIMEX_H
#define _LINUX_TIMEX_H
-#include <linux/compiler.h>
#include <linux/time.h>
-#include <asm/param.h>
-
#define NTP_API 4 /* NTP API version */
/*
- * SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen
- * for a slightly underdamped convergence characteristic. SHIFT_KH
- * establishes the damping of the FLL and is chosen by wisdom and black
- * art.
- *
- * MAXTC establishes the maximum time constant of the PLL. With the
- * SHIFT_KG and SHIFT_KF values given and a time constant range from
- * zero to MAXTC, the PLL will converge in 15 minutes to 16 hours,
- * respectively.
- */
-#define SHIFT_PLL 4 /* PLL frequency factor (shift) */
-#define SHIFT_FLL 2 /* FLL frequency factor (shift) */
-#define MAXTC 10 /* maximum time constant (shift) */
-
-/*
- * SHIFT_USEC defines the scaling (shift) of the time_freq and
- * time_tolerance variables, which represent the current frequency
- * offset and maximum frequency tolerance.
- */
-#define SHIFT_USEC 16 /* frequency offset scale (shift) */
-#define PPM_SCALE (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
-#define PPM_SCALE_INV_SHIFT 19
-#define PPM_SCALE_INV ((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \
- PPM_SCALE + 1)
-
-#define MAXPHASE 500000000l /* max phase error (ns) */
-#define MAXFREQ 500000 /* max frequency error (ns/s) */
-#define MAXFREQ_SCALED ((s64)MAXFREQ << NTP_SCALE_SHIFT)
-#define MINSEC 256 /* min interval between updates (s) */
-#define MAXSEC 2048 /* max interval between updates (s) */
-#define NTP_PHASE_LIMIT ((MAXPHASE / NSEC_PER_USEC) << 5) /* beyond max. dispersion */
-
-/*
* syscall interface - used (mainly by NTP daemon)
* to discipline kernel clock oscillator
*/
@@ -199,9 +163,46 @@ struct timex {
#define TIME_BAD TIME_ERROR /* bw compat */
#ifdef __KERNEL__
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/param.h>
+
#include <asm/timex.h>
/*
+ * SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen
+ * for a slightly underdamped convergence characteristic. SHIFT_KH
+ * establishes the damping of the FLL and is chosen by wisdom and black
+ * art.
+ *
+ * MAXTC establishes the maximum time constant of the PLL. With the
+ * SHIFT_KG and SHIFT_KF values given and a time constant range from
+ * zero to MAXTC, the PLL will converge in 15 minutes to 16 hours,
+ * respectively.
+ */
+#define SHIFT_PLL 4 /* PLL frequency factor (shift) */
+#define SHIFT_FLL 2 /* FLL frequency factor (shift) */
+#define MAXTC 10 /* maximum time constant (shift) */
+
+/*
+ * SHIFT_USEC defines the scaling (shift) of the time_freq and
+ * time_tolerance variables, which represent the current frequency
+ * offset and maximum frequency tolerance.
+ */
+#define SHIFT_USEC 16 /* frequency offset scale (shift) */
+#define PPM_SCALE (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
+#define PPM_SCALE_INV_SHIFT 19
+#define PPM_SCALE_INV ((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \
+ PPM_SCALE + 1)
+
+#define MAXPHASE 500000000l /* max phase error (ns) */
+#define MAXFREQ 500000 /* max frequency error (ns/s) */
+#define MAXFREQ_SCALED ((s64)MAXFREQ << NTP_SCALE_SHIFT)
+#define MINSEC 256 /* min interval between updates (s) */
+#define MAXSEC 2048 /* max interval between updates (s) */
+#define NTP_PHASE_LIMIT ((MAXPHASE / NSEC_PER_USEC) << 5) /* beyond max. dispersion */
+
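For reference, a worked example of the scaled constants above (assuming the usual NTP_SCALE_SHIFT of 32 and NSEC_PER_USEC of 1000, both defined elsewhere):

/*
 * Worked example (illustration only):
 *   PPM_SCALE       = 1000 << (32 - 16)          = 65,536,000
 *   NTP_PHASE_LIMIT = (500,000,000 / 1000) << 5  = 16,000,000 usec
 */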
+/*
* kernel variables
* Note: maximum error = NTP synch distance = dispersion + delay / 2;
* estimated error = NTP dispersion.
diff --git a/include/linux/types.h b/include/linux/types.h
index 1d98330b1f2c..121f349cb7ec 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -135,19 +135,14 @@ typedef __s64 int64_t;
*
* Linux always considers sectors to be 512 bytes long independently
* of the devices real block size.
+ *
+ * blkcnt_t is the type of the inode's block count.
*/
#ifdef CONFIG_LBD
typedef u64 sector_t;
-#else
-typedef unsigned long sector_t;
-#endif
-
-/*
- * The type of the inode's block count.
- */
-#ifdef CONFIG_LSF
typedef u64 blkcnt_t;
#else
+typedef unsigned long sector_t;
typedef unsigned long blkcnt_t;
#endif
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index fec6decfb983..6b58367d145e 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -78,7 +78,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
\
set_fs(KERNEL_DS); \
pagefault_disable(); \
- ret = __get_user(retval, (__force typeof(retval) __user *)(addr)); \
+ ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
pagefault_enable(); \
set_fs(old_fs); \
ret; \
diff --git a/init/Kconfig b/init/Kconfig
index 8a63c404ef44..13627191a60d 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -936,10 +936,90 @@ source "block/Kconfig"
config PREEMPT_NOTIFIERS
bool
+choice
+ prompt "RCU Implementation"
+ default CLASSIC_RCU
+
config CLASSIC_RCU
- def_bool !PREEMPT_RCU
+ bool "Classic RCU"
help
This option selects the classic RCU implementation that is
designed for best read-side performance on non-realtime
- systems. Classic RCU is the default. Note that the
- PREEMPT_RCU symbol is used to select/deselect this option.
+ systems.
+
+ Select this option if you are unsure.
+
+config TREE_RCU
+ bool "Tree-based hierarchical RCU"
+ help
+ This option selects the RCU implementation that is
+ designed for very large SMP system with hundreds or
+ thousands of CPUs.
+
+config PREEMPT_RCU
+ bool "Preemptible RCU"
+ depends on PREEMPT
+ help
+ This option reduces the latency of the kernel by making certain
+ RCU sections preemptible. Normally RCU code is non-preemptible; if
+ this option is selected then read-only RCU sections become
+ preemptible. This helps latency, but may expose bugs due to
+ now-naive assumptions about each RCU read-side critical section
+ remaining on a given CPU through its execution.
+
+endchoice
+
+config RCU_TRACE
+ bool "Enable tracing for RCU"
+ depends on TREE_RCU || PREEMPT_RCU
+ help
+ This option provides tracing in RCU which presents stats
+ in debugfs for debugging the RCU implementation.
+
+ Say Y here if you want to enable RCU tracing.
+ Say N if you are unsure.
+
+config RCU_FANOUT
+ int "Tree-based hierarchical RCU fanout value"
+ range 2 64 if 64BIT
+ range 2 32 if !64BIT
+ depends on TREE_RCU
+ default 64 if 64BIT
+ default 32 if !64BIT
+ help
+ This option controls the fanout of hierarchical implementations
+ of RCU, allowing RCU to work efficiently on machines with
+ large numbers of CPUs. This value must be at least the cube
+ root of NR_CPUS, which allows NR_CPUS up to 32,768 for 32-bit
+ systems and up to 262,144 for 64-bit systems.
+
+ Select a specific number if testing RCU itself.
+ Take the default if unsure.
+
+config RCU_FANOUT_EXACT
+ bool "Disable tree-based hierarchical RCU auto-balancing"
+ depends on TREE_RCU
+ default n
+ help
+ This option forces use of the exact RCU_FANOUT value specified,
+ regardless of imbalances in the hierarchy. This is useful for
+ testing RCU itself, and might one day be useful on systems with
+ strong NUMA behavior.
+
+ Without RCU_FANOUT_EXACT, the code will balance the hierarchy.
+
+ Say N if unsure.
+
+config TREE_RCU_TRACE
+ def_bool RCU_TRACE && TREE_RCU
+ select DEBUG_FS
+ help
+ This option provides tracing for the TREE_RCU implementation,
+ permitting Makefile to trivially select kernel/rcutree_trace.c.
+
+config PREEMPT_RCU_TRACE
+ def_bool RCU_TRACE && PREEMPT_RCU
+ select DEBUG_FS
+ help
+ This option provides tracing for the PREEMPT_RCU implementation,
+ permitting Makefile to trivially select kernel/rcupreempt_trace.c.
diff --git a/init/main.c b/init/main.c
index 17e9757bfde2..2a7ce0f8e453 100644
--- a/init/main.c
+++ b/init/main.c
@@ -540,6 +540,15 @@ void __init __weak thread_info_cache_init(void)
{
}
+void __init __weak arch_early_irq_init(void)
+{
+}
+
+void __init __weak early_irq_init(void)
+{
+ arch_early_irq_init();
+}
+
asmlinkage void __init start_kernel(void)
{
char * command_line;
@@ -604,6 +613,8 @@ asmlinkage void __init start_kernel(void)
sort_main_extable();
trap_init();
rcu_init();
+ /* init some links before init_ISA_irqs() */
+ early_irq_init();
init_IRQ();
pidhash_init();
init_timers();
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index 9fdba03dc1fc..bf987b95b356 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -52,28 +52,3 @@ config PREEMPT
endchoice
-config PREEMPT_RCU
- bool "Preemptible RCU"
- depends on PREEMPT
- default n
- help
- This option reduces the latency of the kernel by making certain
- RCU sections preemptible. Normally RCU code is non-preemptible, if
- this option is selected then read-only RCU sections become
- preemptible. This helps latency, but may expose bugs due to
- now-naive assumptions about each RCU read-side critical section
- remaining on a given CPU through its execution.
-
- Say N if you are unsure.
-
-config RCU_TRACE
- bool "Enable tracing for RCU - currently stats in debugfs"
- depends on PREEMPT_RCU
- select DEBUG_FS
- default y
- help
- This option provides tracing in RCU which presents stats
- in debugfs for debugging RCU implementation.
-
- Say Y here if you want to enable RCU tracing
- Say N if you are unsure.
diff --git a/kernel/Makefile b/kernel/Makefile
index 027edda63511..e1c5bf3365c0 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -73,10 +73,10 @@ obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
obj-$(CONFIG_SECCOMP) += seccomp.o
obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
obj-$(CONFIG_CLASSIC_RCU) += rcuclassic.o
+obj-$(CONFIG_TREE_RCU) += rcutree.o
obj-$(CONFIG_PREEMPT_RCU) += rcupreempt.o
-ifeq ($(CONFIG_PREEMPT_RCU),y)
-obj-$(CONFIG_RCU_TRACE) += rcupreempt_trace.o
-endif
+obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o
+obj-$(CONFIG_PREEMPT_RCU_TRACE) += rcupreempt_trace.o
obj-$(CONFIG_RELAY) += relay.o
obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
diff --git a/kernel/exit.c b/kernel/exit.c
index c7422ca92038..c9e5a1c14e08 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1037,8 +1037,6 @@ NORET_TYPE void do_exit(long code)
* task into the wait for ever nirwana as well.
*/
tsk->flags |= PF_EXITPIDONE;
- if (tsk->io_context)
- exit_io_context();
set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
}
@@ -1328,10 +1326,10 @@ static int wait_task_zombie(struct task_struct *p, int options,
* group, which consolidates times for all threads in the
* group including the group leader.
*/
+ thread_group_cputime(p, &cputime);
spin_lock_irq(&p->parent->sighand->siglock);
psig = p->parent->signal;
sig = p->signal;
- thread_group_cputime(p, &cputime);
psig->cutime =
cputime_add(psig->cutime,
cputime_add(cputime.utime,
diff --git a/kernel/extable.c b/kernel/extable.c
index feb0317cf09a..e136ed8d82ba 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -67,3 +67,19 @@ int kernel_text_address(unsigned long addr)
return 1;
return module_text_address(addr) != NULL;
}
+
+/*
+ * On some architectures (PPC64, IA64) function pointers
+ * are actually only tokens to some data that then holds the
+ * real function address. As a result, to find if a function
+ * pointer is part of the kernel text, we need to do some
+ * special dereferencing first.
+ */
+int func_ptr_is_kernel_text(void *ptr)
+{
+ unsigned long addr;
+ addr = (unsigned long) dereference_function_descriptor(ptr);
+ if (core_kernel_text(addr))
+ return 1;
+ return module_text_address(addr) != NULL;
+}
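A hedged usage sketch of the helper added above (hypothetical caller, assuming the usual kernel headers; not part of this patch): validate a callback pointer before invoking it, so that descriptor-based architectures are handled correctly.

/* Hypothetical illustration only. */
static int sketch_call_handler(int (*handler)(void *), void *arg)
{
	if (!func_ptr_is_kernel_text((void *)handler)) {
		WARN_ON(1);	/* handler does not point into kernel text */
		return -EINVAL;
	}
	return handler(arg);
}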
diff --git a/kernel/fork.c b/kernel/fork.c
index 6144b36cd897..43cbf30669e6 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -415,8 +415,8 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
set_mm_counter(mm, file_rss, 0);
set_mm_counter(mm, anon_rss, 0);
spin_lock_init(&mm->page_table_lock);
- rwlock_init(&mm->ioctx_list_lock);
- mm->ioctx_list = NULL;
+ spin_lock_init(&mm->ioctx_lock);
+ INIT_HLIST_HEAD(&mm->ioctx_list);
mm->free_area_cache = TASK_UNMAPPED_BASE;
mm->cached_hole_size = ~0UL;
mm_init_owner(mm, p);
diff --git a/kernel/futex.c b/kernel/futex.c
index 4fe790e89d0f..7c6cbabe52b3 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -92,11 +92,12 @@ struct futex_pi_state {
* A futex_q has a woken state, just like tasks have TASK_RUNNING.
* It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
* The order of wakup is always to make the first condition true, then
- * wake up q->waiters, then make the second condition true.
+ * wake up q->waiter, then make the second condition true.
*/
struct futex_q {
struct plist_node list;
- wait_queue_head_t waiters;
+ /* There can only be a single waiter */
+ wait_queue_head_t waiter;
/* Which hash list lock to use: */
spinlock_t *lock_ptr;
@@ -123,24 +124,6 @@ struct futex_hash_bucket {
static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];
/*
- * Take mm->mmap_sem, when futex is shared
- */
-static inline void futex_lock_mm(struct rw_semaphore *fshared)
-{
- if (fshared)
- down_read(fshared);
-}
-
-/*
- * Release mm->mmap_sem, when the futex is shared
- */
-static inline void futex_unlock_mm(struct rw_semaphore *fshared)
-{
- if (fshared)
- up_read(fshared);
-}
-
-/*
* We hash on the keys returned from get_futex_key (see below).
*/
static struct futex_hash_bucket *hash_futex(union futex_key *key)
@@ -161,6 +144,45 @@ static inline int match_futex(union futex_key *key1, union futex_key *key2)
&& key1->both.offset == key2->both.offset);
}
+/*
+ * Take a reference to the resource addressed by a key.
+ * Can be called while holding spinlocks.
+ *
+ */
+static void get_futex_key_refs(union futex_key *key)
+{
+ if (!key->both.ptr)
+ return;
+
+ switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
+ case FUT_OFF_INODE:
+ atomic_inc(&key->shared.inode->i_count);
+ break;
+ case FUT_OFF_MMSHARED:
+ atomic_inc(&key->private.mm->mm_count);
+ break;
+ }
+}
+
+/*
+ * Drop a reference to the resource addressed by a key.
+ * The hash bucket spinlock must not be held.
+ */
+static void drop_futex_key_refs(union futex_key *key)
+{
+ if (!key->both.ptr)
+ return;
+
+ switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
+ case FUT_OFF_INODE:
+ iput(key->shared.inode);
+ break;
+ case FUT_OFF_MMSHARED:
+ mmdrop(key->private.mm);
+ break;
+ }
+}
+
/**
* get_futex_key - Get parameters which are the keys for a futex.
* @uaddr: virtual address of the futex
@@ -179,12 +201,10 @@ static inline int match_futex(union futex_key *key1, union futex_key *key2)
* For other futexes, it points to &current->mm->mmap_sem and
* caller must have taken the reader lock. but NOT any spinlocks.
*/
-static int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
- union futex_key *key)
+static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
{
unsigned long address = (unsigned long)uaddr;
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
struct page *page;
int err;
@@ -208,100 +228,50 @@ static int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
return -EFAULT;
key->private.mm = mm;
key->private.address = address;
+ get_futex_key_refs(key);
return 0;
}
- /*
- * The futex is hashed differently depending on whether
- * it's in a shared or private mapping. So check vma first.
- */
- vma = find_extend_vma(mm, address);
- if (unlikely(!vma))
- return -EFAULT;
- /*
- * Permissions.
- */
- if (unlikely((vma->vm_flags & (VM_IO|VM_READ)) != VM_READ))
- return (vma->vm_flags & VM_IO) ? -EPERM : -EACCES;
+again:
+ err = get_user_pages_fast(address, 1, 0, &page);
+ if (err < 0)
+ return err;
+
+ lock_page(page);
+ if (!page->mapping) {
+ unlock_page(page);
+ put_page(page);
+ goto again;
+ }
/*
* Private mappings are handled in a simple way.
*
* NOTE: When userspace waits on a MAP_SHARED mapping, even if
* it's a read-only handle, it's expected that futexes attach to
- * the object not the particular process. Therefore we use
- * VM_MAYSHARE here, not VM_SHARED which is restricted to shared
- * mappings of _writable_ handles.
+ * the object, not the particular process.
*/
- if (likely(!(vma->vm_flags & VM_MAYSHARE))) {
- key->both.offset |= FUT_OFF_MMSHARED; /* reference taken on mm */
+ if (PageAnon(page)) {
+ key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
key->private.mm = mm;
key->private.address = address;
- return 0;
+ } else {
+ key->both.offset |= FUT_OFF_INODE; /* inode-based key */
+ key->shared.inode = page->mapping->host;
+ key->shared.pgoff = page->index;
}
- /*
- * Linear file mappings are also simple.
- */
- key->shared.inode = vma->vm_file->f_path.dentry->d_inode;
- key->both.offset |= FUT_OFF_INODE; /* inode-based key. */
- if (likely(!(vma->vm_flags & VM_NONLINEAR))) {
- key->shared.pgoff = (((address - vma->vm_start) >> PAGE_SHIFT)
- + vma->vm_pgoff);
- return 0;
- }
+ get_futex_key_refs(key);
- /*
- * We could walk the page table to read the non-linear
- * pte, and get the page index without fetching the page
- * from swap. But that's a lot of code to duplicate here
- * for a rare case, so we simply fetch the page.
- */
- err = get_user_pages(current, mm, address, 1, 0, 0, &page, NULL);
- if (err >= 0) {
- key->shared.pgoff =
- page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
- put_page(page);
- return 0;
- }
- return err;
-}
-
-/*
- * Take a reference to the resource addressed by a key.
- * Can be called while holding spinlocks.
- *
- */
-static void get_futex_key_refs(union futex_key *key)
-{
- if (key->both.ptr == NULL)
- return;
- switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
- case FUT_OFF_INODE:
- atomic_inc(&key->shared.inode->i_count);
- break;
- case FUT_OFF_MMSHARED:
- atomic_inc(&key->private.mm->mm_count);
- break;
- }
+ unlock_page(page);
+ put_page(page);
+ return 0;
}
-/*
- * Drop a reference to the resource addressed by a key.
- * The hash bucket spinlock must not be held.
- */
-static void drop_futex_key_refs(union futex_key *key)
+static inline
+void put_futex_key(int fshared, union futex_key *key)
{
- if (!key->both.ptr)
- return;
- switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
- case FUT_OFF_INODE:
- iput(key->shared.inode);
- break;
- case FUT_OFF_MMSHARED:
- mmdrop(key->private.mm);
- break;
- }
+ drop_futex_key_refs(key);
}
static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
@@ -328,10 +298,8 @@ static int get_futex_value_locked(u32 *dest, u32 __user *from)
/*
* Fault handling.
- * if fshared is non NULL, current->mm->mmap_sem is already held
*/
-static int futex_handle_fault(unsigned long address,
- struct rw_semaphore *fshared, int attempt)
+static int futex_handle_fault(unsigned long address, int attempt)
{
struct vm_area_struct * vma;
struct mm_struct *mm = current->mm;
@@ -340,8 +308,7 @@ static int futex_handle_fault(unsigned long address,
if (attempt > 2)
return ret;
- if (!fshared)
- down_read(&mm->mmap_sem);
+ down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
if (vma && address >= vma->vm_start &&
(vma->vm_flags & VM_WRITE)) {
@@ -361,8 +328,7 @@ static int futex_handle_fault(unsigned long address,
current->min_flt++;
}
}
- if (!fshared)
- up_read(&mm->mmap_sem);
+ up_read(&mm->mmap_sem);
return ret;
}
@@ -385,6 +351,7 @@ static int refill_pi_state_cache(void)
/* pi_mutex gets initialized later */
pi_state->owner = NULL;
atomic_set(&pi_state->refcount, 1);
+ pi_state->key = FUTEX_KEY_INIT;
current->pi_state_cache = pi_state;
@@ -469,7 +436,7 @@ void exit_pi_state_list(struct task_struct *curr)
struct list_head *next, *head = &curr->pi_state_list;
struct futex_pi_state *pi_state;
struct futex_hash_bucket *hb;
- union futex_key key;
+ union futex_key key = FUTEX_KEY_INIT;
if (!futex_cmpxchg_enabled)
return;
@@ -614,7 +581,7 @@ static void wake_futex(struct futex_q *q)
* The lock in wake_up_all() is a crucial memory barrier after the
* plist_del() and also before assigning to q->lock_ptr.
*/
- wake_up_all(&q->waiters);
+ wake_up(&q->waiter);
/*
* The waiting task can free the futex_q as soon as this is written,
* without taking any locks. This must come last.
@@ -726,20 +693,17 @@ double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
* Wake up all waiters hashed on the physical page that is mapped
* to this virtual address:
*/
-static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared,
- int nr_wake, u32 bitset)
+static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
{
struct futex_hash_bucket *hb;
struct futex_q *this, *next;
struct plist_head *head;
- union futex_key key;
+ union futex_key key = FUTEX_KEY_INIT;
int ret;
if (!bitset)
return -EINVAL;
- futex_lock_mm(fshared);
-
ret = get_futex_key(uaddr, fshared, &key);
if (unlikely(ret != 0))
goto out;
@@ -767,7 +731,7 @@ static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared,
spin_unlock(&hb->lock);
out:
- futex_unlock_mm(fshared);
+ put_futex_key(fshared, &key);
return ret;
}
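With mmap_sem no longer taken around futex operations, every caller now follows the same get/put pairing on the key; a minimal sketch of the pattern (illustration only, error paths trimmed):

/* Illustration only: the key reference pattern callers follow after this change. */
static int sketch_futex_op(u32 __user *uaddr, int fshared)
{
	union futex_key key = FUTEX_KEY_INIT;
	int ret;

	ret = get_futex_key(uaddr, fshared, &key);	/* takes a reference */
	if (unlikely(ret != 0))
		return ret;
	/* ... hash the key, lock the bucket, do the actual work ... */
	put_futex_key(fshared, &key);			/* drops the reference */
	return 0;
}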
@@ -776,19 +740,16 @@ out:
* to this virtual address:
*/
static int
-futex_wake_op(u32 __user *uaddr1, struct rw_semaphore *fshared,
- u32 __user *uaddr2,
+futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
int nr_wake, int nr_wake2, int op)
{
- union futex_key key1, key2;
+ union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
struct futex_hash_bucket *hb1, *hb2;
struct plist_head *head;
struct futex_q *this, *next;
int ret, op_ret, attempt = 0;
retryfull:
- futex_lock_mm(fshared);
-
ret = get_futex_key(uaddr1, fshared, &key1);
if (unlikely(ret != 0))
goto out;
@@ -833,18 +794,12 @@ retry:
*/
if (attempt++) {
ret = futex_handle_fault((unsigned long)uaddr2,
- fshared, attempt);
+ attempt);
if (ret)
goto out;
goto retry;
}
- /*
- * If we would have faulted, release mmap_sem,
- * fault it in and start all over again.
- */
- futex_unlock_mm(fshared);
-
ret = get_user(dummy, uaddr2);
if (ret)
return ret;
@@ -880,7 +835,8 @@ retry:
if (hb1 != hb2)
spin_unlock(&hb2->lock);
out:
- futex_unlock_mm(fshared);
+ put_futex_key(fshared, &key2);
+ put_futex_key(fshared, &key1);
return ret;
}
@@ -889,19 +845,16 @@ out:
* Requeue all waiters hashed on one physical page to another
* physical page.
*/
-static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared,
- u32 __user *uaddr2,
+static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
int nr_wake, int nr_requeue, u32 *cmpval)
{
- union futex_key key1, key2;
+ union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
struct futex_hash_bucket *hb1, *hb2;
struct plist_head *head1;
struct futex_q *this, *next;
int ret, drop_count = 0;
retry:
- futex_lock_mm(fshared);
-
ret = get_futex_key(uaddr1, fshared, &key1);
if (unlikely(ret != 0))
goto out;
@@ -924,12 +877,6 @@ static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared,
if (hb1 != hb2)
spin_unlock(&hb2->lock);
- /*
- * If we would have faulted, release mmap_sem, fault
- * it in and start all over again.
- */
- futex_unlock_mm(fshared);
-
ret = get_user(curval, uaddr1);
if (!ret)
@@ -981,7 +928,8 @@ out_unlock:
drop_futex_key_refs(&key1);
out:
- futex_unlock_mm(fshared);
+ put_futex_key(fshared, &key2);
+ put_futex_key(fshared, &key1);
return ret;
}
@@ -990,7 +938,7 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
{
struct futex_hash_bucket *hb;
- init_waitqueue_head(&q->waiters);
+ init_waitqueue_head(&q->waiter);
get_futex_key_refs(&q->key);
hb = hash_futex(&q->key);
@@ -1103,8 +1051,7 @@ static void unqueue_me_pi(struct futex_q *q)
* private futexes.
*/
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
- struct task_struct *newowner,
- struct rw_semaphore *fshared)
+ struct task_struct *newowner, int fshared)
{
u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
struct futex_pi_state *pi_state = q->pi_state;
@@ -1183,7 +1130,7 @@ retry:
handle_fault:
spin_unlock(q->lock_ptr);
- ret = futex_handle_fault((unsigned long)uaddr, fshared, attempt++);
+ ret = futex_handle_fault((unsigned long)uaddr, attempt++);
spin_lock(q->lock_ptr);
@@ -1203,12 +1150,13 @@ handle_fault:
* In case we must use restart_block to restart a futex_wait,
* we encode in the 'flags' shared capability
*/
-#define FLAGS_SHARED 1
+#define FLAGS_SHARED 0x01
+#define FLAGS_CLOCKRT 0x02
static long futex_wait_restart(struct restart_block *restart);
-static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
- u32 val, ktime_t *abs_time, u32 bitset)
+static int futex_wait(u32 __user *uaddr, int fshared,
+ u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
{
struct task_struct *curr = current;
DECLARE_WAITQUEUE(wait, curr);
@@ -1225,8 +1173,7 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
q.pi_state = NULL;
q.bitset = bitset;
retry:
- futex_lock_mm(fshared);
-
+ q.key = FUTEX_KEY_INIT;
ret = get_futex_key(uaddr, fshared, &q.key);
if (unlikely(ret != 0))
goto out_release_sem;
@@ -1258,12 +1205,6 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
if (unlikely(ret)) {
queue_unlock(&q, hb);
- /*
- * If we would have faulted, release mmap_sem, fault it in and
- * start all over again.
- */
- futex_unlock_mm(fshared);
-
ret = get_user(uval, uaddr);
if (!ret)
@@ -1278,12 +1219,6 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
queue_me(&q, hb);
/*
- * Now the futex is queued and we have checked the data, we
- * don't want to hold mmap_sem while we sleep.
- */
- futex_unlock_mm(fshared);
-
- /*
* There might have been scheduling since the queue_me(), as we
* cannot hold a spinlock across the get_user() in case it
* faults, and we cannot just set TASK_INTERRUPTIBLE state when
@@ -1294,7 +1229,7 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
/* add_wait_queue is the barrier after __set_current_state. */
__set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(&q.waiters, &wait);
+ add_wait_queue(&q.waiter, &wait);
/*
* !plist_node_empty() is safe here without any lock.
* q.lock_ptr != 0 is not safe, because of ordering against wakeup.
@@ -1307,8 +1242,10 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
slack = current->timer_slack_ns;
if (rt_task(current))
slack = 0;
- hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_ABS);
+ hrtimer_init_on_stack(&t.timer,
+ clockrt ? CLOCK_REALTIME :
+ CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS);
hrtimer_init_sleeper(&t, current);
hrtimer_set_expires_range_ns(&t.timer, *abs_time, slack);
@@ -1363,6 +1300,8 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
if (fshared)
restart->futex.flags |= FLAGS_SHARED;
+ if (clockrt)
+ restart->futex.flags |= FLAGS_CLOCKRT;
return -ERESTART_RESTARTBLOCK;
}
@@ -1370,7 +1309,7 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
queue_unlock(&q, hb);
out_release_sem:
- futex_unlock_mm(fshared);
+ put_futex_key(fshared, &q.key);
return ret;
}
@@ -1378,15 +1317,16 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
static long futex_wait_restart(struct restart_block *restart)
{
u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
- struct rw_semaphore *fshared = NULL;
+ int fshared = 0;
ktime_t t;
t.tv64 = restart->futex.time;
restart->fn = do_no_restart_syscall;
if (restart->futex.flags & FLAGS_SHARED)
- fshared = &current->mm->mmap_sem;
+ fshared = 1;
return (long)futex_wait(uaddr, fshared, restart->futex.val, &t,
- restart->futex.bitset);
+ restart->futex.bitset,
+ restart->futex.flags & FLAGS_CLOCKRT);
}
@@ -1396,7 +1336,7 @@ static long futex_wait_restart(struct restart_block *restart)
* if there are waiters then it will block, it does PI, etc. (Due to
* races the kernel might see a 0 value of the futex too.)
*/
-static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
+static int futex_lock_pi(u32 __user *uaddr, int fshared,
int detect, ktime_t *time, int trylock)
{
struct hrtimer_sleeper timeout, *to = NULL;
@@ -1419,8 +1359,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
q.pi_state = NULL;
retry:
- futex_lock_mm(fshared);
-
+ q.key = FUTEX_KEY_INIT;
ret = get_futex_key(uaddr, fshared, &q.key);
if (unlikely(ret != 0))
goto out_release_sem;
@@ -1509,7 +1448,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
* exit to complete.
*/
queue_unlock(&q, hb);
- futex_unlock_mm(fshared);
cond_resched();
goto retry;
@@ -1541,12 +1479,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
*/
queue_me(&q, hb);
- /*
- * Now the futex is queued and we have checked the data, we
- * don't want to hold mmap_sem while we sleep.
- */
- futex_unlock_mm(fshared);
-
WARN_ON(!q.pi_state);
/*
* Block on the PI mutex:
@@ -1559,7 +1491,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
ret = ret ? 0 : -EWOULDBLOCK;
}
- futex_lock_mm(fshared);
spin_lock(q.lock_ptr);
if (!ret) {
@@ -1625,7 +1556,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
/* Unqueue and drop the lock */
unqueue_me_pi(&q);
- futex_unlock_mm(fshared);
if (to)
destroy_hrtimer_on_stack(&to->timer);
@@ -1635,34 +1565,30 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
queue_unlock(&q, hb);
out_release_sem:
- futex_unlock_mm(fshared);
+ put_futex_key(fshared, &q.key);
if (to)
destroy_hrtimer_on_stack(&to->timer);
return ret;
uaddr_faulted:
/*
- * We have to r/w *(int __user *)uaddr, but we can't modify it
- * non-atomically. Therefore, if get_user below is not
- * enough, we need to handle the fault ourselves, while
- * still holding the mmap_sem.
- *
- * ... and hb->lock. :-) --ANK
+ * We have to r/w *(int __user *)uaddr, and we have to modify it
+ * atomically. Therefore, if we continue to fault after get_user()
+ * below, we need to handle the fault ourselves, while still holding
+ * the mmap_sem. This can occur if the uaddr is under contention as
+ * we have to drop the mmap_sem in order to call get_user().
*/
queue_unlock(&q, hb);
if (attempt++) {
- ret = futex_handle_fault((unsigned long)uaddr, fshared,
- attempt);
+ ret = futex_handle_fault((unsigned long)uaddr, attempt);
if (ret)
goto out_release_sem;
goto retry_unlocked;
}
- futex_unlock_mm(fshared);
-
ret = get_user(uval, uaddr);
- if (!ret && (uval != -EFAULT))
+ if (!ret)
goto retry;
if (to)
@@ -1675,13 +1601,13 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
* This is the in-kernel slowpath: we look up the PI state (if any),
* and do the rt-mutex unlock.
*/
-static int futex_unlock_pi(u32 __user *uaddr, struct rw_semaphore *fshared)
+static int futex_unlock_pi(u32 __user *uaddr, int fshared)
{
struct futex_hash_bucket *hb;
struct futex_q *this, *next;
u32 uval;
struct plist_head *head;
- union futex_key key;
+ union futex_key key = FUTEX_KEY_INIT;
int ret, attempt = 0;
retry:
@@ -1692,10 +1618,6 @@ retry:
*/
if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
return -EPERM;
- /*
- * First take all the futex related locks:
- */
- futex_lock_mm(fshared);
ret = get_futex_key(uaddr, fshared, &key);
if (unlikely(ret != 0))
@@ -1754,34 +1676,30 @@ retry_unlocked:
out_unlock:
spin_unlock(&hb->lock);
out:
- futex_unlock_mm(fshared);
+ put_futex_key(fshared, &key);
return ret;
pi_faulted:
/*
- * We have to r/w *(int __user *)uaddr, but we can't modify it
- * non-atomically. Therefore, if get_user below is not
- * enough, we need to handle the fault ourselves, while
- * still holding the mmap_sem.
- *
- * ... and hb->lock. --ANK
+ * We have to r/w *(int __user *)uaddr, and we have to modify it
+ * atomically. Therefore, if we continue to fault after get_user()
+ * below, we need to handle the fault ourselves, while still holding
+ * the mmap_sem. This can occur if the uaddr is under contention as
+ * we have to drop the mmap_sem in order to call get_user().
*/
spin_unlock(&hb->lock);
if (attempt++) {
- ret = futex_handle_fault((unsigned long)uaddr, fshared,
- attempt);
+ ret = futex_handle_fault((unsigned long)uaddr, attempt);
if (ret)
goto out;
uval = 0;
goto retry_unlocked;
}
- futex_unlock_mm(fshared);
-
ret = get_user(uval, uaddr);
- if (!ret && (uval != -EFAULT))
+ if (!ret)
goto retry;
return ret;
@@ -1908,8 +1826,7 @@ retry:
* PI futexes happens in exit_pi_state():
*/
if (!pi && (uval & FUTEX_WAITERS))
- futex_wake(uaddr, &curr->mm->mmap_sem, 1,
- FUTEX_BITSET_MATCH_ANY);
+ futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
}
return 0;
}
@@ -2003,18 +1920,22 @@ void exit_robust_list(struct task_struct *curr)
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
u32 __user *uaddr2, u32 val2, u32 val3)
{
- int ret = -ENOSYS;
+ int clockrt, ret = -ENOSYS;
int cmd = op & FUTEX_CMD_MASK;
- struct rw_semaphore *fshared = NULL;
+ int fshared = 0;
if (!(op & FUTEX_PRIVATE_FLAG))
- fshared = &current->mm->mmap_sem;
+ fshared = 1;
+
+ clockrt = op & FUTEX_CLOCK_REALTIME;
+ if (clockrt && cmd != FUTEX_WAIT_BITSET)
+ return -ENOSYS;
switch (cmd) {
case FUTEX_WAIT:
val3 = FUTEX_BITSET_MATCH_ANY;
case FUTEX_WAIT_BITSET:
- ret = futex_wait(uaddr, fshared, val, timeout, val3);
+ ret = futex_wait(uaddr, fshared, val, timeout, val3, clockrt);
break;
case FUTEX_WAKE:
val3 = FUTEX_BITSET_MATCH_ANY;
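The FUTEX_CLOCK_REALTIME handling above only accepts the flag together with FUTEX_WAIT_BITSET. A hedged user-space sketch of requesting an absolute CLOCK_REALTIME timeout (assumes headers new enough to define FUTEX_WAIT_BITSET, FUTEX_CLOCK_REALTIME and FUTEX_BITSET_MATCH_ANY):

/* Illustrative user-space caller; not part of this patch. */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <stdint.h>
#include <time.h>
#include <unistd.h>

static long futex_wait_abs_realtime(uint32_t *uaddr, uint32_t expected,
				    const struct timespec *abs_timeout)
{
	/* Absolute CLOCK_REALTIME timeout; match any wake bitset. */
	return syscall(SYS_futex, uaddr,
		       FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME,
		       expected, abs_timeout, NULL, FUTEX_BITSET_MATCH_ANY);
}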
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 47e63349d1b2..bda9cb924276 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -442,22 +442,6 @@ static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
#endif
-/*
- * Check, whether the timer is on the callback pending list
- */
-static inline int hrtimer_cb_pending(const struct hrtimer *timer)
-{
- return timer->state & HRTIMER_STATE_PENDING;
-}
-
-/*
- * Remove a timer from the callback pending list
- */
-static inline void hrtimer_remove_cb_pending(struct hrtimer *timer)
-{
- list_del_init(&timer->cb_entry);
-}
-
/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS
@@ -651,6 +635,8 @@ static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
{
}
+static void __run_hrtimer(struct hrtimer *timer);
+
/*
* When High resolution timers are active, try to reprogram. Note, that in case
* the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
@@ -661,31 +647,14 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
struct hrtimer_clock_base *base)
{
if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
-
- /* Timer is expired, act upon the callback mode */
- switch(timer->cb_mode) {
- case HRTIMER_CB_IRQSAFE_PERCPU:
- case HRTIMER_CB_IRQSAFE_UNLOCKED:
- /*
- * This is solely for the sched tick emulation with
- * dynamic tick support to ensure that we do not
- * restart the tick right on the edge and end up with
- * the tick timer in the softirq ! The calling site
- * takes care of this. Also used for hrtimer sleeper !
- */
- debug_hrtimer_deactivate(timer);
- return 1;
- case HRTIMER_CB_SOFTIRQ:
- /*
- * Move everything else into the softirq pending list !
- */
- list_add_tail(&timer->cb_entry,
- &base->cpu_base->cb_pending);
- timer->state = HRTIMER_STATE_PENDING;
- return 1;
- default:
- BUG();
- }
+ /*
+ * XXX: recursion check?
+ * hrtimer_forward() should round up with timer granularity
+ * so that we never get into inf recursion here,
+ * it doesn't do that though
+ */
+ __run_hrtimer(timer);
+ return 1;
}
return 0;
}
@@ -724,11 +693,6 @@ static int hrtimer_switch_to_hres(void)
return 1;
}
-static inline void hrtimer_raise_softirq(void)
-{
- raise_softirq(HRTIMER_SOFTIRQ);
-}
-
#else
static inline int hrtimer_hres_active(void) { return 0; }
@@ -747,7 +711,6 @@ static inline int hrtimer_reprogram(struct hrtimer *timer,
{
return 0;
}
-static inline void hrtimer_raise_softirq(void) { }
#endif /* CONFIG_HIGH_RES_TIMERS */
@@ -890,10 +853,7 @@ static void __remove_hrtimer(struct hrtimer *timer,
struct hrtimer_clock_base *base,
unsigned long newstate, int reprogram)
{
- /* High res. callback list. NOP for !HIGHRES */
- if (hrtimer_cb_pending(timer))
- hrtimer_remove_cb_pending(timer);
- else {
+ if (timer->state & HRTIMER_STATE_ENQUEUED) {
/*
* Remove the timer from the rbtree and replace the
* first entry pointer if necessary.
@@ -953,7 +913,7 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n
{
struct hrtimer_clock_base *base, *new_base;
unsigned long flags;
- int ret, raise;
+ int ret;
base = lock_hrtimer_base(timer, &flags);
@@ -988,26 +948,8 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n
enqueue_hrtimer(timer, new_base,
new_base->cpu_base == &__get_cpu_var(hrtimer_bases));
- /*
- * The timer may be expired and moved to the cb_pending
- * list. We can not raise the softirq with base lock held due
- * to a possible deadlock with runqueue lock.
- */
- raise = timer->state == HRTIMER_STATE_PENDING;
-
- /*
- * We use preempt_disable to prevent this task from migrating after
- * setting up the softirq and raising it. Otherwise, if me migrate
- * we will raise the softirq on the wrong CPU.
- */
- preempt_disable();
-
unlock_hrtimer_base(timer, &flags);
- if (raise)
- hrtimer_raise_softirq();
- preempt_enable();
-
return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
@@ -1192,75 +1134,6 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
}
EXPORT_SYMBOL_GPL(hrtimer_get_res);
-static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base)
-{
- spin_lock_irq(&cpu_base->lock);
-
- while (!list_empty(&cpu_base->cb_pending)) {
- enum hrtimer_restart (*fn)(struct hrtimer *);
- struct hrtimer *timer;
- int restart;
- int emulate_hardirq_ctx = 0;
-
- timer = list_entry(cpu_base->cb_pending.next,
- struct hrtimer, cb_entry);
-
- debug_hrtimer_deactivate(timer);
- timer_stats_account_hrtimer(timer);
-
- fn = timer->function;
- /*
- * A timer might have been added to the cb_pending list
- * when it was migrated during a cpu-offline operation.
- * Emulate hardirq context for such timers.
- */
- if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
- timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED)
- emulate_hardirq_ctx = 1;
-
- __remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0);
- spin_unlock_irq(&cpu_base->lock);
-
- if (unlikely(emulate_hardirq_ctx)) {
- local_irq_disable();
- restart = fn(timer);
- local_irq_enable();
- } else
- restart = fn(timer);
-
- spin_lock_irq(&cpu_base->lock);
-
- timer->state &= ~HRTIMER_STATE_CALLBACK;
- if (restart == HRTIMER_RESTART) {
- BUG_ON(hrtimer_active(timer));
- /*
- * Enqueue the timer, allow reprogramming of the event
- * device
- */
- enqueue_hrtimer(timer, timer->base, 1);
- } else if (hrtimer_active(timer)) {
- /*
- * If the timer was rearmed on another CPU, reprogram
- * the event device.
- */
- struct hrtimer_clock_base *base = timer->base;
-
- if (base->first == &timer->node &&
- hrtimer_reprogram(timer, base)) {
- /*
- * Timer is expired. Thus move it from tree to
- * pending list again.
- */
- __remove_hrtimer(timer, base,
- HRTIMER_STATE_PENDING, 0);
- list_add_tail(&timer->cb_entry,
- &base->cpu_base->cb_pending);
- }
- }
- }
- spin_unlock_irq(&cpu_base->lock);
-}
-
static void __run_hrtimer(struct hrtimer *timer)
{
struct hrtimer_clock_base *base = timer->base;
@@ -1268,25 +1141,21 @@ static void __run_hrtimer(struct hrtimer *timer)
enum hrtimer_restart (*fn)(struct hrtimer *);
int restart;
+ WARN_ON(!irqs_disabled());
+
debug_hrtimer_deactivate(timer);
__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
timer_stats_account_hrtimer(timer);
-
fn = timer->function;
- if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
- timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED) {
- /*
- * Used for scheduler timers, avoid lock inversion with
- * rq->lock and tasklist_lock.
- *
- * These timers are required to deal with enqueue expiry
- * themselves and are not allowed to migrate.
- */
- spin_unlock(&cpu_base->lock);
- restart = fn(timer);
- spin_lock(&cpu_base->lock);
- } else
- restart = fn(timer);
+
+ /*
+ * Because we run timers from hardirq context, there is no chance
+ * they get migrated to another cpu, therefore it's safe to unlock
+ * the timer base.
+ */
+ spin_unlock(&cpu_base->lock);
+ restart = fn(timer);
+ spin_lock(&cpu_base->lock);
/*
* Note: We clear the CALLBACK bit after enqueue_hrtimer to avoid
@@ -1311,7 +1180,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
struct hrtimer_clock_base *base;
ktime_t expires_next, now;
- int i, raise = 0;
+ int i;
BUG_ON(!cpu_base->hres_active);
cpu_base->nr_events++;
@@ -1360,16 +1229,6 @@ void hrtimer_interrupt(struct clock_event_device *dev)
break;
}
- /* Move softirq callbacks to the pending list */
- if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
- __remove_hrtimer(timer, base,
- HRTIMER_STATE_PENDING, 0);
- list_add_tail(&timer->cb_entry,
- &base->cpu_base->cb_pending);
- raise = 1;
- continue;
- }
-
__run_hrtimer(timer);
}
spin_unlock(&cpu_base->lock);
@@ -1383,10 +1242,6 @@ void hrtimer_interrupt(struct clock_event_device *dev)
if (tick_program_event(expires_next, 0))
goto retry;
}
-
- /* Raise softirq ? */
- if (raise)
- raise_softirq(HRTIMER_SOFTIRQ);
}
/**
@@ -1413,11 +1268,6 @@ void hrtimer_peek_ahead_timers(void)
local_irq_restore(flags);
}
-static void run_hrtimer_softirq(struct softirq_action *h)
-{
- run_hrtimer_pending(&__get_cpu_var(hrtimer_bases));
-}
-
#endif /* CONFIG_HIGH_RES_TIMERS */
/*
@@ -1429,8 +1279,6 @@ static void run_hrtimer_softirq(struct softirq_action *h)
*/
void hrtimer_run_pending(void)
{
- struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
-
if (hrtimer_hres_active())
return;
@@ -1444,8 +1292,6 @@ void hrtimer_run_pending(void)
*/
if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
hrtimer_switch_to_hres();
-
- run_hrtimer_pending(cpu_base);
}
/*
@@ -1482,14 +1328,6 @@ void hrtimer_run_queues(void)
hrtimer_get_expires_tv64(timer))
break;
- if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
- __remove_hrtimer(timer, base,
- HRTIMER_STATE_PENDING, 0);
- list_add_tail(&timer->cb_entry,
- &base->cpu_base->cb_pending);
- continue;
- }
-
__run_hrtimer(timer);
}
spin_unlock(&cpu_base->lock);
@@ -1516,9 +1354,6 @@ void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
sl->timer.function = hrtimer_wakeup;
sl->task = task;
-#ifdef CONFIG_HIGH_RES_TIMERS
- sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
-#endif
}
static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
@@ -1655,18 +1490,16 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
cpu_base->clock_base[i].cpu_base = cpu_base;
- INIT_LIST_HEAD(&cpu_base->cb_pending);
hrtimer_init_hres(cpu_base);
}
#ifdef CONFIG_HOTPLUG_CPU
-static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
- struct hrtimer_clock_base *new_base, int dcpu)
+static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
+ struct hrtimer_clock_base *new_base)
{
struct hrtimer *timer;
struct rb_node *node;
- int raise = 0;
while ((node = rb_first(&old_base->active))) {
timer = rb_entry(node, struct hrtimer, node);
@@ -1674,18 +1507,6 @@ static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
debug_hrtimer_deactivate(timer);
/*
- * Should not happen. Per CPU timers should be
- * canceled _before_ the migration code is called
- */
- if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU) {
- __remove_hrtimer(timer, old_base,
- HRTIMER_STATE_INACTIVE, 0);
- WARN(1, "hrtimer (%p %p)active but cpu %d dead\n",
- timer, timer->function, dcpu);
- continue;
- }
-
- /*
* Mark it as STATE_MIGRATE not INACTIVE otherwise the
* timer could be seen as !active and just vanish away
* under us on another CPU
@@ -1693,69 +1514,34 @@ static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
__remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
timer->base = new_base;
/*
- * Enqueue the timer. Allow reprogramming of the event device
+ * Enqueue the timers on the new cpu, but do not reprogram
+ * the timer as that would enable a deadlock between
+ * hrtimer_enqueue_reprogram() running the timer and us still
+ * holding a nested base lock.
+ *
+ * Instead we tickle the hrtimer interrupt after the migration
+ * is done, which will run all expired timers and re-program
+ * the timer device.
*/
- enqueue_hrtimer(timer, new_base, 1);
+ enqueue_hrtimer(timer, new_base, 0);
-#ifdef CONFIG_HIGH_RES_TIMERS
- /*
- * Happens with high res enabled when the timer was
- * already expired and the callback mode is
- * HRTIMER_CB_IRQSAFE_UNLOCKED (hrtimer_sleeper). The
- * enqueue code does not move them to the soft irq
- * pending list for performance/latency reasons, but
- * in the migration state, we need to do that
- * otherwise we end up with a stale timer.
- */
- if (timer->state == HRTIMER_STATE_MIGRATE) {
- timer->state = HRTIMER_STATE_PENDING;
- list_add_tail(&timer->cb_entry,
- &new_base->cpu_base->cb_pending);
- raise = 1;
- }
-#endif
/* Clear the migration state bit */
timer->state &= ~HRTIMER_STATE_MIGRATE;
}
- return raise;
-}
-
-#ifdef CONFIG_HIGH_RES_TIMERS
-static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
- struct hrtimer_cpu_base *new_base)
-{
- struct hrtimer *timer;
- int raise = 0;
-
- while (!list_empty(&old_base->cb_pending)) {
- timer = list_entry(old_base->cb_pending.next,
- struct hrtimer, cb_entry);
-
- __remove_hrtimer(timer, timer->base, HRTIMER_STATE_PENDING, 0);
- timer->base = &new_base->clock_base[timer->base->index];
- list_add_tail(&timer->cb_entry, &new_base->cb_pending);
- raise = 1;
- }
- return raise;
-}
-#else
-static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
- struct hrtimer_cpu_base *new_base)
-{
- return 0;
}
-#endif
-static void migrate_hrtimers(int cpu)
+static int migrate_hrtimers(int scpu)
{
struct hrtimer_cpu_base *old_base, *new_base;
- int i, raise = 0;
+ int dcpu, i;
- BUG_ON(cpu_online(cpu));
- old_base = &per_cpu(hrtimer_bases, cpu);
+ BUG_ON(cpu_online(scpu));
+ old_base = &per_cpu(hrtimer_bases, scpu);
new_base = &get_cpu_var(hrtimer_bases);
- tick_cancel_sched_timer(cpu);
+ dcpu = smp_processor_id();
+
+ tick_cancel_sched_timer(scpu);
/*
* The caller is globally serialized and nobody else
* takes two locks at once, so deadlock is not possible.
@@ -1764,41 +1550,47 @@ static void migrate_hrtimers(int cpu)
spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
- if (migrate_hrtimer_list(&old_base->clock_base[i],
- &new_base->clock_base[i], cpu))
- raise = 1;
+ migrate_hrtimer_list(&old_base->clock_base[i],
+ &new_base->clock_base[i]);
}
- if (migrate_hrtimer_pending(old_base, new_base))
- raise = 1;
-
spin_unlock(&old_base->lock);
spin_unlock_irq(&new_base->lock);
put_cpu_var(hrtimer_bases);
- if (raise)
- hrtimer_raise_softirq();
+ return dcpu;
+}
+
+static void tickle_timers(void *arg)
+{
+ hrtimer_peek_ahead_timers();
}
+
#endif /* CONFIG_HOTPLUG_CPU */
static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
- unsigned int cpu = (long)hcpu;
+ int scpu = (long)hcpu;
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- init_hrtimers_cpu(cpu);
+ init_hrtimers_cpu(scpu);
break;
#ifdef CONFIG_HOTPLUG_CPU
case CPU_DEAD:
case CPU_DEAD_FROZEN:
- clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
- migrate_hrtimers(cpu);
+ {
+ int dcpu;
+
+ clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
+ dcpu = migrate_hrtimers(scpu);
+ smp_call_function_single(dcpu, tickle_timers, NULL, 0);
break;
+ }
#endif
default:
@@ -1817,9 +1609,6 @@ void __init hrtimers_init(void)
hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
(void *)(long)smp_processor_id());
register_cpu_notifier(&hrtimers_nb);
-#ifdef CONFIG_HIGH_RES_TIMERS
- open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
-#endif
}
/**
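Putting the hrtimer.c hunks above together, the CPU_DEAD path now boils down to the
following flow (a condensed sketch assembled only from the code above; error handling
and the clockevents CPU_DEAD notification are elided):

	/* runs on the CPU handling the CPU_DEAD notification */
	dcpu = migrate_hrtimers(scpu);	/* pull scpu's timers over; enqueue without reprogramming */
	smp_call_function_single(dcpu, tickle_timers, NULL, 0);
	/* tickle_timers() -> hrtimer_peek_ahead_timers(): expire anything that
	 * became due during the migration and reprogram the event device */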
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index 681c52dbfe22..4dd5b1edac98 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -3,3 +3,4 @@ obj-y := handle.o manage.o spurious.o resend.o chip.o devres.o
obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
+obj-$(CONFIG_NUMA_MIGRATE_IRQ_DESC) += numa_migrate.o
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index cc0f7321b8ce..650ce4102a63 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -40,6 +40,9 @@ unsigned long probe_irq_on(void)
* flush such a longstanding irq before considering it as spurious.
*/
for_each_irq_desc_reverse(i, desc) {
+ if (!desc)
+ continue;
+
spin_lock_irq(&desc->lock);
if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
/*
@@ -68,6 +71,9 @@ unsigned long probe_irq_on(void)
* happened in the previous stage, it may have masked itself)
*/
for_each_irq_desc_reverse(i, desc) {
+ if (!desc)
+ continue;
+
spin_lock_irq(&desc->lock);
if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
@@ -86,6 +92,9 @@ unsigned long probe_irq_on(void)
* Now filter out any obviously spurious interrupts
*/
for_each_irq_desc(i, desc) {
+ if (!desc)
+ continue;
+
spin_lock_irq(&desc->lock);
status = desc->status;
@@ -124,6 +133,9 @@ unsigned int probe_irq_mask(unsigned long val)
int i;
for_each_irq_desc(i, desc) {
+ if (!desc)
+ continue;
+
spin_lock_irq(&desc->lock);
status = desc->status;
@@ -166,6 +178,9 @@ int probe_irq_off(unsigned long val)
unsigned int status;
for_each_irq_desc(i, desc) {
+ if (!desc)
+ continue;
+
spin_lock_irq(&desc->lock);
status = desc->status;
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 10b5092e9bfe..6eb3c7952b64 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -24,9 +24,10 @@
*/
void dynamic_irq_init(unsigned int irq)
{
- struct irq_desc *desc = irq_to_desc(irq);
+ struct irq_desc *desc;
unsigned long flags;
+ desc = irq_to_desc(irq);
if (!desc) {
WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
return;
@@ -124,6 +125,7 @@ int set_irq_type(unsigned int irq, unsigned int type)
return -ENODEV;
}
+ type &= IRQ_TYPE_SENSE_MASK;
if (type == IRQ_TYPE_NONE)
return 0;
@@ -352,6 +354,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
spin_lock(&desc->lock);
mask_ack_irq(desc, irq);
+ desc = irq_remap_to_desc(irq, desc);
if (unlikely(desc->status & IRQ_INPROGRESS))
goto out_unlock;
@@ -429,6 +432,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
desc->status &= ~IRQ_INPROGRESS;
out:
desc->chip->eoi(irq);
+ desc = irq_remap_to_desc(irq, desc);
spin_unlock(&desc->lock);
}
@@ -465,12 +469,14 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
!desc->action)) {
desc->status |= (IRQ_PENDING | IRQ_MASKED);
mask_ack_irq(desc, irq);
+ desc = irq_remap_to_desc(irq, desc);
goto out_unlock;
}
kstat_incr_irqs_this_cpu(irq, desc);
/* Start handling the irq */
desc->chip->ack(irq);
+ desc = irq_remap_to_desc(irq, desc);
/* Mark the IRQ currently in progress.*/
desc->status |= IRQ_INPROGRESS;
@@ -531,8 +537,10 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- if (desc->chip->eoi)
+ if (desc->chip->eoi) {
desc->chip->eoi(irq);
+ desc = irq_remap_to_desc(irq, desc);
+ }
}
void
@@ -567,8 +575,10 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
/* Uninstall? */
if (handle == handle_bad_irq) {
- if (desc->chip != &no_irq_chip)
+ if (desc->chip != &no_irq_chip) {
mask_ack_irq(desc, irq);
+ desc = irq_remap_to_desc(irq, desc);
+ }
desc->status |= IRQ_DISABLED;
desc->depth = 1;
}
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index c815b42d0f5b..6492400cb50d 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -15,9 +15,16 @@
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
+#include <linux/rculist.h>
+#include <linux/hash.h>
#include "internals.h"
+/*
+ * lockdep: we want to handle all irq_desc locks as a single lock-class:
+ */
+struct lock_class_key irq_desc_lock_class;
+
/**
* handle_bad_irq - handle spurious and unhandled irqs
* @irq: the interrupt number
@@ -49,6 +56,155 @@ void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);
+void __init __attribute__((weak)) arch_early_irq_init(void)
+{
+}
+
+#ifdef CONFIG_SPARSE_IRQ
+static struct irq_desc irq_desc_init = {
+ .irq = -1,
+ .status = IRQ_DISABLED,
+ .chip = &no_irq_chip,
+ .handle_irq = handle_bad_irq,
+ .depth = 1,
+ .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+#ifdef CONFIG_SMP
+ .affinity = CPU_MASK_ALL
+#endif
+};
+
+void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
+{
+ unsigned long bytes;
+ char *ptr;
+ int node;
+
+ /* Compute how many bytes we need per irq and allocate them */
+ bytes = nr * sizeof(unsigned int);
+
+ node = cpu_to_node(cpu);
+ ptr = kzalloc_node(bytes, GFP_ATOMIC, node);
+ printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n", cpu, node);
+
+ if (ptr)
+ desc->kstat_irqs = (unsigned int *)ptr;
+}
+
+void __attribute__((weak)) arch_init_chip_data(struct irq_desc *desc, int cpu)
+{
+}
+
+static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
+{
+ memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
+ desc->irq = irq;
+#ifdef CONFIG_SMP
+ desc->cpu = cpu;
+#endif
+ lockdep_set_class(&desc->lock, &irq_desc_lock_class);
+ init_kstat_irqs(desc, cpu, nr_cpu_ids);
+ if (!desc->kstat_irqs) {
+ printk(KERN_ERR "can not alloc kstat_irqs\n");
+ BUG_ON(1);
+ }
+ arch_init_chip_data(desc, cpu);
+}
+
+/*
+ * Protect the sparse_irqs:
+ */
+DEFINE_SPINLOCK(sparse_irq_lock);
+
+struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;
+
+static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
+ [0 ... NR_IRQS_LEGACY-1] = {
+ .irq = -1,
+ .status = IRQ_DISABLED,
+ .chip = &no_irq_chip,
+ .handle_irq = handle_bad_irq,
+ .depth = 1,
+ .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+#ifdef CONFIG_SMP
+ .affinity = CPU_MASK_ALL
+#endif
+ }
+};
+
+/* FIXME: use bootmem alloc ...*/
+static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];
+
+void __init early_irq_init(void)
+{
+ struct irq_desc *desc;
+ int legacy_count;
+ int i;
+
+ desc = irq_desc_legacy;
+ legacy_count = ARRAY_SIZE(irq_desc_legacy);
+
+ for (i = 0; i < legacy_count; i++) {
+ desc[i].irq = i;
+ desc[i].kstat_irqs = kstat_irqs_legacy[i];
+
+ irq_desc_ptrs[i] = desc + i;
+ }
+
+ for (i = legacy_count; i < NR_IRQS; i++)
+ irq_desc_ptrs[i] = NULL;
+
+ arch_early_irq_init();
+}
+
+struct irq_desc *irq_to_desc(unsigned int irq)
+{
+ return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
+}
+
+struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
+{
+ struct irq_desc *desc;
+ unsigned long flags;
+ int node;
+
+ if (irq >= NR_IRQS) {
+ printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
+ irq, NR_IRQS);
+ WARN_ON(1);
+ return NULL;
+ }
+
+ desc = irq_desc_ptrs[irq];
+ if (desc)
+ return desc;
+
+ spin_lock_irqsave(&sparse_irq_lock, flags);
+
+ /* We have to check it to avoid races with another CPU */
+ desc = irq_desc_ptrs[irq];
+ if (desc)
+ goto out_unlock;
+
+ node = cpu_to_node(cpu);
+ desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
+ printk(KERN_DEBUG " alloc irq_desc for %d on cpu %d node %d\n",
+ irq, cpu, node);
+ if (!desc) {
+ printk(KERN_ERR "can not alloc irq_desc\n");
+ BUG_ON(1);
+ }
+ init_one_irq_desc(irq, desc, cpu);
+
+ irq_desc_ptrs[irq] = desc;
+
+out_unlock:
+ spin_unlock_irqrestore(&sparse_irq_lock, flags);
+
+ return desc;
+}
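irq_to_desc_alloc_cpu() above uses the classic check / lock / re-check pattern so that
two CPUs racing to populate the same irq_desc_ptrs[] slot cannot both install a
descriptor. A minimal user-space analogue of the pattern (hypothetical names, a pthread
mutex standing in for sparse_irq_lock, and ignoring the memory-ordering details the
lockless fast path needs in real kernel code):

	#include <pthread.h>
	#include <stdlib.h>

	#define NSLOTS 128

	static void *slots[NSLOTS];
	static pthread_mutex_t slots_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Return the object for idx, allocating it on first use. */
	static void *slot_get(unsigned int idx)
	{
		void *p;

		if (idx >= NSLOTS)
			return NULL;

		p = slots[idx];			/* fast path: already populated */
		if (p)
			return p;

		pthread_mutex_lock(&slots_lock);
		p = slots[idx];			/* re-check: another thread may have won the race */
		if (!p) {
			p = calloc(1, 64);	/* stands in for kzalloc_node() */
			slots[idx] = p;
		}
		pthread_mutex_unlock(&slots_lock);
		return p;
	}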
+
+#else
+
struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
[0 ... NR_IRQS-1] = {
.status = IRQ_DISABLED,
@@ -62,6 +218,8 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
}
};
+#endif
+
/*
* What should we do if we get a hw irq event on an illegal vector?
* Each architecture has to answer this themself.
@@ -179,8 +337,11 @@ unsigned int __do_IRQ(unsigned int irq)
/*
* No locking required for CPU-local interrupts:
*/
- if (desc->chip->ack)
+ if (desc->chip->ack) {
desc->chip->ack(irq);
+ /* get new one */
+ desc = irq_remap_to_desc(irq, desc);
+ }
if (likely(!(desc->status & IRQ_DISABLED))) {
action_ret = handle_IRQ_event(irq, desc->action);
if (!noirqdebug)
@@ -191,8 +352,10 @@ unsigned int __do_IRQ(unsigned int irq)
}
spin_lock(&desc->lock);
- if (desc->chip->ack)
+ if (desc->chip->ack) {
desc->chip->ack(irq);
+ desc = irq_remap_to_desc(irq, desc);
+ }
/*
* REPLAY is when Linux resends an IRQ that was dropped earlier
* WAITING is used by probe to mark irqs that are being tested
@@ -259,19 +422,25 @@ out:
}
#endif
-
-#ifdef CONFIG_TRACE_IRQFLAGS
-/*
- * lockdep: we want to handle all irq_desc locks as a single lock-class:
- */
-static struct lock_class_key irq_desc_lock_class;
-
void early_init_irq_lock_class(void)
{
struct irq_desc *desc;
int i;
- for_each_irq_desc(i, desc)
+ for_each_irq_desc(i, desc) {
+ if (!desc)
+ continue;
+
lockdep_set_class(&desc->lock, &irq_desc_lock_class);
+ }
+}
+
+#ifdef CONFIG_SPARSE_IRQ
+unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ return desc->kstat_irqs[cpu];
}
#endif
+EXPORT_SYMBOL(kstat_irqs_cpu);
+
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 64c1c7253dae..e6d0a43cc125 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -13,6 +13,11 @@ extern void compat_irq_chip_set_default_handler(struct irq_desc *desc);
extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
unsigned long flags);
+extern struct lock_class_key irq_desc_lock_class;
+extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr);
+extern spinlock_t sparse_irq_lock;
+extern struct irq_desc *irq_desc_ptrs[NR_IRQS];
+
#ifdef CONFIG_PROC_FS
extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
extern void register_handler_proc(unsigned int irq, struct irqaction *action);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 801addda3c43..540f6c49f3fa 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -370,16 +370,18 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
return 0;
}
- ret = chip->set_type(irq, flags & IRQF_TRIGGER_MASK);
+ /* caller masked out all except trigger mode flags */
+ ret = chip->set_type(irq, flags);
if (ret)
pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
- (int)(flags & IRQF_TRIGGER_MASK),
- irq, chip->set_type);
+ (int)flags, irq, chip->set_type);
else {
+ if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
+ flags |= IRQ_LEVEL;
/* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
- desc->status &= ~IRQ_TYPE_SENSE_MASK;
- desc->status |= flags & IRQ_TYPE_SENSE_MASK;
+ desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
+ desc->status |= flags;
}
return ret;
@@ -459,7 +461,8 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
/* Setup the type (level, edge polarity) if configured: */
if (new->flags & IRQF_TRIGGER_MASK) {
- ret = __irq_set_trigger(desc, irq, new->flags);
+ ret = __irq_set_trigger(desc, irq,
+ new->flags & IRQF_TRIGGER_MASK);
if (ret) {
spin_unlock_irqrestore(&desc->lock, flags);
@@ -673,6 +676,18 @@ int request_irq(unsigned int irq, irq_handler_t handler,
struct irq_desc *desc;
int retval;
+ /*
+ * handle_IRQ_event() always ignores IRQF_DISABLED except for
+ * the _first_ irqaction (sigh). That can cause oopsing, but
+ * the behavior is classified as "will not fix" so we need to
+ * start nudging drivers away from using that idiom.
+ */
+ if ((irqflags & (IRQF_SHARED|IRQF_DISABLED))
+ == (IRQF_SHARED|IRQF_DISABLED))
+ pr_warning("IRQ %d/%s: IRQF_DISABLED is not "
+ "guaranteed on shared IRQs\n",
+ irq, devname);
+
#ifdef CONFIG_LOCKDEP
/*
* Lockdep wants atomic interrupt handlers:
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
new file mode 100644
index 000000000000..089c3746358a
--- /dev/null
+++ b/kernel/irq/numa_migrate.c
@@ -0,0 +1,122 @@
+/*
+ * NUMA irq-desc migration code
+ *
+ * Migrate IRQ data structures (irq_desc, chip_data, etc.) over to
+ * the new "home node" of the IRQ.
+ */
+
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+
+#include "internals.h"
+
+static void init_copy_kstat_irqs(struct irq_desc *old_desc,
+ struct irq_desc *desc,
+ int cpu, int nr)
+{
+ unsigned long bytes;
+
+ init_kstat_irqs(desc, cpu, nr);
+
+ if (desc->kstat_irqs != old_desc->kstat_irqs) {
+ /* Compute how many bytes we need per irq and allocate them */
+ bytes = nr * sizeof(unsigned int);
+
+ memcpy(desc->kstat_irqs, old_desc->kstat_irqs, bytes);
+ }
+}
+
+static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
+{
+ if (old_desc->kstat_irqs == desc->kstat_irqs)
+ return;
+
+ kfree(old_desc->kstat_irqs);
+ old_desc->kstat_irqs = NULL;
+}
+
+static void init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
+ struct irq_desc *desc, int cpu)
+{
+ memcpy(desc, old_desc, sizeof(struct irq_desc));
+ desc->cpu = cpu;
+ lockdep_set_class(&desc->lock, &irq_desc_lock_class);
+ init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids);
+ arch_init_copy_chip_data(old_desc, desc, cpu);
+}
+
+static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
+{
+ free_kstat_irqs(old_desc, desc);
+ arch_free_chip_data(old_desc, desc);
+}
+
+static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
+ int cpu)
+{
+ struct irq_desc *desc;
+ unsigned int irq;
+ unsigned long flags;
+ int node;
+
+ irq = old_desc->irq;
+
+ spin_lock_irqsave(&sparse_irq_lock, flags);
+
+ /* We have to check it to avoid races with another CPU */
+ desc = irq_desc_ptrs[irq];
+
+ if (desc && old_desc != desc)
+ goto out_unlock;
+
+ node = cpu_to_node(cpu);
+ desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
+ printk(KERN_DEBUG " move irq_desc for %d to cpu %d node %d\n",
+ irq, cpu, node);
+ if (!desc) {
+ printk(KERN_ERR "can not get new irq_desc for moving\n");
+ /* still use old one */
+ desc = old_desc;
+ goto out_unlock;
+ }
+ init_copy_one_irq_desc(irq, old_desc, desc, cpu);
+
+ irq_desc_ptrs[irq] = desc;
+
+ /* free the old one */
+ free_one_irq_desc(old_desc, desc);
+ kfree(old_desc);
+
+out_unlock:
+ spin_unlock_irqrestore(&sparse_irq_lock, flags);
+
+ return desc;
+}
+
+struct irq_desc *move_irq_desc(struct irq_desc *desc, int cpu)
+{
+ int old_cpu;
+ int node, old_node;
+
+ /* those are all statically allocated; do not move them */
+ if (desc->irq < NR_IRQS_LEGACY)
+ return desc;
+
+ old_cpu = desc->cpu;
+ printk(KERN_DEBUG
+ "try to move irq_desc from cpu %d to %d\n", old_cpu, cpu);
+ if (old_cpu != cpu) {
+ node = cpu_to_node(cpu);
+ old_node = cpu_to_node(old_cpu);
+ if (old_node != node)
+ desc = __real_move_irq_desc(desc, cpu);
+ else
+ desc->cpu = cpu;
+ }
+
+ return desc;
+}
+
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index d257e7d6a8a4..f6b3440f05bc 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -243,7 +243,11 @@ void init_irq_proc(void)
/*
* Create entries for all existing IRQs.
*/
- for_each_irq_desc(irq, desc)
+ for_each_irq_desc(irq, desc) {
+ if (!desc)
+ continue;
+
register_irq_proc(irq, desc);
+ }
}
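With CONFIG_SPARSE_IRQ the irq_desc_ptrs[] table can contain NULL holes for interrupts
that were never allocated, which is why this hunk and the ones in autoprobe.c and
spurious.c all grow the same guard. The resulting walker idiom, spelled out here only
for illustration, is:

	int irq;
	struct irq_desc *desc;

	for_each_irq_desc(irq, desc) {
		if (!desc)	/* hole in irq_desc_ptrs[]: irq never allocated */
			continue;
		/* ... operate on desc ... */
	}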
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index dd364c11e56e..3738107531fd 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -91,6 +91,9 @@ static int misrouted_irq(int irq)
int i, ok = 0;
for_each_irq_desc(i, desc) {
+ if (!desc)
+ continue;
+
if (!i)
continue;
@@ -112,6 +115,8 @@ static void poll_spurious_irqs(unsigned long dummy)
for_each_irq_desc(i, desc) {
unsigned int status;
+ if (!desc)
+ continue;
if (!i)
continue;
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 74b1878b8bb8..06b0c3568f0b 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -137,16 +137,16 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
#ifdef CONFIG_LOCK_STAT
static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
-static int lock_contention_point(struct lock_class *class, unsigned long ip)
+static int lock_point(unsigned long points[], unsigned long ip)
{
int i;
- for (i = 0; i < ARRAY_SIZE(class->contention_point); i++) {
- if (class->contention_point[i] == 0) {
- class->contention_point[i] = ip;
+ for (i = 0; i < LOCKSTAT_POINTS; i++) {
+ if (points[i] == 0) {
+ points[i] = ip;
break;
}
- if (class->contention_point[i] == ip)
+ if (points[i] == ip)
break;
}
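lock_point() either returns the slot that already records ip or claims the first empty
slot for it; if all LOCKSTAT_POINTS slots hold other addresses, i reaches
LOCKSTAT_POINTS and the callers below simply drop the sample (the "< LOCKSTAT_POINTS"
checks in __lock_contended). A stand-alone rendering with a toy table size, assuming the
function returns the index i as its predecessor lock_contention_point() did (values are
for illustration only):

	#include <stdio.h>

	#define LOCKSTAT_POINTS 4

	static int lock_point(unsigned long points[], unsigned long ip)
	{
		int i;

		for (i = 0; i < LOCKSTAT_POINTS; i++) {
			if (points[i] == 0) {
				points[i] = ip;		/* claim the first free slot */
				break;
			}
			if (points[i] == ip)		/* already recorded */
				break;
		}
		return i;
	}

	int main(void)
	{
		unsigned long points[LOCKSTAT_POINTS] = { 0 };

		printf("%d\n", lock_point(points, 0xa1));	/* 0: claims slot 0 */
		printf("%d\n", lock_point(points, 0xb2));	/* 1: claims slot 1 */
		printf("%d\n", lock_point(points, 0xa1));	/* 0: found existing slot */
		return 0;
	}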
@@ -186,6 +186,9 @@ struct lock_class_stats lock_stats(struct lock_class *class)
for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
stats.contention_point[i] += pcs->contention_point[i];
+ for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
+ stats.contending_point[i] += pcs->contending_point[i];
+
lock_time_add(&pcs->read_waittime, &stats.read_waittime);
lock_time_add(&pcs->write_waittime, &stats.write_waittime);
@@ -210,6 +213,7 @@ void clear_lock_stats(struct lock_class *class)
memset(cpu_stats, 0, sizeof(struct lock_class_stats));
}
memset(class->contention_point, 0, sizeof(class->contention_point));
+ memset(class->contending_point, 0, sizeof(class->contending_point));
}
static struct lock_class_stats *get_lock_stats(struct lock_class *class)
@@ -288,14 +292,12 @@ void lockdep_off(void)
{
current->lockdep_recursion++;
}
-
EXPORT_SYMBOL(lockdep_off);
void lockdep_on(void)
{
current->lockdep_recursion--;
}
-
EXPORT_SYMBOL(lockdep_on);
/*
@@ -577,7 +579,8 @@ static void print_lock_class_header(struct lock_class *class, int depth)
/*
* printk all lock dependencies starting at <entry>:
*/
-static void print_lock_dependencies(struct lock_class *class, int depth)
+static void __used
+print_lock_dependencies(struct lock_class *class, int depth)
{
struct lock_list *entry;
@@ -2509,7 +2512,6 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
if (subclass)
register_lock_class(lock, subclass, 1);
}
-
EXPORT_SYMBOL_GPL(lockdep_init_map);
/*
@@ -2690,8 +2692,9 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
}
static int
-__lock_set_subclass(struct lockdep_map *lock,
- unsigned int subclass, unsigned long ip)
+__lock_set_class(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, unsigned int subclass,
+ unsigned long ip)
{
struct task_struct *curr = current;
struct held_lock *hlock, *prev_hlock;
@@ -2718,6 +2721,7 @@ __lock_set_subclass(struct lockdep_map *lock,
return print_unlock_inbalance_bug(curr, lock, ip);
found_it:
+ lockdep_init_map(lock, name, key, 0);
class = register_lock_class(lock, subclass, 0);
hlock->class_idx = class - lock_classes + 1;
@@ -2902,9 +2906,9 @@ static void check_flags(unsigned long flags)
#endif
}
-void
-lock_set_subclass(struct lockdep_map *lock,
- unsigned int subclass, unsigned long ip)
+void lock_set_class(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, unsigned int subclass,
+ unsigned long ip)
{
unsigned long flags;
@@ -2914,13 +2918,12 @@ lock_set_subclass(struct lockdep_map *lock,
raw_local_irq_save(flags);
current->lockdep_recursion = 1;
check_flags(flags);
- if (__lock_set_subclass(lock, subclass, ip))
+ if (__lock_set_class(lock, name, key, subclass, ip))
check_chain_key(current);
current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
}
-
-EXPORT_SYMBOL_GPL(lock_set_subclass);
+EXPORT_SYMBOL_GPL(lock_set_class);
/*
* We are not always called with irqs disabled - do that here,
@@ -2944,7 +2947,6 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
}
-
EXPORT_SYMBOL_GPL(lock_acquire);
void lock_release(struct lockdep_map *lock, int nested,
@@ -2962,7 +2964,6 @@ void lock_release(struct lockdep_map *lock, int nested,
current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
}
-
EXPORT_SYMBOL_GPL(lock_release);
#ifdef CONFIG_LOCK_STAT
@@ -3000,7 +3001,7 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
struct held_lock *hlock, *prev_hlock;
struct lock_class_stats *stats;
unsigned int depth;
- int i, point;
+ int i, contention_point, contending_point;
depth = curr->lockdep_depth;
if (DEBUG_LOCKS_WARN_ON(!depth))
@@ -3024,18 +3025,22 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
found_it:
hlock->waittime_stamp = sched_clock();
- point = lock_contention_point(hlock_class(hlock), ip);
+ contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
+ contending_point = lock_point(hlock_class(hlock)->contending_point,
+ lock->ip);
stats = get_lock_stats(hlock_class(hlock));
- if (point < ARRAY_SIZE(stats->contention_point))
- stats->contention_point[point]++;
+ if (contention_point < LOCKSTAT_POINTS)
+ stats->contention_point[contention_point]++;
+ if (contending_point < LOCKSTAT_POINTS)
+ stats->contending_point[contending_point]++;
if (lock->cpu != smp_processor_id())
stats->bounces[bounce_contended + !!hlock->read]++;
put_lock_stats(stats);
}
static void
-__lock_acquired(struct lockdep_map *lock)
+__lock_acquired(struct lockdep_map *lock, unsigned long ip)
{
struct task_struct *curr = current;
struct held_lock *hlock, *prev_hlock;
@@ -3084,6 +3089,7 @@ found_it:
put_lock_stats(stats);
lock->cpu = cpu;
+ lock->ip = ip;
}
void lock_contended(struct lockdep_map *lock, unsigned long ip)
@@ -3105,7 +3111,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
}
EXPORT_SYMBOL_GPL(lock_contended);
-void lock_acquired(struct lockdep_map *lock)
+void lock_acquired(struct lockdep_map *lock, unsigned long ip)
{
unsigned long flags;
@@ -3118,7 +3124,7 @@ void lock_acquired(struct lockdep_map *lock)
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
- __lock_acquired(lock);
+ __lock_acquired(lock, ip);
current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
}
@@ -3442,7 +3448,6 @@ retry:
if (unlock)
read_unlock(&tasklist_lock);
}
-
EXPORT_SYMBOL_GPL(debug_show_all_locks);
/*
@@ -3463,7 +3468,6 @@ void debug_show_held_locks(struct task_struct *task)
{
__debug_show_held_locks(task);
}
-
EXPORT_SYMBOL_GPL(debug_show_held_locks);
void lockdep_sys_exit(void)
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index 20dbcbf9c7dd..13716b813896 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -470,11 +470,12 @@ static void seq_line(struct seq_file *m, char c, int offset, int length)
static void snprint_time(char *buf, size_t bufsiz, s64 nr)
{
- unsigned long rem;
+ s64 div;
+ s32 rem;
nr += 5; /* for display rounding */
- rem = do_div(nr, 1000); /* XXX: do_div_signed */
- snprintf(buf, bufsiz, "%lld.%02d", (long long)nr, (int)rem/10);
+ div = div_s64_rem(nr, 1000, &rem);
+ snprintf(buf, bufsiz, "%lld.%02d", (long long)div, (int)rem/10);
}
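The values handed to snprint_time() are sched_clock() deltas, i.e. nanoseconds, and the
routine prints them as microseconds with two decimal places. Worked through for
nr = 1234567 ns (an input chosen just for illustration): nr += 5 gives 1234572,
div_s64_rem(1234572, 1000, &rem) yields div = 1234 and rem = 572, rem/10 = 57, so the
buffer ends up holding "1234.57".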
static void seq_time(struct seq_file *m, s64 time)
@@ -556,7 +557,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
if (stats->read_holdtime.nr)
namelen += 2;
- for (i = 0; i < ARRAY_SIZE(class->contention_point); i++) {
+ for (i = 0; i < LOCKSTAT_POINTS; i++) {
char sym[KSYM_SYMBOL_LEN];
char ip[32];
@@ -573,6 +574,23 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
stats->contention_point[i],
ip, sym);
}
+ for (i = 0; i < LOCKSTAT_POINTS; i++) {
+ char sym[KSYM_SYMBOL_LEN];
+ char ip[32];
+
+ if (class->contending_point[i] == 0)
+ break;
+
+ if (!i)
+ seq_line(m, '-', 40-namelen, namelen);
+
+ sprint_symbol(sym, class->contending_point[i]);
+ snprintf(ip, sizeof(ip), "[<%p>]",
+ (void *)class->contending_point[i]);
+ seq_printf(m, "%40s %14lu %29s %s\n", name,
+ stats->contending_point[i],
+ ip, sym);
+ }
if (i) {
seq_puts(m, "\n");
seq_line(m, '.', 0, 40 + 1 + 10 * (14 + 1));
@@ -582,7 +600,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
static void seq_header(struct seq_file *m)
{
- seq_printf(m, "lock_stat version 0.2\n");
+ seq_printf(m, "lock_stat version 0.3\n");
seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1));
seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s "
"%14s %14s\n",
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 12c779dc65d4..4f45d4b658ef 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -59,7 +59,7 @@ EXPORT_SYMBOL(__mutex_init);
* We also put the fastpath first in the kernel image, to make sure the
* branch is predicted by the CPU as default-untaken.
*/
-static void noinline __sched
+static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);
/***
@@ -96,7 +96,7 @@ void inline __sched mutex_lock(struct mutex *lock)
EXPORT_SYMBOL(mutex_lock);
#endif
-static noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
+static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
/***
* mutex_unlock - release the mutex
@@ -184,7 +184,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
}
done:
- lock_acquired(&lock->dep_map);
+ lock_acquired(&lock->dep_map, ip);
/* got the lock - rejoice! */
mutex_remove_waiter(lock, &waiter, task_thread_info(task));
debug_mutex_set_owner(lock, task_thread_info(task));
@@ -268,7 +268,7 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
/*
* Release the lock, slowpath:
*/
-static noinline void
+static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
__mutex_unlock_common_slowpath(lock_count, 1);
@@ -313,7 +313,7 @@ int __sched mutex_lock_killable(struct mutex *lock)
}
EXPORT_SYMBOL(mutex_lock_killable);
-static noinline void __sched
+static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
struct mutex *lock = container_of(lock_count, struct mutex, count);
diff --git a/kernel/notifier.c b/kernel/notifier.c
index 4282c0a40a57..61d5aa5eced3 100644
--- a/kernel/notifier.c
+++ b/kernel/notifier.c
@@ -82,6 +82,14 @@ static int __kprobes notifier_call_chain(struct notifier_block **nl,
while (nb && nr_to_call) {
next_nb = rcu_dereference(nb->next);
+
+#ifdef CONFIG_DEBUG_NOTIFIERS
+ if (unlikely(!func_ptr_is_kernel_text(nb->notifier_call))) {
+ WARN(1, "Invalid notifier called!");
+ nb = next_nb;
+ continue;
+ }
+#endif
ret = nb->notifier_call(nb, val, v);
if (nr_calls)
diff --git a/kernel/panic.c b/kernel/panic.c
index 4d5088355bfe..13f06349a786 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -21,6 +21,7 @@
#include <linux/debug_locks.h>
#include <linux/random.h>
#include <linux/kallsyms.h>
+#include <linux/dmi.h>
int panic_on_oops;
static unsigned long tainted_mask;
@@ -321,36 +322,27 @@ void oops_exit(void)
}
#ifdef WANT_WARN_ON_SLOWPATH
-void warn_on_slowpath(const char *file, int line)
-{
- char function[KSYM_SYMBOL_LEN];
- unsigned long caller = (unsigned long) __builtin_return_address(0);
- sprint_symbol(function, caller);
-
- printk(KERN_WARNING "------------[ cut here ]------------\n");
- printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file,
- line, function);
- print_modules();
- dump_stack();
- print_oops_end_marker();
- add_taint(TAINT_WARN);
-}
-EXPORT_SYMBOL(warn_on_slowpath);
-
-
void warn_slowpath(const char *file, int line, const char *fmt, ...)
{
va_list args;
char function[KSYM_SYMBOL_LEN];
unsigned long caller = (unsigned long)__builtin_return_address(0);
+ const char *board;
+
sprint_symbol(function, caller);
printk(KERN_WARNING "------------[ cut here ]------------\n");
printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file,
line, function);
- va_start(args, fmt);
- vprintk(fmt, args);
- va_end(args);
+ board = dmi_get_system_info(DMI_PRODUCT_NAME);
+ if (board)
+ printk(KERN_WARNING "Hardware name: %s\n", board);
+
+ if (fmt) {
+ va_start(args, fmt);
+ vprintk(fmt, args);
+ va_end(args);
+ }
print_modules();
dump_stack();
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 4e5288a831de..157de3a47832 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -58,21 +58,21 @@ void thread_group_cputime(
struct task_struct *tsk,
struct task_cputime *times)
{
- struct signal_struct *sig;
+ struct task_cputime *totals, *tot;
int i;
- struct task_cputime *tot;
- sig = tsk->signal;
- if (unlikely(!sig) || !sig->cputime.totals) {
+ totals = tsk->signal->cputime.totals;
+ if (!totals) {
times->utime = tsk->utime;
times->stime = tsk->stime;
times->sum_exec_runtime = tsk->se.sum_exec_runtime;
return;
}
+
times->stime = times->utime = cputime_zero;
times->sum_exec_runtime = 0;
for_each_possible_cpu(i) {
- tot = per_cpu_ptr(tsk->signal->cputime.totals, i);
+ tot = per_cpu_ptr(totals, i);
times->utime = cputime_add(times->utime, tot->utime);
times->stime = cputime_add(times->stime, tot->stime);
times->sum_exec_runtime += tot->sum_exec_runtime;
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index a140e44eebba..887c63787de6 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -116,7 +116,7 @@ static DEFINE_SPINLOCK(idr_lock);
* must supply functions here, even if the function just returns
* ENOSYS. The standard POSIX timer management code assumes the
* following: 1.) The k_itimer struct (sched.h) is used for the
- * timer. 2.) The list, it_lock, it_clock, it_id and it_process
+ * timer. 2.) The list, it_lock, it_clock, it_id and it_pid
* fields are not modified by timer code.
*
* At this time all functions EXCEPT clock_nanosleep can be
@@ -319,7 +319,8 @@ void do_schedule_next_timer(struct siginfo *info)
int posix_timer_event(struct k_itimer *timr, int si_private)
{
- int shared, ret;
+ struct task_struct *task;
+ int shared, ret = -1;
/*
* FIXME: if ->sigq is queued we can race with
* dequeue_signal()->do_schedule_next_timer().
@@ -333,8 +334,13 @@ int posix_timer_event(struct k_itimer *timr, int si_private)
*/
timr->sigq->info.si_sys_private = si_private;
- shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
- ret = send_sigqueue(timr->sigq, timr->it_process, shared);
+ rcu_read_lock();
+ task = pid_task(timr->it_pid, PIDTYPE_PID);
+ if (task) {
+ shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
+ ret = send_sigqueue(timr->sigq, task, shared);
+ }
+ rcu_read_unlock();
/* If we failed to send the signal the timer stops. */
return ret > 0;
}
@@ -411,7 +417,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
return ret;
}
-static struct task_struct * good_sigevent(sigevent_t * event)
+static struct pid *good_sigevent(sigevent_t * event)
{
struct task_struct *rtn = current->group_leader;
@@ -425,7 +431,7 @@ static struct task_struct * good_sigevent(sigevent_t * event)
((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
return NULL;
- return rtn;
+ return task_pid(rtn);
}
void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
@@ -464,6 +470,7 @@ static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
idr_remove(&posix_timers_id, tmr->it_id);
spin_unlock_irqrestore(&idr_lock, flags);
}
+ put_pid(tmr->it_pid);
sigqueue_free(tmr->sigq);
kmem_cache_free(posix_timers_cache, tmr);
}
@@ -477,7 +484,6 @@ sys_timer_create(const clockid_t which_clock,
{
struct k_itimer *new_timer;
int error, new_timer_id;
- struct task_struct *process;
sigevent_t event;
int it_id_set = IT_ID_NOT_SET;
@@ -531,11 +537,9 @@ sys_timer_create(const clockid_t which_clock,
goto out;
}
rcu_read_lock();
- process = good_sigevent(&event);
- if (process)
- get_task_struct(process);
+ new_timer->it_pid = get_pid(good_sigevent(&event));
rcu_read_unlock();
- if (!process) {
+ if (!new_timer->it_pid) {
error = -EINVAL;
goto out;
}
@@ -543,8 +547,7 @@ sys_timer_create(const clockid_t which_clock,
event.sigev_notify = SIGEV_SIGNAL;
event.sigev_signo = SIGALRM;
event.sigev_value.sival_int = new_timer->it_id;
- process = current->group_leader;
- get_task_struct(process);
+ new_timer->it_pid = get_pid(task_tgid(current));
}
new_timer->it_sigev_notify = event.sigev_notify;
@@ -554,7 +557,7 @@ sys_timer_create(const clockid_t which_clock,
new_timer->sigq->info.si_code = SI_TIMER;
spin_lock_irq(&current->sighand->siglock);
- new_timer->it_process = process;
+ new_timer->it_signal = current->signal;
list_add(&new_timer->list, &current->signal->posix_timers);
spin_unlock_irq(&current->sighand->siglock);
@@ -589,8 +592,7 @@ static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags)
timr = idr_find(&posix_timers_id, (int)timer_id);
if (timr) {
spin_lock(&timr->it_lock);
- if (timr->it_process &&
- same_thread_group(timr->it_process, current)) {
+ if (timr->it_signal == current->signal) {
spin_unlock(&idr_lock);
return timr;
}
@@ -837,8 +839,7 @@ retry_delete:
* This keeps any tasks waiting on the spin lock from thinking
* they got something (see the lock code above).
*/
- put_task_struct(timer->it_process);
- timer->it_process = NULL;
+ timer->it_signal = NULL;
unlock_timer(timer, flags);
release_posix_timer(timer, IT_ID_SET);
@@ -864,8 +865,7 @@ retry_delete:
* This keeps any tasks waiting on the spin lock from thinking
* they got something (see the lock code above).
*/
- put_task_struct(timer->it_process);
- timer->it_process = NULL;
+ timer->it_signal = NULL;
unlock_timer(timer, flags);
release_posix_timer(timer, IT_ID_SET);
diff --git a/kernel/printk.c b/kernel/printk.c
index f492f1583d77..e651ab05655f 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -662,7 +662,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
if (recursion_bug) {
recursion_bug = 0;
strcpy(printk_buf, recursion_bug_msg);
- printed_len = sizeof(recursion_bug_msg);
+ printed_len = strlen(recursion_bug_msg);
}
/* Emit the output into the temporary buffer */
printed_len += vscnprintf(printk_buf + printed_len,
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index 37f72e551542..e503a002f330 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -191,7 +191,7 @@ static void print_other_cpu_stall(struct rcu_ctrlblk *rcp)
/* OK, time to rat on our buddy... */
- printk(KERN_ERR "RCU detected CPU stalls:");
+ printk(KERN_ERR "INFO: RCU detected CPU stalls:");
for_each_possible_cpu(cpu) {
if (cpu_isset(cpu, rcp->cpumask))
printk(" %d", cpu);
@@ -204,7 +204,7 @@ static void print_cpu_stall(struct rcu_ctrlblk *rcp)
{
unsigned long flags;
- printk(KERN_ERR "RCU detected CPU %d stall (t=%lu/%lu jiffies)\n",
+ printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu/%lu jiffies)\n",
smp_processor_id(), jiffies,
jiffies - rcp->gp_start);
dump_stack();
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 59236e8b9daa..04982659875a 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -551,6 +551,16 @@ void rcu_irq_exit(void)
}
}
+void rcu_nmi_enter(void)
+{
+ rcu_irq_enter();
+}
+
+void rcu_nmi_exit(void)
+{
+ rcu_irq_exit();
+}
+
static void dyntick_save_progress_counter(int cpu)
{
struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
diff --git a/kernel/rcupreempt_trace.c b/kernel/rcupreempt_trace.c
index 35c2d3360ecf..7c2665cac172 100644
--- a/kernel/rcupreempt_trace.c
+++ b/kernel/rcupreempt_trace.c
@@ -149,12 +149,12 @@ static void rcupreempt_trace_sum(struct rcupreempt_trace *sp)
sp->done_length += cp->done_length;
sp->done_add += cp->done_add;
sp->done_remove += cp->done_remove;
- atomic_set(&sp->done_invoked, atomic_read(&cp->done_invoked));
+ atomic_add(atomic_read(&cp->done_invoked), &sp->done_invoked);
sp->rcu_check_callbacks += cp->rcu_check_callbacks;
- atomic_set(&sp->rcu_try_flip_1,
- atomic_read(&cp->rcu_try_flip_1));
- atomic_set(&sp->rcu_try_flip_e1,
- atomic_read(&cp->rcu_try_flip_e1));
+ atomic_add(atomic_read(&cp->rcu_try_flip_1),
+ &sp->rcu_try_flip_1);
+ atomic_add(atomic_read(&cp->rcu_try_flip_e1),
+ &sp->rcu_try_flip_e1);
sp->rcu_try_flip_i1 += cp->rcu_try_flip_i1;
sp->rcu_try_flip_ie1 += cp->rcu_try_flip_ie1;
sp->rcu_try_flip_g1 += cp->rcu_try_flip_g1;
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 85cb90588a55..b31065522104 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -39,6 +39,7 @@
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
+#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
@@ -108,7 +109,6 @@ struct rcu_torture {
int rtort_mbtest;
};
-static int fullstop = 0; /* stop generating callbacks at test end. */
static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture *rcu_torture_current = NULL;
static long rcu_torture_current_version = 0;
@@ -136,6 +136,30 @@ static int stutter_pause_test = 0;
#endif
int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
+#define FULLSTOP_SIGNALED 1 /* Bail due to signal. */
+#define FULLSTOP_CLEANUP 2 /* Orderly shutdown. */
+static int fullstop; /* stop generating callbacks at test end. */
+DEFINE_MUTEX(fullstop_mutex); /* protect fullstop transitions and */
+ /* spawning of kthreads. */
+
+/*
+ * Detect and respond to a signal-based shutdown.
+ */
+static int
+rcutorture_shutdown_notify(struct notifier_block *unused1,
+ unsigned long unused2, void *unused3)
+{
+ if (fullstop)
+ return NOTIFY_DONE;
+ if (signal_pending(current)) {
+ mutex_lock(&fullstop_mutex);
+ if (!ACCESS_ONCE(fullstop))
+ fullstop = FULLSTOP_SIGNALED;
+ mutex_unlock(&fullstop_mutex);
+ }
+ return NOTIFY_DONE;
+}
+
/*
* Allocate an element from the rcu_tortures pool.
*/
@@ -199,11 +223,12 @@ rcu_random(struct rcu_random_state *rrsp)
static void
rcu_stutter_wait(void)
{
- while (stutter_pause_test || !rcutorture_runnable)
+ while ((stutter_pause_test || !rcutorture_runnable) && !fullstop) {
if (rcutorture_runnable)
schedule_timeout_interruptible(1);
else
schedule_timeout_interruptible(round_jiffies_relative(HZ));
+ }
}
/*
@@ -599,7 +624,7 @@ rcu_torture_writer(void *arg)
rcu_stutter_wait();
} while (!kthread_should_stop() && !fullstop);
VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
- while (!kthread_should_stop())
+ while (!kthread_should_stop() && fullstop != FULLSTOP_SIGNALED)
schedule_timeout_uninterruptible(1);
return 0;
}
@@ -624,7 +649,7 @@ rcu_torture_fakewriter(void *arg)
} while (!kthread_should_stop() && !fullstop);
VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
- while (!kthread_should_stop())
+ while (!kthread_should_stop() && fullstop != FULLSTOP_SIGNALED)
schedule_timeout_uninterruptible(1);
return 0;
}
@@ -734,7 +759,7 @@ rcu_torture_reader(void *arg)
VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
if (irqreader && cur_ops->irqcapable)
del_timer_sync(&t);
- while (!kthread_should_stop())
+ while (!kthread_should_stop() && fullstop != FULLSTOP_SIGNALED)
schedule_timeout_uninterruptible(1);
return 0;
}
@@ -831,7 +856,7 @@ rcu_torture_stats(void *arg)
do {
schedule_timeout_interruptible(stat_interval * HZ);
rcu_torture_stats_print();
- } while (!kthread_should_stop());
+ } while (!kthread_should_stop() && !fullstop);
VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
return 0;
}
@@ -899,7 +924,7 @@ rcu_torture_shuffle(void *arg)
do {
schedule_timeout_interruptible(shuffle_interval * HZ);
rcu_torture_shuffle_tasks();
- } while (!kthread_should_stop());
+ } while (!kthread_should_stop() && !fullstop);
VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
return 0;
}
@@ -914,10 +939,10 @@ rcu_torture_stutter(void *arg)
do {
schedule_timeout_interruptible(stutter * HZ);
stutter_pause_test = 1;
- if (!kthread_should_stop())
+ if (!kthread_should_stop() && !fullstop)
schedule_timeout_interruptible(stutter * HZ);
stutter_pause_test = 0;
- } while (!kthread_should_stop());
+ } while (!kthread_should_stop() && !fullstop);
VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping");
return 0;
}
@@ -934,12 +959,27 @@ rcu_torture_print_module_parms(char *tag)
stutter, irqreader);
}
+static struct notifier_block rcutorture_nb = {
+ .notifier_call = rcutorture_shutdown_notify,
+};
+
static void
rcu_torture_cleanup(void)
{
int i;
- fullstop = 1;
+ mutex_lock(&fullstop_mutex);
+ if (!fullstop) {
+ /* If being signaled, let it happen, then exit. */
+ mutex_unlock(&fullstop_mutex);
+ schedule_timeout_interruptible(10 * HZ);
+ if (cur_ops->cb_barrier != NULL)
+ cur_ops->cb_barrier();
+ return;
+ }
+ fullstop = FULLSTOP_CLEANUP;
+ mutex_unlock(&fullstop_mutex);
+ unregister_reboot_notifier(&rcutorture_nb);
if (stutter_task) {
VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task");
kthread_stop(stutter_task);
@@ -1015,6 +1055,8 @@ rcu_torture_init(void)
{ &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops,
&srcu_ops, &sched_ops, &sched_ops_sync, };
+ mutex_lock(&fullstop_mutex);
+
/* Process args and tell the world that the torturer is on the job. */
for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
cur_ops = torture_ops[i];
@@ -1024,6 +1066,7 @@ rcu_torture_init(void)
if (i == ARRAY_SIZE(torture_ops)) {
printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n",
torture_type);
+ mutex_unlock(&fullstop_mutex);
return (-EINVAL);
}
if (cur_ops->init)
@@ -1146,9 +1189,12 @@ rcu_torture_init(void)
goto unwind;
}
}
+ register_reboot_notifier(&rcutorture_nb);
+ mutex_unlock(&fullstop_mutex);
return 0;
unwind:
+ mutex_unlock(&fullstop_mutex);
rcu_torture_cleanup();
return firsterr;
}
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
new file mode 100644
index 000000000000..a342b032112c
--- /dev/null
+++ b/kernel/rcutree.c
@@ -0,0 +1,1535 @@
+/*
+ * Read-Copy Update mechanism for mutual exclusion
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2008
+ *
+ * Authors: Dipankar Sarma <dipankar@in.ibm.com>
+ * Manfred Spraul <manfred@colorfullife.com>
+ * Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
+ *
+ * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
+ * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ * Documentation/RCU
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/rcupdate.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <asm/atomic.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/completion.h>
+#include <linux/moduleparam.h>
+#include <linux/percpu.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/mutex.h>
+#include <linux/time.h>
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key rcu_lock_key;
+struct lockdep_map rcu_lock_map =
+ STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
+EXPORT_SYMBOL_GPL(rcu_lock_map);
+#endif
+
+/* Data structures. */
+
+#define RCU_STATE_INITIALIZER(name) { \
+ .level = { &name.node[0] }, \
+ .levelcnt = { \
+ NUM_RCU_LVL_0, /* root of hierarchy. */ \
+ NUM_RCU_LVL_1, \
+ NUM_RCU_LVL_2, \
+ NUM_RCU_LVL_3, /* == MAX_RCU_LVLS */ \
+ }, \
+ .signaled = RCU_SIGNAL_INIT, \
+ .gpnum = -300, \
+ .completed = -300, \
+ .onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \
+ .fqslock = __SPIN_LOCK_UNLOCKED(&name.fqslock), \
+ .n_force_qs = 0, \
+ .n_force_qs_ngp = 0, \
+}
+
+struct rcu_state rcu_state = RCU_STATE_INITIALIZER(rcu_state);
+DEFINE_PER_CPU(struct rcu_data, rcu_data);
+
+struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
+DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
+
+#ifdef CONFIG_NO_HZ
+DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks);
+#endif /* #ifdef CONFIG_NO_HZ */
+
+static int blimit = 10; /* Maximum callbacks per softirq. */
+static int qhimark = 10000; /* If this many pending, ignore blimit. */
+static int qlowmark = 100; /* Once only this many pending, use blimit. */
+
+static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
+
+/*
+ * Return the number of RCU batches processed thus far for debug & stats.
+ */
+long rcu_batches_completed(void)
+{
+ return rcu_state.completed;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_completed);
+
+/*
+ * Return the number of RCU BH batches processed thus far for debug & stats.
+ */
+long rcu_batches_completed_bh(void)
+{
+ return rcu_bh_state.completed;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
+
+/*
+ * Does the CPU have callbacks ready to be invoked?
+ */
+static int
+cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
+{
+ return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL];
+}
+
+/*
+ * Does the current CPU require an as-yet-unscheduled grace period?
+ */
+static int
+cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
+{
+ /* ACCESS_ONCE() because we are accessing outside of lock. */
+ return *rdp->nxttail[RCU_DONE_TAIL] &&
+ ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum);
+}
+
+/*
+ * Return the root node of the specified rcu_state structure.
+ */
+static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
+{
+ return &rsp->node[0];
+}
+
+#ifdef CONFIG_SMP
+
+/*
+ * If the specified CPU is offline, tell the caller that it is in
+ * a quiescent state. Otherwise, whack it with a reschedule IPI.
+ * Grace periods can end up waiting on an offline CPU when that
+ * CPU is in the process of coming online -- it will be added to the
+ * rcu_node bitmasks before it actually makes it online. The same thing
+ * can happen while a CPU is in the process of going offline. Because this
+ * race is quite rare, we check for it after detecting that the grace
+ * period has been delayed rather than checking each and every CPU
+ * each and every time we start a new grace period.
+ */
+static int rcu_implicit_offline_qs(struct rcu_data *rdp)
+{
+ /*
+ * If the CPU is offline, it is in a quiescent state. We can
+ * trust its state not to change because interrupts are disabled.
+ */
+ if (cpu_is_offline(rdp->cpu)) {
+ rdp->offline_fqs++;
+ return 1;
+ }
+
+ /* The CPU is online, so send it a reschedule IPI. */
+ if (rdp->cpu != smp_processor_id())
+ smp_send_reschedule(rdp->cpu);
+ else
+ set_need_resched();
+ rdp->resched_ipi++;
+ return 0;
+}
+
+#endif /* #ifdef CONFIG_SMP */
+
+#ifdef CONFIG_NO_HZ
+static DEFINE_RATELIMIT_STATE(rcu_rs, 10 * HZ, 5);
+
+/**
+ * rcu_enter_nohz - inform RCU that current CPU is entering nohz
+ *
+ * Enter nohz mode, in other words, -leave- the mode in which RCU
+ * read-side critical sections can occur. (Though RCU read-side
+ * critical sections can occur in irq handlers in nohz mode, a possibility
+ * handled by rcu_irq_enter() and rcu_irq_exit()).
+ */
+void rcu_enter_nohz(void)
+{
+ unsigned long flags;
+ struct rcu_dynticks *rdtp;
+
+ smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
+ local_irq_save(flags);
+ rdtp = &__get_cpu_var(rcu_dynticks);
+ rdtp->dynticks++;
+ rdtp->dynticks_nesting--;
+ WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs);
+ local_irq_restore(flags);
+}
+
+/*
+ * rcu_exit_nohz - inform RCU that current CPU is leaving nohz
+ *
+ * Exit nohz mode, in other words, -enter- the mode in which RCU
+ * read-side critical sections normally occur.
+ */
+void rcu_exit_nohz(void)
+{
+ unsigned long flags;
+ struct rcu_dynticks *rdtp;
+
+ local_irq_save(flags);
+ rdtp = &__get_cpu_var(rcu_dynticks);
+ rdtp->dynticks++;
+ rdtp->dynticks_nesting++;
+ WARN_ON_RATELIMIT(!(rdtp->dynticks & 0x1), &rcu_rs);
+ local_irq_restore(flags);
+ smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
+}
+
+/**
+ * rcu_nmi_enter - inform RCU of entry to NMI context
+ *
+ * If the CPU was idle with dynamic ticks active, and there is no
+ * irq handler running, this updates rdtp->dynticks_nmi to let the
+ * RCU grace-period handling know that the CPU is active.
+ */
+void rcu_nmi_enter(void)
+{
+ struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+
+ if (rdtp->dynticks & 0x1)
+ return;
+ rdtp->dynticks_nmi++;
+ WARN_ON_RATELIMIT(!(rdtp->dynticks_nmi & 0x1), &rcu_rs);
+ smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
+}
+
+/**
+ * rcu_nmi_exit - inform RCU of exit from NMI context
+ *
+ * If the CPU was idle with dynamic ticks active, and there is no
+ * irq handler running, this updates rdtp->dynticks_nmi to let the
+ * RCU grace-period handling know that the CPU is no longer active.
+ */
+void rcu_nmi_exit(void)
+{
+ struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+
+ if (rdtp->dynticks & 0x1)
+ return;
+ smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
+ rdtp->dynticks_nmi++;
+ WARN_ON_RATELIMIT(rdtp->dynticks_nmi & 0x1, &rcu_rs);
+}
+
+/**
+ * rcu_irq_enter - inform RCU of entry to hard irq context
+ *
+ * If the CPU was idle with dynamic ticks active, this updates the
+ * rdtp->dynticks to let the RCU handling know that the CPU is active.
+ */
+void rcu_irq_enter(void)
+{
+ struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+
+ if (rdtp->dynticks_nesting++)
+ return;
+ rdtp->dynticks++;
+ WARN_ON_RATELIMIT(!(rdtp->dynticks & 0x1), &rcu_rs);
+ smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
+}
+
+/**
+ * rcu_irq_exit - inform RCU of exit from hard irq context
+ *
+ * If the CPU was idle with dynamic ticks active, update the rdtp->dynticks
+ * to let the RCU handling know that the CPU is going back to idle
+ * with no ticks.
+ */
+void rcu_irq_exit(void)
+{
+ struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+
+ if (--rdtp->dynticks_nesting)
+ return;
+ smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
+ rdtp->dynticks++;
+ WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs);
+
+ /* If the interrupt queued a callback, get out of dyntick mode. */
+ if (__get_cpu_var(rcu_data).nxtlist ||
+ __get_cpu_var(rcu_bh_data).nxtlist)
+ set_need_resched();
+}
+
+/*
+ * Record the specified "completed" value, which is later used to validate
+ * dynticks counter manipulations. Specify "rsp->completed - 1" to
+ * unconditionally invalidate any future dynticks manipulations (which is
+ * useful at the beginning of a grace period).
+ */
+static void dyntick_record_completed(struct rcu_state *rsp, long comp)
+{
+ rsp->dynticks_completed = comp;
+}
+
+#ifdef CONFIG_SMP
+
+/*
+ * Recall the previously recorded value of the completion for dynticks.
+ */
+static long dyntick_recall_completed(struct rcu_state *rsp)
+{
+ return rsp->dynticks_completed;
+}
+
+/*
+ * Snapshot the specified CPU's dynticks counter so that we can later
+ * credit them with an implicit quiescent state. Return 1 if this CPU
+ * is already in a quiescent state courtesy of dynticks idle mode.
+ */
+static int dyntick_save_progress_counter(struct rcu_data *rdp)
+{
+ int ret;
+ int snap;
+ int snap_nmi;
+
+ snap = rdp->dynticks->dynticks;
+ snap_nmi = rdp->dynticks->dynticks_nmi;
+ smp_mb(); /* Order sampling of snap with end of grace period. */
+ rdp->dynticks_snap = snap;
+ rdp->dynticks_nmi_snap = snap_nmi;
+ ret = ((snap & 0x1) == 0) && ((snap_nmi & 0x1) == 0);
+ if (ret)
+ rdp->dynticks_fqs++;
+ return ret;
+}
+
+/*
+ * Return true if the specified CPU has passed through a quiescent
+ * state by virtue of being in or having passed through a dynticks
+ * idle state since the last call to dyntick_save_progress_counter()
+ * for this same CPU.
+ */
+static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
+{
+ long curr;
+ long curr_nmi;
+ long snap;
+ long snap_nmi;
+
+ curr = rdp->dynticks->dynticks;
+ snap = rdp->dynticks_snap;
+ curr_nmi = rdp->dynticks->dynticks_nmi;
+ snap_nmi = rdp->dynticks_nmi_snap;
+ smp_mb(); /* force ordering with cpu entering/leaving dynticks. */
+
+ /*
+ * If the CPU passed through or entered a dynticks idle phase with
+ * no active irq/NMI handlers, then we can safely pretend that the CPU
+ * already acknowledged the request to pass through a quiescent
+ * state. Either way, that CPU cannot possibly be in an RCU
+ * read-side critical section that started before the beginning
+ * of the current RCU grace period.
+ */
+ if ((curr != snap || (curr & 0x1) == 0) &&
+ (curr_nmi != snap_nmi || (curr_nmi & 0x1) == 0)) {
+ rdp->dynticks_fqs++;
+ return 1;
+ }
+
+ /* Go check for the CPU being offline. */
+ return rcu_implicit_offline_qs(rdp);
+}
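The dynticks and dynticks_nmi counters follow an odd/even protocol: every transition
increments the counter, so an odd value means the CPU is currently non-idle (or inside
an irq/NMI handler) and an even value means it is idle. rcu_implicit_dynticks_qs()
therefore credits a quiescent state when a counter has moved since the
grace-period-start snapshot or is even right now, and it requires that test to pass for
both counters. A stand-alone rendering of the per-counter test (plain longs instead of
the per-CPU structures, values for illustration only):

	#include <stdio.h>

	/* Nonzero if this counter alone proves a quiescent state. */
	static int counter_shows_qs(long snap, long curr)
	{
		/* moved since the snapshot, or idle (even) right now */
		return curr != snap || (curr & 0x1) == 0;
	}

	int main(void)
	{
		printf("%d\n", counter_shows_qs(4, 4));	/* 1: idle when snapped, still idle */
		printf("%d\n", counter_shows_qs(5, 5));	/* 0: same non-idle period the whole time */
		printf("%d\n", counter_shows_qs(5, 7));	/* 1: counter moved, CPU passed through idle */
		return 0;
	}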
+
+#endif /* #ifdef CONFIG_SMP */
+
+#else /* #ifdef CONFIG_NO_HZ */
+
+static void dyntick_record_completed(struct rcu_state *rsp, long comp)
+{
+}
+
+#ifdef CONFIG_SMP
+
+/*
+ * If there are no dynticks, then the only way that a CPU can passively
+ * be in a quiescent state is to be offline. Unlike dynticks idle, which
+ * is a point in time during the prior (already finished) grace period,
+ * an offline CPU is always in a quiescent state, and that state can be
+ * applied unconditionally. So just return the current value of completed.
+ */
+static long dyntick_recall_completed(struct rcu_state *rsp)
+{
+ return rsp->completed;
+}
+
+static int dyntick_save_progress_counter(struct rcu_data *rdp)
+{
+ return 0;
+}
+
+static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
+{
+ return rcu_implicit_offline_qs(rdp);
+}
+
+#endif /* #ifdef CONFIG_SMP */
+
+#endif /* #else #ifdef CONFIG_NO_HZ */
+
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+
+static void record_gp_stall_check_time(struct rcu_state *rsp)
+{
+ rsp->gp_start = jiffies;
+ rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK;
+}
+
+static void print_other_cpu_stall(struct rcu_state *rsp)
+{
+ int cpu;
+ long delta;
+ unsigned long flags;
+ struct rcu_node *rnp = rcu_get_root(rsp);
+ struct rcu_node *rnp_cur = rsp->level[NUM_RCU_LVLS - 1];
+ struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES];
+
+ /* Only let one CPU complain about others per time interval. */
+
+ spin_lock_irqsave(&rnp->lock, flags);
+ delta = jiffies - rsp->jiffies_stall;
+ if (delta < RCU_STALL_RAT_DELAY || rsp->gpnum == rsp->completed) {
+ spin_unlock_irqrestore(&rnp->lock, flags);
+ return;
+ }
+ rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
+ spin_unlock_irqrestore(&rnp->lock, flags);
+
+ /* OK, time to rat on our buddy... */
+
+ printk(KERN_ERR "INFO: RCU detected CPU stalls:");
+ for (; rnp_cur < rnp_end; rnp_cur++) {
+ if (rnp_cur->qsmask == 0)
+ continue;
+ for (cpu = 0; cpu <= rnp_cur->grphi - rnp_cur->grplo; cpu++)
+ if (rnp_cur->qsmask & (1UL << cpu))
+ printk(" %d", rnp_cur->grplo + cpu);
+ }
+ printk(" (detected by %d, t=%ld jiffies)\n",
+ smp_processor_id(), (long)(jiffies - rsp->gp_start));
+ force_quiescent_state(rsp, 0); /* Kick them all. */
+}
+
+static void print_cpu_stall(struct rcu_state *rsp)
+{
+ unsigned long flags;
+ struct rcu_node *rnp = rcu_get_root(rsp);
+
+ printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu jiffies)\n",
+ smp_processor_id(), jiffies - rsp->gp_start);
+ dump_stack();
+ spin_lock_irqsave(&rnp->lock, flags);
+ if ((long)(jiffies - rsp->jiffies_stall) >= 0)
+ rsp->jiffies_stall =
+ jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
+ spin_unlock_irqrestore(&rnp->lock, flags);
+ set_need_resched(); /* kick ourselves to get things going. */
+}
+
+static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
+{
+ long delta;
+ struct rcu_node *rnp;
+
+ delta = jiffies - rsp->jiffies_stall;
+ rnp = rdp->mynode;
+ if ((rnp->qsmask & rdp->grpmask) && delta >= 0) {
+
+ /* We haven't checked in, so go dump stack. */
+ print_cpu_stall(rsp);
+
+ } else if (rsp->gpnum != rsp->completed &&
+ delta >= RCU_STALL_RAT_DELAY) {
+
+ /* They had two time units to dump stack, so complain. */
+ print_other_cpu_stall(rsp);
+ }
+}
+
+#else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+
+static void record_gp_stall_check_time(struct rcu_state *rsp)
+{
+}
+
+static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
+{
+}
+
+#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+
+/*
+ * Update CPU-local rcu_data state to record the newly noticed grace period.
+ * This is used both when we started the grace period and when we notice
+ * that someone else started the grace period.
+ */
+static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
+{
+ rdp->qs_pending = 1;
+ rdp->passed_quiesc = 0;
+ rdp->gpnum = rsp->gpnum;
+ rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending +
+ RCU_JIFFIES_TILL_FORCE_QS;
+}
+
+/*
+ * Did someone else start a new RCU grace period since we last
+ * checked? Update local state appropriately if so. Must be called
+ * on the CPU corresponding to rdp.
+ */
+static int
+check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ local_irq_save(flags);
+ if (rdp->gpnum != rsp->gpnum) {
+ note_new_gpnum(rsp, rdp);
+ ret = 1;
+ }
+ local_irq_restore(flags);
+ return ret;
+}
+
+/*
+ * Start a new RCU grace period if warranted, re-initializing the hierarchy
+ * in preparation for detecting the next grace period. The caller must hold
+ * the root node's ->lock, which is released before return. Hard irqs must
+ * be disabled.
+ */
+static void
+rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
+ __releases(rcu_get_root(rsp)->lock)
+{
+ struct rcu_data *rdp = rsp->rda[smp_processor_id()];
+ struct rcu_node *rnp = rcu_get_root(rsp);
+ struct rcu_node *rnp_cur;
+ struct rcu_node *rnp_end;
+
+ if (!cpu_needs_another_gp(rsp, rdp)) {
+ spin_unlock_irqrestore(&rnp->lock, flags);
+ return;
+ }
+
+ /* Advance to a new grace period and initialize state. */
+ rsp->gpnum++;
+ rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
+ rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
+ rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending +
+ RCU_JIFFIES_TILL_FORCE_QS;
+ record_gp_stall_check_time(rsp);
+ dyntick_record_completed(rsp, rsp->completed - 1);
+ note_new_gpnum(rsp, rdp);
+
+ /*
+ * Because we are first, we know that all our callbacks will
+ * be covered by this upcoming grace period, even the ones
+ * that were registered arbitrarily recently.
+ */
+ rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+ rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+
+ /* Special-case the common single-level case. */
+ if (NUM_RCU_NODES == 1) {
+ rnp->qsmask = rnp->qsmaskinit;
+ spin_unlock_irqrestore(&rnp->lock, flags);
+ return;
+ }
+
+ spin_unlock(&rnp->lock); /* leave irqs disabled. */
+
+
+ /* Exclude any concurrent CPU-hotplug operations. */
+ spin_lock(&rsp->onofflock); /* irqs already disabled. */
+
+ /*
+ * Set the quiescent-state-needed bits in all the non-leaf RCU
+ * nodes for all currently online CPUs. This operation relies
+ * on the layout of the hierarchy within the rsp->node[] array.
+ * Note that other CPUs will access only the leaves of the
+ * hierarchy, which still indicate that no grace period is in
+ * progress. In addition, we have excluded CPU-hotplug operations.
+ *
+ * We therefore do not need to hold any locks. Any required
+ * memory barriers will be supplied by the locks guarding the
+ * leaf rcu_nodes in the hierarchy.
+ */
+
+ rnp_end = rsp->level[NUM_RCU_LVLS - 1];
+ for (rnp_cur = &rsp->node[0]; rnp_cur < rnp_end; rnp_cur++)
+ rnp_cur->qsmask = rnp_cur->qsmaskinit;
+
+ /*
+ * Now set up the leaf nodes. Here we must be careful. First,
+ * we need to hold the lock in order to exclude other CPUs, which
+ * might be contending for the leaf nodes' locks. Second, as
+ * soon as we initialize a given leaf node, its CPUs might run
+ * up the rest of the hierarchy. We must therefore acquire locks
+ * for each node that we touch during this stage. (But we still
+ * are excluding CPU-hotplug operations.)
+ *
+ * Note that the grace period cannot complete until we finish
+ * the initialization process, as there will be at least one
+ * qsmask bit set in the root node until that time, namely the
+ * one corresponding to this CPU.
+ */
+ rnp_end = &rsp->node[NUM_RCU_NODES];
+ rnp_cur = rsp->level[NUM_RCU_LVLS - 1];
+ for (; rnp_cur < rnp_end; rnp_cur++) {
+ spin_lock(&rnp_cur->lock); /* irqs already disabled. */
+ rnp_cur->qsmask = rnp_cur->qsmaskinit;
+ spin_unlock(&rnp_cur->lock); /* irqs already disabled. */
+ }
+
+ rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
+ spin_unlock_irqrestore(&rsp->onofflock, flags);
+}
+
+/*
+ * Advance this CPU's callbacks, but only if the current grace period
+ * has ended. This may be called only from the CPU to whom the rdp
+ * belongs.
+ */
+static void
+rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
+{
+ long completed_snap;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ completed_snap = ACCESS_ONCE(rsp->completed); /* outside of lock. */
+
+ /* Did another grace period end? */
+ if (rdp->completed != completed_snap) {
+
+ /* Advance callbacks. No harm if list empty. */
+ rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
+ rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
+ rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+
+ /* Remember that we saw this grace-period completion. */
+ rdp->completed = completed_snap;
+ }
+ local_irq_restore(flags);
+}
+
+/*
+ * Similar to cpu_quiet(), for which it is a helper function. Allows
+ * a group of CPUs to be quieted at one go, though all the CPUs in the
+ * group must be represented by the same leaf rcu_node structure.
+ * That structure's lock must be held upon entry, and it is released
+ * before return.
+ */
+static void
+cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
+ unsigned long flags)
+ __releases(rnp->lock)
+{
+ /* Walk up the rcu_node hierarchy. */
+ for (;;) {
+ if (!(rnp->qsmask & mask)) {
+
+ /* Our bit has already been cleared, so done. */
+ spin_unlock_irqrestore(&rnp->lock, flags);
+ return;
+ }
+ rnp->qsmask &= ~mask;
+ if (rnp->qsmask != 0) {
+
+ /* Other bits still set at this level, so done. */
+ spin_unlock_irqrestore(&rnp->lock, flags);
+ return;
+ }
+ mask = rnp->grpmask;
+ if (rnp->parent == NULL) {
+
+ /* No more levels. Exit loop holding root lock. */
+
+ break;
+ }
+ spin_unlock_irqrestore(&rnp->lock, flags);
+ rnp = rnp->parent;
+ spin_lock_irqsave(&rnp->lock, flags);
+ }
+
+ /*
+ * Get here if we are the last CPU to pass through a quiescent
+ * state for this grace period. Clean up and let rcu_start_gp()
+ * start up the next grace period if one is needed. Note that
+ * we still hold rnp->lock, as required by rcu_start_gp(), which
+ * will release it.
+ */
+ rsp->completed = rsp->gpnum;
+ rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
+ rcu_start_gp(rsp, flags); /* releases rnp->lock. */
+}
+
+/*
+ * Record a quiescent state for the specified CPU, which must either be
+ * the current CPU or an offline CPU. The lastcomp argument is used to
+ * make sure we are still in the grace period of interest. We don't want
+ * to end the current grace period based on quiescent states detected in
+ * an earlier grace period!
+ */
+static void
+cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
+{
+ unsigned long flags;
+ unsigned long mask;
+ struct rcu_node *rnp;
+
+ rnp = rdp->mynode;
+ spin_lock_irqsave(&rnp->lock, flags);
+ if (lastcomp != ACCESS_ONCE(rsp->completed)) {
+
+ /*
+ * Someone beat us to it for this grace period, so leave.
+ * The race with GP start is resolved by the fact that we
+ * hold the leaf rcu_node lock, so that the per-CPU bits
+ * cannot yet be initialized -- so we would simply find our
+ * CPU's bit already cleared in cpu_quiet_msk() if this race
+ * occurred.
+ */
+ rdp->passed_quiesc = 0; /* try again later! */
+ spin_unlock_irqrestore(&rnp->lock, flags);
+ return;
+ }
+ mask = rdp->grpmask;
+ if ((rnp->qsmask & mask) == 0) {
+ spin_unlock_irqrestore(&rnp->lock, flags);
+ } else {
+ rdp->qs_pending = 0;
+
+ /*
+ * This GP can't end until this CPU checks in, so all of our
+ * callbacks can be processed during the next GP.
+ */
+ rdp = rsp->rda[smp_processor_id()];
+ rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+
+ cpu_quiet_msk(mask, rsp, rnp, flags); /* releases rnp->lock */
+ }
+}
+
+/*
+ * Check to see if there is a new grace period of which this CPU
+ * is not yet aware, and if so, set up local rcu_data state for it.
+ * Otherwise, see if this CPU has just passed through its first
+ * quiescent state for this grace period, and record that fact if so.
+ */
+static void
+rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
+{
+ /* If there is now a new grace period, record and return. */
+ if (check_for_new_grace_period(rsp, rdp))
+ return;
+
+ /*
+ * Does this CPU still need to do its part for current grace period?
+ * If no, return and let the other CPUs do their part as well.
+ */
+ if (!rdp->qs_pending)
+ return;
+
+ /*
+ * Was there a quiescent state since the beginning of the grace
+ * period? If no, then exit and wait for the next call.
+ */
+ if (!rdp->passed_quiesc)
+ return;
+
+ /* Tell RCU we are done (but cpu_quiet() will be the judge of that). */
+ cpu_quiet(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/*
+ * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy
+ * and move all callbacks from the outgoing CPU to the current one.
+ */
+static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
+{
+ int i;
+ unsigned long flags;
+ long lastcomp;
+ unsigned long mask;
+ struct rcu_data *rdp = rsp->rda[cpu];
+ struct rcu_data *rdp_me;
+ struct rcu_node *rnp;
+
+ /* Exclude any attempts to start a new grace period. */
+ spin_lock_irqsave(&rsp->onofflock, flags);
+
+ /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
+ rnp = rdp->mynode;
+ mask = rdp->grpmask; /* rnp->grplo is constant. */
+ do {
+ spin_lock(&rnp->lock); /* irqs already disabled. */
+ rnp->qsmaskinit &= ~mask;
+ if (rnp->qsmaskinit != 0) {
+ spin_unlock(&rnp->lock); /* irqs already disabled. */
+ break;
+ }
+ mask = rnp->grpmask;
+ spin_unlock(&rnp->lock); /* irqs already disabled. */
+ rnp = rnp->parent;
+ } while (rnp != NULL);
+ lastcomp = rsp->completed;
+
+ spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
+
+ /* Being offline is a quiescent state, so go record it. */
+ cpu_quiet(cpu, rsp, rdp, lastcomp);
+
+ /*
+ * Move callbacks from the outgoing CPU to the running CPU.
+ * Note that the outgoing CPU is now quiescent, so it is now
+ * (uncharacteristically) safe to access its rcu_data structure.
+ * Note also that we must carefully retain the order of the
+ * outgoing CPU's callbacks in order for rcu_barrier() to work
+ * correctly. Finally, note that we start all the callbacks
+ * afresh, even those that have passed through a grace period
+ * and are therefore ready to invoke. The theory is that hotplug
+ * events are rare, and that if they are frequent enough to
+ * indefinitely delay callbacks, you have far worse things to
+ * be worrying about.
+ */
+ rdp_me = rsp->rda[smp_processor_id()];
+ if (rdp->nxtlist != NULL) {
+ *rdp_me->nxttail[RCU_NEXT_TAIL] = rdp->nxtlist;
+ rdp_me->nxttail[RCU_NEXT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+ rdp->nxtlist = NULL;
+ for (i = 0; i < RCU_NEXT_SIZE; i++)
+ rdp->nxttail[i] = &rdp->nxtlist;
+ rdp_me->qlen += rdp->qlen;
+ rdp->qlen = 0;
+ }
+ local_irq_restore(flags);
+}
+
+/*
+ * Remove the specified CPU from the RCU hierarchy and move any pending
+ * callbacks that it might have to the current CPU. This code assumes
+ * that at least one CPU in the system will remain running at all times.
+ * Any attempt to offline -all- CPUs is likely to strand RCU callbacks.
+ */
+static void rcu_offline_cpu(int cpu)
+{
+ __rcu_offline_cpu(cpu, &rcu_state);
+ __rcu_offline_cpu(cpu, &rcu_bh_state);
+}
+
+#else /* #ifdef CONFIG_HOTPLUG_CPU */
+
+static void rcu_offline_cpu(int cpu)
+{
+}
+
+#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
+
+/*
+ * Invoke any RCU callbacks that have made it to the end of their grace
+ * period. Throttle as specified by rdp->blimit.
+ */
+static void rcu_do_batch(struct rcu_data *rdp)
+{
+ unsigned long flags;
+ struct rcu_head *next, *list, **tail;
+ int count;
+
+ /* If no callbacks are ready, just return. */
+ if (!cpu_has_callbacks_ready_to_invoke(rdp))
+ return;
+
+ /*
+ * Extract the list of ready callbacks, disabling to prevent
+ * races with call_rcu() from interrupt handlers.
+ */
+ local_irq_save(flags);
+ list = rdp->nxtlist;
+ rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
+ *rdp->nxttail[RCU_DONE_TAIL] = NULL;
+ tail = rdp->nxttail[RCU_DONE_TAIL];
+ for (count = RCU_NEXT_SIZE - 1; count >= 0; count--)
+ if (rdp->nxttail[count] == rdp->nxttail[RCU_DONE_TAIL])
+ rdp->nxttail[count] = &rdp->nxtlist;
+ local_irq_restore(flags);
+
+ /* Invoke callbacks. */
+ count = 0;
+ while (list) {
+ next = list->next;
+ prefetch(next);
+ list->func(list);
+ list = next;
+ if (++count >= rdp->blimit)
+ break;
+ }
+
+ local_irq_save(flags);
+
+ /* Update count, and requeue any remaining callbacks. */
+ rdp->qlen -= count;
+ if (list != NULL) {
+ *tail = rdp->nxtlist;
+ rdp->nxtlist = list;
+ for (count = 0; count < RCU_NEXT_SIZE; count++)
+ if (&rdp->nxtlist == rdp->nxttail[count])
+ rdp->nxttail[count] = tail;
+ else
+ break;
+ }
+
+ /* Reinstate batch limit if we have worked down the excess. */
+ if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
+ rdp->blimit = blimit;
+
+ local_irq_restore(flags);
+
+ /* Re-raise the RCU softirq if there are callbacks remaining. */
+ if (cpu_has_callbacks_ready_to_invoke(rdp))
+ raise_softirq(RCU_SOFTIRQ);
+}
+
+/*
+ * Check to see if this CPU is in a non-context-switch quiescent state
+ * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
+ * Also schedule the RCU softirq handler.
+ *
+ * This function must be called with hardirqs disabled. It is normally
+ * invoked from the scheduling-clock interrupt. If rcu_pending returns
+ * false, there is no point in invoking rcu_check_callbacks().
+ */
+void rcu_check_callbacks(int cpu, int user)
+{
+ if (user ||
+ (idle_cpu(cpu) && !in_softirq() &&
+ hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
+
+ /*
+ * Get here if this CPU took its interrupt from user
+ * mode or from the idle loop, and if this is not a
+ * nested interrupt. In this case, the CPU is in
+ * a quiescent state, so count it.
+ *
+ * No memory barrier is required here because both
+ * rcu_qsctr_inc() and rcu_bh_qsctr_inc() reference
+ * only CPU-local variables that other CPUs neither
+ * access nor modify, at least not while the corresponding
+ * CPU is online.
+ */
+
+ rcu_qsctr_inc(cpu);
+ rcu_bh_qsctr_inc(cpu);
+
+ } else if (!in_softirq()) {
+
+ /*
+ * Get here if this CPU did not take its interrupt from
+ * softirq, in other words, if it is not interrupting
+ * a rcu_bh read-side critical section. This is therefore
+ * a quiescent state for rcu_bh, so count it.
+ */
+
+ rcu_bh_qsctr_inc(cpu);
+ }
+ raise_softirq(RCU_SOFTIRQ);
+}
+
+#ifdef CONFIG_SMP
+
+/*
+ * Scan the leaf rcu_node structures, processing dyntick state for any that
+ * have not yet encountered a quiescent state, using the function specified.
+ * Returns 1 if the current grace period ends while scanning (possibly
+ * because we made it end).
+ */
+static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp,
+ int (*f)(struct rcu_data *))
+{
+ unsigned long bit;
+ int cpu;
+ unsigned long flags;
+ unsigned long mask;
+ struct rcu_node *rnp_cur = rsp->level[NUM_RCU_LVLS - 1];
+ struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES];
+
+ for (; rnp_cur < rnp_end; rnp_cur++) {
+ mask = 0;
+ spin_lock_irqsave(&rnp_cur->lock, flags);
+ if (rsp->completed != lastcomp) {
+ spin_unlock_irqrestore(&rnp_cur->lock, flags);
+ return 1;
+ }
+ if (rnp_cur->qsmask == 0) {
+ spin_unlock_irqrestore(&rnp_cur->lock, flags);
+ continue;
+ }
+ cpu = rnp_cur->grplo;
+ bit = 1;
+ for (; cpu <= rnp_cur->grphi; cpu++, bit <<= 1) {
+ if ((rnp_cur->qsmask & bit) != 0 && f(rsp->rda[cpu]))
+ mask |= bit;
+ }
+ if (mask != 0 && rsp->completed == lastcomp) {
+
+ /* cpu_quiet_msk() releases rnp_cur->lock. */
+ cpu_quiet_msk(mask, rsp, rnp_cur, flags);
+ continue;
+ }
+ spin_unlock_irqrestore(&rnp_cur->lock, flags);
+ }
+ return 0;
+}
+
+/*
+ * Force quiescent states on reluctant CPUs, and also detect which
+ * CPUs are in dyntick-idle mode.
+ */
+static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
+{
+ unsigned long flags;
+ long lastcomp;
+ struct rcu_data *rdp = rsp->rda[smp_processor_id()];
+ struct rcu_node *rnp = rcu_get_root(rsp);
+ u8 signaled;
+
+ if (ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum))
+ return; /* No grace period in progress, nothing to force. */
+ if (!spin_trylock_irqsave(&rsp->fqslock, flags)) {
+ rsp->n_force_qs_lh++; /* Inexact, can lose counts. Tough! */
+ return; /* Someone else is already on the job. */
+ }
+ if (relaxed &&
+ (long)(rsp->jiffies_force_qs - jiffies) >= 0 &&
+ (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) >= 0)
+ goto unlock_ret; /* no emergency and done recently. */
+ rsp->n_force_qs++;
+ spin_lock(&rnp->lock);
+ lastcomp = rsp->completed;
+ signaled = rsp->signaled;
+ rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
+ rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending +
+ RCU_JIFFIES_TILL_FORCE_QS;
+ if (lastcomp == rsp->gpnum) {
+ rsp->n_force_qs_ngp++;
+ spin_unlock(&rnp->lock);
+ goto unlock_ret; /* no GP in progress, time updated. */
+ }
+ spin_unlock(&rnp->lock);
+ switch (signaled) {
+ case RCU_GP_INIT:
+
+ break; /* grace period still initializing, ignore. */
+
+ case RCU_SAVE_DYNTICK:
+
+ if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK)
+ break; /* So gcc recognizes the dead code. */
+
+ /* Record dyntick-idle state. */
+ if (rcu_process_dyntick(rsp, lastcomp,
+ dyntick_save_progress_counter))
+ goto unlock_ret;
+
+ /* Update state, record completion counter. */
+ spin_lock(&rnp->lock);
+ if (lastcomp == rsp->completed) {
+ rsp->signaled = RCU_FORCE_QS;
+ dyntick_record_completed(rsp, lastcomp);
+ }
+ spin_unlock(&rnp->lock);
+ break;
+
+ case RCU_FORCE_QS:
+
+ /* Check dyntick-idle state, send IPI to laggards. */
+ if (rcu_process_dyntick(rsp, dyntick_recall_completed(rsp),
+ rcu_implicit_dynticks_qs))
+ goto unlock_ret;
+
+ /* Leave state in case more forcing is required. */
+
+ break;
+ }
+unlock_ret:
+ spin_unlock_irqrestore(&rsp->fqslock, flags);
+}
+
+#else /* #ifdef CONFIG_SMP */
+
+static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
+{
+ set_need_resched();
+}
+
+#endif /* #else #ifdef CONFIG_SMP */
+
+/*
+ * This does the RCU processing work from softirq context for the
+ * specified rcu_state and rcu_data structures. This may be called
+ * only from the CPU to whom the rdp belongs.
+ */
+static void
+__rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
+{
+ unsigned long flags;
+
+ /*
+ * If an RCU GP has gone long enough, go check for dyntick
+ * idle CPUs and, if needed, send resched IPIs.
+ */
+ if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 ||
+ (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0)
+ force_quiescent_state(rsp, 1);
+
+ /*
+ * Advance callbacks in response to end of earlier grace
+ * period that some other CPU ended.
+ */
+ rcu_process_gp_end(rsp, rdp);
+
+ /* Update RCU state based on any recent quiescent states. */
+ rcu_check_quiescent_state(rsp, rdp);
+
+ /* Does this CPU require a not-yet-started grace period? */
+ if (cpu_needs_another_gp(rsp, rdp)) {
+ spin_lock_irqsave(&rcu_get_root(rsp)->lock, flags);
+ rcu_start_gp(rsp, flags); /* releases above lock */
+ }
+
+ /* If there are callbacks ready, invoke them. */
+ rcu_do_batch(rdp);
+}
+
+/*
+ * Do softirq processing for the current CPU.
+ */
+static void rcu_process_callbacks(struct softirq_action *unused)
+{
+ /*
+ * Memory references from any prior RCU read-side critical sections
+ * executed by the interrupted code must be seen before any RCU
+ * grace-period manipulations below.
+ */
+ smp_mb(); /* See above block comment. */
+
+ __rcu_process_callbacks(&rcu_state, &__get_cpu_var(rcu_data));
+ __rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
+
+ /*
+ * Memory references from any later RCU read-side critical sections
+ * executed by the interrupted code must be seen after any RCU
+ * grace-period manipulations above.
+ */
+ smp_mb(); /* See above block comment. */
+}
+
+static void
+__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
+ struct rcu_state *rsp)
+{
+ unsigned long flags;
+ struct rcu_data *rdp;
+
+ head->func = func;
+ head->next = NULL;
+
+ smp_mb(); /* Ensure RCU update seen before callback registry. */
+
+ /*
+ * Opportunistically note grace-period endings and beginnings.
+ * Note that we might see a beginning right after we see an
+ * end, but never vice versa, since this CPU has to pass through
+ * a quiescent state betweentimes.
+ */
+ local_irq_save(flags);
+ rdp = rsp->rda[smp_processor_id()];
+ rcu_process_gp_end(rsp, rdp);
+ check_for_new_grace_period(rsp, rdp);
+
+ /* Add the callback to our list. */
+ *rdp->nxttail[RCU_NEXT_TAIL] = head;
+ rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
+
+ /* Start a new grace period if one not already started. */
+ if (ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum)) {
+ unsigned long nestflag;
+ struct rcu_node *rnp_root = rcu_get_root(rsp);
+
+ spin_lock_irqsave(&rnp_root->lock, nestflag);
+ rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock. */
+ }
+
+ /* Force the grace period if too many callbacks or too long waiting. */
+ if (unlikely(++rdp->qlen > qhimark)) {
+ rdp->blimit = LONG_MAX;
+ force_quiescent_state(rsp, 0);
+ } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 ||
+ (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0)
+ force_quiescent_state(rsp, 1);
+ local_irq_restore(flags);
+}
+
+/*
+ * Queue an RCU callback for invocation after a grace period.
+ */
+void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+{
+ __call_rcu(head, func, &rcu_state);
+}
+EXPORT_SYMBOL_GPL(call_rcu);
+
+/*
+ * Queue an RCU callback for invocation after a quicker grace period.
+ */
+void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+{
+ __call_rcu(head, func, &rcu_bh_state);
+}
+EXPORT_SYMBOL_GPL(call_rcu_bh);
+
+/*
+ * Check to see if there is any immediate RCU-related work to be done
+ * by the current CPU, for the specified type of RCU, returning 1 if so.
+ * The checks are in order of increasing expense: checks that can be
+ * carried out against CPU-local state are performed first. However,
+ * we must check for CPU stalls first, else we might not get a chance.
+ */
+static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
+{
+ rdp->n_rcu_pending++;
+
+ /* Check for CPU stalls, if enabled. */
+ check_cpu_stall(rsp, rdp);
+
+ /* Is the RCU core waiting for a quiescent state from this CPU? */
+ if (rdp->qs_pending)
+ return 1;
+
+ /* Does this CPU have callbacks ready to invoke? */
+ if (cpu_has_callbacks_ready_to_invoke(rdp))
+ return 1;
+
+ /* Has RCU gone idle with this CPU needing another grace period? */
+ if (cpu_needs_another_gp(rsp, rdp))
+ return 1;
+
+ /* Has another RCU grace period completed? */
+ if (ACCESS_ONCE(rsp->completed) != rdp->completed) /* outside of lock */
+ return 1;
+
+ /* Has a new RCU grace period started? */
+ if (ACCESS_ONCE(rsp->gpnum) != rdp->gpnum) /* outside of lock */
+ return 1;
+
+ /* Has an RCU GP gone long enough to send resched IPIs &c? */
+ if (ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum) &&
+ ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 ||
+ (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0))
+ return 1;
+
+ /* nothing to do */
+ return 0;
+}
+
+/*
+ * Check to see if there is any immediate RCU-related work to be done
+ * by the current CPU, returning 1 if so. This function is part of the
+ * RCU implementation; it is -not- an exported member of the RCU API.
+ */
+int rcu_pending(int cpu)
+{
+ return __rcu_pending(&rcu_state, &per_cpu(rcu_data, cpu)) ||
+ __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu));
+}
+
+/*
+ * Check to see if any future RCU-related work will need to be done
+ * by the current CPU, even if none need be done immediately, returning
+ * 1 if so. This function is part of the RCU implementation; it is -not-
+ * an exported member of the RCU API.
+ */
+int rcu_needs_cpu(int cpu)
+{
+ /* RCU callbacks either ready or pending? */
+ return per_cpu(rcu_data, cpu).nxtlist ||
+ per_cpu(rcu_bh_data, cpu).nxtlist;
+}
+
+/*
+ * Initialize a CPU's per-CPU RCU data. We take this "scorched earth"
+ * approach so that we don't have to worry about how long the CPU has
+ * been gone, or whether it ever was online previously. We do trust the
+ * ->mynode field, as it is constant for a given struct rcu_data and
+ * initialized during early boot.
+ *
+ * Note that only one online or offline event can be happening at a given
+ * time. Note also that we can accept some slop in the rsp->completed
+ * access due to the fact that this CPU cannot possibly have any RCU
+ * callbacks in flight yet.
+ */
+static void
+rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
+{
+ unsigned long flags;
+ int i;
+ long lastcomp;
+ unsigned long mask;
+ struct rcu_data *rdp = rsp->rda[cpu];
+ struct rcu_node *rnp = rcu_get_root(rsp);
+
+ /* Set up local state, ensuring consistent view of global state. */
+ spin_lock_irqsave(&rnp->lock, flags);
+ lastcomp = rsp->completed;
+ rdp->completed = lastcomp;
+ rdp->gpnum = lastcomp;
+ rdp->passed_quiesc = 0; /* We could be racing with new GP, */
+ rdp->qs_pending = 1; /* so set up to respond to current GP. */
+ rdp->beenonline = 1; /* We have now been online. */
+ rdp->passed_quiesc_completed = lastcomp - 1;
+ rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
+ rdp->nxtlist = NULL;
+ for (i = 0; i < RCU_NEXT_SIZE; i++)
+ rdp->nxttail[i] = &rdp->nxtlist;
+ rdp->qlen = 0;
+ rdp->blimit = blimit;
+#ifdef CONFIG_NO_HZ
+ rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
+#endif /* #ifdef CONFIG_NO_HZ */
+ rdp->cpu = cpu;
+ spin_unlock(&rnp->lock); /* irqs remain disabled. */
+
+ /*
+ * A new grace period might start here. If so, we won't be part
+ * of it, but that is OK, as we are currently in a quiescent state.
+ */
+
+ /* Exclude any attempts to start a new GP on large systems. */
+ spin_lock(&rsp->onofflock); /* irqs already disabled. */
+
+ /* Add CPU to rcu_node bitmasks. */
+ rnp = rdp->mynode;
+ mask = rdp->grpmask;
+ do {
+ /* Exclude any attempts to start a new GP on small systems. */
+ spin_lock(&rnp->lock); /* irqs already disabled. */
+ rnp->qsmaskinit |= mask;
+ mask = rnp->grpmask;
+ spin_unlock(&rnp->lock); /* irqs already disabled. */
+ rnp = rnp->parent;
+ } while (rnp != NULL && !(rnp->qsmaskinit & mask));
+
+ spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
+
+ /*
+ * A new grace period might start here. If so, we will be part of
+ * it, and its gpnum will be greater than ours, so we will
+ * participate. It is also possible for the gpnum to have been
+ * incremented before this function was called, and the bitmasks
+ * to not be filled out until now, in which case we will also
+ * participate due to our gpnum being behind.
+ */
+
+ /* Since it is coming online, the CPU is in a quiescent state. */
+ cpu_quiet(cpu, rsp, rdp, lastcomp);
+ local_irq_restore(flags);
+}
+
+static void __cpuinit rcu_online_cpu(int cpu)
+{
+#ifdef CONFIG_NO_HZ
+ struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+ rdtp->dynticks_nesting = 1;
+ rdtp->dynticks |= 1; /* need consecutive #s even for hotplug. */
+ rdtp->dynticks_nmi = (rdtp->dynticks_nmi + 1) & ~0x1;
+#endif /* #ifdef CONFIG_NO_HZ */
+ rcu_init_percpu_data(cpu, &rcu_state);
+ rcu_init_percpu_data(cpu, &rcu_bh_state);
+ open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
+}
+
+/*
+ * Handle CPU online/offline notification events.
+ */
+static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ long cpu = (long)hcpu;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+ rcu_online_cpu(cpu);
+ break;
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
+ rcu_offline_cpu(cpu);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+/*
+ * Compute the per-level fanout, either using the exact fanout specified
+ * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT.
+ */
+#ifdef CONFIG_RCU_FANOUT_EXACT
+static void __init rcu_init_levelspread(struct rcu_state *rsp)
+{
+ int i;
+
+ for (i = NUM_RCU_LVLS - 1; i >= 0; i--)
+ rsp->levelspread[i] = CONFIG_RCU_FANOUT;
+}
+#else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
+static void __init rcu_init_levelspread(struct rcu_state *rsp)
+{
+ int ccur;
+ int cprv;
+ int i;
+
+ cprv = NR_CPUS;
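+ /*
+ * Working up from the leaves, give each level the smallest per-node
+ * fanout that covers the level below it: levelspread[i] = ceil(cprv/ccur).
+ * For example (hypothetical numbers), 130 CPUs spread across 3 leaf
+ * nodes gives (130 + 3 - 1) / 3 = 44 CPUs per leaf node.
+ */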
+ for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
+ ccur = rsp->levelcnt[i];
+ rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
+ cprv = ccur;
+ }
+}
+#endif /* #else #ifdef CONFIG_RCU_FANOUT_EXACT */
+
+/*
+ * Helper function for rcu_init() that initializes one rcu_state structure.
+ */
+static void __init rcu_init_one(struct rcu_state *rsp)
+{
+ int cpustride = 1;
+ int i;
+ int j;
+ struct rcu_node *rnp;
+
+ /* Initialize the level-tracking arrays. */
+
+ for (i = 1; i < NUM_RCU_LVLS; i++)
+ rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
+ rcu_init_levelspread(rsp);
+
+ /* Initialize the elements themselves, starting from the leaves. */
+
+ for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
+ cpustride *= rsp->levelspread[i];
+ rnp = rsp->level[i];
+ for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
+ spin_lock_init(&rnp->lock);
+ rnp->qsmask = 0;
+ rnp->qsmaskinit = 0;
+ rnp->grplo = j * cpustride;
+ rnp->grphi = (j + 1) * cpustride - 1;
+ if (rnp->grphi >= NR_CPUS)
+ rnp->grphi = NR_CPUS - 1;
+ if (i == 0) {
+ rnp->grpnum = 0;
+ rnp->grpmask = 0;
+ rnp->parent = NULL;
+ } else {
+ rnp->grpnum = j % rsp->levelspread[i - 1];
+ rnp->grpmask = 1UL << rnp->grpnum;
+ rnp->parent = rsp->level[i - 1] +
+ j / rsp->levelspread[i - 1];
+ }
+ rnp->level = i;
+ }
+ }
+}
+
+/*
+ * Helper macro for __rcu_init(). To be used nowhere else!
+ * Assigns leaf node pointers into each CPU's rcu_data structure.
+ */
+#define RCU_DATA_PTR_INIT(rsp, rcu_data) \
+do { \
+ rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \
+ j = 0; \
+ for_each_possible_cpu(i) { \
+ if (i > rnp[j].grphi) \
+ j++; \
+ per_cpu(rcu_data, i).mynode = &rnp[j]; \
+ (rsp)->rda[i] = &per_cpu(rcu_data, i); \
+ } \
+} while (0)
+
+static struct notifier_block __cpuinitdata rcu_nb = {
+ .notifier_call = rcu_cpu_notify,
+};
+
+void __init __rcu_init(void)
+{
+ int i; /* All used by RCU_DATA_PTR_INIT(). */
+ int j;
+ struct rcu_node *rnp;
+
+ printk(KERN_WARNING "Experimental hierarchical RCU implementation.\n");
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+ printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+ rcu_init_one(&rcu_state);
+ RCU_DATA_PTR_INIT(&rcu_state, rcu_data);
+ rcu_init_one(&rcu_bh_state);
+ RCU_DATA_PTR_INIT(&rcu_bh_state, rcu_bh_data);
+
+ for_each_online_cpu(i)
+ rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long)i);
+ /* Register notifier for non-boot CPUs */
+ register_cpu_notifier(&rcu_nb);
+ printk(KERN_WARNING "Experimental hierarchical RCU init done.\n");
+}
+
+module_param(blimit, int, 0);
+module_param(qhimark, int, 0);
+module_param(qlowmark, int, 0);
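For reference, call_rcu() and call_rcu_bh() above are used exactly as in the classic implementation. A minimal sketch of the usual pattern (illustrative only, not part of this patch; the struct and function names are made up) for deferring a kfree() until a grace period has elapsed:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct foo {
            int data;
            struct rcu_head rcu;    /* callback header embedded in the object */
    };

    static void foo_reclaim(struct rcu_head *head)
    {
            /* Recover the enclosing object and free it after the grace period. */
            kfree(container_of(head, struct foo, rcu));
    }

    static void foo_retire(struct foo *old_fp)
    {
            /* old_fp has already been unlinked from reader-visible structures. */
            call_rcu(&old_fp->rcu, foo_reclaim);
    }

Pre-existing rcu_read_lock() readers may still hold references to old_fp when call_rcu() is invoked; foo_reclaim() runs only after all such readers have completed.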
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
new file mode 100644
index 000000000000..d6db3e837826
--- /dev/null
+++ b/kernel/rcutree_trace.c
@@ -0,0 +1,271 @@
+/*
+ * Read-Copy Update tracing for hierarchical implementation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2008
+ *
+ * Papers: http://www.rdrop.com/users/paulmck/RCU
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ * Documentation/RCU
+ *
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/rcupdate.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <asm/atomic.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/completion.h>
+#include <linux/moduleparam.h>
+#include <linux/percpu.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/mutex.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
+{
+ if (!rdp->beenonline)
+ return;
+ seq_printf(m, "%3d%cc=%ld g=%ld pq=%d pqc=%ld qp=%d rpfq=%ld rp=%x",
+ rdp->cpu,
+ cpu_is_offline(rdp->cpu) ? '!' : ' ',
+ rdp->completed, rdp->gpnum,
+ rdp->passed_quiesc, rdp->passed_quiesc_completed,
+ rdp->qs_pending,
+ rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending,
+ (int)(rdp->n_rcu_pending & 0xffff));
+#ifdef CONFIG_NO_HZ
+ seq_printf(m, " dt=%d/%d dn=%d df=%lu",
+ rdp->dynticks->dynticks,
+ rdp->dynticks->dynticks_nesting,
+ rdp->dynticks->dynticks_nmi,
+ rdp->dynticks_fqs);
+#endif /* #ifdef CONFIG_NO_HZ */
+ seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi);
+ seq_printf(m, " ql=%ld b=%ld\n", rdp->qlen, rdp->blimit);
+}
+
+#define PRINT_RCU_DATA(name, func, m) \
+ do { \
+ int _p_r_d_i; \
+ \
+ for_each_possible_cpu(_p_r_d_i) \
+ func(m, &per_cpu(name, _p_r_d_i)); \
+ } while (0)
+
+static int show_rcudata(struct seq_file *m, void *unused)
+{
+ seq_puts(m, "rcu:\n");
+ PRINT_RCU_DATA(rcu_data, print_one_rcu_data, m);
+ seq_puts(m, "rcu_bh:\n");
+ PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data, m);
+ return 0;
+}
+
+static int rcudata_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, show_rcudata, NULL);
+}
+
+static struct file_operations rcudata_fops = {
+ .owner = THIS_MODULE,
+ .open = rcudata_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
+{
+ if (!rdp->beenonline)
+ return;
+ seq_printf(m, "%d,%s,%ld,%ld,%d,%ld,%d,%ld,%ld",
+ rdp->cpu,
+ cpu_is_offline(rdp->cpu) ? "\"Y\"" : "\"N\"",
+ rdp->completed, rdp->gpnum,
+ rdp->passed_quiesc, rdp->passed_quiesc_completed,
+ rdp->qs_pending,
+ rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending,
+ rdp->n_rcu_pending);
+#ifdef CONFIG_NO_HZ
+ seq_printf(m, ",%d,%d,%d,%lu",
+ rdp->dynticks->dynticks,
+ rdp->dynticks->dynticks_nesting,
+ rdp->dynticks->dynticks_nmi,
+ rdp->dynticks_fqs);
+#endif /* #ifdef CONFIG_NO_HZ */
+ seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi);
+ seq_printf(m, ",%ld,%ld\n", rdp->qlen, rdp->blimit);
+}
+
+static int show_rcudata_csv(struct seq_file *m, void *unused)
+{
+ seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pqc\",\"pq\",\"rpfq\",\"rp\",");
+#ifdef CONFIG_NO_HZ
+ seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\",");
+#endif /* #ifdef CONFIG_NO_HZ */
+ seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\"\n");
+ seq_puts(m, "\"rcu:\"\n");
+ PRINT_RCU_DATA(rcu_data, print_one_rcu_data_csv, m);
+ seq_puts(m, "\"rcu_bh:\"\n");
+ PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data_csv, m);
+ return 0;
+}
+
+static int rcudata_csv_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, show_rcudata_csv, NULL);
+}
+
+static struct file_operations rcudata_csv_fops = {
+ .owner = THIS_MODULE,
+ .open = rcudata_csv_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
+{
+ int level = 0;
+ struct rcu_node *rnp;
+
+ seq_printf(m, "c=%ld g=%ld s=%d jfq=%ld j=%x "
+ "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu\n",
+ rsp->completed, rsp->gpnum, rsp->signaled,
+ (long)(rsp->jiffies_force_qs - jiffies),
+ (int)(jiffies & 0xffff),
+ rsp->n_force_qs, rsp->n_force_qs_ngp,
+ rsp->n_force_qs - rsp->n_force_qs_ngp,
+ rsp->n_force_qs_lh);
+ for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < NUM_RCU_NODES; rnp++) {
+ if (rnp->level != level) {
+ seq_puts(m, "\n");
+ level = rnp->level;
+ }
+ seq_printf(m, "%lx/%lx %d:%d ^%d ",
+ rnp->qsmask, rnp->qsmaskinit,
+ rnp->grplo, rnp->grphi, rnp->grpnum);
+ }
+ seq_puts(m, "\n");
+}
+
+static int show_rcuhier(struct seq_file *m, void *unused)
+{
+ seq_puts(m, "rcu:\n");
+ print_one_rcu_state(m, &rcu_state);
+ seq_puts(m, "rcu_bh:\n");
+ print_one_rcu_state(m, &rcu_bh_state);
+ return 0;
+}
+
+static int rcuhier_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, show_rcuhier, NULL);
+}
+
+static struct file_operations rcuhier_fops = {
+ .owner = THIS_MODULE,
+ .open = rcuhier_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int show_rcugp(struct seq_file *m, void *unused)
+{
+ seq_printf(m, "rcu: completed=%ld gpnum=%ld\n",
+ rcu_state.completed, rcu_state.gpnum);
+ seq_printf(m, "rcu_bh: completed=%ld gpnum=%ld\n",
+ rcu_bh_state.completed, rcu_bh_state.gpnum);
+ return 0;
+}
+
+static int rcugp_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, show_rcugp, NULL);
+}
+
+static struct file_operations rcugp_fops = {
+ .owner = THIS_MODULE,
+ .open = rcugp_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static struct dentry *rcudir, *datadir, *datadir_csv, *hierdir, *gpdir;
+static int __init rcuclassic_trace_init(void)
+{
+ rcudir = debugfs_create_dir("rcu", NULL);
+ if (!rcudir)
+ goto out;
+
+ datadir = debugfs_create_file("rcudata", 0444, rcudir,
+ NULL, &rcudata_fops);
+ if (!datadir)
+ goto free_out;
+
+ datadir_csv = debugfs_create_file("rcudata.csv", 0444, rcudir,
+ NULL, &rcudata_csv_fops);
+ if (!datadir_csv)
+ goto free_out;
+
+ gpdir = debugfs_create_file("rcugp", 0444, rcudir, NULL, &rcugp_fops);
+ if (!gpdir)
+ goto free_out;
+
+ hierdir = debugfs_create_file("rcuhier", 0444, rcudir,
+ NULL, &rcuhier_fops);
+ if (!hierdir)
+ goto free_out;
+ return 0;
+free_out:
+ if (datadir)
+ debugfs_remove(datadir);
+ if (datadir_csv)
+ debugfs_remove(datadir_csv);
+ if (gpdir)
+ debugfs_remove(gpdir);
+ debugfs_remove(rcudir);
+out:
+ return 1;
+}
+
+static void __exit rcuclassic_trace_cleanup(void)
+{
+ debugfs_remove(datadir);
+ debugfs_remove(datadir_csv);
+ debugfs_remove(gpdir);
+ debugfs_remove(hierdir);
+ debugfs_remove(rcudir);
+}
+
+
+module_init(rcuclassic_trace_init);
+module_exit(rcuclassic_trace_cleanup);
+
+MODULE_AUTHOR("Paul E. McKenney");
+MODULE_DESCRIPTION("Read-Copy Update tracing for hierarchical implementation");
+MODULE_LICENSE("GPL");
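The debugfs files created above live under the "rcu" directory of a mounted debugfs instance (conventionally /sys/kernel/debug/rcu/). As one illustration, rcugp contains one line per RCU flavor in the format emitted by show_rcugp(); the counts below are made-up examples:

    rcu: completed=4200 gpnum=4201
    rcu_bh: completed=1012 gpnum=1012

A gpnum one greater than completed means a grace period is in progress for that flavor; equal values mean that flavor of RCU is idle.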
diff --git a/kernel/resource.c b/kernel/resource.c
index 4337063663ef..e633106b12f6 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -853,6 +853,15 @@ int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
continue;
+ /*
+ * if a resource is "BUSY", it's not a hardware resource
+ * but a driver mapping of such a resource; we don't want
+ * to warn for those; some drivers legitimately map only
+ * partial hardware resources. (example: vesafb)
+ */
+ if (p->flags & IORESOURCE_BUSY)
+ continue;
+
printk(KERN_WARNING "resource map sanity check conflict: "
"0x%llx 0x%llx 0x%llx 0x%llx %s\n",
(unsigned long long)addr,
diff --git a/kernel/sched.c b/kernel/sched.c
index 748ff924a290..fff1c4a20b65 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -209,7 +209,6 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
hrtimer_init(&rt_b->rt_period_timer,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rt_b->rt_period_timer.function = sched_rt_period_timer;
- rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
}
static inline int rt_bandwidth_enabled(void)
@@ -1139,7 +1138,6 @@ static void init_rq_hrtick(struct rq *rq)
hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rq->hrtick_timer.function = hrtick;
- rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
}
#else /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
@@ -4192,7 +4190,6 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
if (p == rq->idle) {
p->stime = cputime_add(p->stime, steal);
- account_group_system_time(p, steal);
if (atomic_read(&rq->nr_iowait) > 0)
cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
else
@@ -4328,7 +4325,7 @@ void __kprobes sub_preempt_count(int val)
/*
* Underflow?
*/
- if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
+ if (DEBUG_LOCKS_WARN_ON(val > preempt_count() - (!!kernel_locked())))
return;
/*
* Is the spinlock portion underflowing?
diff --git a/kernel/softirq.c b/kernel/softirq.c
index e7c69a720d69..466e75ce271a 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -102,20 +102,6 @@ void local_bh_disable(void)
EXPORT_SYMBOL(local_bh_disable);
-void __local_bh_enable(void)
-{
- WARN_ON_ONCE(in_irq());
-
- /*
- * softirqs should never be enabled by __local_bh_enable(),
- * it always nests inside local_bh_enable() sections:
- */
- WARN_ON_ONCE(softirq_count() == SOFTIRQ_OFFSET);
-
- sub_preempt_count(SOFTIRQ_OFFSET);
-}
-EXPORT_SYMBOL_GPL(__local_bh_enable);
-
/*
* Special-case - softirqs can safely be enabled in
* cond_resched_softirq(), or by __do_softirq(),
@@ -269,6 +255,7 @@ void irq_enter(void)
{
int cpu = smp_processor_id();
+ rcu_irq_enter();
if (idle_cpu(cpu) && !in_interrupt()) {
__irq_enter();
tick_check_idle(cpu);
@@ -295,9 +282,9 @@ void irq_exit(void)
#ifdef CONFIG_NO_HZ
/* Make sure that timer wheel updates are propagated */
- if (!in_interrupt() && idle_cpu(smp_processor_id()) && !need_resched())
- tick_nohz_stop_sched_tick(0);
rcu_irq_exit();
+ if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
+ tick_nohz_stop_sched_tick(0);
#endif
preempt_enable_no_resched();
}
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index dc0b3be6b7d5..1ab790c67b17 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -164,7 +164,7 @@ unsigned long __read_mostly sysctl_hung_task_check_count = 1024;
/*
* Zero means infinite timeout - no checking done:
*/
-unsigned long __read_mostly sysctl_hung_task_timeout_secs = 120;
+unsigned long __read_mostly sysctl_hung_task_timeout_secs = 480;
unsigned long __read_mostly sysctl_hung_task_warnings = 10;
diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c
index 94b527ef1d1e..eb212f8f8bc8 100644
--- a/kernel/stacktrace.c
+++ b/kernel/stacktrace.c
@@ -6,6 +6,7 @@
* Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
*/
#include <linux/sched.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/stacktrace.h>
@@ -24,3 +25,13 @@ void print_stack_trace(struct stack_trace *trace, int spaces)
}
EXPORT_SYMBOL_GPL(print_stack_trace);
+/*
+ * Architectures that do not implement save_stack_trace_tsk get this
+ * weak alias and a once-per-bootup warning (whenever this facility
+ * is utilized - for example by procfs):
+ */
+__weak void
+save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+ WARN_ONCE(1, KERN_INFO "save_stack_trace_tsk() not implemented yet.\n");
+}
diff --git a/kernel/sys.c b/kernel/sys.c
index ebe65c2c9873..d356d79e84ac 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -907,8 +907,8 @@ void do_sys_times(struct tms *tms)
struct task_cputime cputime;
cputime_t cutime, cstime;
- spin_lock_irq(&current->sighand->siglock);
thread_group_cputime(current, &cputime);
+ spin_lock_irq(&current->sighand->siglock);
cutime = current->signal->cutime;
cstime = current->signal->cstime;
spin_unlock_irq(&current->sighand->siglock);
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 8ff15e5d486b..f5f793d92415 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -131,7 +131,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
{
enum hrtimer_restart res = HRTIMER_NORESTART;
- write_seqlock_irq(&xtime_lock);
+ write_seqlock(&xtime_lock);
switch (time_state) {
case TIME_OK:
@@ -164,7 +164,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
}
update_vsyscall(&xtime, clock);
- write_sequnlock_irq(&xtime_lock);
+ write_sequnlock(&xtime_lock);
return res;
}
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 342fc9ccab46..8f3fc2582d38 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -247,7 +247,7 @@ void tick_nohz_stop_sched_tick(int inidle)
if (need_resched())
goto end;
- if (unlikely(local_softirq_pending())) {
+ if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
static int ratelimit;
if (ratelimit < 10) {
@@ -282,8 +282,31 @@ void tick_nohz_stop_sched_tick(int inidle)
/* Schedule the tick, if we are at least one jiffie off */
if ((long)delta_jiffies >= 1) {
+ /*
+ * calculate the expiry time for the next timer wheel
+ * timer
+ */
+ expires = ktime_add_ns(last_update, tick_period.tv64 *
+ delta_jiffies);
+
+ /*
+ * If this cpu is the one which updates jiffies, then
+ * give up the assignment and let it be taken by the
+ * cpu which runs the tick timer next, which might be
+ * this cpu as well. If we don't drop this here the
+ * jiffies might be stale and do_timer() never
+ * invoked.
+ */
+ if (cpu == tick_do_timer_cpu)
+ tick_do_timer_cpu = TICK_DO_TIMER_NONE;
+
if (delta_jiffies > 1)
cpu_set(cpu, nohz_cpu_mask);
+
+ /* Skip reprogram of event if its not changed */
+ if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
+ goto out;
+
/*
* nohz_stop_sched_tick can be called several times before
* the nohz_restart_sched_tick is called. This happens when
@@ -306,17 +329,6 @@ void tick_nohz_stop_sched_tick(int inidle)
rcu_enter_nohz();
}
- /*
- * If this cpu is the one which updates jiffies, then
- * give up the assignment and let it be taken by the
- * cpu which runs the tick timer next, which might be
- * this cpu as well. If we don't drop this here the
- * jiffies might be stale and do_timer() never
- * invoked.
- */
- if (cpu == tick_do_timer_cpu)
- tick_do_timer_cpu = TICK_DO_TIMER_NONE;
-
ts->idle_sleeps++;
/*
@@ -332,12 +344,7 @@ void tick_nohz_stop_sched_tick(int inidle)
goto out;
}
- /*
- * calculate the expiry time for the next timer wheel
- * timer
- */
- expires = ktime_add_ns(last_update, tick_period.tv64 *
- delta_jiffies);
+ /* Mark expiries */
ts->idle_expires = expires;
if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
@@ -681,7 +688,6 @@ void tick_setup_sched_timer(void)
*/
hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
ts->sched_timer.function = tick_sched_timer;
- ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
/* Get the next period (per cpu) */
hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index 01becf1f19ff..a5779bd975db 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -202,7 +202,6 @@ static void start_stack_timer(int cpu)
hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer->function = stack_trace_timer_fn;
- hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL);
}
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index b0f239e443bc..eae594cb6ea9 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -252,6 +252,14 @@ config DEBUG_OBJECTS_TIMERS
timer routines to track the life time of timer objects and
validate the timer operations.
+config DEBUG_OBJECTS_ENABLE_DEFAULT
+ int "debug_objects bootup default value (0-1)"
+ range 0 1
+ default "1"
+ depends on DEBUG_OBJECTS
+ help
+ Debug objects boot parameter default value
+
config DEBUG_SLAB
bool "Debug slab memory allocations"
depends on DEBUG_KERNEL && SLAB
@@ -545,6 +553,16 @@ config DEBUG_SG
If unsure, say N.
+config DEBUG_NOTIFIERS
+ bool "Debug notifier call chains"
+ depends on DEBUG_KERNEL
+ help
+ Enable this to turn on sanity checking for notifier call chains.
+ This is most useful for kernel developers to make sure that
+ modules properly unregister themselves from notifier chains.
+ This is a relatively cheap check but if you care about maximum
+ performance, say N.
+
config FRAME_POINTER
bool "Compile the kernel with frame pointers"
depends on DEBUG_KERNEL && \
@@ -619,6 +637,19 @@ config RCU_CPU_STALL_DETECTOR
Say N if you are unsure.
+config RCU_CPU_STALL_DETECTOR
+ bool "Check for stalled CPUs delaying RCU grace periods"
+ depends on CLASSIC_RCU || TREE_RCU
+ default n
+ help
+ This option causes RCU to printk information on which
+ CPUs are delaying the current grace period, but only when
+ the grace period extends for excessive time periods.
+
+ Say Y if you want RCU to perform such checks.
+
+ Say N if you are unsure.
+
config KPROBES_SANITY_TEST
bool "Kprobes sanity tests"
depends on DEBUG_KERNEL
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index e3ab374e1334..5d99be1fd988 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -45,7 +45,9 @@ static struct kmem_cache *obj_cache;
static int debug_objects_maxchain __read_mostly;
static int debug_objects_fixups __read_mostly;
static int debug_objects_warnings __read_mostly;
-static int debug_objects_enabled __read_mostly;
+static int debug_objects_enabled __read_mostly
+ = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
+
static struct debug_obj_descr *descr_test __read_mostly;
static int __init enable_object_debug(char *str)
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 5f6c629a924d..fa2dc4e5f9ba 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -21,9 +21,12 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/spinlock.h>
+#include <linux/swiotlb.h>
#include <linux/string.h>
+#include <linux/swiotlb.h>
#include <linux/types.h>
#include <linux/ctype.h>
+#include <linux/highmem.h>
#include <asm/io.h>
#include <asm/dma.h>
@@ -36,22 +39,6 @@
#define OFFSET(val,align) ((unsigned long) \
( (val) & ( (align) - 1)))
-#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg)))
-#define SG_ENT_PHYS_ADDRESS(sg) virt_to_bus(SG_ENT_VIRT_ADDRESS(sg))
-
-/*
- * Maximum allowable number of contiguous slabs to map,
- * must be a power of 2. What is the appropriate value ?
- * The complexity of {map,unmap}_single is linearly dependent on this value.
- */
-#define IO_TLB_SEGSIZE 128
-
-/*
- * log of the size of each IO TLB slab. The number of slabs is command line
- * controllable.
- */
-#define IO_TLB_SHIFT 11
-
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
/*
@@ -102,7 +89,10 @@ static unsigned int io_tlb_index;
* We need to save away the original address corresponding to a mapped entry
* for the sync operations.
*/
-static unsigned char **io_tlb_orig_addr;
+static struct swiotlb_phys_addr {
+ struct page *page;
+ unsigned int offset;
+} *io_tlb_orig_addr;
/*
* Protect the above data structures in the map and unmap calls
@@ -126,6 +116,72 @@ setup_io_tlb_npages(char *str)
__setup("swiotlb=", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */
+void * __weak swiotlb_alloc_boot(size_t size, unsigned long nslabs)
+{
+ return alloc_bootmem_low_pages(size);
+}
+
+void * __weak swiotlb_alloc(unsigned order, unsigned long nslabs)
+{
+ return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
+}
+
+dma_addr_t __weak swiotlb_phys_to_bus(phys_addr_t paddr)
+{
+ return paddr;
+}
+
+phys_addr_t __weak swiotlb_bus_to_phys(dma_addr_t baddr)
+{
+ return baddr;
+}
+
+static dma_addr_t swiotlb_virt_to_bus(volatile void *address)
+{
+ return swiotlb_phys_to_bus(virt_to_phys(address));
+}
+
+static void *swiotlb_bus_to_virt(dma_addr_t address)
+{
+ return phys_to_virt(swiotlb_bus_to_phys(address));
+}
+
+int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size)
+{
+ return 0;
+}
+
+static dma_addr_t swiotlb_sg_to_bus(struct scatterlist *sg)
+{
+ return swiotlb_phys_to_bus(page_to_phys(sg_page(sg)) + sg->offset);
+}
+
+static void swiotlb_print_info(unsigned long bytes)
+{
+ phys_addr_t pstart, pend;
+ dma_addr_t bstart, bend;
+
+ pstart = virt_to_phys(io_tlb_start);
+ pend = virt_to_phys(io_tlb_end);
+
+ bstart = swiotlb_phys_to_bus(pstart);
+ bend = swiotlb_phys_to_bus(pend);
+
+ printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n",
+ bytes >> 20, io_tlb_start, io_tlb_end);
+ if (pstart != bstart || pend != bend)
+ printk(KERN_INFO "software IO TLB at phys %#llx - %#llx"
+ " bus %#llx - %#llx\n",
+ (unsigned long long)pstart,
+ (unsigned long long)pend,
+ (unsigned long long)bstart,
+ (unsigned long long)bend);
+ else
+ printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n",
+ (unsigned long long)pstart,
+ (unsigned long long)pend);
+}
+
/*
* Statically reserve bounce buffer space and initialize bounce buffer data
* structures for the software IO TLB used to implement the DMA API.
@@ -145,7 +201,7 @@ swiotlb_init_with_default_size(size_t default_size)
/*
* Get IO TLB memory from the low pages
*/
- io_tlb_start = alloc_bootmem_low_pages(bytes);
+ io_tlb_start = swiotlb_alloc_boot(bytes, io_tlb_nslabs);
if (!io_tlb_start)
panic("Cannot allocate SWIOTLB buffer");
io_tlb_end = io_tlb_start + bytes;
@@ -159,7 +215,7 @@ swiotlb_init_with_default_size(size_t default_size)
for (i = 0; i < io_tlb_nslabs; i++)
io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
io_tlb_index = 0;
- io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));
+ io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(struct swiotlb_phys_addr));
/*
* Get the overflow emergency buffer
@@ -168,8 +224,7 @@ swiotlb_init_with_default_size(size_t default_size)
if (!io_tlb_overflow_buffer)
panic("Cannot allocate SWIOTLB overflow buffer!\n");
- printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
- virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
+ swiotlb_print_info(bytes);
}
void __init
@@ -202,8 +257,7 @@ swiotlb_late_init_with_default_size(size_t default_size)
bytes = io_tlb_nslabs << IO_TLB_SHIFT;
while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
- io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
- order);
+ io_tlb_start = swiotlb_alloc(order, io_tlb_nslabs);
if (io_tlb_start)
break;
order--;
@@ -235,12 +289,12 @@ swiotlb_late_init_with_default_size(size_t default_size)
io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
io_tlb_index = 0;
- io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL,
- get_order(io_tlb_nslabs * sizeof(char *)));
+ io_tlb_orig_addr = (struct swiotlb_phys_addr *)__get_free_pages(GFP_KERNEL,
+ get_order(io_tlb_nslabs * sizeof(struct swiotlb_phys_addr)));
if (!io_tlb_orig_addr)
goto cleanup3;
- memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *));
+ memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(struct swiotlb_phys_addr));
/*
* Get the overflow emergency buffer
@@ -250,9 +304,7 @@ swiotlb_late_init_with_default_size(size_t default_size)
if (!io_tlb_overflow_buffer)
goto cleanup4;
- printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - "
- "0x%lx\n", bytes >> 20,
- virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
+ swiotlb_print_info(bytes);
return 0;
@@ -279,16 +331,69 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
}
+static inline int range_needs_mapping(void *ptr, size_t size)
+{
+ return swiotlb_force || swiotlb_arch_range_needs_mapping(ptr, size);
+}
+
static int is_swiotlb_buffer(char *addr)
{
return addr >= io_tlb_start && addr < io_tlb_end;
}
+static struct swiotlb_phys_addr swiotlb_bus_to_phys_addr(char *dma_addr)
+{
+ int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
+ struct swiotlb_phys_addr buffer = io_tlb_orig_addr[index];
+ buffer.offset += (long)dma_addr & ((1 << IO_TLB_SHIFT) - 1);
+ buffer.page += buffer.offset >> PAGE_SHIFT;
+ buffer.offset &= PAGE_SIZE - 1;
+ return buffer;
+}
+
+static void
+__sync_single(struct swiotlb_phys_addr buffer, char *dma_addr, size_t size, int dir)
+{
+ if (PageHighMem(buffer.page)) {
+ size_t len, bytes;
+ char *dev, *host, *kmp;
+
+ len = size;
+ while (len != 0) {
+ unsigned long flags;
+
+ bytes = len;
+ if ((bytes + buffer.offset) > PAGE_SIZE)
+ bytes = PAGE_SIZE - buffer.offset;
+ local_irq_save(flags); /* protects KM_BOUNCE_READ */
+ kmp = kmap_atomic(buffer.page, KM_BOUNCE_READ);
+ dev = dma_addr + size - len;
+ host = kmp + buffer.offset;
+ if (dir == DMA_FROM_DEVICE)
+ memcpy(host, dev, bytes);
+ else
+ memcpy(dev, host, bytes);
+ kunmap_atomic(kmp, KM_BOUNCE_READ);
+ local_irq_restore(flags);
+ len -= bytes;
+ buffer.page++;
+ buffer.offset = 0;
+ }
+ } else {
+ void *v = page_address(buffer.page) + buffer.offset;
+
+ if (dir == DMA_TO_DEVICE)
+ memcpy(dma_addr, v, size);
+ else
+ memcpy(v, dma_addr, size);
+ }
+}
+
/*
* Allocates bounce buffer and returns its kernel virtual address.
*/
static void *
-map_single(struct device *hwdev, char *buffer, size_t size, int dir)
+map_single(struct device *hwdev, struct swiotlb_phys_addr buffer, size_t size, int dir)
{
unsigned long flags;
char *dma_addr;
@@ -298,11 +403,16 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
unsigned long mask;
unsigned long offset_slots;
unsigned long max_slots;
+ struct swiotlb_phys_addr slot_buf;
mask = dma_get_seg_boundary(hwdev);
- start_dma_addr = virt_to_bus(io_tlb_start) & mask;
+ start_dma_addr = swiotlb_virt_to_bus(io_tlb_start) & mask;
offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+
+ /*
+ * Carefully handle integer overflow which can occur when mask == ~0UL.
+ */
max_slots = mask + 1
? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
: 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
@@ -378,10 +488,15 @@ found:
* This is needed when we sync the memory. Then we sync the buffer if
* needed.
*/
- for (i = 0; i < nslots; i++)
- io_tlb_orig_addr[index+i] = buffer + (i << IO_TLB_SHIFT);
+ slot_buf = buffer;
+ for (i = 0; i < nslots; i++) {
+ slot_buf.page += slot_buf.offset >> PAGE_SHIFT;
+ slot_buf.offset &= PAGE_SIZE - 1;
+ io_tlb_orig_addr[index+i] = slot_buf;
+ slot_buf.offset += 1 << IO_TLB_SHIFT;
+ }
if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
- memcpy(dma_addr, buffer, size);
+ __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
return dma_addr;
}
@@ -395,17 +510,17 @@ unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
unsigned long flags;
int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
- char *buffer = io_tlb_orig_addr[index];
+ struct swiotlb_phys_addr buffer = swiotlb_bus_to_phys_addr(dma_addr);
/*
* First, sync the memory before unmapping the entry
*/
- if (buffer && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+ if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
/*
* bounce... copy the data back into the original buffer and
* delete the bounce buffer.
*/
- memcpy(buffer, dma_addr, size);
+ __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
/*
* Return the buffer to the free list by setting the corresponding
@@ -437,21 +552,18 @@ static void
sync_single(struct device *hwdev, char *dma_addr, size_t size,
int dir, int target)
{
- int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
- char *buffer = io_tlb_orig_addr[index];
-
- buffer += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));
+ struct swiotlb_phys_addr buffer = swiotlb_bus_to_phys_addr(dma_addr);
switch (target) {
case SYNC_FOR_CPU:
if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
- memcpy(buffer, dma_addr, size);
+ __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
else
BUG_ON(dir != DMA_TO_DEVICE);
break;
case SYNC_FOR_DEVICE:
if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
- memcpy(dma_addr, buffer, size);
+ __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
else
BUG_ON(dir != DMA_FROM_DEVICE);
break;
@@ -473,7 +585,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_mask = hwdev->coherent_dma_mask;
ret = (void *)__get_free_pages(flags, order);
- if (ret && !is_buffer_dma_capable(dma_mask, virt_to_bus(ret), size)) {
+ if (ret && !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(ret), size)) {
/*
* The allocated memory isn't reachable by the device.
* Fall back on swiotlb_map_single().
@@ -488,13 +600,16 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
* swiotlb_map_single(), which will grab memory from
* the lowest available address range.
*/
- ret = map_single(hwdev, NULL, size, DMA_FROM_DEVICE);
+ struct swiotlb_phys_addr buffer;
+ buffer.page = virt_to_page(NULL);
+ buffer.offset = 0;
+ ret = map_single(hwdev, buffer, size, DMA_FROM_DEVICE);
if (!ret)
return NULL;
}
memset(ret, 0, size);
- dev_addr = virt_to_bus(ret);
+ dev_addr = swiotlb_virt_to_bus(ret);
/* Confirm address can be DMA'd by device */
if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
@@ -554,8 +669,9 @@ dma_addr_t
swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
int dir, struct dma_attrs *attrs)
{
- dma_addr_t dev_addr = virt_to_bus(ptr);
+ dma_addr_t dev_addr = swiotlb_virt_to_bus(ptr);
void *map;
+ struct swiotlb_phys_addr buffer;
BUG_ON(dir == DMA_NONE);
/*
@@ -563,19 +679,22 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
* we can safely return the device addr and not worry about bounce
* buffering it.
*/
- if (!address_needs_mapping(hwdev, dev_addr, size) && !swiotlb_force)
+ if (!address_needs_mapping(hwdev, dev_addr, size) &&
+ !range_needs_mapping(ptr, size))
return dev_addr;
/*
* Oh well, have to allocate and map a bounce buffer.
*/
- map = map_single(hwdev, ptr, size, dir);
+ buffer.page = virt_to_page(ptr);
+ buffer.offset = (unsigned long)ptr & ~PAGE_MASK;
+ map = map_single(hwdev, buffer, size, dir);
if (!map) {
swiotlb_full(hwdev, size, dir, 1);
map = io_tlb_overflow_buffer;
}
- dev_addr = virt_to_bus(map);
+ dev_addr = swiotlb_virt_to_bus(map);
/*
* Ensure that the address returned is DMA'ble
@@ -605,7 +724,7 @@ void
swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
size_t size, int dir, struct dma_attrs *attrs)
{
- char *dma_addr = bus_to_virt(dev_addr);
+ char *dma_addr = swiotlb_bus_to_virt(dev_addr);
BUG_ON(dir == DMA_NONE);
if (is_swiotlb_buffer(dma_addr))
@@ -635,7 +754,7 @@ static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
size_t size, int dir, int target)
{
- char *dma_addr = bus_to_virt(dev_addr);
+ char *dma_addr = swiotlb_bus_to_virt(dev_addr);
BUG_ON(dir == DMA_NONE);
if (is_swiotlb_buffer(dma_addr))
@@ -666,7 +785,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
unsigned long offset, size_t size,
int dir, int target)
{
- char *dma_addr = bus_to_virt(dev_addr) + offset;
+ char *dma_addr = swiotlb_bus_to_virt(dev_addr) + offset;
BUG_ON(dir == DMA_NONE);
if (is_swiotlb_buffer(dma_addr))
@@ -714,18 +833,20 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
int dir, struct dma_attrs *attrs)
{
struct scatterlist *sg;
- void *addr;
+ struct swiotlb_phys_addr buffer;
dma_addr_t dev_addr;
int i;
BUG_ON(dir == DMA_NONE);
for_each_sg(sgl, sg, nelems, i) {
- addr = SG_ENT_VIRT_ADDRESS(sg);
- dev_addr = virt_to_bus(addr);
- if (swiotlb_force ||
+ dev_addr = swiotlb_sg_to_bus(sg);
+ if (range_needs_mapping(sg_virt(sg), sg->length) ||
address_needs_mapping(hwdev, dev_addr, sg->length)) {
- void *map = map_single(hwdev, addr, sg->length, dir);
+ void *map;
+ buffer.page = sg_page(sg);
+ buffer.offset = sg->offset;
+ map = map_single(hwdev, buffer, sg->length, dir);
if (!map) {
/* Don't panic here, we expect map_sg users
to do proper error handling. */
@@ -735,7 +856,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
sgl[0].dma_length = 0;
return 0;
}
- sg->dma_address = virt_to_bus(map);
+ sg->dma_address = swiotlb_virt_to_bus(map);
} else
sg->dma_address = dev_addr;
sg->dma_length = sg->length;
@@ -765,11 +886,11 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
BUG_ON(dir == DMA_NONE);
for_each_sg(sgl, sg, nelems, i) {
- if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
- unmap_single(hwdev, bus_to_virt(sg->dma_address),
+ if (sg->dma_address != swiotlb_sg_to_bus(sg))
+ unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
sg->dma_length, dir);
else if (dir == DMA_FROM_DEVICE)
- dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
+ dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
}
}
EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
@@ -798,11 +919,11 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
BUG_ON(dir == DMA_NONE);
for_each_sg(sgl, sg, nelems, i) {
- if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
- sync_single(hwdev, bus_to_virt(sg->dma_address),
+ if (sg->dma_address != swiotlb_sg_to_bus(sg))
+ sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
sg->dma_length, dir, target);
else if (dir == DMA_FROM_DEVICE)
- dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
+ dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
}
}
@@ -823,7 +944,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
- return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
+ return (dma_addr == swiotlb_virt_to_bus(io_tlb_overflow_buffer));
}
/*
@@ -835,7 +956,7 @@ swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
- return virt_to_bus(io_tlb_end - 1) <= mask;
+ return swiotlb_virt_to_bus(io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL(swiotlb_map_single);
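The __weak phys/bus helpers added at the top of lib/swiotlb.c are override points: an architecture whose bus addresses are not identical to CPU physical addresses can provide strong definitions, and the generic swiotlb_virt_to_bus()/swiotlb_bus_to_virt() wrappers above pick them up automatically. The sketch below is illustrative only and not part of this patch; it assumes the prototypes are visible via <linux/swiotlb.h> and uses a made-up constant, EXAMPLE_BUS_OFFSET, in place of a real translation.

#include <linux/swiotlb.h>
#include <linux/types.h>

/* Purely illustrative: pretend bus addresses are CPU physical addresses
 * shifted up by a fixed window. */
#define EXAMPLE_BUS_OFFSET	0x40000000ULL

dma_addr_t swiotlb_phys_to_bus(phys_addr_t paddr)
{
	return paddr + EXAMPLE_BUS_OFFSET;
}

phys_addr_t swiotlb_bus_to_phys(dma_addr_t baddr)
{
	return baddr - EXAMPLE_BUS_OFFSET;
}

swiotlb_arch_range_needs_mapping() can be overridden in the same way when certain buffers must always be bounced regardless of the device's DMA mask.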
diff --git a/mm/bounce.c b/mm/bounce.c
index bf0cf7c8387b..e590272fe7a8 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -198,8 +198,13 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
/*
* irk, bounce it
*/
- if (!bio)
- bio = bio_alloc(GFP_NOIO, (*bio_orig)->bi_vcnt);
+ if (!bio) {
+ unsigned int cnt = (*bio_orig)->bi_vcnt;
+
+ bio = bio_alloc(GFP_NOIO, cnt);
+ memset(bio->bi_io_vec, 0, cnt * sizeof(struct bio_vec));
+ }
+
to = bio->bi_io_vec + i;
diff --git a/mm/memory.c b/mm/memory.c
index f01b7eed6e16..0a2010a9518c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3075,3 +3075,18 @@ void print_vma_addr(char *prefix, unsigned long ip)
}
up_read(&current->mm->mmap_sem);
}
+
+#ifdef CONFIG_PROVE_LOCKING
+void might_fault(void)
+{
+ might_sleep();
+ /*
+ * It would be nicer to annotate only those paths which are not under
+ * pagefault_disable(); however, that requires a larger audit and
+ * providing helpers like get_user_atomic.
+ */
+ if (!in_atomic() && current->mm)
+ might_lock_read(&current->mm->mmap_sem);
+}
+EXPORT_SYMBOL(might_fault);
+#endif
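might_fault() lets lockdep report a potential mmap_sem inversion on every run of a user-access path, not only on the runs where a page fault actually occurs. A minimal, purely illustrative caller is sketched below; the wrapper name is made up, and the real annotation sites are the uaccess routines touched elsewhere in this merge.

#include <linux/kernel.h>
#include <linux/uaccess.h>

/* Hypothetical helper: annotate before touching user memory. */
static unsigned long example_get_user_buf(void *dst, const void __user *src,
					  unsigned long n)
{
	might_fault();		/* may sleep and may take mmap_sem on a fault */
	return copy_from_user(dst, src, n);
}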
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 7c72baa02f2e..6688765bd8b9 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -838,11 +838,11 @@ static long get_instantiation_keyring(key_serial_t ringid,
{
key_ref_t dkref;
+ *_dest_keyring = NULL;
+
/* just return a NULL pointer if we weren't asked to make a link */
- if (ringid == 0) {
- *_dest_keyring = NULL;
+ if (ringid == 0)
return 0;
- }
/* if a specific keyring is nominated by ID, then use that */
if (ringid > 0) {
diff --git a/sound/core/hrtimer.c b/sound/core/hrtimer.c
index c1d285921f80..34c7d48f5061 100644
--- a/sound/core/hrtimer.c
+++ b/sound/core/hrtimer.c
@@ -57,7 +57,6 @@ static int snd_hrtimer_open(struct snd_timer *t)
return -ENOMEM;
hrtimer_init(&stime->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
stime->timer = t;
- stime->hrt.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
stime->hrt.function = snd_hrtimer_callback;
t->private_data = stime;
return 0;
diff --git a/sound/drivers/pcsp/pcsp.c b/sound/drivers/pcsp/pcsp.c
index 2a02f704f366..a4049eb94d35 100644
--- a/sound/drivers/pcsp/pcsp.c
+++ b/sound/drivers/pcsp/pcsp.c
@@ -96,7 +96,6 @@ static int __devinit snd_card_pcsp_probe(int devnum, struct device *dev)
return -EINVAL;
hrtimer_init(&pcsp_chip.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- pcsp_chip.timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
pcsp_chip.timer.function = pcsp_do_timer;
card = snd_card_new(index, id, THIS_MODULE, 0);