Merge master.kernel.org:/home/rmk/linux-2.6-mmc
authorLinus Torvalds <torvalds@g5.osdl.org>
Sun, 11 Sep 2005 16:16:03 +0000 (09:16 -0700)
committerLinus Torvalds <torvalds@g5.osdl.org>
Sun, 11 Sep 2005 16:16:03 +0000 (09:16 -0700)
331 files changed:
COPYING
Documentation/00-INDEX
Documentation/CodingStyle
Documentation/DMA-API.txt
Documentation/DocBook/journal-api.tmpl
Documentation/DocBook/usb.tmpl
Documentation/MSI-HOWTO.txt
Documentation/RCU/RTFP.txt
Documentation/RCU/UP.txt
Documentation/RCU/checklist.txt
Documentation/RCU/rcu.txt
Documentation/RCU/whatisRCU.txt [new file with mode: 0644]
Documentation/cpu-freq/cpufreq-stats.txt
Documentation/cpusets.txt
Documentation/crypto/descore-readme.txt
Documentation/feature-removal-schedule.txt
Documentation/ioctl/cdrom.txt
Documentation/kernel-parameters.txt
Documentation/mono.txt
Documentation/networking/bonding.txt
Documentation/networking/wan-router.txt
Documentation/pci.txt
Documentation/powerpc/eeh-pci-error-recovery.txt
Documentation/s390/s390dbf.txt
Documentation/scsi/ibmmca.txt
Documentation/sound/alsa/ALSA-Configuration.txt
Documentation/sysrq.txt
Documentation/uml/UserModeLinux-HOWTO.txt
Documentation/usb/gadget_serial.txt
Documentation/video4linux/Zoran
Kbuild
Makefile
REPORTING-BUGS
arch/alpha/kernel/alpha_ksyms.c
arch/alpha/kernel/osf_sys.c
arch/alpha/kernel/smp.c
arch/arm/Kconfig
arch/arm/Kconfig.debug
arch/arm/common/scoop.c
arch/arm/kernel/calls.S
arch/arm/kernel/entry-common.S
arch/arm/mach-pxa/corgi.c
arch/i386/kernel/acpi/wakeup.S
arch/i386/kernel/cpu/common.c
arch/i386/kernel/io_apic.c
arch/i386/kernel/smpboot.c
arch/i386/kernel/vmlinux.lds.S
arch/ia64/kernel/mca.c
arch/m32r/Kconfig
arch/m32r/kernel/smp.c
arch/mips/kernel/irixsig.c
arch/mips/kernel/sysirix.c
arch/mips/lib/dec_and_lock.c
arch/parisc/lib/Makefile
arch/parisc/lib/bitops.c
arch/parisc/lib/debuglocks.c [deleted file]
arch/ppc/Kconfig
arch/ppc/Makefile
arch/ppc/kernel/cpu_setup_6xx.S
arch/ppc/kernel/cpu_setup_power4.S
arch/ppc/kernel/dma-mapping.c
arch/ppc/kernel/head.S
arch/ppc/kernel/idle.c
arch/ppc/kernel/smp.c
arch/ppc/kernel/traps.c
arch/ppc/lib/Makefile
arch/ppc/lib/dec_and_lock.c
arch/ppc/mm/fault.c
arch/ppc/platforms/pmac_sleep.S
arch/ppc/platforms/pmac_smp.c
arch/ppc/syslib/cpc700_pic.c
arch/ppc/syslib/i8259.c
arch/ppc/syslib/open_pic2.c
arch/ppc/syslib/ppc403_pic.c
arch/ppc/syslib/xilinx_pic.c
arch/ppc64/Makefile
arch/ppc64/kernel/cpu_setup_power4.S
arch/ppc64/lib/dec_and_lock.c
arch/ppc64/lib/locks.c
arch/s390/lib/spinlock.c
arch/sh/boards/adx/irq_maskreg.c
arch/sh/boards/bigsur/io.c
arch/sh/boards/bigsur/irq.c
arch/sh/boards/cqreek/irq.c
arch/sh/boards/harp/irq.c
arch/sh/boards/overdrive/irq.c
arch/sh/boards/renesas/hs7751rvoip/irq.c
arch/sh/boards/renesas/rts7751r2d/irq.c
arch/sh/boards/renesas/systemh/irq.c
arch/sh/boards/superh/microdev/irq.c
arch/sh/cchips/hd6446x/hd64465/io.c
arch/sh/cchips/voyagergx/irq.c
arch/sh/kernel/cpu/irq_imask.c
arch/sh/kernel/cpu/irq_ipr.c
arch/sh/kernel/cpu/sh4/irq_intc2.c
arch/sh64/kernel/irq_intc.c
arch/sparc/kernel/sparc_ksyms.c
arch/sparc/lib/Makefile
arch/sparc/lib/debuglocks.c [deleted file]
arch/sparc64/kernel/process.c
arch/sparc64/kernel/sparc64_ksyms.c
arch/sparc64/lib/Makefile
arch/sparc64/lib/debuglocks.c [deleted file]
arch/um/Makefile
arch/um/include/mem.h
arch/um/kernel/dyn.lds.S
arch/um/kernel/ksyms.c
arch/um/kernel/physmem.c
arch/um/kernel/trap_kern.c
arch/um/kernel/uml.lds.S
arch/v850/kernel/irq.c
arch/v850/kernel/setup.c
arch/v850/kernel/sim.c
arch/x86_64/kernel/setup.c
arch/x86_64/kernel/vmlinux.lds.S
drivers/acpi/sleep/main.c
drivers/acpi/sleep/poweroff.c
drivers/acpi/sleep/proc.c
drivers/base/dmapool.c
drivers/block/cciss.c
drivers/block/cfq-iosched.c
drivers/block/paride/pcd.c
drivers/block/paride/pf.c
drivers/block/paride/pg.c
drivers/block/paride/pt.c
drivers/block/swim3.c
drivers/block/swim_iop.c
drivers/block/umem.c
drivers/block/xd.c
drivers/block/z2ram.c
drivers/cdrom/sbpcd.c
drivers/cdrom/sonycd535.c
drivers/char/agp/backend.c
drivers/char/applicom.c
drivers/char/ftape/lowlevel/fdc-io.c
drivers/char/hpet.c
drivers/char/hw_random.c
drivers/char/ip2/i2lib.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/ipmi/ipmi_watchdog.c
drivers/char/keyboard.c
drivers/char/lcd.c
drivers/char/lp.c
drivers/char/mxser.c
drivers/char/n_tty.c
drivers/char/pcmcia/synclink_cs.c
drivers/ide/ide-io.c
drivers/ide/ide-tape.c
drivers/ide/ide-timing.h
drivers/ide/legacy/ide-cs.c
drivers/input/evdev.c
drivers/input/joystick/iforce/iforce-packets.c
drivers/input/joystick/iforce/iforce-usb.c
drivers/input/keyboard/atkbd.c
drivers/input/keyboard/sunkbd.c
drivers/input/mouse/Makefile
drivers/input/mouse/alps.c
drivers/input/mouse/logips2pp.c
drivers/input/mouse/psmouse-base.c
drivers/input/mouse/psmouse.h
drivers/input/mouse/trackpoint.c [new file with mode: 0644]
drivers/input/mouse/trackpoint.h [new file with mode: 0644]
drivers/input/serio/i8042-io.h
drivers/input/serio/i8042-ip22io.h
drivers/input/serio/i8042-jazzio.h
drivers/input/serio/i8042-sparcio.h
drivers/input/serio/i8042-x86ia64io.h
drivers/input/serio/i8042.c
drivers/isdn/i4l/isdn_bsdcomp.c
drivers/isdn/i4l/isdn_common.c
drivers/md/dm-exception-store.c
drivers/md/md.c
drivers/media/common/saa7146_core.c
drivers/media/video/cpia_usb.c
drivers/media/video/stradis.c
drivers/media/video/video-buf.c
drivers/media/video/zoran_driver.c
drivers/media/video/zr36120.c
drivers/mfd/Kconfig
drivers/mfd/Makefile
drivers/mfd/ucb1x00-assabet.c [new file with mode: 0644]
drivers/mfd/ucb1x00-core.c [new file with mode: 0644]
drivers/mfd/ucb1x00-ts.c [new file with mode: 0644]
drivers/mfd/ucb1x00.h [new file with mode: 0644]
drivers/misc/hdpuftrs/hdpu_cpustate.c
drivers/mtd/devices/mtdram.c
drivers/mtd/ftl.c
drivers/net/bsd_comp.c
drivers/net/ppp_generic.c
drivers/net/tulip/de4x5.c
drivers/parisc/lasi.c
drivers/parport/ieee1284.c
drivers/parport/ieee1284_ops.c
drivers/parport/parport_pc.c
drivers/pci/pci-sysfs.c
drivers/pci/probe.c
drivers/sbus/char/bbc_envctrl.c
drivers/sbus/char/envctrl.c
drivers/scsi/53c7xx.c
drivers/scsi/ch.c
drivers/scsi/cpqfcTSinit.c
drivers/scsi/ibmmca.c
drivers/scsi/osst.c
drivers/serial/8250.c
drivers/telephony/ixj.c
drivers/usb/input/hid-core.c
drivers/usb/input/hid-debug.h
drivers/usb/input/hid-input.c
drivers/usb/input/hid.h
drivers/usb/input/hiddev.c
drivers/usb/media/stv680.c
drivers/video/nvidia/nv_of.c
drivers/video/vgastate.c
fs/buffer.c
fs/cifs/connect.c
fs/cramfs/uncompress.c
fs/dcache.c
fs/jbd/transaction.c
fs/jffs/intrep.c
fs/lockd/clntproc.c
fs/namespace.c
fs/nfs/nfs3proc.c
fs/nfs/nfs4proc.c
fs/ntfs/aops.c
fs/pipe.c
fs/reiserfs/journal.c
fs/reiserfs/super.c
fs/smbfs/proc.c
fs/xfs/linux-2.6/time.h
fs/xfs/linux-2.6/xfs_buf.c
fs/xfs/linux-2.6/xfs_super.c
include/asm-alpha/spinlock.h
include/asm-alpha/spinlock_types.h [new file with mode: 0644]
include/asm-arm/spinlock.h
include/asm-arm/spinlock_types.h [new file with mode: 0644]
include/asm-arm/unistd.h
include/asm-arm26/hardirq.h
include/asm-generic/vmlinux.lds.h
include/asm-i386/div64.h
include/asm-i386/processor.h
include/asm-i386/spinlock.h
include/asm-i386/spinlock_types.h [new file with mode: 0644]
include/asm-ia64/spinlock.h
include/asm-ia64/spinlock_types.h [new file with mode: 0644]
include/asm-m32r/spinlock.h
include/asm-m32r/spinlock_types.h [new file with mode: 0644]
include/asm-mips/spinlock.h
include/asm-mips/spinlock_types.h [new file with mode: 0644]
include/asm-parisc/atomic.h
include/asm-parisc/bitops.h
include/asm-parisc/cacheflush.h
include/asm-parisc/processor.h
include/asm-parisc/spinlock.h
include/asm-parisc/spinlock_types.h [new file with mode: 0644]
include/asm-parisc/system.h
include/asm-ppc/smp.h
include/asm-ppc/spinlock.h
include/asm-ppc/spinlock_types.h [new file with mode: 0644]
include/asm-ppc/system.h
include/asm-ppc64/spinlock.h
include/asm-ppc64/spinlock_types.h [new file with mode: 0644]
include/asm-s390/spinlock.h
include/asm-s390/spinlock_types.h [new file with mode: 0644]
include/asm-sh/spinlock.h
include/asm-sh/spinlock_types.h [new file with mode: 0644]
include/asm-sparc/spinlock.h
include/asm-sparc/spinlock_types.h [new file with mode: 0644]
include/asm-sparc64/spinlock.h
include/asm-sparc64/spinlock_types.h [new file with mode: 0644]
include/asm-um/page.h
include/asm-um/pgtable.h
include/asm-um/spinlock_types.h [new file with mode: 0644]
include/asm-x86_64/proto.h
include/asm-x86_64/spinlock.h
include/asm-x86_64/spinlock_types.h [new file with mode: 0644]
include/linux/bio.h
include/linux/bit_spinlock.h [new file with mode: 0644]
include/linux/blkdev.h
include/linux/chio.h
include/linux/dmapool.h
include/linux/fs.h
include/linux/in6.h
include/linux/input.h
include/linux/ipv6.h
include/linux/jbd.h
include/linux/jiffies.h
include/linux/radix-tree.h
include/linux/reiserfs_fs.h
include/linux/sched.h
include/linux/slab.h
include/linux/spinlock.h
include/linux/spinlock_api_smp.h [new file with mode: 0644]
include/linux/spinlock_api_up.h [new file with mode: 0644]
include/linux/spinlock_types.h [new file with mode: 0644]
include/linux/spinlock_types_up.h [new file with mode: 0644]
include/linux/spinlock_up.h [new file with mode: 0644]
include/linux/time.h
include/linux/writeback.h
ipc/mqueue.c
kernel/Makefile
kernel/acct.c
kernel/compat.c
kernel/cpuset.c
kernel/sched.c
kernel/signal.c
kernel/spinlock.c
kernel/timer.c
lib/Makefile
lib/dec_and_lock.c
lib/kernel_lock.c
lib/radix-tree.c
lib/sort.c
lib/spinlock_debug.c [new file with mode: 0644]
mm/filemap.c
mm/memory.c
mm/oom_kill.c
mm/page_alloc.c
mm/slab.c
mm/swap_state.c
mm/swapfile.c
net/dccp/ccids/ccid3.c
net/dccp/ccids/ccid3.h
net/dccp/ipv4.c
net/dccp/output.c
net/ipv4/tcp_output.c
net/ipv6/exthdrs.c
net/ipv6/netfilter/ip6t_rt.c
scripts/Kbuild.include
scripts/reference_discarded.pl
sound/isa/sb/sb16_csp.c
sound/oss/skeleton.c [deleted file]

diff --git a/COPYING b/COPYING
index 2a7e338ec2fc6aac461a11fe8049799e65639166..ca442d313d86dc67e0a2e5d584b465bd382cbf5c 100644 (file)
--- a/COPYING
+++ b/COPYING
@@ -18,7 +18,7 @@
                       Version 2, June 1991
 
  Copyright (C) 1989, 1991 Free Software Foundation, Inc.
-                       59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+                       51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  Everyone is permitted to copy and distribute verbatim copies
  of this license document, but changing it is not allowed.
 
@@ -321,7 +321,7 @@ the "copyright" line and a pointer to where the full notice is found.
 
     You should have received a copy of the GNU General Public License
     along with this program; if not, write to the Free Software
-    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 
 
 Also add information on how to contact you by electronic and paper mail.
index f6de52b010591120e0acb63791e24df45cfaed74..433cf5e9ae04bd17a04d50c320db9f100c68da63 100644 (file)
@@ -277,7 +277,7 @@ tty.txt
 unicode.txt
        - info on the Unicode character/font mapping used in Linux.
 uml/
-       - directory with infomation about User Mode Linux.
+       - directory with information about User Mode Linux.
 usb/
        - directory with info regarding the Universal Serial Bus.
 video4linux/
index f25b3953f51398a023afd8b633aa5168313ccc4a..22e5f9036f3c193f88d14611b2e5fe669b2c9b9e 100644 (file)
@@ -236,6 +236,9 @@ ugly), but try to avoid excess.  Instead, put the comments at the head
 of the function, telling people what it does, and possibly WHY it does
 it.
 
+When commenting the kernel API functions, please use the kerneldoc format.
+See the files Documentation/kernel-doc-nano-HOWTO.txt and scripts/kernel-doc
+for details.
 
                Chapter 8: You've made a mess of it
 
index 6ee3cd6134dfad024bccff633ac2bece79fc0db6..1af0f2d5022066f1a205ad5265771e3ab112c018 100644 (file)
@@ -121,7 +121,7 @@ pool's device.
                        dma_addr_t addr);
 
 This puts memory back into the pool.  The pool is what was passed to
-the the pool allocation routine; the cpu and dma addresses are what
+the pool allocation routine; the cpu and dma addresses are what
 were returned when that routine allocated the memory being freed.
 
 
index 1ef6f43c6d8f6a2d3f1be39ac6fcf4781ddb746d..341aaa4ce481a5a9af1f85d17be35e83e2b8b075 100644 (file)
@@ -116,7 +116,7 @@ filesystem. Almost.
 
 You still need to actually journal your filesystem changes, this
 is done by wrapping them into transactions. Additionally you
-also need to wrap the modification of each of the the buffers
+also need to wrap the modification of each of the buffers
 with calls to the journal layer, so it knows what the modifications
 you are actually making are. To do this use  journal_start() which
 returns a transaction handle.
@@ -128,7 +128,7 @@ and its counterpart journal_stop(), which indicates the end of a transaction
 are nestable calls, so you can reenter a transaction if necessary,
 but remember you must call journal_stop() the same number of times as
 journal_start() before the transaction is completed (or more accurately
-leaves the the update phase). Ext3/VFS makes use of this feature to simplify 
+leaves the update phase). Ext3/VFS makes use of this feature to simplify
 quota support.
 </para>
 
index f3ef0bf435e9ebde6ba5174f43ef7168059058fa..705c442c7bf4cfcd687ea84aa92e6713463f6fcb 100644 (file)
@@ -841,7 +841,7 @@ usbdev_ioctl (int fd, int ifno, unsigned request, void *param)
                    File modification time is not updated by this request.
                    </para><para>
                    Those struct members are from some interface descriptor
-                   applying to the the current configuration.
+                   applying to the current configuration.
                    The interface number is the bInterfaceNumber value, and
                    the altsetting number is the bAlternateSetting value.
                    (This resets each endpoint in the interface.)
index d5032eb480aa364593d73bd6f211ec23bc2b990e..63edc5f847c45a4b1771c4e35872317ca2bbcbc7 100644 (file)
@@ -430,7 +430,7 @@ which may result in system hang. The software driver of specific
 MSI-capable hardware is responsible for whether calling
 pci_enable_msi or not. A return of zero indicates the kernel
 successfully initializes the MSI/MSI-X capability structure of the
-device funtion. The device function is now running on MSI/MSI-X mode.
+device function. The device function is now running on MSI/MSI-X mode.
 
 5.6 How to tell whether MSI/MSI-X is enabled on device function
 
index 9c6d450138ead56f9f1ef4c2e55794b2b30ff77c..fcbcbc35b122fda0223e22c267f7bca240e48b5b 100644 (file)
@@ -2,7 +2,8 @@ Read the F-ing Papers!
 
 
 This document describes RCU-related publications, and is followed by
-the corresponding bibtex entries.
+the corresponding bibtex entries.  A number of the publications may
+be found at http://www.rdrop.com/users/paulmck/RCU/.
 
 The first thing resembling RCU was published in 1980, when Kung and Lehman
 [Kung80] recommended use of a garbage collector to defer destruction
@@ -113,6 +114,10 @@ describing how to make RCU safe for soft-realtime applications [Sarma04c],
 and a paper describing SELinux performance with RCU [JamesMorris04b].
 
 
+2005 has seen further adaptation of RCU to realtime use, permitting
+preemption of RCU realtime critical sections [PaulMcKenney05a,
+PaulMcKenney05b].
+
 Bibtex Entries
 
 @article{Kung80
@@ -410,3 +415,32 @@ Oregon Health and Sciences University"
 \url{http://www.livejournal.com/users/james_morris/2153.html}
 [Viewed December 10, 2004]"
 }
+
+@unpublished{PaulMcKenney05a
+,Author="Paul E. McKenney"
+,Title="{[RFC]} {RCU} and {CONFIG\_PREEMPT\_RT} progress"
+,month="May"
+,year="2005"
+,note="Available:
+\url{http://lkml.org/lkml/2005/5/9/185}
+[Viewed May 13, 2005]"
+,annotation="
+       First publication of working lock-based deferred free patches
+       for the CONFIG_PREEMPT_RT environment.
+"
+}
+
+@conference{PaulMcKenney05b
+,Author="Paul E. McKenney and Dipankar Sarma"
+,Title="Towards Hard Realtime Response from the Linux Kernel on SMP Hardware"
+,Booktitle="linux.conf.au 2005"
+,month="April"
+,year="2005"
+,address="Canberra, Australia"
+,note="Available:
+\url{http://www.rdrop.com/users/paulmck/RCU/realtimeRCU.2005.04.23a.pdf}
+[Viewed May 13, 2005]"
+,annotation="
+       Realtime turns into making RCU yet more realtime friendly.
+"
+}
index 3bfb84b3b7dbc5120bbcbf14c52ed95fb2a72507..aab4a9ec3931520554a76b2e33e5096b5a0ca632 100644 (file)
@@ -8,7 +8,7 @@ is that since there is only one CPU, it should not be necessary to
 wait for anything else to get done, since there are no other CPUs for
 anything else to be happening on.  Although this approach will -sort- -of-
 work a surprising amount of the time, it is a very bad idea in general.
-This document presents two examples that demonstrate exactly how bad an
+This document presents three examples that demonstrate exactly how bad an
 idea this is.
 
 
@@ -26,6 +26,9 @@ from softirq, the list scan would find itself referencing a newly freed
 element B.  This situation can greatly decrease the life expectancy of
 your kernel.
 
+This same problem can occur if call_rcu() is invoked from a hardware
+interrupt handler.
+
 
 Example 2: Function-Call Fatality
 
@@ -44,8 +47,37 @@ its arguments would cause it to fail to make the fundamental guarantee
 underlying RCU, namely that call_rcu() defers invoking its arguments until
 all RCU read-side critical sections currently executing have completed.
 
-Quick Quiz: why is it -not- legal to invoke synchronize_rcu() in
-this case?
+Quick Quiz #1: why is it -not- legal to invoke synchronize_rcu() in
+       this case?
+
+
+Example 3: Death by Deadlock
+
+Suppose that call_rcu() is invoked while holding a lock, and that the
+callback function must acquire this same lock.  In this case, if
+call_rcu() were to directly invoke the callback, the result would
+be self-deadlock.
+
+In some cases, it would possible to restructure to code so that
+the call_rcu() is delayed until after the lock is released.  However,
+there are cases where this can be quite ugly:
+
+1.     If a number of items need to be passed to call_rcu() within
+       the same critical section, then the code would need to create
+       a list of them, then traverse the list once the lock was
+       released.
+
+2.     In some cases, the lock will be held across some kernel API,
+       so that delaying the call_rcu() until the lock is released
+       requires that the data item be passed up via a common API.
+       It is far better to guarantee that callbacks are invoked
+       with no locks held than to have to modify such APIs to allow
+       arbitrary data items to be passed back up through them.
+
+If call_rcu() directly invokes the callback, painful locking restrictions
+or API changes would be required.
+
+Quick Quiz #2: What locking restriction must RCU callbacks respect?
 
 
 Summary
@@ -53,12 +85,35 @@ Summary
 Permitting call_rcu() to immediately invoke its arguments or permitting
 synchronize_rcu() to immediately return breaks RCU, even on a UP system.
 So do not do it!  Even on a UP system, the RCU infrastructure -must-
-respect grace periods.
-
-
-Answer to Quick Quiz
-
-The calling function is scanning an RCU-protected linked list, and
-is therefore within an RCU read-side critical section.  Therefore,
-the called function has been invoked within an RCU read-side critical
-section, and is not permitted to block.
+respect grace periods, and -must- invoke callbacks from a known environment
+in which no locks are held.
+
+
+Answer to Quick Quiz #1:
+       Why is it -not- legal to invoke synchronize_rcu() in this case?
+
+       Because the calling function is scanning an RCU-protected linked
+       list, and is therefore within an RCU read-side critical section.
+       Therefore, the called function has been invoked within an RCU
+       read-side critical section, and is not permitted to block.
+
+Answer to Quick Quiz #2:
+       What locking restriction must RCU callbacks respect?
+
+       Any lock that is acquired within an RCU callback must be
+       acquired elsewhere using an _irq variant of the spinlock
+       primitive.  For example, if "mylock" is acquired by an
+       RCU callback, then a process-context acquisition of this
+       lock must use something like spin_lock_irqsave() to
+       acquire the lock.
+
+       If the process-context code were to simply use spin_lock(),
+       then, since RCU callbacks can be invoked from softirq context,
+       the callback might be called from a softirq that interrupted
+       the process-context critical section.  This would result in
+       self-deadlock.
+
+       This restriction might seem gratuitous, since very few RCU
+       callbacks acquire locks directly.  However, a great many RCU
+       callbacks do acquire locks -indirectly-, for example, via
+       the kfree() primitive.
index 8f3fb77c9cd32f2732351c2a78701110d2fc2454..e118a7c1a0928d9aadc5f4cd34d4e110a930a1ee 100644 (file)
@@ -43,6 +43,10 @@ over a rather long period of time, but improvements are always welcome!
        rcu_read_lock_bh()) in the read-side critical sections,
        and are also an excellent aid to readability.
 
+       As a rough rule of thumb, any dereference of an RCU-protected
+       pointer must be covered by rcu_read_lock() or rcu_read_lock_bh()
+       or by the appropriate update-side lock.
+
 3.     Does the update code tolerate concurrent accesses?
 
        The whole point of RCU is to permit readers to run without
@@ -90,7 +94,11 @@ over a rather long period of time, but improvements are always welcome!
 
                The rcu_dereference() primitive is used by the various
                "_rcu()" list-traversal primitives, such as the
-               list_for_each_entry_rcu().
+               list_for_each_entry_rcu().  Note that it is perfectly
+               legal (if redundant) for update-side code to use
+               rcu_dereference() and the "_rcu()" list-traversal
+               primitives.  This is particularly useful in code
+               that is common to readers and updaters.
 
        b.      If the list macros are being used, the list_add_tail_rcu()
                and list_add_rcu() primitives must be used in order
@@ -150,16 +158,9 @@ over a rather long period of time, but improvements are always welcome!
 
        Use of the _rcu() list-traversal primitives outside of an
        RCU read-side critical section causes no harm other than
-       a slight performance degradation on Alpha CPUs and some
-       confusion on the part of people trying to read the code.
-
-       Another way of thinking of this is "If you are holding the
-       lock that prevents the data structure from changing, why do
-       you also need RCU-based protection?"  That said, there may
-       well be situations where use of the _rcu() list-traversal
-       primitives while the update-side lock is held results in
-       simpler and more maintainable code.  The jury is still out
-       on this question.
+       a slight performance degradation on Alpha CPUs.  It can
+       also be quite helpful in reducing code bloat when common
+       code is shared between readers and updaters.
 
 10.    Conversely, if you are in an RCU read-side critical section,
        you -must- use the "_rcu()" variants of the list macros.
index eb444006683e28140cc0c1eb6d02f74f6f344503..6fa092251586e14930c6d51d8bde1d4566512138 100644 (file)
@@ -64,6 +64,54 @@ o    I hear that RCU is patented?  What is with that?
        Of these, one was allowed to lapse by the assignee, and the
        others have been contributed to the Linux kernel under GPL.
 
+o      I hear that RCU needs work in order to support realtime kernels?
+
+       Yes, work in progress.
+
 o      Where can I find more information on RCU?
 
        See the RTFP.txt file in this directory.
+       Or point your browser at http://www.rdrop.com/users/paulmck/RCU/.
+
+o      What are all these files in this directory?
+
+
+       NMI-RCU.txt
+
+               Describes how to use RCU to implement dynamic
+               NMI handlers, which can be revectored on the fly,
+               without rebooting.
+
+       RTFP.txt
+
+               List of RCU-related publications and web sites.
+
+       UP.txt
+
+               Discussion of RCU usage in UP kernels.
+
+       arrayRCU.txt
+
+               Describes how to use RCU to protect arrays, with
+               resizeable arrays whose elements reference other
+               data structures being of the most interest.
+
+       checklist.txt
+
+               Lists things to check for when inspecting code that
+               uses RCU.
+
+       listRCU.txt
+
+               Describes how to use RCU to protect linked lists.
+               This is the simplest and most common use of RCU
+               in the Linux kernel.
+
+       rcu.txt
+
+               You are reading it!
+
+       whatisRCU.txt
+
+               Overview of how the RCU implementation works.  Along
+               the way, presents a conceptual view of RCU.
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
new file mode 100644 (file)
index 0000000..354d89c
--- /dev/null
@@ -0,0 +1,902 @@
+What is RCU?
+
+RCU is a synchronization mechanism that was added to the Linux kernel
+during the 2.5 development effort that is optimized for read-mostly
+situations.  Although RCU is actually quite simple once you understand it,
+getting there can sometimes be a challenge.  Part of the problem is that
+most of the past descriptions of RCU have been written with the mistaken
+assumption that there is "one true way" to describe RCU.  Instead,
+the experience has been that different people must take different paths
+to arrive at an understanding of RCU.  This document provides several
+different paths, as follows:
+
+1.     RCU OVERVIEW
+2.     WHAT IS RCU'S CORE API?
+3.     WHAT ARE SOME EXAMPLE USES OF CORE RCU API?
+4.     WHAT IF MY UPDATING THREAD CANNOT BLOCK?
+5.     WHAT ARE SOME SIMPLE IMPLEMENTATIONS OF RCU?
+6.     ANALOGY WITH READER-WRITER LOCKING
+7.     FULL LIST OF RCU APIs
+8.     ANSWERS TO QUICK QUIZZES
+
+People who prefer starting with a conceptual overview should focus on
+Section 1, though most readers will profit by reading this section at
+some point.  People who prefer to start with an API that they can then
+experiment with should focus on Section 2.  People who prefer to start
+with example uses should focus on Sections 3 and 4.  People who need to
+understand the RCU implementation should focus on Section 5, then dive
+into the kernel source code.  People who reason best by analogy should
+focus on Section 6.  Section 7 serves as an index to the docbook API
+documentation, and Section 8 is the traditional answer key.
+
+So, start with the section that makes the most sense to you and your
+preferred method of learning.  If you need to know everything about
+everything, feel free to read the whole thing -- but if you are really
+that type of person, you have perused the source code and will therefore
+never need this document anyway.  ;-)
+
+
+1.  RCU OVERVIEW
+
+The basic idea behind RCU is to split updates into "removal" and
+"reclamation" phases.  The removal phase removes references to data items
+within a data structure (possibly by replacing them with references to
+new versions of these data items), and can run concurrently with readers.
+The reason that it is safe to run the removal phase concurrently with
+readers is that modern CPUs guarantee that readers will see
+either the old or the new version of the data structure rather than a
+partially updated reference.  The reclamation phase does the work of reclaiming
+(e.g., freeing) the data items removed from the data structure during the
+removal phase.  Because reclaiming data items can disrupt any readers
+concurrently referencing those data items, the reclamation phase must
+not start until readers no longer hold references to those data items.
+
+Splitting the update into removal and reclamation phases permits the
+updater to perform the removal phase immediately, and to defer the
+reclamation phase until all readers active during the removal phase have
+completed, either by blocking until they finish or by registering a
+callback that is invoked after they finish.  Only readers that are active
+during the removal phase need be considered, because any reader starting
+after the removal phase will be unable to gain a reference to the removed
+data items, and therefore cannot be disrupted by the reclamation phase.
+
+So the typical RCU update sequence goes something like the following:
+
+a.     Remove pointers to a data structure, so that subsequent
+       readers cannot gain a reference to it.
+
+b.     Wait for all previous readers to complete their RCU read-side
+       critical sections.
+
+c.     At this point, there cannot be any readers who hold references
+       to the data structure, so it now may safely be reclaimed
+       (e.g., kfree()d).
+
+Step (b) above is the key idea underlying RCU's deferred destruction.
+The ability to wait until all readers are done allows RCU readers to
+use much lighter-weight synchronization, in some cases, absolutely no
+synchronization at all.  In contrast, in more conventional lock-based
+schemes, readers must use heavy-weight synchronization in order to
+prevent an updater from deleting the data structure out from under them.
+This is because lock-based updaters typically update data items in place,
+and must therefore exclude readers.  In contrast, RCU-based updaters
+typically take advantage of the fact that writes to single aligned
+pointers are atomic on modern CPUs, allowing atomic insertion, removal,
+and replacement of data items in a linked structure without disrupting
+readers.  Concurrent RCU readers can then continue accessing the old
+versions, and can dispense with the atomic operations, memory barriers,
+and communications cache misses that are so expensive on present-day
+SMP computer systems, even in absence of lock contention.
+
+In the three-step procedure shown above, the updater is performing both
+the removal and the reclamation step, but it is often helpful for an
+entirely different thread to do the reclamation, as is in fact the case
+in the Linux kernel's directory-entry cache (dcache).  Even if the same
+thread performs both the update step (step (a) above) and the reclamation
+step (step (c) above), it is often helpful to think of them separately.
+For example, RCU readers and updaters need not communicate at all,
+but RCU provides implicit low-overhead communication between readers
+and reclaimers, namely, in step (b) above.
+
+So how the heck can a reclaimer tell when a reader is done, given
+that readers are not doing any sort of synchronization operations???
+Read on to learn about how RCU's API makes this easy.
+
+
+2.  WHAT IS RCU'S CORE API?
+
+The core RCU API is quite small:
+
+a.     rcu_read_lock()
+b.     rcu_read_unlock()
+c.     synchronize_rcu() / call_rcu()
+d.     rcu_assign_pointer()
+e.     rcu_dereference()
+
+There are many other members of the RCU API, but the rest can be
+expressed in terms of these five, though most implementations instead
+express synchronize_rcu() in terms of the call_rcu() callback API.
+
+The five core RCU APIs are described below, the other 18 will be enumerated
+later.  See the kernel docbook documentation for more info, or look directly
+at the function header comments.
+
+rcu_read_lock()
+
+       void rcu_read_lock(void);
+
+       Used by a reader to inform the reclaimer that the reader is
+       entering an RCU read-side critical section.  It is illegal
+       to block while in an RCU read-side critical section, though
+       kernels built with CONFIG_PREEMPT_RCU can preempt RCU read-side
+       critical sections.  Any RCU-protected data structure accessed
+       during an RCU read-side critical section is guaranteed to remain
+       unreclaimed for the full duration of that critical section.
+       Reference counts may be used in conjunction with RCU to maintain
+       longer-term references to data structures.
+
+rcu_read_unlock()
+
+       void rcu_read_unlock(void);
+
+       Used by a reader to inform the reclaimer that the reader is
+       exiting an RCU read-side critical section.  Note that RCU
+       read-side critical sections may be nested and/or overlapping.
+
+synchronize_rcu()
+
+       void synchronize_rcu(void);
+
+       Marks the end of updater code and the beginning of reclaimer
+       code.  It does this by blocking until all pre-existing RCU
+       read-side critical sections on all CPUs have completed.
+       Note that synchronize_rcu() will -not- necessarily wait for
+       any subsequent RCU read-side critical sections to complete.
+       For example, consider the following sequence of events:
+
+                CPU 0                  CPU 1                 CPU 2
+            ----------------- ------------------------- ---------------
+        1.  rcu_read_lock()
+        2.                    enters synchronize_rcu()
+        3.                                               rcu_read_lock()
+        4.  rcu_read_unlock()
+        5.                     exits synchronize_rcu()
+        6.                                              rcu_read_unlock()
+
+       To reiterate, synchronize_rcu() waits only for ongoing RCU
+       read-side critical sections to complete, not necessarily for
+       any that begin after synchronize_rcu() is invoked.
+
+       Of course, synchronize_rcu() does not necessarily return
+       -immediately- after the last pre-existing RCU read-side critical
+       section completes.  For one thing, there might well be scheduling
+       delays.  For another thing, many RCU implementations process
+       requests in batches in order to improve efficiencies, which can
+       further delay synchronize_rcu().
+
+       Since synchronize_rcu() is the API that must figure out when
+       readers are done, its implementation is key to RCU.  For RCU
+       to be useful in all but the most read-intensive situations,
+       synchronize_rcu()'s overhead must also be quite small.
+
+       The call_rcu() API is a callback form of synchronize_rcu(),
+       and is described in more detail in a later section.  Instead of
+       blocking, it registers a function and argument which are invoked
+       after all ongoing RCU read-side critical sections have completed.
+       This callback variant is particularly useful in situations where
+       it is illegal to block.
+
+rcu_assign_pointer()
+
+       typeof(p) rcu_assign_pointer(p, typeof(p) v);
+
+       Yes, rcu_assign_pointer() -is- implemented as a macro, though it
+       would be cool to be able to declare a function in this manner.
+       (Compiler experts will no doubt disagree.)
+
+       The updater uses this function to assign a new value to an
+       RCU-protected pointer, in order to safely communicate the change
+       in value from the updater to the reader.  This function returns
+       the new value, and also executes any memory-barrier instructions
+       required for a given CPU architecture.
+
+       Perhaps more important, it serves to document which pointers
+       are protected by RCU.  That said, rcu_assign_pointer() is most
+       frequently used indirectly, via the _rcu list-manipulation
+       primitives such as list_add_rcu().
+
+rcu_dereference()
+
+       typeof(p) rcu_dereference(p);
+
+       Like rcu_assign_pointer(), rcu_dereference() must be implemented
+       as a macro.
+
+       The reader uses rcu_dereference() to fetch an RCU-protected
+       pointer, which returns a value that may then be safely
+       dereferenced.  Note that rcu_dereference() does not actually
+       dereference the pointer, instead, it protects the pointer for
+       later dereferencing.  It also executes any needed memory-barrier
+       instructions for a given CPU architecture.  Currently, only Alpha
+       needs memory barriers within rcu_dereference() -- on other CPUs,
+       it compiles to nothing, not even a compiler directive.
+
+       Common coding practice uses rcu_dereference() to copy an
+       RCU-protected pointer to a local variable, then dereferences
+       this local variable, for example as follows:
+
+               p = rcu_dereference(head.next);
+               return p->data;
+
+       However, in this case, one could just as easily combine these
+       into one statement:
+
+               return rcu_dereference(head.next)->data;
+
+       If you are going to be fetching multiple fields from the
+       RCU-protected structure, using the local variable is of
+       course preferred.  Repeated rcu_dereference() calls look
+       ugly and incur unnecessary overhead on Alpha CPUs.
+
+       Note that the value returned by rcu_dereference() is valid
+       only within the enclosing RCU read-side critical section.
+       For example, the following is -not- legal:
+
+               rcu_read_lock();
+               p = rcu_dereference(head.next);
+               rcu_read_unlock();
+               x = p->address;
+               rcu_read_lock();
+               y = p->data;
+               rcu_read_unlock();
+
+       Holding a reference from one RCU read-side critical section
+       to another is just as illegal as holding a reference from
+       one lock-based critical section to another!  Similarly,
+       using a reference outside of the critical section in which
+       it was acquired is just as illegal as doing so with normal
+       locking.
+
+       As with rcu_assign_pointer(), an important function of
+       rcu_dereference() is to document which pointers are protected
+       by RCU.  And, again like rcu_assign_pointer(), rcu_dereference()
+       is typically used indirectly, via the _rcu list-manipulation
+       primitives, such as list_for_each_entry_rcu().
+
+The following diagram shows how each API communicates among the
+reader, updater, and reclaimer.
+
+
+           rcu_assign_pointer()
+                                   +--------+
+           +---------------------->| reader |---------+
+           |                       +--------+         |
+           |                           |              |
+           |                           |              | Protect:
+           |                           |              | rcu_read_lock()
+           |                           |              | rcu_read_unlock()
+           |        rcu_dereference()  |              |
+       +---------+                     |              |
+       | updater |<--------------------+              |
+       +---------+                                    V
+           |                                    +-----------+
+           +----------------------------------->| reclaimer |
+                                                +-----------+
+             Defer:
+             synchronize_rcu() & call_rcu()
+
+
+The RCU infrastructure observes the time sequence of rcu_read_lock(),
+rcu_read_unlock(), synchronize_rcu(), and call_rcu() invocations in
+order to determine when (1) synchronize_rcu() invocations may return
+to their callers and (2) call_rcu() callbacks may be invoked.  Efficient
+implementations of the RCU infrastructure make heavy use of batching in
+order to amortize their overhead over many uses of the corresponding APIs.
+
+There are no fewer than three RCU mechanisms in the Linux kernel; the
+diagram above shows the first one, which is by far the most commonly used.
+The rcu_dereference() and rcu_assign_pointer() primitives are used for
+all three mechanisms, but different defer and protect primitives are
+used as follows:
+
+       Defer                   Protect
+
+a.     synchronize_rcu()       rcu_read_lock() / rcu_read_unlock()
+       call_rcu()
+
+b.     call_rcu_bh()           rcu_read_lock_bh() / rcu_read_unlock_bh()
+
+c.     synchronize_sched()     preempt_disable() / preempt_enable()
+                               local_irq_save() / local_irq_restore()
+                               hardirq enter / hardirq exit
+                               NMI enter / NMI exit
+
+These three mechanisms are used as follows:
+
+a.     RCU applied to normal data structures.
+
+b.     RCU applied to networking data structures that may be subjected
+       to remote denial-of-service attacks.
+
+c.     RCU applied to scheduler and interrupt/NMI-handler tasks.
+
+Again, most uses will be of (a).  The (b) and (c) cases are important
+for specialized uses, but are relatively uncommon.
+
+
+3.  WHAT ARE SOME EXAMPLE USES OF CORE RCU API?
+
+This section shows a simple use of the core RCU API to protect a
+global pointer to a dynamically allocated structure.  More typical
+uses of RCU may be found in listRCU.txt, arrayRCU.txt, and NMI-RCU.txt.
+
+       struct foo {
+               int a;
+               char b;
+               long c;
+       };
+       DEFINE_SPINLOCK(foo_mutex);
+
+       struct foo *gbl_foo;
+
+       /*
+        * Create a new struct foo that is the same as the one currently
+        * pointed to by gbl_foo, except that field "a" is replaced
+        * with "new_a".  Points gbl_foo to the new structure, and
+        * frees up the old structure after a grace period.
+        *
+        * Uses rcu_assign_pointer() to ensure that concurrent readers
+        * see the initialized version of the new structure.
+        *
+        * Uses synchronize_rcu() to ensure that any readers that might
+        * have references to the old structure complete before freeing
+        * the old structure.
+        */
+       void foo_update_a(int new_a)
+       {
+               struct foo *new_fp;
+               struct foo *old_fp;
+
+               new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
+               spin_lock(&foo_mutex);
+               old_fp = gbl_foo;
+               *new_fp = *old_fp;
+               new_fp->a = new_a;
+               rcu_assign_pointer(gbl_foo, new_fp);
+               spin_unlock(&foo_mutex);
+               synchronize_rcu();
+               kfree(old_fp);
+       }
+
+       /*
+        * Return the value of field "a" of the current gbl_foo
+        * structure.  Use rcu_read_lock() and rcu_read_unlock()
+        * to ensure that the structure does not get deleted out
+        * from under us, and use rcu_dereference() to ensure that
+        * we see the initialized version of the structure (important
+        * for DEC Alpha and for people reading the code).
+        */
+       int foo_get_a(void)
+       {
+               int retval;
+
+               rcu_read_lock();
+               retval = rcu_dereference(gbl_foo)->a;
+               rcu_read_unlock();
+               return retval;
+       }
+
+So, to sum up:
+
+o      Use rcu_read_lock() and rcu_read_unlock() to guard RCU
+       read-side critical sections.
+
+o      Within an RCU read-side critical section, use rcu_dereference()
+       to dereference RCU-protected pointers.
+
+o      Use some solid scheme (such as locks or semaphores) to
+       keep concurrent updates from interfering with each other.
+
+o      Use rcu_assign_pointer() to update an RCU-protected pointer.
+       This primitive protects concurrent readers from the updater,
+       -not- concurrent updates from each other!  You therefore still
+       need to use locking (or something similar) to keep concurrent
+       rcu_assign_pointer() primitives from interfering with each other.
+
+o      Use synchronize_rcu() -after- removing a data element from an
+       RCU-protected data structure, but -before- reclaiming/freeing
+       the data element, in order to wait for the completion of all
+       RCU read-side critical sections that might be referencing that
+       data item.
+
+See checklist.txt for additional rules to follow when using RCU.
+
+
+4.  WHAT IF MY UPDATING THREAD CANNOT BLOCK?
+
+In the example above, foo_update_a() blocks until a grace period elapses.
+This is quite simple, but in some cases one cannot afford to wait so
+long -- there might be other high-priority work to be done.
+
+In such cases, one uses call_rcu() rather than synchronize_rcu().
+The call_rcu() API is as follows:
+
+       void call_rcu(struct rcu_head * head,
+                     void (*func)(struct rcu_head *head));
+
+This function invokes func(head) after a grace period has elapsed.
+This invocation might happen from either softirq or process context,
+so the function is not permitted to block.  The foo struct needs to
+have an rcu_head structure added, perhaps as follows:
+
+       struct foo {
+               int a;
+               char b;
+               long c;
+               struct rcu_head rcu;
+       };
+
+The foo_update_a() function might then be written as follows:
+
+       /*
+        * Create a new struct foo that is the same as the one currently
+        * pointed to by gbl_foo, except that field "a" is replaced
+        * with "new_a".  Points gbl_foo to the new structure, and
+        * frees up the old structure after a grace period.
+        *
+        * Uses rcu_assign_pointer() to ensure that concurrent readers
+        * see the initialized version of the new structure.
+        *
+        * Uses call_rcu() to ensure that any readers that might have
+        * references to the old structure complete before freeing the
+        * old structure.
+        */
+       void foo_update_a(int new_a)
+       {
+               struct foo *new_fp;
+               struct foo *old_fp;
+
+               new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
+               spin_lock(&foo_mutex);
+               old_fp = gbl_foo;
+               *new_fp = *old_fp;
+               new_fp->a = new_a;
+               rcu_assign_pointer(gbl_foo, new_fp);
+               spin_unlock(&foo_mutex);
+               call_rcu(&old_fp->rcu, foo_reclaim);
+       }
+
+The foo_reclaim() function might appear as follows:
+
+       void foo_reclaim(struct rcu_head *rp)
+       {
+               struct foo *fp = container_of(rp, struct foo, rcu);
+
+               kfree(fp);
+       }
+
+The container_of() primitive is a macro that, given a pointer into a
+struct, the type of the struct, and the pointed-to field within the
+struct, returns a pointer to the beginning of the struct.
+
+The use of call_rcu() permits the caller of foo_update_a() to
+immediately regain control, without needing to worry further about the
+old version of the newly updated element.  It also clearly shows the
+RCU distinction between updater, namely foo_update_a(), and reclaimer,
+namely foo_reclaim().
+
+The summary of advice is the same as for the previous section, except
+that we are now using call_rcu() rather than synchronize_rcu():
+
+o      Use call_rcu() -after- removing a data element from an
+       RCU-protected data structure in order to register a callback
+       function that will be invoked after the completion of all RCU
+       read-side critical sections that might be referencing that
+       data item.
+
+Again, see checklist.txt for additional rules governing the use of RCU.
+
+
+5.  WHAT ARE SOME SIMPLE IMPLEMENTATIONS OF RCU?
+
+One of the nice things about RCU is that it has extremely simple "toy"
+implementations that are a good first step towards understanding the
+production-quality implementations in the Linux kernel.  This section
+presents two such "toy" implementations of RCU, one that is implemented
+in terms of familiar locking primitives, and another that more closely
+resembles "classic" RCU.  Both are way too simple for real-world use,
+lacking both functionality and performance.  However, they are useful
+in getting a feel for how RCU works.  See kernel/rcupdate.c for a
+production-quality implementation, and see:
+
+       http://www.rdrop.com/users/paulmck/RCU
+
+for papers describing the Linux kernel RCU implementation.  The OLS'01
+and OLS'02 papers are a good introduction, and the dissertation provides
+more details on the current implementation.
+
+
+5A.  "TOY" IMPLEMENTATION #1: LOCKING
+
+This section presents a "toy" RCU implementation that is based on
+familiar locking primitives.  Its overhead makes it a non-starter for
+real-life use, as does its lack of scalability.  It is also unsuitable
+for realtime use, since it allows scheduling latency to "bleed" from
+one read-side critical section to another.
+
+However, it is probably the easiest implementation to relate to, so is
+a good starting point.
+
+It is extremely simple:
+
+       static DEFINE_RWLOCK(rcu_gp_mutex);
+
+       void rcu_read_lock(void)
+       {
+               read_lock(&rcu_gp_mutex);
+       }
+
+       void rcu_read_unlock(void)
+       {
+               read_unlock(&rcu_gp_mutex);
+       }
+
+       void synchronize_rcu(void)
+       {
+               write_lock(&rcu_gp_mutex);
+               write_unlock(&rcu_gp_mutex);
+       }
+
+[You can ignore rcu_assign_pointer() and rcu_dereference() without
+missing much.  But here they are anyway.  And whatever you do, don't
+forget about them when submitting patches making use of RCU!]
+
+       #define rcu_assign_pointer(p, v)        ({ \
+                                                       smp_wmb(); \
+                                                       (p) = (v); \
+                                               })
+
+       #define rcu_dereference(p)     ({ \
+                                       typeof(p) _________p1 = p; \
+                                       smp_read_barrier_depends(); \
+                                       (_________p1); \
+                                       })
+
+
+The rcu_read_lock() and rcu_read_unlock() primitives read-acquire
+and release a global reader-writer lock.  The synchronize_rcu()
+primitive write-acquires this same lock, then immediately releases
+it.  This means that once synchronize_rcu() exits, all RCU read-side
+critical sections that were in progress before synchronize_rcu() was
+called are guaranteed to have completed -- there is no way that
+synchronize_rcu() would have been able to write-acquire the lock
+otherwise.
+
+It is possible to nest rcu_read_lock(), since reader-writer locks may
+be recursively acquired.  Note also that rcu_read_lock() is immune
+from deadlock (an important property of RCU).  The reason for this is
+that the only thing that can block rcu_read_lock() is a synchronize_rcu().
+But synchronize_rcu() does not acquire any locks while holding rcu_gp_mutex,
+so there can be no deadlock cycle.
+
+Quick Quiz #1: Why is this argument naive?  How could a deadlock
+               occur when using this algorithm in a real-world Linux
+               kernel?  How could this deadlock be avoided?
+
+
+5B.  "TOY" EXAMPLE #2: CLASSIC RCU
+
+This section presents a "toy" RCU implementation that is based on
+"classic RCU".  It is also short on performance (but only for updates) and
+on features such as hotplug CPU and the ability to run in CONFIG_PREEMPT
+kernels.  The definitions of rcu_dereference() and rcu_assign_pointer()
+are the same as those shown in the preceding section, so they are omitted.
+
+       void rcu_read_lock(void) { }
+
+       void rcu_read_unlock(void) { }
+
+       void synchronize_rcu(void)
+       {
+               int cpu;
+
+               for_each_cpu(cpu)
+                       run_on(cpu);
+       }
+
+Note that rcu_read_lock() and rcu_read_unlock() do absolutely nothing.
+This is the great strength of classic RCU in a non-preemptive kernel:
+read-side overhead is precisely zero, at least on non-Alpha CPUs.
+And there is absolutely no way that rcu_read_lock() can possibly
+participate in a deadlock cycle!
+
+The implementation of synchronize_rcu() simply schedules itself on each
+CPU in turn.  The run_on() primitive can be implemented straightforwardly
+in terms of the sched_setaffinity() primitive.  Of course, a somewhat less
+"toy" implementation would restore the affinity upon completion rather
+than just leaving all tasks running on the last CPU, but when I said
+"toy", I meant -toy-!
+
+So how the heck is this supposed to work???
+
+Remember that it is illegal to block while in an RCU read-side critical
+section.  Therefore, if a given CPU executes a context switch, we know
+that it must have completed all preceding RCU read-side critical sections.
+Once -all- CPUs have executed a context switch, then -all- preceding
+RCU read-side critical sections will have completed.
+
+So, suppose that we remove a data item from its structure and then invoke
+synchronize_rcu().  Once synchronize_rcu() returns, we are guaranteed
+that there are no RCU read-side critical sections holding a reference
+to that data item, so we can safely reclaim it.
+
+Quick Quiz #2: Give an example where Classic RCU's read-side
+               overhead is -negative-.
+
+Quick Quiz #3:  If it is illegal to block in an RCU read-side
+               critical section, what the heck do you do in
+               PREEMPT_RT, where normal spinlocks can block???
+
+
+6.  ANALOGY WITH READER-WRITER LOCKING
+
+Although RCU can be used in many different ways, a very common use of
+RCU is analogous to reader-writer locking.  The following unified
+diff shows how closely related RCU and reader-writer locking can be.
+
+       @@ -13,15 +14,15 @@
+               struct list_head *lp;
+               struct el *p;
+
+       -       read_lock();
+       -       list_for_each_entry(p, head, lp) {
+       +       rcu_read_lock();
+       +       list_for_each_entry_rcu(p, head, lp) {
+                       if (p->key == key) {
+                               *result = p->data;
+       -                       read_unlock();
+       +                       rcu_read_unlock();
+                               return 1;
+                       }
+               }
+       -       read_unlock();
+       +       rcu_read_unlock();
+               return 0;
+        }
+
+       @@ -29,15 +30,16 @@
+        {
+               struct el *p;
+
+       -       write_lock(&listmutex);
+       +       spin_lock(&listmutex);
+               list_for_each_entry(p, head, lp) {
+                       if (p->key == key) {
+                               list_del(&p->list);
+       -                       write_unlock(&listmutex);
+       +                       spin_unlock(&listmutex);
+       +                       synchronize_rcu();
+                               kfree(p);
+                               return 1;
+                       }
+               }
+       -       write_unlock(&listmutex);
+       +       spin_unlock(&listmutex);
+               return 0;
+        }
+
+Or, for those who prefer a side-by-side listing:
+
+ 1 struct el {                          1 struct el {
+ 2   struct list_head list;             2   struct list_head list;
+ 3   long key;                          3   long key;
+ 4   spinlock_t mutex;                  4   spinlock_t mutex;
+ 5   int data;                          5   int data;
+ 6   /* Other data fields */            6   /* Other data fields */
+ 7 };                                   7 };
+ 8 spinlock_t listmutex;                8 spinlock_t listmutex;
+ 9 struct el head;                      9 struct el head;
+
+ 1 int search(long key, int *result)    1 int search(long key, int *result)
+ 2 {                                    2 {
+ 3   struct list_head *lp;              3   struct list_head *lp;
+ 4   struct el *p;                      4   struct el *p;
+ 5                                      5
+ 6   read_lock();                       6   rcu_read_lock();
+ 7   list_for_each_entry(p, head, lp) { 7   list_for_each_entry_rcu(p, head, lp) {
+ 8     if (p->key == key) {             8     if (p->key == key) {
+ 9       *result = p->data;             9       *result = p->data;
+10       read_unlock();                10       rcu_read_unlock();
+11       return 1;                     11       return 1;
+12     }                               12     }
+13   }                                 13   }
+14   read_unlock();                    14   rcu_read_unlock();
+15   return 0;                         15   return 0;
+16 }                                   16 }
+
+ 1 int delete(long key)                 1 int delete(long key)
+ 2 {                                    2 {
+ 3   struct el *p;                      3   struct el *p;
+ 4                                      4
+ 5   write_lock(&listmutex);            5   spin_lock(&listmutex);
+ 6   list_for_each_entry(p, head, lp) { 6   list_for_each_entry(p, head, lp) {
+ 7     if (p->key == key) {             7     if (p->key == key) {
+ 8       list_del(&p->list);            8       list_del(&p->list);
+ 9       write_unlock(&listmutex);      9       spin_unlock(&listmutex);
+                                       10       synchronize_rcu();
+10       kfree(p);                     11       kfree(p);
+11       return 1;                     12       return 1;
+12     }                               13     }
+13   }                                 14   }
+14   write_unlock(&listmutex);         15   spin_unlock(&listmutex);
+15   return 0;                         16   return 0;
+16 }                                   17 }
+
+Either way, the differences are quite small.  Read-side locking moves
+to rcu_read_lock() and rcu_read_unlock(), update-side locking moves
+from a reader-writer lock to a simple spinlock, and a synchronize_rcu()
+precedes the kfree().
+
+However, there is one potential catch: the read-side and update-side
+critical sections can now run concurrently.  In many cases, this will
+not be a problem, but it is necessary to check carefully regardless.
+For example, if multiple independent list updates must be seen as
+a single atomic update, converting to RCU will require special care.
+
+Also, the presence of synchronize_rcu() means that the RCU version of
+delete() can now block.  If this is a problem, there is a callback-based
+mechanism that never blocks, namely call_rcu(), that can be used in
+place of synchronize_rcu().
+
+
+7.  FULL LIST OF RCU APIs
+
+The RCU APIs are documented in docbook-format header comments in the
+Linux-kernel source code, but it helps to have a full list of the
+APIs, since there does not appear to be a way to categorize them
+in docbook.  Here is the list, by category.
+
+Markers for RCU read-side critical sections:
+
+       rcu_read_lock
+       rcu_read_unlock
+       rcu_read_lock_bh
+       rcu_read_unlock_bh
+
+RCU pointer/list traversal:
+
+       rcu_dereference
+       list_for_each_rcu               (to be deprecated in favor of
+                                        list_for_each_entry_rcu)
+       list_for_each_safe_rcu          (deprecated, not used)
+       list_for_each_entry_rcu
+       list_for_each_continue_rcu      (to be deprecated in favor of new
+                                        list_for_each_entry_continue_rcu)
+       hlist_for_each_rcu              (to be deprecated in favor of
+                                        hlist_for_each_entry_rcu)
+       hlist_for_each_entry_rcu
+
+RCU pointer update:
+
+       rcu_assign_pointer
+       list_add_rcu
+       list_add_tail_rcu
+       list_del_rcu
+       list_replace_rcu
+       hlist_del_rcu
+       hlist_add_head_rcu
+
+RCU grace period:
+
+       synchronize_kernel (deprecated)
+       synchronize_net
+       synchronize_sched
+       synchronize_rcu
+       call_rcu
+       call_rcu_bh
+
+See the comment headers in the source code (or the docbook generated
+from them) for more information.
+
+
+8.  ANSWERS TO QUICK QUIZZES
+
+Quick Quiz #1: Why is this argument naive?  How could a deadlock
+               occur when using this algorithm in a real-world Linux
+               kernel?  [Referring to the lock-based "toy" RCU
+               algorithm.]
+
+Answer:                Consider the following sequence of events:
+
+               1.      CPU 0 acquires some unrelated lock, call it
+                       "problematic_lock".
+
+               2.      CPU 1 enters synchronize_rcu(), write-acquiring
+                       rcu_gp_mutex.
+
+               3.      CPU 0 enters rcu_read_lock(), but must wait
+                       because CPU 1 holds rcu_gp_mutex.
+
+               4.      CPU 1 is interrupted, and the irq handler
+                       attempts to acquire problematic_lock.
+
+               The system is now deadlocked.
+
+               One way to avoid this deadlock is to use an approach like
+               that of CONFIG_PREEMPT_RT, where all normal spinlocks
+               become blocking locks, and all irq handlers execute in
+               the context of special tasks.  In this case, in step 4
+               above, the irq handler would block, allowing CPU 1 to
+               release rcu_gp_mutex, avoiding the deadlock.
+
+               Even in the absence of deadlock, this RCU implementation
+               allows latency to "bleed" from readers to other
+               readers through synchronize_rcu().  To see this,
+               consider task A in an RCU read-side critical section
+               (thus read-holding rcu_gp_mutex), task B blocked
+               attempting to write-acquire rcu_gp_mutex, and
+               task C blocked in rcu_read_lock() attempting to
+               read-acquire rcu_gp_mutex.  Task A's RCU read-side
+               latency is holding up task C, albeit indirectly via
+               task B.
+
+               Realtime RCU implementations therefore use a counter-based
+               approach where tasks in RCU read-side critical sections
+               cannot be blocked by tasks executing synchronize_rcu().
+
+Quick Quiz #2: Give an example where Classic RCU's read-side
+               overhead is -negative-.
+
+Answer:                Imagine a single-CPU system with a non-CONFIG_PREEMPT
+               kernel where a routing table is used by process-context
+               code, but can be updated by irq-context code (for example,
+               by an "ICMP REDIRECT" packet).  The usual way of handling
+               this would be to have the process-context code disable
+               interrupts while searching the routing table.  Use of
+               RCU allows such interrupt-disabling to be dispensed with.
+               Thus, without RCU, you pay the cost of disabling interrupts,
+               and with RCU you don't.
+
+               One can argue that the overhead of RCU in this
+               case is negative with respect to the single-CPU
+               interrupt-disabling approach.  Others might argue that
+               the overhead of RCU is merely zero, and that replacing
+               the positive overhead of the interrupt-disabling scheme
+               with the zero-overhead RCU scheme does not constitute
+               negative overhead.
+
+               In real life, of course, things are more complex.  But
+               even the theoretical possibility of negative overhead for
+               a synchronization primitive is a bit unexpected.  ;-)
+
+Quick Quiz #3:  If it is illegal to block in an RCU read-side
+               critical section, what the heck do you do in
+               PREEMPT_RT, where normal spinlocks can block???
+
+Answer:                Just as PREEMPT_RT permits preemption of spinlock
+               critical sections, it permits preemption of RCU
+               read-side critical sections.  It also permits
+               spinlocks blocking while in RCU read-side critical
+               sections.
+
+               Why the apparent inconsistency?  Because it is
+               possible to use priority boosting to keep the RCU
+               grace periods short if need be (for example, if running
+               short of memory).  In contrast, if blocking waiting
+               for (say) network reception, there is no way to know
+               what should be boosted.  Especially given that the
+               process we need to boost might well be a human being
+               who just went out for a pizza or something.  And although
+               a computer-operated cattle prod might arouse serious
+               interest, it might also provoke serious objections.
+               Besides, how does the computer know what pizza parlor
+               the human being went to???
+
+
+ACKNOWLEDGEMENTS
+
+My thanks to the people who helped make this human-readable, including
+Jon Walpole, Josh Triplett, Serge Hallyn, and Suzanne Wood.
+
+
+For more information, see http://www.rdrop.com/users/paulmck/RCU.
index e2d1e760b4ba3bd382d2034286c429fe4e6979fe..6a82948ff4bd59adb26b08e305339b4452f79edc 100644 (file)
@@ -36,7 +36,7 @@ cpufreq stats provides following statistics (explained in detail below).
 
 All the statistics will be from the time the stats driver has been inserted 
 to the time when a read of a particular statistic is done. Obviously, stats 
-driver will not have any information about the the frequcny transitions before
+driver will not have any information about the frequency transitions before
 the stats driver insertion.
 
 --------------------------------------------------------------------------------
index 47f4114fbf545459efbdad232392d93357eb6968..d17b7d2dd771e6c0eeeda4b93c372840209f014d 100644 (file)
@@ -277,7 +277,7 @@ rewritten to the 'tasks' file of its cpuset.  This is done to avoid
 impacting the scheduler code in the kernel with a check for changes
 in a tasks processor placement.
 
-There is an exception to the above.  If hotplug funtionality is used
+There is an exception to the above.  If hotplug functionality is used
 to remove all the CPUs that are currently assigned to a cpuset,
 then the kernel will automatically update the cpus_allowed of all
 tasks attached to CPUs in that cpuset to allow all CPUs.  When memory
index 166474c2ee0bdc23e34b0892ded4241626abd1cd..16e9e63507551989d726998d1ac769703f4325f2 100644 (file)
@@ -1,4 +1,4 @@
-Below is the orginal README file from the descore.shar package.
+Below is the original README file from the descore.shar package.
 ------------------------------------------------------------------------------
 
 des - fast & portable DES encryption & decryption.
index 5f95d4b3cab1a4f6625f65431a720703e1ce2a05..784e08c1c80a308b9916a4a1f360380482761603 100644 (file)
@@ -17,14 +17,6 @@ Who: Greg Kroah-Hartman <greg@kroah.com>
 
 ---------------------------
 
-What:  ACPI S4bios support
-When:  May 2005
-Why:   Noone uses it, and it probably does not work, anyway. swsusp is
-       faster, more reliable, and people are actually using it.
-Who:   Pavel Machek <pavel@suse.cz>
-
----------------------------
-
 What:  io_remap_page_range() (macro or function)
 When:  September 2005
 Why:   Replaced by io_remap_pfn_range() which allows more memory space
index 4ccdcc6fe3645992a796eef7d6cfe763d1e232d2..8ec32cc49eb107bdab130a6cb24c54b6da9dfcb2 100644 (file)
@@ -878,7 +878,7 @@ DVD_READ_STRUCT                     Read structure
 
        error returns:
          EINVAL        physical.layer_num exceeds number of layers
-         EIO           Recieved invalid response from drive
+         EIO           Received invalid response from drive
 
 
 
index d2f0c67ba1fb01e419966178138629048d2202bb..db2603ceabba6f6743438a4e74d458865ba504bb 100644 (file)
@@ -549,6 +549,7 @@ running once the system is up.
                             keyboard and can not control its state
                             (Don't attempt to blink the leds)
        i8042.noaux     [HW] Don't check for auxiliary (== mouse) port
+       i8042.nokbd     [HW] Don't check/create keyboard port
        i8042.nomux     [HW] Don't check presence of an active multiplexing
                             controller
        i8042.nopnp     [HW] Don't use ACPIPnP / PnPBIOS to discover KBD/AUX
index 6739ab9615ef7085441fae78c6f4695bd36d5682..807a0c7b4737c60fe8d119d54b65ebf4ba80af10 100644 (file)
@@ -30,7 +30,7 @@ other program after you have done the following:
    Read the file 'binfmt_misc.txt' in this directory to know
    more about the configuration process.
 
-3) Add the following enries to /etc/rc.local or similar script
+3) Add the following entries to /etc/rc.local or similar script
    to be run at system startup:
 
 # Insert BINFMT_MISC module into the kernel
index 24d029455baadabc3acc398e3970ff8052e3ab1d..a55f0f95b171a3bbe80b5e19c535a2707e957f64 100644 (file)
@@ -1241,7 +1241,7 @@ traffic while still maintaining carrier on.
 
        If running SNMP agents, the bonding driver should be loaded
 before any network drivers participating in a bond.  This requirement
-is due to the the interface index (ipAdEntIfIndex) being associated to
+is due to the interface index (ipAdEntIfIndex) being associated to
 the first interface found with a given IP address.  That is, there is
 only one ipAdEntIfIndex for each IP address.  For example, if eth0 and
 eth1 are slaves of bond0 and the driver for eth0 is loaded before the
@@ -1937,7 +1937,7 @@ switches currently available support 802.3ad.
        If not explicitly configured (with ifconfig or ip link), the
 MAC address of the bonding device is taken from its first slave
 device.  This MAC address is then passed to all following slaves and
-remains persistent (even if the the first slave is removed) until the
+remains persistent (even if the first slave is removed) until the
 bonding device is brought down or reconfigured.
 
        If you wish to change the MAC address, you can set it with
index aea20cd2a56e0f68071088b893a7e78d9dd1a5bb..c96897aa08b6b89f9845a131635b69842877a575 100644 (file)
@@ -355,7 +355,7 @@ REVISION HISTORY
                                There is no functional difference between the two packages         
 
 2.0.7   Aug 26, 1999           o  Merged X25API code into WANPIPE.
-                               o  Fixed a memeory leak for X25API
+                               o  Fixed a memory leak for X25API
                                o  Updated the X25API code for 2.2.X kernels.
                                o  Improved NEM handling.   
 
@@ -514,7 +514,7 @@ beta2-2.2.0 Jan 8 2001
                                o Patches for 2.4.0 kernel
                                o Patches for 2.2.18 kernel
                                o Minor updates to PPP and CHLDC drivers.
-                                 Note: No functinal difference. 
+                                 Note: No functional difference.
 
 beta3-2.2.9    Jan 10 2001
                                o I missed the 2.2.18 kernel patches in beta2-2.2.0
index 76d28d033657aac4158b8db93821553f332d6b11..711210b38f5fe4c1dfa84db9392cf0c5c75dfa49 100644 (file)
@@ -84,7 +84,7 @@ Each entry consists of:
 
 Most drivers don't need to use the driver_data field.  Best practice
 for use of driver_data is to use it as an index into a static list of
-equivalant device types, not to use it as a pointer.
+equivalent device types, not to use it as a pointer.
 
 Have a table entry {PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID}
 to have probe() called for every PCI device known to the system.
index 2bfe71beec5b1cacad0464298d8acebacc4c7f27..e75d7474322cd915b07c169c358617dc639caf31 100644 (file)
@@ -134,7 +134,7 @@ pci_get_device_by_addr() will find the pci device associated
 with that address (if any).
 
 The default include/asm-ppc64/io.h macros readb(), inb(), insb(),
-etc. include a check to see if the the i/o read returned all-0xff's.
+etc. include a check to see if the i/o read returned all-0xff's.
 If so, these make a call to eeh_dn_check_failure(), which in turn
 asks the firmware if the all-ff's value is the sign of a true EEH
 error.  If it is not, processing continues as normal.  The grand
index e24fdeada9705e3e9a14cc4195592e832d169c00..e321a8ed2a2d6bf78581673b967cc9fe45186435 100644 (file)
@@ -468,7 +468,7 @@ The hex_ascii view shows the data field in hex and ascii representation
 The raw view returns a bytestream as the debug areas are stored in memory.
 
 The sprintf view formats the debug entries in the same way as the sprintf
-function would do. The sprintf event/expection fuctions write to the 
-function would do. The sprintf event/exception functions write to the
 debug entry a pointer to the format string (size = sizeof(long)) 
 and for each vararg a long value. So e.g. for a debug entry with a format 
 string plus two varargs one would need to allocate a (3 * sizeof(long)) 
index 2814491600ff472c315d31ad60da806bcb3496a2..2ffb3ae0ef4d1d8fa7c27aa1ecb5238d4c8d9878 100644 (file)
    /proc/scsi/ibmmca/<host_no>. ibmmca_proc_info() provides this information.
    
    This table is quite informative for interested users. It shows the load
-   of commands on the subsystem and wether you are running the bypassed 
+   of commands on the subsystem and whether you are running the bypassed
    (software) or integrated (hardware) SCSI-command set (see below). The
    amount of accesses is shown. Read, write, modeselect is shown separately
    in order to help debugging problems with CD-ROMs or tapedrives.
index 5c49ba07e709625516952f4d1147f0ae61464a37..ebfcdf28485f28aa47ee6af8fd5f4f639aadda59 100644 (file)
@@ -1459,7 +1459,7 @@ devices where %i is sound card number from zero to seven.
 To auto-load an ALSA driver for OSS services, define the string
 'sound-slot-%i' where %i means the slot number for OSS, which
 corresponds to the card index of ALSA.  Usually, define this
-as the the same card module.
+as the same card module.
 
 An example configuration for a single emu10k1 card is like below:
 ----- /etc/modprobe.conf
index 136d817c01babb51fc8e80ba5917544a7204c5ab..baf17b381588d0899512b0e65b29adda91aa49bc 100644 (file)
@@ -171,7 +171,7 @@ the header 'include/linux/sysrq.h', this will define everything else you need.
 Next, you must create a sysrq_key_op struct, and populate it with A) the key
 handler function you will use, B) a help_msg string, that will print when SysRQ
 prints help, and C) an action_msg string, that will print right before your
-handler is called. Your handler must conform to the protoype in 'sysrq.h'.
+handler is called. Your handler must conform to the prototype in 'sysrq.h'.
 
 After the sysrq_key_op is created, you can call the macro 
 register_sysrq_key(int key, struct sysrq_key_op *op_p) that is defined in
index 0c7b654fec996c388dd99af3dcc8a7884ca30989..544430e3998027f2c822ff4d27d9123e4d7f5c90 100644 (file)
   If you want to access files on the host machine from inside UML, you
   can treat it as a separate machine and either nfs mount directories
   from the host or copy files into the virtual machine with scp or rcp.
-  However, since UML is running on the the host, it can access those
+  However, since UML is running on the host, it can access those
   files just like any other process and make them available inside the
   virtual machine without needing to use the network.
 
index a938c3dd13d657fc504b84e58d4c5781a30c97fa..815f5c2301ffa8599ce35dd3d684bf66afbfcc02 100644 (file)
@@ -20,7 +20,7 @@ License along with this program; if not, write to the Free
 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 MA 02111-1307 USA.
 
-This document and the the gadget serial driver itself are
+This document and the gadget serial driver itself are
 Copyright (C) 2004 by Al Borchers (alborchers@steinerpoint.com).
 
 If you have questions, problems, or suggestions for this driver
index 01425c21986ba7ee03408f74537a36d2be7b2912..52c94bd7dca1f909cc16ddc2f40351cfbefad5ab 100644 (file)
@@ -222,7 +222,7 @@ was introduced in 1991, is used in the DC10 old
 can generate: PAL , NTSC , SECAM
 
 The adv717x, should be able to produce PAL N. But you find nothing PAL N 
-specific in the the registers. Seem that you have to reuse a other standard
+specific in the registers. It seems that you have to reuse another standard
 to generate PAL N, maybe it would work if you use the PAL M settings. 
 
 ==========================
diff --git a/Kbuild b/Kbuild
index 1880e6f760aab05f139d74400d2477e7c47681da..79003918f37f2593e157778734f2bb83d131abb1 100644 (file)
--- a/Kbuild
+++ b/Kbuild
@@ -4,7 +4,7 @@
 # 1) Generate asm-offsets.h
 
 #####
-# 1) Generate asm-offsets.h 
+# 1) Generate asm-offsets.h
 #
 
 offsets-file := include/asm-$(ARCH)/asm-offsets.h
@@ -22,6 +22,7 @@ sed-$(CONFIG_MIPS) := "/^@@@/s///p"
 
 quiet_cmd_offsets = GEN     $@
 define cmd_offsets
+       mkdir -p $(dir $@); \
        cat $< | \
        (set -e; \
         echo "#ifndef __ASM_OFFSETS_H__"; \
@@ -43,6 +44,6 @@ arch/$(ARCH)/kernel/asm-offsets.s: arch/$(ARCH)/kernel/asm-offsets.c FORCE
        $(Q)mkdir -p $(dir $@)
        $(call if_changed_dep,cc_s_c)
 
-$(srctree)/$(offsets-file): arch/$(ARCH)/kernel/asm-offsets.s Kbuild
+$(obj)/$(offsets-file): arch/$(ARCH)/kernel/asm-offsets.s Kbuild
        $(call cmd,offsets)
 
index 2402430c87e663d99841f84980039c999af3d4d6..382298f37adfb010b32d0904e818081da5824f19 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -334,7 +334,7 @@ KALLSYMS    = scripts/kallsyms
 PERL           = perl
 CHECK          = sparse
 
-CHECKFLAGS     := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__
+CHECKFLAGS     := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ $(CF)
 MODFLAGS       = -DMODULE
 CFLAGS_MODULE   = $(MODFLAGS)
 AFLAGS_MODULE   = $(MODFLAGS)
@@ -382,6 +382,9 @@ RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exclude CV
 scripts_basic:
        $(Q)$(MAKE) $(build)=scripts/basic
 
+# To avoid any implicit rule to kick in, define an empty command.
+scripts/basic/%: scripts_basic ;
+
 .PHONY: outputmakefile
 # outputmakefile generate a Makefile to be placed in output directory, if
 # using a seperate output directory. This allows convinient use
@@ -444,9 +447,8 @@ ifeq ($(config-targets),1)
 include $(srctree)/arch/$(ARCH)/Makefile
 export KBUILD_DEFCONFIG
 
-config: scripts_basic outputmakefile FORCE
-       $(Q)$(MAKE) $(build)=scripts/kconfig $@
-%config: scripts_basic outputmakefile FORCE
+config %config: scripts_basic outputmakefile FORCE
+       $(Q)mkdir -p include/linux
        $(Q)$(MAKE) $(build)=scripts/kconfig $@
 
 else
@@ -641,8 +643,13 @@ quiet_cmd_vmlinux__ ?= LD      $@
 # Generate new vmlinux version
 quiet_cmd_vmlinux_version = GEN     .version
       cmd_vmlinux_version = set -e;                     \
-       . $(srctree)/scripts/mkversion > .tmp_version;  \
-       mv -f .tmp_version .version;                    \
+       if [ ! -r .version ]; then                      \
+         rm -f .version;                               \
+         echo 1 >.version;                             \
+       else                                            \
+         mv .version .old_version;                     \
+         expr 0$$(cat .old_version) + 1 >.version;     \
+       fi;                                             \
        $(MAKE) $(build)=init
 
 # Generate System.map
@@ -756,6 +763,7 @@ endif # ifdef CONFIG_KALLSYMS
 # vmlinux image - including updated kernel symbols
 vmlinux: $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) $(kallsyms.o) FORCE
        $(call if_changed_rule,vmlinux__)
+       $(Q)rm -f .old_version
 
 # The actual objects are generated when descending, 
 # make sure no implicit rule kicks in
@@ -806,7 +814,7 @@ ifneq ($(KBUILD_MODULES),)
 endif
 
 prepare0: prepare prepare1 FORCE
-       $(Q)$(MAKE) $(build)=$(srctree)
+       $(Q)$(MAKE) $(build)=.
 
 # All the preparing..
 prepare-all: prepare0
@@ -848,7 +856,7 @@ include/asm:
 
 #      Split autoconf.h into include/linux/config/*
 
-include/config/MARKER: include/linux/autoconf.h
+include/config/MARKER: scripts/basic/split-include include/linux/autoconf.h
        @echo '  SPLIT   include/linux/autoconf.h -> include/config/*'
        @scripts/basic/split-include include/linux/autoconf.h include/config
        @touch $@
@@ -1053,6 +1061,7 @@ help:
        @echo  '  rpm             - Build a kernel as an RPM package'
        @echo  '  tags/TAGS       - Generate tags file for editors'
        @echo  '  cscope          - Generate cscope index'
+       @echo  '  kernelrelease   - Output the release version string'
        @echo  ''
        @echo  'Static analysers'
        @echo  '  buildcheck      - List dangling references to vmlinux discarded sections'
index 224c34741d32d139aec5d5ff110cfd5398565f58..f9da827a0c1880154757981e1430f01f071181f2 100644 (file)
@@ -9,7 +9,7 @@ screen please read "Documentation/oops-tracing.txt" before posting your
 bug report. This explains what you should do with the "Oops" information
 to make it useful to the recipient.
 
-      Send the output the maintainer of the kernel area that seems to
+      Send the output to the maintainer of the kernel area that seems to
 be involved with the problem. Don't worry too much about getting the
 wrong person. If you are unsure send it to the person responsible for the
 code relevant to what you were doing. If it occurs repeatably try and
@@ -18,15 +18,15 @@ The list of maintainers is in the MAINTAINERS file in this directory.
 
       If it is a security bug, please copy the Security Contact listed
 in the MAINTAINERS file.  They can help coordinate bugfix and disclosure.
-See Documentation/SecurityBugs for more infomation.
+See Documentation/SecurityBugs for more information.
 
       If you are totally stumped as to whom to send the report, send it to
 linux-kernel@vger.kernel.org. (For more information on the linux-kernel
 mailing list see http://www.tux.org/lkml/).
 
-This is a suggested format for a bug report sent to the Linux kernel mailing 
-list. Having a standardized bug report form makes it easier  for you not to 
-overlook things, and easier for the developers to find the pieces of 
+This is a suggested format for a bug report sent to the Linux kernel mailing
+list. Having a standardized bug report form makes it easier for you not to
+overlook things, and easier for the developers to find the pieces of
 information they're really interested in. Don't feel you have to follow it.
 
       First run the ver_linux script included as scripts/ver_linux, which
@@ -35,9 +35,9 @@ the command "sh scripts/ver_linux".
 
 Use that information to fill in all fields of the bug report form, and
 post it to the mailing list with a subject of "PROBLEM: <one line
-summary from [1.]>" for easy identification by the developers    
+summary from [1.]>" for easy identification by the developers.
 
-[1.] One line summary of the problem:    
+[1.] One line summary of the problem:
 [2.] Full description of the problem/report:
 [3.] Keywords (i.e., modules, networking, kernel):
 [4.] Kernel version (from /proc/version):
index fc5ef90c4fc95863653860190f76dc92a647616a..24ae9a3660737767db4ca7f4b1a7da997703bd76 100644 (file)
@@ -185,15 +185,6 @@ EXPORT_SYMBOL(smp_num_cpus);
 EXPORT_SYMBOL(smp_call_function);
 EXPORT_SYMBOL(smp_call_function_on_cpu);
 EXPORT_SYMBOL(_atomic_dec_and_lock);
-#ifdef CONFIG_DEBUG_SPINLOCK
-EXPORT_SYMBOL(_raw_spin_unlock);
-EXPORT_SYMBOL(debug_spin_lock);
-EXPORT_SYMBOL(debug_spin_trylock);
-#endif
-#ifdef CONFIG_DEBUG_RWLOCK
-EXPORT_SYMBOL(_raw_write_lock);
-EXPORT_SYMBOL(_raw_read_lock);
-#endif
 EXPORT_SYMBOL(cpu_present_mask);
 #endif /* CONFIG_SMP */
 
index 2b034182a0ca82c6ee33c4deeb0f59af3f99d825..0636116210d236e2ed0353c4296036e2edf793ec 100644 (file)
@@ -1154,8 +1154,7 @@ osf_usleep_thread(struct timeval32 __user *sleep, struct timeval32 __user *remai
 
        ticks = timeval_to_jiffies(&tmp);
 
-       current->state = TASK_INTERRUPTIBLE;
-       ticks = schedule_timeout(ticks);
+       ticks = schedule_timeout_interruptible(ticks);
 
        if (remain) {
                jiffies_to_timeval(ticks, &tmp);
index e211aa7404e6152c4668277fdc03872547d8fab1..da0be34657915beec4cab3beeb15af6d33e0956c 100644 (file)
@@ -989,175 +989,3 @@ flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 
        preempt_enable();
 }
-\f
-#ifdef CONFIG_DEBUG_SPINLOCK
-void
-_raw_spin_unlock(spinlock_t * lock)
-{
-       mb();
-       lock->lock = 0;
-
-       lock->on_cpu = -1;
-       lock->previous = NULL;
-       lock->task = NULL;
-       lock->base_file = "none";
-       lock->line_no = 0;
-}
-
-void
-debug_spin_lock(spinlock_t * lock, const char *base_file, int line_no)
-{
-       long tmp;
-       long stuck;
-       void *inline_pc = __builtin_return_address(0);
-       unsigned long started = jiffies;
-       int printed = 0;
-       int cpu = smp_processor_id();
-
-       stuck = 1L << 30;
- try_again:
-
-       /* Use sub-sections to put the actual loop at the end
-          of this object file's text section so as to perfect
-          branch prediction.  */
-       __asm__ __volatile__(
-       "1:     ldl_l   %0,%1\n"
-       "       subq    %2,1,%2\n"
-       "       blbs    %0,2f\n"
-       "       or      %0,1,%0\n"
-       "       stl_c   %0,%1\n"
-       "       beq     %0,3f\n"
-       "4:     mb\n"
-       ".subsection 2\n"
-       "2:     ldl     %0,%1\n"
-       "       subq    %2,1,%2\n"
-       "3:     blt     %2,4b\n"
-       "       blbs    %0,2b\n"
-       "       br      1b\n"
-       ".previous"
-       : "=r" (tmp), "=m" (lock->lock), "=r" (stuck)
-       : "m" (lock->lock), "2" (stuck) : "memory");
-
-       if (stuck < 0) {
-               printk(KERN_WARNING
-                      "%s:%d spinlock stuck in %s at %p(%d)"
-                      " owner %s at %p(%d) %s:%d\n",
-                      base_file, line_no,
-                      current->comm, inline_pc, cpu,
-                      lock->task->comm, lock->previous,
-                      lock->on_cpu, lock->base_file, lock->line_no);
-               stuck = 1L << 36;
-               printed = 1;
-               goto try_again;
-       }
-
-       /* Exiting.  Got the lock.  */
-       lock->on_cpu = cpu;
-       lock->previous = inline_pc;
-       lock->task = current;
-       lock->base_file = base_file;
-       lock->line_no = line_no;
-
-       if (printed) {
-               printk(KERN_WARNING
-                      "%s:%d spinlock grabbed in %s at %p(%d) %ld ticks\n",
-                      base_file, line_no, current->comm, inline_pc,
-                      cpu, jiffies - started);
-       }
-}
-
-int
-debug_spin_trylock(spinlock_t * lock, const char *base_file, int line_no)
-{
-       int ret;
-       if ((ret = !test_and_set_bit(0, lock))) {
-               lock->on_cpu = smp_processor_id();
-               lock->previous = __builtin_return_address(0);
-               lock->task = current;
-       } else {
-               lock->base_file = base_file;
-               lock->line_no = line_no;
-       }
-       return ret;
-}
-#endif /* CONFIG_DEBUG_SPINLOCK */
-\f
-#ifdef CONFIG_DEBUG_RWLOCK
-void _raw_write_lock(rwlock_t * lock)
-{
-       long regx, regy;
-       int stuck_lock, stuck_reader;
-       void *inline_pc = __builtin_return_address(0);
-
- try_again:
-
-       stuck_lock = 1<<30;
-       stuck_reader = 1<<30;
-
-       __asm__ __volatile__(
-       "1:     ldl_l   %1,%0\n"
-       "       blbs    %1,6f\n"
-       "       blt     %1,8f\n"
-       "       mov     1,%1\n"
-       "       stl_c   %1,%0\n"
-       "       beq     %1,6f\n"
-       "4:     mb\n"
-       ".subsection 2\n"
-       "6:     blt     %3,4b   # debug\n"
-       "       subl    %3,1,%3 # debug\n"
-       "       ldl     %1,%0\n"
-       "       blbs    %1,6b\n"
-       "8:     blt     %4,4b   # debug\n"
-       "       subl    %4,1,%4 # debug\n"
-       "       ldl     %1,%0\n"
-       "       blt     %1,8b\n"
-       "       br      1b\n"
-       ".previous"
-       : "=m" (*(volatile int *)lock), "=&r" (regx), "=&r" (regy),
-         "=&r" (stuck_lock), "=&r" (stuck_reader)
-       : "m" (*(volatile int *)lock), "3" (stuck_lock), "4" (stuck_reader) : "memory");
-
-       if (stuck_lock < 0) {
-               printk(KERN_WARNING "write_lock stuck at %p\n", inline_pc);
-               goto try_again;
-       }
-       if (stuck_reader < 0) {
-               printk(KERN_WARNING "write_lock stuck on readers at %p\n",
-                      inline_pc);
-               goto try_again;
-       }
-}
-
-void _raw_read_lock(rwlock_t * lock)
-{
-       long regx;
-       int stuck_lock;
-       void *inline_pc = __builtin_return_address(0);
-
- try_again:
-
-       stuck_lock = 1<<30;
-
-       __asm__ __volatile__(
-       "1:     ldl_l   %1,%0;"
-       "       blbs    %1,6f;"
-       "       subl    %1,2,%1;"
-       "       stl_c   %1,%0;"
-       "       beq     %1,6f;"
-       "4:     mb\n"
-       ".subsection 2\n"
-       "6:     ldl     %1,%0;"
-       "       blt     %2,4b   # debug\n"
-       "       subl    %2,1,%2 # debug\n"
-       "       blbs    %1,6b;"
-       "       br      1b\n"
-       ".previous"
-       : "=m" (*(volatile int *)lock), "=&r" (regx), "=&r" (stuck_lock)
-       : "m" (*(volatile int *)lock), "2" (stuck_lock) : "memory");
-
-       if (stuck_lock < 0) {
-               printk(KERN_WARNING "read_lock stuck at %p\n", inline_pc);
-               goto try_again;
-       }
-}
-#endif /* CONFIG_DEBUG_RWLOCK */
index 0f2899b4159dfb4f12c4cc2d69f411dd9d88e0af..11fff042aa817d022139947e82093d24a3e567a4 100644 (file)
@@ -326,8 +326,8 @@ config SMP
          processor machines. On a single processor machine, the kernel will
          run faster if you say N here.
 
-         See also the <file:Documentation/smp.tex>,
-         <file:Documentation/smp.txt>, <file:Documentation/i386/IO-APIC.txt>,
+         See also the <file:Documentation/smp.txt>,
+         <file:Documentation/i386/IO-APIC.txt>,
          <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
          <http://www.linuxdoc.org/docs.html#howto>.
 
index 45a5709eaaa459ef876ef6e406013f94880b395e..5d3acff8c596e0c0670d7d956379d860e9591ba7 100644 (file)
@@ -53,7 +53,7 @@ config DEBUG_LL
        bool "Kernel low-level debugging functions"
        depends on DEBUG_KERNEL
        help
-         Say Y here to include definitions of printascii, printchar, printhex
+         Say Y here to include definitions of printascii, printch, printhex
          in the kernel.  This is helpful if you are debugging code that
          executes before the console is initialized.
 
index 688a595598c8a5cdc7b42a9b55a2b5ffe1868a17..d3a04c2a2c857192c7d861549acdfa320e78f681 100644 (file)
@@ -91,7 +91,7 @@ EXPORT_SYMBOL(read_scoop_reg);
 EXPORT_SYMBOL(write_scoop_reg);
 
 #ifdef CONFIG_PM
-static int scoop_suspend(struct device *dev, uint32_t state, uint32_t level)
+static int scoop_suspend(struct device *dev, pm_message_t state, uint32_t level)
 {
        if (level == SUSPEND_POWER_DOWN) {
                struct scoop_dev *sdev = dev_get_drvdata(dev);
index db07ce42b3b2c6873f9b767389ae766d0f89ad6f..949ec4427f2165efdd2a7c04a66d391a76c7da6e 100644 (file)
@@ -10,7 +10,7 @@
  *  This file is included twice in entry-common.S
  */
 #ifndef NR_syscalls
-#define NR_syscalls 320
+#define NR_syscalls 328
 #else
 
 __syscall_start:
@@ -333,6 +333,9 @@ __syscall_start:
                .long   sys_inotify_init
                .long   sys_inotify_add_watch
                .long   sys_inotify_rm_watch
+               .long   sys_mbind_wrapper
+/* 320 */      .long   sys_get_mempolicy
+               .long   sys_set_mempolicy
 __syscall_end:
 
                .rept   NR_syscalls - (__syscall_end - __syscall_start) / 4
index 6281d488ac975d225aa10f536b5849db1b528956..db302c6e53439ddd553d42992109deccc3105741 100644 (file)
@@ -269,6 +269,10 @@ sys_arm_fadvise64_64_wrapper:
                str     r5, [sp, #4]            @ push r5 to stack
                b       sys_arm_fadvise64_64
 
+sys_mbind_wrapper:
+               str     r5, [sp, #4]
+               b       sys_mbind
+
 /*
  * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
  * offset, we return EINVAL.
index 29185acdd9e1ea7e6eb7a8f9182e18bac37f35b1..07b5dd453565402d9ec8c69b691681bae063be5a 100644 (file)
@@ -131,27 +131,12 @@ static struct platform_device corgits_device = {
 /*
  * MMC/SD Device
  *
- * The card detect interrupt isn't debounced so we delay it by HZ/4
+ * The card detect interrupt isn't debounced so we delay it by 250ms
  * to give the card a chance to fully insert/eject.
  */
-static struct mmc_detect {
-       struct timer_list detect_timer;
-       void *devid;
-} mmc_detect;
+static struct pxamci_platform_data corgi_mci_platform_data;
 
-static void mmc_detect_callback(unsigned long data)
-{
-       mmc_detect_change(mmc_detect.devid);
-}
-
-static irqreturn_t corgi_mmc_detect_int(int irq, void *devid, struct pt_regs *regs)
-{
-       mmc_detect.devid=devid;
-       mod_timer(&mmc_detect.detect_timer, jiffies + HZ/4);
-       return IRQ_HANDLED;
-}
-
-static int corgi_mci_init(struct device *dev, irqreturn_t (*unused_detect_int)(int, void *, struct pt_regs *), void *data)
+static int corgi_mci_init(struct device *dev, irqreturn_t (*corgi_detect_int)(int, void *, struct pt_regs *), void *data)
 {
        int err;
 
@@ -161,11 +146,9 @@ static int corgi_mci_init(struct device *dev, irqreturn_t (*unused_detect_int)(i
        pxa_gpio_mode(CORGI_GPIO_nSD_DETECT | GPIO_IN);
        pxa_gpio_mode(CORGI_GPIO_SD_PWR | GPIO_OUT);
 
-       init_timer(&mmc_detect.detect_timer);
-       mmc_detect.detect_timer.function = mmc_detect_callback;
-       mmc_detect.detect_timer.data = (unsigned long) &mmc_detect;
+       corgi_mci_platform_data.detect_delay = msecs_to_jiffies(250);
 
-       err = request_irq(CORGI_IRQ_GPIO_nSD_DETECT, corgi_mmc_detect_int, SA_INTERRUPT,
+       err = request_irq(CORGI_IRQ_GPIO_nSD_DETECT, corgi_detect_int, SA_INTERRUPT,
                             "MMC card detect", data);
        if (err) {
                printk(KERN_ERR "corgi_mci_init: MMC/SD: can't request MMC card detect IRQ\n");
@@ -198,7 +181,6 @@ static int corgi_mci_get_ro(struct device *dev)
 static void corgi_mci_exit(struct device *dev, void *data)
 {
        free_irq(CORGI_IRQ_GPIO_nSD_DETECT, data);
-       del_timer(&mmc_detect.detect_timer);
 }
 
 static struct pxamci_platform_data corgi_mci_platform_data = {
index 44d886c745ecc26063d88fed02a6d4929461337f..7c74fe0dc93c17b8a8bbb3483d0da623589d2449 100644 (file)
@@ -304,12 +304,6 @@ ret_point:
        call    restore_processor_state
        ret
 
-ENTRY(do_suspend_lowlevel_s4bios)
-       call save_processor_state
-       call save_registers
-       call acpi_enter_sleep_state_s4bios
-       ret
-
 ALIGN
 # saved registers
 saved_gdt:     .long   0,0
index 46ce9b248f5510088ca7fd1765fcfdd86ea44bd9..9ad43be9a01f0ebc2102f7d1fef5a11aa0146c71 100644 (file)
@@ -151,7 +151,7 @@ static char __devinit *table_lookup_model(struct cpuinfo_x86 *c)
 }
 
 
-void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
+static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
 {
        char *v = c->x86_vendor_id;
        int i;
index 1efdc76ae96dfcb4c4a9a0b4c89d843dd7398eeb..35d3ce26a544aed8d45b3428444d0211c3593e3c 100644 (file)
@@ -573,8 +573,7 @@ static int balanced_irq(void *unused)
        }
 
        for ( ; ; ) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               time_remaining = schedule_timeout(time_remaining);
+               time_remaining = schedule_timeout_interruptible(time_remaining);
                try_to_freeze();
                if (time_after(jiffies,
                                prev_balance_time+balanced_irq_interval)) {
index 5e4893d2b9f272b4c1120f3812cebef501f4164f..c70cd2a083045ea7ff12069a1fce5e9444cd6257 100644 (file)
@@ -1330,8 +1330,7 @@ void __cpu_die(unsigned int cpu)
                        printk ("CPU %d is now offline\n", cpu);
                        return;
                }
-               current->state = TASK_UNINTERRUPTIBLE;
-               schedule_timeout(HZ/10);
+               msleep(100);
        }
        printk(KERN_ERR "CPU %u didn't die...\n", cpu);
 }
index 13b9c62cbbb45ce1d95357a1784afea6f8e3db89..4710195b6b748e6cc69534f0f268e957a38edf58 100644 (file)
@@ -144,12 +144,7 @@ SECTIONS
        *(.exitcall.exit)
        }
 
-  /* Stabs debugging sections.  */
-  .stab 0 : { *(.stab) }
-  .stabstr 0 : { *(.stabstr) }
-  .stab.excl 0 : { *(.stab.excl) }
-  .stab.exclstr 0 : { *(.stab.exclstr) }
-  .stab.index 0 : { *(.stab.index) }
-  .stab.indexstr 0 : { *(.stab.indexstr) }
-  .comment 0 : { *(.comment) }
+  STABS_DEBUG
+
+  DWARF_DEBUG
 }
index 4ebbf3974381af2b7669c98c198d670c8e460fa7..8d484204a3ff670cd97b7946d766ed877208eeff 100644 (file)
@@ -491,12 +491,7 @@ init_handler_platform (pal_min_state_area_t *ms,
        unw_init_from_interruption(&info, current, pt, sw);
        ia64_do_show_stack(&info, NULL);
 
-#ifdef CONFIG_SMP
-       /* read_trylock() would be handy... */
-       if (!tasklist_lock.write_lock)
-               read_lock(&tasklist_lock);
-#endif
-       {
+       if (read_trylock(&tasklist_lock)) {
                struct task_struct *g, *t;
                do_each_thread (g, t) {
                        if (t == current)
@@ -506,10 +501,6 @@ init_handler_platform (pal_min_state_area_t *ms,
                        show_stack(t, NULL);
                } while_each_thread (g, t);
        }
-#ifdef CONFIG_SMP
-       if (!tasklist_lock.write_lock)
-               read_unlock(&tasklist_lock);
-#endif
 
        printk("\nINIT dump complete.  Please reboot now.\n");
        while (1);                      /* hang city if no debugger */
index 7622d4ec5f08d2ff995b9a05e10fdefb63630fbe..1ef3987ebc6ac28e31cd1242e9c6e07b34b8ebb2 100644 (file)
@@ -242,8 +242,8 @@ config SMP
          Y to "Enhanced Real Time Clock Support", below. The "Advanced Power
          Management" code will be disabled if you say Y here.
 
-         See also the <file:Documentation/smp.tex>,
-         <file:Documentation/smp.txt> and the SMP-HOWTO available at
+         See also the <file:Documentation/smp.txt>,
+         and the SMP-HOWTO available at
          <http://www.linuxdoc.org/docs.html#howto>.
 
          If you don't know what to do here, say N.
index 48b187f2d2b350d5cd5da50ba71e1a9b8664a8c6..a4576ac7e8702c27e787f8f0c7d87d0c61285f26 100644 (file)
@@ -892,7 +892,6 @@ unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num,
        int try)
 {
        spinlock_t *ipilock;
-       unsigned long flags = 0;
        volatile unsigned long *ipicr_addr;
        unsigned long ipicr_val;
        unsigned long my_physid_mask;
@@ -916,50 +915,27 @@ unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num,
         * write IPICRi (send IPIi)
         * unlock ipi_lock[i]
         */
+       spin_lock(ipilock);
        __asm__ __volatile__ (
-               ";; LOCK ipi_lock[i]            \n\t"
+               ";; CHECK IPICRi == 0           \n\t"
                ".fillinsn                      \n"
                "1:                             \n\t"
-               "mvfc   %1, psw                 \n\t"
-               "clrpsw #0x40 -> nop            \n\t"
-               DCACHE_CLEAR("r4", "r5", "%2")
-               "lock   r4, @%2                 \n\t"
-               "addi   r4, #-1                 \n\t"
-               "unlock r4, @%2                 \n\t"
-               "mvtc   %1, psw                 \n\t"
-               "bnez   r4, 2f                  \n\t"
-               LOCK_SECTION_START(".balign 4 \n\t")
-               ".fillinsn                      \n"
-               "2:                             \n\t"
-               "ld     r4, @%2                 \n\t"
-               "blez   r4, 2b                  \n\t"
+               "ld     %0, @%1                 \n\t"
+               "and    %0, %4                  \n\t"
+               "beqz   %0, 2f                  \n\t"
+               "bnez   %3, 3f                  \n\t"
                "bra    1b                      \n\t"
-               LOCK_SECTION_END
-               ";; CHECK IPICRi == 0           \n\t"
-               ".fillinsn                      \n"
-               "3:                             \n\t"
-               "ld     %0, @%3                 \n\t"
-               "and    %0, %6                  \n\t"
-               "beqz   %0, 4f                  \n\t"
-               "bnez   %5, 5f                  \n\t"
-               "bra    3b                      \n\t"
                ";; WRITE IPICRi (send IPIi)    \n\t"
                ".fillinsn                      \n"
-               "4:                             \n\t"
-               "st     %4, @%3                 \n\t"
-               ";; UNLOCK ipi_lock[i]          \n\t"
+               "2:                             \n\t"
+               "st     %2, @%1                 \n\t"
                ".fillinsn                      \n"
-               "5:                             \n\t"
-               "ldi    r4, #1                  \n\t"
-               "st     r4, @%2                 \n\t"
+               "3:                             \n\t"
                : "=&r"(ipicr_val)
-               : "r"(flags), "r"(&ipilock->slock), "r"(ipicr_addr),
-                 "r"(mask), "r"(try), "r"(my_physid_mask)
-               : "memory", "r4"
-#ifdef CONFIG_CHIP_M32700_TS1
-               , "r5"
-#endif /* CONFIG_CHIP_M32700_TS1 */
+               : "r"(ipicr_addr), "r"(mask), "r"(try), "r"(my_physid_mask)
+               : "memory"
        );
+       spin_unlock(ipilock);
 
        return ipicr_val;
 }
index 4c114ae21793eb8a4fde93da5b8ba529c2222395..eff89322ba50669e77ca3b001f9139e50fde016f 100644 (file)
@@ -440,18 +440,6 @@ struct irix5_siginfo {
        } stuff;
 };
 
-static inline unsigned long timespectojiffies(struct timespec *value)
-{
-       unsigned long sec = (unsigned) value->tv_sec;
-       long nsec = value->tv_nsec;
-
-       if (sec > (LONG_MAX / HZ))
-               return LONG_MAX;
-       nsec += 1000000000L / HZ - 1;
-       nsec /= 1000000000L / HZ;
-       return HZ * sec + nsec;
-}
-
 asmlinkage int irix_sigpoll_sys(unsigned long *set, struct irix5_siginfo *info,
                                struct timespec *tp)
 {
@@ -489,14 +477,13 @@ asmlinkage int irix_sigpoll_sys(unsigned long *set, struct irix5_siginfo *info,
                        error = -EINVAL;
                        goto out;
                }
-               expire = timespectojiffies(tp)+(tp->tv_sec||tp->tv_nsec);
+               expire = timespec_to_jiffies(tp) + (tp->tv_sec||tp->tv_nsec);
        }
 
        while(1) {
                long tmp = 0;
 
-               current->state = TASK_INTERRUPTIBLE;
-               expire = schedule_timeout(expire);
+               expire = schedule_timeout_interruptible(expire);
 
                for (i=0; i<=4; i++)
                        tmp |= (current->pending.signal.sig[i] & kset.sig[i]);
index b4659546271789c6498967d40f81bb68e7283f83..4de155699c4fa2d748267365c923b6a1eaf2a58a 100644 (file)
@@ -1032,8 +1032,7 @@ bad:
 
 asmlinkage int irix_sginap(int ticks)
 {
-       current->state = TASK_INTERRUPTIBLE;
-       schedule_timeout(ticks);
+       schedule_timeout_interruptible(ticks);
        return 0;
 }
 
index e44e9579bd36dcce1f2f1a80e5594f95788612e4..fd82c84a93b726d318411fe8c0967c71b12e8c50 100644 (file)
  * has a cmpxchg, and where atomic->value is an int holding
  * the value of the atomic (i.e. the high bits aren't used
  * for a lock or anything like that).
- *
- * N.B. ATOMIC_DEC_AND_LOCK gets defined in include/linux/spinlock.h
- * if spinlocks are empty and thus atomic_dec_and_lock is defined
- * to be atomic_dec_and_test - in that case we don't need it
- * defined here as well.
  */
-
-#ifndef ATOMIC_DEC_AND_LOCK
 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
        int counter;
@@ -52,4 +45,3 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 }
 
 EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif /* ATOMIC_DEC_AND_LOCK */
index 7bf705676297a424af3f35dda2787224bce46f7c..5f2e6904d14aecb897773538b1eadd001fde93f5 100644 (file)
@@ -5,5 +5,3 @@
 lib-y  := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o
 
 obj-y  := iomap.o
-
-lib-$(CONFIG_SMP) += debuglocks.o
index 2de182f6fe8a9eb2077404de6ed71bd65d00010a..90f400b10282276b12eb5fba16ea14c6c61ff449 100644 (file)
@@ -13,8 +13,8 @@
 #include <asm/atomic.h>
 
 #ifdef CONFIG_SMP
-spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
-       [0 ... (ATOMIC_HASH_SIZE-1)]  = SPIN_LOCK_UNLOCKED
+raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
+       [0 ... (ATOMIC_HASH_SIZE-1)]  = __RAW_SPIN_LOCK_UNLOCKED
 };
 #endif
 
diff --git a/arch/parisc/lib/debuglocks.c b/arch/parisc/lib/debuglocks.c
deleted file mode 100644 (file)
index 1b33fe6..0000000
+++ /dev/null
@@ -1,277 +0,0 @@
-/* 
- *    Debugging versions of SMP locking primitives.
- *
- *    Copyright (C) 2004 Thibaut VARENE <varenet@parisc-linux.org>
- *
- *    Some code stollen from alpha & sparc64 ;)
- *
- *    This program is free software; you can redistribute it and/or modify
- *    it under the terms of the GNU General Public License as published by
- *    the Free Software Foundation; either version 2 of the License, or
- *    (at your option) any later version.
- *
- *    This program is distributed in the hope that it will be useful,
- *    but WITHOUT ANY WARRANTY; without even the implied warranty of
- *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *    GNU General Public License for more details.
- *
- *    You should have received a copy of the GNU General Public License
- *    along with this program; if not, write to the Free Software
- *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- *    We use pdc_printf() throughout the file for all output messages, to avoid
- *    losing messages because of disabled interrupts. Since we're using these
- *    messages for debugging purposes, it makes sense not to send them to the
- *    linux console.
- */
-
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/hardirq.h>     /* in_interrupt() */
-#include <asm/system.h>
-#include <asm/hardirq.h>       /* in_interrupt() */
-#include <asm/pdc.h>
-
-#undef INIT_STUCK
-#define INIT_STUCK 1L << 30
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-
-
-void _dbg_spin_lock(spinlock_t * lock, const char *base_file, int line_no)
-{
-       volatile unsigned int *a;
-       long stuck = INIT_STUCK;
-       void *inline_pc = __builtin_return_address(0);
-       unsigned long started = jiffies;
-       int printed = 0;
-       int cpu = smp_processor_id();
-
-try_again:
-
-       /* Do the actual locking */
-       /* <T-Bone> ggg: we can't get stuck on the outter loop?
-        * <ggg> T-Bone: We can hit the outer loop
-        *      alot if multiple CPUs are constantly racing for a lock
-        *      and the backplane is NOT fair about which CPU sees
-        *      the update first. But it won't hang since every failed
-        *      attempt will drop us back into the inner loop and
-        *      decrement `stuck'.
-        * <ggg> K-class and some of the others are NOT fair in the HW
-        *      implementation so we could see false positives.
-        *      But fixing the lock contention is easier than
-        *      fixing the HW to be fair.
-        * <tausq> __ldcw() returns 1 if we get the lock; otherwise we
-        *      spin until the value of the lock changes, or we time out.
-        */
-       mb();
-       a = __ldcw_align(lock);
-       while (stuck && (__ldcw(a) == 0))
-               while ((*a == 0) && --stuck);
-       mb();
-
-       if (unlikely(stuck <= 0)) {
-               pdc_printf(
-                       "%s:%d: spin_lock(%s/%p) stuck in %s at %p(%d)"
-                       " owned by %s:%d in %s at %p(%d)\n",
-                       base_file, line_no, lock->module, lock,
-                       current->comm, inline_pc, cpu,
-                       lock->bfile, lock->bline, lock->task->comm,
-                       lock->previous, lock->oncpu);
-               stuck = INIT_STUCK;
-               printed = 1;
-               goto try_again;
-       }
-
-       /* Exiting.  Got the lock.  */
-       lock->oncpu = cpu;
-       lock->previous = inline_pc;
-       lock->task = current;
-       lock->bfile = (char *)base_file;
-       lock->bline = line_no;
-
-       if (unlikely(printed)) {
-               pdc_printf(
-                       "%s:%d: spin_lock grabbed in %s at %p(%d) %ld ticks\n",
-                       base_file, line_no, current->comm, inline_pc,
-                       cpu, jiffies - started);
-       }
-}
-
-void _dbg_spin_unlock(spinlock_t * lock, const char *base_file, int line_no)
-{
-       CHECK_LOCK(lock);
-       volatile unsigned int *a;
-       mb();
-       a = __ldcw_align(lock);
-       if (unlikely((*a != 0) && lock->babble)) {
-               lock->babble--;
-               pdc_printf(
-                       "%s:%d: spin_unlock(%s:%p) not locked\n",
-                       base_file, line_no, lock->module, lock);
-       }
-       *a = 1; 
-       mb();
-}
-
-int _dbg_spin_trylock(spinlock_t * lock, const char *base_file, int line_no)
-{
-       int ret;
-       volatile unsigned int *a;
-       mb();
-       a = __ldcw_align(lock);
-       ret = (__ldcw(a) != 0);
-       mb();
-       if (ret) {
-               lock->oncpu = smp_processor_id();
-               lock->previous = __builtin_return_address(0);
-               lock->task = current;
-       } else {
-               lock->bfile = (char *)base_file;
-               lock->bline = line_no;
-       }
-       return ret;
-}
-
-#endif /* CONFIG_DEBUG_SPINLOCK */
-
-#ifdef CONFIG_DEBUG_RWLOCK
-
-/* Interrupts trouble detailed explanation, thx Grant:
- *
- * o writer (wants to modify data) attempts to acquire the rwlock
- * o He gets the write lock.
- * o Interupts are still enabled, we take an interrupt with the
- *   write still holding the lock.
- * o interrupt handler tries to acquire the rwlock for read.
- * o deadlock since the writer can't release it at this point.
- * 
- * In general, any use of spinlocks that competes between "base"
- * level and interrupt level code will risk deadlock. Interrupts
- * need to be disabled in the base level routines to avoid it.
- * Or more precisely, only the IRQ the base level routine
- * is competing with for the lock.  But it's more efficient/faster
- * to just disable all interrupts on that CPU to guarantee
- * once it gets the lock it can release it quickly too.
- */
-void _dbg_write_lock(rwlock_t *rw, const char *bfile, int bline)
-{
-       void *inline_pc = __builtin_return_address(0);
-       unsigned long started = jiffies;
-       long stuck = INIT_STUCK;
-       int printed = 0;
-       int cpu = smp_processor_id();
-       
-       if(unlikely(in_interrupt())) {  /* acquiring write lock in interrupt context, bad idea */
-               pdc_printf("write_lock caller: %s:%d, IRQs enabled,\n", bfile, bline);
-               BUG();
-       }
-
-       /* Note: if interrupts are disabled (which is most likely), the printk
-       will never show on the console. We might need a polling method to flush
-       the dmesg buffer anyhow. */
-       
-retry:
-       _raw_spin_lock(&rw->lock);
-
-       if(rw->counter != 0) {
-               /* this basically never happens */
-               _raw_spin_unlock(&rw->lock);
-               
-               stuck--;
-               if ((unlikely(stuck <= 0)) && (rw->counter < 0)) {
-                       pdc_printf(
-                               "%s:%d: write_lock stuck on writer"
-                               " in %s at %p(%d) %ld ticks\n",
-                               bfile, bline, current->comm, inline_pc,
-                               cpu, jiffies - started);
-                       stuck = INIT_STUCK;
-                       printed = 1;
-               }
-               else if (unlikely(stuck <= 0)) {
-                       pdc_printf(
-                               "%s:%d: write_lock stuck on reader"
-                               " in %s at %p(%d) %ld ticks\n",
-                               bfile, bline, current->comm, inline_pc,
-                               cpu, jiffies - started);
-                       stuck = INIT_STUCK;
-                       printed = 1;
-               }
-               
-               while(rw->counter != 0);
-
-               goto retry;
-       }
-
-       /* got it.  now leave without unlocking */
-       rw->counter = -1; /* remember we are locked */
-
-       if (unlikely(printed)) {
-               pdc_printf(
-                       "%s:%d: write_lock grabbed in %s at %p(%d) %ld ticks\n",
-                       bfile, bline, current->comm, inline_pc,
-                       cpu, jiffies - started);
-       }
-}
-
-int _dbg_write_trylock(rwlock_t *rw, const char *bfile, int bline)
-{
-#if 0
-       void *inline_pc = __builtin_return_address(0);
-       int cpu = smp_processor_id();
-#endif
-       
-       if(unlikely(in_interrupt())) {  /* acquiring write lock in interrupt context, bad idea */
-               pdc_printf("write_lock caller: %s:%d, IRQs enabled,\n", bfile, bline);
-               BUG();
-       }
-
-       /* Note: if interrupts are disabled (which is most likely), the printk
-       will never show on the console. We might need a polling method to flush
-       the dmesg buffer anyhow. */
-       
-       _raw_spin_lock(&rw->lock);
-
-       if(rw->counter != 0) {
-               /* this basically never happens */
-               _raw_spin_unlock(&rw->lock);
-               return 0;
-       }
-
-       /* got it.  now leave without unlocking */
-       rw->counter = -1; /* remember we are locked */
-#if 0
-       pdc_printf("%s:%d: try write_lock grabbed in %s at %p(%d)\n",
-                  bfile, bline, current->comm, inline_pc, cpu);
-#endif
-       return 1;
-}
-
-void _dbg_read_lock(rwlock_t * rw, const char *bfile, int bline)
-{
-#if 0
-       void *inline_pc = __builtin_return_address(0);
-       unsigned long started = jiffies;
-       int cpu = smp_processor_id();
-#endif
-       unsigned long flags;
-
-       local_irq_save(flags);
-       _raw_spin_lock(&rw->lock); 
-
-       rw->counter++;
-#if 0
-       pdc_printf(
-               "%s:%d: read_lock grabbed in %s at %p(%d) %ld ticks\n",
-               bfile, bline, current->comm, inline_pc,
-               cpu, jiffies - started);
-#endif
-       _raw_spin_unlock(&rw->lock);
-       local_irq_restore(flags);
-}
-
-#endif /* CONFIG_DEBUG_RWLOCK */
index e3f1ce33e64251a62502bfead0eab6102f6ee4fd..347ea284140b28be32262dbb0427af539845940b 100644 (file)
@@ -265,6 +265,15 @@ config PPC601_SYNC_FIX
 
          If in doubt, say Y here.
 
+config HOTPLUG_CPU
+       bool "Support for enabling/disabling CPUs"
+       depends on SMP && HOTPLUG && EXPERIMENTAL && PPC_PMAC
+       ---help---
+         Say Y here to be able to disable and re-enable individual
+         CPUs at runtime on SMP machines.
+
+         Say N if you are unsure.
+
 source arch/ppc/platforms/4xx/Kconfig
 source arch/ppc/platforms/85xx/Kconfig
 
index 4b3fe395ffa4d402641fcbef698a4ed895845637..6dd7b50e06691f5b7ffb8601cd3a04eab18fe70e 100644 (file)
@@ -21,13 +21,14 @@ CC          := $(CC) -m32
 endif
 
 LDFLAGS_vmlinux        := -Ttext $(KERNELLOAD) -Bstatic
-CPPFLAGS       += -Iarch/$(ARCH) -Iinclude3
+# The -Iarch/$(ARCH)/include is temporary while we are merging
+CPPFLAGS       += -Iarch/$(ARCH) -Iarch/$(ARCH)/include
 AFLAGS         += -Iarch/$(ARCH)
 CFLAGS         += -Iarch/$(ARCH) -msoft-float -pipe \
                -ffixed-r2 -mmultiple
 CPP            = $(CC) -E $(CFLAGS)
 # Temporary hack until we have migrated to asm-powerpc
-LINUXINCLUDE    += -Iinclude3
+LINUXINCLUDE    += -Iarch/$(ARCH)/include
 
 CHECKFLAGS     += -D__powerpc__
 
@@ -103,15 +104,16 @@ endef
 
 archclean:
        $(Q)$(MAKE) $(clean)=arch/ppc/boot
-       $(Q)rm -rf include3
+       # Temporary hack until we have migrated to asm-powerpc
+       $(Q)rm -rf arch/$(ARCH)/include
 
 prepare: checkbin
 
 # Temporary hack until we have migrated to asm-powerpc
-include/asm: include3/asm
-include3/asm:
-       $(Q)if [ ! -d include3 ]; then mkdir -p include3; fi
-       $(Q)ln -fsn $(srctree)/include/asm-powerpc include3/asm
+include/asm: arch/$(ARCH)/include/asm
+arch/$(ARCH)/include/asm:
+       $(Q)if [ ! -d arch/$(ARCH)/include ]; then mkdir -p arch/$(ARCH)/include; fi
+       $(Q)ln -fsn $(srctree)/include/asm-powerpc arch/$(ARCH)/include/asm
 
 # Use the file '.tmp_gas_check' for binutils tests, as gas won't output
 # to stdout and these checks are run even on install targets.
index 1f37b7eafac215b912a965f1ee9cada8e714abeb..ba396438ede34ff1c7b555f95b7c1dee9e3429d3 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/config.h>
 #include <asm/processor.h>
 #include <asm/page.h>
-#include <asm/ppc_asm.h>
 #include <asm/cputable.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
index 304589aebdbcc4f9eaca7083c70627778327919b..7e4fbb65372405d5d22482197518436859f3e6f9 100644 (file)
@@ -14,7 +14,6 @@
 #include <asm/page.h>
 #include <asm/ppc_asm.h>
 #include <asm/cputable.h>
-#include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/cache.h>
 
index e0c631cf96b039429a24c609284c9531bef076c8..b566d982806c937ba8ca7e55c01b007a32777477 100644 (file)
@@ -393,7 +393,7 @@ EXPORT_SYMBOL(__dma_sync);
  * __dma_sync_page() implementation for systems using highmem.
  * In this case, each page of a buffer must be kmapped/kunmapped
  * in order to have a virtual address for __dma_sync(). This must
- * not sleep so kmap_atmomic()/kunmap_atomic() are used.
+ * not sleep so kmap_atomic()/kunmap_atomic() are used.
  *
  * Note: yes, it is possible and correct to have a buffer extend
  * beyond the first page.
index 55daf1210f3228050c5841f968f1b1fabfe44d4c..1960fb8c259c6e268c270ac716c3498302279784 100644 (file)
@@ -1023,23 +1023,21 @@ __secondary_start_gemini:
         andc    r4,r4,r3
         mtspr   SPRN_HID0,r4
         sync
-        bl      gemini_prom_init
         b       __secondary_start
 #endif /* CONFIG_GEMINI */
-       .globl  __secondary_start_psurge
-__secondary_start_psurge:
-       li      r24,1                   /* cpu # */
-       b       __secondary_start_psurge99
-       .globl  __secondary_start_psurge2
-__secondary_start_psurge2:
-       li      r24,2                   /* cpu # */
-       b       __secondary_start_psurge99
-       .globl  __secondary_start_psurge3
-__secondary_start_psurge3:
-       li      r24,3                   /* cpu # */
-       b       __secondary_start_psurge99
-__secondary_start_psurge99:
-       /* we come in here with IR=0 and DR=1, and DBAT 0
+
+       .globl  __secondary_start_pmac_0
+__secondary_start_pmac_0:
+       /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
+       li      r24,0
+       b       1f
+       li      r24,1
+       b       1f
+       li      r24,2
+       b       1f
+       li      r24,3
+1:
+       /* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
           set to map the 0xf0000000 - 0xffffffff region */
        mfmsr   r0
        rlwinm  r0,r0,0,28,26           /* clear DR (0x10) */
index 53547b6de45bd491e96c0444907b74b36915592c..fba29c876b62ab843d06e308ef2590c9f3999f85 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/ptrace.h>
 #include <linux/slab.h>
 #include <linux/sysctl.h>
+#include <linux/cpu.h>
 
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
@@ -35,6 +36,7 @@
 void default_idle(void)
 {
        void (*powersave)(void);
+       int cpu = smp_processor_id();
 
        powersave = ppc_md.power_save;
 
@@ -44,7 +46,7 @@ void default_idle(void)
 #ifdef CONFIG_SMP
                else {
                        set_thread_flag(TIF_POLLING_NRFLAG);
-                       while (!need_resched())
+                       while (!need_resched() && !cpu_is_offline(cpu))
                                barrier();
                        clear_thread_flag(TIF_POLLING_NRFLAG);
                }
@@ -52,6 +54,8 @@ void default_idle(void)
        }
        if (need_resched())
                schedule();
+       if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
+               cpu_die();
 }
 
 /*
index e70b587b9e514ae39b80ca70a2601ea7776fdb62..726fe7ce1747ae004570f9c6e05766907a89d4bb 100644 (file)
@@ -45,6 +45,7 @@ cpumask_t cpu_online_map;
 cpumask_t cpu_possible_map;
 int smp_hw_index[NR_CPUS];
 struct thread_info *secondary_ti;
+static struct task_struct *idle_tasks[NR_CPUS];
 
 EXPORT_SYMBOL(cpu_online_map);
 EXPORT_SYMBOL(cpu_possible_map);
@@ -286,7 +287,8 @@ static void __devinit smp_store_cpu_info(int id)
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-       int num_cpus, i;
+       int num_cpus, i, cpu;
+       struct task_struct *p;
 
        /* Fixup boot cpu */
         smp_store_cpu_info(smp_processor_id());
@@ -308,6 +310,17 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 
        if (smp_ops->space_timers)
                smp_ops->space_timers(num_cpus);
+
+       for_each_cpu(cpu) {
+               if (cpu == smp_processor_id())
+                       continue;
+               /* create a process for the processor */
+               p = fork_idle(cpu);
+               if (IS_ERR(p))
+                       panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
+               p->thread_info->cpu = cpu;
+               idle_tasks[cpu] = p;
+       }
 }
 
 void __devinit smp_prepare_boot_cpu(void)
@@ -334,12 +347,17 @@ int __devinit start_secondary(void *unused)
        set_dec(tb_ticks_per_jiffy);
        cpu_callin_map[cpu] = 1;
 
-       printk("CPU %i done callin...\n", cpu);
+       printk("CPU %d done callin...\n", cpu);
        smp_ops->setup_cpu(cpu);
-       printk("CPU %i done setup...\n", cpu);
-       local_irq_enable();
+       printk("CPU %d done setup...\n", cpu);
        smp_ops->take_timebase();
-       printk("CPU %i done timebase take...\n", cpu);
+       printk("CPU %d done timebase take...\n", cpu);
+
+       spin_lock(&call_lock);
+       cpu_set(cpu, cpu_online_map);
+       spin_unlock(&call_lock);
+
+       local_irq_enable();
 
        cpu_idle();
        return 0;
@@ -347,17 +365,11 @@ int __devinit start_secondary(void *unused)
 
 int __cpu_up(unsigned int cpu)
 {
-       struct task_struct *p;
        char buf[32];
        int c;
 
-       /* create a process for the processor */
-       /* only regs.msr is actually used, and 0 is OK for it */
-       p = fork_idle(cpu);
-       if (IS_ERR(p))
-               panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
-       secondary_ti = p->thread_info;
-       p->thread_info->cpu = cpu;
+       secondary_ti = idle_tasks[cpu]->thread_info;
+       mb();
 
        /*
         * There was a cache flush loop here to flush the cache
@@ -389,7 +401,11 @@ int __cpu_up(unsigned int cpu)
        printk("Processor %d found.\n", cpu);
 
        smp_ops->give_timebase();
-       cpu_set(cpu, cpu_online_map);
+
+       /* Wait until cpu puts itself in the online map */
+       while (!cpu_online(cpu))
+               cpu_relax();
+
        return 0;
 }
 
index 8356d544fa60dc3cbda579cc43c075637bddbaf3..961ede87be72254f421d6f2f269668bb19f54070 100644 (file)
@@ -118,6 +118,28 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
        info.si_code = code;
        info.si_addr = (void __user *) addr;
        force_sig_info(signr, &info, current);
+
+       /*
+        * Init gets no signals that it doesn't have a handler for.
+        * That's all very well, but if it has caused a synchronous
+        * exception and we ignore the resulting signal, it will just
+        * generate the same exception over and over again and we get
+        * nowhere.  Better to kill it and let the kernel panic.
+        */
+       if (current->pid == 1) {
+               __sighandler_t handler;
+
+               spin_lock_irq(&current->sighand->siglock);
+               handler = current->sighand->action[signr-1].sa.sa_handler;
+               spin_unlock_irq(&current->sighand->siglock);
+               if (handler == SIG_DFL) {
+                       /* init has generated a synchronous exception
+                          and it doesn't have a handler for the signal */
+                       printk(KERN_CRIT "init has generated signal %d "
+                              "but has no handler for it\n", signr);
+                       do_exit(signr);
+               }
+       }
 }
 
 /*
index 1c380e67d4357389e29429af3b6e4f3f77f8f935..f1e1fb4144f034a9be9197cffab83570efa9790d 100644 (file)
@@ -4,6 +4,5 @@
 
 obj-y                  := checksum.o string.o strcase.o dec_and_lock.o div64.o
 
-obj-$(CONFIG_SMP)      += locks.o
 obj-$(CONFIG_8xx)      += rheap.o
 obj-$(CONFIG_CPM2)     += rheap.o
index 4ee888070d914618ebdc8cc6abcd3dc7e8530351..b18f0d9a00fc70a8225260af62df6492b1f948c8 100644 (file)
  * has a cmpxchg, and where atomic->value is an int holding
  * the value of the atomic (i.e. the high bits aren't used
  * for a lock or anything like that).
- *
- * N.B. ATOMIC_DEC_AND_LOCK gets defined in include/linux/spinlock.h
- * if spinlocks are empty and thus atomic_dec_and_lock is defined
- * to be atomic_dec_and_test - in that case we don't need it
- * defined here as well.
  */
-
-#ifndef ATOMIC_DEC_AND_LOCK
 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
        int counter;
@@ -43,4 +36,3 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 }
 
 EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif /* ATOMIC_DEC_AND_LOCK */
index 57d9930843ac28f9d5c726071c8368fc8a69942b..ee5e9f25baf98d7c567361739ee30864bb8c980d 100644 (file)
@@ -278,11 +278,7 @@ bad_area:
 
        /* User mode accesses cause a SIGSEGV */
        if (user_mode(regs)) {
-               info.si_signo = SIGSEGV;
-               info.si_errno = 0;
-               info.si_code = code;
-               info.si_addr = (void __user *) address;
-               force_sig_info(SIGSEGV, &info, current);
+               _exception(SIGSEGV, regs, code, address);
                return 0;
        }
 
index 8d67adc7692580d8207597a2245d13025e057ea2..88419c77ac439cd72b341c59dab10201efee9240 100644 (file)
@@ -161,6 +161,8 @@ _GLOBAL(low_sleep_handler)
        addi r3,r3,sleep_storage@l
        stw r5,0(r3)
 
+       .globl  low_cpu_die
+low_cpu_die:
        /* Flush & disable all caches */
        bl      flush_disable_caches
 
index 8e049dab4e6321f28354e0e821fe0569f36bac35..794a23994b82a09185b82ad3b797859ae96cda83 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/spinlock.h>
 #include <linux/errno.h>
 #include <linux/hardirq.h>
+#include <linux/cpu.h>
 
 #include <asm/ptrace.h>
 #include <asm/atomic.h>
@@ -55,9 +56,7 @@
  * Powersurge (old powermac SMP) support.
  */
 
-extern void __secondary_start_psurge(void);
-extern void __secondary_start_psurge2(void);   /* Temporary horrible hack */
-extern void __secondary_start_psurge3(void);   /* Temporary horrible hack */
+extern void __secondary_start_pmac_0(void);
 
 /* Addresses for powersurge registers */
 #define HAMMERHEAD_BASE                0xf8000000
@@ -119,7 +118,7 @@ static volatile int sec_tb_reset = 0;
 static unsigned int pri_tb_hi, pri_tb_lo;
 static unsigned int pri_tb_stamp;
 
-static void __init core99_init_caches(int cpu)
+static void __devinit core99_init_caches(int cpu)
 {
        if (!cpu_has_feature(CPU_FTR_L2CR))
                return;
@@ -346,7 +345,7 @@ static int __init smp_psurge_probe(void)
 
 static void __init smp_psurge_kick_cpu(int nr)
 {
-       void (*start)(void) = __secondary_start_psurge;
+       unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8;
        unsigned long a;
 
        /* may need to flush here if secondary bats aren't setup */
@@ -356,17 +355,7 @@ static void __init smp_psurge_kick_cpu(int nr)
 
        if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu", 0x353);
 
-       /* setup entry point of secondary processor */
-       switch (nr) {
-       case 2:
-               start = __secondary_start_psurge2;
-               break;
-       case 3:
-               start = __secondary_start_psurge3;
-               break;
-       }
-
-       out_be32(psurge_start, __pa(start));
+       out_be32(psurge_start, start);
        mb();
 
        psurge_set_ipi(nr);
@@ -500,14 +489,14 @@ static int __init smp_core99_probe(void)
        return ncpus;
 }
 
-static void __init smp_core99_kick_cpu(int nr)
+static void __devinit smp_core99_kick_cpu(int nr)
 {
        unsigned long save_vector, new_vector;
        unsigned long flags;
 
        volatile unsigned long *vector
                 = ((volatile unsigned long *)(KERNELBASE+0x100));
-       if (nr < 1 || nr > 3)
+       if (nr < 0 || nr > 3)
                return;
        if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu", 0x346);
 
@@ -518,19 +507,9 @@ static void __init smp_core99_kick_cpu(int nr)
        save_vector = *vector;
 
        /* Setup fake reset vector that does    
-        *   b __secondary_start_psurge - KERNELBASE
+        *   b __secondary_start_pmac_0 + nr*8 - KERNELBASE
         */
-       switch(nr) {
-               case 1:
-                       new_vector = (unsigned long)__secondary_start_psurge;
-                       break;
-               case 2:
-                       new_vector = (unsigned long)__secondary_start_psurge2;
-                       break;
-               case 3:
-                       new_vector = (unsigned long)__secondary_start_psurge3;
-                       break;
-       }
+       new_vector = (unsigned long) __secondary_start_pmac_0 + nr * 8;
        *vector = 0x48000002 + new_vector - KERNELBASE;
 
        /* flush data cache and inval instruction cache */
@@ -554,7 +533,7 @@ static void __init smp_core99_kick_cpu(int nr)
        if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);
 }
 
-static void __init smp_core99_setup_cpu(int cpu_nr)
+static void __devinit smp_core99_setup_cpu(int cpu_nr)
 {
        /* Setup L2/L3 */
        if (cpu_nr != 0)
@@ -668,3 +647,47 @@ struct smp_ops_t core99_smp_ops __pmacdata = {
        .give_timebase  = smp_core99_give_timebase,
        .take_timebase  = smp_core99_take_timebase,
 };
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+int __cpu_disable(void)
+{
+       cpu_clear(smp_processor_id(), cpu_online_map);
+
+       /* XXX reset cpu affinity here */
+       openpic_set_priority(0xf);
+       asm volatile("mtdec %0" : : "r" (0x7fffffff));
+       mb();
+       udelay(20);
+       asm volatile("mtdec %0" : : "r" (0x7fffffff));
+       return 0;
+}
+
+extern void low_cpu_die(void) __attribute__((noreturn)); /* in pmac_sleep.S */
+static int cpu_dead[NR_CPUS];
+
+void cpu_die(void)
+{
+       local_irq_disable();
+       cpu_dead[smp_processor_id()] = 1;
+       mb();
+       low_cpu_die();
+}
+
+void __cpu_die(unsigned int cpu)
+{
+       int timeout;
+
+       timeout = 1000;
+       while (!cpu_dead[cpu]) {
+               if (--timeout == 0) {
+                       printk("CPU %u refused to die!\n", cpu);
+                       break;
+               }
+               msleep(1);
+       }
+       cpu_callin_map[cpu] = 0;
+       cpu_dead[cpu] = 0;
+}
+
+#endif
index 774709807538d593d6bc637040e112ce93fbbadc..75fe8eb1069359c3d52cbc3445d0ebd5b2658f26 100644 (file)
@@ -90,14 +90,10 @@ cpc700_mask_and_ack_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type cpc700_pic = {
-       "CPC700 PIC",
-       NULL,
-       NULL,
-       cpc700_unmask_irq,
-       cpc700_mask_irq,
-       cpc700_mask_and_ack_irq,
-       NULL,
-       NULL
+       .typename = "CPC700 PIC",
+       .enable = cpc700_unmask_irq,
+       .disable = cpc700_mask_irq,
+       .ack = cpc700_mask_and_ack_irq,
 };
 
 __init static void
index b9391e65014124f946d83704ef88e54cf58f3a8d..5c7908c20e43e8e2b3b023917e297296df1f99f1 100644 (file)
@@ -129,14 +129,11 @@ static void i8259_end_irq(unsigned int irq)
 }
 
 struct hw_interrupt_type i8259_pic = {
-       " i8259    ",
-       NULL,
-       NULL,
-       i8259_unmask_irq,
-       i8259_mask_irq,
-       i8259_mask_and_ack_irq,
-       i8259_end_irq,
-       NULL
+       .typename = " i8259    ",
+       .enable = i8259_unmask_irq,
+       .disable = i8259_mask_irq,
+       .ack = i8259_mask_and_ack_irq,
+       .end = i8259_end_irq,
 };
 
 static struct resource pic1_iores = {
index 7e272c51a4973e17e3deab27cb1f1754e086cfc7..2e0ea92144f6b3f15a759dc9f9983f0b49f711b7 100644 (file)
@@ -82,13 +82,11 @@ static void openpic2_end_irq(unsigned int irq_nr);
 static void openpic2_ack_irq(unsigned int irq_nr);
 
 struct hw_interrupt_type open_pic2 = {
-       " OpenPIC2 ",
-       NULL,
-       NULL,
-       openpic2_enable_irq,
-       openpic2_disable_irq,
-       openpic2_ack_irq,
-       openpic2_end_irq,
+       .typename = " OpenPIC2 ",
+       .enable = openpic2_enable_irq,
+       .disable = openpic2_disable_irq,
+       .ack = openpic2_ack_irq,
+       .end = openpic2_end_irq,
 };
 
 /*
index 06cb0af2a58d8d779791b6fdb8389d4e568dee8f..ce4d1deb86e930ee75c4228a44bd4e418a1d743f 100644 (file)
@@ -34,13 +34,10 @@ static void ppc403_aic_disable(unsigned int irq);
 static void ppc403_aic_disable_and_ack(unsigned int irq);
 
 static struct hw_interrupt_type ppc403_aic = {
-       "403GC AIC",
-       NULL,
-       NULL,
-       ppc403_aic_enable,
-       ppc403_aic_disable,
-       ppc403_aic_disable_and_ack,
-       0
+       .typename = "403GC AIC",
+       .enable = ppc403_aic_enable,
+       .disable = ppc403_aic_disable,
+       .ack = ppc403_aic_disable_and_ack,
 };
 
 int
index e0bd66f0847a5ff022bfdd2acd36de41ff60482c..2cbcad278cefc50cc273eeec464d29201e16f0f5 100644 (file)
@@ -79,14 +79,11 @@ xilinx_intc_end(unsigned int irq)
 }
 
 static struct hw_interrupt_type xilinx_intc = {
-       "Xilinx Interrupt Controller",
-       NULL,
-       NULL,
-       xilinx_intc_enable,
-       xilinx_intc_disable,
-       xilinx_intc_disable_and_ack,
-       xilinx_intc_end,
-       0
+       .typename = "Xilinx Interrupt Controller",
+       .enable = xilinx_intc_enable,
+       .disable = xilinx_intc_disable,
+       .ack = xilinx_intc_disable_and_ack,
+       .end = xilinx_intc_end,
 };
 
 int
index 0a23aeacba885cf80e697d9c0a6e21317af5bc10..17d2c1eac3b8567e8cd711f3dd72a0c1bf037a82 100644 (file)
@@ -56,7 +56,7 @@ LDFLAGS_vmlinux       := -Bstatic -e $(KERNELLOAD) -Ttext $(KERNELLOAD)
 CFLAGS         += -msoft-float -pipe -mminimal-toc -mtraceback=none \
                   -mcall-aixdesc
 # Temporary hack until we have migrated to asm-powerpc
-CPPFLAGS       += -Iinclude3
+CPPFLAGS       += -Iarch/$(ARCH)/include
 
 GCC_VERSION     := $(call cc-version)
 GCC_BROKEN_VEC := $(shell if [ $(GCC_VERSION) -lt 0400 ] ; then echo "y"; fi ;)
@@ -115,14 +115,15 @@ all: $(KBUILD_IMAGE)
 
 archclean:
        $(Q)$(MAKE) $(clean)=$(boot)
-       $(Q)rm -rf include3
+       # Temporary hack until we have migrated to asm-powerpc
+       $(Q)rm -rf arch/$(ARCH)/include
 
 
 # Temporary hack until we have migrated to asm-powerpc
-include/asm: include3/asm
-include3/asm:
-       $(Q)if [ ! -d include3 ]; then mkdir -p include3; fi;
-       $(Q)ln -fsn $(srctree)/include/asm-powerpc include3/asm
+include/asm: arch/$(ARCH)/include/asm
+arch/$(ARCH)/include/asm:
+       $(Q)if [ ! -d arch/$(ARCH)/include ]; then mkdir -p arch/$(ARCH)/include; fi
+       $(Q)ln -fsn $(srctree)/include/asm-powerpc arch/$(ARCH)/include/asm
 
 define archhelp
   echo  '  zImage.vmode        - Compressed kernel image (arch/$(ARCH)/boot/zImage.vmode)'
index bfce609e8e6be9850f53439f8c88456ff0743ca3..1fb673c511ff504972e0568fbfaf35d88ab290a6 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/config.h>
 #include <asm/processor.h>
 #include <asm/page.h>
-#include <asm/ppc_asm.h>
 #include <asm/cputable.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
index 6e8d8591708cbbc37e7a9dfe8fd9911774899705..7b9d4da5cf926e12ff1c9811242af1bc7f899662 100644 (file)
  * has a cmpxchg, and where atomic->value is an int holding
  * the value of the atomic (i.e. the high bits aren't used
  * for a lock or anything like that).
- *
- * N.B. ATOMIC_DEC_AND_LOCK gets defined in include/linux/spinlock.h
- * if spinlocks are empty and thus atomic_dec_and_lock is defined
- * to be atomic_dec_and_test - in that case we don't need it
- * defined here as well.
  */
-
-#ifndef ATOMIC_DEC_AND_LOCK
 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
        int counter;
@@ -52,4 +45,3 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 }
 
 EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif /* ATOMIC_DEC_AND_LOCK */
index ef70ef91abe26dfc9d21606a34f80d3d5f428892..033643ab69e0bfac4903c4544fbfc52f70ba8972 100644 (file)
 /* waiting for a spinlock... */
 #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
 
-void __spin_yield(spinlock_t *lock)
+void __spin_yield(raw_spinlock_t *lock)
 {
        unsigned int lock_value, holder_cpu, yield_count;
        struct paca_struct *holder_paca;
 
-       lock_value = lock->lock;
+       lock_value = lock->slock;
        if (lock_value == 0)
                return;
        holder_cpu = lock_value & 0xffff;
@@ -38,7 +38,7 @@ void __spin_yield(spinlock_t *lock)
        if ((yield_count & 1) == 0)
                return;         /* virtual cpu is currently running */
        rmb();
-       if (lock->lock != lock_value)
+       if (lock->slock != lock_value)
                return;         /* something has changed */
 #ifdef CONFIG_PPC_ISERIES
        HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
@@ -54,7 +54,7 @@ void __spin_yield(spinlock_t *lock)
  * This turns out to be the same for read and write locks, since
  * we only know the holder if it is write-locked.
  */
-void __rw_yield(rwlock_t *rw)
+void __rw_yield(raw_rwlock_t *rw)
 {
        int lock_value;
        unsigned int holder_cpu, yield_count;
@@ -82,9 +82,9 @@ void __rw_yield(rwlock_t *rw)
 }
 #endif
 
-void spin_unlock_wait(spinlock_t *lock)
+void __raw_spin_unlock_wait(raw_spinlock_t *lock)
 {
-       while (lock->lock) {
+       while (lock->slock) {
                HMT_low();
                if (SHARED_PROCESSOR)
                        __spin_yield(lock);
@@ -92,4 +92,4 @@ void spin_unlock_wait(spinlock_t *lock)
        HMT_medium();
 }
 
-EXPORT_SYMBOL(spin_unlock_wait);
+EXPORT_SYMBOL(__raw_spin_unlock_wait);
index 888b5596c195d5123a24b60099b46f2c6f3634aa..2dc14e9c83276818f95afa4d054e1f836fec4774 100644 (file)
@@ -36,7 +36,7 @@ _diag44(void)
 }
 
 void
-_raw_spin_lock_wait(spinlock_t *lp, unsigned int pc)
+_raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc)
 {
        int count = spin_retry;
 
@@ -53,7 +53,7 @@ _raw_spin_lock_wait(spinlock_t *lp, unsigned int pc)
 EXPORT_SYMBOL(_raw_spin_lock_wait);
 
 int
-_raw_spin_trylock_retry(spinlock_t *lp, unsigned int pc)
+_raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc)
 {
        int count = spin_retry;
 
@@ -67,7 +67,7 @@ _raw_spin_trylock_retry(spinlock_t *lp, unsigned int pc)
 EXPORT_SYMBOL(_raw_spin_trylock_retry);
 
 void
-_raw_read_lock_wait(rwlock_t *rw)
+_raw_read_lock_wait(raw_rwlock_t *rw)
 {
        unsigned int old;
        int count = spin_retry;
@@ -86,7 +86,7 @@ _raw_read_lock_wait(rwlock_t *rw)
 EXPORT_SYMBOL(_raw_read_lock_wait);
 
 int
-_raw_read_trylock_retry(rwlock_t *rw)
+_raw_read_trylock_retry(raw_rwlock_t *rw)
 {
        unsigned int old;
        int count = spin_retry;
@@ -102,7 +102,7 @@ _raw_read_trylock_retry(rwlock_t *rw)
 EXPORT_SYMBOL(_raw_read_trylock_retry);
 
 void
-_raw_write_lock_wait(rwlock_t *rw)
+_raw_write_lock_wait(raw_rwlock_t *rw)
 {
        int count = spin_retry;
 
@@ -119,7 +119,7 @@ _raw_write_lock_wait(rwlock_t *rw)
 EXPORT_SYMBOL(_raw_write_lock_wait);
 
 int
-_raw_write_trylock_retry(rwlock_t *rw)
+_raw_write_trylock_retry(raw_rwlock_t *rw)
 {
        int count = spin_retry;
 
index ca91bb0f1f5c0dc9305259c833e62612d2b9f061..c0973f8d57bae3c6060a9f4e67ddf9a11adfef21 100644 (file)
@@ -37,13 +37,13 @@ static void end_maskreg_irq(unsigned int irq);
 
 /* hw_interrupt_type */
 static struct hw_interrupt_type maskreg_irq_type = {
-       " Mask Register",
-       startup_maskreg_irq,
-       shutdown_maskreg_irq,
-       enable_maskreg_irq,
-       disable_maskreg_irq,
-       mask_and_ack_maskreg,
-       end_maskreg_irq
+       .typename = " Mask Register",
+       .startup = startup_maskreg_irq,
+       .shutdown = shutdown_maskreg_irq,
+       .enable = enable_maskreg_irq,
+       .disable = disable_maskreg_irq,
+       .ack = mask_and_ack_maskreg,
+       .end = end_maskreg_irq
 };
 
 /* actual implementatin */
index 697144de741947ec191ce31835e3e05bbd0f723a..a9fde781b21acb8f5c185a5bd1fd881b9fc90efb 100644 (file)
@@ -37,10 +37,6 @@ static u8 bigsur_iomap_lo_shift[BIGSUR_IOMAP_LO_NMAP];
 static u32 bigsur_iomap_hi[BIGSUR_IOMAP_HI_NMAP];
 static u8 bigsur_iomap_hi_shift[BIGSUR_IOMAP_HI_NMAP];
 
-#ifndef MAX
-#define MAX(a,b)    ((a)>(b)?(a):(b))
-#endif
-
 void bigsur_port_map(u32 baseport, u32 nports, u32 addr, u8 shift)
 {
        u32 port, endport = baseport + nports;
@@ -57,7 +53,7 @@ void bigsur_port_map(u32 baseport, u32 nports, u32 addr, u8 shift)
                addr += (1<<(BIGSUR_IOMAP_LO_SHIFT));
        }
 
-       for (port = MAX(baseport, BIGSUR_IOMAP_LO_THRESH) ;
+       for (port = max_t(u32, baseport, BIGSUR_IOMAP_LO_THRESH);
             port < endport && port < BIGSUR_IOMAP_HI_THRESH ;
             port += (1<<BIGSUR_IOMAP_HI_SHIFT)) {
                pr_debug("    maphi[0x%x] = 0x%08x\n", port, addr);
@@ -80,7 +76,7 @@ void bigsur_port_unmap(u32 baseport, u32 nports)
                bigsur_iomap_lo[port>>BIGSUR_IOMAP_LO_SHIFT] = 0;
        }
 
-       for (port = MAX(baseport, BIGSUR_IOMAP_LO_THRESH) ;
+       for (port = max_t(u32, baseport, BIGSUR_IOMAP_LO_THRESH);
             port < endport && port < BIGSUR_IOMAP_HI_THRESH ;
             port += (1<<BIGSUR_IOMAP_HI_SHIFT)) {
                bigsur_iomap_hi[port>>BIGSUR_IOMAP_HI_SHIFT] = 0;
index c188fc32dc9a8c4f1207d9e59cbc83a3b9178ff9..6ddbcc77244da4d81653baa2c6bb2a6bb838debd 100644 (file)
@@ -228,23 +228,23 @@ static void shutdown_bigsur_irq(unsigned int irq)
 
 /* Define the IRQ structures for the L1 and L2 IRQ types */
 static struct hw_interrupt_type bigsur_l1irq_type = {
-        "BigSur-CPLD-Level1-IRQ",
-        startup_bigsur_irq,
-        shutdown_bigsur_irq,
-        enable_bigsur_l1irq,
-        disable_bigsur_l1irq,
-        mask_and_ack_bigsur,
-        end_bigsur_irq
+       .typename  = "BigSur-CPLD-Level1-IRQ",
+       .startup = startup_bigsur_irq,
+       .shutdown = shutdown_bigsur_irq,
+       .enable = enable_bigsur_l1irq,
+       .disable = disable_bigsur_l1irq,
+       .ack = mask_and_ack_bigsur,
+       .end = end_bigsur_irq
 };
 
 static struct hw_interrupt_type bigsur_l2irq_type = {
-        "BigSur-CPLD-Level2-IRQ",
-        startup_bigsur_irq,
-        shutdown_bigsur_irq,
-        enable_bigsur_l2irq,
-        disable_bigsur_l2irq,
-        mask_and_ack_bigsur,
-        end_bigsur_irq
+       .typename  = "BigSur-CPLD-Level2-IRQ",
+       .startup = startup_bigsur_irq,
+       .shutdown  =shutdown_bigsur_irq,
+       .enable = enable_bigsur_l2irq,
+       .disable = disable_bigsur_l2irq,
+       .ack = mask_and_ack_bigsur,
+       .end = end_bigsur_irq
 };
 
 
index fa6cfe5a20a70512d43c5a6d7c0f4949710122d2..d1da0d844567f66156bff7632a4c73012e466fbe 100644 (file)
@@ -83,13 +83,13 @@ static void shutdown_cqreek_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type cqreek_irq_type = {
-       "CqREEK-IRQ",
-       startup_cqreek_irq,
-       shutdown_cqreek_irq,
-       enable_cqreek_irq,
-       disable_cqreek_irq,
-       mask_and_ack_cqreek,
-       end_cqreek_irq
+       .typename = "CqREEK-IRQ",
+       .startup = startup_cqreek_irq,
+       .shutdown = shutdown_cqreek_irq,
+       .enable = enable_cqreek_irq,
+       .disable = disable_cqreek_irq,
+       .ack = mask_and_ack_cqreek,
+       .end = end_cqreek_irq
 };
 
 int cqreek_has_ide, cqreek_has_isa;
index acd58489970f4c04ed585fc4b612ebddc9e1cc48..52d0ba39031b7215a1a9c62dc3af49461c6f817f 100644 (file)
@@ -39,13 +39,13 @@ static unsigned int startup_harp_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type harp_irq_type = {
-       "Harp-IRQ",
-       startup_harp_irq,
-       shutdown_harp_irq,
-       enable_harp_irq,
-       disable_harp_irq,
-       mask_and_ack_harp,
-       end_harp_irq
+       .typename = "Harp-IRQ",
+       .startup = startup_harp_irq,
+       .shutdown = shutdown_harp_irq,
+       .enable = enable_harp_irq,
+       .disable = disable_harp_irq,
+       .ack = mask_and_ack_harp,
+       .end = end_harp_irq
 };
 
 static void disable_harp_irq(unsigned int irq)
index 23adc6be71e728e6fc8a6bba7efdfaf1abe9061f..715e8feb3a68783cc8d1b4375beb632db0f21a7d 100644 (file)
@@ -86,13 +86,13 @@ static unsigned int startup_od_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type od_irq_type = {
-       "Overdrive-IRQ",
-       startup_od_irq,
-       shutdown_od_irq,
-       enable_od_irq,
-       disable_od_irq,
-       mask_and_ack_od,
-       end_od_irq
+       .typename = "Overdrive-IRQ",
+       .startup = startup_od_irq,
+       .shutdown = shutdown_od_irq,
+       .enable = enable_od_irq,
+       .disable = disable_od_irq,
+       .ack = mask_and_ack_od,
+       .end = end_od_irq
 };
 
 static void disable_od_irq(unsigned int irq)
index a7921f67a35fcc42bc352e9b89ca8f7e93e6a326..ed4c5b50ea45edc29cb77c57f0731dfeda251f47 100644 (file)
@@ -74,13 +74,13 @@ static void end_hs7751rvoip_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type hs7751rvoip_irq_type = {
-       "HS7751RVoIP IRQ",
-       startup_hs7751rvoip_irq,
-       shutdown_hs7751rvoip_irq,
-       enable_hs7751rvoip_irq,
-       disable_hs7751rvoip_irq,
-       ack_hs7751rvoip_irq,
-       end_hs7751rvoip_irq,
+       .typename =  "HS7751RVoIP IRQ",
+       .startup = startup_hs7751rvoip_irq,
+       .shutdown = shutdown_hs7751rvoip_irq,
+       .enable = enable_hs7751rvoip_irq,
+       .disable = disable_hs7751rvoip_irq,
+       .ack = ack_hs7751rvoip_irq,
+       .end = end_hs7751rvoip_irq,
 };
 
 static void make_hs7751rvoip_irq(unsigned int irq)
index 95717f4f1e2d024f6c4ec890c584c10df64996ab..d36c9374aed1dffc9daf9980a5a8439f3da2dd00 100644 (file)
@@ -88,13 +88,13 @@ static void end_rts7751r2d_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type rts7751r2d_irq_type = {
-       "RTS7751R2D IRQ",
-       startup_rts7751r2d_irq,
-       shutdown_rts7751r2d_irq,
-       enable_rts7751r2d_irq,
-       disable_rts7751r2d_irq,
-       ack_rts7751r2d_irq,
-       end_rts7751r2d_irq,
+       .typename = "RTS7751R2D IRQ",
+       .startup = startup_rts7751r2d_irq,
+       .shutdown = shutdown_rts7751r2d_irq,
+       .enable = enable_rts7751r2d_irq,
+       .disable = disable_rts7751r2d_irq,
+       .ack = ack_rts7751r2d_irq,
+       .end = end_rts7751r2d_irq,
 };
 
 static void make_rts7751r2d_irq(unsigned int irq)
index 5675a4134eee8e13cc749a4765eb3cf38bfc773a..7a2eb10edb563c4b108b35204528b536aa1d6e45 100644 (file)
@@ -35,13 +35,13 @@ static void end_systemh_irq(unsigned int irq);
 
 /* hw_interrupt_type */
 static struct hw_interrupt_type systemh_irq_type = {
-       " SystemH Register",
-       startup_systemh_irq,
-       shutdown_systemh_irq,
-       enable_systemh_irq,
-       disable_systemh_irq,
-       mask_and_ack_systemh,
-       end_systemh_irq
+       .typename = " SystemH Register",
+       .startup = startup_systemh_irq,
+       .shutdown = shutdown_systemh_irq,
+       .enable = enable_systemh_irq,
+       .disable = disable_systemh_irq,
+       .ack = mask_and_ack_systemh,
+       .end = end_systemh_irq
 };
 
 static unsigned int startup_systemh_irq(unsigned int irq)
index 1298883eca4bef862b0884fcdd7ae009f63ade23..1395c1e65da4546948548918473108d42ff0b4f6 100644 (file)
@@ -83,13 +83,13 @@ static unsigned int startup_microdev_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type microdev_irq_type = {
-       "MicroDev-IRQ",
-       startup_microdev_irq,
-       shutdown_microdev_irq,
-       enable_microdev_irq,
-       disable_microdev_irq,
-       mask_and_ack_microdev,
-       end_microdev_irq
+       .typename = "MicroDev-IRQ",
+       .startup = startup_microdev_irq,
+       .shutdown = shutdown_microdev_irq,
+       .enable = enable_microdev_irq,
+       .disable = disable_microdev_irq,
+       .ack = mask_and_ack_microdev,
+       .end = end_microdev_irq
 };
 
 static void disable_microdev_irq(unsigned int irq)
index 99ac709c550e920feeb2c241b3c739859bc7a08d..84cb142def0b150f7a463c9dc8f3ac79224b45d1 100644 (file)
@@ -48,10 +48,6 @@ static unsigned char hd64465_iomap_lo_shift[HD64465_IOMAP_LO_NMAP];
 static unsigned long   hd64465_iomap_hi[HD64465_IOMAP_HI_NMAP];
 static unsigned char   hd64465_iomap_hi_shift[HD64465_IOMAP_HI_NMAP];
 
-#ifndef MAX
-#define MAX(a,b)    ((a)>(b)?(a):(b))
-#endif
-
 #define PORT2ADDR(x) (sh_mv.mv_isa_port2addr(x))
 
 void hd64465_port_map(unsigned short baseport, unsigned int nports,
@@ -71,7 +67,7 @@ void hd64465_port_map(unsigned short baseport, unsigned int nports,
            addr += (1<<(HD64465_IOMAP_LO_SHIFT));
        }
 
-       for (port = MAX(baseport, HD64465_IOMAP_LO_THRESH) ;
+       for (port = max_t(unsigned int, baseport, HD64465_IOMAP_LO_THRESH);
             port < endport && port < HD64465_IOMAP_HI_THRESH ;
             port += (1<<HD64465_IOMAP_HI_SHIFT)) {
            DPRINTK("    maphi[0x%x] = 0x%08lx\n", port, addr);
@@ -95,7 +91,7 @@ void hd64465_port_unmap(unsigned short baseport, unsigned int nports)
            hd64465_iomap_lo[port>>HD64465_IOMAP_LO_SHIFT] = 0;
        }
 
-       for (port = MAX(baseport, HD64465_IOMAP_LO_THRESH) ;
+       for (port = max_t(unsigned int, baseport, HD64465_IOMAP_LO_THRESH);
             port < endport && port < HD64465_IOMAP_HI_THRESH ;
             port += (1<<HD64465_IOMAP_HI_SHIFT)) {
            hd64465_iomap_hi[port>>HD64465_IOMAP_HI_SHIFT] = 0;
index 3079234cb65b30d319d72ddf349e6cb85ffcf418..1b6ac523b4584e7cf5518f992cb340d293859484 100644 (file)
@@ -87,13 +87,13 @@ static void shutdown_voyagergx_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type voyagergx_irq_type = {
-       "VOYAGERGX-IRQ",
-       startup_voyagergx_irq,
-       shutdown_voyagergx_irq,
-       enable_voyagergx_irq,
-       disable_voyagergx_irq,
-       mask_and_ack_voyagergx,
-       end_voyagergx_irq,
+       .typename = "VOYAGERGX-IRQ",
+       .startup = startup_voyagergx_irq,
+       .shutdown = shutdown_voyagergx_irq,
+       .enable = enable_voyagergx_irq,
+       .disable = disable_voyagergx_irq,
+       .ack = mask_and_ack_voyagergx,
+       .end = end_voyagergx_irq,
 };
 
 static irqreturn_t voyagergx_interrupt(int irq, void *dev_id, struct pt_regs *regs)
index f76901e732fb801a268fe05cacfa5efd796f231a..a963d00a971e6533f4a7eaacf6c0dc3122c86fc6 100644 (file)
@@ -46,13 +46,13 @@ static unsigned int startup_imask_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type imask_irq_type = {
-       "SR.IMASK",
-       startup_imask_irq,
-       shutdown_imask_irq,
-       enable_imask_irq,
-       disable_imask_irq,
-       mask_and_ack_imask,
-       end_imask_irq
+       .typename = "SR.IMASK",
+       .startup = startup_imask_irq,
+       .shutdown = shutdown_imask_irq,
+       .enable = enable_imask_irq,
+       .disable = disable_imask_irq,
+       .ack = mask_and_ack_imask,
+       .end = end_imask_irq
 };
 
 void static inline set_interrupt_registers(int ip)
index 7ea3d2d030e5581e847b5d2f9484e5bd41e9a739..71f92096132b3228e373981a267a0900b919d3e5 100644 (file)
@@ -48,13 +48,13 @@ static unsigned int startup_ipr_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type ipr_irq_type = {
-       "IPR-IRQ",
-       startup_ipr_irq,
-       shutdown_ipr_irq,
-       enable_ipr_irq,
-       disable_ipr_irq,
-       mask_and_ack_ipr,
-       end_ipr_irq
+       .typename = "IPR-IRQ",
+       .startup = startup_ipr_irq,
+       .shutdown = shutdown_ipr_irq,
+       .enable = enable_ipr_irq,
+       .disable = disable_ipr_irq,
+       .ack = mask_and_ack_ipr,
+       .end = end_ipr_irq
 };
 
 static void disable_ipr_irq(unsigned int irq)
@@ -142,13 +142,13 @@ static unsigned int startup_pint_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type pint_irq_type = {
-       "PINT-IRQ",
-       startup_pint_irq,
-       shutdown_pint_irq,
-       enable_pint_irq,
-       disable_pint_irq,
-       mask_and_ack_pint,
-       end_pint_irq
+       .typename = "PINT-IRQ",
+       .startup = startup_pint_irq,
+       .shutdown = shutdown_pint_irq,
+       .enable = enable_pint_irq,
+       .disable = disable_pint_irq,
+       .ack = mask_and_ack_pint,
+       .end = end_pint_irq
 };
 
 static void disable_pint_irq(unsigned int irq)
index 099ebbf897457a5b606c9f3acae891d0d717a6b8..f6b16ba01932f89929812c088a7d8808f2c5aa87 100644 (file)
@@ -48,13 +48,13 @@ static unsigned int startup_intc2_irq(unsigned int irq)
 }
 
 static struct hw_interrupt_type intc2_irq_type = {
-       "INTC2-IRQ",
-       startup_intc2_irq,
-       shutdown_intc2_irq,
-       enable_intc2_irq,
-       disable_intc2_irq,
-       mask_and_ack_intc2,
-       end_intc2_irq
+       .typename = "INTC2-IRQ",
+       .startup = startup_intc2_irq,
+       .shutdown = shutdown_intc2_irq,
+       .enable = enable_intc2_irq,
+       .disable = disable_intc2_irq,
+       .ack = mask_and_ack_intc2,
+       .end = end_intc2_irq
 };
 
 static void disable_intc2_irq(unsigned int irq)
index 43f88f3a78b0d219c5910560d0f6eefe0024b669..fc99bf4e362c9dfca4dada4ff45263baa3f46a08 100644 (file)
@@ -107,13 +107,13 @@ static void mask_and_ack_intc(unsigned int);
 static void end_intc_irq(unsigned int irq);
 
 static struct hw_interrupt_type intc_irq_type = {
-       "INTC",
-       startup_intc_irq,
-       shutdown_intc_irq,
-       enable_intc_irq,
-       disable_intc_irq,
-       mask_and_ack_intc,
-       end_intc_irq
+       .typename = "INTC",
+       .startup = startup_intc_irq,
+       .shutdown = shutdown_intc_irq,
+       .enable = enable_intc_irq,
+       .disable = disable_intc_irq,
+       .ack = mask_and_ack_intc,
+       .end = end_intc_irq
 };
 
 static int irlm;               /* IRL mode */
index 5d974a2b735a9ac354295ec6e4870cfb85049e12..f84809333624aae07d323e3b1577bc0ce5ddfb74 100644 (file)
@@ -114,17 +114,7 @@ DOT_ALIAS2(unsigned, urem, unsigned, unsigned)
 /* used by various drivers */
 EXPORT_SYMBOL(sparc_cpu_model);
 EXPORT_SYMBOL(kernel_thread);
-#ifdef CONFIG_DEBUG_SPINLOCK
 #ifdef CONFIG_SMP
-EXPORT_SYMBOL(_do_spin_lock);
-EXPORT_SYMBOL(_do_spin_unlock);
-EXPORT_SYMBOL(_spin_trylock);
-EXPORT_SYMBOL(_do_read_lock);
-EXPORT_SYMBOL(_do_read_unlock);
-EXPORT_SYMBOL(_do_write_lock);
-EXPORT_SYMBOL(_do_write_unlock);
-#endif
-#else
 // XXX find what uses (or used) these.
 EXPORT_SYMBOL(___rw_read_enter);
 EXPORT_SYMBOL(___rw_read_exit);
index 2296ff9dc47aa67c1af4c90a160317621aa59a35..fa5006946062134462f64f553d58d94f142f7501 100644 (file)
@@ -9,5 +9,3 @@ lib-y := mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o memcpy.o memset.o \
         strncpy_from_user.o divdi3.o udivdi3.o strlen_user.o \
         copy_user.o locks.o atomic.o atomic32.o bitops.o \
         lshrdi3.o ashldi3.o rwsem.o muldi3.o bitext.o
-
-lib-$(CONFIG_DEBUG_SPINLOCK) +=        debuglocks.o
diff --git a/arch/sparc/lib/debuglocks.c b/arch/sparc/lib/debuglocks.c
deleted file mode 100644 (file)
index fb18235..0000000
+++ /dev/null
@@ -1,202 +0,0 @@
-/* $Id: debuglocks.c,v 1.11 2001/09/20 00:35:31 davem Exp $
- * debuglocks.c: Debugging versions of SMP locking primitives.
- *
- * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1998-99 Anton Blanchard (anton@progsoc.uts.edu.au)
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/threads.h>     /* For NR_CPUS */
-#include <linux/spinlock.h>
-#include <asm/psr.h>
-#include <asm/system.h>
-
-#ifdef CONFIG_SMP
-
-/* Some notes on how these debugging routines work.  When a lock is acquired
- * an extra debugging member lock->owner_pc is set to the caller of the lock
- * acquisition routine.  Right before releasing a lock, the debugging program
- * counter is cleared to zero.
- *
- * Furthermore, since PC's are 4 byte aligned on Sparc, we stuff the CPU
- * number of the owner in the lowest two bits.
- */
-
-#define STORE_CALLER(A) __asm__ __volatile__("mov %%i7, %0" : "=r" (A));
-
-static inline void show(char *str, spinlock_t *lock, unsigned long caller)
-{
-       int cpu = smp_processor_id();
-
-       printk("%s(%p) CPU#%d stuck at %08lx, owner PC(%08lx):CPU(%lx)\n",str,
-               lock, cpu, caller, lock->owner_pc & ~3, lock->owner_pc & 3);
-}
-
-static inline void show_read(char *str, rwlock_t *lock, unsigned long caller)
-{
-       int cpu = smp_processor_id();
-
-       printk("%s(%p) CPU#%d stuck at %08lx, owner PC(%08lx):CPU(%lx)\n", str,
-               lock, cpu, caller, lock->owner_pc & ~3, lock->owner_pc & 3);
-}
-
-static inline void show_write(char *str, rwlock_t *lock, unsigned long caller)
-{
-       int cpu = smp_processor_id();
-       int i;
-
-       printk("%s(%p) CPU#%d stuck at %08lx, owner PC(%08lx):CPU(%lx)", str,
-               lock, cpu, caller, lock->owner_pc & ~3, lock->owner_pc & 3);
-
-       for(i = 0; i < NR_CPUS; i++)
-               printk(" reader[%d]=%08lx", i, lock->reader_pc[i]);
-
-       printk("\n");
-}
-
-#undef INIT_STUCK
-#define INIT_STUCK 100000000
-
-void _do_spin_lock(spinlock_t *lock, char *str)
-{
-       unsigned long caller;
-       unsigned long val;
-       int cpu = smp_processor_id();
-       int stuck = INIT_STUCK;
-
-       STORE_CALLER(caller);
-
-again:
-       __asm__ __volatile__("ldstub [%1], %0" : "=r" (val) : "r" (&(lock->lock)));
-       if(val) {
-               while(lock->lock) {
-                       if (!--stuck) {
-                               show(str, lock, caller);
-                               stuck = INIT_STUCK;
-                       }
-                       barrier();
-               }
-               goto again;
-       }
-       lock->owner_pc = (cpu & 3) | (caller & ~3);
-}
-
-int _spin_trylock(spinlock_t *lock)
-{
-       unsigned long val;
-       unsigned long caller;
-       int cpu = smp_processor_id();
-
-       STORE_CALLER(caller);
-
-       __asm__ __volatile__("ldstub [%1], %0" : "=r" (val) : "r" (&(lock->lock)));
-       if(!val) {
-               /* We got it, record our identity for debugging. */
-               lock->owner_pc = (cpu & 3) | (caller & ~3);
-       }
-       return val == 0;
-}
-
-void _do_spin_unlock(spinlock_t *lock)
-{
-       lock->owner_pc = 0;
-       barrier();
-       lock->lock = 0;
-}
-
-void _do_read_lock(rwlock_t *rw, char *str)
-{
-       unsigned long caller;
-       unsigned long val;
-       int cpu = smp_processor_id();
-       int stuck = INIT_STUCK;
-
-       STORE_CALLER(caller);
-
-wlock_again:
-       __asm__ __volatile__("ldstub [%1 + 3], %0" : "=r" (val) : "r" (&(rw->lock)));
-       if(val) {
-               while(rw->lock & 0xff) {
-                       if (!--stuck) {
-                               show_read(str, rw, caller);
-                               stuck = INIT_STUCK;
-                       }
-                       barrier();
-               }
-               goto wlock_again;
-       }
-
-       rw->reader_pc[cpu] = caller;
-       barrier();
-       rw->lock++;
-}
-
-void _do_read_unlock(rwlock_t *rw, char *str)
-{
-       unsigned long caller;
-       unsigned long val;
-       int cpu = smp_processor_id();
-       int stuck = INIT_STUCK;
-
-       STORE_CALLER(caller);
-
-wlock_again:
-       __asm__ __volatile__("ldstub [%1 + 3], %0" : "=r" (val) : "r" (&(rw->lock)));
-       if(val) {
-               while(rw->lock & 0xff) {
-                       if (!--stuck) {
-                               show_read(str, rw, caller);
-                               stuck = INIT_STUCK;
-                       }
-                       barrier();
-               }
-               goto wlock_again;
-       }
-
-       rw->reader_pc[cpu] = 0;
-       barrier();
-       rw->lock -= 0x1ff;
-}
-
-void _do_write_lock(rwlock_t *rw, char *str)
-{
-       unsigned long caller;
-       unsigned long val;
-       int cpu = smp_processor_id();
-       int stuck = INIT_STUCK;
-
-       STORE_CALLER(caller);
-
-wlock_again:
-       __asm__ __volatile__("ldstub [%1 + 3], %0" : "=r" (val) : "r" (&(rw->lock)));
-       if(val) {
-wlock_wait:
-               while(rw->lock) {
-                       if (!--stuck) {
-                               show_write(str, rw, caller);
-                               stuck = INIT_STUCK;
-                       }
-                       barrier();
-               }
-               goto wlock_again;
-       }
-
-       if (rw->lock & ~0xff) {
-               *(((unsigned char *)&rw->lock)+3) = 0;
-               barrier();
-               goto wlock_wait;
-       }
-
-       barrier();
-       rw->owner_pc = (cpu & 3) | (caller & ~3);
-}
-
-void _do_write_unlock(rwlock_t *rw)
-{
-       rw->owner_pc = 0;
-       barrier();
-       rw->lock = 0;
-}
-
-#endif /* SMP */
index 66255434128a2e6ebf6b144d5d030003fe4ebaca..7d10b03970919c63b6d1c1dd138b203f6fa42568 100644 (file)
@@ -607,11 +607,6 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
        struct thread_info *t = p->thread_info;
        char *child_trap_frame;
 
-#ifdef CONFIG_DEBUG_SPINLOCK
-       p->thread.smp_lock_count = 0;
-       p->thread.smp_lock_pc = 0;
-#endif
-
        /* Calculate offset to stack_frame & pt_regs */
        child_trap_frame = ((char *)t) + (THREAD_SIZE - (TRACEREG_SZ+STACKFRAME_SZ));
        memcpy(child_trap_frame, (((struct sparc_stackf *)regs)-1), (TRACEREG_SZ+STACKFRAME_SZ));
index 7d9a0f6c437dcc60dba88b30afe33af1dafcc550..cbb5e59824e569f68757dc220baed881159b9b54 100644 (file)
@@ -115,17 +115,12 @@ EXPORT_PER_CPU_SYMBOL(__cpu_data);
 
 /* used by various drivers */
 #ifdef CONFIG_SMP
-#ifndef CONFIG_DEBUG_SPINLOCK
 /* Out of line rw-locking implementation. */
 EXPORT_SYMBOL(__read_lock);
 EXPORT_SYMBOL(__read_unlock);
 EXPORT_SYMBOL(__write_lock);
 EXPORT_SYMBOL(__write_unlock);
 EXPORT_SYMBOL(__write_trylock);
-/* Out of line spin-locking implementation. */
-EXPORT_SYMBOL(_raw_spin_lock);
-EXPORT_SYMBOL(_raw_spin_lock_flags);
-#endif
 
 /* Hard IRQ locking */
 EXPORT_SYMBOL(synchronize_irq);
index 40dbeec7e5d6a8ed75006dd2873e0d40e019b29e..d968aebe83b282319fefb194dd942b3614a67764 100644 (file)
@@ -14,7 +14,6 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
         copy_in_user.o user_fixup.o memmove.o \
         mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o
 
-lib-$(CONFIG_DEBUG_SPINLOCK) += debuglocks.o
 lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
 
 obj-y += iomap.o
diff --git a/arch/sparc64/lib/debuglocks.c b/arch/sparc64/lib/debuglocks.c
deleted file mode 100644 (file)
index f5f0b55..0000000
+++ /dev/null
@@ -1,366 +0,0 @@
-/* $Id: debuglocks.c,v 1.9 2001/11/17 00:10:48 davem Exp $
- * debuglocks.c: Debugging versions of SMP locking primitives.
- *
- * Copyright (C) 1998 David S. Miller (davem@redhat.com)
- */
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <asm/system.h>
-
-#ifdef CONFIG_SMP
-
-static inline void show (char *str, spinlock_t *lock, unsigned long caller)
-{
-       int cpu = smp_processor_id();
-
-       printk("%s(%p) CPU#%d stuck at %08x, owner PC(%08x):CPU(%x)\n",
-              str, lock, cpu, (unsigned int) caller,
-              lock->owner_pc, lock->owner_cpu);
-}
-
-static inline void show_read (char *str, rwlock_t *lock, unsigned long caller)
-{
-       int cpu = smp_processor_id();
-
-       printk("%s(%p) CPU#%d stuck at %08x, writer PC(%08x):CPU(%x)\n",
-              str, lock, cpu, (unsigned int) caller,
-              lock->writer_pc, lock->writer_cpu);
-}
-
-static inline void show_write (char *str, rwlock_t *lock, unsigned long caller)
-{
-       int cpu = smp_processor_id();
-       int i;
-
-       printk("%s(%p) CPU#%d stuck at %08x\n",
-              str, lock, cpu, (unsigned int) caller);
-       printk("Writer: PC(%08x):CPU(%x)\n",
-              lock->writer_pc, lock->writer_cpu);
-       printk("Readers:");
-       for (i = 0; i < NR_CPUS; i++)
-               if (lock->reader_pc[i])
-                       printk(" %d[%08x]", i, lock->reader_pc[i]);
-       printk("\n");
-}
-
-#undef INIT_STUCK
-#define INIT_STUCK 100000000
-
-void _do_spin_lock(spinlock_t *lock, char *str, unsigned long caller)
-{
-       unsigned long val;
-       int stuck = INIT_STUCK;
-       int cpu = get_cpu();
-       int shown = 0;
-
-again:
-       __asm__ __volatile__("ldstub [%1], %0"
-                            : "=r" (val)
-                            : "r" (&(lock->lock))
-                            : "memory");
-       membar_storeload_storestore();
-       if (val) {
-               while (lock->lock) {
-                       if (!--stuck) {
-                               if (shown++ <= 2)
-                                       show(str, lock, caller);
-                               stuck = INIT_STUCK;
-                       }
-                       rmb();
-               }
-               goto again;
-       }
-       lock->owner_pc = ((unsigned int)caller);
-       lock->owner_cpu = cpu;
-       current->thread.smp_lock_count++;
-       current->thread.smp_lock_pc = ((unsigned int)caller);
-
-       put_cpu();
-}
-
-int _do_spin_trylock(spinlock_t *lock, unsigned long caller)
-{
-       unsigned long val;
-       int cpu = get_cpu();
-
-       __asm__ __volatile__("ldstub [%1], %0"
-                            : "=r" (val)
-                            : "r" (&(lock->lock))
-                            : "memory");
-       membar_storeload_storestore();
-       if (!val) {
-               lock->owner_pc = ((unsigned int)caller);
-               lock->owner_cpu = cpu;
-               current->thread.smp_lock_count++;
-               current->thread.smp_lock_pc = ((unsigned int)caller);
-       }
-
-       put_cpu();
-
-       return val == 0;
-}
-
-void _do_spin_unlock(spinlock_t *lock)
-{
-       lock->owner_pc = 0;
-       lock->owner_cpu = NO_PROC_ID;
-       membar_storestore_loadstore();
-       lock->lock = 0;
-       current->thread.smp_lock_count--;
-}
-
-/* Keep INIT_STUCK the same... */
-
-void _do_read_lock(rwlock_t *rw, char *str, unsigned long caller)
-{
-       unsigned long val;
-       int stuck = INIT_STUCK;
-       int cpu = get_cpu();
-       int shown = 0;
-
-wlock_again:
-       /* Wait for any writer to go away.  */
-       while (((long)(rw->lock)) < 0) {
-               if (!--stuck) {
-                       if (shown++ <= 2)
-                               show_read(str, rw, caller);
-                       stuck = INIT_STUCK;
-               }
-               rmb();
-       }
-       /* Try once to increment the counter.  */
-       __asm__ __volatile__(
-"      ldx             [%0], %%g1\n"
-"      brlz,a,pn       %%g1, 2f\n"
-"       mov            1, %0\n"
-"      add             %%g1, 1, %%g7\n"
-"      casx            [%0], %%g1, %%g7\n"
-"      sub             %%g1, %%g7, %0\n"
-"2:"   : "=r" (val)
-       : "0" (&(rw->lock))
-       : "g1", "g7", "memory");
-       membar_storeload_storestore();
-       if (val)
-               goto wlock_again;
-       rw->reader_pc[cpu] = ((unsigned int)caller);
-       current->thread.smp_lock_count++;
-       current->thread.smp_lock_pc = ((unsigned int)caller);
-
-       put_cpu();
-}
-
-void _do_read_unlock(rwlock_t *rw, char *str, unsigned long caller)
-{
-       unsigned long val;
-       int stuck = INIT_STUCK;
-       int cpu = get_cpu();
-       int shown = 0;
-
-       /* Drop our identity _first_. */
-       rw->reader_pc[cpu] = 0;
-       current->thread.smp_lock_count--;
-runlock_again:
-       /* Spin trying to decrement the counter using casx.  */
-       __asm__ __volatile__(
-"      membar  #StoreLoad | #LoadLoad\n"
-"      ldx     [%0], %%g1\n"
-"      sub     %%g1, 1, %%g7\n"
-"      casx    [%0], %%g1, %%g7\n"
-"      membar  #StoreLoad | #StoreStore\n"
-"      sub     %%g1, %%g7, %0\n"
-       : "=r" (val)
-       : "0" (&(rw->lock))
-       : "g1", "g7", "memory");
-       if (val) {
-               if (!--stuck) {
-                       if (shown++ <= 2)
-                               show_read(str, rw, caller);
-                       stuck = INIT_STUCK;
-               }
-               goto runlock_again;
-       }
-
-       put_cpu();
-}
-
-void _do_write_lock(rwlock_t *rw, char *str, unsigned long caller)
-{
-       unsigned long val;
-       int stuck = INIT_STUCK;
-       int cpu = get_cpu();
-       int shown = 0;
-
-wlock_again:
-       /* Spin while there is another writer. */
-       while (((long)rw->lock) < 0) {
-               if (!--stuck) {
-                       if (shown++ <= 2)
-                               show_write(str, rw, caller);
-                       stuck = INIT_STUCK;
-               }
-               rmb();
-       }
-
-       /* Try to acuire the write bit.  */
-       __asm__ __volatile__(
-"      mov     1, %%g3\n"
-"      sllx    %%g3, 63, %%g3\n"
-"      ldx     [%0], %%g1\n"
-"      brlz,pn %%g1, 1f\n"
-"       or     %%g1, %%g3, %%g7\n"
-"      casx    [%0], %%g1, %%g7\n"
-"      membar  #StoreLoad | #StoreStore\n"
-"      ba,pt   %%xcc, 2f\n"
-"       sub    %%g1, %%g7, %0\n"
-"1:    mov     1, %0\n"
-"2:"   : "=r" (val)
-       : "0" (&(rw->lock))
-       : "g3", "g1", "g7", "memory");
-       if (val) {
-               /* We couldn't get the write bit. */
-               if (!--stuck) {
-                       if (shown++ <= 2)
-                               show_write(str, rw, caller);
-                       stuck = INIT_STUCK;
-               }
-               goto wlock_again;
-       }
-       if ((rw->lock & ((1UL<<63)-1UL)) != 0UL) {
-               /* Readers still around, drop the write
-                * lock, spin, and try again.
-                */
-               if (!--stuck) {
-                       if (shown++ <= 2)
-                               show_write(str, rw, caller);
-                       stuck = INIT_STUCK;
-               }
-               __asm__ __volatile__(
-"              mov     1, %%g3\n"
-"              sllx    %%g3, 63, %%g3\n"
-"1:            ldx     [%0], %%g1\n"
-"              andn    %%g1, %%g3, %%g7\n"
-"              casx    [%0], %%g1, %%g7\n"
-"              cmp     %%g1, %%g7\n"
-"              membar  #StoreLoad | #StoreStore\n"
-"              bne,pn  %%xcc, 1b\n"
-"               nop"
-               : /* no outputs */
-               : "r" (&(rw->lock))
-               : "g3", "g1", "g7", "cc", "memory");
-               while(rw->lock != 0) {
-                       if (!--stuck) {
-                               if (shown++ <= 2)
-                                       show_write(str, rw, caller);
-                               stuck = INIT_STUCK;
-                       }
-                       rmb();
-               }
-               goto wlock_again;
-       }
-
-       /* We have it, say who we are. */
-       rw->writer_pc = ((unsigned int)caller);
-       rw->writer_cpu = cpu;
-       current->thread.smp_lock_count++;
-       current->thread.smp_lock_pc = ((unsigned int)caller);
-
-       put_cpu();
-}
-
-void _do_write_unlock(rwlock_t *rw, unsigned long caller)
-{
-       unsigned long val;
-       int stuck = INIT_STUCK;
-       int shown = 0;
-
-       /* Drop our identity _first_ */
-       rw->writer_pc = 0;
-       rw->writer_cpu = NO_PROC_ID;
-       current->thread.smp_lock_count--;
-wlock_again:
-       __asm__ __volatile__(
-"      membar  #StoreLoad | #LoadLoad\n"
-"      mov     1, %%g3\n"
-"      sllx    %%g3, 63, %%g3\n"
-"      ldx     [%0], %%g1\n"
-"      andn    %%g1, %%g3, %%g7\n"
-"      casx    [%0], %%g1, %%g7\n"
-"      membar  #StoreLoad | #StoreStore\n"
-"      sub     %%g1, %%g7, %0\n"
-       : "=r" (val)
-       : "0" (&(rw->lock))
-       : "g3", "g1", "g7", "memory");
-       if (val) {
-               if (!--stuck) {
-                       if (shown++ <= 2)
-                               show_write("write_unlock", rw, caller);
-                       stuck = INIT_STUCK;
-               }
-               goto wlock_again;
-       }
-}
-
-int _do_write_trylock(rwlock_t *rw, char *str, unsigned long caller)
-{
-       unsigned long val;
-       int cpu = get_cpu();
-
-       /* Try to acuire the write bit.  */
-       __asm__ __volatile__(
-"      mov     1, %%g3\n"
-"      sllx    %%g3, 63, %%g3\n"
-"      ldx     [%0], %%g1\n"
-"      brlz,pn %%g1, 1f\n"
-"       or     %%g1, %%g3, %%g7\n"
-"      casx    [%0], %%g1, %%g7\n"
-"      membar  #StoreLoad | #StoreStore\n"
-"      ba,pt   %%xcc, 2f\n"
-"       sub    %%g1, %%g7, %0\n"
-"1:    mov     1, %0\n"
-"2:"   : "=r" (val)
-       : "0" (&(rw->lock))
-       : "g3", "g1", "g7", "memory");
-
-       if (val) {
-               put_cpu();
-               return 0;
-       }
-
-       if ((rw->lock & ((1UL<<63)-1UL)) != 0UL) {
-               /* Readers still around, drop the write
-                * lock, return failure.
-                */
-               __asm__ __volatile__(
-"              mov     1, %%g3\n"
-"              sllx    %%g3, 63, %%g3\n"
-"1:            ldx     [%0], %%g1\n"
-"              andn    %%g1, %%g3, %%g7\n"
-"              casx    [%0], %%g1, %%g7\n"
-"              cmp     %%g1, %%g7\n"
-"              membar  #StoreLoad | #StoreStore\n"
-"              bne,pn  %%xcc, 1b\n"
-"               nop"
-               : /* no outputs */
-               : "r" (&(rw->lock))
-               : "g3", "g1", "g7", "cc", "memory");
-
-               put_cpu();
-
-               return 0;
-       }
-
-       /* We have it, say who we are. */
-       rw->writer_pc = ((unsigned int)caller);
-       rw->writer_cpu = cpu;
-       current->thread.smp_lock_count++;
-       current->thread.smp_lock_pc = ((unsigned int)caller);
-
-       put_cpu();
-
-       return 1;
-}
-
-#endif /* CONFIG_SMP */
index 577b8d1cf1a6e9b2bf74aecf087ed65c8e2df955..154803a226984ee6651829701952339921512647 100644 (file)
@@ -103,7 +103,6 @@ endef
 
 ifneq ($(KBUILD_SRC),)
 $(shell mkdir -p $(ARCH_DIR) && ln -fsn $(srctree)/$(ARCH_DIR)/Kconfig.$(SUBARCH) $(ARCH_DIR)/Kconfig.arch)
-CLEAN_FILES += $(ARCH_DIR)/Kconfig.arch
 else
 $(shell cd $(ARCH_DIR) && ln -sf Kconfig.$(SUBARCH) Kconfig.arch)
 endif
@@ -144,14 +143,14 @@ endef
 #TT or skas makefiles and don't clean skas_ptregs.h.
 CLEAN_FILES += linux x.i gmon.out $(ARCH_DIR)/include/uml-config.h \
        $(GEN_HEADERS) $(ARCH_DIR)/include/skas_ptregs.h \
-       $(ARCH_DIR)/include/user_constants.h
+       $(ARCH_DIR)/include/user_constants.h $(ARCH_DIR)/Kconfig.arch
 
 MRPROPER_FILES += $(SYMLINK_HEADERS) $(ARCH_SYMLINKS) \
-       $(addprefix $(ARCH_DIR)/kernel/,$(KERN_SYMLINKS)) $(ARCH_DIR)/os \
-       $(ARCH_DIR)/Kconfig.arch
+       $(addprefix $(ARCH_DIR)/kernel/,$(KERN_SYMLINKS)) $(ARCH_DIR)/os
 
 archclean:
        $(Q)$(MAKE) $(clean)=$(ARCH_DIR)/util
+       $(Q)$(MAKE) $(clean)=$(ARCH_DIR)/os-$(OS)/util
        @find . \( -name '*.bb' -o -name '*.bbg' -o -name '*.da' \
                -o -name '*.gcov' \) -type f -print | xargs rm -f
 
index 99d3ad4a03e5ab5cd060390cc340b0fdf7f6faa4..e8ff0d8fa61037360dd3cd3488b4cab1588f0b52 100644 (file)
@@ -13,7 +13,17 @@ extern int physmem_subst_mapping(void *virt, int fd, __u64 offset, int w);
 extern int is_remapped(void *virt);
 extern int physmem_remove_mapping(void *virt);
 extern void physmem_forget_descriptor(int fd);
-extern unsigned long to_phys(void *virt);
+
+extern unsigned long uml_physmem;
+static inline unsigned long to_phys(void *virt)
+{
+       return(((unsigned long) virt) - uml_physmem);
+}
+
+static inline void *to_virt(unsigned long phys)
+{
+       return((void *) uml_physmem + phys);
+}
 
 #endif
 
index 3942a5f245de6c038b0712155273d5097f4ff815..2517ecb8bf27d07d56fc5903c0cd26052aa7617a 100644 (file)
@@ -146,37 +146,8 @@ SECTIONS
   }
   _end = .;
   PROVIDE (end = .);
-   /* Stabs debugging sections.  */
-  .stab          0 : { *(.stab) }
-  .stabstr       0 : { *(.stabstr) }
-  .stab.excl     0 : { *(.stab.excl) }
-  .stab.exclstr  0 : { *(.stab.exclstr) }
-  .stab.index    0 : { *(.stab.index) }
-  .stab.indexstr 0 : { *(.stab.indexstr) }
-  .comment       0 : { *(.comment) }
-  /* DWARF debug sections.
-     Symbols in the DWARF debugging sections are relative to the beginning
-     of the section so we begin them at 0.  */
-  /* DWARF 1 */
-  .debug          0 : { *(.debug) }
-  .line           0 : { *(.line) }
-  /* GNU DWARF 1 extensions */
-  .debug_srcinfo  0 : { *(.debug_srcinfo) }
-  .debug_sfnames  0 : { *(.debug_sfnames) }
-  /* DWARF 1.1 and DWARF 2 */
-  .debug_aranges  0 : { *(.debug_aranges) }
-  .debug_pubnames 0 : { *(.debug_pubnames) }
-  /* DWARF 2 */
-  .debug_info     0 : { *(.debug_info .gnu.linkonce.wi.*) }
-  .debug_abbrev   0 : { *(.debug_abbrev) }
-  .debug_line     0 : { *(.debug_line) }
-  .debug_frame    0 : { *(.debug_frame) }
-  .debug_str      0 : { *(.debug_str) }
-  .debug_loc      0 : { *(.debug_loc) }
-  .debug_macinfo  0 : { *(.debug_macinfo) }
-  /* SGI/MIPS DWARF 2 extensions */
-  .debug_weaknames 0 : { *(.debug_weaknames) }
-  .debug_funcnames 0 : { *(.debug_funcnames) }
-  .debug_typenames 0 : { *(.debug_typenames) }
-  .debug_varnames  0 : { *(.debug_varnames) }
+
+  STABS_DEBUG
+
+  DWARF_DEBUG
 }
index 32d3076dd2204206e06aed7d835dbc4659c6e47a..a97a72e516aa6f6c6b7b4e6e0e4814b6e24f3a69 100644 (file)
@@ -34,14 +34,9 @@ EXPORT_SYMBOL(host_task_size);
 EXPORT_SYMBOL(arch_validate);
 EXPORT_SYMBOL(get_kmem_end);
 
-EXPORT_SYMBOL(page_to_phys);
-EXPORT_SYMBOL(phys_to_page);
 EXPORT_SYMBOL(high_physmem);
 EXPORT_SYMBOL(empty_zero_page);
 EXPORT_SYMBOL(um_virt_to_phys);
-EXPORT_SYMBOL(__virt_to_page);
-EXPORT_SYMBOL(to_phys);
-EXPORT_SYMBOL(to_virt);
 EXPORT_SYMBOL(mode_tt);
 EXPORT_SYMBOL(handle_page_fault);
 EXPORT_SYMBOL(find_iomem);
index a24e3b7f4bf076a7e3e1dd48d6e0ffdc906f7a6c..ea670fcc8af5b3c52b020afe31fbd09f02df97cd 100644 (file)
@@ -248,16 +248,6 @@ unsigned long high_physmem;
 
 extern unsigned long physmem_size;
 
-void *to_virt(unsigned long phys)
-{
-       return((void *) uml_physmem + phys);
-}
-
-unsigned long to_phys(void *virt)
-{
-       return(((unsigned long) virt) - uml_physmem);
-}
-
 int init_maps(unsigned long physmem, unsigned long iomem, unsigned long highmem)
 {
        struct page *p, *map;
@@ -298,31 +288,6 @@ int init_maps(unsigned long physmem, unsigned long iomem, unsigned long highmem)
        return(0);
 }
 
-struct page *phys_to_page(const unsigned long phys)
-{
-       return(&mem_map[phys >> PAGE_SHIFT]);
-}
-
-struct page *__virt_to_page(const unsigned long virt)
-{
-       return(&mem_map[__pa(virt) >> PAGE_SHIFT]);
-}
-
-phys_t page_to_phys(struct page *page)
-{
-       return((page - mem_map) << PAGE_SHIFT);
-}
-
-pte_t mk_pte(struct page *page, pgprot_t pgprot)
-{
-       pte_t pte;
-
-       pte_set_val(pte, page_to_phys(page), pgprot);
-       if(pte_present(pte))
-               pte_mknewprot(pte_mknewpage(pte));
-       return(pte);
-}
-
 /* Changed during early boot */
 static unsigned long kmem_top = 0;
 
index b5fc89fe9eab8f2e51859e5a44c5f2bf5d226ab4..87cc6fd76cedecd10ac110db2b2f60cd0b109fcc 100644 (file)
@@ -57,7 +57,8 @@ good_area:
        if(is_write && !(vma->vm_flags & VM_WRITE)) 
                goto out;
 
-        if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
+       /* Don't require VM_READ|VM_EXEC for write faults! */
+        if(!is_write && !(vma->vm_flags & (VM_READ | VM_EXEC)))
                 goto out;
 
        do {
@@ -84,8 +85,7 @@ survive:
                pte = pte_offset_kernel(pmd, address);
        } while(!pte_present(*pte));
        err = 0;
-       *pte = pte_mkyoung(*pte);
-       if(pte_write(*pte)) *pte = pte_mkdirty(*pte);
+       WARN_ON(!pte_young(*pte) || (is_write && !pte_dirty(*pte)));
        flush_tlb_page(vma, address);
 out:
        up_read(&mm->mmap_sem);
index b03326d391c9228e200b1d2c9572bbcd7f1be705..af11915ce0a8ca5155dad601c853b138f250277a 100644 (file)
@@ -93,14 +93,10 @@ SECTIONS
    *(.bss)
    *(COMMON)
   }
-  _end = . ;
+  _end = .;
   PROVIDE (end = .);
-  /* Stabs debugging sections.  */
-  .stab 0 : { *(.stab) }
-  .stabstr 0 : { *(.stabstr) }
-  .stab.excl 0 : { *(.stab.excl) }
-  .stab.exclstr 0 : { *(.stab.exclstr) }
-  .stab.index 0 : { *(.stab.index) }
-  .stab.indexstr 0 : { *(.stab.indexstr) }
-  .comment 0 : { *(.comment) }
+
+  STABS_DEBUG
+
+  DWARF_DEBUG
 }
index 336cbf21dc8ffa8ba8791766c3d55bb9b0c84aeb..9e85969ba9767d714486334c0b31f743470b5cb2 100644 (file)
@@ -67,13 +67,13 @@ static void ack_none(unsigned int irq)
 #define end_none       enable_none
 
 struct hw_interrupt_type no_irq_type = {
-       "none",
-       startup_none,
-       shutdown_none,
-       enable_none,
-       disable_none,
-       ack_none,
-       end_none
+       .typename = "none",
+       .startup = startup_none,
+       .shutdown = shutdown_none,
+       .enable = enable_none,
+       .disable = disable_none,
+       .ack = ack_none,
+       .end = end_none
 };
 
 volatile unsigned long irq_err_count, spurious_count;
index abd48409dcca51f5d4816aaf54e4fa9e149e7fe9..62bdb8d29fc05191750c7f1033a802e24f445d4d 100644 (file)
@@ -138,13 +138,13 @@ static void nmi_end (unsigned irq)
 }
 
 static struct hw_interrupt_type nmi_irq_type = {
-       "NMI",
-       irq_zero,               /* startup */
-       irq_nop,                /* shutdown */
-       irq_nop,                /* enable */
-       irq_nop,                /* disable */
-       irq_nop,                /* ack */
-       nmi_end,                /* end */
+       .typename = "NMI",
+       .startup = irq_zero,            /* startup */
+       .shutdown = irq_nop,            /* shutdown */
+       .enable = irq_nop,              /* enable */
+       .disable = irq_nop,             /* disable */
+       .ack = irq_nop,         /* ack */
+       .end = nmi_end,         /* end */
 };
 
 void __init init_IRQ (void)
index e2cc5580fa2a1eb9191962261857388698dbbf06..17049aaa8f11832cf4a431e9a9e4a09282dd2208 100644 (file)
@@ -73,13 +73,13 @@ static void irq_nop (unsigned irq) { }
 static unsigned irq_zero (unsigned irq) { return 0; }
 
 static struct hw_interrupt_type sim_irq_type = {
-       "IRQ",
-       irq_zero,               /* startup */
-       irq_nop,                /* shutdown */
-       irq_nop,                /* enable */
-       irq_nop,                /* disable */
-       irq_nop,                /* ack */
-       irq_nop,                /* end */
+       .typename = "IRQ",
+       .startup = irq_zero,            /* startup */
+       .shutdown = irq_nop,            /* shutdown */
+       .enable = irq_nop,              /* enable */
+       .disable = irq_nop,             /* disable */
+       .ack = irq_nop,         /* ack */
+       .end = irq_nop,         /* end */
 };
 
 void __init mach_init_irqs (void)
index 0511d8087910d077741a93ce811a4241ee67e0e2..9aec524be3ebbbb653bcd32b42e44ea51324f442 100644 (file)
@@ -929,7 +929,7 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
        c->x86_num_cores = intel_num_cpu_cores(c);
 }
 
-void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
+static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 {
        char *v = c->x86_vendor_id;
 
index d4abb07af52dbef72da991331845b4438251a60b..6dd642cad2efd0074d2150edebe7d78080836850 100644 (file)
@@ -194,20 +194,7 @@ SECTIONS
 #endif
        }
 
-  /* DWARF 2 */
-  .debug_info     0 : { *(.debug_info) }
-  .debug_abbrev   0 : { *(.debug_abbrev) }
-  .debug_line     0 : { *(.debug_line) }
-  .debug_frame    0 : { *(.debug_frame) }
-  .debug_str      0 : { *(.debug_str) }
-  .debug_loc      0 : { *(.debug_loc) }
-  .debug_macinfo  0 : { *(.debug_macinfo) }
-  /* SGI/MIPS DWARF 2 extensions */
-  .debug_weaknames 0 : { *(.debug_weaknames) }
-  .debug_funcnames 0 : { *(.debug_funcnames) }
-  .debug_typenames 0 : { *(.debug_typenames) }
-  .debug_varnames  0 : { *(.debug_varnames) }
-
-
-  .comment 0 : { *(.comment) }
+  STABS_DEBUG
+
+  DWARF_DEBUG
 }
index 7249ba2b7a2740affaee5ba54d82ca4d27c2cc1c..aee50b453265905d22442992afc6f4701177a98e 100644 (file)
@@ -23,7 +23,6 @@ u8 sleep_states[ACPI_S_STATE_COUNT];
 
 static struct pm_ops acpi_pm_ops;
 
-extern void do_suspend_lowlevel_s4bios(void);
 extern void do_suspend_lowlevel(void);
 
 static u32 acpi_suspend_states[] = {
@@ -98,8 +97,6 @@ static int acpi_pm_enter(suspend_state_t pm_state)
        case PM_SUSPEND_DISK:
                if (acpi_pm_ops.pm_disk_mode == PM_DISK_PLATFORM)
                        status = acpi_enter_sleep_state(acpi_state);
-               else
-                       do_suspend_lowlevel_s4bios();
                break;
        case PM_SUSPEND_MAX:
                acpi_power_off();
@@ -206,11 +203,6 @@ static int __init acpi_sleep_init(void)
                        printk(" S%d", i);
                }
                if (i == ACPI_STATE_S4) {
-                       if (acpi_gbl_FACS->S4bios_f) {
-                               sleep_states[i] = 1;
-                               printk(" S4bios");
-                               acpi_pm_ops.pm_disk_mode = PM_DISK_FIRMWARE;
-                       }
                        if (sleep_states[i])
                                acpi_pm_ops.pm_disk_mode = PM_DISK_PLATFORM;
                }
index a5f947de879bc62a52ade449c729cf5ffe5b69c5..af7935a95bcc821efea2c4236817e4c5cf0286be 100644 (file)
@@ -21,9 +21,7 @@ int acpi_sleep_prepare(u32 acpi_state)
 {
 #ifdef CONFIG_ACPI_SLEEP
        /* do we have a wakeup address for S2 and S3? */
-       /* Here, we support only S4BIOS, those we set the wakeup address */
-       /* S4OS is only supported for now via swsusp.. */
-       if (acpi_state == ACPI_STATE_S3 || acpi_state == ACPI_STATE_S4) {
+       if (acpi_state == ACPI_STATE_S3) {
                if (!acpi_wakeup_address) {
                        return -EFAULT;
                }
index 09a603f3523ec148df72d54d629ef4712fdbe8c6..4696a85a98b92b34c2789e52fae6a6122039b447 100644 (file)
@@ -25,8 +25,6 @@ static int acpi_system_sleep_seq_show(struct seq_file *seq, void *offset)
        for (i = 0; i <= ACPI_STATE_S5; i++) {
                if (sleep_states[i]) {
                        seq_printf(seq, "S%d ", i);
-                       if (i == ACPI_STATE_S4 && acpi_gbl_FACS->S4bios_f)
-                               seq_printf(seq, "S4bios ");
                }
        }
 
index c4aebf2f522d724abfbb8e1c5538ac8441322bbd..60a7ef6a201b879ecedafe996c36c730259d4ba6 100644 (file)
@@ -262,7 +262,8 @@ dma_pool_destroy (struct dma_pool *pool)
  * If such a memory block can't be allocated, null is returned.
  */
 void *
-dma_pool_alloc (struct dma_pool *pool, int mem_flags, dma_addr_t *handle)
+dma_pool_alloc (struct dma_pool *pool, unsigned int __nocast mem_flags,
+               dma_addr_t *handle)
 {
        unsigned long           flags;
        struct dma_page         *page;
index 418b1469d75d116b1f029e4ae5bc44ed1dd768a2..28f2c177a54167a0ce7d3bd9e3f1dbe7ac1d7995 100644 (file)
@@ -1713,10 +1713,9 @@ static unsigned long pollcomplete(int ctlr)
 
        for (i = 20 * HZ; i > 0; i--) {
                done = hba[ctlr]->access.command_completed(hba[ctlr]);
-               if (done == FIFO_EMPTY) {
-                       set_current_state(TASK_UNINTERRUPTIBLE);
-                       schedule_timeout(1);
-               } else
+               if (done == FIFO_EMPTY)
+                       schedule_timeout_uninterruptible(1);
+               else
                        return (done);
        }
        /* Invalid address to tell caller we ran out of time */
index 30c0903c7cddfdd46dbdd0de9c8e999c1ed7f349..cd056e7e64ec15d5cf81f16068623e29e07ad187 100644 (file)
@@ -2260,6 +2260,8 @@ static void cfq_put_cfqd(struct cfq_data *cfqd)
        if (!atomic_dec_and_test(&cfqd->ref))
                return;
 
+       blk_put_queue(q);
+
        cfq_shutdown_timer_wq(cfqd);
        q->elevator->elevator_data = NULL;
 
@@ -2316,6 +2318,7 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
        e->elevator_data = cfqd;
 
        cfqd->queue = q;
+       atomic_inc(&q->refcnt);
 
        cfqd->max_queued = q->nr_requests / 4;
        q->nr_batching = cfq_queued;
index 7289f67e9568771fb703435917814868bf482757..ac5ba462710b2f0af37b37032804aa238bbdf2b8 100644 (file)
@@ -516,8 +516,7 @@ static int pcd_tray_move(struct cdrom_device_info *cdi, int position)
 
 static void pcd_sleep(int cs)
 {
-       current->state = TASK_INTERRUPTIBLE;
-       schedule_timeout(cs);
+       schedule_timeout_interruptible(cs);
 }
 
 static int pcd_reset(struct pcd_unit *cd)
index 060b1f2a91ddcec82c36d70d3ea383080b58033e..711d2f314ac32736c01fcdd63647d18f2d0eecb7 100644 (file)
@@ -507,8 +507,7 @@ static void pf_eject(struct pf_unit *pf)
 
 static void pf_sleep(int cs)
 {
-       current->state = TASK_INTERRUPTIBLE;
-       schedule_timeout(cs);
+       schedule_timeout_interruptible(cs);
 }
 
 /* the ATAPI standard actually specifies the contents of all 7 registers
index 84d8e291ed964aaafb537790dc2fd00f1b781263..b3982395f22b4d56fcc0de0f1f555ee699b2447f 100644 (file)
@@ -276,8 +276,7 @@ static inline u8 DRIVE(struct pg *dev)
 
 static void pg_sleep(int cs)
 {
-       current->state = TASK_INTERRUPTIBLE;
-       schedule_timeout(cs);
+       schedule_timeout_interruptible(cs);
 }
 
 static int pg_wait(struct pg *dev, int go, int stop, unsigned long tmo, char *msg)
index 5fe8ee86f095bb82a9691fd5792f8411a4797555..d8d35233cf495df4fd84fbdecd9cb65710137993 100644 (file)
@@ -383,8 +383,7 @@ static int pt_atapi(struct pt_unit *tape, char *cmd, int dlen, char *buf, char *
 
 static void pt_sleep(int cs)
 {
-       current->state = TASK_INTERRUPTIBLE;
-       schedule_timeout(cs);
+       schedule_timeout_interruptible(cs);
 }
 
 static int pt_poll_dsc(struct pt_unit *tape, int pause, int tmo, char *msg)
index e5f7494c00eed322d946269a568acf9240632c55..e425ad3eebba80618be8c56f6282f9ed99bad740 100644 (file)
@@ -834,8 +834,7 @@ static int fd_eject(struct floppy_state *fs)
                        break;
                }
                swim3_select(fs, RELAX);
-               current->state = TASK_INTERRUPTIBLE;
-               schedule_timeout(1);
+               schedule_timeout_interruptible(1);
                if (swim3_readbit(fs, DISK_IN) == 0)
                        break;
        }
@@ -906,8 +905,7 @@ static int floppy_open(struct inode *inode, struct file *filp)
                                break;
                        }
                        swim3_select(fs, RELAX);
-                       current->state = TASK_INTERRUPTIBLE;
-                       schedule_timeout(1);
+                       schedule_timeout_interruptible(1);
                }
                if (err == 0 && (swim3_readbit(fs, SEEK_COMPLETE) == 0
                                 || swim3_readbit(fs, DISK_IN) == 0))
@@ -992,8 +990,7 @@ static int floppy_revalidate(struct gendisk *disk)
                if (signal_pending(current))
                        break;
                swim3_select(fs, RELAX);
-               current->state = TASK_INTERRUPTIBLE;
-               schedule_timeout(1);
+               schedule_timeout_interruptible(1);
        }
        ret = swim3_readbit(fs, SEEK_COMPLETE) == 0
                || swim3_readbit(fs, DISK_IN) == 0;
index a1283f6dc0189e1ee0d3d7b0622ea875f2333b5d..89e3c2f8b77681bacd376ba480732d4e84b97b9d 100644 (file)
@@ -338,8 +338,7 @@ static int swimiop_eject(struct floppy_state *fs)
                        err = -EINTR;
                        break;
                }
-               current->state = TASK_INTERRUPTIBLE;
-               schedule_timeout(1);
+               schedule_timeout_interruptible(1);
        }
        release_drive(fs);
        return cmd->error;
index 0c4c121d2e7962bf798958ad2cee2df0e907527c..0f48301342da4e2e0a13a23bed85821e06efb79b 100644 (file)
@@ -34,6 +34,7 @@
  *                      - set initialised bit then.
  */
 
+//#define DEBUG /* uncomment if you want debugging info (pr_debug) */
 #include <linux/config.h>
 #include <linux/sched.h>
 #include <linux/fs.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 
-#define PRINTK(x...) do {} while (0)
-#define dprintk(x...) do {} while (0)
-/*#define dprintk(x...) printk(x) */
-
 #define MM_MAXCARDS 4
 #define MM_RAHEAD 2      /* two sectors */
 #define MM_BLKSIZE 1024  /* 1k blocks */
@@ -299,7 +296,7 @@ static void mm_start_io(struct cardinfo *card)
 
        /* make the last descriptor end the chain */
        page = &card->mm_pages[card->Active];
-       PRINTK("start_io: %d %d->%d\n", card->Active, page->headcnt, page->cnt-1);
+       pr_debug("start_io: %d %d->%d\n", card->Active, page->headcnt, page->cnt-1);
        desc = &page->desc[page->cnt-1];
 
        desc->control_bits |= cpu_to_le32(DMASCR_CHAIN_COMP_EN);
@@ -532,7 +529,7 @@ static void process_page(unsigned long data)
                activate(card);
        } else {
                /* haven't finished with this one yet */
-               PRINTK("do some more\n");
+               pr_debug("do some more\n");
                mm_start_io(card);
        }
  out_unlock:
@@ -555,7 +552,7 @@ static void process_page(unsigned long data)
 static int mm_make_request(request_queue_t *q, struct bio *bio)
 {
        struct cardinfo *card = q->queuedata;
-       PRINTK("mm_make_request %ld %d\n", bh->b_rsector, bh->b_size);
+       pr_debug("mm_make_request %ld %d\n", bh->b_rsector, bh->b_size);
 
        bio->bi_phys_segments = bio->bi_idx; /* count of completed segments*/
        spin_lock_irq(&card->lock);
index 1676033da6c6f359b060b57cfe4a46bae0d63772..68b6d7b154cf88057b138d72a58b74d93409c23f 100644 (file)
@@ -47,6 +47,7 @@
 #include <linux/wait.h>
 #include <linux/blkdev.h>
 #include <linux/blkpg.h>
+#include <linux/delay.h>
 
 #include <asm/system.h>
 #include <asm/io.h>
@@ -62,7 +63,7 @@ static int xd[5] = { -1,-1,-1,-1, };
 
 #define XD_DONT_USE_DMA                0  /* Initial value. may be overriden using
                                      "nodma" module option */
-#define XD_INIT_DISK_DELAY     (30*HZ/1000)  /* 30 ms delay during disk initialization */
+#define XD_INIT_DISK_DELAY     (30)  /* 30 ms delay during disk initialization */
 
 /* Above may need to be increased if a problem with the 2nd drive detection
    (ST11M controller) or resetting a controller (WD) appears */
@@ -529,10 +530,8 @@ static inline u_char xd_waitport (u_short port,u_char flags,u_char mask,u_long t
        int success;
 
        xdc_busy = 1;
-       while ((success = ((inb(port) & mask) != flags)) && time_before(jiffies, expiry)) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(1);
-       }
+       while ((success = ((inb(port) & mask) != flags)) && time_before(jiffies, expiry))
+               schedule_timeout_uninterruptible(1);
        xdc_busy = 0;
        return (success);
 }
@@ -633,14 +632,12 @@ static u_char __init xd_initdrives (void (*init_drive)(u_char drive))
        for (i = 0; i < XD_MAXDRIVES; i++) {
                xd_build(cmdblk,CMD_TESTREADY,i,0,0,0,0,0);
                if (!xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT*8)) {
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       schedule_timeout(XD_INIT_DISK_DELAY);
+                       msleep_interruptible(XD_INIT_DISK_DELAY);
 
                        init_drive(count);
                        count++;
 
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       schedule_timeout(XD_INIT_DISK_DELAY);
+                       msleep_interruptible(XD_INIT_DISK_DELAY);
                }
        }
        return (count);
@@ -761,8 +758,7 @@ static void __init xd_wd_init_controller (unsigned int address)
 
        outb(0,XD_RESET);               /* reset the controller */
 
-       set_current_state(TASK_UNINTERRUPTIBLE);
-       schedule_timeout(XD_INIT_DISK_DELAY);
+       msleep(XD_INIT_DISK_DELAY);
 }
 
 static void __init xd_wd_init_drive (u_char drive)
@@ -936,8 +932,7 @@ If you need non-standard settings use the xd=... command */
        xd_maxsectors = 0x01;
        outb(0,XD_RESET);               /* reset the controller */
 
-       set_current_state(TASK_UNINTERRUPTIBLE);
-       schedule_timeout(XD_INIT_DISK_DELAY);
+       msleep(XD_INIT_DISK_DELAY);
 }
 
 static void __init xd_xebec_init_drive (u_char drive)
index 007f6a6624392c6427601306dddebbda745d60e9..bb5e8d665a2a3ce8bd41fbd647bd06eeec772541 100644 (file)
@@ -296,7 +296,7 @@ z2_open( struct inode *inode, struct file *filp )
     return 0;
 
 err_out_kfree:
-    kfree( z2ram_map );
+    kfree(z2ram_map);
 err_out:
     return rc;
 }
index 30a8977553617c86fb5ec42601ce59dc266811b6..466e9c2974bdd917e7840daa8f075e8f7bda7cb9 100644 (file)
@@ -827,8 +827,7 @@ static void mark_timeout_audio(u_long i)
 static void sbp_sleep(u_int time)
 {
        sti();
-       current->state = TASK_INTERRUPTIBLE;
-       schedule_timeout(time);
+       schedule_timeout_interruptible(time);
        sti();
 }
 /*==========================================================================*/
@@ -4216,7 +4215,8 @@ static int sbpcd_dev_ioctl(struct cdrom_device_info *cdi, u_int cmd,
                
        case CDROMAUDIOBUFSIZ: /* configure the audio buffer size */
                msg(DBG_IOC,"ioctl: CDROMAUDIOBUFSIZ entered.\n");
-               if (current_drive->sbp_audsiz>0) vfree(current_drive->aud_buf);
+               if (current_drive->sbp_audsiz>0)
+                       vfree(current_drive->aud_buf);
                current_drive->aud_buf=NULL;
                current_drive->sbp_audsiz=arg;
                
@@ -5910,7 +5910,8 @@ static void sbpcd_exit(void)
                put_disk(D_S[j].disk);
                devfs_remove("sbp/c0t%d", j);
                vfree(D_S[j].sbp_buf);
-               if (D_S[j].sbp_audsiz>0) vfree(D_S[j].aud_buf);
+               if (D_S[j].sbp_audsiz>0)
+                       vfree(D_S[j].aud_buf);
                if ((unregister_cdrom(D_S[j].sbpcd_infop) == -EINVAL))
                {
                        msg(DBG_INF, "What's that: can't unregister info %s.\n", major_name);
index 9f22e8f1f6c0182dbbbb9ecd5689d01235f53404..e6565992643260580cbbcaf7e5209a1744fd97f2 100644 (file)
@@ -1478,8 +1478,7 @@ static int __init sony535_init(void)
        /* look for the CD-ROM, follows the procedure in the DOS driver */
        inb(select_unit_reg);
        /* wait for 40 18 Hz ticks (reverse-engineered from DOS driver) */
-       set_current_state(TASK_INTERRUPTIBLE);
-       schedule_timeout((HZ+17)*40/18);
+       schedule_timeout_interruptible((HZ+17)*40/18);
        inb(result_reg);
 
        outb(0, read_status_reg);       /* does a reset? */
index 4d4e602fdc7e7cfa326000b84135583ed866296d..82b43c541c8dca52ad90c0ea6c6b984a50c455ee 100644 (file)
@@ -206,10 +206,9 @@ static void agp_backend_cleanup(struct agp_bridge_data *bridge)
                bridge->driver->cleanup();
        if (bridge->driver->free_gatt_table)
                bridge->driver->free_gatt_table(bridge);
-       if (bridge->key_list) {
-               vfree(bridge->key_list);
-               bridge->key_list = NULL;
-       }
+
+       vfree(bridge->key_list);
+       bridge->key_list = NULL;
 
        if (bridge->driver->agp_destroy_page &&
            bridge->driver->needs_scratch_page)
index 11f9ee5811242973f15bc0232d1e15fbee33373e..927a5bbe112c9d9eb175e6840f557d4348d159c2 100644 (file)
@@ -172,7 +172,7 @@ static int ac_register_board(unsigned long physloc, void __iomem *loc,
 
 void cleanup_module(void)
 {
-       int i;
+       unsigned int i;
 
        misc_deregister(&ac_miscdev);
 
@@ -195,7 +195,7 @@ int __init applicom_init(void)
        int i, numisa = 0;
        struct pci_dev *dev = NULL;
        void __iomem *RamIO;
-       int boardno;
+       int boardno, ret;
 
        printk(KERN_INFO "Applicom driver: $Id: ac.c,v 1.30 2000/03/22 16:03:57 dwmw2 Exp $\n");
 
@@ -294,7 +294,8 @@ int __init applicom_init(void)
        }
 
        if (!numisa)
-               printk(KERN_WARNING"ac.o: No valid ISA Applicom boards found at mem 0x%lx\n",mem);
+               printk(KERN_WARNING "ac.o: No valid ISA Applicom boards found "
+                               "at mem 0x%lx\n", mem);
 
  fin:
        init_waitqueue_head(&FlagSleepRec);
@@ -304,7 +305,11 @@ int __init applicom_init(void)
        DeviceErrorCount = 0;
 
        if (numboards) {
-               misc_register(&ac_miscdev);
+               ret = misc_register(&ac_miscdev);
+               if (ret) {
+                       printk(KERN_WARNING "ac.o: Unable to register misc device\n");
+                       goto out;
+               }
                for (i = 0; i < MAX_BOARD; i++) {
                        int serial;
                        char boardname[(SERIAL_NUMBER - TYPE_CARD) + 1];
@@ -337,6 +342,17 @@ int __init applicom_init(void)
 
        else
                return -ENXIO;
+
+out:
+       for (i = 0; i < MAX_BOARD; i++) {
+               if (!apbs[i].RamIO)
+                       continue;
+               if (apbs[i].irq)
+                       free_irq(apbs[i].irq, &dummy);
+               iounmap(apbs[i].RamIO);
+       }
+       pci_disable_device(dev);
+       return ret;
 }
 
 
index 1704a2a57048b85adda6bd799d488b053940ffc6..b2e0928e84288b6a392e7951fd3724649a54fd27 100644 (file)
@@ -387,10 +387,8 @@ int fdc_interrupt_wait(unsigned int time)
 
        set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue(&ftape_wait_intr, &wait);
-       while (!ft_interrupt_seen && timeout) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               timeout = schedule_timeout(timeout);
-        }
+       while (!ft_interrupt_seen && timeout)
+               timeout = schedule_timeout_interruptible(timeout);
 
        spin_lock_irq(&current->sighand->siglock);
        current->blocked = old_sigmask;
index 5fe8461271fc35812222bd2b2e2f8e138d1ffc02..de0379b6d502cd4f2e512565bf6da5c04439780e 100644 (file)
@@ -100,14 +100,14 @@ static struct hpets *hpets;
 #endif
 
 #ifndef readq
-static unsigned long long __inline readq(void __iomem *addr)
+static inline unsigned long long readq(void __iomem *addr)
 {
        return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL);
 }
 #endif
 
 #ifndef writeq
-static void __inline writeq(unsigned long long v, void __iomem *addr)
+static inline void writeq(unsigned long long v, void __iomem *addr)
 {
        writel(v & 0xffffffff, addr);
        writel(v >> 32, addr + 4);
index 3480535a09c5f113af1ca110bb0fa37ae58c2206..6f673d2de0b1717439e5d908f4ce3e5cb4b85072 100644 (file)
@@ -513,10 +513,7 @@ static ssize_t rng_dev_read (struct file *filp, char __user *buf, size_t size,
                        return ret ? : -EAGAIN;
 
                if(need_resched())
-               {
-                       current->state = TASK_INTERRUPTIBLE;
-                       schedule_timeout(1);
-               }
+                       schedule_timeout_interruptible(1);
                else
                        udelay(200);    /* FIXME: We could poll for 250uS ?? */
 
index 82c5f30375acc816cd879dc0d083767cb9cd457a..ba85eb1b6ec75bfa56cd10b06b95cd9ff0186ee8 100644 (file)
@@ -655,8 +655,7 @@ i2QueueCommands(int type, i2ChanStrPtr pCh, int timeout, int nCommands,
                        timeout--;   // So negative values == forever
                
                if (!in_interrupt()) {
-                       current->state = TASK_INTERRUPTIBLE;
-                       schedule_timeout(1);    // short nap 
+                       schedule_timeout_interruptible(1);      // short nap
                } else {
                        // we cannot sched/sleep in interrrupt silly
                        return 0;   
@@ -1132,8 +1131,7 @@ i2Output(i2ChanStrPtr pCh, const char *pSource, int count, int user )
 
                                        ip2trace (CHANN, ITRC_OUTPUT, 61, 0 );
 
-                                       current->state = TASK_INTERRUPTIBLE;
-                                       schedule_timeout(2);
+                                       schedule_timeout_interruptible(2);
                                        if (signal_pending(current)) {
                                                break;
                                        }
index 278f841049968c15867d25c34e2f17772d375829..b6e5cbfb09f81d897c1dd95079c4e9281dda4ba5 100644 (file)
@@ -1920,8 +1920,7 @@ static int try_get_dev_id(struct smi_info *smi_info)
        for (;;)
        {
                if (smi_result == SI_SM_CALL_WITH_DELAY) {
-                       set_current_state(TASK_UNINTERRUPTIBLE);
-                       schedule_timeout(1);
+                       schedule_timeout_uninterruptible(1);
                        smi_result = smi_info->handlers->event(
                                smi_info->si_sm, 100);
                }
@@ -2256,10 +2255,8 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
 
        /* Wait for the timer to stop.  This avoids problems with race
           conditions removing the timer here. */
-       while (! new_smi->timer_stopped) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(1);
-       }
+       while (!new_smi->timer_stopped)
+               schedule_timeout_uninterruptible(1);
 
  out_err:
        if (new_smi->intf)
@@ -2379,17 +2376,14 @@ static void __exit cleanup_one_si(struct smi_info *to_clean)
 
        /* Wait for the timer to stop.  This avoids problems with race
           conditions removing the timer here. */
-       while (! to_clean->timer_stopped) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(1);
-       }
+       while (!to_clean->timer_stopped)
+               schedule_timeout_uninterruptible(1);
 
        /* Interrupts and timeouts are stopped, now make sure the
           interface is in a clean state. */
        while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
                poll(to_clean);
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(1);
+               schedule_timeout_uninterruptible(1);
        }
 
        rv = ipmi_unregister_smi(to_clean->intf);
index e71aaae855adda775f9c296e6d91a333192d8110..2da64bf7469c6963e008e6215c610dae78dd3e3a 100644 (file)
@@ -1037,10 +1037,8 @@ static __exit void ipmi_unregister_watchdog(void)
        /* Wait to make sure the message makes it out.  The lower layer has
           pointers to our buffers, we want to make sure they are done before
           we release our memory. */
-       while (atomic_read(&set_timeout_tofree)) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(1);
-       }
+       while (atomic_read(&set_timeout_tofree))
+               schedule_timeout_uninterruptible(1);
 
        /* Disconnect from IPMI. */
        rv = ipmi_destroy_user(watchdog_user);
index 1745065d8f789f062b76fdb1c6a21bb565b0e0f5..449d029ad4f40abc936fb85f6ee66f60511201a6 100644 (file)
@@ -14,7 +14,7 @@
  * `Sticky' modifier keys, 951006.
  *
  * 11-11-96: SAK should now work in the raw mode (Martin Mares)
- * 
+ *
  * Modified to provide 'generic' keyboard support by Hamish Macdonald
  * Merge with the m68k keyboard driver and split-off of the PC low-level
  * parts by Geert Uytterhoeven, May 1997
@@ -52,7 +52,7 @@ extern void ctrl_alt_del(void);
 /*
  * Some laptops take the 789uiojklm,. keys as number pad when NumLock is on.
  * This seems a good reason to start with NumLock off. On HIL keyboards
- * of PARISC machines however there is no NumLock key and everyone expects the keypad 
+ * of PARISC machines however there is no NumLock key and everyone expects the keypad
  * to be used for numbers.
  */
 
@@ -76,17 +76,17 @@ void compute_shiftstate(void);
        k_meta,         k_ascii,        k_lock,         k_lowercase,\
        k_slock,        k_dead2,        k_ignore,       k_ignore
 
-typedef void (k_handler_fn)(struct vc_data *vc, unsigned char value, 
+typedef void (k_handler_fn)(struct vc_data *vc, unsigned char value,
                            char up_flag, struct pt_regs *regs);
 static k_handler_fn K_HANDLERS;
 static k_handler_fn *k_handler[16] = { K_HANDLERS };
 
 #define FN_HANDLERS\
-       fn_null,        fn_enter,       fn_show_ptregs, fn_show_mem,\
-       fn_show_state,  fn_send_intr,   fn_lastcons,    fn_caps_toggle,\
-       fn_num,         fn_hold,        fn_scroll_forw, fn_scroll_back,\
-       fn_boot_it,     fn_caps_on,     fn_compose,     fn_SAK,\
-       fn_dec_console, fn_inc_console, fn_spawn_con,   fn_bare_num
+       fn_null,        fn_enter,       fn_show_ptregs, fn_show_mem,\
+       fn_show_state,  fn_send_intr,   fn_lastcons,    fn_caps_toggle,\
+       fn_num,         fn_hold,        fn_scroll_forw, fn_scroll_back,\
+       fn_boot_it,     fn_caps_on,     fn_compose,     fn_SAK,\
+       fn_dec_console, fn_inc_console, fn_spawn_con,   fn_bare_num
 
 typedef void (fn_handler_fn)(struct vc_data *vc, struct pt_regs *regs);
 static fn_handler_fn FN_HANDLERS;
@@ -159,13 +159,13 @@ static int sysrq_alt;
  */
 int getkeycode(unsigned int scancode)
 {
-       struct list_head * node;
+       struct list_head *node;
        struct input_dev *dev = NULL;
 
-       list_for_each(node,&kbd_handler.h_list) {
-               struct input_handle * handle = to_handle_h(node);
-               if (handle->dev->keycodesize) { 
-                       dev = handle->dev; 
+       list_for_each(node, &kbd_handler.h_list) {
+               struct input_handle *handle = to_handle_h(node);
+               if (handle->dev->keycodesize) {
+                       dev = handle->dev;
                        break;
                }
        }
@@ -181,15 +181,15 @@ int getkeycode(unsigned int scancode)
 
 int setkeycode(unsigned int scancode, unsigned int keycode)
 {
-       struct list_head * node;
+       struct list_head *node;
        struct input_dev *dev = NULL;
        unsigned int i, oldkey;
 
-       list_for_each(node,&kbd_handler.h_list) {
+       list_for_each(node, &kbd_handler.h_list) {
                struct input_handle *handle = to_handle_h(node);
-               if (handle->dev->keycodesize) { 
-                       dev = handle->dev; 
-                       break; 
+               if (handle->dev->keycodesize) {
+                       dev = handle->dev;
+                       break;
                }
        }
 
@@ -200,7 +200,7 @@ int setkeycode(unsigned int scancode, unsigned int keycode)
                return -EINVAL;
        if (keycode < 0 || keycode > KEY_MAX)
                return -EINVAL;
-       if (keycode >> (dev->keycodesize * 8))
+       if (dev->keycodesize < sizeof(keycode) && (keycode >> (dev->keycodesize * 8)))
                return -EINVAL;
 
        oldkey = SET_INPUT_KEYCODE(dev, scancode, keycode);
@@ -216,11 +216,11 @@ int setkeycode(unsigned int scancode, unsigned int keycode)
 }
 
 /*
- * Making beeps and bells. 
+ * Making beeps and bells.
  */
 static void kd_nosound(unsigned long ignored)
 {
-       struct list_head * node;
+       struct list_head *node;
 
        list_for_each(node,&kbd_handler.h_list) {
                struct input_handle *handle = to_handle_h(node);
@@ -237,12 +237,12 @@ static DEFINE_TIMER(kd_mksound_timer, kd_nosound, 0, 0);
 
 void kd_mksound(unsigned int hz, unsigned int ticks)
 {
-       struct list_head * node;
+       struct list_head *node;
 
        del_timer(&kd_mksound_timer);
 
        if (hz) {
-               list_for_each_prev(node,&kbd_handler.h_list) {
+               list_for_each_prev(node, &kbd_handler.h_list) {
                        struct input_handle *handle = to_handle_h(node);
                        if (test_bit(EV_SND, handle->dev->evbit)) {
                                if (test_bit(SND_TONE, handle->dev->sndbit)) {
@@ -337,19 +337,19 @@ static void to_utf8(struct vc_data *vc, ushort c)
        if (c < 0x80)
                /*  0******* */
                put_queue(vc, c);
-       else if (c < 0x800) {
+       else if (c < 0x800) {
                /* 110***** 10****** */
-               put_queue(vc, 0xc0 | (c >> 6)); 
+               put_queue(vc, 0xc0 | (c >> 6));
                put_queue(vc, 0x80 | (c & 0x3f));
-       } else {
+       } else {
                /* 1110**** 10****** 10****** */
                put_queue(vc, 0xe0 | (c >> 12));
                put_queue(vc, 0x80 | ((c >> 6) & 0x3f));
                put_queue(vc, 0x80 | (c & 0x3f));
-       }
+       }
 }
 
-/* 
+/*
  * Called after returning from RAW mode or when changing consoles - recompute
  * shift_down[] and shift_state from key_down[] maybe called when keymap is
  * undefined, so that shiftkey release is seen
@@ -360,7 +360,7 @@ void compute_shiftstate(void)
 
        shift_state = 0;
        memset(shift_down, 0, sizeof(shift_down));
-       
+
        for (i = 0; i < ARRAY_SIZE(key_down); i++) {
 
                if (!key_down[i])
@@ -499,9 +499,9 @@ static void fn_dec_console(struct vc_data *vc, struct pt_regs *regs)
        if (want_console != -1)
                cur = want_console;
 
-       for (i = cur-1; i != cur; i--) {
+       for (i = cur - 1; i != cur; i--) {
                if (i == -1)
-                       i = MAX_NR_CONSOLES-1;
+                       i = MAX_NR_CONSOLES - 1;
                if (vc_cons_allocated(i))
                        break;
        }
@@ -567,9 +567,9 @@ static void fn_compose(struct vc_data *vc, struct pt_regs *regs)
 
 static void fn_spawn_con(struct vc_data *vc, struct pt_regs *regs)
 {
-        if (spawnpid)
-          if(kill_proc(spawnpid, spawnsig, 1))
-            spawnpid = 0;
+       if (spawnpid)
+               if (kill_proc(spawnpid, spawnsig, 1))
+                       spawnpid = 0;
 }
 
 static void fn_SAK(struct vc_data *vc, struct pt_regs *regs)
@@ -603,8 +603,8 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag, struct
                return;
        if (value >= ARRAY_SIZE(fn_handler))
                return;
-       if ((kbd->kbdmode == VC_RAW || 
-            kbd->kbdmode == VC_MEDIUMRAW) && 
+       if ((kbd->kbdmode == VC_RAW ||
+            kbd->kbdmode == VC_MEDIUMRAW) &&
             value != KVAL(K_SAK))
                return;         /* SAK is allowed even in raw mode */
        fn_handler[value](vc, regs);
@@ -894,11 +894,11 @@ static inline unsigned char getleds(void)
 
 static void kbd_bh(unsigned long dummy)
 {
-       struct list_head * node;
+       struct list_head *node;
        unsigned char leds = getleds();
 
        if (leds != ledstate) {
-               list_for_each(node,&kbd_handler.h_list) {
+               list_for_each(node, &kbd_handler.h_list) {
                        struct input_handle * handle = to_handle_h(node);
                        input_event(handle->dev, EV_LED, LED_SCROLLL, !!(leds & 0x01));
                        input_event(handle->dev, EV_LED, LED_NUML,    !!(leds & 0x02));
@@ -963,11 +963,11 @@ static int sparc_l1_a_state = 0;
 extern void sun_do_break(void);
 #endif
 
-static int emulate_raw(struct vc_data *vc, unsigned int keycode, 
+static int emulate_raw(struct vc_data *vc, unsigned int keycode,
                       unsigned char up_flag)
 {
        if (keycode > 255 || !x86_keycodes[keycode])
-               return -1; 
+               return -1;
 
        switch (keycode) {
                case KEY_PAUSE:
@@ -981,7 +981,7 @@ static int emulate_raw(struct vc_data *vc, unsigned int keycode,
                case KEY_HANJA:
                        if (!up_flag) put_queue(vc, 0xf2);
                        return 0;
-       } 
+       }
 
        if (keycode == KEY_SYSRQ && sysrq_alt) {
                put_queue(vc, 0x54 | up_flag);
@@ -1104,11 +1104,12 @@ static void kbd_keycode(unsigned int keycode, int down,
        else
                clear_bit(keycode, key_down);
 
-       if (rep && (!vc_kbd_mode(kbd, VC_REPEAT) || (tty && 
-               (!L_ECHO(tty) && tty->driver->chars_in_buffer(tty))))) {
+       if (rep &&
+           (!vc_kbd_mode(kbd, VC_REPEAT) ||
+            (tty && !L_ECHO(tty) && tty->driver->chars_in_buffer(tty)))) {
                /*
                 * Don't repeat a key if the input buffers are not empty and the
-                * characters get aren't echoed locally. This makes key repeat 
+                * characters get aren't echoed locally. This makes key repeat
                 * usable with slow applications and under heavy loads.
                 */
                return;
@@ -1130,7 +1131,8 @@ static void kbd_keycode(unsigned int keycode, int down,
        type = KTYP(keysym);
 
        if (type < 0xf0) {
-               if (down && !raw_mode) to_utf8(vc, keysym);
+               if (down && !raw_mode)
+                       to_utf8(vc, keysym);
                return;
        }
 
@@ -1154,7 +1156,7 @@ static void kbd_keycode(unsigned int keycode, int down,
                kbd->slockstate = 0;
 }
 
-static void kbd_event(struct input_handle *handle, unsigned int event_type, 
+static void kbd_event(struct input_handle *handle, unsigned int event_type,
                      unsigned int event_code, int value)
 {
        if (event_type == EV_MSC && event_code == MSC_RAW && HW_RAW(handle->dev))
@@ -1166,15 +1168,13 @@ static void kbd_event(struct input_handle *handle, unsigned int event_type,
        schedule_console_callback();
 }
 
-static char kbd_name[] = "kbd";
-
 /*
  * When a keyboard (or other input device) is found, the kbd_connect
  * function is called. The function then looks at the device, and if it
  * likes it, it can open it and get events from it. In this (kbd_connect)
  * function, we should decide which VT to bind that keyboard to initially.
  */
-static struct input_handle *kbd_connect(struct input_handler *handler, 
+static struct input_handle *kbd_connect(struct input_handler *handler,
                                        struct input_dev *dev,
                                        struct input_device_id *id)
 {
@@ -1182,18 +1182,19 @@ static struct input_handle *kbd_connect(struct input_handler *handler,
        int i;
 
        for (i = KEY_RESERVED; i < BTN_MISC; i++)
-               if (test_bit(i, dev->keybit)) break;
+               if (test_bit(i, dev->keybit))
+                       break;
 
-       if ((i == BTN_MISC) && !test_bit(EV_SND, dev->evbit)) 
+       if (i == BTN_MISC && !test_bit(EV_SND, dev->evbit))
                return NULL;
 
-       if (!(handle = kmalloc(sizeof(struct input_handle), GFP_KERNEL))) 
+       if (!(handle = kmalloc(sizeof(struct input_handle), GFP_KERNEL)))
                return NULL;
        memset(handle, 0, sizeof(struct input_handle));
 
        handle->dev = dev;
        handle->handler = handler;
-       handle->name = kbd_name;
+       handle->name = "kbd";
 
        input_open_device(handle);
        kbd_refresh_leds(handle);
@@ -1212,11 +1213,11 @@ static struct input_device_id kbd_ids[] = {
                 .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
                 .evbit = { BIT(EV_KEY) },
         },
-       
+
        {
                 .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
                 .evbit = { BIT(EV_SND) },
-        },     
+        },
 
        { },    /* Terminating entry */
 };
index cf01a720eb2eff74a8154510cfd970ebbfb60f8d..b771611461445548bed26bd09df0c06fe2265587 100644 (file)
@@ -613,10 +613,15 @@ static struct miscdevice lcd_dev = {
 
 static int lcd_init(void)
 {
+       int ret;
        unsigned long data;
 
        pr_info("%s\n", LCD_DRIVER);
-       misc_register(&lcd_dev);
+       ret = misc_register(&lcd_dev);
+       if (ret) {
+               printk(KERN_WARNING LCD "Unable to register misc device.\n");
+               return ret;
+       }
 
        /* Check region? Naaah! Just snarf it up. */
 /*     request_region(RTC_PORT(0), RTC_IO_EXTENT, "lcd");*/
index 59eebe5a035f1c7691656f389c75af261769c0eb..2afb9038dbc5752abbdf55dc1dc3c8a9737bd841 100644 (file)
 #include <linux/console.h>
 #include <linux/device.h>
 #include <linux/wait.h>
+#include <linux/jiffies.h>
 
 #include <linux/parport.h>
 #undef LP_STATS
@@ -307,7 +308,7 @@ static ssize_t lp_write(struct file * file, const char __user * buf,
                        (LP_F(minor) & LP_ABORT));
 
 #ifdef LP_STATS
-       if (jiffies-lp_table[minor].lastcall > LP_TIME(minor))
+       if (time_after(jiffies, lp_table[minor].lastcall + LP_TIME(minor)))
                lp_table[minor].runchars = 0;
 
        lp_table[minor].lastcall = jiffies;
index d0ef1ae412981d774289575dba2ea8a866975e37..45d012d85e8c3d436d33c46e1b809f8dee9216d3 100644 (file)
@@ -1058,8 +1058,7 @@ static void mxser_close(struct tty_struct *tty, struct file *filp)
                 */
                timeout = jiffies + HZ;
                while (!(inb(info->base + UART_LSR) & UART_LSR_TEMT)) {
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       schedule_timeout(5);
+                       schedule_timeout_interruptible(5);
                        if (time_after(jiffies, timeout))
                                break;
                }
@@ -1080,10 +1079,8 @@ static void mxser_close(struct tty_struct *tty, struct file *filp)
        info->event = 0;
        info->tty = NULL;
        if (info->blocked_open) {
-               if (info->close_delay) {
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       schedule_timeout(info->close_delay);
-               }
+               if (info->close_delay)
+                       schedule_timeout_interruptible(info->close_delay);
                wake_up_interruptible(&info->open_wait);
        }
 
@@ -1801,8 +1798,7 @@ static void mxser_wait_until_sent(struct tty_struct *tty, int timeout)
 #ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
                printk("lsr = %d (jiff=%lu)...", lsr, jiffies);
 #endif
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(char_time);
+               schedule_timeout_interruptible(char_time);
                if (signal_pending(current))
                        break;
                if (timeout && time_after(jiffies, orig_jiffies + timeout))
index 09103b3d8f05a26beae37b61d6160ccfb7911fc2..c9bdf544ed2cd1126f6d67840c082ee6bb53d42e 100644 (file)
@@ -62,7 +62,7 @@
 
 static inline unsigned char *alloc_buf(void)
 {
-       int prio = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
+       unsigned int prio = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
 
        if (PAGE_SIZE != N_TTY_BUF_SIZE)
                return kmalloc(N_TTY_BUF_SIZE, prio);
index 7a0c7464812492166a14ebbd778c6e60cfb6dc0a..02d7f046c10aeae25e831b2f15f9f85e63a22d99 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * linux/drivers/char/pcmcia/synclink_cs.c
  *
- * $Id: synclink_cs.c,v 4.26 2004/08/11 19:30:02 paulkf Exp $
+ * $Id: synclink_cs.c,v 4.34 2005/09/08 13:20:54 paulkf Exp $
  *
  * Device driver for Microgate SyncLink PC Card
  * multiprotocol serial adapter.
@@ -472,7 +472,7 @@ module_param_array(dosyncppp, int, NULL, 0);
 MODULE_LICENSE("GPL");
 
 static char *driver_name = "SyncLink PC Card driver";
-static char *driver_version = "$Revision: 4.26 $";
+static char *driver_version = "$Revision: 4.34 $";
 
 static struct tty_driver *serial_driver;
 
@@ -1457,6 +1457,8 @@ static int startup(MGSLPC_INFO * info)
 
        info->pending_bh = 0;
        
+       memset(&info->icount, 0, sizeof(info->icount));
+
        init_timer(&info->tx_timer);
        info->tx_timer.data = (unsigned long)info;
        info->tx_timer.function = tx_timeout;
@@ -1946,9 +1948,13 @@ static int get_stats(MGSLPC_INFO * info, struct mgsl_icount __user *user_icount)
        int err;
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("get_params(%s)\n", info->device_name);
-       COPY_TO_USER(err,user_icount, &info->icount, sizeof(struct mgsl_icount));
-       if (err)
-               return -EFAULT;
+       if (!user_icount) {
+               memset(&info->icount, 0, sizeof(info->icount));
+       } else {
+               COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
+               if (err)
+                       return -EFAULT;
+       }
        return 0;
 }
 
index f174aee659e5e0d086314e6ba3ebcfb3c92249cc..9e9cf1407311f92eabc53cf702913f70537b6c17 100644 (file)
@@ -560,7 +560,7 @@ ide_startstop_t __ide_abort(ide_drive_t *drive, struct request *rq)
 EXPORT_SYMBOL_GPL(__ide_abort);
 
 /**
- *     ide_abort       -       abort pending IDE operatins
+ *     ide_abort       -       abort pending IDE operations
  *     @drive: drive the error occurred on
  *     @msg: message to report
  *
@@ -623,7 +623,7 @@ static void ide_cmd (ide_drive_t *drive, u8 cmd, u8 nsect,
  *     @drive: drive the completion interrupt occurred on
  *
  *     drive_cmd_intr() is invoked on completion of a special DRIVE_CMD.
- *     We do any necessary daya reading and then wait for the drive to
+ *     We do any necessary data reading and then wait for the drive to
  *     go non busy. At that point we may read the error data and complete
  *     the request
  */
@@ -773,7 +773,7 @@ EXPORT_SYMBOL_GPL(ide_init_sg_cmd);
 
 /**
  *     execute_drive_command   -       issue special drive command
- *     @drive: the drive to issue th command on
+ *     @drive: the drive to issue the command on
  *     @rq: the request structure holding the command
  *
  *     execute_drive_cmd() issues a special drive command,  usually 
index 5a3dc46008e64895fd5ccbe5e48dd3d32bd7a079..ee38e6b143a4c2f453386d21f19f646217bf1f74 100644 (file)
@@ -2903,8 +2903,7 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
                } else if (!(tape->sense_key == 2 && tape->asc == 4 &&
                             (tape->ascq == 1 || tape->ascq == 8)))
                        return -EIO;
-               current->state = TASK_INTERRUPTIBLE;
-               schedule_timeout(HZ / 10);
+               msleep(100);
        }
        return -EIO;
 }
index c1196ce15b4d465ba3eaa91be1d3210d99d69b6d..2fcfac6e967aa221fec1053568c4d6e48715e2e9 100644 (file)
@@ -27,6 +27,7 @@
  * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
  */
 
+#include <linux/kernel.h>
 #include <linux/hdreg.h>
 
 #define XFER_PIO_5             0x0d
@@ -96,11 +97,9 @@ static struct ide_timing ide_timing[] = {
 #define IDE_TIMING_UDMA                0x80
 #define IDE_TIMING_ALL         0xff
 
-#define MIN(a,b)       ((a)<(b)?(a):(b))
-#define MAX(a,b)       ((a)>(b)?(a):(b))
-#define FIT(v,min,max) MAX(MIN(v,max),min)
-#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
-#define EZ(v,unit)     ((v)?ENOUGH(v,unit):0)
+#define FIT(v,vmin,vmax)       max_t(short,min_t(short,v,vmax),vmin)
+#define ENOUGH(v,unit)         (((v)-1)/(unit)+1)
+#define EZ(v,unit)             ((v)?ENOUGH(v,unit):0)
 
 #define XFER_MODE      0xf0
 #define XFER_UDMA_133  0x48
@@ -188,14 +187,14 @@ static void ide_timing_quantize(struct ide_timing *t, struct ide_timing *q, int
 
 static void ide_timing_merge(struct ide_timing *a, struct ide_timing *b, struct ide_timing *m, unsigned int what)
 {
-       if (what & IDE_TIMING_SETUP  ) m->setup   = MAX(a->setup,   b->setup);
-       if (what & IDE_TIMING_ACT8B  ) m->act8b   = MAX(a->act8b,   b->act8b);
-       if (what & IDE_TIMING_REC8B  ) m->rec8b   = MAX(a->rec8b,   b->rec8b);
-       if (what & IDE_TIMING_CYC8B  ) m->cyc8b   = MAX(a->cyc8b,   b->cyc8b);
-       if (what & IDE_TIMING_ACTIVE ) m->active  = MAX(a->active,  b->active);
-       if (what & IDE_TIMING_RECOVER) m->recover = MAX(a->recover, b->recover);
-       if (what & IDE_TIMING_CYCLE  ) m->cycle   = MAX(a->cycle,   b->cycle);
-       if (what & IDE_TIMING_UDMA   ) m->udma    = MAX(a->udma,    b->udma);
+       if (what & IDE_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
+       if (what & IDE_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
+       if (what & IDE_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
+       if (what & IDE_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
+       if (what & IDE_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
+       if (what & IDE_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
+       if (what & IDE_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
+       if (what & IDE_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
 }
 
 static struct ide_timing* ide_timing_find_mode(short speed)
index dc0841b2721c6ccf973c495a650710ff21f61527..0ccf85fcee3478669dfb5f2a1827d9c5494a7082 100644 (file)
@@ -43,6 +43,7 @@
 #include <linux/ide.h>
 #include <linux/hdreg.h>
 #include <linux/major.h>
+#include <linux/delay.h>
 #include <asm/io.h>
 #include <asm/system.h>
 
@@ -340,8 +341,7 @@ static void ide_config(dev_link_t *link)
                break;
            }
        }
-       __set_current_state(TASK_UNINTERRUPTIBLE);
-       schedule_timeout(HZ/10);
+       msleep(100);
     }
 
     if (hd < 0) {
index 60b696e9336b8442df9e24e571e832862179e257..3738d173f9a65a43e3d075294f00d6f07851ecab 100644 (file)
@@ -322,7 +322,7 @@ static long evdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                        if (t < 0 || t >= dev->keycodemax || !dev->keycodesize) return -EINVAL;
                        if (get_user(v, ip + 1)) return -EFAULT;
                        if (v < 0 || v > KEY_MAX) return -EINVAL;
-                       if (v >> (dev->keycodesize * 8)) return -EINVAL;
+                       if (dev->keycodesize < sizeof(v) && (v >> (dev->keycodesize * 8))) return -EINVAL;
                        u = SET_INPUT_KEYCODE(dev, t, v);
                        clear_bit(u, dev->keybit);
                        set_bit(v, dev->keybit);
index 58728ebaaf80ccc33b31770af4e77a7d824a7299..e5a31e55d3e2474d7d0af423790f27c8b5b86a83 100644 (file)
@@ -249,9 +249,6 @@ void iforce_process_packet(struct iforce *iforce, u16 cmd, unsigned char *data,
 
 int iforce_get_id_packet(struct iforce *iforce, char *packet)
 {
-       DECLARE_WAITQUEUE(wait, current);
-       int timeout = HZ; /* 1 second */
-
        switch (iforce->bus) {
 
        case IFORCE_USB:
@@ -260,22 +257,13 @@ int iforce_get_id_packet(struct iforce *iforce, char *packet)
                iforce->cr.bRequest = packet[0];
                iforce->ctrl->dev = iforce->usbdev;
 
-               set_current_state(TASK_INTERRUPTIBLE);
-               add_wait_queue(&iforce->wait, &wait);
-
-               if (usb_submit_urb(iforce->ctrl, GFP_ATOMIC)) {
-                       set_current_state(TASK_RUNNING);
-                       remove_wait_queue(&iforce->wait, &wait);
+               if (usb_submit_urb(iforce->ctrl, GFP_ATOMIC))
                        return -1;
-               }
 
-               while (timeout && iforce->ctrl->status == -EINPROGRESS)
-                       timeout = schedule_timeout(timeout);
+               wait_event_interruptible_timeout(iforce->wait,
+                       iforce->ctrl->status != -EINPROGRESS, HZ);
 
-               set_current_state(TASK_RUNNING);
-               remove_wait_queue(&iforce->wait, &wait);
-
-               if (!timeout) {
+               if (iforce->ctrl->status != -EINPROGRESS) {
                        usb_unlink_urb(iforce->ctrl);
                        return -1;
                }
@@ -290,16 +278,10 @@ int iforce_get_id_packet(struct iforce *iforce, char *packet)
                iforce->expect_packet = FF_CMD_QUERY;
                iforce_send_packet(iforce, FF_CMD_QUERY, packet);
 
-               set_current_state(TASK_INTERRUPTIBLE);
-               add_wait_queue(&iforce->wait, &wait);
-
-               while (timeout && iforce->expect_packet)
-                       timeout = schedule_timeout(timeout);
-
-               set_current_state(TASK_RUNNING);
-               remove_wait_queue(&iforce->wait, &wait);
+               wait_event_interruptible_timeout(iforce->wait,
+                       !iforce->expect_packet, HZ);
 
-               if (!timeout) {
+               if (iforce->expect_packet) {
                        iforce->expect_packet = 0;
                        return -1;
                }
index 6369a24684fe35eb1d25ed98a384c902197ad76b..58600f91eff5212b6464d3843e367b482fd09c75 100644 (file)
@@ -95,6 +95,7 @@ static void iforce_usb_irq(struct urb *urb, struct pt_regs *regs)
                goto exit;
        }
 
+       wake_up(&iforce->wait);
        iforce_process_packet(iforce,
                (iforce->data[0] << 8) | (urb->actual_length - 1), iforce->data + 1, regs);
 
index 4d4985b59abf182887349fedd73e758d9986102d..1ad8c2ee7dbf337b4417e93cfcf158a42895700a 100644 (file)
@@ -208,6 +208,7 @@ struct atkbd {
        unsigned char resend;
        unsigned char release;
        unsigned char bat_xl;
+       unsigned char err_xl;
        unsigned int last;
        unsigned long time;
 };
@@ -296,15 +297,18 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
                if (atkbd->emul ||
                    !(code == ATKBD_RET_EMUL0 || code == ATKBD_RET_EMUL1 ||
                      code == ATKBD_RET_HANGUEL || code == ATKBD_RET_HANJA ||
-                     code == ATKBD_RET_ERR ||
+                    (code == ATKBD_RET_ERR && !atkbd->err_xl) ||
                     (code == ATKBD_RET_BAT && !atkbd->bat_xl))) {
                        atkbd->release = code >> 7;
                        code &= 0x7f;
                }
 
-               if (!atkbd->emul &&
-                    (code & 0x7f) == (ATKBD_RET_BAT & 0x7f))
+               if (!atkbd->emul) {
+                    if ((code & 0x7f) == (ATKBD_RET_BAT & 0x7f))
                        atkbd->bat_xl = !atkbd->release;
+                    if ((code & 0x7f) == (ATKBD_RET_ERR & 0x7f))
+                       atkbd->err_xl = !atkbd->release;
+               }
        }
 
        switch (code) {
index 596964ceb96dc39b0351ff386fa539351aeb6a10..4bae5d89348d7d798169b73be559e3f0215a6220 100644 (file)
@@ -44,7 +44,7 @@ MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL");
 
 static unsigned char sunkbd_keycode[128] = {
-         0,128,114,129,115, 59, 60, 68, 61, 87, 62, 88, 63,100, 64,  0,
+         0,128,114,129,115, 59, 60, 68, 61, 87, 62, 88, 63,100, 64,112,
         65, 66, 67, 56,103,119, 99, 70,105,130,131,108,106,  1,  2,  3,
          4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 41, 14,110,113, 98, 55,
        116,132, 83,133,102, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
index c4909b49337d23e708e4e4850c1e69ac1f1488b7..82b330bbf068d27a94764fd1af5bc4975ed1d4fb 100644 (file)
@@ -15,4 +15,4 @@ obj-$(CONFIG_MOUSE_SERIAL)    += sermouse.o
 obj-$(CONFIG_MOUSE_HIL)                += hil_ptr.o
 obj-$(CONFIG_MOUSE_VSXXXAA)    += vsxxxaa.o
 
-psmouse-objs  := psmouse-base.o alps.o logips2pp.o synaptics.o lifebook.o
+psmouse-objs  := psmouse-base.o alps.o logips2pp.o synaptics.o lifebook.o trackpoint.o
index 0d68e5e0182aea89825963b8fc87b291ae320fd9..b20783f9748adbdad1be653abfd83dbec4448b1e 100644 (file)
@@ -170,7 +170,7 @@ static void alps_process_packet(struct psmouse *psmouse, struct pt_regs *regs)
        input_report_key(dev, BTN_TOOL_FINGER, z > 0);
 
        if (priv->i->flags & ALPS_WHEEL)
-               input_report_rel(dev, REL_WHEEL, ((packet[0] >> 4) & 0x07) | ((packet[2] >> 2) & 0x08));
+               input_report_rel(dev, REL_WHEEL, ((packet[2] << 1) & 0x08) - ((packet[0] >> 4) & 0x07));
 
        if (priv->i->flags & (ALPS_FW_BK_1 | ALPS_FW_BK_2)) {
                input_report_key(dev, BTN_FORWARD, forward);
index 48d2b20d264299b30cd6cac8441ecab38253c1d4..7df96525222e5dc6a2fc2ba6aea9a332023b4558 100644 (file)
@@ -150,12 +150,12 @@ static void ps2pp_set_smartscroll(struct psmouse *psmouse, unsigned int smartscr
        ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES);
 }
 
-static ssize_t psmouse_attr_show_smartscroll(struct psmouse *psmouse, char *buf)
+static ssize_t ps2pp_attr_show_smartscroll(struct psmouse *psmouse, void *data, char *buf)
 {
        return sprintf(buf, "%d\n", psmouse->smartscroll ? 1 : 0);
 }
 
-static ssize_t psmouse_attr_set_smartscroll(struct psmouse *psmouse, const char *buf, size_t count)
+static ssize_t ps2pp_attr_set_smartscroll(struct psmouse *psmouse, void *data, const char *buf, size_t count)
 {
        unsigned long value;
        char *rest;
@@ -169,7 +169,8 @@ static ssize_t psmouse_attr_set_smartscroll(struct psmouse *psmouse, const char
        return count;
 }
 
-PSMOUSE_DEFINE_ATTR(smartscroll);
+PSMOUSE_DEFINE_ATTR(smartscroll, S_IWUSR | S_IRUGO, NULL,
+                       ps2pp_attr_show_smartscroll, ps2pp_attr_set_smartscroll);
 
 /*
  * Support 800 dpi resolution _only_ if the user wants it (there are good
@@ -194,7 +195,7 @@ static void ps2pp_set_resolution(struct psmouse *psmouse, unsigned int resolutio
 
 static void ps2pp_disconnect(struct psmouse *psmouse)
 {
-       device_remove_file(&psmouse->ps2dev.serio->dev, &psmouse_attr_smartscroll);
+       device_remove_file(&psmouse->ps2dev.serio->dev, &psmouse_attr_smartscroll.dattr);
 }
 
 static struct ps2pp_info *get_model_info(unsigned char model)
@@ -222,6 +223,7 @@ static struct ps2pp_info *get_model_info(unsigned char model)
                { 80,   PS2PP_KIND_WHEEL,       PS2PP_SIDE_BTN | PS2PP_WHEEL },
                { 81,   PS2PP_KIND_WHEEL,       PS2PP_WHEEL },
                { 83,   PS2PP_KIND_WHEEL,       PS2PP_WHEEL },
+               { 86,   PS2PP_KIND_WHEEL,       PS2PP_WHEEL },
                { 88,   PS2PP_KIND_WHEEL,       PS2PP_WHEEL },
                { 96,   0,                      0 },
                { 97,   PS2PP_KIND_TP3,         PS2PP_WHEEL | PS2PP_HWHEEL },
@@ -379,7 +381,8 @@ int ps2pp_init(struct psmouse *psmouse, int set_properties)
                                psmouse->set_resolution = ps2pp_set_resolution;
                                psmouse->disconnect = ps2pp_disconnect;
 
-                               device_create_file(&psmouse->ps2dev.serio->dev, &psmouse_attr_smartscroll);
+                               device_create_file(&psmouse->ps2dev.serio->dev,
+                                                  &psmouse_attr_smartscroll.dattr);
                        }
                }
 
index 12bdd3eff923ea32042d76f47110d217c43243f7..af24313ff5bb14973c5fdaa9bcbfcd5dc0f66dae 100644 (file)
@@ -25,6 +25,7 @@
 #include "logips2pp.h"
 #include "alps.h"
 #include "lifebook.h"
+#include "trackpoint.h"
 
 #define DRIVER_DESC    "PS/2 mouse driver"
 
@@ -57,10 +58,30 @@ static unsigned int psmouse_resetafter;
 module_param_named(resetafter, psmouse_resetafter, uint, 0644);
 MODULE_PARM_DESC(resetafter, "Reset device after so many bad packets (0 = never).");
 
-PSMOUSE_DEFINE_ATTR(protocol);
-PSMOUSE_DEFINE_ATTR(rate);
-PSMOUSE_DEFINE_ATTR(resolution);
-PSMOUSE_DEFINE_ATTR(resetafter);
+PSMOUSE_DEFINE_ATTR(protocol, S_IWUSR | S_IRUGO,
+                       NULL,
+                       psmouse_attr_show_protocol, psmouse_attr_set_protocol);
+PSMOUSE_DEFINE_ATTR(rate, S_IWUSR | S_IRUGO,
+                       (void *) offsetof(struct psmouse, rate),
+                       psmouse_show_int_attr, psmouse_attr_set_rate);
+PSMOUSE_DEFINE_ATTR(resolution, S_IWUSR | S_IRUGO,
+                       (void *) offsetof(struct psmouse, resolution),
+                       psmouse_show_int_attr, psmouse_attr_set_resolution);
+PSMOUSE_DEFINE_ATTR(resetafter, S_IWUSR | S_IRUGO,
+                       (void *) offsetof(struct psmouse, resetafter),
+                       psmouse_show_int_attr, psmouse_set_int_attr);
+
+static struct attribute *psmouse_attributes[] = {
+       &psmouse_attr_protocol.dattr.attr,
+       &psmouse_attr_rate.dattr.attr,
+       &psmouse_attr_resolution.dattr.attr,
+       &psmouse_attr_resetafter.dattr.attr,
+       NULL
+};
+
+static struct attribute_group psmouse_attribute_group = {
+       .attrs  = psmouse_attributes,
+};
 
 __obsolete_setup("psmouse_noext");
 __obsolete_setup("psmouse_resolution=");
@@ -519,6 +540,12 @@ static int psmouse_extensions(struct psmouse *psmouse,
        if (max_proto >= PSMOUSE_IMPS && intellimouse_detect(psmouse, set_properties) == 0)
                return PSMOUSE_IMPS;
 
+/*
+ * Try to initialize the IBM TrackPoint
+ */
+       if (max_proto > PSMOUSE_IMEX && trackpoint_detect(psmouse, set_properties) == 0)
+               return PSMOUSE_TRACKPOINT;
+
 /*
  * Okay, all failed, we have a standard mouse here. The number of the buttons
  * is still a question, though. We assume 3.
@@ -599,6 +626,12 @@ static struct psmouse_protocol psmouse_protocols[] = {
                .alias          = "lifebook",
                .init           = lifebook_init,
        },
+       {
+               .type           = PSMOUSE_TRACKPOINT,
+               .name           = "TPPS/2",
+               .alias          = "trackpoint",
+               .detect         = trackpoint_detect,
+       },
        {
                .type           = PSMOUSE_AUTO,
                .name           = "auto",
@@ -787,10 +820,7 @@ static void psmouse_disconnect(struct serio *serio)
 
        psmouse = serio_get_drvdata(serio);
 
-       device_remove_file(&serio->dev, &psmouse_attr_protocol);
-       device_remove_file(&serio->dev, &psmouse_attr_rate);
-       device_remove_file(&serio->dev, &psmouse_attr_resolution);
-       device_remove_file(&serio->dev, &psmouse_attr_resetafter);
+       sysfs_remove_group(&serio->dev.kobj, &psmouse_attribute_group);
 
        down(&psmouse_sem);
 
@@ -927,10 +957,7 @@ static int psmouse_connect(struct serio *serio, struct serio_driver *drv)
        if (parent && parent->pt_activate)
                parent->pt_activate(parent);
 
-       device_create_file(&serio->dev, &psmouse_attr_protocol);
-       device_create_file(&serio->dev, &psmouse_attr_rate);
-       device_create_file(&serio->dev, &psmouse_attr_resolution);
-       device_create_file(&serio->dev, &psmouse_attr_resetafter);
+       sysfs_create_group(&serio->dev.kobj, &psmouse_attribute_group);
 
        psmouse_activate(psmouse);
 
@@ -1027,10 +1054,12 @@ static struct serio_driver psmouse_drv = {
        .cleanup        = psmouse_cleanup,
 };
 
-ssize_t psmouse_attr_show_helper(struct device *dev, char *buf,
-                                ssize_t (*handler)(struct psmouse *, char *))
+ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *devattr,
+                                char *buf)
 {
        struct serio *serio = to_serio_port(dev);
+       struct psmouse_attribute *attr = to_psmouse_attr(devattr);
+       struct psmouse *psmouse;
        int retval;
 
        retval = serio_pin_driver(serio);
@@ -1042,19 +1071,21 @@ ssize_t psmouse_attr_show_helper(struct device *dev, char *buf,
                goto out;
        }
 
-       retval = handler(serio_get_drvdata(serio), buf);
+       psmouse = serio_get_drvdata(serio);
+
+       retval = attr->show(psmouse, attr->data, buf);
 
 out:
        serio_unpin_driver(serio);
        return retval;
 }
 
-ssize_t psmouse_attr_set_helper(struct device *dev, const char *buf, size_t count,
-                               ssize_t (*handler)(struct psmouse *, const char *, size_t))
+ssize_t psmouse_attr_set_helper(struct device *dev, struct device_attribute *devattr,
+                               const char *buf, size_t count)
 {
        struct serio *serio = to_serio_port(dev);
-       struct psmouse *psmouse = serio_get_drvdata(serio);
-       struct psmouse *parent = NULL;
+       struct psmouse_attribute *attr = to_psmouse_attr(devattr);
+       struct psmouse *psmouse, *parent = NULL;
        int retval;
 
        retval = serio_pin_driver(serio);
@@ -1070,6 +1101,8 @@ ssize_t psmouse_attr_set_helper(struct device *dev, const char *buf, size_t coun
        if (retval)
                goto out_unpin;
 
+       psmouse = serio_get_drvdata(serio);
+
        if (psmouse->state == PSMOUSE_IGNORE) {
                retval = -ENODEV;
                goto out_up;
@@ -1082,7 +1115,7 @@ ssize_t psmouse_attr_set_helper(struct device *dev, const char *buf, size_t coun
 
        psmouse_deactivate(psmouse);
 
-       retval = handler(psmouse, buf, count);
+       retval = attr->set(psmouse, attr->data, buf, count);
 
        if (retval != -ENODEV)
                psmouse_activate(psmouse);
@@ -1097,12 +1130,34 @@ ssize_t psmouse_attr_set_helper(struct device *dev, const char *buf, size_t coun
        return retval;
 }
 
-static ssize_t psmouse_attr_show_protocol(struct psmouse *psmouse, char *buf)
+static ssize_t psmouse_show_int_attr(struct psmouse *psmouse, void *offset, char *buf)
+{
+       unsigned long *field = (unsigned long *)((char *)psmouse + (size_t)offset);
+
+       return sprintf(buf, "%lu\n", *field);
+}
+
+static ssize_t psmouse_set_int_attr(struct psmouse *psmouse, void *offset, const char *buf, size_t count)
+{
+       unsigned long *field = (unsigned long *)((char *)psmouse + (size_t)offset);
+       unsigned long value;
+       char *rest;
+
+       value = simple_strtoul(buf, &rest, 10);
+       if (*rest)
+               return -EINVAL;
+
+       *field = value;
+
+       return count;
+}
+
+static ssize_t psmouse_attr_show_protocol(struct psmouse *psmouse, void *data, char *buf)
 {
        return sprintf(buf, "%s\n", psmouse_protocol_by_type(psmouse->type)->name);
 }
 
-static ssize_t psmouse_attr_set_protocol(struct psmouse *psmouse, const char *buf, size_t count)
+static ssize_t psmouse_attr_set_protocol(struct psmouse *psmouse, void *data, const char *buf, size_t count)
 {
        struct serio *serio = psmouse->ps2dev.serio;
        struct psmouse *parent = NULL;
@@ -1166,12 +1221,7 @@ static ssize_t psmouse_attr_set_protocol(struct psmouse *psmouse, const char *bu
        return count;
 }
 
-static ssize_t psmouse_attr_show_rate(struct psmouse *psmouse, char *buf)
-{
-       return sprintf(buf, "%d\n", psmouse->rate);
-}
-
-static ssize_t psmouse_attr_set_rate(struct psmouse *psmouse, const char *buf, size_t count)
+static ssize_t psmouse_attr_set_rate(struct psmouse *psmouse, void *data, const char *buf, size_t count)
 {
        unsigned long value;
        char *rest;
@@ -1184,12 +1234,7 @@ static ssize_t psmouse_attr_set_rate(struct psmouse *psmouse, const char *buf, s
        return count;
 }
 
-static ssize_t psmouse_attr_show_resolution(struct psmouse *psmouse, char *buf)
-{
-       return sprintf(buf, "%d\n", psmouse->resolution);
-}
-
-static ssize_t psmouse_attr_set_resolution(struct psmouse *psmouse, const char *buf, size_t count)
+static ssize_t psmouse_attr_set_resolution(struct psmouse *psmouse, void *data, const char *buf, size_t count)
 {
        unsigned long value;
        char *rest;
@@ -1202,23 +1247,6 @@ static ssize_t psmouse_attr_set_resolution(struct psmouse *psmouse, const char *
        return count;
 }
 
-static ssize_t psmouse_attr_show_resetafter(struct psmouse *psmouse, char *buf)
-{
-       return sprintf(buf, "%d\n", psmouse->resetafter);
-}
-
-static ssize_t psmouse_attr_set_resetafter(struct psmouse *psmouse, const char *buf, size_t count)
-{
-       unsigned long value;
-       char *rest;
-
-       value = simple_strtoul(buf, &rest, 10);
-       if (*rest)
-               return -EINVAL;
-
-       psmouse->resetafter = value;
-       return count;
-}
 
 static int psmouse_set_maxproto(const char *val, struct kernel_param *kp)
 {
@@ -1234,7 +1262,7 @@ static int psmouse_set_maxproto(const char *val, struct kernel_param *kp)
 
        *((unsigned int *)kp->arg) = proto->type;
 
-       return 0;                                       \
+       return 0;
 }
 
 static int psmouse_get_maxproto(char *buffer, struct kernel_param *kp)
index 86691cf43433f182e9c2d7ac0d3f925dab5bb5bb..45d2bd774f00f02cf5644306760b8c29425d364d 100644 (file)
@@ -78,6 +78,7 @@ enum psmouse_type {
        PSMOUSE_SYNAPTICS,
        PSMOUSE_ALPS,
        PSMOUSE_LIFEBOOK,
+       PSMOUSE_TRACKPOINT,
        PSMOUSE_AUTO            /* This one should always be last */
 };
 
@@ -85,24 +86,37 @@ int psmouse_sliced_command(struct psmouse *psmouse, unsigned char command);
 int psmouse_reset(struct psmouse *psmouse);
 void psmouse_set_resolution(struct psmouse *psmouse, unsigned int resolution);
 
-ssize_t psmouse_attr_show_helper(struct device *dev, char *buf,
-                       ssize_t (*handler)(struct psmouse *, char *));
-ssize_t psmouse_attr_set_helper(struct device *dev, const char *buf, size_t count,
-                       ssize_t (*handler)(struct psmouse *, const char *, size_t));
-
-#define PSMOUSE_DEFINE_ATTR(_name)                                             \
-static ssize_t psmouse_attr_show_##_name(struct psmouse *, char *);            \
-static ssize_t psmouse_attr_set_##_name(struct psmouse *, const char *, size_t);\
-static ssize_t psmouse_do_show_##_name(struct device *d, struct device_attribute *attr, char *b)               \
-{                                                                              \
-       return psmouse_attr_show_helper(d, b, psmouse_attr_show_##_name);       \
-}                                                                              \
-static ssize_t psmouse_do_set_##_name(struct device *d, struct device_attribute *attr, const char *b, size_t s)\
-{                                                                              \
-       return psmouse_attr_set_helper(d, b, s, psmouse_attr_set_##_name);      \
-}                                                                              \
-static struct device_attribute psmouse_attr_##_name =                          \
-       __ATTR(_name, S_IWUSR | S_IRUGO,                                        \
-               psmouse_do_show_##_name, psmouse_do_set_##_name);
+
+struct psmouse_attribute {
+       struct device_attribute dattr;
+       void *data;
+       ssize_t (*show)(struct psmouse *psmouse, void *data, char *buf);
+       ssize_t (*set)(struct psmouse *psmouse, void *data,
+                       const char *buf, size_t count);
+};
+#define to_psmouse_attr(a)     container_of((a), struct psmouse_attribute, dattr)
+
+ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
+                                char *buf);
+ssize_t psmouse_attr_set_helper(struct device *dev, struct device_attribute *attr,
+                               const char *buf, size_t count);
+
+#define PSMOUSE_DEFINE_ATTR(_name, _mode, _data, _show, _set)                  \
+static ssize_t _show(struct psmouse *, void *data, char *);                    \
+static ssize_t _set(struct psmouse *, void *data, const char *, size_t);       \
+static struct psmouse_attribute psmouse_attr_##_name = {                       \
+       .dattr  = {                                                             \
+               .attr   = {                                                     \
+                       .name   = __stringify(_name),                           \
+                       .mode   = _mode,                                        \
+                       .owner  = THIS_MODULE,                                  \
+               },                                                              \
+               .show   = psmouse_attr_show_helper,                             \
+               .store  = psmouse_attr_set_helper,                              \
+       },                                                                      \
+       .data   = _data,                                                        \
+       .show   = _show,                                                        \
+       .set    = _set,                                                         \
+}
 
 #endif /* _PSMOUSE_H */
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
new file mode 100644 (file)
index 0000000..b4898d8
--- /dev/null
@@ -0,0 +1,304 @@
+/*
+ * Stephen Evanchik <evanchsa@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Trademarks are the property of their respective owners.
+ */
+
+#include <linux/delay.h>
+#include <linux/serio.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/input.h>
+#include <linux/libps2.h>
+#include <linux/proc_fs.h>
+#include <asm/uaccess.h>
+#include "psmouse.h"
+#include "trackpoint.h"
+
+/*
+ * Device IO: read, write and toggle bit
+ */
+static int trackpoint_read(struct ps2dev *ps2dev, unsigned char loc, unsigned char *results)
+{
+       if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_COMMAND)) ||
+           ps2_command(ps2dev, results, MAKE_PS2_CMD(0, 1, loc))) {
+               return -1;
+       }
+
+       return 0;
+}
+
+static int trackpoint_write(struct ps2dev *ps2dev, unsigned char loc, unsigned char val)
+{
+       if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_COMMAND)) ||
+           ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_WRITE_MEM)) ||
+           ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, loc)) ||
+           ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, val))) {
+               return -1;
+       }
+
+       return 0;
+}
+
+static int trackpoint_toggle_bit(struct ps2dev *ps2dev, unsigned char loc, unsigned char mask)
+{
+       /* Bad things will happen if the loc param isn't in this range */
+       if (loc < 0x20 || loc >= 0x2F)
+               return -1;
+
+       if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_COMMAND)) ||
+           ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_TOGGLE)) ||
+           ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, loc)) ||
+           ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, mask))) {
+               return -1;
+       }
+
+       return 0;
+}
+
+
+/*
+ * Trackpoint-specific attributes
+ */
+struct trackpoint_attr_data {
+       size_t field_offset;
+       unsigned char command;
+       unsigned char mask;
+};
+
+static ssize_t trackpoint_show_int_attr(struct psmouse *psmouse, void *data, char *buf)
+{
+       struct trackpoint_data *tp = psmouse->private;
+       struct trackpoint_attr_data *attr = data;
+       unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset);
+
+       return sprintf(buf, "%u\n", *field);
+}
+
+static ssize_t trackpoint_set_int_attr(struct psmouse *psmouse, void *data,
+                                       const char *buf, size_t count)
+{
+       struct trackpoint_data *tp = psmouse->private;
+       struct trackpoint_attr_data *attr = data;
+       unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset);
+       unsigned long value;
+       char *rest;
+
+       value = simple_strtoul(buf, &rest, 10);
+       if (*rest || value > 255)
+               return -EINVAL;
+
+       *field = value;
+       trackpoint_write(&psmouse->ps2dev, attr->command, value);
+
+       return count;
+}
+
+#define TRACKPOINT_INT_ATTR(_name, _command)                                   \
+       static struct trackpoint_attr_data trackpoint_attr_##_name = {          \
+               .field_offset = offsetof(struct trackpoint_data, _name),        \
+               .command = _command,                                            \
+       };                                                                      \
+       PSMOUSE_DEFINE_ATTR(_name, S_IWUSR | S_IRUGO,                           \
+                           &trackpoint_attr_##_name,                           \
+                           trackpoint_show_int_attr, trackpoint_set_int_attr)
+
+static ssize_t trackpoint_set_bit_attr(struct psmouse *psmouse, void *data,
+                                       const char *buf, size_t count)
+{
+       struct trackpoint_data *tp = psmouse->private;
+       struct trackpoint_attr_data *attr = data;
+       unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset);
+       unsigned long value;
+       char *rest;
+
+       value = simple_strtoul(buf, &rest, 10);
+       if (*rest || value > 1)
+               return -EINVAL;
+
+       if (*field != value) {
+               *field = value;
+               trackpoint_toggle_bit(&psmouse->ps2dev, attr->command, attr->mask);
+       }
+
+       return count;
+}
+
+
+#define TRACKPOINT_BIT_ATTR(_name, _command, _mask)                            \
+       static struct trackpoint_attr_data trackpoint_attr_##_name = {          \
+               .field_offset   = offsetof(struct trackpoint_data, _name),      \
+               .command        = _command,                                     \
+               .mask           = _mask,                                        \
+       };                                                                      \
+       PSMOUSE_DEFINE_ATTR(_name, S_IWUSR | S_IRUGO,                           \
+                           &trackpoint_attr_##_name,                           \
+                           trackpoint_show_int_attr, trackpoint_set_bit_attr)
+
+TRACKPOINT_INT_ATTR(sensitivity, TP_SENS);
+TRACKPOINT_INT_ATTR(speed, TP_SPEED);
+TRACKPOINT_INT_ATTR(inertia, TP_INERTIA);
+TRACKPOINT_INT_ATTR(reach, TP_REACH);
+TRACKPOINT_INT_ATTR(draghys, TP_DRAGHYS);
+TRACKPOINT_INT_ATTR(mindrag, TP_MINDRAG);
+TRACKPOINT_INT_ATTR(thresh, TP_THRESH);
+TRACKPOINT_INT_ATTR(upthresh, TP_UP_THRESH);
+TRACKPOINT_INT_ATTR(ztime, TP_Z_TIME);
+TRACKPOINT_INT_ATTR(jenks, TP_JENKS_CURV);
+
+TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON);
+TRACKPOINT_BIT_ATTR(skipback, TP_TOGGLE_SKIPBACK, TP_MASK_SKIPBACK);
+TRACKPOINT_BIT_ATTR(ext_dev, TP_TOGGLE_EXT_DEV, TP_MASK_EXT_DEV);
+
+static struct attribute *trackpoint_attrs[] = {
+       &psmouse_attr_sensitivity.dattr.attr,
+       &psmouse_attr_speed.dattr.attr,
+       &psmouse_attr_inertia.dattr.attr,
+       &psmouse_attr_reach.dattr.attr,
+       &psmouse_attr_draghys.dattr.attr,
+       &psmouse_attr_mindrag.dattr.attr,
+       &psmouse_attr_thresh.dattr.attr,
+       &psmouse_attr_upthresh.dattr.attr,
+       &psmouse_attr_ztime.dattr.attr,
+       &psmouse_attr_jenks.dattr.attr,
+       &psmouse_attr_press_to_select.dattr.attr,
+       &psmouse_attr_skipback.dattr.attr,
+       &psmouse_attr_ext_dev.dattr.attr,
+       NULL
+};
+
+static struct attribute_group trackpoint_attr_group = {
+       .attrs = trackpoint_attrs,
+};
+
+static void trackpoint_disconnect(struct psmouse *psmouse)
+{
+       sysfs_remove_group(&psmouse->ps2dev.serio->dev.kobj, &trackpoint_attr_group);
+
+       kfree(psmouse->private);
+       psmouse->private = NULL;
+}
+
+static int trackpoint_sync(struct psmouse *psmouse)
+{
+       unsigned char toggle;
+       struct trackpoint_data *tp = psmouse->private;
+
+       if (!tp)
+               return -1;
+
+       /* Disable features that may make device unusable with this driver */
+       trackpoint_read(&psmouse->ps2dev, TP_TOGGLE_TWOHAND, &toggle);
+       if (toggle & TP_MASK_TWOHAND)
+               trackpoint_toggle_bit(&psmouse->ps2dev, TP_TOGGLE_TWOHAND, TP_MASK_TWOHAND);
+
+       trackpoint_read(&psmouse->ps2dev, TP_TOGGLE_SOURCE_TAG, &toggle);
+       if (toggle & TP_MASK_SOURCE_TAG)
+               trackpoint_toggle_bit(&psmouse->ps2dev, TP_TOGGLE_SOURCE_TAG, TP_MASK_SOURCE_TAG);
+
+       trackpoint_read(&psmouse->ps2dev, TP_TOGGLE_MB, &toggle);
+       if (toggle & TP_MASK_MB)
+               trackpoint_toggle_bit(&psmouse->ps2dev, TP_TOGGLE_MB, TP_MASK_MB);
+
+       /* Push the config to the device */
+       trackpoint_write(&psmouse->ps2dev, TP_SENS, tp->sensitivity);
+       trackpoint_write(&psmouse->ps2dev, TP_INERTIA, tp->inertia);
+       trackpoint_write(&psmouse->ps2dev, TP_SPEED, tp->speed);
+
+       trackpoint_write(&psmouse->ps2dev, TP_REACH, tp->reach);
+       trackpoint_write(&psmouse->ps2dev, TP_DRAGHYS, tp->draghys);
+       trackpoint_write(&psmouse->ps2dev, TP_MINDRAG, tp->mindrag);
+
+       trackpoint_write(&psmouse->ps2dev, TP_THRESH, tp->thresh);
+       trackpoint_write(&psmouse->ps2dev, TP_UP_THRESH, tp->upthresh);
+
+       trackpoint_write(&psmouse->ps2dev, TP_Z_TIME, tp->ztime);
+       trackpoint_write(&psmouse->ps2dev, TP_JENKS_CURV, tp->jenks);
+
+       trackpoint_read(&psmouse->ps2dev, TP_TOGGLE_PTSON, &toggle);
+       if (((toggle & TP_MASK_PTSON) == TP_MASK_PTSON) != tp->press_to_select)
+                trackpoint_toggle_bit(&psmouse->ps2dev, TP_TOGGLE_PTSON, TP_MASK_PTSON);
+
+       trackpoint_read(&psmouse->ps2dev, TP_TOGGLE_SKIPBACK, &toggle);
+       if (((toggle & TP_MASK_SKIPBACK) == TP_MASK_SKIPBACK) != tp->skipback)
+               trackpoint_toggle_bit(&psmouse->ps2dev, TP_TOGGLE_SKIPBACK, TP_MASK_SKIPBACK);
+
+       trackpoint_read(&psmouse->ps2dev, TP_TOGGLE_EXT_DEV, &toggle);
+       if (((toggle & TP_MASK_EXT_DEV) == TP_MASK_EXT_DEV) != tp->ext_dev)
+               trackpoint_toggle_bit(&psmouse->ps2dev, TP_TOGGLE_EXT_DEV, TP_MASK_EXT_DEV);
+
+       return 0;
+}
+
+static void trackpoint_defaults(struct trackpoint_data *tp)
+{
+       tp->press_to_select = TP_DEF_PTSON;
+       tp->sensitivity = TP_DEF_SENS;
+       tp->speed = TP_DEF_SPEED;
+       tp->reach = TP_DEF_REACH;
+
+       tp->draghys = TP_DEF_DRAGHYS;
+       tp->mindrag = TP_DEF_MINDRAG;
+
+       tp->thresh = TP_DEF_THRESH;
+       tp->upthresh = TP_DEF_UP_THRESH;
+
+       tp->ztime = TP_DEF_Z_TIME;
+       tp->jenks = TP_DEF_JENKS_CURV;
+
+       tp->inertia = TP_DEF_INERTIA;
+       tp->skipback = TP_DEF_SKIPBACK;
+       tp->ext_dev = TP_DEF_EXT_DEV;
+}
+
+int trackpoint_detect(struct psmouse *psmouse, int set_properties)
+{
+       struct trackpoint_data *priv;
+       struct ps2dev *ps2dev = &psmouse->ps2dev;
+       unsigned char firmware_id;
+       unsigned char button_info;
+       unsigned char param[2];
+
+       param[0] = param[1] = 0;
+
+       if (ps2_command(ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID)))
+               return -1;
+
+       if (param[0] != TP_MAGIC_IDENT)
+               return -1;
+
+       if (!set_properties)
+               return 0;
+
+       firmware_id = param[1];
+
+       if (trackpoint_read(&psmouse->ps2dev, TP_EXT_BTN, &button_info)) {
+               printk(KERN_WARNING "trackpoint.c: failed to get extended button data\n");
+               button_info = 0;
+       }
+
+       psmouse->private = priv = kcalloc(1, sizeof(struct trackpoint_data), GFP_KERNEL);
+       if (!priv)
+               return -1;
+
+       psmouse->vendor = "IBM";
+       psmouse->name = "TrackPoint";
+
+       psmouse->reconnect = trackpoint_sync;
+       psmouse->disconnect = trackpoint_disconnect;
+
+       trackpoint_defaults(priv);
+       trackpoint_sync(psmouse);
+
+       sysfs_create_group(&ps2dev->serio->dev.kobj, &trackpoint_attr_group);
+
+       printk(KERN_INFO "IBM TrackPoint firmware: 0x%02x, buttons: %d/%d\n",
+               firmware_id, (button_info & 0xf0) >> 4, button_info & 0x0f);
+
+       return 0;
+}
+
diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
new file mode 100644 (file)
index 0000000..9857d8b
--- /dev/null
@@ -0,0 +1,147 @@
+/*
+ * IBM TrackPoint PS/2 mouse driver
+ *
+ * Stephen Evanchik <evanchsa@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _TRACKPOINT_H
+#define _TRACKPOINT_H
+
+/*
+ * These constants are from the TrackPoint System
+ * Engineering documentation Version 4 from IBM Watson
+ * research:
+ *     http://wwwcssrv.almaden.ibm.com/trackpoint/download.html
+ */
+
+#define TP_COMMAND             0xE2    /* Commands start with this */
+
+#define TP_READ_ID             0xE1    /* Sent for device identification */
+#define TP_MAGIC_IDENT         0x01    /* Sent after a TP_READ_ID followed */
+                                       /* by the firmware ID */
+
+
+/*
+ * Commands
+ */
+#define TP_RECALIB             0x51    /* Recalibrate */
+#define TP_POWER_DOWN          0x44    /* Can only be undone through HW reset */
+#define TP_EXT_DEV             0x21    /* Determines if external device is connected (RO) */
+#define TP_EXT_BTN             0x4B    /* Read extended button status */
+#define TP_POR                 0x7F    /* Execute Power on Reset */
+#define TP_POR_RESULTS         0x25    /* Read Power on Self test results */
+#define TP_DISABLE_EXT         0x40    /* Disable external pointing device */
+#define TP_ENABLE_EXT          0x41    /* Enable external pointing device */
+
+/*
+ * Mode manipulation
+ */
+#define TP_SET_SOFT_TRANS      0x4E    /* Set mode */
+#define TP_CANCEL_SOFT_TRANS   0xB9    /* Cancel mode */
+#define TP_SET_HARD_TRANS      0x45    /* Mode can only be set */
+
+
+/*
+ * Register oriented commands/properties
+ */
+#define TP_WRITE_MEM           0x81
+#define TP_READ_MEM            0x80    /* Not used in this implementation */
+
+/*
+ * RAM Locations for properties
+ */
+#define TP_SENS                        0x4A    /* Sensitivity */
+#define TP_MB                  0x4C    /* Read Middle Button Status (RO) */
+#define TP_INERTIA             0x4D    /* Negative Inertia */
+#define TP_SPEED               0x60    /* Speed of TP Cursor */
+#define TP_REACH               0x57    /* Backup for Z-axis press */
+#define TP_DRAGHYS             0x58    /* Drag Hysteresis */
+                                       /* (how hard it is to drag */
+                                       /* with Z-axis pressed) */
+
+#define TP_MINDRAG             0x59    /* Minimum amount of force needed */
+                                       /* to trigger dragging */
+
+#define TP_THRESH              0x5C    /* Minimum value for a Z-axis press */
+#define TP_UP_THRESH           0x5A    /* Used to generate a 'click' on Z-axis */
+#define TP_Z_TIME              0x5E    /* How sharp of a press */
+#define TP_JENKS_CURV          0x5D    /* Minimum curvature for double click */
+
+/*
+ * Toggling Flag bits
+ */
+#define TP_TOGGLE              0x47    /* Toggle command */
+
+#define TP_TOGGLE_MB           0x23    /* Disable/Enable Middle Button */
+#define TP_MASK_MB                     0x01
+#define TP_TOGGLE_EXT_DEV      0x23    /* Toggle external device */
+#define TP_MASK_EXT_DEV                        0x02
+#define TP_TOGGLE_DRIFT                0x23    /* Drift Correction */
+#define TP_MASK_DRIFT                  0x80
+#define TP_TOGGLE_BURST                0x28    /* Burst Mode */
+#define TP_MASK_BURST                  0x80
+#define TP_TOGGLE_PTSON                0x2C    /* Press to Select */
+#define TP_MASK_PTSON                  0x01
+#define TP_TOGGLE_HARD_TRANS   0x2C    /* Alternate method to set Hard Transparency */
+#define TP_MASK_HARD_TRANS             0x80
+#define TP_TOGGLE_TWOHAND      0x2D    /* Two handed */
+#define TP_MASK_TWOHAND                        0x01
+#define TP_TOGGLE_STICKY_TWO   0x2D    /* Sticky two handed */
+#define TP_MASK_STICKY_TWO             0x04
+#define TP_TOGGLE_SKIPBACK     0x2D    /* Suppress movement after drag release */
+#define TP_MASK_SKIPBACK               0x08
+#define TP_TOGGLE_SOURCE_TAG   0x20    /* Bit 3 of the first packet will be set to
+                                          the origin of the packet (external or TP) */
+#define TP_MASK_SOURCE_TAG             0x80
+#define TP_TOGGLE_EXT_TAG      0x22    /* Bit 3 of the first packet coming from the
+                                          external device will be forced to 1 */
+#define TP_MASK_EXT_TAG                        0x04
+
+
+/* Power on Self Test Results */
+#define TP_POR_SUCCESS         0x3B
+
+/*
+ * Default power on values
+ */
+#define TP_DEF_SENS            0x80
+#define TP_DEF_INERTIA         0x06
+#define TP_DEF_SPEED           0x61
+#define TP_DEF_REACH           0x0A
+
+#define TP_DEF_DRAGHYS         0xFF
+#define TP_DEF_MINDRAG         0x14
+
+#define TP_DEF_THRESH          0x08
+#define TP_DEF_UP_THRESH       0xFF
+#define TP_DEF_Z_TIME          0x26
+#define TP_DEF_JENKS_CURV      0x87
+
+/* Toggles */
+#define TP_DEF_MB              0x00
+#define TP_DEF_PTSON           0x00
+#define TP_DEF_SKIPBACK                0x00
+#define TP_DEF_EXT_DEV         0x01
+
+#define MAKE_PS2_CMD(params, results, cmd) ((params<<12) | (results<<8) | (cmd))
+
+struct trackpoint_data
+{
+       unsigned char sensitivity, speed, inertia, reach;
+       unsigned char draghys, mindrag;
+       unsigned char thresh, upthresh;
+       unsigned char ztime, jenks;
+
+       unsigned char press_to_select;
+       unsigned char skipback;
+
+       unsigned char ext_dev;
+};
+
+extern int trackpoint_detect(struct psmouse *psmouse, int set_properties);
+
+#endif /* _TRACKPOINT_H */
index c9e633d21d9035b4a91b1da30a04f1ae8d010890..9a92216442501d066ec4b41476ad37b41ffbfd03 100644 (file)
@@ -69,16 +69,16 @@ static inline int i8042_platform_init(void)
  */
 #if !defined(__sh__) && !defined(__alpha__) && !defined(__mips__) && !defined(CONFIG_PPC64)
        if (!request_region(I8042_DATA_REG, 16, "i8042"))
-               return -1;
+               return -EBUSY;
 #endif
 
         i8042_reset = 1;
 
 #if defined(CONFIG_PPC64)
        if (check_legacy_ioport(I8042_DATA_REG))
-               return -1;
+               return -EBUSY;
        if (!request_region(I8042_DATA_REG, 16, "i8042"))
-               return -1;
+               return -EBUSY;
 #endif
        return 0;
 }
index 863b9c95fbb86964713ff52d075f6f3d7ede377e..ee1ad27d6ed06ef70370f3096352b37acb1c1368 100644 (file)
@@ -58,7 +58,7 @@ static inline int i8042_platform_init(void)
 #if 0
        /* XXX sgi_kh is a virtual address */
        if (!request_mem_region(sgi_kh, sizeof(struct hpc_keyb), "i8042"))
-               return 1;
+               return -EBUSY;
 #endif
 
        i8042_reset = 1;
index 5c20ab131488e038228f642e5c7e0705834257e9..13fd7108eb2836ad5eb7625fab88c1d2dbf425de 100644 (file)
@@ -53,7 +53,7 @@ static inline int i8042_platform_init(void)
 #if 0
        /* XXX JAZZ_KEYBOARD_ADDRESS is a virtual address */
        if (!request_mem_region(JAZZ_KEYBOARD_ADDRESS, 2, "i8042"))
-               return 1;
+               return -EBUSY;
 #endif
 
        return 0;
index da2a19812485208e72d4a5c55c60c3f0f2ed90af..ed9446f6d7e3332cdf8f9ec4f6ec0f4b1fb6784c 100644 (file)
@@ -48,10 +48,10 @@ static inline void i8042_write_command(int val)
 #define OBP_PS2MS_NAME1                "kdmouse"
 #define OBP_PS2MS_NAME2                "mouse"
 
-static int i8042_platform_init(void)
+static int __init i8042_platform_init(void)
 {
 #ifndef CONFIG_PCI
-       return -1;
+       return -ENODEV;
 #else
        char prop[128];
        int len;
@@ -59,14 +59,14 @@ static int i8042_platform_init(void)
        len = prom_getproperty(prom_root_node, "name", prop, sizeof(prop));
        if (len < 0) {
                printk("i8042: Cannot get name property of root OBP node.\n");
-               return -1;
+               return -ENODEV;
        }
        if (strncmp(prop, "SUNW,JavaStation-1", len) == 0) {
                /* Hardcoded values for MrCoffee.  */
                i8042_kbd_irq = i8042_aux_irq = 13 | 0x20;
                kbd_iobase = ioremap(0x71300060, 8);
                if (!kbd_iobase)
-                       return -1;
+                       return -ENODEV;
        } else {
                struct linux_ebus *ebus;
                struct linux_ebus_device *edev;
@@ -78,7 +78,7 @@ static int i8042_platform_init(void)
                                        goto edev_found;
                        }
                }
-               return -1;
+               return -ENODEV;
 
        edev_found:
                for_each_edevchild(edev, child) {
@@ -96,7 +96,7 @@ static int i8042_platform_init(void)
                    i8042_aux_irq == -1) {
                        printk("i8042: Error, 8042 device lacks both kbd and "
                               "mouse nodes.\n");
-                       return -1;
+                       return -ENODEV;
                }
        }
 
index 03877c84e6ff97487b6585a5efa8c953619cd39d..273bb3b08cfa254146cf1b6362bfc8b1df1e224e 100644 (file)
@@ -137,6 +137,13 @@ static struct dmi_system_id __initdata i8042_dmi_nomux_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T3010"),
                },
        },
+       {
+               .ident = "Fujitsu-Siemens Lifebook E4010",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E4010"),
+               },
+       },
        {
                .ident = "Toshiba P10",
                .matches = {
@@ -256,9 +263,10 @@ static void i8042_pnp_exit(void)
        }
 }
 
-static int i8042_pnp_init(void)
+static int __init i8042_pnp_init(void)
 {
-       int result_kbd, result_aux;
+       int result_kbd = 0, result_aux = 0;
+       char kbd_irq_str[4] = { 0 }, aux_irq_str[4] = { 0 };
 
        if (i8042_nopnp) {
                printk(KERN_INFO "i8042: PNP detection disabled\n");
@@ -267,6 +275,7 @@ static int i8042_pnp_init(void)
 
        if ((result_kbd = pnp_register_driver(&i8042_pnp_kbd_driver)) >= 0)
                i8042_pnp_kbd_registered = 1;
+
        if ((result_aux = pnp_register_driver(&i8042_pnp_aux_driver)) >= 0)
                i8042_pnp_aux_registered = 1;
 
@@ -280,6 +289,27 @@ static int i8042_pnp_init(void)
 #endif
        }
 
+       if (result_kbd > 0)
+               snprintf(kbd_irq_str, sizeof(kbd_irq_str),
+                       "%d", i8042_pnp_kbd_irq);
+       if (result_aux > 0)
+               snprintf(aux_irq_str, sizeof(aux_irq_str),
+                       "%d", i8042_pnp_aux_irq);
+
+       printk(KERN_INFO "PNP: PS/2 Controller [%s%s%s] at %#x,%#x irq %s%s%s\n",
+               i8042_pnp_kbd_name, (result_kbd > 0 && result_aux > 0) ? "," : "",
+               i8042_pnp_aux_name,
+               i8042_pnp_data_reg, i8042_pnp_command_reg,
+               kbd_irq_str, (result_kbd > 0 && result_aux > 0) ? "," : "",
+               aux_irq_str);
+
+#if defined(__ia64__)
+       if (result_kbd <= 0)
+               i8042_nokbd = 1;
+       if (result_aux <= 0)
+               i8042_noaux = 1;
+#endif
+
        if (((i8042_pnp_data_reg & ~0xf) == (i8042_data_reg & ~0xf) &&
              i8042_pnp_data_reg != i8042_data_reg) || !i8042_pnp_data_reg) {
                printk(KERN_WARNING "PNP: PS/2 controller has invalid data port %#x; using default %#x\n",
@@ -294,53 +324,47 @@ static int i8042_pnp_init(void)
                i8042_pnp_command_reg = i8042_command_reg;
        }
 
-       if (!i8042_pnp_kbd_irq) {
-               printk(KERN_WARNING "PNP: PS/2 controller doesn't have KBD irq; using default %#x\n", i8042_kbd_irq);
+       if (!i8042_nokbd && !i8042_pnp_kbd_irq) {
+               printk(KERN_WARNING "PNP: PS/2 controller doesn't have KBD irq; using default %d\n", i8042_kbd_irq);
                i8042_pnp_kbd_irq = i8042_kbd_irq;
        }
 
-       if (!i8042_pnp_aux_irq) {
-               printk(KERN_WARNING "PNP: PS/2 controller doesn't have AUX irq; using default %#x\n", i8042_aux_irq);
+       if (!i8042_noaux && !i8042_pnp_aux_irq) {
+               printk(KERN_WARNING "PNP: PS/2 controller doesn't have AUX irq; using default %d\n", i8042_aux_irq);
                i8042_pnp_aux_irq = i8042_aux_irq;
        }
 
-#if defined(__ia64__)
-       if (result_aux <= 0)
-               i8042_noaux = 1;
-#endif
-
        i8042_data_reg = i8042_pnp_data_reg;
        i8042_command_reg = i8042_pnp_command_reg;
        i8042_kbd_irq = i8042_pnp_kbd_irq;
        i8042_aux_irq = i8042_pnp_aux_irq;
 
-       printk(KERN_INFO "PNP: PS/2 Controller [%s%s%s] at %#x,%#x irq %d%s%d\n",
-               i8042_pnp_kbd_name, (result_kbd > 0 && result_aux > 0) ? "," : "", i8042_pnp_aux_name,
-               i8042_data_reg, i8042_command_reg, i8042_kbd_irq,
-               (result_aux > 0) ? "," : "", i8042_aux_irq);
-
        return 0;
 }
 
+#else
+static inline int i8042_pnp_init(void) { return 0; }
+static inline void i8042_pnp_exit(void) { }
 #endif
 
-static inline int i8042_platform_init(void)
+static int __init i8042_platform_init(void)
 {
+       int retval;
+
 /*
  * On ix86 platforms touching the i8042 data register region can do really
  * bad things. Because of this the region is always reserved on ix86 boxes.
  *
  *     if (!request_region(I8042_DATA_REG, 16, "i8042"))
- *             return -1;
+ *             return -EBUSY;
  */
 
        i8042_kbd_irq = I8042_MAP_IRQ(1);
        i8042_aux_irq = I8042_MAP_IRQ(12);
 
-#ifdef CONFIG_PNP
-       if (i8042_pnp_init())
-               return -1;
-#endif
+       retval = i8042_pnp_init();
+       if (retval)
+               return retval;
 
 #if defined(__ia64__)
         i8042_reset = 1;
@@ -354,14 +378,12 @@ static inline int i8042_platform_init(void)
                i8042_nomux = 1;
 #endif
 
-       return 0;
+       return retval;
 }
 
 static inline void i8042_platform_exit(void)
 {
-#ifdef CONFIG_PNP
        i8042_pnp_exit();
-#endif
 }
 
 #endif /* _I8042_X86IA64IO_H */
index 708a1d3beab975100bd6dbf4f88754c9d99e3f2a..40d451ce07ffecac8052e973e126ae112e256505 100644 (file)
@@ -27,6 +27,10 @@ MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>");
 MODULE_DESCRIPTION("i8042 keyboard and mouse controller driver");
 MODULE_LICENSE("GPL");
 
+static unsigned int i8042_nokbd;
+module_param_named(nokbd, i8042_nokbd, bool, 0);
+MODULE_PARM_DESC(nokbd, "Do not probe or use KBD port.");
+
 static unsigned int i8042_noaux;
 module_param_named(noaux, i8042_noaux, bool, 0);
 MODULE_PARM_DESC(noaux, "Do not probe or use AUX (mouse) port.");
@@ -338,10 +342,10 @@ static int i8042_open(struct serio *serio)
 
        return 0;
 
-activate_fail:
+ activate_fail:
        free_irq(port->irq, i8042_request_irq_cookie);
 
-irq_fail:
+ irq_fail:
        serio_unregister_port_delayed(serio);
 
        return -1;
@@ -485,7 +489,7 @@ static irqreturn_t i8042_interrupt(int irq, void *dev_id, struct pt_regs *regs)
                serio_interrupt(port->serio, data, dfl, regs);
 
        ret = 1;
-out:
+ out:
        return IRQ_RETVAL(ret);
 }
 
@@ -552,7 +556,7 @@ static int i8042_enable_mux_ports(void)
  * Enable all muxed ports.
  */
 
-       for (i = 0; i < 4; i++) {
+       for (i = 0; i < I8042_NUM_MUX_PORTS; i++) {
                i8042_command(&param, I8042_CMD_MUX_PFX + i);
                i8042_command(&param, I8042_CMD_AUX_ENABLE);
        }
@@ -682,7 +686,7 @@ static int __init i8042_port_register(struct i8042_port *port)
                kfree(port->serio);
                port->serio = NULL;
                i8042_ctr |= port->disable;
-               return -1;
+               return -EIO;
        }
 
        printk(KERN_INFO "serio: i8042 %s port at %#lx,%#lx irq %d\n",
@@ -977,85 +981,88 @@ static struct device_driver i8042_driver = {
        .shutdown       = i8042_shutdown,
 };
 
-static void __init i8042_create_kbd_port(void)
+static int __init i8042_create_kbd_port(void)
 {
        struct serio *serio;
        struct i8042_port *port = &i8042_ports[I8042_KBD_PORT_NO];
 
-       serio = kmalloc(sizeof(struct serio), GFP_KERNEL);
-       if (serio) {
-               memset(serio, 0, sizeof(struct serio));
-               serio->id.type          = i8042_direct ? SERIO_8042 : SERIO_8042_XL;
-               serio->write            = i8042_dumbkbd ? NULL : i8042_kbd_write;
-               serio->open             = i8042_open;
-               serio->close            = i8042_close;
-               serio->start            = i8042_start;
-               serio->stop             = i8042_stop;
-               serio->port_data        = port;
-               serio->dev.parent       = &i8042_platform_device->dev;
-               strlcpy(serio->name, "i8042 Kbd Port", sizeof(serio->name));
-               strlcpy(serio->phys, I8042_KBD_PHYS_DESC, sizeof(serio->phys));
-
-               port->serio = serio;
-               i8042_port_register(port);
-       }
+       serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
+       if (!serio)
+               return -ENOMEM;
+
+       serio->id.type          = i8042_direct ? SERIO_8042 : SERIO_8042_XL;
+       serio->write            = i8042_dumbkbd ? NULL : i8042_kbd_write;
+       serio->open             = i8042_open;
+       serio->close            = i8042_close;
+       serio->start            = i8042_start;
+       serio->stop             = i8042_stop;
+       serio->port_data        = port;
+       serio->dev.parent       = &i8042_platform_device->dev;
+       strlcpy(serio->name, "i8042 Kbd Port", sizeof(serio->name));
+       strlcpy(serio->phys, I8042_KBD_PHYS_DESC, sizeof(serio->phys));
+
+       port->serio = serio;
+
+       return i8042_port_register(port);
 }
 
-static void __init i8042_create_aux_port(void)
+static int __init i8042_create_aux_port(void)
 {
        struct serio *serio;
        struct i8042_port *port = &i8042_ports[I8042_AUX_PORT_NO];
 
-       serio = kmalloc(sizeof(struct serio), GFP_KERNEL);
-       if (serio) {
-               memset(serio, 0, sizeof(struct serio));
-               serio->id.type          = SERIO_8042;
-               serio->write            = i8042_aux_write;
-               serio->open             = i8042_open;
-               serio->close            = i8042_close;
-               serio->start            = i8042_start;
-               serio->stop             = i8042_stop;
-               serio->port_data        = port;
-               serio->dev.parent       = &i8042_platform_device->dev;
-               strlcpy(serio->name, "i8042 Aux Port", sizeof(serio->name));
-               strlcpy(serio->phys, I8042_AUX_PHYS_DESC, sizeof(serio->phys));
-
-               port->serio = serio;
-               i8042_port_register(port);
-       }
+       serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
+       if (!serio)
+               return -ENOMEM;
+
+       serio->id.type          = SERIO_8042;
+       serio->write            = i8042_aux_write;
+       serio->open             = i8042_open;
+       serio->close            = i8042_close;
+       serio->start            = i8042_start;
+       serio->stop             = i8042_stop;
+       serio->port_data        = port;
+       serio->dev.parent       = &i8042_platform_device->dev;
+       strlcpy(serio->name, "i8042 Aux Port", sizeof(serio->name));
+       strlcpy(serio->phys, I8042_AUX_PHYS_DESC, sizeof(serio->phys));
+
+       port->serio = serio;
+
+       return i8042_port_register(port);
 }
 
-static void __init i8042_create_mux_port(int index)
+static int __init i8042_create_mux_port(int index)
 {
        struct serio *serio;
        struct i8042_port *port = &i8042_ports[I8042_MUX_PORT_NO + index];
 
-       serio = kmalloc(sizeof(struct serio), GFP_KERNEL);
-       if (serio) {
-               memset(serio, 0, sizeof(struct serio));
-               serio->id.type          = SERIO_8042;
-               serio->write            = i8042_aux_write;
-               serio->open             = i8042_open;
-               serio->close            = i8042_close;
-               serio->start            = i8042_start;
-               serio->stop             = i8042_stop;
-               serio->port_data        = port;
-               serio->dev.parent       = &i8042_platform_device->dev;
-               snprintf(serio->name, sizeof(serio->name), "i8042 Aux-%d Port", index);
-               snprintf(serio->phys, sizeof(serio->phys), I8042_MUX_PHYS_DESC, index + 1);
-
-               *port = i8042_ports[I8042_AUX_PORT_NO];
-               port->exists = 0;
-               snprintf(port->name, sizeof(port->name), "AUX%d", index);
-               port->mux = index;
-               port->serio = serio;
-               i8042_port_register(port);
-       }
+       serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
+       if (!serio)
+               return -ENOMEM;
+
+       serio->id.type          = SERIO_8042;
+       serio->write            = i8042_aux_write;
+       serio->open             = i8042_open;
+       serio->close            = i8042_close;
+       serio->start            = i8042_start;
+       serio->stop             = i8042_stop;
+       serio->port_data        = port;
+       serio->dev.parent       = &i8042_platform_device->dev;
+       snprintf(serio->name, sizeof(serio->name), "i8042 Aux-%d Port", index);
+       snprintf(serio->phys, sizeof(serio->phys), I8042_MUX_PHYS_DESC, index + 1);
+
+       *port = i8042_ports[I8042_AUX_PORT_NO];
+       port->exists = 0;
+       snprintf(port->name, sizeof(port->name), "AUX%d", index);
+       port->mux = index;
+       port->serio = serio;
+
+       return i8042_port_register(port);
 }
 
 static int __init i8042_init(void)
 {
-       int i;
+       int i, have_ports = 0;
        int err;
 
        dbg_init();
@@ -1063,43 +1070,73 @@ static int __init i8042_init(void)
        init_timer(&i8042_timer);
        i8042_timer.function = i8042_timer_func;
 
-       if (i8042_platform_init())
-               return -EBUSY;
+       err = i8042_platform_init();
+       if (err)
+               return err;
 
        i8042_ports[I8042_AUX_PORT_NO].irq = I8042_AUX_IRQ;
        i8042_ports[I8042_KBD_PORT_NO].irq = I8042_KBD_IRQ;
 
        if (i8042_controller_init()) {
-               i8042_platform_exit();
-               return -ENODEV;
+               err = -ENODEV;
+               goto err_platform_exit;
        }
 
        err = driver_register(&i8042_driver);
-       if (err) {
-               i8042_platform_exit();
-               return err;
-       }
+       if (err)
+               goto err_controller_cleanup;
 
        i8042_platform_device = platform_device_register_simple("i8042", -1, NULL, 0);
        if (IS_ERR(i8042_platform_device)) {
-               driver_unregister(&i8042_driver);
-               i8042_platform_exit();
-               return PTR_ERR(i8042_platform_device);
+               err = PTR_ERR(i8042_platform_device);
+               goto err_unregister_driver;
        }
 
        if (!i8042_noaux && !i8042_check_aux()) {
-               if (!i8042_nomux && !i8042_check_mux())
-                       for (i = 0; i < I8042_NUM_MUX_PORTS; i++)
-                               i8042_create_mux_port(i);
-               else
-                       i8042_create_aux_port();
+               if (!i8042_nomux && !i8042_check_mux()) {
+                       for (i = 0; i < I8042_NUM_MUX_PORTS; i++) {
+                               err = i8042_create_mux_port(i);
+                               if (err)
+                                       goto err_unregister_ports;
+                       }
+               } else {
+                       err = i8042_create_aux_port();
+                       if (err)
+                               goto err_unregister_ports;
+               }
+               have_ports = 1;
        }
 
-       i8042_create_kbd_port();
+       if (!i8042_nokbd) {
+               err = i8042_create_kbd_port();
+               if (err)
+                       goto err_unregister_ports;
+               have_ports = 1;
+       }
+
+       if (!have_ports) {
+               err = -ENODEV;
+               goto err_unregister_device;
+       }
 
        mod_timer(&i8042_timer, jiffies + I8042_POLL_PERIOD);
 
        return 0;
+
+ err_unregister_ports:
+       for (i = 0; i < I8042_NUM_PORTS; i++)
+               if (i8042_ports[i].serio)
+                       serio_unregister_port(i8042_ports[i].serio);
+ err_unregister_device:
+       platform_device_unregister(i8042_platform_device);
+ err_unregister_driver:
+       driver_unregister(&i8042_driver);
+ err_controller_cleanup:
+       i8042_controller_cleanup();
+ err_platform_exit:
+       i8042_platform_exit();
+
+       return err;
 }
 
 static void __exit i8042_exit(void)
index baf4bcad9bf93a825c34941ea57e9f244b1d5bff..0afe442db3b0011568fc96f5f5c2d2baae591342 100644 (file)
@@ -283,23 +283,19 @@ static void bsd_free (void *state)
                /*
                 * Release the dictionary
                 */
-               if (db->dict) {
-                       vfree (db->dict);
-                       db->dict = NULL;
-               }
+               vfree(db->dict);
+               db->dict = NULL;
 
                /*
                 * Release the string buffer
                 */
-               if (db->lens) {
-                       vfree (db->lens);
-                       db->lens = NULL;
-               }
+               vfree(db->lens);
+               db->lens = NULL;
 
                /*
                 * Finally release the structure itself.
                 */
-               kfree (db);
+               kfree(db);
        }
 }
 
index eebcb0b97f0e9b6f05d0df1bfa12633285dd9f37..8a7d54a5c97d6634525d31e536aa3af098087a5a 100644 (file)
@@ -1953,7 +1953,8 @@ isdn_add_channels(isdn_driver_t *d, int drvidx, int n, int adding)
                kfree(d->rcvcount);
        if (!(d->rcvcount = kmalloc(sizeof(int) * m, GFP_ATOMIC))) {
                printk(KERN_WARNING "register_isdn: Could not alloc rcvcount\n");
-               if (!adding) kfree(d->rcverr);
+               if (!adding)
+                       kfree(d->rcverr);
                return -1;
        }
        memset((char *) d->rcvcount, 0, sizeof(int) * m);
index 17212b4201a156aa7d17040ebeabb339b1af0f42..cc07bbebbb1688156db22c88d746ae54a18b41ee 100644 (file)
@@ -568,12 +568,9 @@ int dm_create_persistent(struct exception_store *store, uint32_t chunk_size)
 
       bad:
        dm_io_put(sectors_to_pages(chunk_size));
-       if (ps) {
-               if (ps->area)
-                       free_area(ps);
-
-               kfree(ps);
-       }
+       if (ps && ps->area)
+               free_area(ps);
+       kfree(ps);
        return r;
 }
 
index 1554b924fbb9d891db08363f6948a129c5e5dd73..2897df90df44856df8d769bbc4f305543425aaf6 100644 (file)
@@ -74,7 +74,7 @@ static DEFINE_SPINLOCK(pers_lock);
  * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
  * is 1000 KB/sec, so the extra system load does not show up that much.
  * Increase it if you want to have more _guaranteed_ speed. Note that
- * the RAID driver will use the maximum available bandwith if the IO
+ * the RAID driver will use the maximum available bandwidth if the IO
  * subsystem is idle. There is also an 'absolute maximum' reconstruction
  * speed limit - in case reconstruction slows down your system despite
  * idle IO detection.
@@ -3616,7 +3616,7 @@ static void md_do_sync(mddev_t *mddev)
        printk(KERN_INFO "md: syncing RAID array %s\n", mdname(mddev));
        printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:"
                " %d KB/sec/disc.\n", sysctl_speed_limit_min);
-       printk(KERN_INFO "md: using maximum available idle IO bandwith "
+       printk(KERN_INFO "md: using maximum available idle IO bandwidth "
               "(but not more than %d KB/sec) for reconstruction.\n",
               sysctl_speed_limit_max);
 
index cd5828b5e9e346ad2199ef18d542f31ebc0413ec..206cc2f61f26154bb3f5e1a6b62e888ee74e8065 100644 (file)
@@ -168,10 +168,8 @@ void saa7146_pgtable_free(struct pci_dev *pci, struct saa7146_pgtable *pt)
                return;
        pci_free_consistent(pci, pt->size, pt->cpu, pt->dma);
        pt->cpu = NULL;
-       if (NULL != pt->slist) {
-               kfree(pt->slist);
-               pt->slist = NULL;
-       }
+       kfree(pt->slist);
+       pt->slist = NULL;
 }
 
 int saa7146_pgtable_alloc(struct pci_dev *pci, struct saa7146_pgtable *pt)
index cdda423386c5ca3cdf33246893a0f80a82729908..9774e94d1e7d94ae7e8a8eb46a7e72b936680e8d 100644 (file)
@@ -445,10 +445,8 @@ static void cpia_usb_free_resources(struct usb_cpia *ucpia, int try)
                ucpia->sbuf[1].urb = NULL;
        }
 
-       if (ucpia->sbuf[1].data) {
-               kfree(ucpia->sbuf[1].data);
-               ucpia->sbuf[1].data = NULL;
-       }
+       kfree(ucpia->sbuf[1].data);
+       ucpia->sbuf[1].data = NULL;
  
        if (ucpia->sbuf[0].urb) {
                usb_kill_urb(ucpia->sbuf[0].urb);
@@ -456,10 +454,8 @@ static void cpia_usb_free_resources(struct usb_cpia *ucpia, int try)
                ucpia->sbuf[0].urb = NULL;
        }
 
-       if (ucpia->sbuf[0].data) {
-               kfree(ucpia->sbuf[0].data);
-               ucpia->sbuf[0].data = NULL;
-       }
+       kfree(ucpia->sbuf[0].data);
+       ucpia->sbuf[0].data = NULL;
 }
 
 static int cpia_usb_close(void *privdata)
@@ -623,20 +619,14 @@ static void cpia_disconnect(struct usb_interface *intf)
 
        ucpia->curbuff = ucpia->workbuff = NULL;
 
-       if (ucpia->buffers[2]) {
-               vfree(ucpia->buffers[2]);
-               ucpia->buffers[2] = NULL;
-       }
+       vfree(ucpia->buffers[2]);
+       ucpia->buffers[2] = NULL;
 
-       if (ucpia->buffers[1]) {
-               vfree(ucpia->buffers[1]);
-               ucpia->buffers[1] = NULL;
-       }
+       vfree(ucpia->buffers[1]);
+       ucpia->buffers[1] = NULL;
 
-       if (ucpia->buffers[0]) {
-               vfree(ucpia->buffers[0]);
-               ucpia->buffers[0] = NULL;
-       }
+       vfree(ucpia->buffers[0]);
+       ucpia->buffers[0] = NULL;
 
        cam->lowlevel_data = NULL;
        kfree(ucpia);
index b57743571087b892d3a351f7c07d769d5976db90..d4497dbae05c7b641abaa617e9f17563847ecd0e 100644 (file)
@@ -2184,30 +2184,18 @@ static void release_saa(void)
                vfree(saa->vidbuf);
                vfree(saa->audbuf);
                vfree(saa->osdbuf);
-               if (saa->dmavid2)
-                       kfree((void *) saa->dmavid2);
+               kfree(saa->dmavid2);
                saa->audbuf = saa->vidbuf = saa->osdbuf = NULL;
                saa->dmavid2 = NULL;
-               if (saa->dmadebi)
-                       kfree((void *) saa->dmadebi);
-               if (saa->dmavid1)
-                       kfree((void *) saa->dmavid1);
-               if (saa->dmavid2)
-                       kfree((void *) saa->dmavid2);
-               if (saa->dmavid3)
-                       kfree((void *) saa->dmavid3);
-               if (saa->dmaa1in)
-                       kfree((void *) saa->dmaa1in);
-               if (saa->dmaa1out)
-                       kfree((void *) saa->dmaa1out);
-               if (saa->dmaa2in)
-                       kfree((void *) saa->dmaa2in);
-               if (saa->dmaa2out)
-                       kfree((void *) saa->dmaa2out);
-               if (saa->dmaRPS1)
-                       kfree((void *) saa->dmaRPS1);
-               if (saa->dmaRPS2)
-                       kfree((void *) saa->dmaRPS2);
+               kfree(saa->dmadebi);
+               kfree(saa->dmavid1);
+               kfree(saa->dmavid3);
+               kfree(saa->dmaa1in);
+               kfree(saa->dmaa1out);
+               kfree(saa->dmaa2in);
+               kfree(saa->dmaa2out);
+               kfree(saa->dmaRPS1);
+               kfree(saa->dmaRPS2);
                free_irq(saa->irq, saa);
                if (saa->saa7146_mem)
                        iounmap(saa->saa7146_mem);
index 97354f253a80273b58e128c566222bf1d45038fb..574b8e36f3c611e54e4698db948b2dd04c9b8cb3 100644 (file)
@@ -267,10 +267,10 @@ int videobuf_dma_free(struct videobuf_dmabuf *dma)
                kfree(dma->pages);
                dma->pages = NULL;
        }
-       if (dma->vmalloc) {
-               vfree(dma->vmalloc);
-               dma->vmalloc = NULL;
-       }
+
+       vfree(dma->vmalloc);
+       dma->vmalloc = NULL;
+
        if (dma->bus_addr) {
                dma->bus_addr = 0;
        }
index ba838a42ec806c7e8fe1d55abb2ac1626585a28a..53adeb70f2cafb2f7a551ff5e121782f306df329 100644 (file)
@@ -650,7 +650,7 @@ jpg_fbuffer_free (struct file *file)
                                     off += PAGE_SIZE)
                                        ClearPageReserved(MAP_NR
                                                          (mem + off));
-                               kfree((void *) mem);
+                               kfree(mem);
                                fh->jpg_buffers.buffer[i].frag_tab[0] = 0;
                                fh->jpg_buffers.buffer[i].frag_tab[1] = 0;
                        }
index c33533155cc7242f9954dfd8a80ea5d4ae710625..07286816d7dfd4828df6aa879f17e02c864ac2d1 100644 (file)
@@ -820,11 +820,9 @@ void zoran_close(struct video_device* dev)
         msleep(100);                   /* Wait 1/10th of a second */
 
        /* free the allocated framebuffer */
-       if (ztv->fbuffer)
-               bfree( ztv->fbuffer, ZORAN_MAX_FBUFSIZE );
+       bfree(ztv->fbuffer, ZORAN_MAX_FBUFSIZE);
        ztv->fbuffer = 0;
-       if (ztv->overinfo.overlay)
-               kfree( ztv->overinfo.overlay );
+       kfree(ztv->overinfo.overlay);
        ztv->overinfo.overlay = 0;
 
 }
index 1588a59e3767a18a2a03fbf7bf1ba7a0d54b2bbb..550f29744812fe15c748f5c04670df0fcf11492b 100644 (file)
@@ -13,4 +13,13 @@ config MCP_SA11X0
        depends on ARCH_SA1100
        select MCP
 
+# Chip drivers
+config MCP_UCB1200
+       tristate "Support for UCB1200 / UCB1300"
+       depends on MCP
+
+config MCP_UCB1200_TS
+       tristate "Touchscreen interface support"
+       depends on MCP_UCB1200 && INPUT
+
 endmenu
index 98bdd6a421889c9d9d400c1c3e22e0c9a527e51c..adb29b5368a8deb7a75758c12df7a3577e6a0648 100644 (file)
@@ -4,3 +4,9 @@
 
 obj-$(CONFIG_MCP)              += mcp-core.o
 obj-$(CONFIG_MCP_SA11X0)       += mcp-sa11x0.o
+obj-$(CONFIG_MCP_UCB1200)      += ucb1x00-core.o
+obj-$(CONFIG_MCP_UCB1200_TS)   += ucb1x00-ts.o
+
+ifeq ($(CONFIG_SA1100_ASSABET),y)
+obj-$(CONFIG_MCP_UCB1200)      += ucb1x00-assabet.o
+endif
diff --git a/drivers/mfd/ucb1x00-assabet.c b/drivers/mfd/ucb1x00-assabet.c
new file mode 100644 (file)
index 0000000..e325fa7
--- /dev/null
@@ -0,0 +1,73 @@
+/*
+ *  linux/drivers/mfd/ucb1x00-assabet.c
+ *
+ *  Copyright (C) 2001-2003 Russell King, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ *  We handle the machine-specific bits of the UCB1x00 driver here.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/device.h>
+
+#include <asm/dma.h>
+
+#include "ucb1x00.h"
+
+#define UCB1X00_ATTR(name,input)\
+static ssize_t name##_show(struct class_device *dev, char *buf)        \
+{                                                              \
+       struct ucb1x00 *ucb = classdev_to_ucb1x00(dev);         \
+       int val;                                                \
+       ucb1x00_adc_enable(ucb);                                \
+       val = ucb1x00_adc_read(ucb, input, UCB_NOSYNC);         \
+       ucb1x00_adc_disable(ucb);                               \
+       return sprintf(buf, "%d\n", val);                       \
+}                                                              \
+static CLASS_DEVICE_ATTR(name,0444,name##_show,NULL)
+
+UCB1X00_ATTR(vbatt, UCB_ADC_INP_AD1);
+UCB1X00_ATTR(vcharger, UCB_ADC_INP_AD0);
+UCB1X00_ATTR(batt_temp, UCB_ADC_INP_AD2);
+
+static int ucb1x00_assabet_add(struct ucb1x00_dev *dev)
+{
+       class_device_create_file(&dev->ucb->cdev, &class_device_attr_vbatt);
+       class_device_create_file(&dev->ucb->cdev, &class_device_attr_vcharger);
+       class_device_create_file(&dev->ucb->cdev, &class_device_attr_batt_temp);
+       return 0;
+}
+
+static void ucb1x00_assabet_remove(struct ucb1x00_dev *dev)
+{
+       class_device_remove_file(&dev->ucb->cdev, &class_device_attr_batt_temp);
+       class_device_remove_file(&dev->ucb->cdev, &class_device_attr_vcharger);
+       class_device_remove_file(&dev->ucb->cdev, &class_device_attr_vbatt);
+}
+
+static struct ucb1x00_driver ucb1x00_assabet_driver = {
+       .add    = ucb1x00_assabet_add,
+       .remove = ucb1x00_assabet_remove,
+};
+
+static int __init ucb1x00_assabet_init(void)
+{
+       return ucb1x00_register_driver(&ucb1x00_assabet_driver);
+}
+
+static void __exit ucb1x00_assabet_exit(void)
+{
+       ucb1x00_unregister_driver(&ucb1x00_assabet_driver);
+}
+
+module_init(ucb1x00_assabet_init);
+module_exit(ucb1x00_assabet_exit);
+
+MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
+MODULE_DESCRIPTION("Assabet noddy testing only example ADC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
new file mode 100644 (file)
index 0000000..10f6ce1
--- /dev/null
@@ -0,0 +1,665 @@
+/*
+ *  linux/drivers/mfd/ucb1x00-core.c
+ *
+ *  Copyright (C) 2001 Russell King, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ *  The UCB1x00 core driver provides basic services for handling IO,
+ *  the ADC, interrupts, and accessing registers.  It is designed
+ *  such that everything goes through this layer, thereby providing
+ *  a consistent locking methodology, as well as allowing the drivers
+ *  to be used on other non-MCP-enabled hardware platforms.
+ *
+ *  Note that all locks are private to this file.  Nothing else may
+ *  touch them.
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+
+#include <asm/dma.h>
+#include <asm/hardware.h>
+#include <asm/irq.h>
+
+#include "ucb1x00.h"
+
+static DECLARE_MUTEX(ucb1x00_sem);
+static LIST_HEAD(ucb1x00_drivers);
+static LIST_HEAD(ucb1x00_devices);
+
+/**
+ *     ucb1x00_io_set_dir - set IO direction
+ *     @ucb: UCB1x00 structure describing chip
+ *     @in:  bitfield of IO pins to be set as inputs
+ *     @out: bitfield of IO pins to be set as outputs
+ *
+ *     Set the IO direction of the ten general purpose IO pins on
+ *     the UCB1x00 chip.  The @in bitfield has priority over the
+ *     @out bitfield, in that if you specify a pin as both input
+ *     and output, it will end up as an input.
+ *
+ *     ucb1x00_enable must have been called to enable the comms
+ *     before using this function.
+ *
+ *     This function takes a spinlock, disabling interrupts.
+ */
+void ucb1x00_io_set_dir(struct ucb1x00 *ucb, unsigned int in, unsigned int out)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&ucb->io_lock, flags);
+       ucb->io_dir |= out;
+       ucb->io_dir &= ~in;
+
+       ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
+       spin_unlock_irqrestore(&ucb->io_lock, flags);
+}
+
+/**
+ *     ucb1x00_io_write - set or clear IO outputs
+ *     @ucb:   UCB1x00 structure describing chip
+ *     @set:   bitfield of IO pins to set to logic '1'
+ *     @clear: bitfield of IO pins to set to logic '0'
+ *
+ *     Set the IO output state of the specified IO pins.  The value
+ *     is retained if the pins are subsequently configured as inputs.
+ *     The @clear bitfield has priority over the @set bitfield -
+ *     outputs will be cleared.
+ *
+ *     ucb1x00_enable must have been called to enable the comms
+ *     before using this function.
+ *
+ *     This function takes a spinlock, disabling interrupts.
+ */
+void ucb1x00_io_write(struct ucb1x00 *ucb, unsigned int set, unsigned int clear)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&ucb->io_lock, flags);
+       ucb->io_out |= set;
+       ucb->io_out &= ~clear;
+
+       ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
+       spin_unlock_irqrestore(&ucb->io_lock, flags);
+}
+
+/**
+ *     ucb1x00_io_read - read the current state of the IO pins
+ *     @ucb: UCB1x00 structure describing chip
+ *
+ *     Return a bitfield describing the logic state of the ten
+ *     general purpose IO pins.
+ *
+ *     ucb1x00_enable must have been called to enable the comms
+ *     before using this function.
+ *
+ *     This function does not take any semaphores or spinlocks.
+ */
+unsigned int ucb1x00_io_read(struct ucb1x00 *ucb)
+{
+       return ucb1x00_reg_read(ucb, UCB_IO_DATA);
+}
+
+/*
+ * UCB1300 data sheet says we must:
+ *  1. enable ADC      => 5us (including reference startup time)
+ *  2. select input    => 51*tsibclk  => 4.3us
+ *  3. start conversion        => 102*tsibclk => 8.5us
+ * (tsibclk = 1/11981000)
+ * Period between SIB 128-bit frames = 10.7us
+ */
+
+/**
+ *     ucb1x00_adc_enable - enable the ADC converter
+ *     @ucb: UCB1x00 structure describing chip
+ *
+ *     Enable the ucb1x00 and ADC converter on the UCB1x00 for use.
+ *     Any code wishing to use the ADC converter must call this
+ *     function prior to using it.
+ *
+ *     This function takes the ADC semaphore to prevent two or more
+ *     concurrent uses, and therefore may sleep.  As a result, it
+ *     can only be called from process context, not interrupt
+ *     context.
+ *
+ *     You should release the ADC as soon as possible using
+ *     ucb1x00_adc_disable.
+ */
+void ucb1x00_adc_enable(struct ucb1x00 *ucb)
+{
+       down(&ucb->adc_sem);
+
+       ucb->adc_cr |= UCB_ADC_ENA;
+
+       ucb1x00_enable(ucb);
+       ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr);
+}
+
+/**
+ *     ucb1x00_adc_read - read the specified ADC channel
+ *     @ucb: UCB1x00 structure describing chip
+ *     @adc_channel: ADC channel mask
+ *     @sync: wait for syncronisation pulse.
+ *
+ *     Start an ADC conversion and wait for the result.  Note that
+ *     synchronised ADC conversions (via the ADCSYNC pin) must wait
+ *     until the trigger is asserted and the conversion is finished.
+ *
+ *     This function currently spins waiting for the conversion to
+ *     complete (2 frames max without sync).
+ *
+ *     If called for a synchronised ADC conversion, it may sleep
+ *     with the ADC semaphore held.
+ */
+unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync)
+{
+       unsigned int val;
+
+       if (sync)
+               adc_channel |= UCB_ADC_SYNC_ENA;
+
+       ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr | adc_channel);
+       ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr | adc_channel | UCB_ADC_START);
+
+       for (;;) {
+               val = ucb1x00_reg_read(ucb, UCB_ADC_DATA);
+               if (val & UCB_ADC_DAT_VAL)
+                       break;
+               /* yield to other processes */
+               set_current_state(TASK_INTERRUPTIBLE);
+               schedule_timeout(1);
+       }
+
+       return UCB_ADC_DAT(val);
+}
+
+/**
+ *     ucb1x00_adc_disable - disable the ADC converter
+ *     @ucb: UCB1x00 structure describing chip
+ *
+ *     Disable the ADC converter and release the ADC semaphore.
+ */
+void ucb1x00_adc_disable(struct ucb1x00 *ucb)
+{
+       ucb->adc_cr &= ~UCB_ADC_ENA;
+       ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr);
+       ucb1x00_disable(ucb);
+
+       up(&ucb->adc_sem);
+}
+
+/*
+ * UCB1x00 Interrupt handling.
+ *
+ * The UCB1x00 can generate interrupts when the SIBCLK is stopped.
+ * Since we need to read an internal register, we must re-enable
+ * SIBCLK to talk to the chip.  We leave the clock running until
+ * we have finished processing all interrupts from the chip.
+ */
+static irqreturn_t ucb1x00_irq(int irqnr, void *devid, struct pt_regs *regs)
+{
+       struct ucb1x00 *ucb = devid;
+       struct ucb1x00_irq *irq;
+       unsigned int isr, i;
+
+       ucb1x00_enable(ucb);
+       isr = ucb1x00_reg_read(ucb, UCB_IE_STATUS);
+       ucb1x00_reg_write(ucb, UCB_IE_CLEAR, isr);
+       ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);
+
+       for (i = 0, irq = ucb->irq_handler; i < 16 && isr; i++, isr >>= 1, irq++)
+               if (isr & 1 && irq->fn)
+                       irq->fn(i, irq->devid);
+       ucb1x00_disable(ucb);
+
+       return IRQ_HANDLED;
+}
+
+/**
+ *     ucb1x00_hook_irq - hook a UCB1x00 interrupt
+ *     @ucb:   UCB1x00 structure describing chip
+ *     @idx:   interrupt index
+ *     @fn:    function to call when interrupt is triggered
+ *     @devid: device id to pass to interrupt handler
+ *
+ *     Hook the specified interrupt.  You can only register one handler
+ *     for each interrupt source.  The interrupt source is not enabled
+ *     by this function; use ucb1x00_enable_irq instead.
+ *
+ *     Interrupt handlers will be called with other interrupts enabled.
+ *
+ *     Returns zero on success, or one of the following errors:
+ *      -EINVAL if the interrupt index is invalid
+ *      -EBUSY if the interrupt has already been hooked
+ */
+int ucb1x00_hook_irq(struct ucb1x00 *ucb, unsigned int idx, void (*fn)(int, void *), void *devid)
+{
+       struct ucb1x00_irq *irq;
+       int ret = -EINVAL;
+
+       if (idx < 16) {
+               irq = ucb->irq_handler + idx;
+               ret = -EBUSY;
+
+               spin_lock_irq(&ucb->lock);
+               if (irq->fn == NULL) {
+                       irq->devid = devid;
+                       irq->fn = fn;
+                       ret = 0;
+               }
+               spin_unlock_irq(&ucb->lock);
+       }
+       return ret;
+}
+
+/**
+ *     ucb1x00_enable_irq - enable an UCB1x00 interrupt source
+ *     @ucb: UCB1x00 structure describing chip
+ *     @idx: interrupt index
+ *     @edges: interrupt edges to enable
+ *
+ *     Enable the specified interrupt to trigger on %UCB_RISING,
+ *     %UCB_FALLING or both edges.  The interrupt should have been
+ *     hooked by ucb1x00_hook_irq.
+ */
+void ucb1x00_enable_irq(struct ucb1x00 *ucb, unsigned int idx, int edges)
+{
+       unsigned long flags;
+
+       if (idx < 16) {
+               spin_lock_irqsave(&ucb->lock, flags);
+
+               ucb1x00_enable(ucb);
+               if (edges & UCB_RISING) {
+                       ucb->irq_ris_enbl |= 1 << idx;
+                       ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl);
+               }
+               if (edges & UCB_FALLING) {
+                       ucb->irq_fal_enbl |= 1 << idx;
+                       ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl);
+               }
+               ucb1x00_disable(ucb);
+               spin_unlock_irqrestore(&ucb->lock, flags);
+       }
+}
+
+/**
+ *     ucb1x00_disable_irq - disable an UCB1x00 interrupt source
+ *     @ucb: UCB1x00 structure describing chip
+ *     @edges: interrupt edges to disable
+ *
+ *     Disable the specified interrupt triggering on the specified
+ *     (%UCB_RISING, %UCB_FALLING or both) edges.
+ */
+void ucb1x00_disable_irq(struct ucb1x00 *ucb, unsigned int idx, int edges)
+{
+       unsigned long flags;
+
+       if (idx < 16) {
+               spin_lock_irqsave(&ucb->lock, flags);
+
+               ucb1x00_enable(ucb);
+               if (edges & UCB_RISING) {
+                       ucb->irq_ris_enbl &= ~(1 << idx);
+                       ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl);
+               }
+               if (edges & UCB_FALLING) {
+                       ucb->irq_fal_enbl &= ~(1 << idx);
+                       ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl);
+               }
+               ucb1x00_disable(ucb);
+               spin_unlock_irqrestore(&ucb->lock, flags);
+       }
+}
+
+/**
+ *     ucb1x00_free_irq - disable and free the specified UCB1x00 interrupt
+ *     @ucb: UCB1x00 structure describing chip
+ *     @idx: interrupt index
+ *     @devid: device id.
+ *
+ *     Disable the interrupt source and remove the handler.  devid must
+ *     match the devid passed when hooking the interrupt.
+ *
+ *     Returns zero on success, or one of the following errors:
+ *      -EINVAL if the interrupt index is invalid
+ *      -ENOENT if devid does not match
+ */
+int ucb1x00_free_irq(struct ucb1x00 *ucb, unsigned int idx, void *devid)
+{
+       struct ucb1x00_irq *irq;
+       int ret;
+
+       if (idx >= 16)
+               goto bad;
+
+       irq = ucb->irq_handler + idx;
+       ret = -ENOENT;
+
+       spin_lock_irq(&ucb->lock);
+       if (irq->devid == devid) {
+               ucb->irq_ris_enbl &= ~(1 << idx);
+               ucb->irq_fal_enbl &= ~(1 << idx);
+
+               ucb1x00_enable(ucb);
+               ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl);
+               ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl);
+               ucb1x00_disable(ucb);
+
+               irq->fn = NULL;
+               irq->devid = NULL;
+               ret = 0;
+       }
+       spin_unlock_irq(&ucb->lock);
+       return ret;
+
+bad:
+       printk(KERN_ERR "Freeing bad UCB1x00 irq %d\n", idx);
+       return -EINVAL;
+}
+
+static int ucb1x00_add_dev(struct ucb1x00 *ucb, struct ucb1x00_driver *drv)
+{
+       struct ucb1x00_dev *dev;
+       int ret = -ENOMEM;
+
+       dev = kmalloc(sizeof(struct ucb1x00_dev), GFP_KERNEL);
+       if (dev) {
+               dev->ucb = ucb;
+               dev->drv = drv;
+
+               ret = drv->add(dev);
+
+               if (ret == 0) {
+                       list_add(&dev->dev_node, &ucb->devs);
+                       list_add(&dev->drv_node, &drv->devs);
+               } else {
+                       kfree(dev);
+               }
+       }
+       return ret;
+}
+
+static void ucb1x00_remove_dev(struct ucb1x00_dev *dev)
+{
+       dev->drv->remove(dev);
+       list_del(&dev->dev_node);
+       list_del(&dev->drv_node);
+       kfree(dev);
+}
+
+/*
+ * Try to probe our interrupt, rather than relying on lots of
+ * hard-coded machine dependencies.  For reference, the expected
+ * IRQ mappings are:
+ *
+ *     Machine         Default IRQ
+ *     adsbitsy        IRQ_GPCIN4
+ *     cerf            IRQ_GPIO_UCB1200_IRQ
+ *     flexanet        IRQ_GPIO_GUI
+ *     freebird        IRQ_GPIO_FREEBIRD_UCB1300_IRQ
+ *     graphicsclient  ADS_EXT_IRQ(8)
+ *     graphicsmaster  ADS_EXT_IRQ(8)
+ *     lart            LART_IRQ_UCB1200
+ *     omnimeter       IRQ_GPIO23
+ *     pfs168          IRQ_GPIO_UCB1300_IRQ
+ *     simpad          IRQ_GPIO_UCB1300_IRQ
+ *     shannon         SHANNON_IRQ_GPIO_IRQ_CODEC
+ *     yopy            IRQ_GPIO_UCB1200_IRQ
+ */
+static int ucb1x00_detect_irq(struct ucb1x00 *ucb)
+{
+       unsigned long mask;
+
+       mask = probe_irq_on();
+       if (!mask)
+               return NO_IRQ;
+
+       /*
+        * Enable the ADC interrupt.
+        */
+       ucb1x00_reg_write(ucb, UCB_IE_RIS, UCB_IE_ADC);
+       ucb1x00_reg_write(ucb, UCB_IE_FAL, UCB_IE_ADC);
+       ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0xffff);
+       ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);
+
+       /*
+        * Cause an ADC interrupt.
+        */
+       ucb1x00_reg_write(ucb, UCB_ADC_CR, UCB_ADC_ENA);
+       ucb1x00_reg_write(ucb, UCB_ADC_CR, UCB_ADC_ENA | UCB_ADC_START);
+
+       /*
+        * Wait for the conversion to complete.
+        */
+       while ((ucb1x00_reg_read(ucb, UCB_ADC_DATA) & UCB_ADC_DAT_VAL) == 0);
+       ucb1x00_reg_write(ucb, UCB_ADC_CR, 0);
+
+       /*
+        * Disable and clear interrupt.
+        */
+       ucb1x00_reg_write(ucb, UCB_IE_RIS, 0);
+       ucb1x00_reg_write(ucb, UCB_IE_FAL, 0);
+       ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0xffff);
+       ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);
+
+       /*
+        * Read triggered interrupt.
+        */
+       return probe_irq_off(mask);
+}
+
+static int ucb1x00_probe(struct mcp *mcp)
+{
+       struct ucb1x00 *ucb;
+       struct ucb1x00_driver *drv;
+       unsigned int id;
+       int ret = -ENODEV;
+
+       mcp_enable(mcp);
+       id = mcp_reg_read(mcp, UCB_ID);
+
+       if (id != UCB_ID_1200 && id != UCB_ID_1300) {
+               printk(KERN_WARNING "UCB1x00 ID not found: %04x\n", id);
+               goto err_disable;
+       }
+
+       ucb = kmalloc(sizeof(struct ucb1x00), GFP_KERNEL);
+       ret = -ENOMEM;
+       if (!ucb)
+               goto err_disable;
+
+       memset(ucb, 0, sizeof(struct ucb1x00));
+
+       ucb->cdev.class = &ucb1x00_class;
+       ucb->cdev.dev = &mcp->attached_device;
+       strlcpy(ucb->cdev.class_id, "ucb1x00", sizeof(ucb->cdev.class_id));
+
+       spin_lock_init(&ucb->lock);
+       spin_lock_init(&ucb->io_lock);
+       sema_init(&ucb->adc_sem, 1);
+
+       ucb->id  = id;
+       ucb->mcp = mcp;
+       ucb->irq = ucb1x00_detect_irq(ucb);
+       if (ucb->irq == NO_IRQ) {
+               printk(KERN_ERR "UCB1x00: IRQ probe failed\n");
+               ret = -ENODEV;
+               goto err_free;
+       }
+
+       ret = request_irq(ucb->irq, ucb1x00_irq, 0, "UCB1x00", ucb);
+       if (ret) {
+               printk(KERN_ERR "ucb1x00: unable to grab irq%d: %d\n",
+                       ucb->irq, ret);
+               goto err_free;
+       }
+
+       set_irq_type(ucb->irq, IRQT_RISING);
+       mcp_set_drvdata(mcp, ucb);
+
+       ret = class_device_register(&ucb->cdev);
+       if (ret)
+               goto err_irq;
+
+       INIT_LIST_HEAD(&ucb->devs);
+       down(&ucb1x00_sem);
+       list_add(&ucb->node, &ucb1x00_devices);
+       list_for_each_entry(drv, &ucb1x00_drivers, node) {
+               ucb1x00_add_dev(ucb, drv);
+       }
+       up(&ucb1x00_sem);
+       goto out;
+
+ err_irq:
+       free_irq(ucb->irq, ucb);
+ err_free:
+       kfree(ucb);
+ err_disable:
+       mcp_disable(mcp);
+ out:
+       return ret;
+}
+
+static void ucb1x00_remove(struct mcp *mcp)
+{
+       struct ucb1x00 *ucb = mcp_get_drvdata(mcp);
+       struct list_head *l, *n;
+
+       down(&ucb1x00_sem);
+       list_del(&ucb->node);
+       list_for_each_safe(l, n, &ucb->devs) {
+               struct ucb1x00_dev *dev = list_entry(l, struct ucb1x00_dev, dev_node);
+               ucb1x00_remove_dev(dev);
+       }
+       up(&ucb1x00_sem);
+
+       free_irq(ucb->irq, ucb);
+       class_device_unregister(&ucb->cdev);
+}
+
+static void ucb1x00_release(struct class_device *dev)
+{
+       struct ucb1x00 *ucb = classdev_to_ucb1x00(dev);
+       kfree(ucb);
+}
+
+static struct class ucb1x00_class = {
+       .name           = "ucb1x00",
+       .release        = ucb1x00_release,
+};
+
+int ucb1x00_register_driver(struct ucb1x00_driver *drv)
+{
+       struct ucb1x00 *ucb;
+
+       INIT_LIST_HEAD(&drv->devs);
+       down(&ucb1x00_sem);
+       list_add(&drv->node, &ucb1x00_drivers);
+       list_for_each_entry(ucb, &ucb1x00_devices, node) {
+               ucb1x00_add_dev(ucb, drv);
+       }
+       up(&ucb1x00_sem);
+       return 0;
+}
+
+void ucb1x00_unregister_driver(struct ucb1x00_driver *drv)
+{
+       struct list_head *n, *l;
+
+       down(&ucb1x00_sem);
+       list_del(&drv->node);
+       list_for_each_safe(l, n, &drv->devs) {
+               struct ucb1x00_dev *dev = list_entry(l, struct ucb1x00_dev, drv_node);
+               ucb1x00_remove_dev(dev);
+       }
+       up(&ucb1x00_sem);
+}
+
+static int ucb1x00_suspend(struct mcp *mcp, pm_message_t state)
+{
+       struct ucb1x00 *ucb = mcp_get_drvdata(mcp);
+       struct ucb1x00_dev *dev;
+
+       down(&ucb1x00_sem);
+       list_for_each_entry(dev, &ucb->devs, dev_node) {
+               if (dev->drv->suspend)
+                       dev->drv->suspend(dev, state);
+       }
+       up(&ucb1x00_sem);
+       return 0;
+}
+
+static int ucb1x00_resume(struct mcp *mcp)
+{
+       struct ucb1x00 *ucb = mcp_get_drvdata(mcp);
+       struct ucb1x00_dev *dev;
+
+       down(&ucb1x00_sem);
+       list_for_each_entry(dev, &ucb->devs, dev_node) {
+               if (dev->drv->resume)
+                       dev->drv->resume(dev);
+       }
+       up(&ucb1x00_sem);
+       return 0;
+}
+
+static struct mcp_driver ucb1x00_driver = {
+       .drv            = {
+               .name   = "ucb1x00",
+       },
+       .probe          = ucb1x00_probe,
+       .remove         = ucb1x00_remove,
+       .suspend        = ucb1x00_suspend,
+       .resume         = ucb1x00_resume,
+};
+
+static int __init ucb1x00_init(void)
+{
+       int ret = class_register(&ucb1x00_class);
+       if (ret == 0) {
+               ret = mcp_driver_register(&ucb1x00_driver);
+               if (ret)
+                       class_unregister(&ucb1x00_class);
+       }
+       return ret;
+}
+
+static void __exit ucb1x00_exit(void)
+{
+       mcp_driver_unregister(&ucb1x00_driver);
+       class_unregister(&ucb1x00_class);
+}
+
+module_init(ucb1x00_init);
+module_exit(ucb1x00_exit);
+
+EXPORT_SYMBOL(ucb1x00_class);
+
+EXPORT_SYMBOL(ucb1x00_io_set_dir);
+EXPORT_SYMBOL(ucb1x00_io_write);
+EXPORT_SYMBOL(ucb1x00_io_read);
+
+EXPORT_SYMBOL(ucb1x00_adc_enable);
+EXPORT_SYMBOL(ucb1x00_adc_read);
+EXPORT_SYMBOL(ucb1x00_adc_disable);
+
+EXPORT_SYMBOL(ucb1x00_hook_irq);
+EXPORT_SYMBOL(ucb1x00_free_irq);
+EXPORT_SYMBOL(ucb1x00_enable_irq);
+EXPORT_SYMBOL(ucb1x00_disable_irq);
+
+EXPORT_SYMBOL(ucb1x00_register_driver);
+EXPORT_SYMBOL(ucb1x00_unregister_driver);
+
+MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
+MODULE_DESCRIPTION("UCB1x00 core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c
new file mode 100644 (file)
index 0000000..a851d65
--- /dev/null
@@ -0,0 +1,399 @@
+/*
+ *  Touchscreen driver for UCB1x00-based touchscreens
+ *
+ *  Copyright (C) 2001 Russell King, All Rights Reserved.
+ *  Copyright (C) 2005 Pavel Machek
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * 21-Jan-2002 <jco@ict.es> :
+ *
+ * Added support for synchronous A/D mode. This mode is useful to
+ * avoid noise induced in the touchpanel by the LCD, provided that
+ * the UCB1x00 has a valid LCD sync signal routed to its ADCSYNC pin.
+ * It is important to note that the signal connected to the ADCSYNC
+ * pin should provide pulses even when the LCD is blanked, otherwise
+ * a pen touch needed to unblank the LCD will never be read.
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/input.h>
+#include <linux/device.h>
+#include <linux/suspend.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+
+#include <asm/dma.h>
+#include <asm/semaphore.h>
+
+#include "ucb1x00.h"
+
+
+struct ucb1x00_ts {
+       struct input_dev        idev;
+       struct ucb1x00          *ucb;
+
+       wait_queue_head_t       irq_wait;
+       struct task_struct      *rtask;
+       u16                     x_res;
+       u16                     y_res;
+
+       int                     restart:1;
+       int                     adcsync:1;
+};
+
+static int adcsync;
+
+static inline void ucb1x00_ts_evt_add(struct ucb1x00_ts *ts, u16 pressure, u16 x, u16 y)
+{
+       input_report_abs(&ts->idev, ABS_X, x);
+       input_report_abs(&ts->idev, ABS_Y, y);
+       input_report_abs(&ts->idev, ABS_PRESSURE, pressure);
+       input_sync(&ts->idev);
+}
+
+static inline void ucb1x00_ts_event_release(struct ucb1x00_ts *ts)
+{
+       input_report_abs(&ts->idev, ABS_PRESSURE, 0);
+       input_sync(&ts->idev);
+}
+
+/*
+ * Switch to interrupt mode.
+ */
+static inline void ucb1x00_ts_mode_int(struct ucb1x00_ts *ts)
+{
+       ucb1x00_reg_write(ts->ucb, UCB_TS_CR,
+                       UCB_TS_CR_TSMX_POW | UCB_TS_CR_TSPX_POW |
+                       UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_GND |
+                       UCB_TS_CR_MODE_INT);
+}
+
+/*
+ * Switch to pressure mode, and read pressure.  We don't need to wait
+ * here, since both plates are being driven.
+ */
+static inline unsigned int ucb1x00_ts_read_pressure(struct ucb1x00_ts *ts)
+{
+       ucb1x00_reg_write(ts->ucb, UCB_TS_CR,
+                       UCB_TS_CR_TSMX_POW | UCB_TS_CR_TSPX_POW |
+                       UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_GND |
+                       UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA);
+
+       return ucb1x00_adc_read(ts->ucb, UCB_ADC_INP_TSPY, ts->adcsync);
+}
+
+/*
+ * Switch to X position mode and measure Y plate.  We switch the plate
+ * configuration in pressure mode, then switch to position mode.  This
+ * gives a faster response time.  Even so, we need to wait about 55us
+ * for things to stabilise.
+ */
+static inline unsigned int ucb1x00_ts_read_xpos(struct ucb1x00_ts *ts)
+{
+       ucb1x00_reg_write(ts->ucb, UCB_TS_CR,
+                       UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW |
+                       UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA);
+       ucb1x00_reg_write(ts->ucb, UCB_TS_CR,
+                       UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW |
+                       UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA);
+       ucb1x00_reg_write(ts->ucb, UCB_TS_CR,
+                       UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW |
+                       UCB_TS_CR_MODE_POS | UCB_TS_CR_BIAS_ENA);
+
+       udelay(55);
+
+       return ucb1x00_adc_read(ts->ucb, UCB_ADC_INP_TSPY, ts->adcsync);
+}
+
+/*
+ * Switch to Y position mode and measure X plate.  We switch the plate
+ * configuration in pressure mode, then switch to position mode.  This
+ * gives a faster response time.  Even so, we need to wait about 55us
+ * for things to stabilise.
+ */
+static inline unsigned int ucb1x00_ts_read_ypos(struct ucb1x00_ts *ts)
+{
+       ucb1x00_reg_write(ts->ucb, UCB_TS_CR,
+                       UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW |
+                       UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA);
+       ucb1x00_reg_write(ts->ucb, UCB_TS_CR,
+                       UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW |
+                       UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA);
+       ucb1x00_reg_write(ts->ucb, UCB_TS_CR,
+                       UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW |
+                       UCB_TS_CR_MODE_POS | UCB_TS_CR_BIAS_ENA);
+
+       udelay(55);
+
+       return ucb1x00_adc_read(ts->ucb, UCB_ADC_INP_TSPX, ts->adcsync);
+}
+
+/*
+ * Switch to X plate resistance mode.  Set MX to ground, PX to
+ * supply.  Measure current.
+ */
+static inline unsigned int ucb1x00_ts_read_xres(struct ucb1x00_ts *ts)
+{
+       ucb1x00_reg_write(ts->ucb, UCB_TS_CR,
+                       UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW |
+                       UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA);
+       return ucb1x00_adc_read(ts->ucb, 0, ts->adcsync);
+}
+
+/*
+ * Switch to Y plate resistance mode.  Set MY to ground, PY to
+ * supply.  Measure current.
+ */
+static inline unsigned int ucb1x00_ts_read_yres(struct ucb1x00_ts *ts)
+{
+       ucb1x00_reg_write(ts->ucb, UCB_TS_CR,
+                       UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW |
+                       UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA);
+       return ucb1x00_adc_read(ts->ucb, 0, ts->adcsync);
+}
+
+/*
+ * This is a RT kernel thread that handles the ADC accesses
+ * (mainly so we can use semaphores in the UCB1200 core code
+ * to serialise accesses to the ADC).
+ */
+static int ucb1x00_thread(void *_ts)
+{
+       struct ucb1x00_ts *ts = _ts;
+       struct task_struct *tsk = current;
+       DECLARE_WAITQUEUE(wait, tsk);
+       int valid;
+
+       /*
+        * We could run as a real-time thread.  However, thus far
+        * this doesn't seem to be necessary.
+        */
+//     tsk->policy = SCHED_FIFO;
+//     tsk->rt_priority = 1;
+
+       valid = 0;
+
+       add_wait_queue(&ts->irq_wait, &wait);
+       while (!kthread_should_stop()) {
+               unsigned int x, y, p, val;
+               signed long timeout;
+
+               ts->restart = 0;
+
+               ucb1x00_adc_enable(ts->ucb);
+
+               x = ucb1x00_ts_read_xpos(ts);
+               y = ucb1x00_ts_read_ypos(ts);
+               p = ucb1x00_ts_read_pressure(ts);
+
+               /*
+                * Switch back to interrupt mode.
+                */
+               ucb1x00_ts_mode_int(ts);
+               ucb1x00_adc_disable(ts->ucb);
+
+               msleep(10);
+
+               ucb1x00_enable(ts->ucb);
+               val = ucb1x00_reg_read(ts->ucb, UCB_TS_CR);
+
+               if (val & (UCB_TS_CR_TSPX_LOW | UCB_TS_CR_TSMX_LOW)) {
+                       set_task_state(tsk, TASK_INTERRUPTIBLE);
+
+                       ucb1x00_enable_irq(ts->ucb, UCB_IRQ_TSPX, UCB_FALLING);
+                       ucb1x00_disable(ts->ucb);
+
+                       /*
+                        * If we spat out a valid sample set last time,
+                        * spit out a "pen off" sample here.
+                        */
+                       if (valid) {
+                               ucb1x00_ts_event_release(ts);
+                               valid = 0;
+                       }
+
+                       timeout = MAX_SCHEDULE_TIMEOUT;
+               } else {
+                       ucb1x00_disable(ts->ucb);
+
+                       /*
+                        * Filtering is policy.  Policy belongs in user
+                        * space.  We therefore leave it to user space
+                        * to do any filtering they please.
+                        */
+                       if (!ts->restart) {
+                               ucb1x00_ts_evt_add(ts, p, x, y);
+                               valid = 1;
+                       }
+
+                       set_task_state(tsk, TASK_INTERRUPTIBLE);
+                       timeout = HZ / 100;
+               }
+
+               try_to_freeze();
+
+               schedule_timeout(timeout);
+       }
+
+       remove_wait_queue(&ts->irq_wait, &wait);
+
+       ts->rtask = NULL;
+       return 0;
+}
+
+/*
+ * We only detect touch screen _touches_ with this interrupt
+ * handler, and even then we just schedule our task.
+ */
+static void ucb1x00_ts_irq(int idx, void *id)
+{
+       struct ucb1x00_ts *ts = id;
+       ucb1x00_disable_irq(ts->ucb, UCB_IRQ_TSPX, UCB_FALLING);
+       wake_up(&ts->irq_wait);
+}
+
+static int ucb1x00_ts_open(struct input_dev *idev)
+{
+       struct ucb1x00_ts *ts = (struct ucb1x00_ts *)idev;
+       int ret = 0;
+
+       BUG_ON(ts->rtask);
+
+       init_waitqueue_head(&ts->irq_wait);
+       ret = ucb1x00_hook_irq(ts->ucb, UCB_IRQ_TSPX, ucb1x00_ts_irq, ts);
+       if (ret < 0)
+               goto out;
+
+       /*
+        * If we do this at all, we should allow the user to
+        * measure and read the X and Y resistance at any time.
+        */
+       ucb1x00_adc_enable(ts->ucb);
+       ts->x_res = ucb1x00_ts_read_xres(ts);
+       ts->y_res = ucb1x00_ts_read_yres(ts);
+       ucb1x00_adc_disable(ts->ucb);
+
+       ts->rtask = kthread_run(ucb1x00_thread, ts, "ktsd");
+       if (!IS_ERR(ts->rtask)) {
+               ret = 0;
+       } else {
+               ucb1x00_free_irq(ts->ucb, UCB_IRQ_TSPX, ts);
+               ts->rtask = NULL;
+               ret = -EFAULT;
+       }
+
+ out:
+       return ret;
+}
+
+/*
+ * Release touchscreen resources.  Disable IRQs.
+ */
+static void ucb1x00_ts_close(struct input_dev *idev)
+{
+       struct ucb1x00_ts *ts = (struct ucb1x00_ts *)idev;
+
+       if (ts->rtask)
+               kthread_stop(ts->rtask);
+
+       ucb1x00_enable(ts->ucb);
+       ucb1x00_free_irq(ts->ucb, UCB_IRQ_TSPX, ts);
+       ucb1x00_reg_write(ts->ucb, UCB_TS_CR, 0);
+       ucb1x00_disable(ts->ucb);
+}
+
+#ifdef CONFIG_PM
+static int ucb1x00_ts_resume(struct ucb1x00_dev *dev)
+{
+       struct ucb1x00_ts *ts = dev->priv;
+
+       if (ts->rtask != NULL) {
+               /*
+                * Restart the TS thread to ensure the
+                * TS interrupt mode is set up again
+                * after sleep.
+                */
+               ts->restart = 1;
+               wake_up(&ts->irq_wait);
+       }
+       return 0;
+}
+#else
+#define ucb1x00_ts_resume NULL
+#endif
+
+
+/*
+ * Initialisation.
+ */
+static int ucb1x00_ts_add(struct ucb1x00_dev *dev)
+{
+       struct ucb1x00_ts *ts;
+
+       ts = kmalloc(sizeof(struct ucb1x00_ts), GFP_KERNEL);
+       if (!ts)
+               return -ENOMEM;
+
+       memset(ts, 0, sizeof(struct ucb1x00_ts));
+
+       ts->ucb = dev->ucb;
+       ts->adcsync = adcsync ? UCB_SYNC : UCB_NOSYNC;
+
+       ts->idev.name       = "Touchscreen panel";
+       ts->idev.id.product = ts->ucb->id;
+       ts->idev.open       = ucb1x00_ts_open;
+       ts->idev.close      = ucb1x00_ts_close;
+
+       __set_bit(EV_ABS, ts->idev.evbit);
+       __set_bit(ABS_X, ts->idev.absbit);
+       __set_bit(ABS_Y, ts->idev.absbit);
+       __set_bit(ABS_PRESSURE, ts->idev.absbit);
+
+       input_register_device(&ts->idev);
+
+       dev->priv = ts;
+
+       return 0;
+}
+
+static void ucb1x00_ts_remove(struct ucb1x00_dev *dev)
+{
+       struct ucb1x00_ts *ts = dev->priv;
+       input_unregister_device(&ts->idev);
+       kfree(ts);
+}
+
+static struct ucb1x00_driver ucb1x00_ts_driver = {
+       .add            = ucb1x00_ts_add,
+       .remove         = ucb1x00_ts_remove,
+       .resume         = ucb1x00_ts_resume,
+};
+
+static int __init ucb1x00_ts_init(void)
+{
+       return ucb1x00_register_driver(&ucb1x00_ts_driver);
+}
+
+static void __exit ucb1x00_ts_exit(void)
+{
+       ucb1x00_unregister_driver(&ucb1x00_ts_driver);
+}
+
+module_param(adcsync, int, 0444);
+module_init(ucb1x00_ts_init);
+module_exit(ucb1x00_ts_exit);
+
+MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
+MODULE_DESCRIPTION("UCB1x00 touchscreen driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/ucb1x00.h b/drivers/mfd/ucb1x00.h
new file mode 100644 (file)
index 0000000..6b63264
--- /dev/null
@@ -0,0 +1,256 @@
+/*
+ *  linux/drivers/mfd/ucb1x00.h
+ *
+ *  Copyright (C) 2001 Russell King, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ */
+#ifndef UCB1200_H
+#define UCB1200_H
+
+#define UCB_IO_DATA    0x00
+#define UCB_IO_DIR     0x01
+
+#define UCB_IO_0               (1 << 0)
+#define UCB_IO_1               (1 << 1)
+#define UCB_IO_2               (1 << 2)
+#define UCB_IO_3               (1 << 3)
+#define UCB_IO_4               (1 << 4)
+#define UCB_IO_5               (1 << 5)
+#define UCB_IO_6               (1 << 6)
+#define UCB_IO_7               (1 << 7)
+#define UCB_IO_8               (1 << 8)
+#define UCB_IO_9               (1 << 9)
+
+#define UCB_IE_RIS     0x02
+#define UCB_IE_FAL     0x03
+#define UCB_IE_STATUS  0x04
+#define UCB_IE_CLEAR   0x04
+#define UCB_IE_ADC             (1 << 11)
+#define UCB_IE_TSPX            (1 << 12)
+#define UCB_IE_TSMX            (1 << 13)
+#define UCB_IE_TCLIP           (1 << 14)
+#define UCB_IE_ACLIP           (1 << 15)
+
+#define UCB_IRQ_TSPX           12
+
+#define UCB_TC_A       0x05
+#define UCB_TC_A_LOOP          (1 << 7)        /* UCB1200 */
+#define UCB_TC_A_AMPL          (1 << 7)        /* UCB1300 */
+
+#define UCB_TC_B       0x06
+#define UCB_TC_B_VOICE_ENA     (1 << 3)
+#define UCB_TC_B_CLIP          (1 << 4)
+#define UCB_TC_B_ATT           (1 << 6)
+#define UCB_TC_B_SIDE_ENA      (1 << 11)
+#define UCB_TC_B_MUTE          (1 << 13)
+#define UCB_TC_B_IN_ENA                (1 << 14)
+#define UCB_TC_B_OUT_ENA       (1 << 15)
+
+#define UCB_AC_A       0x07
+#define UCB_AC_B       0x08
+#define UCB_AC_B_LOOP          (1 << 8)
+#define UCB_AC_B_MUTE          (1 << 13)
+#define UCB_AC_B_IN_ENA                (1 << 14)
+#define UCB_AC_B_OUT_ENA       (1 << 15)
+
+#define UCB_TS_CR      0x09
+#define UCB_TS_CR_TSMX_POW     (1 << 0)
+#define UCB_TS_CR_TSPX_POW     (1 << 1)
+#define UCB_TS_CR_TSMY_POW     (1 << 2)
+#define UCB_TS_CR_TSPY_POW     (1 << 3)
+#define UCB_TS_CR_TSMX_GND     (1 << 4)
+#define UCB_TS_CR_TSPX_GND     (1 << 5)
+#define UCB_TS_CR_TSMY_GND     (1 << 6)
+#define UCB_TS_CR_TSPY_GND     (1 << 7)
+#define UCB_TS_CR_MODE_INT     (0 << 8)
+#define UCB_TS_CR_MODE_PRES    (1 << 8)
+#define UCB_TS_CR_MODE_POS     (2 << 8)
+#define UCB_TS_CR_BIAS_ENA     (1 << 11)
+#define UCB_TS_CR_TSPX_LOW     (1 << 12)
+#define UCB_TS_CR_TSMX_LOW     (1 << 13)
+
+#define UCB_ADC_CR     0x0a
+#define UCB_ADC_SYNC_ENA       (1 << 0)
+#define UCB_ADC_VREFBYP_CON    (1 << 1)
+#define UCB_ADC_INP_TSPX       (0 << 2)
+#define UCB_ADC_INP_TSMX       (1 << 2)
+#define UCB_ADC_INP_TSPY       (2 << 2)
+#define UCB_ADC_INP_TSMY       (3 << 2)
+#define UCB_ADC_INP_AD0                (4 << 2)
+#define UCB_ADC_INP_AD1                (5 << 2)
+#define UCB_ADC_INP_AD2                (6 << 2)
+#define UCB_ADC_INP_AD3                (7 << 2)
+#define UCB_ADC_EXT_REF                (1 << 5)
+#define UCB_ADC_START          (1 << 7)
+#define UCB_ADC_ENA            (1 << 15)
+
+#define UCB_ADC_DATA   0x0b
+#define UCB_ADC_DAT_VAL                (1 << 15)
+#define UCB_ADC_DAT(x)         (((x) & 0x7fe0) >> 5)
+
+#define UCB_ID         0x0c
+#define UCB_ID_1200            0x1004
+#define UCB_ID_1300            0x1005
+
+#define UCB_MODE       0x0d
+#define UCB_MODE_DYN_VFLAG_ENA (1 << 12)
+#define UCB_MODE_AUD_OFF_CAN   (1 << 13)
+
+#include "mcp.h"
+
+struct ucb1x00_irq {
+       void *devid;
+       void (*fn)(int, void *);
+};
+
+extern struct class ucb1x00_class;
+
+struct ucb1x00 {
+       spinlock_t              lock;
+       struct mcp              *mcp;
+       unsigned int            irq;
+       struct semaphore        adc_sem;
+       spinlock_t              io_lock;
+       u16                     id;
+       u16                     io_dir;
+       u16                     io_out;
+       u16                     adc_cr;
+       u16                     irq_fal_enbl;
+       u16                     irq_ris_enbl;
+       struct ucb1x00_irq      irq_handler[16];
+       struct class_device     cdev;
+       struct list_head        node;
+       struct list_head        devs;
+};
+
+struct ucb1x00_driver;
+
+struct ucb1x00_dev {
+       struct list_head        dev_node;
+       struct list_head        drv_node;
+       struct ucb1x00          *ucb;
+       struct ucb1x00_driver   *drv;
+       void                    *priv;
+};
+
+struct ucb1x00_driver {
+       struct list_head        node;
+       struct list_head        devs;
+       int     (*add)(struct ucb1x00_dev *dev);
+       void    (*remove)(struct ucb1x00_dev *dev);
+       int     (*suspend)(struct ucb1x00_dev *dev, pm_message_t state);
+       int     (*resume)(struct ucb1x00_dev *dev);
+};
+
+#define classdev_to_ucb1x00(cd)        container_of(cd, struct ucb1x00, cdev)
+
+int ucb1x00_register_driver(struct ucb1x00_driver *);
+void ucb1x00_unregister_driver(struct ucb1x00_driver *);
+
+/**
+ *     ucb1x00_clkrate - return the UCB1x00 SIB clock rate
+ *     @ucb: UCB1x00 structure describing chip
+ *
+ *     Return the SIB clock rate in Hz.
+ */
+static inline unsigned int ucb1x00_clkrate(struct ucb1x00 *ucb)
+{
+       return mcp_get_sclk_rate(ucb->mcp);
+}
+
+/**
+ *     ucb1x00_enable - enable the UCB1x00 SIB clock
+ *     @ucb: UCB1x00 structure describing chip
+ *
+ *     Enable the SIB clock.  This can be called multiple times.
+ */
+static inline void ucb1x00_enable(struct ucb1x00 *ucb)
+{
+       mcp_enable(ucb->mcp);
+}
+
+/**
+ *     ucb1x00_disable - disable the UCB1x00 SIB clock
+ *     @ucb: UCB1x00 structure describing chip
+ *
+ *     Disable the SIB clock.  The SIB clock will only be disabled
+ *     when the number of ucb1x00_enable calls match the number of
+ *     ucb1x00_disable calls.
+ */
+static inline void ucb1x00_disable(struct ucb1x00 *ucb)
+{
+       mcp_disable(ucb->mcp);
+}
+
+/**
+ *     ucb1x00_reg_write - write a UCB1x00 register
+ *     @ucb: UCB1x00 structure describing chip
+ *     @reg: UCB1x00 4-bit register index to write
+ *     @val: UCB1x00 16-bit value to write
+ *
+ *     Write the UCB1x00 register @reg with value @val.  The SIB
+ *     clock must be running for this function to return.
+ */
+static inline void ucb1x00_reg_write(struct ucb1x00 *ucb, unsigned int reg, unsigned int val)
+{
+       mcp_reg_write(ucb->mcp, reg, val);
+}
+
+/**
+ *     ucb1x00_reg_read - read a UCB1x00 register
+ *     @ucb: UCB1x00 structure describing chip
+ *     @reg: UCB1x00 4-bit register index to write
+ *
+ *     Read the UCB1x00 register @reg and return its value.  The SIB
+ *     clock must be running for this function to return.
+ */
+static inline unsigned int ucb1x00_reg_read(struct ucb1x00 *ucb, unsigned int reg)
+{
+       return mcp_reg_read(ucb->mcp, reg);
+}
+/**
+ *     ucb1x00_set_audio_divisor - 
+ *     @ucb: UCB1x00 structure describing chip
+ *     @div: SIB clock divisor
+ */
+static inline void ucb1x00_set_audio_divisor(struct ucb1x00 *ucb, unsigned int div)
+{
+       mcp_set_audio_divisor(ucb->mcp, div);
+}
+
+/**
+ *     ucb1x00_set_telecom_divisor -
+ *     @ucb: UCB1x00 structure describing chip
+ *     @div: SIB clock divisor
+ */
+static inline void ucb1x00_set_telecom_divisor(struct ucb1x00 *ucb, unsigned int div)
+{
+       mcp_set_telecom_divisor(ucb->mcp, div);
+}
+
+void ucb1x00_io_set_dir(struct ucb1x00 *ucb, unsigned int, unsigned int);
+void ucb1x00_io_write(struct ucb1x00 *ucb, unsigned int, unsigned int);
+unsigned int ucb1x00_io_read(struct ucb1x00 *ucb);
+
+#define UCB_NOSYNC     (0)
+#define UCB_SYNC       (1)
+
+unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync);
+void ucb1x00_adc_enable(struct ucb1x00 *ucb);
+void ucb1x00_adc_disable(struct ucb1x00 *ucb);
+
+/*
+ * Which edges of the IRQ do you want to control today?
+ */
+#define UCB_RISING     (1 << 0)
+#define UCB_FALLING    (1 << 1)
+
+int ucb1x00_hook_irq(struct ucb1x00 *ucb, unsigned int idx, void (*fn)(int, void *), void *devid);
+void ucb1x00_enable_irq(struct ucb1x00 *ucb, unsigned int idx, int edges);
+void ucb1x00_disable_irq(struct ucb1x00 *ucb, unsigned int idx, int edges);
+int ucb1x00_free_irq(struct ucb1x00 *ucb, unsigned int idx, void *devid);
+
+#endif
index 7501fab349e4ce5271cbe7b517b2f32f945ed954..46de5c9405557b0ddb1166f397539b94742bc38b 100644 (file)
@@ -192,22 +192,37 @@ static int hdpu_cpustate_probe(struct device *ddev)
 {
        struct platform_device *pdev = to_platform_device(ddev);
        struct resource *res;
+       struct proc_dir_entry *proc_de;
+       int ret;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        cpustate.set_addr = (unsigned long *)res->start;
        cpustate.clr_addr = (unsigned long *)res->end - 1;
 
-       misc_register(&cpustate_dev);
-       create_proc_read_entry("sky_cpustate", 0, 0, cpustate_read_proc, NULL);
+       ret = misc_register(&cpustate_dev);
+       if (ret) {
+               printk(KERN_WARNING "sky_cpustate: Unable to register misc "
+                                       "device.\n");
+               cpustate.set_addr = NULL;
+               cpustate.clr_addr = NULL;
+               return ret;
+       }
+
+       proc_de = create_proc_read_entry("sky_cpustate", 0, 0,
+                                       cpustate_read_proc, NULL);
+       if (proc_de == NULL)
+               printk(KERN_WARNING "sky_cpustate: Unable to create proc "
+                                       "dir entry\n");
 
        printk(KERN_INFO "Sky CPU State Driver v" SKY_CPUSTATE_VERSION "\n");
        return 0;
 }
+
 static int hdpu_cpustate_remove(struct device *ddev)
 {
 
-       cpustate.set_addr = 0;
-       cpustate.clr_addr = 0;
+       cpustate.set_addr = NULL;
+       cpustate.clr_addr = NULL;
 
        remove_proc_entry("sky_cpustate", NULL);
        misc_deregister(&cpustate_dev);
index bb713fed2f373c66fd13e8f29be3abbc733963bb..1443117fd8f4afb988becd36beb1d14a084c5646 100644 (file)
@@ -91,8 +91,7 @@ static void __exit cleanup_mtdram(void)
 {
        if (mtd_info) {
                del_mtd_device(mtd_info);
-               if (mtd_info->priv)
-                       vfree(mtd_info->priv);
+               vfree(mtd_info->priv);
                kfree(mtd_info);
        }
 }
index d9ab60b36fd43d571995165d3df190fdbebbb01b..d32c1b3a8ce34422bd7c476b80299beef40ebb67 100644 (file)
@@ -1017,27 +1017,16 @@ static int ftl_writesect(struct mtd_blktrans_dev *dev,
 
 void ftl_freepart(partition_t *part)
 {
-    if (part->VirtualBlockMap) {
        vfree(part->VirtualBlockMap);
        part->VirtualBlockMap = NULL;
-    }
-    if (part->VirtualPageMap) {
        kfree(part->VirtualPageMap);
        part->VirtualPageMap = NULL;
-    }
-    if (part->EUNInfo) {
        kfree(part->EUNInfo);
        part->EUNInfo = NULL;
-    }
-    if (part->XferInfo) {
        kfree(part->XferInfo);
        part->XferInfo = NULL;
-    }
-    if (part->bam_cache) {
        kfree(part->bam_cache);
        part->bam_cache = NULL;
-    }
-    
 } /* ftl_freepart */
 
 static void ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
index 3d88ad622bdb35faf267fcc2d4e21e04628af9eb..fb4098ed469eeb8a2be64d6754cddda98e640324 100644 (file)
@@ -323,33 +323,27 @@ static void bsd_reset (void *state)
  */
 
 static void bsd_free (void *state)
-  {
-    struct bsd_db *db = (struct bsd_db *) state;
+{
+       struct bsd_db *db = state;
     
-    if (db)
-      {
+       if (!db)
+               return;
+
 /*
  * Release the dictionary
  */
-       if (db->dict)
-         {
-           vfree (db->dict);
-           db->dict = NULL;
-         }
+       vfree(db->dict);
+       db->dict = NULL;
 /*
  * Release the string buffer
  */
-       if (db->lens)
-         {
-           vfree (db->lens);
-           db->lens = NULL;
-         }
+       vfree(db->lens);
+       db->lens = NULL;
 /*
  * Finally release the structure itself.
  */
-       kfree (db);
-      }
-  }
+       kfree(db);
+}
 
 /*
  * Allocate space for a (de) compressor.
index bb71638a7c4484a6dfe5b6ba19ad7512ea4e911d..0df7e92b0bf8d587a3b21610ef5d2d1495a6f954 100644 (file)
@@ -1232,9 +1232,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
        navail = 0;     /* total # of usable channels (not deregistered) */
        hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
        i = 0;
-       list = &ppp->channels;
-       while ((list = list->next) != &ppp->channels) {
-               pch = list_entry(list, struct channel, clist);
+       list_for_each_entry(pch, &ppp->channels, clist) {
                navail += pch->avail = (pch->chan != NULL);
                if (pch->avail) {
                        if (skb_queue_empty(&pch->file.xq) ||
@@ -1280,6 +1278,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
 
        /* skip to the channel after the one we last used
           and start at that one */
+       list = &ppp->channels;
        for (i = 0; i < ppp->nxchan; ++i) {
                list = list->next;
                if (list == &ppp->channels) {
@@ -1730,7 +1729,7 @@ static void
 ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
 {
        u32 mask, seq;
-       struct list_head *l;
+       struct channel *ch;
        int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
 
        if (!pskb_may_pull(skb, mphdrlen) || ppp->mrru == 0)
@@ -1784,8 +1783,7 @@ ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
         * The list of channels can't change because we have the receive
         * side of the ppp unit locked.
         */
-       for (l = ppp->channels.next; l != &ppp->channels; l = l->next) {
-               struct channel *ch = list_entry(l, struct channel, clist);
+       list_for_each_entry(ch, &ppp->channels, clist) {
                if (seq_before(ch->lastseq, seq))
                        seq = ch->lastseq;
        }
@@ -2271,10 +2269,8 @@ static struct compressor_entry *
 find_comp_entry(int proto)
 {
        struct compressor_entry *ce;
-       struct list_head *list = &compressor_list;
 
-       while ((list = list->next) != &compressor_list) {
-               ce = list_entry(list, struct compressor_entry, list);
+       list_for_each_entry(ce, &compressor_list, list) {
                if (ce->comp->compress_proto == proto)
                        return ce;
        }
@@ -2540,20 +2536,15 @@ static struct channel *
 ppp_find_channel(int unit)
 {
        struct channel *pch;
-       struct list_head *list;
 
-       list = &new_channels;
-       while ((list = list->next) != &new_channels) {
-               pch = list_entry(list, struct channel, list);
+       list_for_each_entry(pch, &new_channels, list) {
                if (pch->file.index == unit) {
                        list_del(&pch->list);
                        list_add(&pch->list, &all_channels);
                        return pch;
                }
        }
-       list = &all_channels;
-       while ((list = list->next) != &all_channels) {
-               pch = list_entry(list, struct channel, list);
+       list_for_each_entry(pch, &all_channels, list) {
                if (pch->file.index == unit)
                        return pch;
        }
index 93800c126e8669c88942fce107a79d775e777fa8..ee48bfd6734938a0e241170e320b1335ee6dac7c 100644 (file)
@@ -2144,9 +2144,9 @@ srom_search(struct net_device *dev, struct pci_dev *pdev)
     u_long iobase = 0;                     /* Clear upper 32 bits in Alphas */
     int i, j, cfrv;
     struct de4x5_private *lp = netdev_priv(dev);
-    struct list_head *walk = &pdev->bus_list;
+    struct list_head *walk;
 
-    for (walk = walk->next; walk != &pdev->bus_list; walk = walk->next) {
+    list_for_each(walk, &pdev->bus_list) {
        struct pci_dev *this_dev = pci_dev_b(walk);
 
        /* Skip the pci_bus list entry */
index 731855053392995fa6ad4530314ed588b9249056..cb84a4e84a2fd5a3e9aee87e75c3692066645e5d 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/pm.h>
-#include <linux/slab.h>
 #include <linux/types.h>
 
 #include <asm/io.h>
index 694bae162fed916b712112d4feda495e583bcda8..5b887ba5aaf9e613c39939afd62c6f82febef660 100644 (file)
@@ -196,7 +196,7 @@ int parport_wait_peripheral(struct parport *port,
                return 1;
 
        /* 40ms of slow polling. */
-       deadline = jiffies + (HZ + 24) / 25;
+       deadline = jiffies + msecs_to_jiffies(40);
        while (time_before (jiffies, deadline)) {
                int ret;
 
@@ -205,7 +205,7 @@ int parport_wait_peripheral(struct parport *port,
 
                /* Wait for 10ms (or until an interrupt occurs if
                 * the handler is set) */
-               if ((ret = parport_wait_event (port, (HZ + 99) / 100)) < 0)
+               if ((ret = parport_wait_event (port, msecs_to_jiffies(10))) < 0)
                        return ret;
 
                status = parport_read_status (port);
@@ -216,8 +216,7 @@ int parport_wait_peripheral(struct parport *port,
                        /* parport_wait_event didn't time out, but the
                         * peripheral wasn't actually ready either.
                         * Wait for another 10ms. */
-                       __set_current_state (TASK_INTERRUPTIBLE);
-                       schedule_timeout ((HZ+ 99) / 100);
+                       schedule_timeout_interruptible(msecs_to_jiffies(10));
                }
        }
 
index 6624278c6ed86e494d04f53345b1154fcb1c7c79..ce1e2aad8b1008f1efa750fd0155719b4fa8aef4 100644 (file)
@@ -60,7 +60,7 @@ size_t parport_ieee1284_write_compat (struct parport *port,
        parport_data_forward (port);
        while (count < len) {
                unsigned long expire = jiffies + dev->timeout;
-               long wait = (HZ + 99) / 100;
+               long wait = msecs_to_jiffies(10);
                unsigned char mask = (PARPORT_STATUS_ERROR
                                      | PARPORT_STATUS_BUSY);
                unsigned char val = (PARPORT_STATUS_ERROR
@@ -97,8 +97,7 @@ size_t parport_ieee1284_write_compat (struct parport *port,
                            our interrupt handler called. */
                        if (count && no_irq) {
                                parport_release (dev);
-                               __set_current_state (TASK_INTERRUPTIBLE);
-                               schedule_timeout (wait);
+                               schedule_timeout_interruptible(wait);
                                parport_claim_or_block (dev);
                        }
                        else
@@ -542,13 +541,12 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port,
                        /* Yield the port for a while. */
                        if (count && dev->port->irq != PARPORT_IRQ_NONE) {
                                parport_release (dev);
-                               __set_current_state (TASK_INTERRUPTIBLE);
-                               schedule_timeout ((HZ + 24) / 25);
+                               schedule_timeout_interruptible(msecs_to_jiffies(40));
                                parport_claim_or_block (dev);
                        }
                        else
                                /* We must have the device claimed here. */
-                               parport_wait_event (port, (HZ + 24) / 25);
+                               parport_wait_event (port, msecs_to_jiffies(40));
 
                        /* Is there a signal pending? */
                        if (signal_pending (current))
index 1b938bb9be3cc0a985c8b22d80fbdac97344e33a..c6493ad7c0c86d070a79e3a4da4754432ef58b62 100644 (file)
@@ -173,8 +173,7 @@ static int change_mode(struct parport *p, int m)
                                if (time_after_eq (jiffies, expire))
                                        /* The FIFO is stuck. */
                                        return -EBUSY;
-                               __set_current_state (TASK_INTERRUPTIBLE);
-                               schedule_timeout ((HZ + 99) / 100);
+                               schedule_timeout_interruptible(msecs_to_jiffies(10));
                                if (signal_pending (current))
                                        break;
                        }
index cc9d65388e623ffb3436904c16e13cda81204216..56a3b397efee217736b1ce54d6f7621619cb04f4 100644 (file)
@@ -44,10 +44,14 @@ pci_config_attr(subsystem_device, "0x%04x\n");
 pci_config_attr(class, "0x%06x\n");
 pci_config_attr(irq, "%u\n");
 
-static ssize_t local_cpus_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t local_cpus_show(struct device *dev,
+                       struct device_attribute *attr, char *buf)
 {              
-       cpumask_t mask = pcibus_to_cpumask(to_pci_dev(dev)->bus);
-       int len = cpumask_scnprintf(buf, PAGE_SIZE-2, mask);
+       cpumask_t mask;
+       int len;
+
+       mask = pcibus_to_cpumask(to_pci_dev(dev)->bus);
+       len = cpumask_scnprintf(buf, PAGE_SIZE-2, mask);
        strcat(buf,"\n"); 
        return 1+len;
 }
index 35caec13023a0d236de67b11a06cd76cf734aed2..26a55d08b506afde854a79dc4bfebd4e57c1ed6a 100644 (file)
@@ -72,11 +72,13 @@ void pci_remove_legacy_files(struct pci_bus *bus) { return; }
 /*
  * PCI Bus Class Devices
  */
-static ssize_t pci_bus_show_cpuaffinity(struct class_device *class_dev, char *buf)
+static ssize_t pci_bus_show_cpuaffinity(struct class_device *class_dev,
+                                       char *buf)
 {
-       cpumask_t cpumask = pcibus_to_cpumask(to_pci_bus(class_dev));
        int ret;
+       cpumask_t cpumask;
 
+       cpumask = pcibus_to_cpumask(to_pci_bus(class_dev));
        ret = cpumask_scnprintf(buf, PAGE_SIZE, cpumask);
        if (ret < PAGE_SIZE)
                buf[ret++] = '\n';
index d44205d52bf3ce7a69a0da0678c0fe0d377f10d9..d89f83f769f5426d36d3e5ec16a18afcffd61469 100644 (file)
@@ -5,6 +5,7 @@
  */
 
 #define __KERNEL_SYSCALLS__
+static int errno;
 
 #include <linux/kernel.h>
 #include <linux/kthread.h>
@@ -13,8 +14,6 @@
 #include <linux/delay.h>
 #include <asm/oplib.h>
 #include <asm/ebus.h>
-static int errno;
-#include <asm/unistd.h>
 
 #include "bbc_i2c.h"
 #include "max1617.h"
index d765cc1bf060bc30d17e1e3ba78d01429dedf457..b0cc3c2588fdd44074cf6ae873c920489b0a111f 100644 (file)
@@ -20,6 +20,7 @@
  */
 
 #define __KERNEL_SYSCALLS__
+static int errno;
 
 #include <linux/config.h>
 #include <linux/module.h>
@@ -38,9 +39,6 @@
 #include <asm/uaccess.h>
 #include <asm/envctrl.h>
 
-static int errno;
-#include <asm/unistd.h>
-
 #define ENVCTRL_MINOR  162
 
 #define PCF8584_ADDRESS        0x55
index 2341d27ceed7359991733e564eebba5c6a039f08..7a33c708f5b3dd0a65d4433e3900d52f64ae42aa 100644 (file)
@@ -6090,8 +6090,8 @@ NCR53c7x0_release(struct Scsi_Host *host) {
     if (hostdata->num_cmds)
        printk ("scsi%d : leaked %d NCR53c7x0_cmd structures\n",
            host->host_no, hostdata->num_cmds);
-    if (hostdata->events) 
-       vfree ((void *)hostdata->events);
+
+    vfree(hostdata->events);
 
     /* XXX This assumes default cache mode to be IOMAP_FULL_CACHING, which
      * XXX may be invalid (CONFIG_060_WRITETHROUGH)
index 13ecd0c4740434b36f06603d9deea98bf1ce24e5..da6e51c7fe696dd74ed6b688c3ccef48f482df3e 100644 (file)
@@ -560,7 +560,7 @@ ch_set_voltag(scsi_changer *ch, u_int elem,
        return result;
 }
 
-static int ch_gstatus(scsi_changer *ch, int type, unsigned char *dest)
+static int ch_gstatus(scsi_changer *ch, int type, unsigned char __user *dest)
 {
        int retval = 0;
        u_char data[16];
@@ -634,6 +634,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
 {
        scsi_changer *ch = file->private_data;
        int retval;
+       void __user *argp = (void __user *)arg;
        
        switch (cmd) {
        case CHIOGPARAMS:
@@ -646,7 +647,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
                params.cp_nportals  = ch->counts[CHET_IE];
                params.cp_ndrives   = ch->counts[CHET_DT];
                
-               if (copy_to_user((void *) arg, &params, sizeof(params)))
+               if (copy_to_user(argp, &params, sizeof(params)))
                        return -EFAULT;
                return 0;
        }
@@ -671,7 +672,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
                        vparams.cvp_n4  = ch->counts[CHET_V4];
                        strncpy(vparams.cvp_label4,vendor_labels[3],16);
                }
-               if (copy_to_user((void *) arg, &vparams, sizeof(vparams)))
+               if (copy_to_user(argp, &vparams, sizeof(vparams)))
                        return -EFAULT;
                return 0;
        }
@@ -680,7 +681,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
        {
                struct changer_position pos;
                
-               if (copy_from_user(&pos, (void*)arg, sizeof (pos)))
+               if (copy_from_user(&pos, argp, sizeof (pos)))
                        return -EFAULT;
 
                if (0 != ch_checkrange(ch, pos.cp_type, pos.cp_unit)) {
@@ -699,7 +700,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
        {
                struct changer_move mv;
 
-               if (copy_from_user(&mv, (void*)arg, sizeof (mv)))
+               if (copy_from_user(&mv, argp, sizeof (mv)))
                        return -EFAULT;
 
                if (0 != ch_checkrange(ch, mv.cm_fromtype, mv.cm_fromunit) ||
@@ -721,7 +722,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
        {
                struct changer_exchange mv;
                
-               if (copy_from_user(&mv, (void*)arg, sizeof (mv)))
+               if (copy_from_user(&mv, argp, sizeof (mv)))
                        return -EFAULT;
 
                if (0 != ch_checkrange(ch, mv.ce_srctype,  mv.ce_srcunit ) ||
@@ -746,7 +747,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
        {
                struct changer_element_status ces;
                
-               if (copy_from_user(&ces, (void*)arg, sizeof (ces)))
+               if (copy_from_user(&ces, argp, sizeof (ces)))
                        return -EFAULT;
                if (ces.ces_type < 0 || ces.ces_type >= CH_TYPES)
                        return -EINVAL;
@@ -762,7 +763,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
                unsigned int elem;
                int     result,i;
                
-               if (copy_from_user(&cge, (void*)arg, sizeof (cge)))
+               if (copy_from_user(&cge, argp, sizeof (cge)))
                        return -EFAULT;
 
                if (0 != ch_checkrange(ch, cge.cge_type, cge.cge_unit))
@@ -825,7 +826,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
                kfree(buffer);
                up(&ch->lock);
                
-               if (copy_to_user((void*)arg, &cge, sizeof (cge)))
+               if (copy_to_user(argp, &cge, sizeof (cge)))
                        return -EFAULT;
                return result;
        }
@@ -843,7 +844,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
                struct changer_set_voltag csv;
                int elem;
 
-               if (copy_from_user(&csv, (void*)arg, sizeof(csv)))
+               if (copy_from_user(&csv, argp, sizeof(csv)))
                        return -EFAULT;
 
                if (0 != ch_checkrange(ch, csv.csv_type, csv.csv_unit)) {
@@ -861,7 +862,7 @@ static int ch_ioctl(struct inode * inode, struct file * file,
        }
 
        default:
-               return scsi_ioctl(ch->device, cmd, (void*)arg);
+               return scsi_ioctl(ch->device, cmd, argp);
 
        }
 }
@@ -894,9 +895,9 @@ static long ch_ioctl_compat(struct file * file,
        case CHIOGSTATUS32:
        {
                struct changer_element_status32 ces32;
-               unsigned char *data;
+               unsigned char __user *data;
                
-               if (copy_from_user(&ces32, (void*)arg, sizeof (ces32)))
+               if (copy_from_user(&ces32, (void __user *)arg, sizeof (ces32)))
                        return -EFAULT;
                if (ces32.ces_type < 0 || ces32.ces_type >= CH_TYPES)
                        return -EINVAL;
index d72be0ce89c84534911026dce7879659045bd619..3fda8d455c5b06e1965d95f26810d128586ab690 100644 (file)
@@ -691,8 +691,7 @@ int cpqfcTS_ioctl( struct scsi_device *ScsiDev, int Cmnd, void *arg)
         if(  copy_to_user( vendor_cmd->bufp, buf, vendor_cmd->len))
                result = -EFAULT;
 
-        if( buf) 
-         kfree( buf);
+       kfree(buf);
 
         return result;
       }
index b5dc3535557079d368444aa136f1ff28fcdae3ca..6e54c7d9b33c9d1788a26150cf4d2ff2d395b9fc 100644 (file)
@@ -36,7 +36,6 @@
 #include <linux/proc_fs.h>
 #include <linux/stat.h>
 #include <linux/mca.h>
-#include <linux/string.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
 #include <linux/mca-legacy.h>
index 89a4a0615c22d67d6099527443347ec3f21f5026..3f2f2464fa6351ebbe8938c594e3f3cce3dec9c1 100644 (file)
@@ -1377,7 +1377,7 @@ static int osst_read_back_buffer_and_rewrite(struct osst_tape * STp, struct scsi
        
                if ((STp->buffer)->syscall_result || !SRpnt) {
                        printk(KERN_ERR "%s:E: Failed to read frame back from OnStream buffer\n", name);
-                       vfree((void *)buffer);
+                       vfree(buffer);
                        *aSRpnt = SRpnt;
                        return (-EIO);
                }
@@ -1419,7 +1419,7 @@ static int osst_read_back_buffer_and_rewrite(struct osst_tape * STp, struct scsi
 
                        if (new_frame > frame + 1000) {
                                printk(KERN_ERR "%s:E: Failed to find writable tape media\n", name);
-                               vfree((void *)buffer);
+                               vfree(buffer);
                                return (-EIO);
                        }
                        if ( i >= nframes + pending ) break;
@@ -1500,7 +1500,7 @@ static int osst_read_back_buffer_and_rewrite(struct osst_tape * STp, struct scsi
                             SRpnt->sr_sense_buffer[12]         ==  0 &&
                             SRpnt->sr_sense_buffer[13]         ==  2) {
                                printk(KERN_ERR "%s:E: Volume overflow in write error recovery\n", name);
-                               vfree((void *)buffer);
+                               vfree(buffer);
                                return (-EIO);                  /* hit end of tape = fail */
                        }
                        i = ((SRpnt->sr_sense_buffer[3] << 24) |
@@ -1525,7 +1525,7 @@ static int osst_read_back_buffer_and_rewrite(struct osst_tape * STp, struct scsi
        }
        if (!pending)
                osst_copy_to_buffer(STp->buffer, p);    /* so buffer content == at entry in all cases */
-       vfree((void *)buffer);
+       vfree(buffer);
        return 0;
 }
 
@@ -5852,7 +5852,7 @@ static int osst_remove(struct device *dev)
                        os_scsi_tapes[i] = NULL;
                        osst_nr_dev--;
                        write_unlock(&os_scsi_tapes_lock);
-                       if (tpnt->header_cache != NULL) vfree(tpnt->header_cache);
+                       vfree(tpnt->header_cache);
                        if (tpnt->buffer) {
                                normalize_buffer(tpnt->buffer);
                                kfree(tpnt->buffer);
@@ -5896,8 +5896,7 @@ static void __exit exit_osst (void)
                for (i=0; i < osst_max_dev; ++i) {
                        if (!(STp = os_scsi_tapes[i])) continue;
                        /* This is defensive, supposed to happen during detach */
-                       if (STp->header_cache)
-                               vfree(STp->header_cache);
+                       vfree(STp->header_cache);
                        if (STp->buffer) {
                                normalize_buffer(STp->buffer);
                                kfree(STp->buffer);
index 5b65e208893bc80df44179ceb2dd5d6a32b1dccc..4d75cdfa0a0af00bd928faae571008dcbac4d0a2 100644 (file)
@@ -864,7 +864,7 @@ static void autoconfig(struct uart_8250_port *up, unsigned int probeflags)
        /*
         * We're pretty sure there's a port here.  Lets find out what
         * type of port it is.  The IIR top two bits allows us to find
-        * out if its 8250 or 16450, 16550, 16550A or later.  This
+        * out if it's 8250 or 16450, 16550, 16550A or later.  This
         * determines what we test for next.
         *
         * We also initialise the EFR (if any) to zero for later.  The
index f2c9fa423d402d1262b6af64e3d3180c567f39cf..f6704688ee8c2e15efb500ce0ed807030e078280 100644 (file)
@@ -774,10 +774,7 @@ static int ixj_wink(IXJ *j)
        j->pots_winkstart = jiffies;
        SLIC_SetState(PLD_SLIC_STATE_OC, j);
 
-       while (time_before(jiffies, j->pots_winkstart + j->winktime)) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(1);
-       }
+       msleep(jiffies_to_msecs(j->winktime));
 
        SLIC_SetState(slicnow, j);
        return 0;
@@ -1912,7 +1909,6 @@ static int ixj_pcmcia_cable_check(IXJ *j)
 
 static int ixj_hookstate(IXJ *j)
 {
-       unsigned long det;
        int fOffHook = 0;
 
        switch (j->cardtype) {
@@ -1943,11 +1939,7 @@ static int ixj_hookstate(IXJ *j)
                            j->pld_slicr.bits.state == PLD_SLIC_STATE_STANDBY) {
                                if (j->flags.ringing || j->flags.cringing) {
                                        if (!in_interrupt()) {
-                                               det = jiffies + (hertz / 50);
-                                               while (time_before(jiffies, det)) {
-                                                       set_current_state(TASK_INTERRUPTIBLE);
-                                                       schedule_timeout(1);
-                                               }
+                                               msleep(20);
                                        }
                                        SLIC_GetState(j);
                                        if (j->pld_slicr.bits.state == PLD_SLIC_STATE_RINGING) {
@@ -2062,7 +2054,7 @@ static void ixj_ring_start(IXJ *j)
 static int ixj_ring(IXJ *j)
 {
        char cntr;
-       unsigned long jif, det;
+       unsigned long jif;
 
        j->flags.ringing = 1;
        if (ixj_hookstate(j) & 1) {
@@ -2070,7 +2062,6 @@ static int ixj_ring(IXJ *j)
                j->flags.ringing = 0;
                return 1;
        }
-       det = 0;
        for (cntr = 0; cntr < j->maxrings; cntr++) {
                jif = jiffies + (1 * hertz);
                ixj_ring_on(j);
@@ -2080,8 +2071,7 @@ static int ixj_ring(IXJ *j)
                                j->flags.ringing = 0;
                                return 1;
                        }
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       schedule_timeout(1);
+                       schedule_timeout_interruptible(1);
                        if (signal_pending(current))
                                break;
                }
@@ -2089,20 +2079,13 @@ static int ixj_ring(IXJ *j)
                ixj_ring_off(j);
                while (time_before(jiffies, jif)) {
                        if (ixj_hookstate(j) & 1) {
-                               det = jiffies + (hertz / 100);
-                               while (time_before(jiffies, det)) {
-                                       set_current_state(TASK_INTERRUPTIBLE);
-                                       schedule_timeout(1);
-                                       if (signal_pending(current))
-                                               break;
-                               }
+                               msleep(10);
                                if (ixj_hookstate(j) & 1) {
                                        j->flags.ringing = 0;
                                        return 1;
                                }
                        }
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       schedule_timeout(1);
+                       schedule_timeout_interruptible(1);
                        if (signal_pending(current))
                                break;
                }
@@ -2168,10 +2151,8 @@ static int ixj_release(struct inode *inode, struct file *file_p)
         *    Set up locks to ensure that only one process is talking to the DSP at a time.
         *    This is necessary to keep the DSP from locking up.
         */
-       while(test_and_set_bit(board, (void *)&j->busyflags) != 0) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(1);
-       }
+       while(test_and_set_bit(board, (void *)&j->busyflags) != 0)
+               schedule_timeout_interruptible(1);
        if (ixjdebug & 0x0002)
                printk(KERN_INFO "Closing board %d\n", NUM(inode));
 
@@ -3301,14 +3282,10 @@ static void ixj_write_cidcw(IXJ *j)
        ixj_play_tone(j, 23);
 
        clear_bit(j->board, &j->busyflags);
-       while(j->tone_state) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(1);
-       }
-       while(test_and_set_bit(j->board, (void *)&j->busyflags) != 0) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(1);
-       }
+       while(j->tone_state)
+               schedule_timeout_interruptible(1);
+       while(test_and_set_bit(j->board, (void *)&j->busyflags) != 0)
+               schedule_timeout_interruptible(1);
        if(ixjdebug & 0x0200) {
                printk("IXJ cidcw phone%d first tone end at %ld\n", j->board, jiffies);
        }
@@ -3328,14 +3305,10 @@ static void ixj_write_cidcw(IXJ *j)
        ixj_play_tone(j, 24);
 
        clear_bit(j->board, &j->busyflags);
-       while(j->tone_state) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(1);
-       }
-       while(test_and_set_bit(j->board, (void *)&j->busyflags) != 0) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(1);
-       }
+       while(j->tone_state)
+               schedule_timeout_interruptible(1);
+       while(test_and_set_bit(j->board, (void *)&j->busyflags) != 0)
+               schedule_timeout_interruptible(1);
        if(ixjdebug & 0x0200) {
                printk("IXJ cidcw phone%d sent second tone at %ld\n", j->board, jiffies);
        }
@@ -3343,14 +3316,10 @@ static void ixj_write_cidcw(IXJ *j)
        j->cidcw_wait = jiffies + ((50 * hertz) / 100);
 
        clear_bit(j->board, &j->busyflags);
-       while(!j->flags.cidcw_ack && time_before(jiffies, j->cidcw_wait)) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(1);
-       }
-       while(test_and_set_bit(j->board, (void *)&j->busyflags) != 0) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(1);
-       }
+       while(!j->flags.cidcw_ack && time_before(jiffies, j->cidcw_wait))
+               schedule_timeout_interruptible(1);
+       while(test_and_set_bit(j->board, (void *)&j->busyflags) != 0)
+               schedule_timeout_interruptible(1);
        j->cidcw_wait = 0;
        if(!j->flags.cidcw_ack) {
                if(ixjdebug & 0x0200) {
@@ -6125,10 +6094,8 @@ static int ixj_ioctl(struct inode *inode, struct file *file_p, unsigned int cmd,
         *    Set up locks to ensure that only one process is talking to the DSP at a time.
         *    This is necessary to keep the DSP from locking up.
         */
-       while(test_and_set_bit(board, (void *)&j->busyflags) != 0) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(1);
-       }
+       while(test_and_set_bit(board, (void *)&j->busyflags) != 0)
+               schedule_timeout_interruptible(1);
        if (ixjdebug & 0x0040)
                printk("phone%d ioctl, cmd: 0x%x, arg: 0x%lx\n", minor, cmd, arg);
        if (minor >= IXJMAX) {
@@ -6694,8 +6661,6 @@ static struct file_operations ixj_fops =
 
 static int ixj_linetest(IXJ *j)
 {
-       unsigned long jifwait;
-
        j->flags.pstncheck = 1; /* Testing */
        j->flags.pstn_present = 0; /* Assume the line is not there */
 
@@ -6726,11 +6691,7 @@ static int ixj_linetest(IXJ *j)
 
                outb_p(j->pld_scrw.byte, j->XILINXbase);
                daa_set_mode(j, SOP_PU_CONVERSATION);
-               jifwait = jiffies + hertz;
-               while (time_before(jiffies, jifwait)) {
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       schedule_timeout(1);
-               }
+               msleep(1000);
                daa_int_read(j);
                daa_set_mode(j, SOP_PU_RESET);
                if (j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.bitreg.VDD_OK) {
@@ -6750,11 +6711,7 @@ static int ixj_linetest(IXJ *j)
        j->pld_slicw.bits.rly3 = 0;
        outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
        daa_set_mode(j, SOP_PU_CONVERSATION);
-       jifwait = jiffies + hertz;
-       while (time_before(jiffies, jifwait)) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(1);
-       }
+       msleep(1000);
        daa_int_read(j);
        daa_set_mode(j, SOP_PU_RESET);
        if (j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.bitreg.VDD_OK) {
@@ -6783,7 +6740,6 @@ static int ixj_linetest(IXJ *j)
 static int ixj_selfprobe(IXJ *j)
 {
        unsigned short cmd;
-       unsigned long jif;
        int cnt;
        BYTES bytes;
 
@@ -6933,29 +6889,13 @@ static int ixj_selfprobe(IXJ *j)
        } else {
                if (j->cardtype == QTI_LINEJACK) {
                        LED_SetState(0x1, j);
-                       jif = jiffies + (hertz / 10);
-                       while (time_before(jiffies, jif)) {
-                               set_current_state(TASK_INTERRUPTIBLE);
-                               schedule_timeout(1);
-                       }
+                       msleep(100);
                        LED_SetState(0x2, j);
-                       jif = jiffies + (hertz / 10);
-                       while (time_before(jiffies, jif)) {
-                               set_current_state(TASK_INTERRUPTIBLE);
-                               schedule_timeout(1);
-                       }
+                       msleep(100);
                        LED_SetState(0x4, j);
-                       jif = jiffies + (hertz / 10);
-                       while (time_before(jiffies, jif)) {
-                               set_current_state(TASK_INTERRUPTIBLE);
-                               schedule_timeout(1);
-                       }
+                       msleep(100);
                        LED_SetState(0x8, j);
-                       jif = jiffies + (hertz / 10);
-                       while (time_before(jiffies, jif)) {
-                               set_current_state(TASK_INTERRUPTIBLE);
-                               schedule_timeout(1);
-                       }
+                       msleep(100);
                        LED_SetState(0x0, j);
                        daa_get_version(j);
                        if (ixjdebug & 0x0002)
index 1ab95d24c5e25c8715078538ff6176b2b4f01883..e108e0a36b743fd88c191a7e96c8339dbe39f451 100644 (file)
@@ -2,7 +2,8 @@
  *  USB HID support for Linux
  *
  *  Copyright (c) 1999 Andreas Gal
- *  Copyright (c) 2000-2001 Vojtech Pavlik <vojtech@suse.cz>
+ *  Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
+ *  Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
  */
 
 /*
@@ -38,7 +39,7 @@
  * Version Information
  */
 
-#define DRIVER_VERSION "v2.01"
+#define DRIVER_VERSION "v2.6"
 #define DRIVER_AUTHOR "Andreas Gal, Vojtech Pavlik"
 #define DRIVER_DESC "USB HID core driver"
 #define DRIVER_LICENSE "GPL"
@@ -1058,8 +1059,8 @@ static int hid_submit_ctrl(struct hid_device *hid)
                if (maxpacket > 0) {
                        padlen = (len + maxpacket - 1) / maxpacket;
                        padlen *= maxpacket;
-                       if (padlen > HID_BUFFER_SIZE)
-                               padlen = HID_BUFFER_SIZE;
+                       if (padlen > hid->bufsize)
+                               padlen = hid->bufsize;
                } else
                        padlen = 0;
                hid->urbctrl->transfer_buffer_length = padlen;
@@ -1096,6 +1097,7 @@ static void hid_irq_out(struct urb *urb, struct pt_regs *regs)
 
        switch (urb->status) {
                case 0:                 /* success */
+                       break;
                case -ESHUTDOWN:        /* unplug */
                case -EILSEQ:           /* unplug timeout on uhci */
                        unplug = 1;
@@ -1143,6 +1145,7 @@ static void hid_ctrl(struct urb *urb, struct pt_regs *regs)
                case 0:                 /* success */
                        if (hid->ctrl[hid->ctrltail].dir == USB_DIR_IN)
                                hid_input_report(hid->ctrl[hid->ctrltail].report->type, urb, 0, regs);
+                       break;
                case -ESHUTDOWN:        /* unplug */
                case -EILSEQ:           /* unplug timectrl on uhci */
                        unplug = 1;
@@ -1284,13 +1287,8 @@ void hid_init_reports(struct hid_device *hid)
        struct hid_report *report;
        int err, ret;
 
-       list_for_each_entry(report, &hid->report_enum[HID_INPUT_REPORT].report_list, list) {
-               int size = ((report->size - 1) >> 3) + 1 + hid->report_enum[HID_INPUT_REPORT].numbered;
-               if (size > HID_BUFFER_SIZE) size = HID_BUFFER_SIZE;
-               if (size > hid->urbin->transfer_buffer_length)
-                       hid->urbin->transfer_buffer_length = size;
+       list_for_each_entry(report, &hid->report_enum[HID_INPUT_REPORT].report_list, list)
                hid_submit_report(hid, report, USB_DIR_IN);
-       }
 
        list_for_each_entry(report, &hid->report_enum[HID_FEATURE_REPORT].report_list, list)
                hid_submit_report(hid, report, USB_DIR_IN);
@@ -1372,12 +1370,14 @@ void hid_init_reports(struct hid_device *hid)
 #define USB_VENDOR_ID_A4TECH           0x09da
 #define USB_DEVICE_ID_A4TECH_WCP32PU   0x0006
 
-#define USB_VENDOR_ID_AASHIMA          0x06D6
+#define USB_VENDOR_ID_AASHIMA          0x06d6
 #define USB_DEVICE_ID_AASHIMA_GAMEPAD  0x0025
+#define USB_DEVICE_ID_AASHIMA_PREDATOR 0x0026
 
 #define USB_VENDOR_ID_CYPRESS          0x04b4
 #define USB_DEVICE_ID_CYPRESS_MOUSE    0x0001
 #define USB_DEVICE_ID_CYPRESS_HIDCOM   0x5500
+#define USB_DEVICE_ID_CYPRESS_ULTRAMOUSE       0x7417
 
 #define USB_VENDOR_ID_BERKSHIRE                0x0c98
 #define USB_DEVICE_ID_BERKSHIRE_PCWD   0x1140
@@ -1432,7 +1432,7 @@ void hid_init_reports(struct hid_device *hid)
 #define USB_DEVICE_ID_VERNIER_CYCLOPS  0x0004
 
 #define USB_VENDOR_ID_LD               0x0f11
-#define USB_DEVICE_ID_CASSY            0x1000
+#define USB_DEVICE_ID_CASSY            0x1000
 #define USB_DEVICE_ID_POCKETCASSY      0x1010
 #define USB_DEVICE_ID_MOBILECASSY      0x1020
 #define USB_DEVICE_ID_JWM              0x1080
@@ -1445,7 +1445,8 @@ void hid_init_reports(struct hid_device *hid)
 #define USB_DEVICE_ID_POWERCONTROL     0x2030
 
 #define USB_VENDOR_ID_APPLE            0x05ac
-#define USB_DEVICE_ID_APPLE_BLUETOOTH          0x1000
+#define USB_DEVICE_ID_APPLE_POWERMOUSE 0x0304
+#define USB_DEVICE_ID_APPLE_BLUETOOTH  0x1000
 
 /*
  * Alphabetically sorted blacklist by quirk type.
@@ -1471,6 +1472,7 @@ static struct hid_blacklist {
        { USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW48, HID_QUIRK_IGNORE },
        { USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW28, HID_QUIRK_IGNORE },
        { USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_HIDCOM, HID_QUIRK_IGNORE },
+       { USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_ULTRAMOUSE, HID_QUIRK_IGNORE },
        { USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE, HID_QUIRK_IGNORE },
        { USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20, HID_QUIRK_IGNORE },
        { USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5, HID_QUIRK_IGNORE },
@@ -1551,10 +1553,12 @@ static struct hid_blacklist {
        { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_USBHUB_KB, HID_QUIRK_NOGET},
        { USB_VENDOR_ID_TANGTOP, USB_DEVICE_ID_TANGTOP_USBPS2, HID_QUIRK_NOGET },
 
+       { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_POWERMOUSE, HID_QUIRK_2WHEEL_POWERMOUSE },
        { USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU, HID_QUIRK_2WHEEL_MOUSE_HACK_7 },
        { USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE, HID_QUIRK_2WHEEL_MOUSE_HACK_5 },
 
        { USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_GAMEPAD, HID_QUIRK_BADPAD },
+       { USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR, HID_QUIRK_BADPAD },
        { USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD },
        { USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD },
        { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
@@ -1567,15 +1571,32 @@ static struct hid_blacklist {
        { 0, 0 }
 };
 
+/*
+ * Traverse the supplied list of reports and find the longest
+ */
+static void hid_find_max_report(struct hid_device *hid, unsigned int type, int *max)
+{
+       struct hid_report *report;
+       int size;
+
+       list_for_each_entry(report, &hid->report_enum[type].report_list, list) {
+               size = ((report->size - 1) >> 3) + 1;
+               if (type == HID_INPUT_REPORT && hid->report_enum[type].numbered)
+                       size++;
+               if (*max < size)
+                       *max = size;
+       }
+}
+
 static int hid_alloc_buffers(struct usb_device *dev, struct hid_device *hid)
 {
-       if (!(hid->inbuf = usb_buffer_alloc(dev, HID_BUFFER_SIZE, SLAB_ATOMIC, &hid->inbuf_dma)))
+       if (!(hid->inbuf = usb_buffer_alloc(dev, hid->bufsize, SLAB_ATOMIC, &hid->inbuf_dma)))
                return -1;
-       if (!(hid->outbuf = usb_buffer_alloc(dev, HID_BUFFER_SIZE, SLAB_ATOMIC, &hid->outbuf_dma)))
+       if (!(hid->outbuf = usb_buffer_alloc(dev, hid->bufsize, SLAB_ATOMIC, &hid->outbuf_dma)))
                return -1;
        if (!(hid->cr = usb_buffer_alloc(dev, sizeof(*(hid->cr)), SLAB_ATOMIC, &hid->cr_dma)))
                return -1;
-       if (!(hid->ctrlbuf = usb_buffer_alloc(dev, HID_BUFFER_SIZE, SLAB_ATOMIC, &hid->ctrlbuf_dma)))
+       if (!(hid->ctrlbuf = usb_buffer_alloc(dev, hid->bufsize, SLAB_ATOMIC, &hid->ctrlbuf_dma)))
                return -1;
 
        return 0;
@@ -1584,13 +1605,13 @@ static int hid_alloc_buffers(struct usb_device *dev, struct hid_device *hid)
 static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid)
 {
        if (hid->inbuf)
-               usb_buffer_free(dev, HID_BUFFER_SIZE, hid->inbuf, hid->inbuf_dma);
+               usb_buffer_free(dev, hid->bufsize, hid->inbuf, hid->inbuf_dma);
        if (hid->outbuf)
-               usb_buffer_free(dev, HID_BUFFER_SIZE, hid->outbuf, hid->outbuf_dma);
+               usb_buffer_free(dev, hid->bufsize, hid->outbuf, hid->outbuf_dma);
        if (hid->cr)
                usb_buffer_free(dev, sizeof(*(hid->cr)), hid->cr, hid->cr_dma);
        if (hid->ctrlbuf)
-               usb_buffer_free(dev, HID_BUFFER_SIZE, hid->ctrlbuf, hid->ctrlbuf_dma);
+               usb_buffer_free(dev, hid->bufsize, hid->ctrlbuf, hid->ctrlbuf_dma);
 }
 
 static struct hid_device *usb_hid_configure(struct usb_interface *intf)
@@ -1601,7 +1622,7 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
        struct hid_device *hid;
        unsigned quirks = 0, rsize = 0;
        char *buf, *rdesc;
-       int n;
+       int n, insize = 0;
 
        for (n = 0; hid_blacklist[n].idVendor; n++)
                if ((hid_blacklist[n].idVendor == le16_to_cpu(dev->descriptor.idVendor)) &&
@@ -1655,6 +1676,19 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
        kfree(rdesc);
        hid->quirks = quirks;
 
+       hid->bufsize = HID_MIN_BUFFER_SIZE;
+       hid_find_max_report(hid, HID_INPUT_REPORT, &hid->bufsize);
+       hid_find_max_report(hid, HID_OUTPUT_REPORT, &hid->bufsize);
+       hid_find_max_report(hid, HID_FEATURE_REPORT, &hid->bufsize);
+
+       if (hid->bufsize > HID_MAX_BUFFER_SIZE)
+               hid->bufsize = HID_MAX_BUFFER_SIZE;
+
+       hid_find_max_report(hid, HID_INPUT_REPORT, &insize);
+
+       if (insize > HID_MAX_BUFFER_SIZE)
+               insize = HID_MAX_BUFFER_SIZE;
+
        if (hid_alloc_buffers(dev, hid)) {
                hid_free_buffers(dev, hid);
                goto fail;
@@ -1685,7 +1719,7 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
                        if (!(hid->urbin = usb_alloc_urb(0, GFP_KERNEL)))
                                goto fail;
                        pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
-                       usb_fill_int_urb(hid->urbin, dev, pipe, hid->inbuf, 0,
+                       usb_fill_int_urb(hid->urbin, dev, pipe, hid->inbuf, insize,
                                         hid_irq_in, hid, interval);
                        hid->urbin->transfer_dma = hid->inbuf_dma;
                        hid->urbin->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
index 52437e5e2e7806fa20a81ae67c733ac0dec5d9be..ceebab99eff24e08017efb8e7acf80826aab7d74 100644 (file)
@@ -85,6 +85,23 @@ static const struct hid_usage_entry hid_usage_table[] = {
       {0, 0x91, "D-PadDown"},
       {0, 0x92, "D-PadRight"},
       {0, 0x93, "D-PadLeft"},
+  {  2, 0, "Simulation" },
+      {0, 0xb0, "Aileron"},
+      {0, 0xb1, "AileronTrim"},
+      {0, 0xb2, "Anti-Torque"},
+      {0, 0xb3, "Autopilot"},
+      {0, 0xb4, "Chaff"},
+      {0, 0xb5, "Collective"},
+      {0, 0xb6, "DiveBrake"},
+      {0, 0xb7, "ElectronicCountermeasures"},
+      {0, 0xb8, "Elevator"},
+      {0, 0xb9, "ElevatorTrim"},
+      {0, 0xba, "Rudder"},
+      {0, 0xbb, "Throttle"},
+      {0, 0xbc, "FlightCommunications"},
+      {0, 0xbd, "FlareRelease"},
+      {0, 0xbe, "LandingGear"},
+      {0, 0xbf, "ToeBrake"},
   {  7, 0, "Keyboard" },
   {  8, 0, "LED" },
       {0, 0x01, "NumLock"},
@@ -92,6 +109,7 @@ static const struct hid_usage_entry hid_usage_table[] = {
       {0, 0x03, "ScrollLock"},
       {0, 0x04, "Compose"},
       {0, 0x05, "Kana"},
+      {0, 0x4b, "GenericIndicator"},
   {  9, 0, "Button" },
   { 10, 0, "Ordinal" },
   { 12, 0, "Consumer" },
@@ -574,7 +592,8 @@ static char *keys[KEY_MAX + 1] = {
        [KEY_EXIT] = "Exit",                    [KEY_MOVE] = "Move",
        [KEY_EDIT] = "Edit",                    [KEY_SCROLLUP] = "ScrollUp",
        [KEY_SCROLLDOWN] = "ScrollDown",        [KEY_KPLEFTPAREN] = "KPLeftParenthesis",
-       [KEY_KPRIGHTPAREN] = "KPRightParenthesis", [KEY_F13] = "F13",
+       [KEY_KPRIGHTPAREN] = "KPRightParenthesis", [KEY_NEW] = "New",
+       [KEY_REDO] = "Redo",                    [KEY_F13] = "F13",
        [KEY_F14] = "F14",                      [KEY_F15] = "F15",
        [KEY_F16] = "F16",                      [KEY_F17] = "F17",
        [KEY_F18] = "F18",                      [KEY_F19] = "F19",
@@ -584,15 +603,15 @@ static char *keys[KEY_MAX + 1] = {
        [KEY_PAUSECD] = "PauseCD",              [KEY_PROG3] = "Prog3",
        [KEY_PROG4] = "Prog4",                  [KEY_SUSPEND] = "Suspend",
        [KEY_CLOSE] = "Close",                  [KEY_PLAY] = "Play",
-       [KEY_FASTFORWARD] = "Fast Forward",     [KEY_BASSBOOST] = "Bass Boost",
+       [KEY_FASTFORWARD] = "FastForward",      [KEY_BASSBOOST] = "BassBoost",
        [KEY_PRINT] = "Print",                  [KEY_HP] = "HP",
        [KEY_CAMERA] = "Camera",                [KEY_SOUND] = "Sound",
        [KEY_QUESTION] = "Question",            [KEY_EMAIL] = "Email",
        [KEY_CHAT] = "Chat",                    [KEY_SEARCH] = "Search",
        [KEY_CONNECT] = "Connect",              [KEY_FINANCE] = "Finance",
        [KEY_SPORT] = "Sport",                  [KEY_SHOP] = "Shop",
-       [KEY_ALTERASE] = "Alternate Erase",     [KEY_CANCEL] = "Cancel",
-       [KEY_BRIGHTNESSDOWN] = "Brightness down", [KEY_BRIGHTNESSUP] = "Brightness up",
+       [KEY_ALTERASE] = "AlternateErase",      [KEY_CANCEL] = "Cancel",
+       [KEY_BRIGHTNESSDOWN] = "BrightnessDown", [KEY_BRIGHTNESSUP] = "BrightnessUp",
        [KEY_MEDIA] = "Media",                  [KEY_UNKNOWN] = "Unknown",
        [BTN_0] = "Btn0",                       [BTN_1] = "Btn1",
        [BTN_2] = "Btn2",                       [BTN_3] = "Btn3",
@@ -622,8 +641,8 @@ static char *keys[KEY_MAX + 1] = {
        [BTN_TOOL_AIRBRUSH] = "ToolAirbrush",   [BTN_TOOL_FINGER] = "ToolFinger",
        [BTN_TOOL_MOUSE] = "ToolMouse",         [BTN_TOOL_LENS] = "ToolLens",
        [BTN_TOUCH] = "Touch",                  [BTN_STYLUS] = "Stylus",
-       [BTN_STYLUS2] = "Stylus2",              [BTN_TOOL_DOUBLETAP] = "Tool Doubletap",
-       [BTN_TOOL_TRIPLETAP] = "Tool Tripletap", [BTN_GEAR_DOWN] = "WheelBtn",
+       [BTN_STYLUS2] = "Stylus2",              [BTN_TOOL_DOUBLETAP] = "ToolDoubleTap",
+       [BTN_TOOL_TRIPLETAP] = "ToolTripleTap", [BTN_GEAR_DOWN] = "WheelBtn",
        [BTN_GEAR_UP] = "Gear up",              [KEY_OK] = "Ok",
        [KEY_SELECT] = "Select",                [KEY_GOTO] = "Goto",
        [KEY_CLEAR] = "Clear",                  [KEY_POWER2] = "Power2",
@@ -659,6 +678,9 @@ static char *keys[KEY_MAX + 1] = {
        [KEY_TWEN] = "TWEN",                    [KEY_DEL_EOL] = "DeleteEOL",
        [KEY_DEL_EOS] = "DeleteEOS",            [KEY_INS_LINE] = "InsertLine",
        [KEY_DEL_LINE] = "DeleteLine",
+       [KEY_SEND] = "Send",                    [KEY_REPLY] = "Reply",
+       [KEY_FORWARDMAIL] = "ForwardMail",      [KEY_SAVE] = "Save",
+       [KEY_DOCUMENTS] = "Documents",
 };
 
 static char *relatives[REL_MAX + 1] = {
index 63a4db721f7e5d2e3507b29e76d81c60491ab7c7..0b6452248a398ae38c47848d66416ea6dd07fcfc 100644 (file)
@@ -78,8 +78,8 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
 {
        struct input_dev *input = &hidinput->input;
        struct hid_device *device = hidinput->input.private;
-       int max, code;
-       unsigned long *bit;
+       int max = 0, code;
+       unsigned long *bit = NULL;
 
        field->hidinput = hidinput;
 
@@ -131,6 +131,15 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                        map_key(code);
                        break;
 
+
+               case HID_UP_SIMULATION:
+
+                       switch (usage->hid & 0xffff) {
+                               case 0xba: map_abs(ABS_RUDDER); break;
+                               case 0xbb: map_abs(ABS_THROTTLE); break;
+                       }
+                       break;
+
                case HID_UP_GENDESK:
 
                        if ((usage->hid & 0xf0) == 0x80) {      /* SystemControl */
@@ -238,8 +247,12 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                                case 0x000: goto ignore;
                                case 0x034: map_key_clear(KEY_SLEEP);           break;
                                case 0x036: map_key_clear(BTN_MISC);            break;
+                               case 0x045: map_key_clear(KEY_RADIO);           break;
                                case 0x08a: map_key_clear(KEY_WWW);             break;
+                               case 0x08d: map_key_clear(KEY_PROGRAM);         break;
                                case 0x095: map_key_clear(KEY_HELP);            break;
+                               case 0x09c: map_key_clear(KEY_CHANNELUP);       break;
+                               case 0x09d: map_key_clear(KEY_CHANNELDOWN);     break;
                                case 0x0b0: map_key_clear(KEY_PLAY);            break;
                                case 0x0b1: map_key_clear(KEY_PAUSE);           break;
                                case 0x0b2: map_key_clear(KEY_RECORD);          break;
@@ -259,6 +272,11 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                                case 0x18a: map_key_clear(KEY_MAIL);            break;
                                case 0x192: map_key_clear(KEY_CALC);            break;
                                case 0x194: map_key_clear(KEY_FILE);            break;
+                               case 0x1a7: map_key_clear(KEY_DOCUMENTS);       break;
+                               case 0x201: map_key_clear(KEY_NEW);             break;
+                               case 0x207: map_key_clear(KEY_SAVE);            break;
+                               case 0x208: map_key_clear(KEY_PRINT);           break;
+                               case 0x209: map_key_clear(KEY_PROPS);           break;
                                case 0x21a: map_key_clear(KEY_UNDO);            break;
                                case 0x21b: map_key_clear(KEY_COPY);            break;
                                case 0x21c: map_key_clear(KEY_CUT);             break;
@@ -271,7 +289,11 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                                case 0x227: map_key_clear(KEY_REFRESH);         break;
                                case 0x22a: map_key_clear(KEY_BOOKMARKS);       break;
                                case 0x238: map_rel(REL_HWHEEL);                break;
-                               default:    goto unknown;
+                               case 0x279: map_key_clear(KEY_REDO);            break;
+                               case 0x289: map_key_clear(KEY_REPLY);           break;
+                               case 0x28b: map_key_clear(KEY_FORWARDMAIL);     break;
+                               case 0x28c: map_key_clear(KEY_SEND);            break;
+                               default:    goto ignore;
                        }
                        break;
 
@@ -296,9 +318,42 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                        break;
 
                case HID_UP_MSVENDOR:
-
                        goto ignore;
 
+               case HID_UP_CUSTOM: /* Reported on Logitech and Powerbook USB keyboards */
+
+                       set_bit(EV_REP, input->evbit);
+                       switch(usage->hid & HID_USAGE) {
+                               case 0x003: map_key_clear(KEY_FN);              break;
+                               default:    goto ignore;
+                       }
+                       break;
+
+               case HID_UP_LOGIVENDOR: /* Reported on Logitech Ultra X Media Remote */
+
+                       set_bit(EV_REP, input->evbit);
+                       switch(usage->hid & HID_USAGE) {
+                               case 0x004: map_key_clear(KEY_AGAIN);           break;
+                               case 0x00d: map_key_clear(KEY_HOME);            break;
+                               case 0x024: map_key_clear(KEY_SHUFFLE);         break;
+                               case 0x025: map_key_clear(KEY_TV);              break;
+                               case 0x026: map_key_clear(KEY_MENU);            break;
+                               case 0x031: map_key_clear(KEY_AUDIO);           break;
+                               case 0x032: map_key_clear(KEY_SUBTITLE);        break;
+                               case 0x033: map_key_clear(KEY_LAST);            break;
+                               case 0x047: map_key_clear(KEY_MP3);             break;
+                               case 0x048: map_key_clear(KEY_DVD);             break;
+                               case 0x049: map_key_clear(KEY_MEDIA);           break;
+                               case 0x04a: map_key_clear(KEY_VIDEO);           break;
+                               case 0x04b: map_key_clear(KEY_ANGLE);           break;
+                               case 0x04c: map_key_clear(KEY_LANGUAGE);        break;
+                               case 0x04d: map_key_clear(KEY_SUBTITLE);        break;
+                               case 0x051: map_key_clear(KEY_RED);             break;
+                               case 0x052: map_key_clear(KEY_CLOSE);           break;
+                               default:    goto ignore;
+                       }
+                       break;
+
                case HID_UP_PID:
 
                        set_bit(EV_FF, input->evbit);
@@ -349,6 +404,9 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
        if (usage->code > max)
                goto ignore;
 
+       if (((device->quirks & (HID_QUIRK_2WHEEL_POWERMOUSE)) && (usage->hid == 0x00010032)))
+               map_rel(REL_HWHEEL);
+
        if ((device->quirks & (HID_QUIRK_2WHEEL_MOUSE_HACK_7 | HID_QUIRK_2WHEEL_MOUSE_HACK_5)) &&
                 (usage->type == EV_REL) && (usage->code == REL_WHEEL))
                        set_bit(REL_HWHEEL, bit);
index c1b6b69bc4a46979eca2f9b31a6922f9de34f313..ec2412c42f1ea88bebe6a4aade4a2d3e6f82cf4a 100644 (file)
@@ -173,6 +173,7 @@ struct hid_item {
 
 #define HID_UP_UNDEFINED       0x00000000
 #define HID_UP_GENDESK         0x00010000
+#define HID_UP_SIMULATION      0x00020000
 #define HID_UP_KEYBOARD                0x00070000
 #define HID_UP_LED             0x00080000
 #define HID_UP_BUTTON          0x00090000
@@ -182,6 +183,8 @@ struct hid_item {
 #define HID_UP_PID             0x000f0000
 #define HID_UP_HPVENDOR         0xff7f0000
 #define HID_UP_MSVENDOR                0xff000000
+#define HID_UP_CUSTOM          0x00ff0000
+#define HID_UP_LOGIVENDOR      0xffbc0000
 
 #define HID_USAGE              0x0000ffff
 
@@ -242,6 +245,7 @@ struct hid_item {
 #define HID_QUIRK_2WHEEL_MOUSE_HACK_7          0x080
 #define HID_QUIRK_2WHEEL_MOUSE_HACK_5          0x100
 #define HID_QUIRK_2WHEEL_MOUSE_HACK_ON         0x200
+#define HID_QUIRK_2WHEEL_POWERMOUSE            0x400
 
 /*
  * This is the global environment of the parser. This information is
@@ -348,7 +352,8 @@ struct hid_report_enum {
 
 #define HID_REPORT_TYPES 3
 
-#define HID_BUFFER_SIZE                64              /* use 64 for compatibility with all possible packetlen */
+#define HID_MIN_BUFFER_SIZE    64              /* make sure there is at least a packet size of space */
+#define HID_MAX_BUFFER_SIZE    4096            /* 4kb */
 #define HID_CONTROL_FIFO_SIZE  256             /* to init devices with >100 reports */
 #define HID_OUTPUT_FIFO_SIZE   64
 
@@ -386,6 +391,8 @@ struct hid_device {                                                 /* device report descriptor */
 
        unsigned long iofl;                                             /* I/O flags (CTRL_RUNNING, OUT_RUNNING) */
 
+       unsigned int bufsize;                                           /* URB buffer size */
+
        struct urb *urbin;                                              /* Input URB */
        char *inbuf;                                                    /* Input buffer */
        dma_addr_t inbuf_dma;                                           /* Input buffer dma */
index 4c13331b5f41d0694f3e44356e92af540de2192d..d32427818af78d590f547e5bcd24efbadef2036a 100644 (file)
@@ -507,6 +507,7 @@ static int hiddev_ioctl(struct inode *inode, struct file *file, unsigned int cmd
                        return -EINVAL;
 
                hid_submit_report(hid, report, USB_DIR_OUT);
+               hid_wait_io(hid);
 
                return 0;
 
index 7398a7f19c1e4e994c8de5f2343fc6dcfe6255c3..0fd0fa9fec21f172d703f16fed7e34b246ed35a8 100644 (file)
@@ -260,7 +260,7 @@ static int stv_stop_video (struct usb_stv *dev)
                        PDEBUG (0, "STV(i): Camera set to original resolution");
        }
        /* origMode */
-       kfree (buf);
+       kfree(buf);
        return i;
 }
 
@@ -276,7 +276,7 @@ static int stv_set_video_mode (struct usb_stv *dev)
        }
 
        if ((i = stv_set_config (dev, 1, 0, 0)) < 0) {
-               kfree (buf);
+               kfree(buf);
                return i;
        }
 
@@ -301,13 +301,13 @@ static int stv_set_video_mode (struct usb_stv *dev)
        goto exit;
 
 error:
-       kfree (buf);
+       kfree(buf);
        if (stop_video == 1)
                stv_stop_video (dev);
        return -1;
 
 exit:
-       kfree (buf);
+       kfree(buf);
        return 0;
 }
 
@@ -327,7 +327,7 @@ static int stv_init (struct usb_stv *stv680)
 
        /* set config 1, interface 0, alternate 0 */
        if ((i = stv_set_config (stv680, 1, 0, 0)) < 0) {
-               kfree (buffer);
+               kfree(buffer);
                PDEBUG (0, "STV(e): set config 1,0,0 failed");
                return -1;
        }
@@ -435,11 +435,11 @@ static int stv_init (struct usb_stv *stv680)
 error:
        i = stv_sndctrl (0, stv680, 0x80, 0, buffer, 0x02);     /* Get Last Error */
        PDEBUG (1, "STV(i): last error: %i,  command = 0x%x", buffer[0], buffer[1]);
-       kfree (buffer);
+       kfree(buffer);
        return -1;
 
 exit:
-       kfree (buffer);
+       kfree(buffer);
 
        /* video = 320x240, 352x288 */
        if (stv680->CIF == 1) {
@@ -708,10 +708,10 @@ static int stv680_stop_stream (struct usb_stv *stv680)
                        usb_kill_urb (stv680->urb[i]);
                        usb_free_urb (stv680->urb[i]);
                        stv680->urb[i] = NULL;
-                       kfree (stv680->sbuf[i].data);
+                       kfree(stv680->sbuf[i].data);
                }
        for (i = 0; i < STV680_NUMSCRATCH; i++) {
-               kfree (stv680->scratch[i].data);
+               kfree(stv680->scratch[i].data);
                stv680->scratch[i].data = NULL;
        }
 
@@ -1068,7 +1068,7 @@ static int stv_close (struct inode *inode, struct file *file)
        stv680->user = 0;
 
        if (stv680->removed) {
-               kfree (stv680);
+               kfree(stv680);
                stv680 = NULL;
                PDEBUG (0, "STV(i): device unregistered");
        }
@@ -1445,14 +1445,14 @@ static inline void usb_stv680_remove_disconnected (struct usb_stv *stv680)
                        usb_kill_urb (stv680->urb[i]);
                        usb_free_urb (stv680->urb[i]);
                        stv680->urb[i] = NULL;
-                       kfree (stv680->sbuf[i].data);
+                       kfree(stv680->sbuf[i].data);
                }
        for (i = 0; i < STV680_NUMSCRATCH; i++)
-               kfree (stv680->scratch[i].data);
+               kfree(stv680->scratch[i].data);
        PDEBUG (0, "STV(i): %s disconnected", stv680->camera_name);
 
        /* Free the memory */
-       kfree (stv680);
+       kfree(stv680);
 }
 
 static void stv680_disconnect (struct usb_interface *intf)
index 7d12eb85310de04c9043d4408dda2d005a92c4da..4fa2cf9a8af2adbf6e673022ea2655c9810e5de6 100644 (file)
@@ -30,8 +30,9 @@
 void nvidia_create_i2c_busses(struct nvidia_par *par) {}
 void nvidia_delete_i2c_busses(struct nvidia_par *par) {}
 
-int nvidia_probe_i2c_connector(struct nvidia_par *par, int conn, u8 **out_edid)
+int nvidia_probe_i2c_connector(struct fb_info *info, int conn, u8 **out_edid)
 {
+       struct nvidia_par *par = info->par;
        struct device_node *dp;
        unsigned char *pedid = NULL;
        unsigned char *disptype = NULL;
index 0ea62d8bc703d1550f9d9a005d97cfb16c17ba76..ca92940f39438d9d7b4bffe0456815a2421b6b58 100644 (file)
@@ -342,16 +342,11 @@ static void vga_cleanup(struct vgastate *state)
        if (state->vidstate != NULL) {
                struct regstate *saved = (struct regstate *) state->vidstate;
 
-               if (saved->vga_font0) 
-                       vfree(saved->vga_font0);
-               if (saved->vga_font1) 
-                       vfree(saved->vga_font1);
-               if (saved->vga_text)
-                       vfree(saved->vga_text);
-               if (saved->vga_cmap)
-                       vfree(saved->vga_cmap);
-               if (saved->attr)
-                       vfree(saved->attr);
+               vfree(saved->vga_font0);
+               vfree(saved->vga_font1);
+               vfree(saved->vga_text);
+               vfree(saved->vga_cmap);
+               vfree(saved->attr);
                kfree(saved);
                state->vidstate = NULL;
        }
index 1c62203a4906ec1c7ba2ca9ede00e2a2c498aeda..6cbfceabd95d78451fd16f053c6ed1b79872fbda 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/cpu.h>
 #include <linux/bitops.h>
 #include <linux/mpage.h>
+#include <linux/bit_spinlock.h>
 
 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
 static void invalidate_bh_lrus(void);
index 3217ac5f6bd7aa03277307aa66a5f64573681c49..2335f14a15830902f1fff25c248d720cba799338 100644 (file)
@@ -3215,10 +3215,8 @@ cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
        }
        
        cifs_sb->tcon = NULL;
-       if (ses) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(HZ / 2);
-       }
+       if (ses)
+               schedule_timeout_interruptible(msecs_to_jiffies(500));
        if (ses)
                sesInfoFree(ses);
 
index 5034365b06a862ac84470d560fc1684253d598ea..8def89f2c4383b052490eeba366241adc07ccaa8 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/errno.h>
 #include <linux/vmalloc.h>
 #include <linux/zlib.h>
+#include <linux/cramfs_fs.h>
 
 static z_stream stream;
 static int initialized;
index a15a2e1f55208882ec828339943894928a651bc0..7376b61269fb711c230ec1d0161119ce0672fc36 100644 (file)
@@ -337,12 +337,10 @@ struct dentry * d_find_alias(struct inode *inode)
  */
 void d_prune_aliases(struct inode *inode)
 {
-       struct list_head *tmp, *head = &inode->i_dentry;
+       struct dentry *dentry;
 restart:
        spin_lock(&dcache_lock);
-       tmp = head;
-       while ((tmp = tmp->next) != head) {
-               struct dentry *dentry = list_entry(tmp, struct dentry, d_alias);
+       list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
                spin_lock(&dentry->d_lock);
                if (!atomic_read(&dentry->d_count)) {
                        __dget_locked(dentry);
@@ -463,10 +461,7 @@ void shrink_dcache_sb(struct super_block * sb)
         * superblock to the most recent end of the unused list.
         */
        spin_lock(&dcache_lock);
-       next = dentry_unused.next;
-       while (next != &dentry_unused) {
-               tmp = next;
-               next = tmp->next;
+       list_for_each_safe(tmp, next, &dentry_unused) {
                dentry = list_entry(tmp, struct dentry, d_lru);
                if (dentry->d_sb != sb)
                        continue;
@@ -478,10 +473,7 @@ void shrink_dcache_sb(struct super_block * sb)
         * Pass two ... free the dentries for this superblock.
         */
 repeat:
-       next = dentry_unused.next;
-       while (next != &dentry_unused) {
-               tmp = next;
-               next = tmp->next;
+       list_for_each_safe(tmp, next, &dentry_unused) {
                dentry = list_entry(tmp, struct dentry, d_lru);
                if (dentry->d_sb != sb)
                        continue;
index c6ec66fd876682f4846f0638c2b3682a66b49f94..49bbc2be3d72937ba4719e656a140bf1c9ea7aba 100644 (file)
@@ -1340,8 +1340,7 @@ int journal_stop(handle_t *handle)
        if (handle->h_sync) {
                do {
                        old_handle_count = transaction->t_handle_count;
-                       set_current_state(TASK_UNINTERRUPTIBLE);
-                       schedule_timeout(1);
+                       schedule_timeout_uninterruptible(1);
                } while (old_handle_count != transaction->t_handle_count);
        }
 
index 456d7e6e29c2683bd2fc4724b7980cb81171bfff..27f199e94cfc8edd369775b4e962eee517dc3a14 100644 (file)
@@ -1701,12 +1701,10 @@ jffs_find_file(struct jffs_control *c, __u32 ino)
 {
        struct jffs_file *f;
        int i = ino % c->hash_len;
-       struct list_head *tmp;
 
        D3(printk("jffs_find_file(): ino: %u\n", ino));
 
-       for (tmp = c->hash[i].next; tmp != &c->hash[i]; tmp = tmp->next) {
-               f = list_entry(tmp, struct jffs_file, hash);
+       list_for_each_entry(f, &c->hash[i], hash) {
                if (ino != f->ino)
                        continue;
                D3(printk("jffs_find_file(): Found file with ino "
@@ -2102,13 +2100,12 @@ jffs_foreach_file(struct jffs_control *c, int (*func)(struct jffs_file *))
        int result = 0;
 
        for (pos = 0; pos < c->hash_len; pos++) {
-               struct list_head *p, *next;
-               for (p = c->hash[pos].next; p != &c->hash[pos]; p = next) {
-                       /* We need a reference to the next file in the
-                          list because `func' might remove the current
-                          file `f'.  */
-                       next = p->next;
-                       r = func(list_entry(p, struct jffs_file, hash));
+               struct jffs_file *f, *next;
+
+               /* We must do _safe, because 'func' might remove the
+                  current file 'f' from the list.  */
+               list_for_each_entry_safe(f, next, &c->hash[pos], hash) {
+                       r = func(f);
                        if (r < 0)
                                return r;
                        result += r;
@@ -2613,9 +2610,8 @@ jffs_print_hash_table(struct jffs_control *c)
 
        printk("JFFS: Dumping the file system's hash table...\n");
        for (i = 0; i < c->hash_len; i++) {
-               struct list_head *p;
-               for (p = c->hash[i].next; p != &c->hash[i]; p = p->next) {
-                       struct jffs_file *f=list_entry(p,struct jffs_file,hash);
+               struct jffs_file *f;
+               list_for_each_entry(f, &c->hash[i], hash) {
                        printk("*** c->hash[%u]: \"%s\" "
                               "(ino: %u, pino: %u)\n",
                               i, (f->name ? f->name : ""),
index 14b3ce87fa29cd96162678fb754936e22c99caec..87332f30141b621b7478f3faacfac6e4c226d991 100644 (file)
@@ -299,8 +299,7 @@ nlmclnt_alloc_call(void)
                        return call;
                }
                printk("nlmclnt_alloc_call: failed, waiting for memory\n");
-               current->state = TASK_INTERRUPTIBLE;
-               schedule_timeout(5*HZ);
+               schedule_timeout_interruptible(5*HZ);
        }
        return NULL;
 }
index 34156260c9b6586939e932e3856cc0211e98cd97..2fa9fdf7d6f573f1f6b004ab82d9a0945d9a6f9c 100644 (file)
@@ -537,7 +537,6 @@ lives_below_in_same_fs(struct dentry *d, struct dentry *dentry)
 static struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry)
 {
        struct vfsmount *res, *p, *q, *r, *s;
-       struct list_head *h;
        struct nameidata nd;
 
        res = q = clone_mnt(mnt, dentry);
@@ -546,8 +545,7 @@ static struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry)
        q->mnt_mountpoint = mnt->mnt_mountpoint;
 
        p = mnt;
-       for (h = mnt->mnt_mounts.next; h != &mnt->mnt_mounts; h = h->next) {
-               r = list_entry(h, struct vfsmount, mnt_child);
+       list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
                if (!lives_below_in_same_fs(r->mnt_mountpoint, dentry))
                        continue;
 
index 2681485cf2d00f9f9fdd647a8bf1cd9861299104..edc95514046d50415b127988fc36ae577b8b3d97 100644 (file)
@@ -34,8 +34,7 @@ nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
                res = rpc_call_sync(clnt, msg, flags);
                if (res != -EJUKEBOX)
                        break;
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(NFS_JUKEBOX_RETRY_TIME);
+               schedule_timeout_interruptible(NFS_JUKEBOX_RETRY_TIME);
                res = -ERESTARTSYS;
        } while (!signalled());
        rpc_clnt_sigunmask(clnt, &oldset);
index 0c5a308e49638171291d22aa3630fc431ac74528..9701ca8c942855a719ccd98559fd878c60ee636c 100644 (file)
@@ -2418,14 +2418,11 @@ static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
                *timeout = NFS4_POLL_RETRY_MAX;
        rpc_clnt_sigmask(clnt, &oldset);
        if (clnt->cl_intr) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(*timeout);
+               schedule_timeout_interruptible(*timeout);
                if (signalled())
                        res = -ERESTARTSYS;
-       } else {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(*timeout);
-       }
+       } else
+               schedule_timeout_uninterruptible(*timeout);
        rpc_clnt_sigunmask(clnt, &oldset);
        *timeout <<= 1;
        return res;
@@ -2578,8 +2575,7 @@ int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4
 static unsigned long
 nfs4_set_lock_task_retry(unsigned long timeout)
 {
-       current->state = TASK_INTERRUPTIBLE;
-       schedule_timeout(timeout);
+       schedule_timeout_interruptible(timeout);
        timeout <<= 1;
        if (timeout > NFS4_LOCK_MAXTIMEOUT)
                return NFS4_LOCK_MAXTIMEOUT;
index 545236414d592b7f3abdf810799f64ae758ca2d2..b6cc8cf24626b0a1f15ec8deffddc704da797c0e 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/swap.h>
 #include <linux/buffer_head.h>
 #include <linux/writeback.h>
+#include <linux/bit_spinlock.h>
 
 #include "aops.h"
 #include "attrib.h"
index 2c7a23dde2d83f1ae18190e5723c0eb8d8ae0548..66aa0b938d6aca876bf2d246462c5fa2e3586e8b 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -39,7 +39,11 @@ void pipe_wait(struct inode * inode)
 {
        DEFINE_WAIT(wait);
 
-       prepare_to_wait(PIPE_WAIT(*inode), &wait, TASK_INTERRUPTIBLE);
+       /*
+        * Pipes are system-local resources, so sleeping on them
+        * is considered a noninteractive wait:
+        */
+       prepare_to_wait(PIPE_WAIT(*inode), &wait, TASK_INTERRUPTIBLE|TASK_NONINTERACTIVE);
        up(PIPE_SEM(*inode));
        schedule();
        finish_wait(PIPE_WAIT(*inode), &wait);
index a8e29e9bbbd08e336fc13a3fd9b1e81d7ec4eda0..4b15761434bc7fac7c494703267995a9b49a6ff2 100644 (file)
@@ -2868,8 +2868,7 @@ static void let_transaction_grow(struct super_block *sb, unsigned long trans_id)
        struct reiserfs_journal *journal = SB_JOURNAL(sb);
        unsigned long bcount = journal->j_bcount;
        while (1) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(1);
+               schedule_timeout_uninterruptible(1);
                journal->j_current_jl->j_state |= LIST_COMMIT_PENDING;
                while ((atomic_read(&journal->j_wcount) > 0 ||
                        atomic_read(&journal->j_jlock)) &&
index 6951c35755beba2502c5e405356a9ff038100189..44b02fc02ebefa3121b2e2956ac4c35fdd1f48ed 100644 (file)
@@ -1934,8 +1934,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
                        if (SB_AP_BITMAP(s))
                                brelse(SB_AP_BITMAP(s)[j].bh);
                }
-               if (SB_AP_BITMAP(s))
-                       vfree(SB_AP_BITMAP(s));
+               vfree(SB_AP_BITMAP(s));
        }
        if (SB_BUFFER_WITH_SB(s))
                brelse(SB_BUFFER_WITH_SB(s));
index 220babe91efd1e8ebdb612a8dbdb56dd52ceb15f..38ab558835c4ba1348dad8738bcc1df7997df598 100644 (file)
@@ -2397,8 +2397,7 @@ smb_proc_readdir_long(struct file *filp, void *dirent, filldir_t filldir,
                if (req->rq_rcls == ERRSRV && req->rq_err == ERRerror) {
                        /* a damn Win95 bug - sometimes it clags if you 
                           ask it too fast */
-                       current->state = TASK_INTERRUPTIBLE;
-                       schedule_timeout(HZ/5);
+                       schedule_timeout_interruptible(msecs_to_jiffies(200));
                        continue;
                 }
 
index 6c6fd0faa8e1fb26fedfb8d1174e228921b4c08e..b0d2873ab2747032118ded993a6e52d389cd80ea 100644 (file)
@@ -39,8 +39,7 @@ typedef struct timespec timespec_t;
 
 static inline void delay(long ticks)
 {
-       set_current_state(TASK_UNINTERRUPTIBLE);
-       schedule_timeout(ticks);
+       schedule_timeout_uninterruptible(ticks);
 }
 
 static inline void nanotime(struct timespec *tvp)
index 655bf4a78afeb72a57eaf4668b2034b3c85c6aa7..e82cf72ac599a55aad2539c6fe0158f2788b15d0 100644 (file)
@@ -1780,10 +1780,10 @@ xfsbufd(
                        xfsbufd_force_sleep = 0;
                }
 
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout((xfs_buf_timer_centisecs * HZ) / 100);
+               schedule_timeout_interruptible
+                       (xfs_buf_timer_centisecs * msecs_to_jiffies(10));
 
-               age = (xfs_buf_age_centisecs * HZ) / 100;
+               age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
                spin_lock(&pbd_delwrite_lock);
                list_for_each_entry_safe(pb, n, &pbd_delwrite_queue, pb_list) {
                        PB_TRACE(pb, "walkq1", (long)pagebuf_ispin(pb));
index 0da87bfc999999e74a32985ffbde515c429cd111..2302454d8d479d72eeeda5bb472633511ff1d4f7 100644 (file)
@@ -467,7 +467,7 @@ xfs_flush_inode(
 
        igrab(inode);
        xfs_syncd_queue_work(vfs, inode, xfs_flush_inode_work);
-       delay(HZ/2);
+       delay(msecs_to_jiffies(500));
 }
 
 /*
@@ -492,7 +492,7 @@ xfs_flush_device(
 
        igrab(inode);
        xfs_syncd_queue_work(vfs, inode, xfs_flush_device_work);
-       delay(HZ/2);
+       delay(msecs_to_jiffies(500));
        xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
 }
 
@@ -520,10 +520,9 @@ xfssyncd(
        struct vfs_sync_work    *work, *n;
        LIST_HEAD               (tmp);
 
-       timeleft = (xfs_syncd_centisecs * HZ) / 100;
+       timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
        for (;;) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               timeleft = schedule_timeout(timeleft);
+               timeleft = schedule_timeout_interruptible(timeleft);
                /* swsusp */
                try_to_freeze();
                if (kthread_should_stop())
@@ -537,7 +536,8 @@ xfssyncd(
                 */
                if (!timeleft || list_empty(&vfsp->vfs_sync_list)) {
                        if (!timeleft)
-                               timeleft = (xfs_syncd_centisecs * HZ) / 100;
+                               timeleft = xfs_syncd_centisecs *
+                                                       msecs_to_jiffies(10);
                        INIT_LIST_HEAD(&vfsp->vfs_sync_work.w_list);
                        list_add_tail(&vfsp->vfs_sync_work.w_list,
                                        &vfsp->vfs_sync_list);
index 80780dba9986fb47b78c204e5c23ab6b77a2fddf..8197c69eff44cdbebed4823db0bfdf0c1a2428fd 100644 (file)
@@ -6,7 +6,6 @@
 #include <linux/kernel.h>
 #include <asm/current.h>
 
-
 /*
  * Simple spin lock operations.  There are two variants, one clears IRQ's
  * on the local processor, one does not.
  * We make no fairness assumptions. They have a cost.
  */
 
-typedef struct {
-       volatile unsigned int lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
-       int on_cpu;
-       int line_no;
-       void *previous;
-       struct task_struct * task;
-       const char *base_file;
-#endif
-} spinlock_t;
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define SPIN_LOCK_UNLOCKED     (spinlock_t){ 0, -1, 0, NULL, NULL, NULL }
-#else
-#define SPIN_LOCK_UNLOCKED     (spinlock_t){ 0 }
-#endif
-
-#define spin_lock_init(x)      do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-#define spin_is_locked(x)      ((x)->lock != 0)
-#define spin_unlock_wait(x)    do { barrier(); } while ((x)->lock)
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-extern void _raw_spin_unlock(spinlock_t * lock);
-extern void debug_spin_lock(spinlock_t * lock, const char *, int);
-extern int debug_spin_trylock(spinlock_t * lock, const char *, int);
-#define _raw_spin_lock(LOCK) \
-       debug_spin_lock(LOCK, __BASE_FILE__, __LINE__)
-#define _raw_spin_trylock(LOCK) \
-       debug_spin_trylock(LOCK, __BASE_FILE__, __LINE__)
-#else
-static inline void _raw_spin_unlock(spinlock_t * lock)
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_spin_is_locked(x)        ((x)->lock != 0)
+#define __raw_spin_unlock_wait(x) \
+               do { cpu_relax(); } while ((x)->lock)
+
+static inline void __raw_spin_unlock(raw_spinlock_t * lock)
 {
        mb();
        lock->lock = 0;
 }
 
-static inline void _raw_spin_lock(spinlock_t * lock)
+static inline void __raw_spin_lock(raw_spinlock_t * lock)
 {
        long tmp;
 
@@ -70,80 +44,64 @@ static inline void _raw_spin_lock(spinlock_t * lock)
        : "m"(lock->lock) : "memory");
 }
 
-static inline int _raw_spin_trylock(spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
        return !test_and_set_bit(0, &lock->lock);
 }
-#endif /* CONFIG_DEBUG_SPINLOCK */
-
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
 
 /***********************************************************/
 
-typedef struct {
-       volatile unsigned int lock;
-} rwlock_t;
-
-#define RW_LOCK_UNLOCKED       (rwlock_t){ 0 }
-
-#define rwlock_init(x)         do { *(x) = RW_LOCK_UNLOCKED; } while(0)
-
-static inline int read_can_lock(rwlock_t *lock)
+static inline int __raw_read_can_lock(raw_rwlock_t *lock)
 {
        return (lock->lock & 1) == 0;
 }
 
-static inline int write_can_lock(rwlock_t *lock)
+static inline int __raw_write_can_lock(raw_rwlock_t *lock)
 {
        return lock->lock == 0;
 }
 
-#ifdef CONFIG_DEBUG_RWLOCK
-extern void _raw_write_lock(rwlock_t * lock);
-extern void _raw_read_lock(rwlock_t * lock);
-#else
-static inline void _raw_write_lock(rwlock_t * lock)
+static inline void __raw_read_lock(raw_rwlock_t *lock)
 {
        long regx;
 
        __asm__ __volatile__(
        "1:     ldl_l   %1,%0\n"
-       "       bne     %1,6f\n"
-       "       lda     %1,1\n"
+       "       blbs    %1,6f\n"
+       "       subl    %1,2,%1\n"
        "       stl_c   %1,%0\n"
        "       beq     %1,6f\n"
        "       mb\n"
        ".subsection 2\n"
        "6:     ldl     %1,%0\n"
-       "       bne     %1,6b\n"
+       "       blbs    %1,6b\n"
        "       br      1b\n"
        ".previous"
        : "=m" (*lock), "=&r" (regx)
        : "m" (*lock) : "memory");
 }
 
-static inline void _raw_read_lock(rwlock_t * lock)
+static inline void __raw_write_lock(raw_rwlock_t *lock)
 {
        long regx;
 
        __asm__ __volatile__(
        "1:     ldl_l   %1,%0\n"
-       "       blbs    %1,6f\n"
-       "       subl    %1,2,%1\n"
+       "       bne     %1,6f\n"
+       "       lda     %1,1\n"
        "       stl_c   %1,%0\n"
        "       beq     %1,6f\n"
        "       mb\n"
        ".subsection 2\n"
        "6:     ldl     %1,%0\n"
-       "       blbs    %1,6b\n"
+       "       bne     %1,6b\n"
        "       br      1b\n"
        ".previous"
        : "=m" (*lock), "=&r" (regx)
        : "m" (*lock) : "memory");
 }
-#endif /* CONFIG_DEBUG_RWLOCK */
 
-static inline int _raw_read_trylock(rwlock_t * lock)
+static inline int __raw_read_trylock(raw_rwlock_t * lock)
 {
        long regx;
        int success;
@@ -165,7 +123,7 @@ static inline int _raw_read_trylock(rwlock_t * lock)
        return success;
 }
 
-static inline int _raw_write_trylock(rwlock_t * lock)
+static inline int __raw_write_trylock(raw_rwlock_t * lock)
 {
        long regx;
        int success;
@@ -187,13 +145,7 @@ static inline int _raw_write_trylock(rwlock_t * lock)
        return success;
 }
 
-static inline void _raw_write_unlock(rwlock_t * lock)
-{
-       mb();
-       lock->lock = 0;
-}
-
-static inline void _raw_read_unlock(rwlock_t * lock)
+static inline void __raw_read_unlock(raw_rwlock_t * lock)
 {
        long regx;
        __asm__ __volatile__(
@@ -209,4 +161,10 @@ static inline void _raw_read_unlock(rwlock_t * lock)
        : "m" (*lock) : "memory");
 }
 
+static inline void __raw_write_unlock(raw_rwlock_t * lock)
+{
+       mb();
+       lock->lock = 0;
+}
+
 #endif /* _ALPHA_SPINLOCK_H */
diff --git a/include/asm-alpha/spinlock_types.h b/include/asm-alpha/spinlock_types.h
new file mode 100644 (file)
index 0000000..8141eb5
--- /dev/null
@@ -0,0 +1,20 @@
+#ifndef _ALPHA_SPINLOCK_TYPES_H
+#define _ALPHA_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+       volatile unsigned int lock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
+
+typedef struct {
+       volatile unsigned int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED         { 0 }
+
+#endif
index 1f906d09b6880f2ac04e9bb0dedf3dd0eafdafee..cb4906b4555583d36c0b3b91f94d2d9d9a4724c4 100644 (file)
  * Unlocked value: 0
  * Locked value: 1
  */
-typedef struct {
-       volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} spinlock_t;
 
-#define SPIN_LOCK_UNLOCKED     (spinlock_t) { 0 }
+#define __raw_spin_is_locked(x)                ((x)->lock != 0)
+#define __raw_spin_unlock_wait(lock) \
+       do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
 
-#define spin_lock_init(x)      do { *(x) = SPIN_LOCK_UNLOCKED; } while (0)
-#define spin_is_locked(x)      ((x)->lock != 0)
-#define spin_unlock_wait(x)    do { barrier(); } while (spin_is_locked(x))
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 
-static inline void _raw_spin_lock(spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
        unsigned long tmp;
 
@@ -47,7 +40,7 @@ static inline void _raw_spin_lock(spinlock_t *lock)
        smp_mb();
 }
 
-static inline int _raw_spin_trylock(spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
        unsigned long tmp;
 
@@ -67,7 +60,7 @@ static inline int _raw_spin_trylock(spinlock_t *lock)
        }
 }
 
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
        smp_mb();
 
@@ -80,23 +73,14 @@ static inline void _raw_spin_unlock(spinlock_t *lock)
 
 /*
  * RWLOCKS
- */
-typedef struct {
-       volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RW_LOCK_UNLOCKED       (rwlock_t) { 0 }
-#define rwlock_init(x)         do { *(x) = RW_LOCK_UNLOCKED; } while (0)
-#define rwlock_is_locked(x)    (*((volatile unsigned int *)(x)) != 0)
-
-/*
+ *
+ *
  * Write locks are easy - we just set bit 31.  When unlocking, we can
  * just write zero since the lock is exclusively held.
  */
-static inline void _raw_write_lock(rwlock_t *rw)
+#define rwlock_is_locked(x)    (*((volatile unsigned int *)(x)) != 0)
+
+static inline void __raw_write_lock(rwlock_t *rw)
 {
        unsigned long tmp;
 
@@ -113,7 +97,7 @@ static inline void _raw_write_lock(rwlock_t *rw)
        smp_mb();
 }
 
-static inline int _raw_write_trylock(rwlock_t *rw)
+static inline int __raw_write_trylock(rwlock_t *rw)
 {
        unsigned long tmp;
 
@@ -133,7 +117,7 @@ static inline int _raw_write_trylock(rwlock_t *rw)
        }
 }
 
-static inline void _raw_write_unlock(rwlock_t *rw)
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
        smp_mb();
 
@@ -156,7 +140,7 @@ static inline void _raw_write_unlock(rwlock_t *rw)
  * currently active.  However, we know we won't have any write
  * locks.
  */
-static inline void _raw_read_lock(rwlock_t *rw)
+static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
        unsigned long tmp, tmp2;
 
@@ -173,7 +157,7 @@ static inline void _raw_read_lock(rwlock_t *rw)
        smp_mb();
 }
 
-static inline void _raw_read_unlock(rwlock_t *rw)
+static inline void __raw_read_unlock(rwlock_t *rw)
 {
        unsigned long tmp, tmp2;
 
@@ -190,6 +174,6 @@ static inline void _raw_read_unlock(rwlock_t *rw)
        : "cc");
 }
 
-#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
+#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
 
 #endif /* __ASM_SPINLOCK_H */
diff --git a/include/asm-arm/spinlock_types.h b/include/asm-arm/spinlock_types.h
new file mode 100644 (file)
index 0000000..43e83f6
--- /dev/null
@@ -0,0 +1,20 @@
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+       volatile unsigned int lock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
+
+typedef struct {
+       volatile unsigned int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED         { 0 }
+
+#endif
index 278de61224d1c82ca667ebee87f62a536bc06986..c49df635a80f11e6d5202152dee10a30cb6f5cc7 100644 (file)
 #define __NR_inotify_init              (__NR_SYSCALL_BASE+316)
 #define __NR_inotify_add_watch         (__NR_SYSCALL_BASE+317)
 #define __NR_inotify_rm_watch          (__NR_SYSCALL_BASE+318)
+#define __NR_mbind                     (__NR_SYSCALL_BASE+319)
+#define __NR_get_mempolicy             (__NR_SYSCALL_BASE+320)
+#define __NR_set_mempolicy             (__NR_SYSCALL_BASE+321)
 
 /*
  * The following SWIs are ARM private.
index 791ee1da9bfa0f81a0e320c872a1fbebe520a81d..dc28daab8aa8d7786fa619facc0116480c33858e 100644 (file)
@@ -22,8 +22,6 @@ typedef struct {
 # error HARDIRQ_BITS is too low!
 #endif
 
-#define irq_enter()            (preempt_count() += HARDIRQ_OFFSET)
-
 #ifndef CONFIG_SMP
 
 extern asmlinkage void __do_softirq(void);
index 6f857be2b6447ac3f64f134ce45844ef2555b11b..a9c55490fb8238e78584176bdb6002a6bd38432e 100644 (file)
                VMLINUX_SYMBOL(__kprobes_text_start) = .;               \
                *(.kprobes.text)                                        \
                VMLINUX_SYMBOL(__kprobes_text_end) = .;
+
+               /* DWARF debug sections.
+               Symbols in the DWARF debugging sections are relative to
+               the beginning of the section so we begin them at 0.  */
+#define DWARF_DEBUG                                                    \
+               /* DWARF 1 */                                           \
+               .debug          0 : { *(.debug) }                       \
+               .line           0 : { *(.line) }                        \
+               /* GNU DWARF 1 extensions */                            \
+               .debug_srcinfo  0 : { *(.debug_srcinfo) }               \
+               .debug_sfnames  0 : { *(.debug_sfnames) }               \
+               /* DWARF 1.1 and DWARF 2 */                             \
+               .debug_aranges  0 : { *(.debug_aranges) }               \
+               .debug_pubnames 0 : { *(.debug_pubnames) }              \
+               /* DWARF 2 */                                           \
+               .debug_info     0 : { *(.debug_info                     \
+                               .gnu.linkonce.wi.*) }                   \
+               .debug_abbrev   0 : { *(.debug_abbrev) }                \
+               .debug_line     0 : { *(.debug_line) }                  \
+               .debug_frame    0 : { *(.debug_frame) }                 \
+               .debug_str      0 : { *(.debug_str) }                   \
+               .debug_loc      0 : { *(.debug_loc) }                   \
+               .debug_macinfo  0 : { *(.debug_macinfo) }               \
+               /* SGI/MIPS DWARF 2 extensions */                       \
+               .debug_weaknames 0 : { *(.debug_weaknames) }            \
+               .debug_funcnames 0 : { *(.debug_funcnames) }            \
+               .debug_typenames 0 : { *(.debug_typenames) }            \
+               .debug_varnames  0 : { *(.debug_varnames) }             \
+
+               /* Stabs debugging sections.  */
+#define STABS_DEBUG                                                    \
+               .stab 0 : { *(.stab) }                                  \
+               .stabstr 0 : { *(.stabstr) }                            \
+               .stab.excl 0 : { *(.stab.excl) }                        \
+               .stab.exclstr 0 : { *(.stab.exclstr) }                  \
+               .stab.index 0 : { *(.stab.index) }                      \
+               .stab.indexstr 0 : { *(.stab.indexstr) }                \
+               .comment 0 : { *(.comment) }
index 28ed8b296afc137af6b3043179fc97eb230a2f54..75c67c785bb8dfd5ddcebf1e07579b5270db2a7c 100644 (file)
@@ -35,7 +35,7 @@
  */
 #define div_long_long_rem(a,b,c) div_ll_X_l_rem(a,b,c)
 
-extern inline long
+static inline long
 div_ll_X_l_rem(long long divs, long div, long *rem)
 {
        long dum2;
index 37bef8ed7bed12bd33bf3670ccf7991516aa7484..0a4ec764377ca1aa0db8c607a35e9ddfd586d297 100644 (file)
@@ -679,7 +679,7 @@ static inline void rep_nop(void)
    However we don't do prefetches for pre XP Athlons currently
    That should be fixed. */
 #define ARCH_HAS_PREFETCH
-extern inline void prefetch(const void *x)
+static inline void prefetch(const void *x)
 {
        alternative_input(ASM_NOP4,
                          "prefetchnta (%1)",
@@ -693,7 +693,7 @@ extern inline void prefetch(const void *x)
 
 /* 3dnow! prefetch to get an exclusive cache line. Useful for 
    spinlocks to avoid one state transition in the cache coherency protocol. */
-extern inline void prefetchw(const void *x)
+static inline void prefetchw(const void *x)
 {
        alternative_input(ASM_NOP4,
                          "prefetchw (%1)",
index f9ff31f400369f0e1429f72829d95f6738e7a129..23604350cdf45e25df90180afa2985f9b489dfd1 100644 (file)
@@ -7,46 +7,21 @@
 #include <linux/config.h>
 #include <linux/compiler.h>
 
-asmlinkage int printk(const char * fmt, ...)
-       __attribute__ ((format (printf, 1, 2)));
-
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
- */
-
-typedef struct {
-       volatile unsigned int slock;
-#ifdef CONFIG_DEBUG_SPINLOCK
-       unsigned magic;
-#endif
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} spinlock_t;
-
-#define SPINLOCK_MAGIC 0xdead4ead
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define SPINLOCK_MAGIC_INIT    , SPINLOCK_MAGIC
-#else
-#define SPINLOCK_MAGIC_INIT    /* */
-#endif
-
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }
-
-#define spin_lock_init(x)      do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-/*
+ *
  * Simple spin lock operations.  There are two variants, one clears IRQ's
  * on the local processor, one does not.
  *
  * We make no fairness assumptions. They have a cost.
+ *
+ * (the type definitions are in asm/spinlock_types.h)
  */
 
-#define spin_is_locked(x)      (*(volatile signed char *)(&(x)->slock) <= 0)
-#define spin_unlock_wait(x)    do { barrier(); } while(spin_is_locked(x))
+#define __raw_spin_is_locked(x) \
+               (*(volatile signed char *)(&(x)->slock) <= 0)
 
-#define spin_lock_string \
+#define __raw_spin_lock_string \
        "\n1:\t" \
        "lock ; decb %0\n\t" \
        "jns 3f\n" \
@@ -57,7 +32,7 @@ typedef struct {
        "jmp 1b\n" \
        "3:\n\t"
 
-#define spin_lock_string_flags \
+#define __raw_spin_lock_string_flags \
        "\n1:\t" \
        "lock ; decb %0\n\t" \
        "jns 4f\n\t" \
@@ -73,86 +48,71 @@ typedef struct {
        "jmp 1b\n" \
        "4:\n\t"
 
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
+{
+       __asm__ __volatile__(
+               __raw_spin_lock_string
+               :"=m" (lock->slock) : : "memory");
+}
+
+static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+{
+       __asm__ __volatile__(
+               __raw_spin_lock_string_flags
+               :"=m" (lock->slock) : "r" (flags) : "memory");
+}
+
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+{
+       char oldval;
+       __asm__ __volatile__(
+               "xchgb %b0,%1"
+               :"=q" (oldval), "=m" (lock->slock)
+               :"0" (0) : "memory");
+       return oldval > 0;
+}
+
 /*
- * This works. Despite all the confusion.
- * (except on PPro SMP or if we are using OOSTORE)
+ * __raw_spin_unlock based on writing $1 to the low byte.
+ * This method works. Despite all the confusion.
+ * (except on PPro SMP or if we are using OOSTORE, so we use xchgb there)
  * (PPro errata 66, 92)
  */
 
 #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
 
-#define spin_unlock_string \
+#define __raw_spin_unlock_string \
        "movb $1,%0" \
                :"=m" (lock->slock) : : "memory"
 
 
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-#ifdef CONFIG_DEBUG_SPINLOCK
-       BUG_ON(lock->magic != SPINLOCK_MAGIC);
-       BUG_ON(!spin_is_locked(lock));
-#endif
        __asm__ __volatile__(
-               spin_unlock_string
+               __raw_spin_unlock_string
        );
 }
 
 #else
 
-#define spin_unlock_string \
+#define __raw_spin_unlock_string \
        "xchgb %b0, %1" \
                :"=q" (oldval), "=m" (lock->slock) \
                :"0" (oldval) : "memory"
 
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
        char oldval = 1;
-#ifdef CONFIG_DEBUG_SPINLOCK
-       BUG_ON(lock->magic != SPINLOCK_MAGIC);
-       BUG_ON(!spin_is_locked(lock));
-#endif
-       __asm__ __volatile__(
-               spin_unlock_string
-       );
-}
 
-#endif
-
-static inline int _raw_spin_trylock(spinlock_t *lock)
-{
-       char oldval;
        __asm__ __volatile__(
-               "xchgb %b0,%1"
-               :"=q" (oldval), "=m" (lock->slock)
-               :"0" (0) : "memory");
-       return oldval > 0;
+               __raw_spin_unlock_string
+       );
 }
 
-static inline void _raw_spin_lock(spinlock_t *lock)
-{
-#ifdef CONFIG_DEBUG_SPINLOCK
-       if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
-               printk("eip: %p\n", __builtin_return_address(0));
-               BUG();
-       }
 #endif
-       __asm__ __volatile__(
-               spin_lock_string
-               :"=m" (lock->slock) : : "memory");
-}
 
-static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
-{
-#ifdef CONFIG_DEBUG_SPINLOCK
-       if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
-               printk("eip: %p\n", __builtin_return_address(0));
-               BUG();
-       }
-#endif
-       __asm__ __volatile__(
-               spin_lock_string_flags
-               :"=m" (lock->slock) : "r" (flags) : "memory");
-}
+#define __raw_spin_unlock_wait(lock) \
+       do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
 
 /*
  * Read-write spinlocks, allowing multiple readers
@@ -163,72 +123,41 @@ static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
  * can "mix" irq-safe locks - any writer needs to get a
  * irq-safe write-lock, but readers can get non-irqsafe
  * read-locks.
+ *
+ * On x86, we implement read-write locks as a 32-bit counter
+ * with the high bit (sign) being the "contended" bit.
+ *
+ * The inline assembly is non-obvious. Think about it.
+ *
+ * Changed to use the same technique as rw semaphores.  See
+ * semaphore.h for details.  -ben
+ *
+ * the helpers are in arch/i386/kernel/semaphore.c
  */
-typedef struct {
-       volatile unsigned int lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
-       unsigned magic;
-#endif
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RWLOCK_MAGIC   0xdeaf1eed
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define RWLOCK_MAGIC_INIT      , RWLOCK_MAGIC
-#else
-#define RWLOCK_MAGIC_INIT      /* */
-#endif
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }
-
-#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
 
 /**
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define read_can_lock(x) ((int)(x)->lock > 0)
+#define __raw_read_can_lock(x)         ((int)(x)->lock > 0)
 
 /**
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+#define __raw_write_can_lock(x)                ((x)->lock == RW_LOCK_BIAS)
 
-/*
- * On x86, we implement read-write locks as a 32-bit counter
- * with the high bit (sign) being the "contended" bit.
- *
- * The inline assembly is non-obvious. Think about it.
- *
- * Changed to use the same technique as rw semaphores.  See
- * semaphore.h for details.  -ben
- */
-/* the spinlock helpers are in arch/i386/kernel/semaphore.c */
-
-static inline void _raw_read_lock(rwlock_t *rw)
+static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
-#ifdef CONFIG_DEBUG_SPINLOCK
-       BUG_ON(rw->magic != RWLOCK_MAGIC);
-#endif
        __build_read_lock(rw, "__read_lock_failed");
 }
 
-static inline void _raw_write_lock(rwlock_t *rw)
+static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
-#ifdef CONFIG_DEBUG_SPINLOCK
-       BUG_ON(rw->magic != RWLOCK_MAGIC);
-#endif
        __build_write_lock(rw, "__write_lock_failed");
 }
 
-#define _raw_read_unlock(rw)           asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
-#define _raw_write_unlock(rw)  asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
-
-static inline int _raw_read_trylock(rwlock_t *lock)
+static inline int __raw_read_trylock(raw_rwlock_t *lock)
 {
        atomic_t *count = (atomic_t *)lock;
        atomic_dec(count);
@@ -238,7 +167,7 @@ static inline int _raw_read_trylock(rwlock_t *lock)
        return 0;
 }
 
-static inline int _raw_write_trylock(rwlock_t *lock)
+static inline int __raw_write_trylock(raw_rwlock_t *lock)
 {
        atomic_t *count = (atomic_t *)lock;
        if (atomic_sub_and_test(RW_LOCK_BIAS, count))
@@ -247,4 +176,15 @@ static inline int _raw_write_trylock(rwlock_t *lock)
        return 0;
 }
 
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
+{
+       asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
+}
+
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
+{
+       asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ", %0"
+                                : "=m" (rw->lock) : : "memory");
+}
+
 #endif /* __ASM_SPINLOCK_H */
diff --git a/include/asm-i386/spinlock_types.h b/include/asm-i386/spinlock_types.h
new file mode 100644 (file)
index 0000000..59efe84
--- /dev/null
@@ -0,0 +1,20 @@
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+       volatile unsigned int slock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED       { 1 }
+
+typedef struct {
+       volatile unsigned int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED         { RW_LOCK_BIAS }
+
+#endif
index d2430aa0d49db76db4b6c771bc1eb9a9bc402d36..5b78611411c30e244a650b230aaae7c62ec5a6e9 100644 (file)
 #include <asm/intrinsics.h>
 #include <asm/system.h>
 
-typedef struct {
-       volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} spinlock_t;
-
-#define SPIN_LOCK_UNLOCKED                     (spinlock_t) { 0 }
-#define spin_lock_init(x)                      ((x)->lock = 0)
+#define __raw_spin_lock_init(x)                        ((x)->lock = 0)
 
 #ifdef ASM_SUPPORTED
 /*
  * Try to get the lock.  If we fail to get the lock, make a non-standard call to
  * ia64_spinlock_contention().  We do not use a normal call because that would force all
- * callers of spin_lock() to be non-leaf routines.  Instead, ia64_spinlock_contention() is
- * carefully coded to touch only those registers that spin_lock() marks "clobbered".
+ * callers of __raw_spin_lock() to be non-leaf routines.  Instead, ia64_spinlock_contention() is
+ * carefully coded to touch only those registers that __raw_spin_lock() marks "clobbered".
  */
 
 #define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory"
 
 static inline void
-_raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
+__raw_spin_lock_flags (raw_spinlock_t *lock, unsigned long flags)
 {
        register volatile unsigned int *ptr asm ("r31") = &lock->lock;
 
@@ -94,17 +86,17 @@ _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
 #endif
 }
 
-#define _raw_spin_lock(lock) _raw_spin_lock_flags(lock, 0)
+#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
 
 /* Unlock by doing an ordered store and releasing the cacheline with nta */
-static inline void _raw_spin_unlock(spinlock_t *x) {
+static inline void __raw_spin_unlock(raw_spinlock_t *x) {
        barrier();
        asm volatile ("st4.rel.nta [%0] = r0\n\t" :: "r"(x));
 }
 
 #else /* !ASM_SUPPORTED */
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
-# define _raw_spin_lock(x)                                                             \
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+# define __raw_spin_lock(x)                                                            \
 do {                                                                                   \
        __u32 *ia64_spinlock_ptr = (__u32 *) (x);                                       \
        __u64 ia64_spinlock_val;                                                        \
@@ -117,29 +109,20 @@ do {                                                                                      \
                } while (ia64_spinlock_val);                                            \
        }                                                                               \
 } while (0)
-#define _raw_spin_unlock(x)    do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
+#define __raw_spin_unlock(x)   do { barrier(); ((raw_spinlock_t *) x)->lock = 0; } while (0)
 #endif /* !ASM_SUPPORTED */
 
-#define spin_is_locked(x)      ((x)->lock != 0)
-#define _raw_spin_trylock(x)   (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
-#define spin_unlock_wait(x)    do { barrier(); } while ((x)->lock)
-
-typedef struct {
-       volatile unsigned int read_counter      : 24;
-       volatile unsigned int write_lock        :  8;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} rwlock_t;
-#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
+#define __raw_spin_is_locked(x)                ((x)->lock != 0)
+#define __raw_spin_trylock(x)          (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
+#define __raw_spin_unlock_wait(lock) \
+       do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
 
-#define rwlock_init(x)         do { *(x) = RW_LOCK_UNLOCKED; } while(0)
-#define read_can_lock(rw)      (*(volatile int *)(rw) >= 0)
-#define write_can_lock(rw)     (*(volatile int *)(rw) == 0)
+#define __raw_read_can_lock(rw)                (*(volatile int *)(rw) >= 0)
+#define __raw_write_can_lock(rw)       (*(volatile int *)(rw) == 0)
 
-#define _raw_read_lock(rw)                                                             \
+#define __raw_read_lock(rw)                                                            \
 do {                                                                                   \
-       rwlock_t *__read_lock_ptr = (rw);                                               \
+       raw_rwlock_t *__read_lock_ptr = (rw);                                           \
                                                                                        \
        while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {          \
                ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);                        \
@@ -148,14 +131,14 @@ do {                                                                                      \
        }                                                                               \
 } while (0)
 
-#define _raw_read_unlock(rw)                                   \
+#define __raw_read_unlock(rw)                                  \
 do {                                                           \
-       rwlock_t *__read_lock_ptr = (rw);                       \
+       raw_rwlock_t *__read_lock_ptr = (rw);                   \
        ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);        \
 } while (0)
 
 #ifdef ASM_SUPPORTED
-#define _raw_write_lock(rw)                                                    \
+#define __raw_write_lock(rw)                                                   \
 do {                                                                           \
        __asm__ __volatile__ (                                                  \
                "mov ar.ccv = r0\n"                                             \
@@ -170,7 +153,7 @@ do {                                                                                \
                :: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory");            \
 } while(0)
 
-#define _raw_write_trylock(rw)                                                 \
+#define __raw_write_trylock(rw)                                                        \
 ({                                                                             \
        register long result;                                                   \
                                                                                \
@@ -182,7 +165,7 @@ do {                                                                                \
        (result == 0);                                                          \
 })
 
-static inline void _raw_write_unlock(rwlock_t *x)
+static inline void __raw_write_unlock(raw_rwlock_t *x)
 {
        u8 *y = (u8 *)x;
        barrier();
@@ -191,7 +174,7 @@ static inline void _raw_write_unlock(rwlock_t *x)
 
 #else /* !ASM_SUPPORTED */
 
-#define _raw_write_lock(l)                                                             \
+#define __raw_write_lock(l)                                                            \
 ({                                                                                     \
        __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);                       \
        __u32 *ia64_write_lock_ptr = (__u32 *) (l);                                     \
@@ -202,7 +185,7 @@ static inline void _raw_write_unlock(rwlock_t *x)
        } while (ia64_val);                                                             \
 })
 
-#define _raw_write_trylock(rw)                                         \
+#define __raw_write_trylock(rw)                                                \
 ({                                                                     \
        __u64 ia64_val;                                                 \
        __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1);                  \
@@ -210,7 +193,7 @@ static inline void _raw_write_unlock(rwlock_t *x)
        (ia64_val == 0);                                                \
 })
 
-static inline void _raw_write_unlock(rwlock_t *x)
+static inline void __raw_write_unlock(raw_rwlock_t *x)
 {
        barrier();
        x->write_lock = 0;
@@ -218,6 +201,6 @@ static inline void _raw_write_unlock(rwlock_t *x)
 
 #endif /* !ASM_SUPPORTED */
 
-#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
+#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
 
 #endif /*  _ASM_IA64_SPINLOCK_H */
diff --git a/include/asm-ia64/spinlock_types.h b/include/asm-ia64/spinlock_types.h
new file mode 100644 (file)
index 0000000..474e46f
--- /dev/null
@@ -0,0 +1,21 @@
+#ifndef _ASM_IA64_SPINLOCK_TYPES_H
+#define _ASM_IA64_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+       volatile unsigned int lock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
+
+typedef struct {
+       volatile unsigned int read_counter      : 31;
+       volatile unsigned int write_lock        :  1;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED         { 0, 0 }
+
+#endif
index 6608d8371c5084541c2d84cb95e0f1df12eeb8f8..7de7def28da97c651ef7aaebed4271905663e038 100644 (file)
 #include <asm/atomic.h>
 #include <asm/page.h>
 
-extern int printk(const char * fmt, ...)
-       __attribute__ ((format (printf, 1, 2)));
-
-#define RW_LOCK_BIAS            0x01000000
-#define RW_LOCK_BIAS_STR       "0x01000000"
-
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
- */
-
-typedef struct {
-       volatile int slock;
-#ifdef CONFIG_DEBUG_SPINLOCK
-       unsigned magic;
-#endif
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} spinlock_t;
-
-#define SPINLOCK_MAGIC 0xdead4ead
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define SPINLOCK_MAGIC_INIT    , SPINLOCK_MAGIC
-#else
-#define SPINLOCK_MAGIC_INIT    /* */
-#endif
-
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }
-
-#define spin_lock_init(x)      do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-/*
+ *
+ * (the type definitions are in asm/spinlock_types.h)
+ *
  * Simple spin lock operations.  There are two variants, one clears IRQ's
  * on the local processor, one does not.
  *
  * We make no fairness assumptions. They have a cost.
  */
 
-#define spin_is_locked(x)      (*(volatile int *)(&(x)->slock) <= 0)
-#define spin_unlock_wait(x)    do { barrier(); } while(spin_is_locked(x))
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+#define __raw_spin_is_locked(x)                (*(volatile int *)(&(x)->slock) <= 0)
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_spin_unlock_wait(x) \
+               do { cpu_relax(); } while (__raw_spin_is_locked(x))
 
 /**
- * _raw_spin_trylock - Try spin lock and return a result
+ * __raw_spin_trylock - Try spin lock and return a result
  * @lock: Pointer to the lock variable
  *
- * _raw_spin_trylock() tries to get the lock and returns a result.
+ * __raw_spin_trylock() tries to get the lock and returns a result.
  * On the m32r, the result value is 1 (= Success) or 0 (= Failure).
  */
-static inline int _raw_spin_trylock(spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
        int oldval;
        unsigned long tmp1, tmp2;
@@ -78,7 +51,7 @@ static inline int _raw_spin_trylock(spinlock_t *lock)
         * }
         */
        __asm__ __volatile__ (
-               "# spin_trylock                 \n\t"
+               "# __raw_spin_trylock           \n\t"
                "ldi    %1, #0;                 \n\t"
                "mvfc   %2, psw;                \n\t"
                "clrpsw #0x40 -> nop;           \n\t"
@@ -97,16 +70,10 @@ static inline int _raw_spin_trylock(spinlock_t *lock)
        return (oldval > 0);
 }
 
-static inline void _raw_spin_lock(spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
        unsigned long tmp0, tmp1;
 
-#ifdef CONFIG_DEBUG_SPINLOCK
-       if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
-               printk("pc: %p\n", __builtin_return_address(0));
-               BUG();
-       }
-#endif
        /*
         * lock->slock :  =1 : unlock
         *             : <=0 : lock
@@ -118,7 +85,7 @@ static inline void _raw_spin_lock(spinlock_t *lock)
         * }
         */
        __asm__ __volatile__ (
-               "# spin_lock                    \n\t"
+               "# __raw_spin_lock              \n\t"
                ".fillinsn                      \n"
                "1:                             \n\t"
                "mvfc   %1, psw;                \n\t"
@@ -145,12 +112,8 @@ static inline void _raw_spin_lock(spinlock_t *lock)
        );
 }
 
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-#ifdef CONFIG_DEBUG_SPINLOCK
-       BUG_ON(lock->magic != SPINLOCK_MAGIC);
-       BUG_ON(!spin_is_locked(lock));
-#endif
        mb();
        lock->slock = 1;
 }
@@ -164,59 +127,32 @@ static inline void _raw_spin_unlock(spinlock_t *lock)
  * can "mix" irq-safe locks - any writer needs to get a
  * irq-safe write-lock, but readers can get non-irqsafe
  * read-locks.
+ *
+ * On x86, we implement read-write locks as a 32-bit counter
+ * with the high bit (sign) being the "contended" bit.
+ *
+ * The inline assembly is non-obvious. Think about it.
+ *
+ * Changed to use the same technique as rw semaphores.  See
+ * semaphore.h for details.  -ben
  */
-typedef struct {
-       volatile int lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
-       unsigned magic;
-#endif
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RWLOCK_MAGIC   0xdeaf1eed
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define RWLOCK_MAGIC_INIT      , RWLOCK_MAGIC
-#else
-#define RWLOCK_MAGIC_INIT      /* */
-#endif
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }
-
-#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
 
 /**
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define read_can_lock(x) ((int)(x)->lock > 0)
+#define __raw_read_can_lock(x) ((int)(x)->lock > 0)
 
 /**
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-
-/*
- * On x86, we implement read-write locks as a 32-bit counter
- * with the high bit (sign) being the "contended" bit.
- *
- * The inline assembly is non-obvious. Think about it.
- *
- * Changed to use the same technique as rw semaphores.  See
- * semaphore.h for details.  -ben
- */
-/* the spinlock helpers are in arch/i386/kernel/semaphore.c */
+#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
 
-static inline void _raw_read_lock(rwlock_t *rw)
+static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
        unsigned long tmp0, tmp1;
 
-#ifdef CONFIG_DEBUG_SPINLOCK
-       BUG_ON(rw->magic != RWLOCK_MAGIC);
-#endif
        /*
         * rw->lock :  >0 : unlock
         *          : <=0 : lock
@@ -264,13 +200,10 @@ static inline void _raw_read_lock(rwlock_t *rw)
        );
 }
 
-static inline void _raw_write_lock(rwlock_t *rw)
+static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
        unsigned long tmp0, tmp1, tmp2;
 
-#ifdef CONFIG_DEBUG_SPINLOCK
-       BUG_ON(rw->magic != RWLOCK_MAGIC);
-#endif
        /*
         * rw->lock :  =RW_LOCK_BIAS_STR : unlock
         *          : !=RW_LOCK_BIAS_STR : lock
@@ -320,7 +253,7 @@ static inline void _raw_write_lock(rwlock_t *rw)
        );
 }
 
-static inline void _raw_read_unlock(rwlock_t *rw)
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
        unsigned long tmp0, tmp1;
 
@@ -342,7 +275,7 @@ static inline void _raw_read_unlock(rwlock_t *rw)
        );
 }
 
-static inline void _raw_write_unlock(rwlock_t *rw)
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
        unsigned long tmp0, tmp1, tmp2;
 
@@ -366,9 +299,9 @@ static inline void _raw_write_unlock(rwlock_t *rw)
        );
 }
 
-#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
+#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
 
-static inline int _raw_write_trylock(rwlock_t *lock)
+static inline int __raw_write_trylock(raw_rwlock_t *lock)
 {
        atomic_t *count = (atomic_t *)lock;
        if (atomic_sub_and_test(RW_LOCK_BIAS, count))
diff --git a/include/asm-m32r/spinlock_types.h b/include/asm-m32r/spinlock_types.h
new file mode 100644 (file)
index 0000000..7e9941c
--- /dev/null
@@ -0,0 +1,23 @@
+#ifndef _ASM_M32R_SPINLOCK_TYPES_H
+#define _ASM_M32R_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+       volatile int slock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED       { 1 }
+
+typedef struct {
+       volatile int lock;
+} raw_rwlock_t;
+
+#define RW_LOCK_BIAS                   0x01000000
+#define RW_LOCK_BIAS_STR               "0x01000000"
+
+#define __RAW_RW_LOCK_UNLOCKED         { RW_LOCK_BIAS }
+
+#endif
index 114d3eb98a6aa39fc4ad01a46c4ab6aa6c3650ab..4d0135b111567e5e59956a3bd30bf7b58f86debe 100644 (file)
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  */
 
-typedef struct {
-       volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} spinlock_t;
-
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
-
-#define spin_lock_init(x)      do { (x)->lock = 0; } while(0)
-
-#define spin_is_locked(x)      ((x)->lock != 0)
-#define spin_unlock_wait(x)    do { barrier(); } while ((x)->lock)
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+#define __raw_spin_is_locked(x)        ((x)->lock != 0)
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_spin_unlock_wait(x) \
+               do { cpu_relax(); } while ((x)->lock)
 
 /*
  * Simple spin lock operations.  There are two variants, one clears IRQ's
@@ -38,13 +28,13 @@ typedef struct {
  * We make no fairness assumptions.  They have a cost.
  */
 
-static inline void _raw_spin_lock(spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
        unsigned int tmp;
 
        if (R10000_LLSC_WAR) {
                __asm__ __volatile__(
-               "       .set    noreorder       # _raw_spin_lock        \n"
+               "       .set    noreorder       # __raw_spin_lock       \n"
                "1:     ll      %1, %2                                  \n"
                "       bnez    %1, 1b                                  \n"
                "        li     %1, 1                                   \n"
@@ -58,7 +48,7 @@ static inline void _raw_spin_lock(spinlock_t *lock)
                : "memory");
        } else {
                __asm__ __volatile__(
-               "       .set    noreorder       # _raw_spin_lock        \n"
+               "       .set    noreorder       # __raw_spin_lock       \n"
                "1:     ll      %1, %2                                  \n"
                "       bnez    %1, 1b                                  \n"
                "        li     %1, 1                                   \n"
@@ -72,10 +62,10 @@ static inline void _raw_spin_lock(spinlock_t *lock)
        }
 }
 
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
        __asm__ __volatile__(
-       "       .set    noreorder       # _raw_spin_unlock      \n"
+       "       .set    noreorder       # __raw_spin_unlock     \n"
        "       sync                                            \n"
        "       sw      $0, %0                                  \n"
        "       .set\treorder                                   \n"
@@ -84,13 +74,13 @@ static inline void _raw_spin_unlock(spinlock_t *lock)
        : "memory");
 }
 
-static inline unsigned int _raw_spin_trylock(spinlock_t *lock)
+static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
 {
        unsigned int temp, res;
 
        if (R10000_LLSC_WAR) {
                __asm__ __volatile__(
-               "       .set    noreorder       # _raw_spin_trylock     \n"
+               "       .set    noreorder       # __raw_spin_trylock    \n"
                "1:     ll      %0, %3                                  \n"
                "       ori     %2, %0, 1                               \n"
                "       sc      %2, %1                                  \n"
@@ -104,7 +94,7 @@ static inline unsigned int _raw_spin_trylock(spinlock_t *lock)
                : "memory");
        } else {
                __asm__ __volatile__(
-               "       .set    noreorder       # _raw_spin_trylock     \n"
+               "       .set    noreorder       # __raw_spin_trylock    \n"
                "1:     ll      %0, %3                                  \n"
                "       ori     %2, %0, 1                               \n"
                "       sc      %2, %1                                  \n"
@@ -129,24 +119,13 @@ static inline unsigned int _raw_spin_trylock(spinlock_t *lock)
  * read-locks.
  */
 
-typedef struct {
-       volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
-
-#define rwlock_init(x)  do { *(x) = RW_LOCK_UNLOCKED; } while(0)
-
-static inline void _raw_read_lock(rwlock_t *rw)
+static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
        unsigned int tmp;
 
        if (R10000_LLSC_WAR) {
                __asm__ __volatile__(
-               "       .set    noreorder       # _raw_read_lock        \n"
+               "       .set    noreorder       # __raw_read_lock       \n"
                "1:     ll      %1, %2                                  \n"
                "       bltz    %1, 1b                                  \n"
                "        addu   %1, 1                                   \n"
@@ -160,7 +139,7 @@ static inline void _raw_read_lock(rwlock_t *rw)
                : "memory");
        } else {
                __asm__ __volatile__(
-               "       .set    noreorder       # _raw_read_lock        \n"
+               "       .set    noreorder       # __raw_read_lock       \n"
                "1:     ll      %1, %2                                  \n"
                "       bltz    %1, 1b                                  \n"
                "        addu   %1, 1                                   \n"
@@ -177,13 +156,13 @@ static inline void _raw_read_lock(rwlock_t *rw)
 /* Note the use of sub, not subu which will make the kernel die with an
    overflow exception if we ever try to unlock an rwlock that is already
    unlocked or is being held by a writer.  */
-static inline void _raw_read_unlock(rwlock_t *rw)
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
        unsigned int tmp;
 
        if (R10000_LLSC_WAR) {
                __asm__ __volatile__(
-               "1:     ll      %1, %2          # _raw_read_unlock      \n"
+               "1:     ll      %1, %2          # __raw_read_unlock     \n"
                "       sub     %1, 1                                   \n"
                "       sc      %1, %0                                  \n"
                "       beqzl   %1, 1b                                  \n"
@@ -193,7 +172,7 @@ static inline void _raw_read_unlock(rwlock_t *rw)
                : "memory");
        } else {
                __asm__ __volatile__(
-               "       .set    noreorder       # _raw_read_unlock      \n"
+               "       .set    noreorder       # __raw_read_unlock     \n"
                "1:     ll      %1, %2                                  \n"
                "       sub     %1, 1                                   \n"
                "       sc      %1, %0                                  \n"
@@ -206,13 +185,13 @@ static inline void _raw_read_unlock(rwlock_t *rw)
        }
 }
 
-static inline void _raw_write_lock(rwlock_t *rw)
+static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
        unsigned int tmp;
 
        if (R10000_LLSC_WAR) {
                __asm__ __volatile__(
-               "       .set    noreorder       # _raw_write_lock       \n"
+               "       .set    noreorder       # __raw_write_lock      \n"
                "1:     ll      %1, %2                                  \n"
                "       bnez    %1, 1b                                  \n"
                "        lui    %1, 0x8000                              \n"
@@ -226,7 +205,7 @@ static inline void _raw_write_lock(rwlock_t *rw)
                : "memory");
        } else {
                __asm__ __volatile__(
-               "       .set    noreorder       # _raw_write_lock       \n"
+               "       .set    noreorder       # __raw_write_lock      \n"
                "1:     ll      %1, %2                                  \n"
                "       bnez    %1, 1b                                  \n"
                "        lui    %1, 0x8000                              \n"
@@ -241,26 +220,26 @@ static inline void _raw_write_lock(rwlock_t *rw)
        }
 }
 
-static inline void _raw_write_unlock(rwlock_t *rw)
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
        __asm__ __volatile__(
-       "       sync                    # _raw_write_unlock     \n"
+       "       sync                    # __raw_write_unlock    \n"
        "       sw      $0, %0                                  \n"
        : "=m" (rw->lock)
        : "m" (rw->lock)
        : "memory");
 }
 
-#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
+#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
 
-static inline int _raw_write_trylock(rwlock_t *rw)
+static inline int __raw_write_trylock(raw_rwlock_t *rw)
 {
        unsigned int tmp;
        int ret;
 
        if (R10000_LLSC_WAR) {
                __asm__ __volatile__(
-               "       .set    noreorder       # _raw_write_trylock    \n"
+               "       .set    noreorder       # __raw_write_trylock   \n"
                "       li      %2, 0                                   \n"
                "1:     ll      %1, %3                                  \n"
                "       bnez    %1, 2f                                  \n"
@@ -277,7 +256,7 @@ static inline int _raw_write_trylock(rwlock_t *rw)
                : "memory");
        } else {
                __asm__ __volatile__(
-               "       .set    noreorder       # _raw_write_trylock    \n"
+               "       .set    noreorder       # __raw_write_trylock   \n"
                "       li      %2, 0                                   \n"
                "1:     ll      %1, %3                                  \n"
                "       bnez    %1, 2f                                  \n"
diff --git a/include/asm-mips/spinlock_types.h b/include/asm-mips/spinlock_types.h
new file mode 100644 (file)
index 0000000..ce26c50
--- /dev/null
@@ -0,0 +1,20 @@
+#ifndef _ASM_SPINLOCK_TYPES_H
+#define _ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+       volatile unsigned int lock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
+
+typedef struct {
+       volatile unsigned int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED         { 0 }
+
+#endif
index e24f7579adb0d70dd4024f81883d2a4ada961c9a..048a2c7fd0c0a54530fdabb5350915652fa1e2c0 100644 (file)
 #  define ATOMIC_HASH_SIZE 4
 #  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
 
-extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
 
-/* Can't use _raw_spin_lock_irq because of #include problems, so
+/* Can't use raw_spin_lock_irq because of #include problems, so
  * this is the substitute */
 #define _atomic_spin_lock_irqsave(l,f) do {    \
-       spinlock_t *s = ATOMIC_HASH(l);         \
+       raw_spinlock_t *s = ATOMIC_HASH(l);             \
        local_irq_save(f);                      \
-       _raw_spin_lock(s);                      \
+       __raw_spin_lock(s);                     \
 } while(0)
 
 #define _atomic_spin_unlock_irqrestore(l,f) do {       \
-       spinlock_t *s = ATOMIC_HASH(l);                 \
-       _raw_spin_unlock(s);                            \
+       raw_spinlock_t *s = ATOMIC_HASH(l);                     \
+       __raw_spin_unlock(s);                           \
        local_irq_restore(f);                           \
 } while(0)
 
index 928e5ef850bd2a3fb8cbc2fec71305069ec57db9..af7db694b22d3b9a0827828cd3a15128f9233b35 100644 (file)
@@ -2,7 +2,7 @@
 #define _PARISC_BITOPS_H
 
 #include <linux/compiler.h>
-#include <asm/system.h>
+#include <asm/spinlock.h>
 #include <asm/byteorder.h>
 #include <asm/atomic.h>
 
index 06732719d927f7d2a5d71151d9245ccda79bccba..aa592d8c0e396247f759df4a382511819a70750b 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <linux/config.h>
 #include <linux/mm.h>
+#include <asm/cache.h> /* for flush_user_dcache_range_asm() proto */
 
 /* The usual comment is "Caches aren't brain-dead on the <architecture>".
  * Unfortunately, that doesn't apply to PA-RISC. */
index 0b61f51d84670a7c37dfd916950e9c831389a782..a9dfadd05658e7a548c73318385c73b791999593 100644 (file)
@@ -11,6 +11,7 @@
 #ifndef __ASSEMBLY__
 #include <linux/config.h>
 #include <linux/threads.h>
+#include <linux/spinlock_types.h>
 
 #include <asm/hardware.h>
 #include <asm/page.h>
index 679ea1c651efed3f0fa9f35639c5a56c6b899194..43eaa6e742e06f3a1f77dffca9d6eccf61fa29d4 100644 (file)
@@ -2,30 +2,25 @@
 #define __ASM_SPINLOCK_H
 
 #include <asm/system.h>
+#include <asm/processor.h>
+#include <asm/spinlock_types.h>
 
 /* Note that PA-RISC has to use `1' to mean unlocked and `0' to mean locked
  * since it only has load-and-zero. Moreover, at least on some PA processors,
  * the semaphore address has to be 16-byte aligned.
  */
 
-#ifndef CONFIG_DEBUG_SPINLOCK
-
-#define __SPIN_LOCK_UNLOCKED   { { 1, 1, 1, 1 } }
-#undef SPIN_LOCK_UNLOCKED
-#define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED
-
-#define spin_lock_init(x)      do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-static inline int spin_is_locked(spinlock_t *x)
+static inline int __raw_spin_is_locked(raw_spinlock_t *x)
 {
        volatile unsigned int *a = __ldcw_align(x);
        return *a == 0;
 }
 
-#define spin_unlock_wait(x)    do { barrier(); } while(spin_is_locked(x))
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_spin_unlock_wait(x) \
+               do { cpu_relax(); } while (__raw_spin_is_locked(x))
 
-static inline void _raw_spin_lock(spinlock_t *x)
+static inline void __raw_spin_lock(raw_spinlock_t *x)
 {
        volatile unsigned int *a;
 
@@ -36,7 +31,7 @@ static inline void _raw_spin_lock(spinlock_t *x)
        mb();
 }
 
-static inline void _raw_spin_unlock(spinlock_t *x)
+static inline void __raw_spin_unlock(raw_spinlock_t *x)
 {
        volatile unsigned int *a;
        mb();
@@ -45,7 +40,7 @@ static inline void _raw_spin_unlock(spinlock_t *x)
        mb();
 }
 
-static inline int _raw_spin_trylock(spinlock_t *x)
+static inline int __raw_spin_trylock(raw_spinlock_t *x)
 {
        volatile unsigned int *a;
        int ret;
@@ -57,131 +52,38 @@ static inline int _raw_spin_trylock(spinlock_t *x)
 
        return ret;
 }
-       
-#define spin_lock_own(LOCK, LOCATION)  ((void)0)
-
-#else /* !(CONFIG_DEBUG_SPINLOCK) */
-
-#define SPINLOCK_MAGIC 0x1D244B3C
-
-#define __SPIN_LOCK_UNLOCKED   { { 1, 1, 1, 1 }, SPINLOCK_MAGIC, 10, __FILE__ , NULL, 0, -1, NULL, NULL }
-#undef SPIN_LOCK_UNLOCKED
-#define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED
-
-#define spin_lock_init(x)      do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-#define CHECK_LOCK(x)                                                  \
-       do {                                                            \
-               if (unlikely((x)->magic != SPINLOCK_MAGIC)) {                   \
-                       printk(KERN_ERR "%s:%d: spin_is_locked"         \
-                       " on uninitialized spinlock %p.\n",             \
-                               __FILE__, __LINE__, (x));               \
-               }                                                       \
-       } while(0)
-
-#define spin_is_locked(x)                                              \
-       ({                                                              \
-               CHECK_LOCK(x);                                          \
-               volatile unsigned int *a = __ldcw_align(x);             \
-               if (unlikely((*a == 0) && (x)->babble)) {                               \
-                       (x)->babble--;                                  \
-                       printk("KERN_WARNING                            \
-                               %s:%d: spin_is_locked(%s/%p) already"   \
-                               " locked by %s:%d in %s at %p(%d)\n",   \
-                               __FILE__,__LINE__, (x)->module, (x),    \
-                               (x)->bfile, (x)->bline, (x)->task->comm,\
-                               (x)->previous, (x)->oncpu);             \
-               }                                                       \
-               *a == 0;                                                \
-       })
-
-#define spin_unlock_wait(x)                                            \
-       do {                                                            \
-               CHECK_LOCK(x);                                          \
-               volatile unsigned int *a = __ldcw_align(x);             \
-               if (unlikely((*a == 0) && (x)->babble)) {                               \
-                       (x)->babble--;                                  \
-                       printk("KERN_WARNING                            \
-                               %s:%d: spin_unlock_wait(%s/%p)"         \
-                               " owned by %s:%d in %s at %p(%d)\n",    \
-                               __FILE__,__LINE__, (x)->module, (x),    \
-                               (x)->bfile, (x)->bline, (x)->task->comm,\
-                               (x)->previous, (x)->oncpu);             \
-               }                                                       \
-               barrier();                                              \
-       } while (*((volatile unsigned char *)(__ldcw_align(x))) == 0)
-
-extern void _dbg_spin_lock(spinlock_t *lock, const char *base_file, int line_no);
-extern void _dbg_spin_unlock(spinlock_t *lock, const char *, int);
-extern int _dbg_spin_trylock(spinlock_t * lock, const char *, int);
-
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
-
-#define _raw_spin_unlock(lock) _dbg_spin_unlock(lock, __FILE__, __LINE__)
-#define _raw_spin_lock(lock) _dbg_spin_lock(lock, __FILE__, __LINE__)
-#define _raw_spin_trylock(lock) _dbg_spin_trylock(lock, __FILE__, __LINE__)
-
-/* just in case we need it */
-#define spin_lock_own(LOCK, LOCATION)                                  \
-do {                                                                   \
-       volatile unsigned int *a = __ldcw_align(LOCK);                  \
-       if (!((*a == 0) && ((LOCK)->oncpu == smp_processor_id())))      \
-               printk("KERN_WARNING                                    \
-                       %s: called on %d from %p but lock %s on %d\n",  \
-                       LOCATION, smp_processor_id(),                   \
-                       __builtin_return_address(0),                    \
-                       (*a == 0) ? "taken" : "freed", (LOCK)->on_cpu); \
-} while (0)
-
-#endif /* !(CONFIG_DEBUG_SPINLOCK) */
 
 /*
  * Read-write spinlocks, allowing multiple readers
  * but only one writer.
  */
-typedef struct {
-       spinlock_t lock;
-       volatile int counter;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { __SPIN_LOCK_UNLOCKED, 0 }
-
-#define rwlock_init(lp)        do { *(lp) = RW_LOCK_UNLOCKED; } while (0)
 
-#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
+#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
 
 /* read_lock, read_unlock are pretty straightforward.  Of course it somehow
  * sucks we end up saving/restoring flags twice for read_lock_irqsave aso. */
 
-#ifdef CONFIG_DEBUG_RWLOCK
-extern void _dbg_read_lock(rwlock_t * rw, const char *bfile, int bline);
-#define _raw_read_lock(rw) _dbg_read_lock(rw, __FILE__, __LINE__)
-#else
-static  __inline__ void _raw_read_lock(rwlock_t *rw)
+static  __inline__ void __raw_read_lock(raw_rwlock_t *rw)
 {
        unsigned long flags;
        local_irq_save(flags);
-       _raw_spin_lock(&rw->lock); 
+       __raw_spin_lock(&rw->lock);
 
        rw->counter++;
 
-       _raw_spin_unlock(&rw->lock);
+       __raw_spin_unlock(&rw->lock);
        local_irq_restore(flags);
 }
-#endif /* CONFIG_DEBUG_RWLOCK */
 
-static  __inline__ void _raw_read_unlock(rwlock_t *rw)
+static  __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
 {
        unsigned long flags;
        local_irq_save(flags);
-       _raw_spin_lock(&rw->lock); 
+       __raw_spin_lock(&rw->lock);
 
        rw->counter--;
 
-       _raw_spin_unlock(&rw->lock);
+       __raw_spin_unlock(&rw->lock);
        local_irq_restore(flags);
 }
 
@@ -194,20 +96,17 @@ static  __inline__ void _raw_read_unlock(rwlock_t *rw)
  * writers) in interrupt handlers someone fucked up and we'd dead-lock
  * sooner or later anyway.   prumpf */
 
-#ifdef CONFIG_DEBUG_RWLOCK
-extern void _dbg_write_lock(rwlock_t * rw, const char *bfile, int bline);
-#define _raw_write_lock(rw) _dbg_write_lock(rw, __FILE__, __LINE__)
-#else
-static  __inline__ void _raw_write_lock(rwlock_t *rw)
+static  __inline__ void __raw_write_lock(raw_rwlock_t *rw)
 {
 retry:
-       _raw_spin_lock(&rw->lock);
+       __raw_spin_lock(&rw->lock);
 
        if(rw->counter != 0) {
                /* this basically never happens */
-               _raw_spin_unlock(&rw->lock);
+               __raw_spin_unlock(&rw->lock);
 
-               while(rw->counter != 0);
+               while (rw->counter != 0)
+                       cpu_relax();
 
                goto retry;
        }
@@ -215,26 +114,21 @@ retry:
        /* got it.  now leave without unlocking */
        rw->counter = -1; /* remember we are locked */
 }
-#endif /* CONFIG_DEBUG_RWLOCK */
 
 /* write_unlock is absolutely trivial - we don't have to wait for anything */
 
-static  __inline__ void _raw_write_unlock(rwlock_t *rw)
+static  __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
 {
        rw->counter = 0;
-       _raw_spin_unlock(&rw->lock);
+       __raw_spin_unlock(&rw->lock);
 }
 
-#ifdef CONFIG_DEBUG_RWLOCK
-extern int _dbg_write_trylock(rwlock_t * rw, const char *bfile, int bline);
-#define _raw_write_trylock(rw) _dbg_write_trylock(rw, __FILE__, __LINE__)
-#else
-static  __inline__ int _raw_write_trylock(rwlock_t *rw)
+static  __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
 {
-       _raw_spin_lock(&rw->lock);
+       __raw_spin_lock(&rw->lock);
        if (rw->counter != 0) {
                /* this basically never happens */
-               _raw_spin_unlock(&rw->lock);
+               __raw_spin_unlock(&rw->lock);
 
                return 0;
        }
@@ -243,14 +137,13 @@ static  __inline__ int _raw_write_trylock(rwlock_t *rw)
        rw->counter = -1; /* remember we are locked */
        return 1;
 }
-#endif /* CONFIG_DEBUG_RWLOCK */
 
-static __inline__ int is_read_locked(rwlock_t *rw)
+static __inline__ int __raw_is_read_locked(raw_rwlock_t *rw)
 {
        return rw->counter > 0;
 }
 
-static __inline__ int is_write_locked(rwlock_t *rw)
+static __inline__ int __raw_is_write_locked(raw_rwlock_t *rw)
 {
        return rw->counter < 0;
 }
diff --git a/include/asm-parisc/spinlock_types.h b/include/asm-parisc/spinlock_types.h
new file mode 100644 (file)
index 0000000..785bba8
--- /dev/null
@@ -0,0 +1,21 @@
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+       volatile unsigned int lock[4];
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED       { { 1, 1, 1, 1 } }
+
+typedef struct {
+       raw_spinlock_t lock;
+       volatile int counter;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED         { __RAW_SPIN_LOCK_UNLOCKED, 0 }
+
+#endif
index 81c543339036cb1f31df3b41863b3e7c9157fa3e..26ff844a21c18a36eed14cccdb951e68d8411187 100644 (file)
@@ -160,29 +160,7 @@ static inline void set_eiem(unsigned long val)
 })
 
 #ifdef CONFIG_SMP
-/*
- * Your basic SMP spinlocks, allowing only a single CPU anywhere
- */
-
-typedef struct {
-       volatile unsigned int lock[4];
-#ifdef CONFIG_DEBUG_SPINLOCK
-       unsigned long magic;
-       volatile unsigned int babble;
-       const char *module;
-       char *bfile;
-       int bline;
-       int oncpu;
-       void *previous;
-       struct task_struct * task;
-#endif
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} spinlock_t;
-
-#define __lock_aligned __attribute__((__section__(".data.lock_aligned")))
-
+# define __lock_aligned __attribute__((__section__(".data.lock_aligned")))
 #endif
 
 #define KERNEL_START (0x10100000 - 0x1000)
index 17530c232c7690c6a47a0b87a0e50ea4e23b8788..829481c0a9dc2fd6321a71a629045d3a5d94f4b1 100644 (file)
@@ -41,6 +41,10 @@ extern void smp_send_xmon_break(int cpu);
 struct pt_regs;
 extern void smp_message_recv(int, struct pt_regs *);
 
+extern int __cpu_disable(void);
+extern void __cpu_die(unsigned int cpu);
+extern void cpu_die(void) __attribute__((noreturn));
+
 #define NO_PROC_ID             0xFF            /* No processor magic marker */
 #define PROC_CHANGE_PENALTY    20
 
@@ -64,6 +68,8 @@ extern struct klock_info_struct klock_info;
 
 #else /* !(CONFIG_SMP) */
 
+static inline void cpu_die(void) { }
+
 #endif /* !(CONFIG_SMP) */
 
 #endif /* !(_PPC_SMP_H) */
index 909199aae1047f2baf6934e384bd0918eb3f855e..20edcf2a6e0ce47eddfb7b213f62e06304c5a093 100644 (file)
@@ -5,41 +5,21 @@
 
 /*
  * Simple spin lock operations.
+ *
+ * (the type definitions are in asm/raw_spinlock_types.h)
  */
 
-typedef struct {
-       volatile unsigned long lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
-       volatile unsigned long owner_pc;
-       volatile unsigned long owner_cpu;
-#endif
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} spinlock_t;
-
-#ifdef __KERNEL__
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define SPINLOCK_DEBUG_INIT     , 0, 0
-#else
-#define SPINLOCK_DEBUG_INIT     /* */
-#endif
-
-#define SPIN_LOCK_UNLOCKED     (spinlock_t) { 0 SPINLOCK_DEBUG_INIT }
-
-#define spin_lock_init(x)      do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-#define spin_is_locked(x)      ((x)->lock != 0)
-#define spin_unlock_wait(x)    do { barrier(); } while(spin_is_locked(x))
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
-
-#ifndef CONFIG_DEBUG_SPINLOCK
-
-static inline void _raw_spin_lock(spinlock_t *lock)
+#define __raw_spin_is_locked(x)                ((x)->lock != 0)
+#define __raw_spin_unlock_wait(lock) \
+       do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
        unsigned long tmp;
 
        __asm__ __volatile__(
-       "b      1f              # spin_lock\n\
+       "b      1f              # __raw_spin_lock\n\
 2:     lwzx    %0,0,%1\n\
        cmpwi   0,%0,0\n\
        bne+    2b\n\
@@ -55,21 +35,13 @@ static inline void _raw_spin_lock(spinlock_t *lock)
        : "cr0", "memory");
 }
 
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-       __asm__ __volatile__("eieio             # spin_unlock": : :"memory");
+       __asm__ __volatile__("eieio     # __raw_spin_unlock": : :"memory");
        lock->lock = 0;
 }
 
-#define _raw_spin_trylock(l) (!test_and_set_bit(0,&(l)->lock))
-
-#else
-
-extern void _raw_spin_lock(spinlock_t *lock);
-extern void _raw_spin_unlock(spinlock_t *lock);
-extern int _raw_spin_trylock(spinlock_t *lock);
-
-#endif
+#define __raw_spin_trylock(l) (!test_and_set_bit(0,&(l)->lock))
 
 /*
  * Read-write spinlocks, allowing multiple readers
@@ -81,22 +53,11 @@ extern int _raw_spin_trylock(spinlock_t *lock);
  * irq-safe write-lock, but readers can get non-irqsafe
  * read-locks.
  */
-typedef struct {
-       volatile signed int lock;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} rwlock_t;
 
-#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
-#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)
+#define __raw_read_can_lock(rw)        ((rw)->lock >= 0)
+#define __raw_write_can_lock(rw)       (!(rw)->lock)
 
-#define read_can_lock(rw)      ((rw)->lock >= 0)
-#define write_can_lock(rw)     (!(rw)->lock)
-
-#ifndef CONFIG_DEBUG_SPINLOCK
-
-static __inline__ int _raw_read_trylock(rwlock_t *rw)
+static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
 {
        signed int tmp;
 
@@ -116,7 +77,7 @@ static __inline__ int _raw_read_trylock(rwlock_t *rw)
        return tmp > 0;
 }
 
-static __inline__ void _raw_read_lock(rwlock_t *rw)
+static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
 {
        signed int tmp;
 
@@ -137,7 +98,7 @@ static __inline__ void _raw_read_lock(rwlock_t *rw)
        : "cr0", "memory");
 }
 
-static __inline__ void _raw_read_unlock(rwlock_t *rw)
+static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
 {
        signed int tmp;
 
@@ -153,7 +114,7 @@ static __inline__ void _raw_read_unlock(rwlock_t *rw)
        : "cr0", "memory");
 }
 
-static __inline__ int _raw_write_trylock(rwlock_t *rw)
+static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
 {
        signed int tmp;
 
@@ -173,7 +134,7 @@ static __inline__ int _raw_write_trylock(rwlock_t *rw)
        return tmp == 0;
 }
 
-static __inline__ void _raw_write_lock(rwlock_t *rw)
+static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
 {
        signed int tmp;
 
@@ -194,22 +155,10 @@ static __inline__ void _raw_write_lock(rwlock_t *rw)
        : "cr0", "memory");
 }
 
-static __inline__ void _raw_write_unlock(rwlock_t *rw)
+static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
 {
        __asm__ __volatile__("eieio             # write_unlock": : :"memory");
        rw->lock = 0;
 }
 
-#else
-
-extern void _raw_read_lock(rwlock_t *rw);
-extern void _raw_read_unlock(rwlock_t *rw);
-extern void _raw_write_lock(rwlock_t *rw);
-extern void _raw_write_unlock(rwlock_t *rw);
-extern int _raw_read_trylock(rwlock_t *rw);
-extern int _raw_write_trylock(rwlock_t *rw);
-
-#endif
-
 #endif /* __ASM_SPINLOCK_H */
-#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/spinlock_types.h b/include/asm-ppc/spinlock_types.h
new file mode 100644 (file)
index 0000000..7919ccc
--- /dev/null
@@ -0,0 +1,20 @@
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+       volatile unsigned long lock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
+
+typedef struct {
+       volatile signed int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED         { 0 }
+
+#endif
index 513a334c581032ec21ddc33819d7e25dad5b4bd0..d754ab570fe0eba855c55e53446564076f6d895f 100644 (file)
@@ -88,6 +88,7 @@ extern void *cacheable_memcpy(void *, const void *, unsigned int);
 extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
 extern void bad_page_fault(struct pt_regs *, unsigned long, int);
 extern void die(const char *, struct pt_regs *, long);
+extern void _exception(int, struct pt_regs *, int, unsigned long);
 #ifdef CONFIG_BOOKE_WDT
 extern u32 booke_wdt_enabled;
 extern u32 booke_wdt_period;
index acd11564dd752d64a8e29951e3beb55eee0f09c4..14cb895bb607245477e4dc466e7dd6a227491956 100644 (file)
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
+ *
+ * (the type definitions are in asm/spinlock_types.h)
  */
 #include <linux/config.h>
 #include <asm/paca.h>
 #include <asm/hvcall.h>
 #include <asm/iSeries/HvCall.h>
 
-typedef struct {
-       volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} spinlock_t;
+#define __raw_spin_is_locked(x)                ((x)->slock != 0)
 
-typedef struct {
-       volatile signed int lock;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} rwlock_t;
+/*
+ * This returns the old value in the lock, so we succeeded
+ * in getting the lock if the return value is 0.
+ */
+static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock)
+{
+       unsigned long tmp, tmp2;
 
-#ifdef __KERNEL__
-#define SPIN_LOCK_UNLOCKED     (spinlock_t) { 0 }
+       __asm__ __volatile__(
+"      lwz             %1,%3(13)               # __spin_trylock\n\
+1:     lwarx           %0,0,%2\n\
+       cmpwi           0,%0,0\n\
+       bne-            2f\n\
+       stwcx.          %1,0,%2\n\
+       bne-            1b\n\
+       isync\n\
+2:"    : "=&r" (tmp), "=&r" (tmp2)
+       : "r" (&lock->slock), "i" (offsetof(struct paca_struct, lock_token))
+       : "cr0", "memory");
 
-#define spin_is_locked(x)      ((x)->lock != 0)
-#define spin_lock_init(x)      do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
+       return tmp;
+}
 
-static __inline__ void _raw_spin_unlock(spinlock_t *lock)
+static int __inline__ __raw_spin_trylock(raw_spinlock_t *lock)
 {
-       __asm__ __volatile__("lwsync    # spin_unlock": : :"memory");
-       lock->lock = 0;
+       return __spin_trylock(lock) == 0;
 }
 
 /*
@@ -64,44 +70,15 @@ static __inline__ void _raw_spin_unlock(spinlock_t *lock)
 #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
 /* We only yield to the hypervisor if we are in shared processor mode */
 #define SHARED_PROCESSOR (get_paca()->lppaca.shared_proc)
-extern void __spin_yield(spinlock_t *lock);
-extern void __rw_yield(rwlock_t *lock);
+extern void __spin_yield(raw_spinlock_t *lock);
+extern void __rw_yield(raw_rwlock_t *lock);
 #else /* SPLPAR || ISERIES */
 #define __spin_yield(x)        barrier()
 #define __rw_yield(x)  barrier()
 #define SHARED_PROCESSOR       0
 #endif
-extern void spin_unlock_wait(spinlock_t *lock);
-
-/*
- * This returns the old value in the lock, so we succeeded
- * in getting the lock if the return value is 0.
- */
-static __inline__ unsigned long __spin_trylock(spinlock_t *lock)
-{
-       unsigned long tmp, tmp2;
-
-       __asm__ __volatile__(
-"      lwz             %1,%3(13)               # __spin_trylock\n\
-1:     lwarx           %0,0,%2\n\
-       cmpwi           0,%0,0\n\
-       bne-            2f\n\
-       stwcx.          %1,0,%2\n\
-       bne-            1b\n\
-       isync\n\
-2:"    : "=&r" (tmp), "=&r" (tmp2)
-       : "r" (&lock->lock), "i" (offsetof(struct paca_struct, lock_token))
-       : "cr0", "memory");
-
-       return tmp;
-}
-
-static int __inline__ _raw_spin_trylock(spinlock_t *lock)
-{
-       return __spin_trylock(lock) == 0;
-}
 
-static void __inline__ _raw_spin_lock(spinlock_t *lock)
+static void __inline__ __raw_spin_lock(raw_spinlock_t *lock)
 {
        while (1) {
                if (likely(__spin_trylock(lock) == 0))
@@ -110,12 +87,12 @@ static void __inline__ _raw_spin_lock(spinlock_t *lock)
                        HMT_low();
                        if (SHARED_PROCESSOR)
                                __spin_yield(lock);
-               } while (unlikely(lock->lock != 0));
+               } while (unlikely(lock->slock != 0));
                HMT_medium();
        }
 }
 
-static void __inline__ _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
+static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 {
        unsigned long flags_dis;
 
@@ -128,12 +105,20 @@ static void __inline__ _raw_spin_lock_flags(spinlock_t *lock, unsigned long flag
                        HMT_low();
                        if (SHARED_PROCESSOR)
                                __spin_yield(lock);
-               } while (unlikely(lock->lock != 0));
+               } while (unlikely(lock->slock != 0));
                HMT_medium();
                local_irq_restore(flags_dis);
        }
 }
 
+static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
+{
+       __asm__ __volatile__("lwsync    # __raw_spin_unlock": : :"memory");
+       lock->slock = 0;
+}
+
+extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
+
 /*
  * Read-write spinlocks, allowing multiple readers
  * but only one writer.
@@ -144,24 +129,15 @@ static void __inline__ _raw_spin_lock_flags(spinlock_t *lock, unsigned long flag
  * irq-safe write-lock, but readers can get non-irqsafe
  * read-locks.
  */
-#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
 
-#define rwlock_init(x)         do { *(x) = RW_LOCK_UNLOCKED; } while(0)
-
-#define read_can_lock(rw)      ((rw)->lock >= 0)
-#define write_can_lock(rw)     (!(rw)->lock)
-
-static __inline__ void _raw_write_unlock(rwlock_t *rw)
-{
-       __asm__ __volatile__("lwsync            # write_unlock": : :"memory");
-       rw->lock = 0;
-}
+#define __raw_read_can_lock(rw)                ((rw)->lock >= 0)
+#define __raw_write_can_lock(rw)       (!(rw)->lock)
 
 /*
  * This returns the old value in the lock + 1,
  * so we got a read lock if the return value is > 0.
  */
-static long __inline__ __read_trylock(rwlock_t *rw)
+static long __inline__ __read_trylock(raw_rwlock_t *rw)
 {
        long tmp;
 
@@ -180,45 +156,11 @@ static long __inline__ __read_trylock(rwlock_t *rw)
        return tmp;
 }
 
-static int __inline__ _raw_read_trylock(rwlock_t *rw)
-{
-       return __read_trylock(rw) > 0;
-}
-
-static void __inline__ _raw_read_lock(rwlock_t *rw)
-{
-       while (1) {
-               if (likely(__read_trylock(rw) > 0))
-                       break;
-               do {
-                       HMT_low();
-                       if (SHARED_PROCESSOR)
-                               __rw_yield(rw);
-               } while (unlikely(rw->lock < 0));
-               HMT_medium();
-       }
-}
-
-static void __inline__ _raw_read_unlock(rwlock_t *rw)
-{
-       long tmp;
-
-       __asm__ __volatile__(
-       "eieio                          # read_unlock\n\
-1:     lwarx           %0,0,%1\n\
-       addic           %0,%0,-1\n\
-       stwcx.          %0,0,%1\n\
-       bne-            1b"
-       : "=&r"(tmp)
-       : "r"(&rw->lock)
-       : "cr0", "memory");
-}
-
 /*
  * This returns the old value in the lock,
  * so we got the write lock if the return value is 0.
  */
-static __inline__ long __write_trylock(rwlock_t *rw)
+static __inline__ long __write_trylock(raw_rwlock_t *rw)
 {
        long tmp, tmp2;
 
@@ -237,12 +179,21 @@ static __inline__ long __write_trylock(rwlock_t *rw)
        return tmp;
 }
 
-static int __inline__ _raw_write_trylock(rwlock_t *rw)
+static void __inline__ __raw_read_lock(raw_rwlock_t *rw)
 {
-       return __write_trylock(rw) == 0;
+       while (1) {
+               if (likely(__read_trylock(rw) > 0))
+                       break;
+               do {
+                       HMT_low();
+                       if (SHARED_PROCESSOR)
+                               __rw_yield(rw);
+               } while (unlikely(rw->lock < 0));
+               HMT_medium();
+       }
 }
 
-static void __inline__ _raw_write_lock(rwlock_t *rw)
+static void __inline__ __raw_write_lock(raw_rwlock_t *rw)
 {
        while (1) {
                if (likely(__write_trylock(rw) == 0))
@@ -256,5 +207,35 @@ static void __inline__ _raw_write_lock(rwlock_t *rw)
        }
 }
 
-#endif /* __KERNEL__ */
+static int __inline__ __raw_read_trylock(raw_rwlock_t *rw)
+{
+       return __read_trylock(rw) > 0;
+}
+
+static int __inline__ __raw_write_trylock(raw_rwlock_t *rw)
+{
+       return __write_trylock(rw) == 0;
+}
+
+static void __inline__ __raw_read_unlock(raw_rwlock_t *rw)
+{
+       long tmp;
+
+       __asm__ __volatile__(
+       "eieio                          # read_unlock\n\
+1:     lwarx           %0,0,%1\n\
+       addic           %0,%0,-1\n\
+       stwcx.          %0,0,%1\n\
+       bne-            1b"
+       : "=&r"(tmp)
+       : "r"(&rw->lock)
+       : "cr0", "memory");
+}
+
+static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
+{
+       __asm__ __volatile__("lwsync    # write_unlock": : :"memory");
+       rw->lock = 0;
+}
+
 #endif /* __ASM_SPINLOCK_H */
diff --git a/include/asm-ppc64/spinlock_types.h b/include/asm-ppc64/spinlock_types.h
new file mode 100644 (file)
index 0000000..a37c8ea
--- /dev/null
@@ -0,0 +1,20 @@
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+       volatile unsigned int slock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
+
+typedef struct {
+       volatile signed int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED         { 0 }
+
+#endif
index 321b23bba1ecf16987b724e45f79319f55972159..273dbecf8acef0465060133bbac01bab72118fc5 100644 (file)
@@ -27,25 +27,19 @@ _raw_compare_and_swap(volatile unsigned int *lock,
  * on the local processor, one does not.
  *
  * We make no fairness assumptions. They have a cost.
+ *
+ * (the type definitions are in asm/spinlock_types.h)
  */
 
-typedef struct {
-       volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} __attribute__ ((aligned (4))) spinlock_t;
-
-#define SPIN_LOCK_UNLOCKED     (spinlock_t) { 0 }
-#define spin_lock_init(lp)     do { (lp)->lock = 0; } while(0)
-#define spin_unlock_wait(lp)   do { barrier(); } while(((volatile spinlock_t *)(lp))->lock)
-#define spin_is_locked(x)      ((x)->lock != 0)
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+#define __raw_spin_is_locked(x) ((x)->lock != 0)
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_spin_unlock_wait(lock) \
+       do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
 
-extern void _raw_spin_lock_wait(spinlock_t *lp, unsigned int pc);
-extern int _raw_spin_trylock_retry(spinlock_t *lp, unsigned int pc);
+extern void _raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc);
+extern int _raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc);
 
-static inline void _raw_spin_lock(spinlock_t *lp)
+static inline void __raw_spin_lock(raw_spinlock_t *lp)
 {
        unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);
 
@@ -53,7 +47,7 @@ static inline void _raw_spin_lock(spinlock_t *lp)
                _raw_spin_lock_wait(lp, pc);
 }
 
-static inline int _raw_spin_trylock(spinlock_t *lp)
+static inline int __raw_spin_trylock(raw_spinlock_t *lp)
 {
        unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);
 
@@ -62,7 +56,7 @@ static inline int _raw_spin_trylock(spinlock_t *lp)
        return _raw_spin_trylock_retry(lp, pc);
 }
 
-static inline void _raw_spin_unlock(spinlock_t *lp)
+static inline void __raw_spin_unlock(raw_spinlock_t *lp)
 {
        _raw_compare_and_swap(&lp->lock, lp->lock, 0);
 }
@@ -77,36 +71,25 @@ static inline void _raw_spin_unlock(spinlock_t *lp)
  * irq-safe write-lock, but readers can get non-irqsafe
  * read-locks.
  */
-typedef struct {
-       volatile unsigned int lock;
-       volatile unsigned long owner_pc;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
-
-#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
 
 /**
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define read_can_lock(x) ((int)(x)->lock >= 0)
+#define __raw_read_can_lock(x) ((int)(x)->lock >= 0)
 
 /**
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define write_can_lock(x) ((x)->lock == 0)
+#define __raw_write_can_lock(x) ((x)->lock == 0)
 
-extern void _raw_read_lock_wait(rwlock_t *lp);
-extern int _raw_read_trylock_retry(rwlock_t *lp);
-extern void _raw_write_lock_wait(rwlock_t *lp);
-extern int _raw_write_trylock_retry(rwlock_t *lp);
+extern void _raw_read_lock_wait(raw_rwlock_t *lp);
+extern int _raw_read_trylock_retry(raw_rwlock_t *lp);
+extern void _raw_write_lock_wait(raw_rwlock_t *lp);
+extern int _raw_write_trylock_retry(raw_rwlock_t *lp);
 
-static inline void _raw_read_lock(rwlock_t *rw)
+static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
        unsigned int old;
        old = rw->lock & 0x7fffffffU;
@@ -114,7 +97,7 @@ static inline void _raw_read_lock(rwlock_t *rw)
                _raw_read_lock_wait(rw);
 }
 
-static inline void _raw_read_unlock(rwlock_t *rw)
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
        unsigned int old, cmp;
 
@@ -125,18 +108,18 @@ static inline void _raw_read_unlock(rwlock_t *rw)
        } while (cmp != old);
 }
 
-static inline void _raw_write_lock(rwlock_t *rw)
+static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
        if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
                _raw_write_lock_wait(rw);
 }
 
-static inline void _raw_write_unlock(rwlock_t *rw)
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
        _raw_compare_and_swap(&rw->lock, 0x80000000, 0);
 }
 
-static inline int _raw_read_trylock(rwlock_t *rw)
+static inline int __raw_read_trylock(raw_rwlock_t *rw)
 {
        unsigned int old;
        old = rw->lock & 0x7fffffffU;
@@ -145,7 +128,7 @@ static inline int _raw_read_trylock(rwlock_t *rw)
        return _raw_read_trylock_retry(rw);
 }
 
-static inline int _raw_write_trylock(rwlock_t *rw)
+static inline int __raw_write_trylock(raw_rwlock_t *rw)
 {
        if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
                return 1;
diff --git a/include/asm-s390/spinlock_types.h b/include/asm-s390/spinlock_types.h
new file mode 100644 (file)
index 0000000..f79a221
--- /dev/null
@@ -0,0 +1,21 @@
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+       volatile unsigned int lock;
+} __attribute__ ((aligned (4))) raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
+
+typedef struct {
+       volatile unsigned int lock;
+       volatile unsigned int owner_pc;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED         { 0, 0 }
+
+#endif
index e770b55649ebb88f0838e1b760844630f1cbf141..846322d4c35d489daf86e337518dbee79f5c738f 100644 (file)
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  */
-typedef struct {
-       volatile unsigned long lock;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} spinlock_t;
 
-#define SPIN_LOCK_UNLOCKED     (spinlock_t) { 0 }
-
-#define spin_lock_init(x)      do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-#define spin_is_locked(x)      ((x)->lock != 0)
-#define spin_unlock_wait(x)    do { barrier(); } while (spin_is_locked(x))
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+#define __raw_spin_is_locked(x)        ((x)->lock != 0)
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_spin_unlock_wait(x) \
+       do { cpu_relax(); } while (__raw_spin_is_locked(x))
 
 /*
  * Simple spin lock operations.  There are two variants, one clears IRQ's
@@ -36,7 +27,7 @@ typedef struct {
  *
  * We make no fairness assumptions.  They have a cost.
  */
-static inline void _raw_spin_lock(spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
        __asm__ __volatile__ (
                "1:\n\t"
@@ -49,14 +40,14 @@ static inline void _raw_spin_lock(spinlock_t *lock)
        );
 }
 
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
        assert_spin_locked(lock);
 
        lock->lock = 0;
 }
 
-#define _raw_spin_trylock(x) (!test_and_set_bit(0, &(x)->lock))
+#define __raw_spin_trylock(x) (!test_and_set_bit(0, &(x)->lock))
 
 /*
  * Read-write spinlocks, allowing multiple readers but only one writer.
@@ -66,51 +57,40 @@ static inline void _raw_spin_unlock(spinlock_t *lock)
  * needs to get a irq-safe write-lock, but readers can get non-irqsafe
  * read-locks.
  */
-typedef struct {
-       spinlock_t lock;
-       atomic_t counter;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RW_LOCK_BIAS           0x01000000
-#define RW_LOCK_UNLOCKED       (rwlock_t) { { 0 }, { RW_LOCK_BIAS } }
-#define rwlock_init(x)         do { *(x) = RW_LOCK_UNLOCKED; } while (0)
-
-static inline void _raw_read_lock(rwlock_t *rw)
+
+static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
-       _raw_spin_lock(&rw->lock);
+       __raw_spin_lock(&rw->lock);
 
        atomic_inc(&rw->counter);
 
-       _raw_spin_unlock(&rw->lock);
+       __raw_spin_unlock(&rw->lock);
 }
 
-static inline void _raw_read_unlock(rwlock_t *rw)
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
-       _raw_spin_lock(&rw->lock);
+       __raw_spin_lock(&rw->lock);
 
        atomic_dec(&rw->counter);
 
-       _raw_spin_unlock(&rw->lock);
+       __raw_spin_unlock(&rw->lock);
 }
 
-static inline void _raw_write_lock(rwlock_t *rw)
+static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
-       _raw_spin_lock(&rw->lock);
+       __raw_spin_lock(&rw->lock);
        atomic_set(&rw->counter, -1);
 }
 
-static inline void _raw_write_unlock(rwlock_t *rw)
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
        atomic_set(&rw->counter, 0);
-       _raw_spin_unlock(&rw->lock);
+       __raw_spin_unlock(&rw->lock);
 }
 
-#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
+#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
 
-static inline int _raw_write_trylock(rwlock_t *rw)
+static inline int __raw_write_trylock(raw_rwlock_t *rw)
 {
        if (atomic_sub_and_test(RW_LOCK_BIAS, &rw->counter))
                return 1;
@@ -121,4 +101,3 @@ static inline int _raw_write_trylock(rwlock_t *rw)
 }
 
 #endif /* __ASM_SH_SPINLOCK_H */
-
diff --git a/include/asm-sh/spinlock_types.h b/include/asm-sh/spinlock_types.h
new file mode 100644 (file)
index 0000000..8c41b6c
--- /dev/null
@@ -0,0 +1,22 @@
+#ifndef __ASM_SH_SPINLOCK_TYPES_H
+#define __ASM_SH_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+       volatile unsigned long lock;
+} raw_spinlock_t;
+
+#define __SPIN_LOCK_UNLOCKED           { 0 }
+
+typedef struct {
+       raw_spinlock_t lock;
+       atomic_t counter;
+} raw_rwlock_t;
+
+#define RW_LOCK_BIAS                   0x01000000
+#define __RAW_RW_LOCK_UNLOCKED         { { 0 }, { RW_LOCK_BIAS } }
+
+#endif
index 0cbd87ad491280e620acbbfde7280d21ca1b367e..111727a2bb4e4f173b89434ddba3f4f00f191c91 100644 (file)
 
 #include <asm/psr.h>
 
-#ifdef CONFIG_DEBUG_SPINLOCK
-struct _spinlock_debug {
-       unsigned char lock;
-       unsigned long owner_pc;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-};
-typedef struct _spinlock_debug spinlock_t;
-
-#define SPIN_LOCK_UNLOCKED     (spinlock_t) { 0, 0 }
-#define spin_lock_init(lp)     do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0)
-#define spin_is_locked(lp)  (*((volatile unsigned char *)(&((lp)->lock))) != 0)
-#define spin_unlock_wait(lp)   do { barrier(); } while(*(volatile unsigned char *)(&(lp)->lock))
-
-extern void _do_spin_lock(spinlock_t *lock, char *str);
-extern int _spin_trylock(spinlock_t *lock);
-extern void _do_spin_unlock(spinlock_t *lock);
-
-#define _raw_spin_trylock(lp)  _spin_trylock(lp)
-#define _raw_spin_lock(lock)   _do_spin_lock(lock, "spin_lock")
-#define _raw_spin_unlock(lock) _do_spin_unlock(lock)
-
-struct _rwlock_debug {
-       volatile unsigned int lock;
-       unsigned long owner_pc;
-       unsigned long reader_pc[NR_CPUS];
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-};
-typedef struct _rwlock_debug rwlock_t;
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0, {0} }
-
-#define rwlock_init(lp)        do { *(lp)= RW_LOCK_UNLOCKED; } while(0)
-
-extern void _do_read_lock(rwlock_t *rw, char *str);
-extern void _do_read_unlock(rwlock_t *rw, char *str);
-extern void _do_write_lock(rwlock_t *rw, char *str);
-extern void _do_write_unlock(rwlock_t *rw);
-
-#define _raw_read_lock(lock)   \
-do {   unsigned long flags; \
-       local_irq_save(flags); \
-       _do_read_lock(lock, "read_lock"); \
-       local_irq_restore(flags); \
-} while(0)
-
-#define _raw_read_unlock(lock) \
-do {   unsigned long flags; \
-       local_irq_save(flags); \
-       _do_read_unlock(lock, "read_unlock"); \
-       local_irq_restore(flags); \
-} while(0)
-
-#define _raw_write_lock(lock) \
-do {   unsigned long flags; \
-       local_irq_save(flags); \
-       _do_write_lock(lock, "write_lock"); \
-       local_irq_restore(flags); \
-} while(0)
-
-#define _raw_write_unlock(lock) \
-do {   unsigned long flags; \
-       local_irq_save(flags); \
-       _do_write_unlock(lock); \
-       local_irq_restore(flags); \
-} while(0)
-
-#else /* !CONFIG_DEBUG_SPINLOCK */
-
-typedef struct {
-       unsigned char lock;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} spinlock_t;
-
-#define SPIN_LOCK_UNLOCKED     (spinlock_t) { 0 }
-
-#define spin_lock_init(lock)   (*((unsigned char *)(lock)) = 0)
-#define spin_is_locked(lock)    (*((volatile unsigned char *)(lock)) != 0)
+#define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
 
-#define spin_unlock_wait(lock) \
-do { \
-       barrier(); \
-} while(*((volatile unsigned char *)lock))
+#define __raw_spin_unlock_wait(lock) \
+       do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
 
-extern __inline__ void _raw_spin_lock(spinlock_t *lock)
+extern __inline__ void __raw_spin_lock(raw_spinlock_t *lock)
 {
        __asm__ __volatile__(
        "\n1:\n\t"
@@ -121,7 +37,7 @@ extern __inline__ void _raw_spin_lock(spinlock_t *lock)
        : "g2", "memory", "cc");
 }
 
-extern __inline__ int _raw_spin_trylock(spinlock_t *lock)
+extern __inline__ int __raw_spin_trylock(raw_spinlock_t *lock)
 {
        unsigned int result;
        __asm__ __volatile__("ldstub [%1], %0"
@@ -131,7 +47,7 @@ extern __inline__ int _raw_spin_trylock(spinlock_t *lock)
        return (result == 0);
 }
 
-extern __inline__ void _raw_spin_unlock(spinlock_t *lock)
+extern __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
 {
        __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
 }
@@ -147,23 +63,11 @@ extern __inline__ void _raw_spin_unlock(spinlock_t *lock)
  *
  * XXX This might create some problems with my dual spinlock
  * XXX scheme, deadlocks etc. -DaveM
- */
-typedef struct {
-       volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
-
-#define rwlock_init(lp)        do { *(lp)= RW_LOCK_UNLOCKED; } while(0)
-
-
-/* Sort of like atomic_t's on Sparc, but even more clever.
+ *
+ * Sort of like atomic_t's on Sparc, but even more clever.
  *
  *     ------------------------------------
- *     | 24-bit counter           | wlock |  rwlock_t
+ *     | 24-bit counter           | wlock |  raw_rwlock_t
  *     ------------------------------------
  *      31                       8 7     0
  *
@@ -174,9 +78,9 @@ typedef struct {
  *
  * Unfortunately this scheme limits us to ~16,000,000 cpus.
  */
-extern __inline__ void _read_lock(rwlock_t *rw)
+extern __inline__ void __read_lock(raw_rwlock_t *rw)
 {
-       register rwlock_t *lp asm("g1");
+       register raw_rwlock_t *lp asm("g1");
        lp = rw;
        __asm__ __volatile__(
        "mov    %%o7, %%g4\n\t"
@@ -187,16 +91,16 @@ extern __inline__ void _read_lock(rwlock_t *rw)
        : "g2", "g4", "memory", "cc");
 }
 
-#define _raw_read_lock(lock) \
+#define __raw_read_lock(lock) \
 do {   unsigned long flags; \
        local_irq_save(flags); \
-       _read_lock(lock); \
+       __raw_read_lock(lock); \
        local_irq_restore(flags); \
 } while(0)
 
-extern __inline__ void _read_unlock(rwlock_t *rw)
+extern __inline__ void __read_unlock(raw_rwlock_t *rw)
 {
-       register rwlock_t *lp asm("g1");
+       register raw_rwlock_t *lp asm("g1");
        lp = rw;
        __asm__ __volatile__(
        "mov    %%o7, %%g4\n\t"
@@ -207,16 +111,16 @@ extern __inline__ void _read_unlock(rwlock_t *rw)
        : "g2", "g4", "memory", "cc");
 }
 
-#define _raw_read_unlock(lock) \
+#define __raw_read_unlock(lock) \
 do {   unsigned long flags; \
        local_irq_save(flags); \
-       _read_unlock(lock); \
+       __raw_read_unlock(lock); \
        local_irq_restore(flags); \
 } while(0)
 
-extern __inline__ void _raw_write_lock(rwlock_t *rw)
+extern __inline__ void __raw_write_lock(raw_rwlock_t *rw)
 {
-       register rwlock_t *lp asm("g1");
+       register raw_rwlock_t *lp asm("g1");
        lp = rw;
        __asm__ __volatile__(
        "mov    %%o7, %%g4\n\t"
@@ -227,11 +131,9 @@ extern __inline__ void _raw_write_lock(rwlock_t *rw)
        : "g2", "g4", "memory", "cc");
 }
 
-#define _raw_write_unlock(rw)  do { (rw)->lock = 0; } while(0)
-
-#endif /* CONFIG_DEBUG_SPINLOCK */
+#define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0)
 
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 
 #endif /* !(__ASSEMBLY__) */
 
diff --git a/include/asm-sparc/spinlock_types.h b/include/asm-sparc/spinlock_types.h
new file mode 100644 (file)
index 0000000..0a0fb11
--- /dev/null
@@ -0,0 +1,20 @@
+#ifndef __SPARC_SPINLOCK_TYPES_H
+#define __SPARC_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+       unsigned char lock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
+
+typedef struct {
+       volatile unsigned int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED         { 0 }
+
+#endif
index a02c4370eb42e0d1c8f6043a25f900e0ea03ff2c..ec85d12d73b98a353e0fdcaf346489b09f80839a 100644 (file)
  * must be pre-V9 branches.
  */
 
-#ifndef CONFIG_DEBUG_SPINLOCK
+#define __raw_spin_is_locked(lp)       ((lp)->lock != 0)
 
-typedef struct {
-       volatile unsigned char lock;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} spinlock_t;
-#define SPIN_LOCK_UNLOCKED     (spinlock_t) {0,}
+#define __raw_spin_unlock_wait(lp)     \
+       do {    rmb();                  \
+       } while((lp)->lock)
 
-#define spin_lock_init(lp)     do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0)
-#define spin_is_locked(lp)  ((lp)->lock != 0)
-
-#define spin_unlock_wait(lp)   \
-do {   rmb();                  \
-} while((lp)->lock)
-
-static inline void _raw_spin_lock(spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
        unsigned long tmp;
 
@@ -67,7 +56,7 @@ static inline void _raw_spin_lock(spinlock_t *lock)
        : "memory");
 }
 
-static inline int _raw_spin_trylock(spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
        unsigned long result;
 
@@ -81,7 +70,7 @@ static inline int _raw_spin_trylock(spinlock_t *lock)
        return (result == 0UL);
 }
 
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
        __asm__ __volatile__(
 "      membar          #StoreStore | #LoadStore\n"
@@ -91,7 +80,7 @@ static inline void _raw_spin_unlock(spinlock_t *lock)
        : "memory");
 }
 
-static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
+static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 {
        unsigned long tmp1, tmp2;
 
@@ -115,51 +104,9 @@ static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
        : "memory");
 }
 
-#else /* !(CONFIG_DEBUG_SPINLOCK) */
-
-typedef struct {
-       volatile unsigned char lock;
-       unsigned int owner_pc, owner_cpu;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} spinlock_t;
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 0, 0xff }
-#define spin_lock_init(lp)     do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0)
-#define spin_is_locked(__lock) ((__lock)->lock != 0)
-#define spin_unlock_wait(__lock)       \
-do { \
-       rmb(); \
-} while((__lock)->lock)
-
-extern void _do_spin_lock(spinlock_t *lock, char *str, unsigned long caller);
-extern void _do_spin_unlock(spinlock_t *lock);
-extern int _do_spin_trylock(spinlock_t *lock, unsigned long caller);
-
-#define _raw_spin_trylock(lp)  \
-       _do_spin_trylock(lp, (unsigned long) __builtin_return_address(0))
-#define _raw_spin_lock(lock)   \
-       _do_spin_lock(lock, "spin_lock", \
-                     (unsigned long) __builtin_return_address(0))
-#define _raw_spin_unlock(lock) _do_spin_unlock(lock)
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
-
-#endif /* CONFIG_DEBUG_SPINLOCK */
-
 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
 
-#ifndef CONFIG_DEBUG_SPINLOCK
-
-typedef struct {
-       volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} rwlock_t;
-#define RW_LOCK_UNLOCKED       (rwlock_t) {0,}
-#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)
-
-static void inline __read_lock(rwlock_t *lock)
+static void inline __read_lock(raw_rwlock_t *lock)
 {
        unsigned long tmp1, tmp2;
 
@@ -184,7 +131,7 @@ static void inline __read_lock(rwlock_t *lock)
        : "memory");
 }
 
-static void inline __read_unlock(rwlock_t *lock)
+static void inline __read_unlock(raw_rwlock_t *lock)
 {
        unsigned long tmp1, tmp2;
 
@@ -201,7 +148,7 @@ static void inline __read_unlock(rwlock_t *lock)
        : "memory");
 }
 
-static void inline __write_lock(rwlock_t *lock)
+static void inline __write_lock(raw_rwlock_t *lock)
 {
        unsigned long mask, tmp1, tmp2;
 
@@ -228,7 +175,7 @@ static void inline __write_lock(rwlock_t *lock)
        : "memory");
 }
 
-static void inline __write_unlock(rwlock_t *lock)
+static void inline __write_unlock(raw_rwlock_t *lock)
 {
        __asm__ __volatile__(
 "      membar          #LoadStore | #StoreStore\n"
@@ -238,7 +185,7 @@ static void inline __write_unlock(rwlock_t *lock)
        : "memory");
 }
 
-static int inline __write_trylock(rwlock_t *lock)
+static int inline __write_trylock(raw_rwlock_t *lock)
 {
        unsigned long mask, tmp1, tmp2, result;
 
@@ -263,78 +210,15 @@ static int inline __write_trylock(rwlock_t *lock)
        return result;
 }
 
-#define _raw_read_lock(p)      __read_lock(p)
-#define _raw_read_unlock(p)    __read_unlock(p)
-#define _raw_write_lock(p)     __write_lock(p)
-#define _raw_write_unlock(p)   __write_unlock(p)
-#define _raw_write_trylock(p)  __write_trylock(p)
-
-#else /* !(CONFIG_DEBUG_SPINLOCK) */
-
-typedef struct {
-       volatile unsigned long lock;
-       unsigned int writer_pc, writer_cpu;
-       unsigned int reader_pc[NR_CPUS];
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} rwlock_t;
-#define RW_LOCK_UNLOCKED       (rwlock_t) { 0, 0, 0xff, { } }
-#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)
-
-extern void _do_read_lock(rwlock_t *rw, char *str, unsigned long caller);
-extern void _do_read_unlock(rwlock_t *rw, char *str, unsigned long caller);
-extern void _do_write_lock(rwlock_t *rw, char *str, unsigned long caller);
-extern void _do_write_unlock(rwlock_t *rw, unsigned long caller);
-extern int _do_write_trylock(rwlock_t *rw, char *str, unsigned long caller);
-
-#define _raw_read_lock(lock) \
-do {   unsigned long flags; \
-       local_irq_save(flags); \
-       _do_read_lock(lock, "read_lock", \
-                     (unsigned long) __builtin_return_address(0)); \
-       local_irq_restore(flags); \
-} while(0)
-
-#define _raw_read_unlock(lock) \
-do {   unsigned long flags; \
-       local_irq_save(flags); \
-       _do_read_unlock(lock, "read_unlock", \
-                     (unsigned long) __builtin_return_address(0)); \
-       local_irq_restore(flags); \
-} while(0)
-
-#define _raw_write_lock(lock) \
-do {   unsigned long flags; \
-       local_irq_save(flags); \
-       _do_write_lock(lock, "write_lock", \
-                     (unsigned long) __builtin_return_address(0)); \
-       local_irq_restore(flags); \
-} while(0)
-
-#define _raw_write_unlock(lock) \
-do {   unsigned long flags; \
-       local_irq_save(flags); \
-       _do_write_unlock(lock, \
-                     (unsigned long) __builtin_return_address(0)); \
-       local_irq_restore(flags); \
-} while(0)
-
-#define _raw_write_trylock(lock) \
-({     unsigned long flags; \
-       int val; \
-       local_irq_save(flags); \
-       val = _do_write_trylock(lock, "write_trylock", \
-                               (unsigned long) __builtin_return_address(0)); \
-       local_irq_restore(flags); \
-       val; \
-})
-
-#endif /* CONFIG_DEBUG_SPINLOCK */
-
-#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
-#define read_can_lock(rw)      (!((rw)->lock & 0x80000000UL))
-#define write_can_lock(rw)     (!(rw)->lock)
+#define __raw_read_lock(p)     __read_lock(p)
+#define __raw_read_unlock(p)   __read_unlock(p)
+#define __raw_write_lock(p)    __write_lock(p)
+#define __raw_write_unlock(p)  __write_unlock(p)
+#define __raw_write_trylock(p) __write_trylock(p)
+
+#define __raw_read_trylock(lock)       generic__raw_read_trylock(lock)
+#define __raw_read_can_lock(rw)                (!((rw)->lock & 0x80000000UL))
+#define __raw_write_can_lock(rw)       (!(rw)->lock)
 
 #endif /* !(__ASSEMBLY__) */
 
diff --git a/include/asm-sparc64/spinlock_types.h b/include/asm-sparc64/spinlock_types.h
new file mode 100644 (file)
index 0000000..e128112
--- /dev/null
@@ -0,0 +1,20 @@
+#ifndef __SPARC64_SPINLOCK_TYPES_H
+#define __SPARC64_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+       volatile unsigned char lock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
+
+typedef struct {
+       volatile unsigned int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED         { 0 }
+
+#endif
index bd850a249183d21399465a5b4a612c1f4b56d715..2c192abe9aeb0d29ac9ce1946214bc697af7c170 100644 (file)
@@ -96,8 +96,7 @@ extern unsigned long uml_physmem;
 
 #define __va_space (8*1024*1024)
 
-extern unsigned long to_phys(void *virt);
-extern void *to_virt(unsigned long phys);
+#include "mem.h"
 
 /* Cast to unsigned long before casting to void * to avoid a warning from
  * mmap_kmem about cutting a long long down to a void *.  Not sure that
index b48e0966ecd7b8b936a4be2a3e8cf06d9645edec..ed06170e0eddd448b5ac5f1a2b99613d170ed3ea 100644 (file)
@@ -326,14 +326,22 @@ static inline void set_pte(pte_t *pteptr, pte_t pteval)
 }
 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 
-extern phys_t page_to_phys(struct page *page);
-
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
  */
 
-extern pte_t mk_pte(struct page *page, pgprot_t pgprot);
+#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
+#define __virt_to_page(virt) phys_to_page(__pa(virt))
+#define page_to_phys(page) pfn_to_phys(page_to_pfn(page))
+
+#define mk_pte(page, pgprot) \
+       ({ pte_t pte;                                   \
+                                                       \
+       pte_set_val(pte, page_to_phys(page), (pgprot)); \
+       if (pte_present(pte))                           \
+               pte_mknewprot(pte_mknewpage(pte));      \
+       pte;})
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
@@ -410,8 +418,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #endif
 #endif
 
-extern struct page *phys_to_page(const unsigned long phys);
-extern struct page *__virt_to_page(const unsigned long virt);
 #define virt_to_page(addr) __virt_to_page((const unsigned long) addr)
 
 /*
diff --git a/include/asm-um/spinlock_types.h b/include/asm-um/spinlock_types.h
new file mode 100644 (file)
index 0000000..e5a9429
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef __UM_SPINLOCK_TYPES_H
+#define __UM_SPINLOCK_TYPES_H
+
+#include "asm/arch/spinlock_types.h"
+
+#endif
index 6c813eb521f339f363a71e21caca53ba204bfcf8..f7574196424e1c4e266b1b0d7e9bbd4f5a48f2a3 100644 (file)
@@ -8,7 +8,6 @@
 struct cpuinfo_x86; 
 struct pt_regs;
 
-extern void get_cpu_vendor(struct cpuinfo_x86*);
 extern void start_kernel(void);
 extern void pda_init(int); 
 
index 5aeb57a3baad30b7111aa28cc6149cf891394bd5..69636831ad2f4e3f5d756ab4bbec60da5bfd18f9 100644 (file)
@@ -6,47 +6,21 @@
 #include <asm/page.h>
 #include <linux/config.h>
 
-extern int printk(const char * fmt, ...)
-       __attribute__ ((format (printf, 1, 2)));
-
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
- */
-
-typedef struct {
-       volatile unsigned int lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
-       unsigned magic;
-#endif
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} spinlock_t;
-
-#define SPINLOCK_MAGIC 0xdead4ead
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define SPINLOCK_MAGIC_INIT    , SPINLOCK_MAGIC
-#else
-#define SPINLOCK_MAGIC_INIT    /* */
-#endif
-
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }
-
-#define spin_lock_init(x)      do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-/*
+ *
  * Simple spin lock operations.  There are two variants, one clears IRQ's
  * on the local processor, one does not.
  *
  * We make no fairness assumptions. They have a cost.
+ *
+ * (the type definitions are in asm/spinlock_types.h)
  */
 
-#define spin_is_locked(x)      (*(volatile signed char *)(&(x)->lock) <= 0)
-#define spin_unlock_wait(x)    do { barrier(); } while(spin_is_locked(x))
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+#define __raw_spin_is_locked(x) \
+               (*(volatile signed char *)(&(x)->slock) <= 0)
 
-#define spin_lock_string \
+#define __raw_spin_lock_string \
        "\n1:\t" \
        "lock ; decb %0\n\t" \
        "js 2f\n" \
@@ -58,74 +32,40 @@ typedef struct {
        "jmp 1b\n" \
        LOCK_SECTION_END
 
-/*
- * This works. Despite all the confusion.
- * (except on PPro SMP or if we are using OOSTORE)
- * (PPro errata 66, 92)
- */
-#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
-
-#define spin_unlock_string \
+#define __raw_spin_unlock_string \
        "movb $1,%0" \
-               :"=m" (lock->lock) : : "memory"
-
-
-static inline void _raw_spin_unlock(spinlock_t *lock)
-{
-#ifdef CONFIG_DEBUG_SPINLOCK
-       BUG_ON(lock->magic != SPINLOCK_MAGIC);
-       assert_spin_locked(lock);
-#endif
-       __asm__ __volatile__(
-               spin_unlock_string
-       );
-}
-
-#else
-
-#define spin_unlock_string \
-       "xchgb %b0, %1" \
-               :"=q" (oldval), "=m" (lock->lock) \
-               :"0" (oldval) : "memory"
+               :"=m" (lock->slock) : : "memory"
 
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
-       char oldval = 1;
-#ifdef CONFIG_DEBUG_SPINLOCK
-       BUG_ON(lock->magic != SPINLOCK_MAGIC);
-       assert_spin_locked(lock);
-#endif
        __asm__ __volatile__(
-               spin_unlock_string
-       );
+               __raw_spin_lock_string
+               :"=m" (lock->slock) : : "memory");
 }
 
-#endif
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 
-static inline int _raw_spin_trylock(spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
        char oldval;
+
        __asm__ __volatile__(
                "xchgb %b0,%1"
-               :"=q" (oldval), "=m" (lock->lock)
+               :"=q" (oldval), "=m" (lock->slock)
                :"0" (0) : "memory");
+
        return oldval > 0;
 }
 
-static inline void _raw_spin_lock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-#ifdef CONFIG_DEBUG_SPINLOCK
-       if (lock->magic != SPINLOCK_MAGIC) {
-               printk("eip: %p\n", __builtin_return_address(0));
-               BUG();
-       }
-#endif
        __asm__ __volatile__(
-               spin_lock_string
-               :"=m" (lock->lock) : : "memory");
+               __raw_spin_unlock_string
+       );
 }
 
+#define __raw_spin_unlock_wait(lock) \
+       do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
 
 /*
  * Read-write spinlocks, allowing multiple readers
@@ -136,33 +76,7 @@ static inline void _raw_spin_lock(spinlock_t *lock)
  * can "mix" irq-safe locks - any writer needs to get a
  * irq-safe write-lock, but readers can get non-irqsafe
  * read-locks.
- */
-typedef struct {
-       volatile unsigned int lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
-       unsigned magic;
-#endif
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RWLOCK_MAGIC   0xdeaf1eed
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define RWLOCK_MAGIC_INIT      , RWLOCK_MAGIC
-#else
-#define RWLOCK_MAGIC_INIT      /* */
-#endif
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }
-
-#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
-
-#define read_can_lock(x)       ((int)(x)->lock > 0)
-#define write_can_lock(x)      ((x)->lock == RW_LOCK_BIAS)
-
-/*
+ *
  * On x86, we implement read-write locks as a 32-bit counter
  * with the high bit (sign) being the "contended" bit.
  *
@@ -170,29 +84,24 @@ typedef struct {
  *
  * Changed to use the same technique as rw semaphores.  See
  * semaphore.h for details.  -ben
+ *
+ * the helpers are in arch/i386/kernel/semaphore.c
  */
-/* the spinlock helpers are in arch/i386/kernel/semaphore.c */
 
-static inline void _raw_read_lock(rwlock_t *rw)
+#define __raw_read_can_lock(x)         ((int)(x)->lock > 0)
+#define __raw_write_can_lock(x)                ((x)->lock == RW_LOCK_BIAS)
+
+static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
-#ifdef CONFIG_DEBUG_SPINLOCK
-       BUG_ON(rw->magic != RWLOCK_MAGIC);
-#endif
        __build_read_lock(rw, "__read_lock_failed");
 }
 
-static inline void _raw_write_lock(rwlock_t *rw)
+static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
-#ifdef CONFIG_DEBUG_SPINLOCK
-       BUG_ON(rw->magic != RWLOCK_MAGIC);
-#endif
        __build_write_lock(rw, "__write_lock_failed");
 }
 
-#define _raw_read_unlock(rw)           asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
-#define _raw_write_unlock(rw)  asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
-
-static inline int _raw_read_trylock(rwlock_t *lock)
+static inline int __raw_read_trylock(raw_rwlock_t *lock)
 {
        atomic_t *count = (atomic_t *)lock;
        atomic_dec(count);
@@ -202,7 +111,7 @@ static inline int _raw_read_trylock(rwlock_t *lock)
        return 0;
 }
 
-static inline int _raw_write_trylock(rwlock_t *lock)
+static inline int __raw_write_trylock(raw_rwlock_t *lock)
 {
        atomic_t *count = (atomic_t *)lock;
        if (atomic_sub_and_test(RW_LOCK_BIAS, count))
@@ -211,4 +120,15 @@ static inline int _raw_write_trylock(rwlock_t *lock)
        return 0;
 }
 
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
+{
+       asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
+}
+
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
+{
+       asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0"
+                               : "=m" (rw->lock) : : "memory");
+}
+
 #endif /* __ASM_SPINLOCK_H */
diff --git a/include/asm-x86_64/spinlock_types.h b/include/asm-x86_64/spinlock_types.h
new file mode 100644 (file)
index 0000000..59efe84
--- /dev/null
@@ -0,0 +1,20 @@
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+       volatile unsigned int slock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED       { 1 }
+
+typedef struct {
+       volatile unsigned int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED         { RW_LOCK_BIAS }
+
+#endif
index cdaf03a14a5115333ea424944ba320b1f687bedb..6e1c79c8b6bfd0eebc33187804169d47caa2b92e 100644 (file)
@@ -314,9 +314,8 @@ void zero_fill_bio(struct bio *bio);
  * bvec_kmap_irq and bvec_kunmap_irq!!
  *
  * This function MUST be inlined - it plays with the CPU interrupt flags.
- * Hence the `extern inline'.
  */
-extern inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
+static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
 {
        unsigned long addr;
 
@@ -332,7 +331,7 @@ extern inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
        return (char *) addr + bvec->bv_offset;
 }
 
-extern inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
+static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
 {
        unsigned long ptr = (unsigned long) buffer & PAGE_MASK;
 
@@ -345,7 +344,7 @@ extern inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
 #define bvec_kunmap_irq(buf, flags)    do { *(flags) = 0; } while (0)
 #endif
 
-extern inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
+static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
                                   unsigned long *flags)
 {
        return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags);
diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h
new file mode 100644 (file)
index 0000000..6b20af0
--- /dev/null
@@ -0,0 +1,77 @@
+#ifndef __LINUX_BIT_SPINLOCK_H
+#define __LINUX_BIT_SPINLOCK_H
+
+/*
+ *  bit-based spin_lock()
+ *
+ * Don't use this unless you really need to: spin_lock() and spin_unlock()
+ * are significantly faster.
+ */
+static inline void bit_spin_lock(int bitnum, unsigned long *addr)
+{
+       /*
+        * Assuming the lock is uncontended, this never enters
+        * the body of the outer loop. If it is contended, then
+        * within the inner loop a non-atomic test is used to
+        * busywait with less bus contention for a good time to
+        * attempt to acquire the lock bit.
+        */
+       preempt_disable();
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+       while (test_and_set_bit(bitnum, addr)) {
+               while (test_bit(bitnum, addr)) {
+                       preempt_enable();
+                       cpu_relax();
+                       preempt_disable();
+               }
+       }
+#endif
+       __acquire(bitlock);
+}
+
+/*
+ * Return true if it was acquired
+ */
+static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
+{
+       preempt_disable();
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+       if (test_and_set_bit(bitnum, addr)) {
+               preempt_enable();
+               return 0;
+       }
+#endif
+       __acquire(bitlock);
+       return 1;
+}
+
+/*
+ *  bit-based spin_unlock()
+ */
+static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
+{
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+       BUG_ON(!test_bit(bitnum, addr));
+       smp_mb__before_clear_bit();
+       clear_bit(bitnum, addr);
+#endif
+       preempt_enable();
+       __release(bitlock);
+}
+
+/*
+ * Return true if the lock is held.
+ */
+static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
+{
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+       return test_bit(bitnum, addr);
+#elif defined CONFIG_PREEMPT
+       return preempt_count();
+#else
+       return 1;
+#endif
+}
+
+#endif /* __LINUX_BIT_SPINLOCK_H */
+
index aefa26fbae8add64ee373d9742523b534eaec7ce..efdc9b5bc05c8687380200f8a871975c91709860 100644 (file)
@@ -728,7 +728,7 @@ static inline unsigned int blksize_bits(unsigned int size)
        return bits;
 }
 
-extern inline unsigned int block_size(struct block_device *bdev)
+static inline unsigned int block_size(struct block_device *bdev)
 {
        return bdev->bd_block_size;
 }
index 63035ae67e6350881638ed2f64da672b20bda410..a404c111c937a2da3dcddba17f7aba0a63e82cb4 100644 (file)
@@ -96,7 +96,7 @@ struct changer_position {
  */
 struct changer_element_status {
        int             ces_type;
-       unsigned char   *ces_data;
+       unsigned char   __user *ces_data;
 };
 #define CESTATUS_FULL     0x01 /* full */
 #define CESTATUS_IMPEXP   0x02 /* media was imported (inserted by sysop) */
index e60bfdac348d2d0cb4e3b13e5f858b9514a91fa8..4932ee5c77f0896d081de2ad70dadc5b122492fa 100644 (file)
@@ -19,7 +19,8 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
 
 void dma_pool_destroy(struct dma_pool *pool);
 
-void *dma_pool_alloc(struct dma_pool *pool, int mem_flags, dma_addr_t *handle);
+void *dma_pool_alloc(struct dma_pool *pool, unsigned int __nocast mem_flags,
+                    dma_addr_t *handle);
 
 void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
 
index 7f61227827d760ee8ed89f4829484502b89a0253..e0b77c5af9a02aadab2a1fcd2e967c92c402ce16 100644 (file)
@@ -1509,8 +1509,6 @@ extern void do_generic_mapping_read(struct address_space *mapping,
                                    loff_t *, read_descriptor_t *, read_actor_t);
 extern void
 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
-extern ssize_t generic_file_direct_IO(int rw, struct kiocb *iocb,
-       const struct iovec *iov, loff_t offset, unsigned long nr_segs);
 extern ssize_t generic_file_readv(struct file *filp, const struct iovec *iov, 
        unsigned long nr_segs, loff_t *ppos);
 ssize_t generic_file_writev(struct file *filp, const struct iovec *iov, 
index bd32b79d6295fe7cb4ff3239be932df09a63972b..304aaedea305dadd18748fda4cdd96d6e2408ace 100644 (file)
@@ -198,27 +198,43 @@ struct in6_flowlabel_req
  * MCAST_MSFILTER              48
  */
 
-/* RFC3542 advanced socket options (50-67) */
-#define IPV6_RECVPKTINFO       50
-#define IPV6_PKTINFO           51
-#if 0
-#define IPV6_RECVPATHMTU       52
-#define IPV6_PATHMTU           53
-#define IPV6_DONTFRAG          54
-#define IPV6_USE_MIN_MTU       55
-#endif
-#define IPV6_RECVHOPOPTS       56
-#define IPV6_HOPOPTS           57
-#if 0
-#define IPV6_RECVRTHDRDSTOPTS  58      /* Unused, see net/ipv6/datagram.c */
+/*
+ * Advanced API (RFC3542) (1)
+ *
+ * Note: IPV6_RECVRTHDRDSTOPTS does not exist. see net/ipv6/datagram.c.
+ */
+
+#define IPV6_RECVPKTINFO       49
+#define IPV6_PKTINFO           50
+#define IPV6_RECVHOPLIMIT      51
+#define IPV6_HOPLIMIT          52
+#define IPV6_RECVHOPOPTS       53
+#define IPV6_HOPOPTS           54
+#define IPV6_RTHDRDSTOPTS      55
+#define IPV6_RECVRTHDR         56
+#define IPV6_RTHDR             57
+#define IPV6_RECVDSTOPTS       58
+#define IPV6_DSTOPTS           59
+#if 0  /* not yet */
+#define IPV6_RECVPATHMTU       60
+#define IPV6_PATHMTU           61
+#define IPV6_DONTFRAG          62
+#define IPV6_USE_MIN_MTU       63
 #endif
-#define IPV6_RTHDRDSTOPTS      59
-#define IPV6_RECVRTHDR         60
-#define IPV6_RTHDR             61
-#define IPV6_RECVDSTOPTS       62
-#define IPV6_DSTOPTS           63
-#define IPV6_RECVHOPLIMIT      64
-#define IPV6_HOPLIMIT          65
+
+/*
+ * Netfilter
+ *
+ * Following socket options are used in ip6_tables;
+ * see include/linux/netfilter_ipv6/ip6_tables.h.
+ *
+ * IP6T_SO_SET_REPLACE / IP6T_SO_GET_INFO              64
+ * IP6T_SO_SET_ADD_COUNTERS / IP6T_SO_GET_ENTRIES      65
+ */
+
+/*
+ * Advanced API (RFC3542) (2)
+ */
 #define IPV6_RECVTCLASS                66
 #define IPV6_TCLASS            67
 
index 4767e5429534dff11c004923b5a3ee9827bcac50..e8c296ff6257adeccdd9d60245f463d1ea0255a3 100644 (file)
@@ -289,6 +289,8 @@ struct input_absinfo {
 #define KEY_SCROLLDOWN         178
 #define KEY_KPLEFTPAREN                179
 #define KEY_KPRIGHTPAREN       180
+#define KEY_NEW                        181
+#define KEY_REDO               182
 
 #define KEY_F13                        183
 #define KEY_F14                        184
@@ -335,6 +337,12 @@ struct input_absinfo {
 #define KEY_KBDILLUMDOWN       229
 #define KEY_KBDILLUMUP         230
 
+#define KEY_SEND               231
+#define KEY_REPLY              232
+#define KEY_FORWARDMAIL                233
+#define KEY_SAVE               234
+#define KEY_DOCUMENTS          235
+
 #define KEY_UNKNOWN            240
 
 #define BTN_MISC               0x100
index 6c5f7b39a4b0135fc479fed6979f267d681755f2..bb6f88e14061ed6d3543a632aebb7b616fca0a1e 100644 (file)
@@ -68,7 +68,7 @@ struct ipv6_opt_hdr {
 
 struct rt0_hdr {
        struct ipv6_rt_hdr      rt_hdr;
-       __u32                   bitmap;         /* strict/loose bit map */
+       __u32                   reserved;
        struct in6_addr         addr[0];
 
 #define rt0_type               rt_hdr.type
index 84321a4cac93a1e942a4df994f28dbf44aeeec86..de097269bd7f00596f6da5059876e6d7305c1c56 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/buffer_head.h>
 #include <linux/journal-head.h>
 #include <linux/stddef.h>
+#include <linux/bit_spinlock.h>
 #include <asm/semaphore.h>
 #endif
 
index d7a2555a886c35edfe157f763236749d05e9f0e5..6acfdbba734b17f554a089ca30f27ef12102789e 100644 (file)
@@ -254,23 +254,23 @@ static inline u64 get_jiffies_64(void)
  */
 static inline unsigned int jiffies_to_msecs(const unsigned long j)
 {
-#if HZ <= 1000 && !(1000 % HZ)
-       return (1000 / HZ) * j;
-#elif HZ > 1000 && !(HZ % 1000)
-       return (j + (HZ / 1000) - 1)/(HZ / 1000);
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+       return (MSEC_PER_SEC / HZ) * j;
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+       return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
 #else
-       return (j * 1000) / HZ;
+       return (j * MSEC_PER_SEC) / HZ;
 #endif
 }
 
 static inline unsigned int jiffies_to_usecs(const unsigned long j)
 {
-#if HZ <= 1000000 && !(1000000 % HZ)
-       return (1000000 / HZ) * j;
-#elif HZ > 1000000 && !(HZ % 1000000)
-       return (j + (HZ / 1000000) - 1)/(HZ / 1000000);
+#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
+       return (USEC_PER_SEC / HZ) * j;
+#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
+       return (j + (HZ / USEC_PER_SEC) - 1)/(HZ / USEC_PER_SEC);
 #else
-       return (j * 1000000) / HZ;
+       return (j * USEC_PER_SEC) / HZ;
 #endif
 }
 
@@ -278,12 +278,12 @@ static inline unsigned long msecs_to_jiffies(const unsigned int m)
 {
        if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
                return MAX_JIFFY_OFFSET;
-#if HZ <= 1000 && !(1000 % HZ)
-       return (m + (1000 / HZ) - 1) / (1000 / HZ);
-#elif HZ > 1000 && !(HZ % 1000)
-       return m * (HZ / 1000);
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+       return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+       return m * (HZ / MSEC_PER_SEC);
 #else
-       return (m * HZ + 999) / 1000;
+       return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
 #endif
 }
 
@@ -291,12 +291,12 @@ static inline unsigned long usecs_to_jiffies(const unsigned int u)
 {
        if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
                return MAX_JIFFY_OFFSET;
-#if HZ <= 1000000 && !(1000000 % HZ)
-       return (u + (1000000 / HZ) - 1) / (1000000 / HZ);
-#elif HZ > 1000000 && !(HZ % 1000000)
-       return u * (HZ / 1000000);
+#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
+       return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
+#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
+       return u * (HZ / USEC_PER_SEC);
 #else
-       return (u * HZ + 999999) / 1000000;
+       return (u * HZ + USEC_PER_SEC - 1) / USEC_PER_SEC;
 #endif
 }
 
index 8081a281fa5eb0a195e8b13cc8110c531a2f90f0..9c51917b1cce1f9146ec4b564cb43780cc3f3b21 100644 (file)
@@ -24,7 +24,7 @@
 
 struct radix_tree_root {
        unsigned int            height;
-       int                     gfp_mask;
+       unsigned int            gfp_mask;
        struct radix_tree_node  *rnode;
 };
 
@@ -50,7 +50,7 @@ void *radix_tree_delete(struct radix_tree_root *, unsigned long);
 unsigned int
 radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
                        unsigned long first_index, unsigned int max_items);
-int radix_tree_preload(int gfp_mask);
+int radix_tree_preload(unsigned int __nocast gfp_mask);
 void radix_tree_init(void);
 void *radix_tree_tag_set(struct radix_tree_root *root,
                        unsigned long index, int tag);
index 17e458e17e2bb08708e623e260609a9653d40996..af00b10294cde3abfc57b09a4d6c6867413575b3 100644 (file)
@@ -2097,7 +2097,7 @@ void reiserfs_free_block(struct reiserfs_transaction_handle *th, struct inode *,
                         b_blocknr_t, int for_unformatted);
 int reiserfs_allocate_blocknrs(reiserfs_blocknr_hint_t *, b_blocknr_t *, int,
                               int);
-extern inline int reiserfs_new_form_blocknrs(struct tree_balance *tb,
+static inline int reiserfs_new_form_blocknrs(struct tree_balance *tb,
                                             b_blocknr_t * new_blocknrs,
                                             int amount_needed)
 {
@@ -2113,7 +2113,7 @@ extern inline int reiserfs_new_form_blocknrs(struct tree_balance *tb,
                                          0);
 }
 
-extern inline int reiserfs_new_unf_blocknrs(struct reiserfs_transaction_handle
+static inline int reiserfs_new_unf_blocknrs(struct reiserfs_transaction_handle
                                            *th, struct inode *inode,
                                            b_blocknr_t * new_blocknrs,
                                            struct path *path, long block)
@@ -2130,7 +2130,7 @@ extern inline int reiserfs_new_unf_blocknrs(struct reiserfs_transaction_handle
 }
 
 #ifdef REISERFS_PREALLOCATE
-extern inline int reiserfs_new_unf_blocknrs2(struct reiserfs_transaction_handle
+static inline int reiserfs_new_unf_blocknrs2(struct reiserfs_transaction_handle
                                             *th, struct inode *inode,
                                             b_blocknr_t * new_blocknrs,
                                             struct path *path, long block)
index c551e6a1447e57949e84435fe3fb846799d30f8b..4b83cb230006afe05271c0143c2b86dbb40c0ebc 100644 (file)
@@ -114,6 +114,7 @@ extern unsigned long nr_iowait(void);
 #define TASK_TRACED            8
 #define EXIT_ZOMBIE            16
 #define EXIT_DEAD              32
+#define TASK_NONINTERACTIVE    64
 
 #define __set_task_state(tsk, state_value)             \
        do { (tsk)->state = (state_value); } while (0)
@@ -202,6 +203,8 @@ extern int in_sched_functions(unsigned long addr);
 
 #define        MAX_SCHEDULE_TIMEOUT    LONG_MAX
 extern signed long FASTCALL(schedule_timeout(signed long timeout));
+extern signed long schedule_timeout_interruptible(signed long timeout);
+extern signed long schedule_timeout_uninterruptible(signed long timeout);
 asmlinkage void schedule(void);
 
 struct namespace;
@@ -782,6 +785,7 @@ struct task_struct {
        short il_next;
 #endif
 #ifdef CONFIG_CPUSETS
+       short cpuset_sem_nest_depth;
        struct cpuset *cpuset;
        nodemask_t mems_allowed;
        int cpuset_mems_generation;
index 42a6bea58af369f4408eba0cf020dbc6d74352eb..1f356f3bbc6468d56a75abe833eea06280c199aa 100644 (file)
@@ -118,7 +118,8 @@ extern void kfree(const void *);
 extern unsigned int ksize(const void *);
 
 #ifdef CONFIG_NUMA
-extern void *kmem_cache_alloc_node(kmem_cache_t *, int flags, int node);
+extern void *kmem_cache_alloc_node(kmem_cache_t *,
+                       unsigned int __nocast flags, int node);
 extern void *kmalloc_node(size_t size, unsigned int __nocast flags, int node);
 #else
 static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int node)
index d6ba068719b61bf4a035e4d6e297694b65941442..cdc99a27840d6433cabf5d7a6c5bfa6e43198215 100644 (file)
@@ -2,7 +2,48 @@
 #define __LINUX_SPINLOCK_H
 
 /*
- * include/linux/spinlock.h - generic locking declarations
+ * include/linux/spinlock.h - generic spinlock/rwlock declarations
+ *
+ * here's the role of the various spinlock/rwlock related include files:
+ *
+ * on SMP builds:
+ *
+ *  asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
+ *                        initializers
+ *
+ *  linux/spinlock_types.h:
+ *                        defines the generic type and initializers
+ *
+ *  asm/spinlock.h:       contains the __raw_spin_*()/etc. lowlevel
+ *                        implementations, mostly inline assembly code
+ *
+ *   (also included on UP-debug builds:)
+ *
+ *  linux/spinlock_api_smp.h:
+ *                        contains the prototypes for the _spin_*() APIs.
+ *
+ *  linux/spinlock.h:     builds the final spin_*() APIs.
+ *
+ * on UP builds:
+ *
+ *  linux/spinlock_type_up.h:
+ *                        contains the generic, simplified UP spinlock type.
+ *                        (which is an empty structure on non-debug builds)
+ *
+ *  linux/spinlock_types.h:
+ *                        defines the generic type and initializers
+ *
+ *  linux/spinlock_up.h:
+ *                        contains the __raw_spin_*()/etc. version of UP
+ *                        builds. (which are NOPs on non-debug, non-preempt
+ *                        builds)
+ *
+ *   (included on UP-non-debug builds:)
+ *
+ *  linux/spinlock_api_up.h:
+ *                        builds the _spin_*() APIs.
+ *
+ *  linux/spinlock.h:     builds the final spin_*() APIs.
  */
 
 #include <linux/config.h>
@@ -13,7 +54,6 @@
 #include <linux/kernel.h>
 #include <linux/stringify.h>
 
-#include <asm/processor.h>     /* for cpu relax */
 #include <asm/system.h>
 
 /*
 #define __lockfunc fastcall __attribute__((section(".spinlock.text")))
 
 /*
- * If CONFIG_SMP is set, pull in the _raw_* definitions
+ * Pull the raw_spinlock_t and raw_rwlock_t definitions:
  */
-#ifdef CONFIG_SMP
-
-#define assert_spin_locked(x)  BUG_ON(!spin_is_locked(x))
-#include <asm/spinlock.h>
-
-int __lockfunc _spin_trylock(spinlock_t *lock);
-int __lockfunc _read_trylock(rwlock_t *lock);
-int __lockfunc _write_trylock(rwlock_t *lock);
-
-void __lockfunc _spin_lock(spinlock_t *lock)   __acquires(spinlock_t);
-void __lockfunc _read_lock(rwlock_t *lock)     __acquires(rwlock_t);
-void __lockfunc _write_lock(rwlock_t *lock)    __acquires(rwlock_t);
-
-void __lockfunc _spin_unlock(spinlock_t *lock) __releases(spinlock_t);
-void __lockfunc _read_unlock(rwlock_t *lock)   __releases(rwlock_t);
-void __lockfunc _write_unlock(rwlock_t *lock)  __releases(rwlock_t);
-
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)  __acquires(spinlock_t);
-unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)    __acquires(rwlock_t);
-unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)   __acquires(rwlock_t);
-
-void __lockfunc _spin_lock_irq(spinlock_t *lock)       __acquires(spinlock_t);
-void __lockfunc _spin_lock_bh(spinlock_t *lock)                __acquires(spinlock_t);
-void __lockfunc _read_lock_irq(rwlock_t *lock)         __acquires(rwlock_t);
-void __lockfunc _read_lock_bh(rwlock_t *lock)          __acquires(rwlock_t);
-void __lockfunc _write_lock_irq(rwlock_t *lock)                __acquires(rwlock_t);
-void __lockfunc _write_lock_bh(rwlock_t *lock)         __acquires(rwlock_t);
-
-void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) __releases(spinlock_t);
-void __lockfunc _spin_unlock_irq(spinlock_t *lock)                             __releases(spinlock_t);
-void __lockfunc _spin_unlock_bh(spinlock_t *lock)                              __releases(spinlock_t);
-void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)   __releases(rwlock_t);
-void __lockfunc _read_unlock_irq(rwlock_t *lock)                               __releases(rwlock_t);
-void __lockfunc _read_unlock_bh(rwlock_t *lock)                                        __releases(rwlock_t);
-void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)  __releases(rwlock_t);
-void __lockfunc _write_unlock_irq(rwlock_t *lock)                              __releases(rwlock_t);
-void __lockfunc _write_unlock_bh(rwlock_t *lock)                               __releases(rwlock_t);
-
-int __lockfunc _spin_trylock_bh(spinlock_t *lock);
-int __lockfunc generic_raw_read_trylock(rwlock_t *lock);
-int in_lock_functions(unsigned long addr);
-
-#else
+#include <linux/spinlock_types.h>
 
-#define in_lock_functions(ADDR) 0
+extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
 
-#if !defined(CONFIG_PREEMPT) && !defined(CONFIG_DEBUG_SPINLOCK)
-# define _atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic)
-# define ATOMIC_DEC_AND_LOCK
-#endif
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define SPINLOCK_MAGIC 0x1D244B3C
-typedef struct {
-       unsigned long magic;
-       volatile unsigned long lock;
-       volatile unsigned int babble;
-       const char *module;
-       char *owner;
-       int oline;
-} spinlock_t;
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { SPINLOCK_MAGIC, 0, 10, __FILE__ , NULL, 0}
-
-#define spin_lock_init(x) \
-       do { \
-               (x)->magic = SPINLOCK_MAGIC; \
-               (x)->lock = 0; \
-               (x)->babble = 5; \
-               (x)->module = __FILE__; \
-               (x)->owner = NULL; \
-               (x)->oline = 0; \
-       } while (0)
-
-#define CHECK_LOCK(x) \
-       do { \
-               if ((x)->magic != SPINLOCK_MAGIC) { \
-                       printk(KERN_ERR "%s:%d: spin_is_locked on uninitialized spinlock %p.\n", \
-                                       __FILE__, __LINE__, (x)); \
-               } \
-       } while(0)
-
-#define _raw_spin_lock(x)              \
-       do { \
-               CHECK_LOCK(x); \
-               if ((x)->lock&&(x)->babble) { \
-                       (x)->babble--; \
-                       printk("%s:%d: spin_lock(%s:%p) already locked by %s/%d\n", \
-                                       __FILE__,__LINE__, (x)->module, \
-                                       (x), (x)->owner, (x)->oline); \
-               } \
-               (x)->lock = 1; \
-               (x)->owner = __FILE__; \
-               (x)->oline = __LINE__; \
-       } while (0)
-
-/* without debugging, spin_is_locked on UP always says
- * FALSE. --> printk if already locked. */
-#define spin_is_locked(x) \
-       ({ \
-               CHECK_LOCK(x); \
-               if ((x)->lock&&(x)->babble) { \
-                       (x)->babble--; \
-                       printk("%s:%d: spin_is_locked(%s:%p) already locked by %s/%d\n", \
-                                       __FILE__,__LINE__, (x)->module, \
-                                       (x), (x)->owner, (x)->oline); \
-               } \
-               0; \
-       })
-
-/* with debugging, assert_spin_locked() on UP does check
- * the lock value properly */
-#define assert_spin_locked(x) \
-       ({ \
-               CHECK_LOCK(x); \
-               BUG_ON(!(x)->lock); \
-       })
-
-/* without debugging, spin_trylock on UP always says
- * TRUE. --> printk if already locked. */
-#define _raw_spin_trylock(x) \
-       ({ \
-               CHECK_LOCK(x); \
-               if ((x)->lock&&(x)->babble) { \
-                       (x)->babble--; \
-                       printk("%s:%d: spin_trylock(%s:%p) already locked by %s/%d\n", \
-                                       __FILE__,__LINE__, (x)->module, \
-                                       (x), (x)->owner, (x)->oline); \
-               } \
-               (x)->lock = 1; \
-               (x)->owner = __FILE__; \
-               (x)->oline = __LINE__; \
-               1; \
-       })
-
-#define spin_unlock_wait(x)    \
-       do { \
-               CHECK_LOCK(x); \
-               if ((x)->lock&&(x)->babble) { \
-                       (x)->babble--; \
-                       printk("%s:%d: spin_unlock_wait(%s:%p) owned by %s/%d\n", \
-                                       __FILE__,__LINE__, (x)->module, (x), \
-                                       (x)->owner, (x)->oline); \
-               }\
-       } while (0)
-
-#define _raw_spin_unlock(x) \
-       do { \
-               CHECK_LOCK(x); \
-               if (!(x)->lock&&(x)->babble) { \
-                       (x)->babble--; \
-                       printk("%s:%d: spin_unlock(%s:%p) not locked\n", \
-                                       __FILE__,__LINE__, (x)->module, (x));\
-               } \
-               (x)->lock = 0; \
-       } while (0)
-#else
 /*
- * gcc versions before ~2.95 have a nasty bug with empty initializers.
+ * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them):
  */
-#if (__GNUC__ > 2)
-  typedef struct { } spinlock_t;
-  #define SPIN_LOCK_UNLOCKED (spinlock_t) { }
+#if defined(CONFIG_SMP)
+# include <asm/spinlock.h>
 #else
-  typedef struct { int gcc_is_buggy; } spinlock_t;
-  #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
+# include <linux/spinlock_up.h>
 #endif
 
+#define spin_lock_init(lock)   do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0)
+#define rwlock_init(lock)      do { *(lock) = RW_LOCK_UNLOCKED; } while (0)
+
+#define spin_is_locked(lock)   __raw_spin_is_locked(&(lock)->raw_lock)
+
+/**
+ * spin_unlock_wait - wait until the spinlock gets unlocked
+ * @lock: the spinlock in question.
+ */
+#define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock)
+
 /*
- * If CONFIG_SMP is unset, declare the _raw_* definitions as nops
+ * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
  */
-#define spin_lock_init(lock)   do { (void)(lock); } while(0)
-#define _raw_spin_lock(lock)   do { (void)(lock); } while(0)
-#define spin_is_locked(lock)   ((void)(lock), 0)
-#define assert_spin_locked(lock)       do { (void)(lock); } while(0)
-#define _raw_spin_trylock(lock)        (((void)(lock), 1))
-#define spin_unlock_wait(lock) (void)(lock)
-#define _raw_spin_unlock(lock) do { (void)(lock); } while(0)
-#endif /* CONFIG_DEBUG_SPINLOCK */
-
-/* RW spinlocks: No debug version */
-
-#if (__GNUC__ > 2)
-  typedef struct { } rwlock_t;
-  #define RW_LOCK_UNLOCKED (rwlock_t) { }
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+# include <linux/spinlock_api_smp.h>
 #else
-  typedef struct { int gcc_is_buggy; } rwlock_t;
-  #define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
+# include <linux/spinlock_api_up.h>
 #endif
 
-#define rwlock_init(lock)      do { (void)(lock); } while(0)
-#define _raw_read_lock(lock)   do { (void)(lock); } while(0)
-#define _raw_read_unlock(lock) do { (void)(lock); } while(0)
-#define _raw_write_lock(lock)  do { (void)(lock); } while(0)
-#define _raw_write_unlock(lock)        do { (void)(lock); } while(0)
-#define read_can_lock(lock)    (((void)(lock), 1))
-#define write_can_lock(lock)   (((void)(lock), 1))
-#define _raw_read_trylock(lock) ({ (void)(lock); (1); })
-#define _raw_write_trylock(lock) ({ (void)(lock); (1); })
-
-#define _spin_trylock(lock)    ({preempt_disable(); _raw_spin_trylock(lock) ? \
-                               1 : ({preempt_enable(); 0;});})
-
-#define _read_trylock(lock)    ({preempt_disable();_raw_read_trylock(lock) ? \
-                               1 : ({preempt_enable(); 0;});})
-
-#define _write_trylock(lock)   ({preempt_disable(); _raw_write_trylock(lock) ? \
-                               1 : ({preempt_enable(); 0;});})
-
-#define _spin_trylock_bh(lock) ({preempt_disable(); local_bh_disable(); \
-                               _raw_spin_trylock(lock) ? \
-                               1 : ({preempt_enable_no_resched(); local_bh_enable(); 0;});})
-
-#define _spin_lock(lock)       \
-do { \
-       preempt_disable(); \
-       _raw_spin_lock(lock); \
-       __acquire(lock); \
-} while(0)
-
-#define _write_lock(lock) \
-do { \
-       preempt_disable(); \
-       _raw_write_lock(lock); \
-       __acquire(lock); \
-} while(0)
-#define _read_lock(lock)       \
-do { \
-       preempt_disable(); \
-       _raw_read_lock(lock); \
-       __acquire(lock); \
-} while(0)
-
-#define _spin_unlock(lock) \
-do { \
-       _raw_spin_unlock(lock); \
-       preempt_enable(); \
-       __release(lock); \
-} while (0)
-
-#define _write_unlock(lock) \
-do { \
-       _raw_write_unlock(lock); \
-       preempt_enable(); \
-       __release(lock); \
-} while(0)
-
-#define _read_unlock(lock) \
-do { \
-       _raw_read_unlock(lock); \
-       preempt_enable(); \
-       __release(lock); \
-} while(0)
-
-#define _spin_lock_irqsave(lock, flags) \
-do {   \
-       local_irq_save(flags); \
-       preempt_disable(); \
-       _raw_spin_lock(lock); \
-       __acquire(lock); \
-} while (0)
-
-#define _spin_lock_irq(lock) \
-do { \
-       local_irq_disable(); \
-       preempt_disable(); \
-       _raw_spin_lock(lock); \
-       __acquire(lock); \
-} while (0)
-
-#define _spin_lock_bh(lock) \
-do { \
-       local_bh_disable(); \
-       preempt_disable(); \
-       _raw_spin_lock(lock); \
-       __acquire(lock); \
-} while (0)
-
-#define _read_lock_irqsave(lock, flags) \
-do {   \
-       local_irq_save(flags); \
-       preempt_disable(); \
-       _raw_read_lock(lock); \
-       __acquire(lock); \
-} while (0)
-
-#define _read_lock_irq(lock) \
-do { \
-       local_irq_disable(); \
-       preempt_disable(); \
-       _raw_read_lock(lock); \
-       __acquire(lock); \
-} while (0)
-
-#define _read_lock_bh(lock) \
-do { \
-       local_bh_disable(); \
-       preempt_disable(); \
-       _raw_read_lock(lock); \
-       __acquire(lock); \
-} while (0)
-
-#define _write_lock_irqsave(lock, flags) \
-do {   \
-       local_irq_save(flags); \
-       preempt_disable(); \
-       _raw_write_lock(lock); \
-       __acquire(lock); \
-} while (0)
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void _raw_spin_lock(spinlock_t *lock);
+#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+ extern int _raw_spin_trylock(spinlock_t *lock);
+ extern void _raw_spin_unlock(spinlock_t *lock);
+
+ extern void _raw_read_lock(rwlock_t *lock);
+ extern int _raw_read_trylock(rwlock_t *lock);
+ extern void _raw_read_unlock(rwlock_t *lock);
+ extern void _raw_write_lock(rwlock_t *lock);
+ extern int _raw_write_trylock(rwlock_t *lock);
+ extern void _raw_write_unlock(rwlock_t *lock);
+#else
+# define _raw_spin_unlock(lock)                __raw_spin_unlock(&(lock)->raw_lock)
+# define _raw_spin_trylock(lock)       __raw_spin_trylock(&(lock)->raw_lock)
+# define _raw_spin_lock(lock)          __raw_spin_lock(&(lock)->raw_lock)
+# define _raw_spin_lock_flags(lock, flags) \
+               __raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
+# define _raw_read_lock(rwlock)                __raw_read_lock(&(rwlock)->raw_lock)
+# define _raw_write_lock(rwlock)       __raw_write_lock(&(rwlock)->raw_lock)
+# define _raw_read_unlock(rwlock)      __raw_read_unlock(&(rwlock)->raw_lock)
+# define _raw_write_unlock(rwlock)     __raw_write_unlock(&(rwlock)->raw_lock)
+# define _raw_read_trylock(rwlock)     __raw_read_trylock(&(rwlock)->raw_lock)
+# define _raw_write_trylock(rwlock)    __raw_write_trylock(&(rwlock)->raw_lock)
+#endif
 
-#define _write_lock_irq(lock) \
-do { \
-       local_irq_disable(); \
-       preempt_disable(); \
-       _raw_write_lock(lock); \
-       __acquire(lock); \
-} while (0)
-
-#define _write_lock_bh(lock) \
-do { \
-       local_bh_disable(); \
-       preempt_disable(); \
-       _raw_write_lock(lock); \
-       __acquire(lock); \
-} while (0)
-
-#define _spin_unlock_irqrestore(lock, flags) \
-do { \
-       _raw_spin_unlock(lock); \
-       local_irq_restore(flags); \
-       preempt_enable(); \
-       __release(lock); \
-} while (0)
-
-#define _spin_unlock_irq(lock) \
-do { \
-       _raw_spin_unlock(lock); \
-       local_irq_enable(); \
-       preempt_enable(); \
-       __release(lock); \
-} while (0)
-
-#define _spin_unlock_bh(lock) \
-do { \
-       _raw_spin_unlock(lock); \
-       preempt_enable_no_resched(); \
-       local_bh_enable(); \
-       __release(lock); \
-} while (0)
-
-#define _write_unlock_bh(lock) \
-do { \
-       _raw_write_unlock(lock); \
-       preempt_enable_no_resched(); \
-       local_bh_enable(); \
-       __release(lock); \
-} while (0)
-
-#define _read_unlock_irqrestore(lock, flags) \
-do { \
-       _raw_read_unlock(lock); \
-       local_irq_restore(flags); \
-       preempt_enable(); \
-       __release(lock); \
-} while (0)
-
-#define _write_unlock_irqrestore(lock, flags) \
-do { \
-       _raw_write_unlock(lock); \
-       local_irq_restore(flags); \
-       preempt_enable(); \
-       __release(lock); \
-} while (0)
-
-#define _read_unlock_irq(lock) \
-do { \
-       _raw_read_unlock(lock); \
-       local_irq_enable();     \
-       preempt_enable();       \
-       __release(lock); \
-} while (0)
-
-#define _read_unlock_bh(lock)  \
-do { \
-       _raw_read_unlock(lock); \
-       preempt_enable_no_resched();    \
-       local_bh_enable();      \
-       __release(lock); \
-} while (0)
-
-#define _write_unlock_irq(lock)        \
-do { \
-       _raw_write_unlock(lock);        \
-       local_irq_enable();     \
-       preempt_enable();       \
-       __release(lock); \
-} while (0)
-
-#endif /* !SMP */
+#define read_can_lock(rwlock)          __raw_read_can_lock(&(rwlock)->raw_lock)
+#define write_can_lock(rwlock)         __raw_write_can_lock(&(rwlock)->raw_lock)
 
 /*
  * Define the various spin_lock and rw_lock methods.  Note we define these
  * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
  * methods are defined as nops in the case they are not required.
  */
-#define spin_trylock(lock)     __cond_lock(_spin_trylock(lock))
-#define read_trylock(lock)     __cond_lock(_read_trylock(lock))
-#define write_trylock(lock)    __cond_lock(_write_trylock(lock))
+#define spin_trylock(lock)             __cond_lock(_spin_trylock(lock))
+#define read_trylock(lock)             __cond_lock(_read_trylock(lock))
+#define write_trylock(lock)            __cond_lock(_write_trylock(lock))
 
-#define spin_lock(lock)                _spin_lock(lock)
-#define write_lock(lock)       _write_lock(lock)
-#define read_lock(lock)                _read_lock(lock)
+#define spin_lock(lock)                        _spin_lock(lock)
+#define write_lock(lock)               _write_lock(lock)
+#define read_lock(lock)                        _read_lock(lock)
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 #define spin_lock_irqsave(lock, flags) flags = _spin_lock_irqsave(lock)
 #define read_lock_irqsave(lock, flags) flags = _read_lock_irqsave(lock)
 #define write_lock_irqsave(lock, flags)        flags = _write_lock_irqsave(lock)
@@ -470,137 +171,59 @@ do { \
 #define write_lock_irq(lock)           _write_lock_irq(lock)
 #define write_lock_bh(lock)            _write_lock_bh(lock)
 
-#define spin_unlock(lock)      _spin_unlock(lock)
-#define write_unlock(lock)     _write_unlock(lock)
-#define read_unlock(lock)      _read_unlock(lock)
+#define spin_unlock(lock)              _spin_unlock(lock)
+#define write_unlock(lock)             _write_unlock(lock)
+#define read_unlock(lock)              _read_unlock(lock)
 
-#define spin_unlock_irqrestore(lock, flags)    _spin_unlock_irqrestore(lock, flags)
+#define spin_unlock_irqrestore(lock, flags) \
+                                       _spin_unlock_irqrestore(lock, flags)
 #define spin_unlock_irq(lock)          _spin_unlock_irq(lock)
 #define spin_unlock_bh(lock)           _spin_unlock_bh(lock)
 
-#define read_unlock_irqrestore(lock, flags)    _read_unlock_irqrestore(lock, flags)
-#define read_unlock_irq(lock)                  _read_unlock_irq(lock)
-#define read_unlock_bh(lock)                   _read_unlock_bh(lock)
+#define read_unlock_irqrestore(lock, flags) \
+                                       _read_unlock_irqrestore(lock, flags)
+#define read_unlock_irq(lock)          _read_unlock_irq(lock)
+#define read_unlock_bh(lock)           _read_unlock_bh(lock)
 
-#define write_unlock_irqrestore(lock, flags)   _write_unlock_irqrestore(lock, flags)
-#define write_unlock_irq(lock)                 _write_unlock_irq(lock)
-#define write_unlock_bh(lock)                  _write_unlock_bh(lock)
+#define write_unlock_irqrestore(lock, flags) \
+                                       _write_unlock_irqrestore(lock, flags)
+#define write_unlock_irq(lock)         _write_unlock_irq(lock)
+#define write_unlock_bh(lock)          _write_unlock_bh(lock)
 
-#define spin_trylock_bh(lock)                  __cond_lock(_spin_trylock_bh(lock))
+#define spin_trylock_bh(lock)          __cond_lock(_spin_trylock_bh(lock))
 
 #define spin_trylock_irq(lock) \
 ({ \
        local_irq_disable(); \
        _spin_trylock(lock) ? \
-       1 : ({local_irq_enable(); 0; }); \
+       1 : ({ local_irq_enable(); 0;  }); \
 })
 
 #define spin_trylock_irqsave(lock, flags) \
 ({ \
        local_irq_save(flags); \
        _spin_trylock(lock) ? \
-       1 : ({local_irq_restore(flags); 0;}); \
+       1 : ({ local_irq_restore(flags); 0; }); \
 })
 
-#ifdef CONFIG_LOCKMETER
-extern void _metered_spin_lock   (spinlock_t *lock);
-extern void _metered_spin_unlock (spinlock_t *lock);
-extern int  _metered_spin_trylock(spinlock_t *lock);
-extern void _metered_read_lock    (rwlock_t *lock);
-extern void _metered_read_unlock  (rwlock_t *lock);
-extern void _metered_write_lock   (rwlock_t *lock);
-extern void _metered_write_unlock (rwlock_t *lock);
-extern int  _metered_read_trylock (rwlock_t *lock);
-extern int  _metered_write_trylock(rwlock_t *lock);
-#endif
-
-/* "lock on reference count zero" */
-#ifndef ATOMIC_DEC_AND_LOCK
-#include <asm/atomic.h>
-extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
-#endif
-
-#define atomic_dec_and_lock(atomic,lock) __cond_lock(_atomic_dec_and_lock(atomic,lock))
-
-/*
- *  bit-based spin_lock()
- *
- * Don't use this unless you really need to: spin_lock() and spin_unlock()
- * are significantly faster.
- */
-static inline void bit_spin_lock(int bitnum, unsigned long *addr)
-{
-       /*
-        * Assuming the lock is uncontended, this never enters
-        * the body of the outer loop. If it is contended, then
-        * within the inner loop a non-atomic test is used to
-        * busywait with less bus contention for a good time to
-        * attempt to acquire the lock bit.
-        */
-       preempt_disable();
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-       while (test_and_set_bit(bitnum, addr)) {
-               while (test_bit(bitnum, addr)) {
-                       preempt_enable();
-                       cpu_relax();
-                       preempt_disable();
-               }
-       }
-#endif
-       __acquire(bitlock);
-}
-
 /*
- * Return true if it was acquired
+ * Pull the atomic_t declaration:
+ * (asm-mips/atomic.h needs above definitions)
  */
-static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
-{
-       preempt_disable();      
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-       if (test_and_set_bit(bitnum, addr)) {
-               preempt_enable();
-               return 0;
-       }
-#endif
-       __acquire(bitlock);
-       return 1;
-}
-
-/*
- *  bit-based spin_unlock()
- */
-static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
-{
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-       BUG_ON(!test_bit(bitnum, addr));
-       smp_mb__before_clear_bit();
-       clear_bit(bitnum, addr);
-#endif
-       preempt_enable();
-       __release(bitlock);
-}
-
-/*
- * Return true if the lock is held.
+#include <asm/atomic.h>
+/**
+ * atomic_dec_and_lock - lock on reaching reference count zero
+ * @atomic: the atomic counter
+ * @lock: the spinlock in question
  */
-static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
-{
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-       return test_bit(bitnum, addr);
-#elif defined CONFIG_PREEMPT
-       return preempt_count();
-#else
-       return 1;
-#endif
-}
-
-#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
-#define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED
+extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
+#define atomic_dec_and_lock(atomic, lock) \
+               __cond_lock(_atomic_dec_and_lock(atomic, lock))
 
 /**
  * spin_can_lock - would spin_trylock() succeed?
  * @lock: the spinlock in question.
  */
-#define spin_can_lock(lock)            (!spin_is_locked(lock))
+#define spin_can_lock(lock)    (!spin_is_locked(lock))
 
 #endif /* __LINUX_SPINLOCK_H */
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
new file mode 100644 (file)
index 0000000..78e6989
--- /dev/null
@@ -0,0 +1,57 @@
+#ifndef __LINUX_SPINLOCK_API_SMP_H
+#define __LINUX_SPINLOCK_API_SMP_H
+
+#ifndef __LINUX_SPINLOCK_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * include/linux/spinlock_api_smp.h
+ *
+ * spinlock API declarations on SMP (and debug)
+ * (implemented in kernel/spinlock.c)
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+int in_lock_functions(unsigned long addr);
+
+#define assert_spin_locked(x)  BUG_ON(!spin_is_locked(x))
+
+void __lockfunc _spin_lock(spinlock_t *lock)           __acquires(spinlock_t);
+void __lockfunc _read_lock(rwlock_t *lock)             __acquires(rwlock_t);
+void __lockfunc _write_lock(rwlock_t *lock)            __acquires(rwlock_t);
+void __lockfunc _spin_lock_bh(spinlock_t *lock)                __acquires(spinlock_t);
+void __lockfunc _read_lock_bh(rwlock_t *lock)          __acquires(rwlock_t);
+void __lockfunc _write_lock_bh(rwlock_t *lock)         __acquires(rwlock_t);
+void __lockfunc _spin_lock_irq(spinlock_t *lock)       __acquires(spinlock_t);
+void __lockfunc _read_lock_irq(rwlock_t *lock)         __acquires(rwlock_t);
+void __lockfunc _write_lock_irq(rwlock_t *lock)                __acquires(rwlock_t);
+unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
+                                                       __acquires(spinlock_t);
+unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
+                                                       __acquires(rwlock_t);
+unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
+                                                       __acquires(rwlock_t);
+int __lockfunc _spin_trylock(spinlock_t *lock);
+int __lockfunc _read_trylock(rwlock_t *lock);
+int __lockfunc _write_trylock(rwlock_t *lock);
+int __lockfunc _spin_trylock_bh(spinlock_t *lock);
+void __lockfunc _spin_unlock(spinlock_t *lock)         __releases(spinlock_t);
+void __lockfunc _read_unlock(rwlock_t *lock)           __releases(rwlock_t);
+void __lockfunc _write_unlock(rwlock_t *lock)          __releases(rwlock_t);
+void __lockfunc _spin_unlock_bh(spinlock_t *lock)      __releases(spinlock_t);
+void __lockfunc _read_unlock_bh(rwlock_t *lock)                __releases(rwlock_t);
+void __lockfunc _write_unlock_bh(rwlock_t *lock)       __releases(rwlock_t);
+void __lockfunc _spin_unlock_irq(spinlock_t *lock)     __releases(spinlock_t);
+void __lockfunc _read_unlock_irq(rwlock_t *lock)       __releases(rwlock_t);
+void __lockfunc _write_unlock_irq(rwlock_t *lock)      __releases(rwlock_t);
+void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+                                                       __releases(spinlock_t);
+void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+                                                       __releases(rwlock_t);
+void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+                                                       __releases(rwlock_t);
+
+#endif /* __LINUX_SPINLOCK_API_SMP_H */
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
new file mode 100644 (file)
index 0000000..cd81cee
--- /dev/null
@@ -0,0 +1,80 @@
+#ifndef __LINUX_SPINLOCK_API_UP_H
+#define __LINUX_SPINLOCK_API_UP_H
+
+#ifndef __LINUX_SPINLOCK_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * include/linux/spinlock_api_up.h
+ *
+ * spinlock API implementation on UP-nondebug (inlined implementation)
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+#define in_lock_functions(ADDR)                0
+
+#define assert_spin_locked(lock)       do { (void)(lock); } while (0)
+
+/*
+ * In the UP-nondebug case there's no real locking going on, so the
+ * only thing we have to do is to keep the preempt counts and irq
+ * flags straight, to supress compiler warnings of unused lock
+ * variables, and to add the proper checker annotations:
+ */
+#define __LOCK(lock) \
+  do { preempt_disable(); __acquire(lock); (void)(lock); } while (0)
+
+#define __LOCK_BH(lock) \
+  do { local_bh_disable(); __LOCK(lock); } while (0)
+
+#define __LOCK_IRQ(lock) \
+  do { local_irq_disable(); __LOCK(lock); } while (0)
+
+#define __LOCK_IRQSAVE(lock, flags) \
+  do { local_irq_save(flags); __LOCK(lock); } while (0)
+
+#define __UNLOCK(lock) \
+  do { preempt_enable(); __release(lock); (void)(lock); } while (0)
+
+#define __UNLOCK_BH(lock) \
+  do { preempt_enable_no_resched(); local_bh_enable(); __release(lock); (void)(lock); } while (0)
+
+#define __UNLOCK_IRQ(lock) \
+  do { local_irq_enable(); __UNLOCK(lock); } while (0)
+
+#define __UNLOCK_IRQRESTORE(lock, flags) \
+  do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
+
+#define _spin_lock(lock)                       __LOCK(lock)
+#define _read_lock(lock)                       __LOCK(lock)
+#define _write_lock(lock)                      __LOCK(lock)
+#define _spin_lock_bh(lock)                    __LOCK_BH(lock)
+#define _read_lock_bh(lock)                    __LOCK_BH(lock)
+#define _write_lock_bh(lock)                   __LOCK_BH(lock)
+#define _spin_lock_irq(lock)                   __LOCK_IRQ(lock)
+#define _read_lock_irq(lock)                   __LOCK_IRQ(lock)
+#define _write_lock_irq(lock)                  __LOCK_IRQ(lock)
+#define _spin_lock_irqsave(lock, flags)                __LOCK_IRQSAVE(lock, flags)
+#define _read_lock_irqsave(lock, flags)                __LOCK_IRQSAVE(lock, flags)
+#define _write_lock_irqsave(lock, flags)       __LOCK_IRQSAVE(lock, flags)
+#define _spin_trylock(lock)                    ({ __LOCK(lock); 1; })
+#define _read_trylock(lock)                    ({ __LOCK(lock); 1; })
+#define _write_trylock(lock)                   ({ __LOCK(lock); 1; })
+#define _spin_trylock_bh(lock)                 ({ __LOCK_BH(lock); 1; })
+#define _spin_unlock(lock)                     __UNLOCK(lock)
+#define _read_unlock(lock)                     __UNLOCK(lock)
+#define _write_unlock(lock)                    __UNLOCK(lock)
+#define _spin_unlock_bh(lock)                  __UNLOCK_BH(lock)
+#define _write_unlock_bh(lock)                 __UNLOCK_BH(lock)
+#define _read_unlock_bh(lock)                  __UNLOCK_BH(lock)
+#define _spin_unlock_irq(lock)                 __UNLOCK_IRQ(lock)
+#define _read_unlock_irq(lock)                 __UNLOCK_IRQ(lock)
+#define _write_unlock_irq(lock)                        __UNLOCK_IRQ(lock)
+#define _spin_unlock_irqrestore(lock, flags)   __UNLOCK_IRQRESTORE(lock, flags)
+#define _read_unlock_irqrestore(lock, flags)   __UNLOCK_IRQRESTORE(lock, flags)
+#define _write_unlock_irqrestore(lock, flags)  __UNLOCK_IRQRESTORE(lock, flags)
+
+#endif /* __LINUX_SPINLOCK_API_UP_H */
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
new file mode 100644 (file)
index 0000000..9cb51e0
--- /dev/null
@@ -0,0 +1,67 @@
+#ifndef __LINUX_SPINLOCK_TYPES_H
+#define __LINUX_SPINLOCK_TYPES_H
+
+/*
+ * include/linux/spinlock_types.h - generic spinlock type definitions
+ *                                  and initializers
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+#if defined(CONFIG_SMP)
+# include <asm/spinlock_types.h>
+#else
+# include <linux/spinlock_types_up.h>
+#endif
+
+typedef struct {
+       raw_spinlock_t raw_lock;
+#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
+       unsigned int break_lock;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+       unsigned int magic, owner_cpu;
+       void *owner;
+#endif
+} spinlock_t;
+
+#define SPINLOCK_MAGIC         0xdead4ead
+
+typedef struct {
+       raw_rwlock_t raw_lock;
+#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
+       unsigned int break_lock;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+       unsigned int magic, owner_cpu;
+       void *owner;
+#endif
+} rwlock_t;
+
+#define RWLOCK_MAGIC           0xdeaf1eed
+
+#define SPINLOCK_OWNER_INIT    ((void *)-1L)
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+# define SPIN_LOCK_UNLOCKED                                            \
+       (spinlock_t)    {       .raw_lock = __RAW_SPIN_LOCK_UNLOCKED,   \
+                               .magic = SPINLOCK_MAGIC,                \
+                               .owner = SPINLOCK_OWNER_INIT,           \
+                               .owner_cpu = -1 }
+#define RW_LOCK_UNLOCKED                                               \
+       (rwlock_t)      {       .raw_lock = __RAW_RW_LOCK_UNLOCKED,     \
+                               .magic = RWLOCK_MAGIC,                  \
+                               .owner = SPINLOCK_OWNER_INIT,           \
+                               .owner_cpu = -1 }
+#else
+# define SPIN_LOCK_UNLOCKED \
+       (spinlock_t)    {       .raw_lock = __RAW_SPIN_LOCK_UNLOCKED }
+#define RW_LOCK_UNLOCKED \
+       (rwlock_t)      {       .raw_lock = __RAW_RW_LOCK_UNLOCKED }
+#endif
+
+#define DEFINE_SPINLOCK(x)     spinlock_t x = SPIN_LOCK_UNLOCKED
+#define DEFINE_RWLOCK(x)       rwlock_t x = RW_LOCK_UNLOCKED
+
+#endif /* __LINUX_SPINLOCK_TYPES_H */
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
new file mode 100644 (file)
index 0000000..def2d17
--- /dev/null
@@ -0,0 +1,51 @@
+#ifndef __LINUX_SPINLOCK_TYPES_UP_H
+#define __LINUX_SPINLOCK_TYPES_UP_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * include/linux/spinlock_types_up.h - spinlock type definitions for UP
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+
+typedef struct {
+       volatile unsigned int slock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+
+#else
+
+/*
+ * All gcc 2.95 versions and early versions of 2.96 have a nasty bug
+ * with empty initializers.
+ */
+#if (__GNUC__ > 2)
+typedef struct { } raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED { }
+#else
+typedef struct { int gcc_is_buggy; } raw_spinlock_t;
+#define __RAW_SPIN_LOCK_UNLOCKED (raw_spinlock_t) { 0 }
+#endif
+
+#endif
+
+#if (__GNUC__ > 2)
+typedef struct {
+       /* no debug version on UP */
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED { }
+#else
+typedef struct { int gcc_is_buggy; } raw_rwlock_t;
+#define __RAW_RW_LOCK_UNLOCKED (raw_rwlock_t) { 0 }
+#endif
+
+#endif /* __LINUX_SPINLOCK_TYPES_UP_H */
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
new file mode 100644 (file)
index 0000000..31accf2
--- /dev/null
@@ -0,0 +1,74 @@
+#ifndef __LINUX_SPINLOCK_UP_H
+#define __LINUX_SPINLOCK_UP_H
+
+#ifndef __LINUX_SPINLOCK_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * include/linux/spinlock_up.h - UP-debug version of spinlocks.
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ *
+ * In the debug case, 1 means unlocked, 0 means locked. (the values
+ * are inverted, to catch initialization bugs)
+ *
+ * No atomicity anywhere, we are on UP.
+ */
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+
+#define __raw_spin_is_locked(x)                ((x)->slock == 0)
+
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
+{
+       lock->slock = 0;
+}
+
+static inline void
+__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+{
+       local_irq_save(flags);
+       lock->slock = 0;
+}
+
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+{
+       char oldval = lock->slock;
+
+       lock->slock = 0;
+
+       return oldval > 0;
+}
+
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+{
+       lock->slock = 1;
+}
+
+/*
+ * Read-write spinlocks. No debug version.
+ */
+#define __raw_read_lock(lock)          do { (void)(lock); } while (0)
+#define __raw_write_lock(lock)         do { (void)(lock); } while (0)
+#define __raw_read_trylock(lock)       ({ (void)(lock); 1; })
+#define __raw_write_trylock(lock)      ({ (void)(lock); 1; })
+#define __raw_read_unlock(lock)                do { (void)(lock); } while (0)
+#define __raw_write_unlock(lock)       do { (void)(lock); } while (0)
+
+#else /* DEBUG_SPINLOCK */
+#define __raw_spin_is_locked(lock)     ((void)(lock), 0)
+/* for sched.c and kernel_lock.c: */
+# define __raw_spin_lock(lock)         do { (void)(lock); } while (0)
+# define __raw_spin_unlock(lock)       do { (void)(lock); } while (0)
+# define __raw_spin_trylock(lock)      ({ (void)(lock); 1; })
+#endif /* DEBUG_SPINLOCK */
+
+#define __raw_read_can_lock(lock)      (((void)(lock), 1))
+#define __raw_write_can_lock(lock)     (((void)(lock), 1))
+
+#define __raw_spin_unlock_wait(lock) \
+               do { cpu_relax(); } while (__raw_spin_is_locked(lock))
+
+#endif /* __LINUX_SPINLOCK_UP_H */
index c10d4c21c18397cd3bfe66ea83d173d89a5d85b2..8e83f4e778bb11657f2363cf82eb491562615439 100644 (file)
@@ -28,17 +28,10 @@ struct timezone {
 #ifdef __KERNEL__
 
 /* Parameters used to convert the timespec values */
-#ifndef USEC_PER_SEC
+#define MSEC_PER_SEC (1000L)
 #define USEC_PER_SEC (1000000L)
-#endif
-
-#ifndef NSEC_PER_SEC
 #define NSEC_PER_SEC (1000000000L)
-#endif
-
-#ifndef NSEC_PER_USEC
 #define NSEC_PER_USEC (1000L)
-#endif
 
 static __inline__ int timespec_equal(struct timespec *a, struct timespec *b) 
 { 
index 542dbaee65129480867593be885ab71c05be8796..343d883d69c5d56314b7b15280513376f1b6d1bd 100644 (file)
@@ -109,8 +109,6 @@ int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0);
 int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
 int sync_page_range(struct inode *inode, struct address_space *mapping,
                        loff_t pos, size_t count);
-int sync_page_range_nolock(struct inode *inode, struct address_space
-               *mapping, loff_t pos, size_t count);
 
 /* pdflush.c */
 extern int nr_pdflush_threads; /* Global so it can be exported to sysctl
index 0acf245f441d6640cbf8739211bb19dfd51e9d8c..3a926011507b8768ff9991b55837a3f7ee9f579e 100644 (file)
@@ -69,7 +69,7 @@ struct mqueue_inode_info {
 
        struct sigevent notify;
        pid_t notify_owner;
-       struct user_struct *user;       /* user who created, for accouting */
+       struct user_struct *user;       /* user who created, for accounting */
        struct sock *notify_sock;
        struct sk_buff *notify_cookie;
 
index 8d57a2f1226baa8034e8d93b7e4806d5a3f5fce1..ff4dc02ce17027f22dc4bf4027075653ee7f42bb 100644 (file)
@@ -12,6 +12,7 @@ obj-y     = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
 obj-$(CONFIG_FUTEX) += futex.o
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
 obj-$(CONFIG_SMP) += cpu.o spinlock.o
+obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
 obj-$(CONFIG_UID16) += uid16.o
 obj-$(CONFIG_MODULES) += module.o
 obj-$(CONFIG_KALLSYMS) += kallsyms.o
index f70e6027cca9758c7ca35f3dc4bd89b7d97d5b14..b756f527497ea8c201d533df64ff053da0084d96 100644 (file)
@@ -165,7 +165,7 @@ out:
 }
 
 /*
- * Close the old accouting file (if currently open) and then replace
+ * Close the old accounting file (if currently open) and then replace
  * it with file (if non-NULL).
  *
  * NOTE: acct_globals.lock MUST be held on entry and exit.
@@ -199,11 +199,16 @@ static void acct_file_reopen(struct file *file)
        }
 }
 
-/*
- *  sys_acct() is the only system call needed to implement process
- *  accounting. It takes the name of the file where accounting records
- *  should be written. If the filename is NULL, accounting will be
- *  shutdown.
+/**
+ * sys_acct - enable/disable process accounting
+ * @name: file name for accounting records or NULL to shutdown accounting
+ *
+ * Returns 0 for success or negative errno values for failure.
+ *
+ * sys_acct() is the only system call needed to implement process
+ * accounting. It takes the name of the file where accounting records
+ * should be written. If the filename is NULL, accounting will be
+ * shutdown.
  */
 asmlinkage long sys_acct(const char __user *name)
 {
@@ -250,9 +255,12 @@ asmlinkage long sys_acct(const char __user *name)
        return (0);
 }
 
-/*
- * If the accouting is turned on for a file in the filesystem pointed
- * to by sb, turn accouting off.
+/**
+ * acct_auto_close - turn off a filesystem's accounting if it is on
+ * @sb: super block for the filesystem
+ *
+ * If the accounting is turned on for a file in the filesystem pointed
+ * to by sb, turn accounting off.
  */
 void acct_auto_close(struct super_block *sb)
 {
@@ -503,8 +511,11 @@ static void do_acct_process(long exitcode, struct file *file)
        set_fs(fs);
 }
 
-/*
+/**
  * acct_process - now just a wrapper around do_acct_process
+ * @exitcode: task exit code
+ *
+ * handles process accounting for an exiting task
  */
 void acct_process(long exitcode)
 {
@@ -530,9 +541,9 @@ void acct_process(long exitcode)
 }
 
 
-/*
- * acct_update_integrals
- *    -  update mm integral fields in task_struct
+/**
+ * acct_update_integrals - update mm integral fields in task_struct
+ * @tsk: task_struct for accounting
  */
 void acct_update_integrals(struct task_struct *tsk)
 {
@@ -547,9 +558,9 @@ void acct_update_integrals(struct task_struct *tsk)
        }
 }
 
-/*
- * acct_clear_integrals
- *    - clear the mm integral fields in task_struct
+/**
+ * acct_clear_integrals - clear the mm integral fields in task_struct
+ * @tsk: task_struct whose accounting fields are cleared
  */
 void acct_clear_integrals(struct task_struct *tsk)
 {
index ddfcaaa86623a0c02b68bc93a62c3d54bbe51762..102296e21ea86671f10fb922acf67feb81b22b1e 100644 (file)
@@ -48,8 +48,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
        if (!time_after(expire, now))
                return 0;
 
-       current->state = TASK_INTERRUPTIBLE;
-       expire = schedule_timeout(expire - now);
+       expire = schedule_timeout_interruptible(expire - now);
        if (expire == 0)
                return 0;
 
@@ -82,8 +81,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
                return -EINVAL;
 
        expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec);
-       current->state = TASK_INTERRUPTIBLE;
-       expire = schedule_timeout(expire);
+       expire = schedule_timeout_interruptible(expire);
        if (expire == 0)
                return 0;
 
@@ -795,8 +793,7 @@ compat_sys_rt_sigtimedwait (compat_sigset_t __user *uthese,
                        recalc_sigpending();
                        spin_unlock_irq(&current->sighand->siglock);
 
-                       current->state = TASK_INTERRUPTIBLE;
-                       timeout = schedule_timeout(timeout);
+                       timeout = schedule_timeout_interruptible(timeout);
 
                        spin_lock_irq(&current->sighand->siglock);
                        sig = dequeue_signal(current, &s, &info);
index 712d02029971ed456a4025d23f0cd4d187d2f6ff..407b5f0a8c8eeed2aea648b08748771dd284b3d7 100644 (file)
@@ -181,6 +181,37 @@ static struct super_block *cpuset_sb = NULL;
 
 static DECLARE_MUTEX(cpuset_sem);
 
+/*
+ * The global cpuset semaphore cpuset_sem can be needed by the
+ * memory allocator to update a tasks mems_allowed (see the calls
+ * to cpuset_update_current_mems_allowed()) or to walk up the
+ * cpuset hierarchy to find a mem_exclusive cpuset see the calls
+ * to cpuset_excl_nodes_overlap()).
+ *
+ * But if the memory allocation is being done by cpuset.c code, it
+ * usually already holds cpuset_sem.  Double tripping on a kernel
+ * semaphore deadlocks the current task, and any other task that
+ * subsequently tries to obtain the lock.
+ *
+ * Run all up's and down's on cpuset_sem through the following
+ * wrappers, which will detect this nested locking, and avoid
+ * deadlocking.
+ */
+
+static inline void cpuset_down(struct semaphore *psem)
+{
+       if (current->cpuset_sem_nest_depth == 0)
+               down(psem);
+       current->cpuset_sem_nest_depth++;
+}
+
+static inline void cpuset_up(struct semaphore *psem)
+{
+       current->cpuset_sem_nest_depth--;
+       if (current->cpuset_sem_nest_depth == 0)
+               up(psem);
+}
+
 /*
  * A couple of forward declarations required, due to cyclic reference loop:
  *  cpuset_mkdir -> cpuset_create -> cpuset_populate_dir -> cpuset_add_file
@@ -522,19 +553,10 @@ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
  * Refresh current tasks mems_allowed and mems_generation from
  * current tasks cpuset.  Call with cpuset_sem held.
  *
- * Be sure to call refresh_mems() on any cpuset operation which
- * (1) holds cpuset_sem, and (2) might possibly alloc memory.
- * Call after obtaining cpuset_sem lock, before any possible
- * allocation.  Otherwise one risks trying to allocate memory
- * while the task cpuset_mems_generation is not the same as
- * the mems_generation in its cpuset, which would deadlock on
- * cpuset_sem in cpuset_update_current_mems_allowed().
- *
- * Since we hold cpuset_sem, once refresh_mems() is called, the
- * test (current->cpuset_mems_generation != cs->mems_generation)
- * in cpuset_update_current_mems_allowed() will remain false,
- * until we drop cpuset_sem.  Anyone else who would change our
- * cpusets mems_generation needs to lock cpuset_sem first.
+ * This routine is needed to update the per-task mems_allowed
+ * data, within the task's context, when it is trying to allocate
+ * memory (in various mm/mempolicy.c routines) and notices
+ * that some other task has been modifying its cpuset.
  */
 
 static void refresh_mems(void)
@@ -840,7 +862,7 @@ static ssize_t cpuset_common_file_write(struct file *file, const char __user *us
        }
        buffer[nbytes] = 0;     /* nul-terminate */
 
-       down(&cpuset_sem);
+       cpuset_down(&cpuset_sem);
 
        if (is_removed(cs)) {
                retval = -ENODEV;
@@ -874,7 +896,7 @@ static ssize_t cpuset_common_file_write(struct file *file, const char __user *us
        if (retval == 0)
                retval = nbytes;
 out2:
-       up(&cpuset_sem);
+       cpuset_up(&cpuset_sem);
        cpuset_release_agent(pathbuf);
 out1:
        kfree(buffer);
@@ -914,9 +936,9 @@ static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
 {
        cpumask_t mask;
 
-       down(&cpuset_sem);
+       cpuset_down(&cpuset_sem);
        mask = cs->cpus_allowed;
-       up(&cpuset_sem);
+       cpuset_up(&cpuset_sem);
 
        return cpulist_scnprintf(page, PAGE_SIZE, mask);
 }
@@ -925,9 +947,9 @@ static int cpuset_sprintf_memlist(char *page, struct cpuset *cs)
 {
        nodemask_t mask;
 
-       down(&cpuset_sem);
+       cpuset_down(&cpuset_sem);
        mask = cs->mems_allowed;
-       up(&cpuset_sem);
+       cpuset_up(&cpuset_sem);
 
        return nodelist_scnprintf(page, PAGE_SIZE, mask);
 }
@@ -1334,8 +1356,7 @@ static long cpuset_create(struct cpuset *parent, const char *name, int mode)
        if (!cs)
                return -ENOMEM;
 
-       down(&cpuset_sem);
-       refresh_mems();
+       cpuset_down(&cpuset_sem);
        cs->flags = 0;
        if (notify_on_release(parent))
                set_bit(CS_NOTIFY_ON_RELEASE, &cs->flags);
@@ -1360,14 +1381,14 @@ static long cpuset_create(struct cpuset *parent, const char *name, int mode)
         * will down() this new directory's i_sem and if we race with
         * another mkdir, we might deadlock.
         */
-       up(&cpuset_sem);
+       cpuset_up(&cpuset_sem);
 
        err = cpuset_populate_dir(cs->dentry);
        /* If err < 0, we have a half-filled directory - oh well ;) */
        return 0;
 err:
        list_del(&cs->sibling);
-       up(&cpuset_sem);
+       cpuset_up(&cpuset_sem);
        kfree(cs);
        return err;
 }
@@ -1389,14 +1410,13 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
 
        /* the vfs holds both inode->i_sem already */
 
-       down(&cpuset_sem);
-       refresh_mems();
+       cpuset_down(&cpuset_sem);
        if (atomic_read(&cs->count) > 0) {
-               up(&cpuset_sem);
+               cpuset_up(&cpuset_sem);
                return -EBUSY;
        }
        if (!list_empty(&cs->children)) {
-               up(&cpuset_sem);
+               cpuset_up(&cpuset_sem);
                return -EBUSY;
        }
        parent = cs->parent;
@@ -1412,7 +1432,7 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
        spin_unlock(&d->d_lock);
        cpuset_d_remove_dir(d);
        dput(d);
-       up(&cpuset_sem);
+       cpuset_up(&cpuset_sem);
        cpuset_release_agent(pathbuf);
        return 0;
 }
@@ -1515,10 +1535,10 @@ void cpuset_exit(struct task_struct *tsk)
        if (notify_on_release(cs)) {
                char *pathbuf = NULL;
 
-               down(&cpuset_sem);
+               cpuset_down(&cpuset_sem);
                if (atomic_dec_and_test(&cs->count))
                        check_for_release(cs, &pathbuf);
-               up(&cpuset_sem);
+               cpuset_up(&cpuset_sem);
                cpuset_release_agent(pathbuf);
        } else {
                atomic_dec(&cs->count);
@@ -1539,11 +1559,11 @@ cpumask_t cpuset_cpus_allowed(const struct task_struct *tsk)
 {
        cpumask_t mask;
 
-       down(&cpuset_sem);
+       cpuset_down(&cpuset_sem);
        task_lock((struct task_struct *)tsk);
        guarantee_online_cpus(tsk->cpuset, &mask);
        task_unlock((struct task_struct *)tsk);
-       up(&cpuset_sem);
+       cpuset_up(&cpuset_sem);
 
        return mask;
 }
@@ -1568,9 +1588,9 @@ void cpuset_update_current_mems_allowed(void)
        if (!cs)
                return;         /* task is exiting */
        if (current->cpuset_mems_generation != cs->mems_generation) {
-               down(&cpuset_sem);
+               cpuset_down(&cpuset_sem);
                refresh_mems();
-               up(&cpuset_sem);
+               cpuset_up(&cpuset_sem);
        }
 }
 
@@ -1669,14 +1689,14 @@ int cpuset_zone_allowed(struct zone *z, unsigned int __nocast gfp_mask)
                return 0;
 
        /* Not hardwall and node outside mems_allowed: scan up cpusets */
-       down(&cpuset_sem);
+       cpuset_down(&cpuset_sem);
        cs = current->cpuset;
        if (!cs)
                goto done;              /* current task exiting */
        cs = nearest_exclusive_ancestor(cs);
        allowed = node_isset(node, cs->mems_allowed);
 done:
-       up(&cpuset_sem);
+       cpuset_up(&cpuset_sem);
        return allowed;
 }
 
@@ -1697,7 +1717,7 @@ int cpuset_excl_nodes_overlap(const struct task_struct *p)
        const struct cpuset *cs1, *cs2; /* my and p's cpuset ancestors */
        int overlap = 0;                /* do cpusets overlap? */
 
-       down(&cpuset_sem);
+       cpuset_down(&cpuset_sem);
        cs1 = current->cpuset;
        if (!cs1)
                goto done;              /* current task exiting */
@@ -1708,7 +1728,7 @@ int cpuset_excl_nodes_overlap(const struct task_struct *p)
        cs2 = nearest_exclusive_ancestor(cs2);
        overlap = nodes_intersects(cs1->mems_allowed, cs2->mems_allowed);
 done:
-       up(&cpuset_sem);
+       cpuset_up(&cpuset_sem);
 
        return overlap;
 }
@@ -1731,7 +1751,7 @@ static int proc_cpuset_show(struct seq_file *m, void *v)
                return -ENOMEM;
 
        tsk = m->private;
-       down(&cpuset_sem);
+       cpuset_down(&cpuset_sem);
        task_lock(tsk);
        cs = tsk->cpuset;
        task_unlock(tsk);
@@ -1746,7 +1766,7 @@ static int proc_cpuset_show(struct seq_file *m, void *v)
        seq_puts(m, buf);
        seq_putc(m, '\n');
 out:
-       up(&cpuset_sem);
+       cpuset_up(&cpuset_sem);
        kfree(buf);
        return retval;
 }
index 2632b812cf24a1b7ce0e109689793b8476362d42..dbd4490afec14b1f2e71d11099feb04331836509 100644 (file)
@@ -875,7 +875,7 @@ static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req)
  * smp_call_function() if an IPI is sent by the same process we are
  * waiting to become inactive.
  */
-void wait_task_inactive(task_t * p)
+void wait_task_inactive(task_t *p)
 {
        unsigned long flags;
        runqueue_t *rq;
@@ -966,8 +966,11 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
                int local_group;
                int i;
 
+               /* Skip over this group if it has no CPUs allowed */
+               if (!cpus_intersects(group->cpumask, p->cpus_allowed))
+                       goto nextgroup;
+
                local_group = cpu_isset(this_cpu, group->cpumask);
-               /* XXX: put a cpus allowed check */
 
                /* Tally up the load of all CPUs in the group */
                avg_load = 0;
@@ -992,6 +995,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
                        min_load = avg_load;
                        idlest = group;
                }
+nextgroup:
                group = group->next;
        } while (group != sd->groups);
 
@@ -1003,13 +1007,18 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 /*
  * find_idlest_queue - find the idlest runqueue among the cpus in group.
  */
-static int find_idlest_cpu(struct sched_group *group, int this_cpu)
+static int
+find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 {
+       cpumask_t tmp;
        unsigned long load, min_load = ULONG_MAX;
        int idlest = -1;
        int i;
 
-       for_each_cpu_mask(i, group->cpumask) {
+       /* Traverse only the allowed CPUs */
+       cpus_and(tmp, group->cpumask, p->cpus_allowed);
+
+       for_each_cpu_mask(i, tmp) {
                load = source_load(i, 0);
 
                if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -1052,7 +1061,7 @@ static int sched_balance_self(int cpu, int flag)
                if (!group)
                        goto nextlevel;
 
-               new_cpu = find_idlest_cpu(group, cpu);
+               new_cpu = find_idlest_cpu(group, t, cpu);
                if (new_cpu == -1 || new_cpu == cpu)
                        goto nextlevel;
 
@@ -1127,7 +1136,7 @@ static inline int wake_idle(int cpu, task_t *p)
  *
  * returns failure only if the task is already active.
  */
-static int try_to_wake_up(task_t * p, unsigned int state, int sync)
+static int try_to_wake_up(task_t *p, unsigned int state, int sync)
 {
        int cpu, this_cpu, success = 0;
        unsigned long flags;
@@ -1251,6 +1260,16 @@ out_activate:
                p->activated = -1;
        }
 
+       /*
+        * Tasks that have marked their sleep as noninteractive get
+        * woken up without updating their sleep average. (i.e. their
+        * sleep is handled in a priority-neutral manner, no priority
+        * boost and no penalty.)
+        */
+       if (old_state & TASK_NONINTERACTIVE)
+               __activate_task(p, rq);
+       else
+               activate_task(p, rq, cpu == this_cpu);
        /*
         * Sync wakeups (i.e. those types of wakeups where the waker
         * has indicated that it will leave the CPU in short order)
@@ -1259,7 +1278,6 @@ out_activate:
         * the waker guarantees that the freshly woken up task is going
         * to be considered on this CPU.)
         */
-       activate_task(p, rq, cpu == this_cpu);
        if (!sync || cpu != this_cpu) {
                if (TASK_PREEMPTS_CURR(p, rq))
                        resched_task(rq->curr);
@@ -1274,7 +1292,7 @@ out:
        return success;
 }
 
-int fastcall wake_up_process(task_t * p)
+int fastcall wake_up_process(task_t *p)
 {
        return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
                                 TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
@@ -1353,7 +1371,7 @@ void fastcall sched_fork(task_t *p, int clone_flags)
  * that must be done for every newly created context, then puts the task
  * on the runqueue and wakes it.
  */
-void fastcall wake_up_new_task(task_t * p, unsigned long clone_flags)
+void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags)
 {
        unsigned long flags;
        int this_cpu, cpu;
@@ -1436,7 +1454,7 @@ void fastcall wake_up_new_task(task_t * p, unsigned long clone_flags)
  * artificially, because any timeslice recovered here
  * was given away by the parent in the first place.)
  */
-void fastcall sched_exit(task_t * p)
+void fastcall sched_exit(task_t *p)
 {
        unsigned long flags;
        runqueue_t *rq;
@@ -1511,6 +1529,10 @@ static inline void finish_task_switch(runqueue_t *rq, task_t *prev)
         *              Manfred Spraul <manfred@colorfullife.com>
         */
        prev_task_flags = prev->flags;
+#ifdef CONFIG_DEBUG_SPINLOCK
+       /* this is a valid case when another task releases the spinlock */
+       rq->lock.owner = current;
+#endif
        finish_arch_switch(prev);
        finish_lock_switch(rq, prev);
        if (mm)
@@ -1753,7 +1775,8 @@ void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
  */
 static inline
 int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
-            struct sched_domain *sd, enum idle_type idle, int *all_pinned)
+                    struct sched_domain *sd, enum idle_type idle,
+                    int *all_pinned)
 {
        /*
         * We do not migrate tasks that are:
@@ -1883,10 +1906,11 @@ out:
  */
 static struct sched_group *
 find_busiest_group(struct sched_domain *sd, int this_cpu,
-                  unsigned long *imbalance, enum idle_type idle)
+                  unsigned long *imbalance, enum idle_type idle, int *sd_idle)
 {
        struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
        unsigned long max_load, avg_load, total_load, this_load, total_pwr;
+       unsigned long max_pull;
        int load_idx;
 
        max_load = this_load = total_load = total_pwr = 0;
@@ -1908,6 +1932,9 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
                avg_load = 0;
 
                for_each_cpu_mask(i, group->cpumask) {
+                       if (*sd_idle && !idle_cpu(i))
+                               *sd_idle = 0;
+
                        /* Bias balancing toward cpus of our domain */
                        if (local_group)
                                load = target_load(i, load_idx);
@@ -1933,7 +1960,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
                group = group->next;
        } while (group != sd->groups);
 
-       if (!busiest || this_load >= max_load)
+       if (!busiest || this_load >= max_load || max_load <= SCHED_LOAD_SCALE)
                goto out_balanced;
 
        avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
@@ -1953,8 +1980,12 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
         * by pulling tasks to us.  Be careful of negative numbers as they'll
         * appear as very large values with unsigned longs.
         */
+
+       /* Don't want to pull so many tasks that a group would go idle */
+       max_pull = min(max_load - avg_load, max_load - SCHED_LOAD_SCALE);
+
        /* How much load to actually move to equalise the imbalance */
-       *imbalance = min((max_load - avg_load) * busiest->cpu_power,
+       *imbalance = min(max_pull * busiest->cpu_power,
                                (avg_load - this_load) * this->cpu_power)
                        / SCHED_LOAD_SCALE;
 
@@ -2051,11 +2082,14 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
        unsigned long imbalance;
        int nr_moved, all_pinned = 0;
        int active_balance = 0;
+       int sd_idle = 0;
+
+       if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER)
+               sd_idle = 1;
 
-       spin_lock(&this_rq->lock);
        schedstat_inc(sd, lb_cnt[idle]);
 
-       group = find_busiest_group(sd, this_cpu, &imbalance, idle);
+       group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle);
        if (!group) {
                schedstat_inc(sd, lb_nobusyg[idle]);
                goto out_balanced;
@@ -2079,19 +2113,16 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
                 * still unbalanced. nr_moved simply stays zero, so it is
                 * correctly treated as an imbalance.
                 */
-               double_lock_balance(this_rq, busiest);
+               double_rq_lock(this_rq, busiest);
                nr_moved = move_tasks(this_rq, this_cpu, busiest,
-                                               imbalance, sd, idle,
-                                               &all_pinned);
-               spin_unlock(&busiest->lock);
+                                       imbalance, sd, idle, &all_pinned);
+               double_rq_unlock(this_rq, busiest);
 
                /* All tasks on this runqueue were pinned by CPU affinity */
                if (unlikely(all_pinned))
                        goto out_balanced;
        }
 
-       spin_unlock(&this_rq->lock);
-
        if (!nr_moved) {
                schedstat_inc(sd, lb_failed[idle]);
                sd->nr_balance_failed++;
@@ -2099,6 +2130,16 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
                if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
 
                        spin_lock(&busiest->lock);
+
+                       /* don't kick the migration_thread, if the curr
+                        * task on busiest cpu can't be moved to this_cpu
+                        */
+                       if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) {
+                               spin_unlock(&busiest->lock);
+                               all_pinned = 1;
+                               goto out_one_pinned;
+                       }
+
                        if (!busiest->active_balance) {
                                busiest->active_balance = 1;
                                busiest->push_cpu = this_cpu;
@@ -2131,19 +2172,23 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
                        sd->balance_interval *= 2;
        }
 
+       if (!nr_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER)
+               return -1;
        return nr_moved;
 
 out_balanced:
-       spin_unlock(&this_rq->lock);
-
        schedstat_inc(sd, lb_balanced[idle]);
 
        sd->nr_balance_failed = 0;
+
+out_one_pinned:
        /* tune up the balancing interval */
        if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
                        (sd->balance_interval < sd->max_interval))
                sd->balance_interval *= 2;
 
+       if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER)
+               return -1;
        return 0;
 }
 
@@ -2161,9 +2206,13 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
        runqueue_t *busiest = NULL;
        unsigned long imbalance;
        int nr_moved = 0;
+       int sd_idle = 0;
+
+       if (sd->flags & SD_SHARE_CPUPOWER)
+               sd_idle = 1;
 
        schedstat_inc(sd, lb_cnt[NEWLY_IDLE]);
-       group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE);
+       group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE, &sd_idle);
        if (!group) {
                schedstat_inc(sd, lb_nobusyg[NEWLY_IDLE]);
                goto out_balanced;
@@ -2177,22 +2226,30 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
 
        BUG_ON(busiest == this_rq);
 
-       /* Attempt to move tasks */
-       double_lock_balance(this_rq, busiest);
-
        schedstat_add(sd, lb_imbalance[NEWLY_IDLE], imbalance);
-       nr_moved = move_tasks(this_rq, this_cpu, busiest,
+
+       nr_moved = 0;
+       if (busiest->nr_running > 1) {
+               /* Attempt to move tasks */
+               double_lock_balance(this_rq, busiest);
+               nr_moved = move_tasks(this_rq, this_cpu, busiest,
                                        imbalance, sd, NEWLY_IDLE, NULL);
-       if (!nr_moved)
+               spin_unlock(&busiest->lock);
+       }
+
+       if (!nr_moved) {
                schedstat_inc(sd, lb_failed[NEWLY_IDLE]);
-       else
+               if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER)
+                       return -1;
+       } else
                sd->nr_balance_failed = 0;
 
-       spin_unlock(&busiest->lock);
        return nr_moved;
 
 out_balanced:
        schedstat_inc(sd, lb_balanced[NEWLY_IDLE]);
+       if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER)
+               return -1;
        sd->nr_balance_failed = 0;
        return 0;
 }
@@ -2317,7 +2374,11 @@ static void rebalance_tick(int this_cpu, runqueue_t *this_rq,
 
                if (j - sd->last_balance >= interval) {
                        if (load_balance(this_cpu, this_rq, sd, idle)) {
-                               /* We've pulled tasks over so no longer idle */
+                               /*
+                                * We've pulled tasks over so either we're no
+                                * longer idle, or one of our SMT siblings is
+                                * not idle.
+                                */
                                idle = NOT_IDLE;
                        }
                        sd->last_balance += interval;
@@ -2576,6 +2637,13 @@ out:
 }
 
 #ifdef CONFIG_SCHED_SMT
+static inline void wakeup_busy_runqueue(runqueue_t *rq)
+{
+       /* If an SMT runqueue is sleeping due to priority reasons wake it up */
+       if (rq->curr == rq->idle && rq->nr_running)
+               resched_task(rq->idle);
+}
+
 static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
 {
        struct sched_domain *tmp, *sd = NULL;
@@ -2609,12 +2677,7 @@ static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
        for_each_cpu_mask(i, sibling_map) {
                runqueue_t *smt_rq = cpu_rq(i);
 
-               /*
-                * If an SMT sibling task is sleeping due to priority
-                * reasons wake it up now.
-                */
-               if (smt_rq->curr == smt_rq->idle && smt_rq->nr_running)
-                       resched_task(smt_rq->idle);
+               wakeup_busy_runqueue(smt_rq);
        }
 
        for_each_cpu_mask(i, sibling_map)
@@ -2625,6 +2688,16 @@ static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
         */
 }
 
+/*
+ * number of 'lost' timeslices this task won't be able to fully
+ * utilize, if another task runs on a sibling. This models the
+ * slowdown effect of other tasks running on siblings:
+ */
+static inline unsigned long smt_slice(task_t *p, struct sched_domain *sd)
+{
+       return p->time_slice * (100 - sd->per_cpu_gain) / 100;
+}
+
 static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
 {
        struct sched_domain *tmp, *sd = NULL;
@@ -2668,6 +2741,10 @@ static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
                runqueue_t *smt_rq = cpu_rq(i);
                task_t *smt_curr = smt_rq->curr;
 
+               /* Kernel threads do not participate in dependent sleeping */
+               if (!p->mm || !smt_curr->mm || rt_task(p))
+                       goto check_smt_task;
+
                /*
                 * If a user task with lower static priority than the
                 * running task on the SMT sibling is trying to schedule,
@@ -2676,21 +2753,45 @@ static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
                 * task from using an unfair proportion of the
                 * physical cpu's resources. -ck
                 */
-               if (((smt_curr->time_slice * (100 - sd->per_cpu_gain) / 100) >
-                       task_timeslice(p) || rt_task(smt_curr)) &&
-                       p->mm && smt_curr->mm && !rt_task(p))
-                               ret = 1;
+               if (rt_task(smt_curr)) {
+                       /*
+                        * With real time tasks we run non-rt tasks only
+                        * per_cpu_gain% of the time.
+                        */
+                       if ((jiffies % DEF_TIMESLICE) >
+                               (sd->per_cpu_gain * DEF_TIMESLICE / 100))
+                                       ret = 1;
+               } else
+                       if (smt_curr->static_prio < p->static_prio &&
+                               !TASK_PREEMPTS_CURR(p, smt_rq) &&
+                               smt_slice(smt_curr, sd) > task_timeslice(p))
+                                       ret = 1;
+
+check_smt_task:
+               if ((!smt_curr->mm && smt_curr != smt_rq->idle) ||
+                       rt_task(smt_curr))
+                               continue;
+               if (!p->mm) {
+                       wakeup_busy_runqueue(smt_rq);
+                       continue;
+               }
 
                /*
-                * Reschedule a lower priority task on the SMT sibling,
-                * or wake it up if it has been put to sleep for priority
-                * reasons.
+                * Reschedule a lower priority task on the SMT sibling for
+                * it to be put to sleep, or wake it up if it has been put to
+                * sleep for priority reasons to see if it should run now.
                 */
-               if ((((p->time_slice * (100 - sd->per_cpu_gain) / 100) >
-                       task_timeslice(smt_curr) || rt_task(p)) &&
-                       smt_curr->mm && p->mm && !rt_task(smt_curr)) ||
-                       (smt_curr == smt_rq->idle && smt_rq->nr_running))
-                               resched_task(smt_curr);
+               if (rt_task(p)) {
+                       if ((jiffies % DEF_TIMESLICE) >
+                               (sd->per_cpu_gain * DEF_TIMESLICE / 100))
+                                       resched_task(smt_curr);
+               } else {
+                       if (TASK_PREEMPTS_CURR(p, smt_rq) &&
+                               smt_slice(p, sd) > task_timeslice(smt_curr))
+                                       resched_task(smt_curr);
+                       else
+                               wakeup_busy_runqueue(smt_rq);
+               }
        }
 out_unlock:
        for_each_cpu_mask(i, sibling_map)
@@ -3016,7 +3117,8 @@ need_resched:
 
 #endif /* CONFIG_PREEMPT */
 
-int default_wake_function(wait_queue_t *curr, unsigned mode, int sync, void *key)
+int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
+                         void *key)
 {
        task_t *p = curr->private;
        return try_to_wake_up(p, mode, sync);
@@ -3058,7 +3160,7 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
  * @key: is directly passed to the wakeup function
  */
 void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode,
-                               int nr_exclusive, void *key)
+                       int nr_exclusive, void *key)
 {
        unsigned long flags;
 
@@ -3090,7 +3192,8 @@ void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
  *
  * On UP it can prevent extra preemption.
  */
-void fastcall __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+void fastcall
+__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
 {
        unsigned long flags;
        int sync = 1;
@@ -3281,7 +3384,8 @@ void fastcall __sched interruptible_sleep_on(wait_queue_head_t *q)
 
 EXPORT_SYMBOL(interruptible_sleep_on);
 
-long fastcall __sched interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
+long fastcall __sched
+interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
 {
        SLEEP_ON_VAR
 
@@ -3500,7 +3604,8 @@ static void __setscheduler(struct task_struct *p, int policy, int prio)
  * @policy: new policy.
  * @param: structure containing the new RT priority.
  */
-int sched_setscheduler(struct task_struct *p, int policy, struct sched_param *param)
+int sched_setscheduler(struct task_struct *p, int policy,
+                      struct sched_param *param)
 {
        int retval;
        int oldprio, oldpolicy = -1;
@@ -3520,7 +3625,7 @@ recheck:
         * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL is 0.
         */
        if (param->sched_priority < 0 ||
-           (p->mm &&  param->sched_priority > MAX_USER_RT_PRIO-1) ||
+           (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
            (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
                return -EINVAL;
        if ((policy == SCHED_NORMAL) != (param->sched_priority == 0))
@@ -3583,7 +3688,8 @@ recheck:
 }
 EXPORT_SYMBOL_GPL(sched_setscheduler);
 
-static int do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
+static int
+do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
 {
        int retval;
        struct sched_param lparam;
@@ -3850,7 +3956,7 @@ asmlinkage long sys_sched_yield(void)
        if (rt_task(current))
                target = rq->active;
 
-       if (current->array->nr_active == 1) {
+       if (array->nr_active == 1) {
                schedstat_inc(rq, yld_act_empty);
                if (!rq->expired->nr_active)
                        schedstat_inc(rq, yld_both_empty);
@@ -3914,7 +4020,7 @@ EXPORT_SYMBOL(cond_resched);
  * operations here to prevent schedule() from being called twice (once via
  * spin_unlock(), once by hand).
  */
-int cond_resched_lock(spinlock_t * lock)
+int cond_resched_lock(spinlock_t *lock)
 {
        int ret = 0;
 
@@ -4097,7 +4203,7 @@ static inline struct task_struct *younger_sibling(struct task_struct *p)
        return list_entry(p->sibling.next,struct task_struct,sibling);
 }
 
-static void show_task(task_t * p)
+static void show_task(task_t *p)
 {
        task_t *relative;
        unsigned state;
@@ -4123,7 +4229,7 @@ static void show_task(task_t * p)
 #endif
 #ifdef CONFIG_DEBUG_STACK_USAGE
        {
-               unsigned long * n = (unsigned long *) (p->thread_info+1);
+               unsigned long *n = (unsigned long *) (p->thread_info+1);
                while (!*n)
                        n++;
                free = (unsigned long) n - (unsigned long)(p->thread_info+1);
@@ -4332,7 +4438,7 @@ out:
  * thread migration by bumping thread off CPU then 'pushing' onto
  * another runqueue.
  */
-static int migration_thread(void * data)
+static int migration_thread(void *data)
 {
        runqueue_t *rq;
        int cpu = (long)data;
index 4980a073237ff45ed28c39b38225c40923da0848..b92c3c9f8b9a24529e18eb2a1a1bf75f87840f10 100644 (file)
@@ -2221,8 +2221,7 @@ sys_rt_sigtimedwait(const sigset_t __user *uthese,
                        recalc_sigpending();
                        spin_unlock_irq(&current->sighand->siglock);
 
-                       current->state = TASK_INTERRUPTIBLE;
-                       timeout = schedule_timeout(timeout);
+                       timeout = schedule_timeout_interruptible(timeout);
 
                        try_to_freeze();
                        spin_lock_irq(&current->sighand->siglock);
index 0c3f9d8bbe17b3bd00a1c4a77e941bbebc90e226..0375fcd5921df5fe390ec6a3f1c1ce402d240f80 100644 (file)
@@ -3,7 +3,10 @@
  *
  * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
  *
- * Copyright (2004) Ingo Molnar
+ * Copyright (2004, 2005) Ingo Molnar
+ *
+ * This file contains the spinlock/rwlock implementations for the
+ * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
  */
 
 #include <linux/config.h>
  * Generic declaration of the raw read_trylock() function,
  * architectures are supposed to optimize this:
  */
-int __lockfunc generic_raw_read_trylock(rwlock_t *lock)
+int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock)
 {
-       _raw_read_lock(lock);
+       __raw_read_lock(lock);
        return 1;
 }
-EXPORT_SYMBOL(generic_raw_read_trylock);
+EXPORT_SYMBOL(generic__raw_read_trylock);
 
 int __lockfunc _spin_trylock(spinlock_t *lock)
 {
@@ -57,7 +60,7 @@ int __lockfunc _write_trylock(rwlock_t *lock)
 }
 EXPORT_SYMBOL(_write_trylock);
 
-#ifndef CONFIG_PREEMPT
+#if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP)
 
 void __lockfunc _read_lock(rwlock_t *lock)
 {
@@ -72,7 +75,7 @@ unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
 
        local_irq_save(flags);
        preempt_disable();
-       _raw_spin_lock_flags(lock, flags);
+       _raw_spin_lock_flags(lock, &flags);
        return flags;
 }
 EXPORT_SYMBOL(_spin_lock_irqsave);
index 13e2b513be019bf0b8b6b9218e4f268fff95f58a..f4152fcd9f8effecb04229c6e3a239576e7e09af 100644 (file)
@@ -1154,6 +1154,20 @@ fastcall signed long __sched schedule_timeout(signed long timeout)
 
 EXPORT_SYMBOL(schedule_timeout);
 
+signed long __sched schedule_timeout_interruptible(signed long timeout)
+{
+       set_current_state(TASK_INTERRUPTIBLE);
+       return schedule_timeout(timeout);
+}
+EXPORT_SYMBOL(schedule_timeout_interruptible);
+
+signed long __sched schedule_timeout_uninterruptible(signed long timeout)
+{
+       set_current_state(TASK_UNINTERRUPTIBLE);
+       return schedule_timeout(timeout);
+}
+EXPORT_SYMBOL(schedule_timeout_uninterruptible);
+
 /* Thread ID - the internal kernel "pid" */
 asmlinkage long sys_gettid(void)
 {
@@ -1170,8 +1184,7 @@ static long __sched nanosleep_restart(struct restart_block *restart)
        if (!time_after(expire, now))
                return 0;
 
-       current->state = TASK_INTERRUPTIBLE;
-       expire = schedule_timeout(expire - now);
+       expire = schedule_timeout_interruptible(expire - now);
 
        ret = 0;
        if (expire) {
@@ -1199,8 +1212,7 @@ asmlinkage long sys_nanosleep(struct timespec __user *rqtp, struct timespec __us
                return -EINVAL;
 
        expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec);
-       current->state = TASK_INTERRUPTIBLE;
-       expire = schedule_timeout(expire);
+       expire = schedule_timeout_interruptible(expire);
 
        ret = 0;
        if (expire) {
@@ -1598,10 +1610,8 @@ void msleep(unsigned int msecs)
 {
        unsigned long timeout = msecs_to_jiffies(msecs) + 1;
 
-       while (timeout) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               timeout = schedule_timeout(timeout);
-       }
+       while (timeout)
+               timeout = schedule_timeout_uninterruptible(timeout);
 }
 
 EXPORT_SYMBOL(msleep);
@@ -1614,10 +1624,8 @@ unsigned long msleep_interruptible(unsigned int msecs)
 {
        unsigned long timeout = msecs_to_jiffies(msecs) + 1;
 
-       while (timeout && !signal_pending(current)) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               timeout = schedule_timeout(timeout);
-       }
+       while (timeout && !signal_pending(current))
+               timeout = schedule_timeout_interruptible(timeout);
        return jiffies_to_msecs(timeout);
 }
 
index d9c38ba05e7bc2fcb3cbbca915bf43c51bd580fd..44a46750690ae3af65f338eefb2635ee4f9b70f5 100644 (file)
@@ -16,6 +16,7 @@ CFLAGS_kobject.o += -DDEBUG
 CFLAGS_kobject_uevent.o += -DDEBUG
 endif
 
+obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
 lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o
index 6658d81e1836517107b5b8b39aa0c7e65032ec6e..2377af057d099ebd628bd9f7039cd4f54c9bf229 100644 (file)
@@ -25,8 +25,6 @@
  * this is trivially done efficiently using a load-locked
  * store-conditional approach, for example.
  */
-
-#ifndef ATOMIC_DEC_AND_LOCK
 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
        spin_lock(lock);
@@ -37,4 +35,3 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 }
 
 EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif
index bd2bc5d887b815e261ff82c4167e410966b659d3..cb5490ec00f20f4fea3640275192f20d942a7fba 100644 (file)
@@ -177,8 +177,7 @@ static inline void __lock_kernel(void)
 
 static inline void __unlock_kernel(void)
 {
-       _raw_spin_unlock(&kernel_flag);
-       preempt_enable();
+       spin_unlock(&kernel_flag);
 }
 
 /*
index b972dd29289d6669fd353dcd2b3118cd93ab87cb..6a8bc6e06431eec49c1af2f34ec42d7d100e4e0b 100644 (file)
@@ -110,7 +110,7 @@ radix_tree_node_free(struct radix_tree_node *node)
  * success, return zero, with preemption disabled.  On error, return -ENOMEM
  * with preemption not disabled.
  */
-int radix_tree_preload(int gfp_mask)
+int radix_tree_preload(unsigned int __nocast gfp_mask)
 {
        struct radix_tree_preload *rtp;
        struct radix_tree_node *node;
index b73dbb0e7c83a4721fafbe4401c551a02e3225f9..ddc4d35df289cff02a996ba41d0d560ce9ccdead 100644 (file)
@@ -6,15 +6,16 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/sort.h>
 
-void u32_swap(void *a, void *b, int size)
+static void u32_swap(void *a, void *b, int size)
 {
        u32 t = *(u32 *)a;
        *(u32 *)a = *(u32 *)b;
        *(u32 *)b = t;
 }
 
-void generic_swap(void *a, void *b, int size)
+static void generic_swap(void *a, void *b, int size)
 {
        char t;
 
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
new file mode 100644 (file)
index 0000000..906ad10
--- /dev/null
@@ -0,0 +1,257 @@
+/*
+ * Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ *
+ * This file contains the spinlock/rwlock implementations for
+ * DEBUG_SPINLOCK.
+ */
+
+#include <linux/config.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+
+static void spin_bug(spinlock_t *lock, const char *msg)
+{
+       static long print_once = 1;
+       struct task_struct *owner = NULL;
+
+       if (xchg(&print_once, 0)) {
+               if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
+                       owner = lock->owner;
+               printk("BUG: spinlock %s on CPU#%d, %s/%d\n",
+                       msg, smp_processor_id(), current->comm, current->pid);
+               printk(" lock: %p, .magic: %08x, .owner: %s/%d, .owner_cpu: %d\n",
+                       lock, lock->magic,
+                       owner ? owner->comm : "<none>",
+                       owner ? owner->pid : -1,
+                       lock->owner_cpu);
+               dump_stack();
+#ifdef CONFIG_SMP
+               /*
+                * We cannot continue on SMP:
+                */
+//             panic("bad locking");
+#endif
+       }
+}
+
+#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
+
+static inline void debug_spin_lock_before(spinlock_t *lock)
+{
+       SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
+       SPIN_BUG_ON(lock->owner == current, lock, "recursion");
+       SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
+                                                       lock, "cpu recursion");
+}
+
+static inline void debug_spin_lock_after(spinlock_t *lock)
+{
+       lock->owner_cpu = raw_smp_processor_id();
+       lock->owner = current;
+}
+
+static inline void debug_spin_unlock(spinlock_t *lock)
+{
+       SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
+       SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked");
+       SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
+       SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
+                                                       lock, "wrong CPU");
+       lock->owner = SPINLOCK_OWNER_INIT;
+       lock->owner_cpu = -1;
+}
+
+static void __spin_lock_debug(spinlock_t *lock)
+{
+       int print_once = 1;
+       u64 i;
+
+       for (;;) {
+               for (i = 0; i < loops_per_jiffy * HZ; i++) {
+                       cpu_relax();
+                       if (__raw_spin_trylock(&lock->raw_lock))
+                               return;
+               }
+               /* lockup suspected: */
+               if (print_once) {
+                       print_once = 0;
+                       printk("BUG: spinlock lockup on CPU#%d, %s/%d, %p\n",
+                               smp_processor_id(), current->comm, current->pid,
+                                       lock);
+                       dump_stack();
+               }
+       }
+}
+
+void _raw_spin_lock(spinlock_t *lock)
+{
+       debug_spin_lock_before(lock);
+       if (unlikely(!__raw_spin_trylock(&lock->raw_lock)))
+               __spin_lock_debug(lock);
+       debug_spin_lock_after(lock);
+}
+
+int _raw_spin_trylock(spinlock_t *lock)
+{
+       int ret = __raw_spin_trylock(&lock->raw_lock);
+
+       if (ret)
+               debug_spin_lock_after(lock);
+#ifndef CONFIG_SMP
+       /*
+        * Must not happen on UP:
+        */
+       SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
+#endif
+       return ret;
+}
+
+void _raw_spin_unlock(spinlock_t *lock)
+{
+       debug_spin_unlock(lock);
+       __raw_spin_unlock(&lock->raw_lock);
+}
+
+static void rwlock_bug(rwlock_t *lock, const char *msg)
+{
+       static long print_once = 1;
+
+       if (xchg(&print_once, 0)) {
+               printk("BUG: rwlock %s on CPU#%d, %s/%d, %p\n", msg,
+                       smp_processor_id(), current->comm, current->pid, lock);
+               dump_stack();
+#ifdef CONFIG_SMP
+               /*
+                * We cannot continue on SMP:
+                */
+               panic("bad locking");
+#endif
+       }
+}
+
+#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
+
+static void __read_lock_debug(rwlock_t *lock)
+{
+       int print_once = 1;
+       u64 i;
+
+       for (;;) {
+               for (i = 0; i < loops_per_jiffy * HZ; i++) {
+                       cpu_relax();
+                       if (__raw_read_trylock(&lock->raw_lock))
+                               return;
+               }
+               /* lockup suspected: */
+               if (print_once) {
+                       print_once = 0;
+                       printk("BUG: read-lock lockup on CPU#%d, %s/%d, %p\n",
+                               smp_processor_id(), current->comm, current->pid,
+                                       lock);
+                       dump_stack();
+               }
+       }
+}
+
+void _raw_read_lock(rwlock_t *lock)
+{
+       RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
+       if (unlikely(!__raw_read_trylock(&lock->raw_lock)))
+               __read_lock_debug(lock);
+}
+
+int _raw_read_trylock(rwlock_t *lock)
+{
+       int ret = __raw_read_trylock(&lock->raw_lock);
+
+#ifndef CONFIG_SMP
+       /*
+        * Must not happen on UP:
+        */
+       RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
+#endif
+       return ret;
+}
+
+void _raw_read_unlock(rwlock_t *lock)
+{
+       RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
+       __raw_read_unlock(&lock->raw_lock);
+}
+
+static inline void debug_write_lock_before(rwlock_t *lock)
+{
+       RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
+       RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
+       RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
+                                                       lock, "cpu recursion");
+}
+
+static inline void debug_write_lock_after(rwlock_t *lock)
+{
+       lock->owner_cpu = raw_smp_processor_id();
+       lock->owner = current;
+}
+
+static inline void debug_write_unlock(rwlock_t *lock)
+{
+       RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
+       RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
+       RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
+                                                       lock, "wrong CPU");
+       lock->owner = SPINLOCK_OWNER_INIT;
+       lock->owner_cpu = -1;
+}
+
+static void __write_lock_debug(rwlock_t *lock)
+{
+       int print_once = 1;
+       u64 i;
+
+       for (;;) {
+               for (i = 0; i < loops_per_jiffy * HZ; i++) {
+                       cpu_relax();
+                       if (__raw_write_trylock(&lock->raw_lock))
+                               return;
+               }
+               /* lockup suspected: */
+               if (print_once) {
+                       print_once = 0;
+                       printk("BUG: write-lock lockup on CPU#%d, %s/%d, %p\n",
+                               smp_processor_id(), current->comm, current->pid,
+                                       lock);
+                       dump_stack();
+               }
+       }
+}
+
+void _raw_write_lock(rwlock_t *lock)
+{
+       debug_write_lock_before(lock);
+       if (unlikely(!__raw_write_trylock(&lock->raw_lock)))
+               __write_lock_debug(lock);
+       debug_write_lock_after(lock);
+}
+
+int _raw_write_trylock(rwlock_t *lock)
+{
+       int ret = __raw_write_trylock(&lock->raw_lock);
+
+       if (ret)
+               debug_write_lock_after(lock);
+#ifndef CONFIG_SMP
+       /*
+        * Must not happen on UP:
+        */
+       RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
+#endif
+       return ret;
+}
+
+void _raw_write_unlock(rwlock_t *lock)
+{
+       debug_write_unlock(lock);
+       __raw_write_unlock(&lock->raw_lock);
+}
index 88611928e71fc928a89b5b319044d759ea5f612d..b5346576e58d252ea63224606bd2564cb2f088fa 100644 (file)
 #include <asm/uaccess.h>
 #include <asm/mman.h>
 
+static ssize_t
+generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
+       loff_t offset, unsigned long nr_segs);
+
 /*
  * Shared mappings implemented 30.11.1994. It's not fully working yet,
  * though.
@@ -301,8 +305,9 @@ EXPORT_SYMBOL(sync_page_range);
  * as it forces O_SYNC writers to different parts of the same file
  * to be serialised right until io completion.
  */
-int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
-                       loff_t pos, size_t count)
+static int sync_page_range_nolock(struct inode *inode,
+                                 struct address_space *mapping,
+                                 loff_t pos, size_t count)
 {
        pgoff_t start = pos >> PAGE_CACHE_SHIFT;
        pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
@@ -317,7 +322,6 @@ int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
                ret = wait_on_page_writeback_range(mapping, start, end);
        return ret;
 }
-EXPORT_SYMBOL(sync_page_range_nolock);
 
 /**
  * filemap_fdatawait - walk the list of under-writeback pages of the given
@@ -2008,7 +2012,7 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
 }
 EXPORT_SYMBOL(generic_file_buffered_write);
 
-ssize_t
+static ssize_t
 __generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
                                unsigned long nr_segs, loff_t *ppos)
 {
@@ -2108,7 +2112,7 @@ generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
        return ret;
 }
 
-ssize_t
+static ssize_t
 __generic_file_write_nolock(struct file *file, const struct iovec *iov,
                                unsigned long nr_segs, loff_t *ppos)
 {
@@ -2229,7 +2233,7 @@ EXPORT_SYMBOL(generic_file_writev);
  * Called under i_sem for writes to S_ISREG files.   Returns -EIO if something
  * went wrong during pagecache shootdown.
  */
-ssize_t
+static ssize_t
 generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
        loff_t offset, unsigned long nr_segs)
 {
@@ -2264,4 +2268,3 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
        }
        return retval;
 }
-EXPORT_SYMBOL_GPL(generic_file_direct_IO);
index 788a628103405e955a7d28f9935be690eb6d021a..ae8161f1f4595bd4d58afa0b20ecf4419ff23e71 100644 (file)
@@ -2225,7 +2225,7 @@ void update_mem_hiwater(struct task_struct *tsk)
 #if !defined(__HAVE_ARCH_GATE_AREA)
 
 #if defined(AT_SYSINFO_EHDR)
-struct vm_area_struct gate_vma;
+static struct vm_area_struct gate_vma;
 
 static int __init gate_vma_init(void)
 {
index 5ec8da12cfd986523bb1968ffa9f2ea0be064868..ac3bf33e53701171bcf4b5f6ba8b1817bdac60f5 100644 (file)
@@ -300,6 +300,5 @@ retry:
         * Give "p" a good chance of killing itself before we
         * retry to allocate memory.
         */
-       __set_current_state(TASK_INTERRUPTIBLE);
-       schedule_timeout(1);
+       schedule_timeout_interruptible(1);
 }
index 3974fd81d27c0de38a12a299e6f6b6dd2645cbf7..c5823c395f7145f2a66ef3ffd11af836c294845f 100644 (file)
@@ -335,7 +335,7 @@ static inline void free_pages_check(const char *function, struct page *page)
 /*
  * Frees a list of pages. 
  * Assumes all pages on list are in same zone, and of same order.
- * count is the number of pages to free, or 0 for all on the list.
+ * count is the number of pages to free.
  *
  * If the zone was previously in an "all pages pinned" state then look to
  * see if this freeing clears that state.
index 05a391059fe1d92b58a85fb5cb1e3039f6fa6468..9e876d6dfad97f2c4ab60a881edcb91d528c1c3c 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1720,7 +1720,7 @@ next:
        cachep->objsize = size;
 
        if (flags & CFLGS_OFF_SLAB)
-               cachep->slabp_cache = kmem_find_general_cachep(slab_size,0);
+               cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
        cachep->ctor = ctor;
        cachep->dtor = dtor;
        cachep->name = name;
@@ -2839,7 +2839,7 @@ out:
  * New and improved: it will now make sure that the object gets
  * put on the correct node list so that there is no false sharing.
  */
-void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int nodeid)
+void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid)
 {
        unsigned long save_flags;
        void *ptr;
index 029e56eb5e77c342559954e7c18c710a5c51092e..adbc2b426c2f12aeaf1160ac0247d95bcf9a35bc 100644 (file)
@@ -67,8 +67,8 @@ void show_swap_cache_info(void)
  * __add_to_swap_cache resembles add_to_page_cache on swapper_space,
  * but sets SwapCache flag and private instead of mapping and index.
  */
-static int __add_to_swap_cache(struct page *page,
-               swp_entry_t entry, int gfp_mask)
+static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
+                              unsigned int __nocast gfp_mask)
 {
        int error;
 
index 4b6e8bf986bcad3080c23b9ad2424d24479d2f35..0184f510aacefd5cdce18b18a2d2f91008e255db 100644 (file)
@@ -1153,8 +1153,7 @@ asmlinkage long sys_swapoff(const char __user * specialfile)
        p->highest_bit = 0;             /* cuts scans short */
        while (p->flags >= SWP_SCANNING) {
                spin_unlock(&swap_lock);
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(1);
+               schedule_timeout_uninterruptible(1);
                spin_lock(&swap_lock);
        }
 
index ea30012dd19545cb925a94456bedb6e88addb7b2..e05f4f955eeee8d0bd68eae5ccfab3bc674bec8b 100644 (file)
@@ -78,13 +78,11 @@ static struct dccp_li_hist *ccid3_li_hist;
 
 static int ccid3_init(struct sock *sk)
 {
-       ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk);
        return 0;
 }
 
 static void ccid3_exit(struct sock *sk)
 {
-       ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk);
 }
 
 /* TFRC sender states */
@@ -287,14 +285,14 @@ static int ccid3_hc_tx_send_packet(struct sock *sk,
        long delay;
        int rc = -ENOTCONN;
 
-       /* Check if pure ACK or Terminating*/
+       BUG_ON(hctx == NULL || hctx->ccid3hctx_state == TFRC_SSTATE_TERM);
 
+       /* Check if pure ACK or Terminating */
        /*
         * XXX: We only call this function for DATA and DATAACK, on, these
         * packets can have zero length, but why the comment about "pure ACK"?
         */
-       if (hctx == NULL || len == 0 ||
-           hctx->ccid3hctx_state == TFRC_SSTATE_TERM)
+       if (unlikely(len == 0))
                goto out;
 
        /* See if last packet allocated was not sent */
@@ -304,10 +302,10 @@ static int ccid3_hc_tx_send_packet(struct sock *sk,
                                                    SLAB_ATOMIC);
 
                rc = -ENOBUFS;
-               if (new_packet == NULL) {
-                       ccid3_pr_debug("%s, sk=%p, not enough mem to add "
-                                      "to history, send refused\n",
-                                      dccp_role(sk), sk);
+               if (unlikely(new_packet == NULL)) {
+                       LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, not enough "
+                                      "mem to add to history, send refused\n",
+                                      __FUNCTION__, dccp_role(sk), sk);
                        goto out;
                }
 
@@ -318,9 +316,6 @@ static int ccid3_hc_tx_send_packet(struct sock *sk,
 
        switch (hctx->ccid3hctx_state) {
        case TFRC_SSTATE_NO_SENT:
-               ccid3_pr_debug("%s, sk=%p, first packet(%llu)\n",
-                              dccp_role(sk), sk, dp->dccps_gss);
-
                hctx->ccid3hctx_no_feedback_timer.function = ccid3_hc_tx_no_feedback_timer;
                hctx->ccid3hctx_no_feedback_timer.data     = (unsigned long)sk;
                sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
@@ -328,7 +323,7 @@ static int ccid3_hc_tx_send_packet(struct sock *sk,
                hctx->ccid3hctx_last_win_count   = 0;
                hctx->ccid3hctx_t_last_win_count = now;
                ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK);
-               hctx->ccid3hctx_t_ipi = TFRC_INITIAL_TIMEOUT;
+               hctx->ccid3hctx_t_ipi = TFRC_INITIAL_IPI;
 
                /* Set nominal send time for initial packet */
                hctx->ccid3hctx_t_nom = now;
@@ -341,7 +336,6 @@ static int ccid3_hc_tx_send_packet(struct sock *sk,
        case TFRC_SSTATE_FBACK:
                delay = (timeval_delta(&now, &hctx->ccid3hctx_t_nom) -
                         hctx->ccid3hctx_delta);
-               ccid3_pr_debug("send_packet delay=%ld\n", delay);
                delay /= -1000;
                /* divide by -1000 is to convert to ms and get sign right */
                rc = delay > 0 ? delay : 0;
@@ -371,13 +365,7 @@ static void ccid3_hc_tx_packet_sent(struct sock *sk, int more, int len)
        struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
        struct timeval now;
 
-       BUG_ON(hctx == NULL);
-
-       if (hctx->ccid3hctx_state == TFRC_SSTATE_TERM) {
-               ccid3_pr_debug("%s, sk=%p, while state is TFRC_SSTATE_TERM!\n",
-                              dccp_role(sk), sk);
-               return;
-       }
+       BUG_ON(hctx == NULL || hctx->ccid3hctx_state == TFRC_SSTATE_TERM);
 
        dccp_timestamp(sk, &now);
 
@@ -387,14 +375,14 @@ static void ccid3_hc_tx_packet_sent(struct sock *sk, int more, int len)
                struct dccp_tx_hist_entry *packet;
 
                packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist);
-               if (packet == NULL) {
-                       printk(KERN_CRIT "%s: packet doesn't exists in "
-                                        "history!\n", __FUNCTION__);
+               if (unlikely(packet == NULL)) {
+                       LIMIT_NETDEBUG(KERN_WARNING "%s: packet doesn't "
+                                      "exists in history!\n", __FUNCTION__);
                        return;
                }
-               if (packet->dccphtx_sent) {
-                       printk(KERN_CRIT "%s: no unsent packet in history!\n",
-                              __FUNCTION__);
+               if (unlikely(packet->dccphtx_sent)) {
+                       LIMIT_NETDEBUG(KERN_WARNING "%s: no unsent packet in "
+                                      "history!\n", __FUNCTION__);
                        return;
                }
                packet->dccphtx_tstamp = now;
@@ -465,14 +453,7 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
        u32 x_recv;
        u32 r_sample;
 
-       if (hctx == NULL)
-               return;
-
-       if (hctx->ccid3hctx_state == TFRC_SSTATE_TERM) {
-               ccid3_pr_debug("%s, sk=%p, received a packet when "
-                              "terminating!\n", dccp_role(sk), sk);
-               return;
-       }
+       BUG_ON(hctx == NULL || hctx->ccid3hctx_state == TFRC_SSTATE_TERM);
 
        /* we are only interested in ACKs */
        if (!(DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK ||
@@ -496,12 +477,12 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
                /* get t_recvdata from history */
                packet = dccp_tx_hist_find_entry(&hctx->ccid3hctx_hist,
                                                 DCCP_SKB_CB(skb)->dccpd_ack_seq);
-               if (packet == NULL) {
-                       ccid3_pr_debug("%s, sk=%p, seqno %llu(%s) does't "
-                                      "exist in history!\n",
-                                      dccp_role(sk), sk,
-                                      DCCP_SKB_CB(skb)->dccpd_ack_seq,
-                                      dccp_packet_name(DCCP_SKB_CB(skb)->dccpd_type));
+               if (unlikely(packet == NULL)) {
+                       LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, seqno "
+                                      "%llu(%s) does't exist in history!\n",
+                                      __FUNCTION__, dccp_role(sk), sk,
+                           (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq,
+                               dccp_packet_name(DCCP_SKB_CB(skb)->dccpd_type));
                        return;
                }
 
@@ -509,8 +490,8 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
                dccp_timestamp(sk, &now);
                r_sample = timeval_delta(&now, &packet->dccphtx_tstamp);
                if (unlikely(r_sample <= t_elapsed))
-                       LIMIT_NETDEBUG(KERN_WARNING
-                                      "%s: r_sample=%uus, t_elapsed=%uus\n",
+                       LIMIT_NETDEBUG(KERN_WARNING "%s: r_sample=%uus, "
+                                      "t_elapsed=%uus\n",
                                       __FUNCTION__, r_sample, t_elapsed);
                else
                        r_sample -= t_elapsed;
@@ -606,10 +587,11 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 
 static void ccid3_hc_tx_insert_options(struct sock *sk, struct sk_buff *skb)
 {
-       struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
+       const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
 
-       if (hctx == NULL || !(sk->sk_state == DCCP_OPEN ||
-                             sk->sk_state == DCCP_PARTOPEN))
+       BUG_ON(hctx == NULL);
+
+       if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN))
                return;
 
         DCCP_SKB_CB(skb)->dccpd_ccval = hctx->ccid3hctx_last_win_count;
@@ -624,8 +606,7 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
        struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
        struct ccid3_options_received *opt_recv;
 
-       if (hctx == NULL)
-               return 0;
+       BUG_ON(hctx == NULL);
 
        opt_recv = &hctx->ccid3hctx_options_received;
 
@@ -639,10 +620,10 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
 
        switch (option) {
        case TFRC_OPT_LOSS_EVENT_RATE:
-               if (len != 4) {
-                       ccid3_pr_debug("%s, sk=%p, invalid len for "
-                                      "TFRC_OPT_LOSS_EVENT_RATE\n",
-                                      dccp_role(sk), sk);
+               if (unlikely(len != 4)) {
+                       LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, invalid "
+                                      "len for TFRC_OPT_LOSS_EVENT_RATE\n",
+                                      __FUNCTION__, dccp_role(sk), sk);
                        rc = -EINVAL;
                } else {
                        opt_recv->ccid3or_loss_event_rate = ntohl(*(u32 *)value);
@@ -660,10 +641,10 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
                               opt_recv->ccid3or_loss_intervals_len);
                break;
        case TFRC_OPT_RECEIVE_RATE:
-               if (len != 4) {
-                       ccid3_pr_debug("%s, sk=%p, invalid len for "
-                                      "TFRC_OPT_RECEIVE_RATE\n",
-                                      dccp_role(sk), sk);
+               if (unlikely(len != 4)) {
+                       LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, invalid "
+                                      "len for TFRC_OPT_RECEIVE_RATE\n",
+                                      __FUNCTION__, dccp_role(sk), sk);
                        rc = -EINVAL;
                } else {
                        opt_recv->ccid3or_receive_rate = ntohl(*(u32 *)value);
@@ -682,8 +663,6 @@ static int ccid3_hc_tx_init(struct sock *sk)
        struct dccp_sock *dp = dccp_sk(sk);
        struct ccid3_hc_tx_sock *hctx;
 
-       ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk);
-
        dp->dccps_hc_tx_ccid_private = kmalloc(sizeof(*hctx), gfp_any());
        if (dp->dccps_hc_tx_ccid_private == NULL)
                return -ENOMEM;
@@ -712,7 +691,6 @@ static void ccid3_hc_tx_exit(struct sock *sk)
        struct dccp_sock *dp = dccp_sk(sk);
        struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
 
-       ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk);
        BUG_ON(hctx == NULL);
 
        ccid3_hc_tx_set_state(sk, TFRC_SSTATE_TERM);
@@ -792,10 +770,10 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk)
        }
 
        packet = dccp_rx_hist_find_data_packet(&hcrx->ccid3hcrx_hist);
-       if (packet == NULL) {
-               printk(KERN_CRIT "%s: %s, sk=%p, no data packet in history!\n",
-                      __FUNCTION__, dccp_role(sk), sk);
-               dump_stack();
+       if (unlikely(packet == NULL)) {
+               LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, no data packet "
+                              "in history!\n",
+                              __FUNCTION__, dccp_role(sk), sk);
                return;
        }
 
@@ -817,11 +795,12 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk)
 
 static void ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
 {
-       struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
+       const struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
        u32 x_recv, pinv;
 
-       if (hcrx == NULL || !(sk->sk_state == DCCP_OPEN ||
-                             sk->sk_state == DCCP_PARTOPEN))
+       BUG_ON(hcrx == NULL);
+
+       if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN))
                return;
 
        DCCP_SKB_CB(skb)->dccpd_ccval = hcrx->ccid3hcrx_last_counter;
@@ -878,17 +857,17 @@ static u32 ccid3_hc_rx_calc_first_li(struct sock *sk)
                }
        }
 
-       if (step == 0) {
-               printk(KERN_CRIT "%s: %s, sk=%p, packet history contains no "
-                                "data packets!\n",
-                      __FUNCTION__, dccp_role(sk), sk);
+       if (unlikely(step == 0)) {
+               LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, packet history "
+                              "contains no data packets!\n",
+                              __FUNCTION__, dccp_role(sk), sk);
                return ~0;
        }
 
-       if (interval == 0) {
-               ccid3_pr_debug("%s, sk=%p, Could not find a win_count "
-                              "interval > 0. Defaulting to 1\n",
-                              dccp_role(sk), sk);
+       if (unlikely(interval == 0)) {
+               LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, Could not find a "
+                              "win_count interval > 0. Defaulting to 1\n",
+                              __FUNCTION__, dccp_role(sk), sk);
                interval = 1;
        }
 found:
@@ -931,8 +910,9 @@ static void ccid3_hc_rx_update_li(struct sock *sk, u64 seq_loss, u8 win_loss)
                if (li_tail == NULL)
                        return;
                li_tail->dccplih_interval = ccid3_hc_rx_calc_first_li(sk);
-       }
-       /* FIXME: find end of interval */
+       } else
+                   LIMIT_NETDEBUG(KERN_WARNING "%s: FIXME: find end of "
+                                  "interval\n", __FUNCTION__);
 }
 
 static void ccid3_hc_rx_detect_loss(struct sock *sk)
@@ -956,10 +936,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
        u32 p_prev, r_sample, t_elapsed;
        int ins;
 
-       if (hcrx == NULL)
-               return;
-
-       BUG_ON(!(hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA ||
+       BUG_ON(hcrx == NULL ||
+              !(hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA ||
                 hcrx->ccid3hcrx_state == TFRC_RSTATE_DATA));
 
        opt_recv = &dccp_sk(sk)->dccps_options_received;
@@ -978,8 +956,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
                t_elapsed = opt_recv->dccpor_elapsed_time * 10;
 
                if (unlikely(r_sample <= t_elapsed))
-                       LIMIT_NETDEBUG(KERN_WARNING
-                                      "%s: r_sample=%uus, t_elapsed=%uus\n",
+                       LIMIT_NETDEBUG(KERN_WARNING "%s: r_sample=%uus, "
+                                      "t_elapsed=%uus\n",
                                       __FUNCTION__, r_sample, t_elapsed);
                else
                        r_sample -= t_elapsed;
@@ -997,19 +975,16 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
                break;
        case DCCP_PKT_DATA:
                break;
-       default:
-               ccid3_pr_debug("%s, sk=%p, not DATA/DATAACK/ACK packet(%s)\n",
-                              dccp_role(sk), sk,
-                              dccp_packet_name(DCCP_SKB_CB(skb)->dccpd_type));
+       default: /* We're not interested in other packet types, move along */
                return;
        }
 
        packet = dccp_rx_hist_entry_new(ccid3_rx_hist, sk, opt_recv->dccpor_ndp,
                                        skb, SLAB_ATOMIC);
-       if (packet == NULL) {
-               ccid3_pr_debug("%s, sk=%p, Not enough mem to add rx packet "
-                              "to history (consider it lost)!",
-                              dccp_role(sk), sk);
+       if (unlikely(packet == NULL)) {
+               LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, Not enough mem to "
+                               "add rx packet to history, consider it lost!\n",
+                              __FUNCTION__, dccp_role(sk), sk);
                return;
        }
 
@@ -1102,10 +1077,7 @@ static void ccid3_hc_rx_exit(struct sock *sk)
        struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
        struct dccp_sock *dp = dccp_sk(sk);
 
-       ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk);
-
-       if (hcrx == NULL)
-               return;
+       BUG_ON(hcrx == NULL);
 
        ccid3_hc_rx_set_state(sk, TFRC_RSTATE_TERM);
 
@@ -1123,8 +1095,7 @@ static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info)
 {
        const struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
 
-       if (hcrx == NULL)
-               return;
+       BUG_ON(hcrx == NULL);
 
        info->tcpi_ca_state     = hcrx->ccid3hcrx_state;
        info->tcpi_options      |= TCPI_OPT_TIMESTAMPS;
@@ -1135,8 +1106,7 @@ static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info)
 {
        const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
 
-       if (hctx == NULL)
-               return;
+       BUG_ON(hctx == NULL);
 
        info->tcpi_rto = hctx->ccid3hctx_t_rto;
        info->tcpi_rtt = hctx->ccid3hctx_rtt;
index d16f00d784f39375a771550280281ba0d53d4c19..eb248778eea371fc34d3f053b366c9de98496b8b 100644 (file)
@@ -48,6 +48,8 @@
 /* Two seconds as per CCID3 spec */
 #define TFRC_INITIAL_TIMEOUT      (2 * USEC_PER_SEC)
 
+#define TFRC_INITIAL_IPI          (USEC_PER_SEC / 4)
+
 /* In usecs - half the scheduling granularity as per RFC3448 4.6 */
 #define TFRC_OPSYS_HALF_TIME_GRAN  (USEC_PER_SEC / (2 * HZ))
 
index fee9a8c3777b3b8a8a35de2be2b1a6789ac69ce3..2afaa464e7f0912fc812855831d8e7f73f601030 100644 (file)
@@ -641,16 +641,12 @@ int dccp_v4_send_reset(struct sock *sk, enum dccp_reset_codes code)
 
        skb = dccp_make_reset(sk, sk->sk_dst_cache, code);
        if (skb != NULL) {
-               const struct dccp_sock *dp = dccp_sk(sk);
                const struct inet_sock *inet = inet_sk(sk);
 
                err = ip_build_and_send_pkt(skb, sk,
                                            inet->saddr, inet->daddr, NULL);
                if (err == NET_XMIT_CN)
                        err = 0;
-
-               ccid_hc_rx_exit(dp->dccps_hc_rx_ccid, sk);
-               ccid_hc_tx_exit(dp->dccps_hc_tx_ccid, sk);
        }
 
        return err;
index 28de157a432617323385395ccc6b05f10c873085..ea6d0e91e5117ac8f5ca1b5dbbce4a5c53269696 100644 (file)
@@ -522,7 +522,4 @@ void dccp_send_close(struct sock *sk, const int active)
                dccp_transmit_skb(sk, skb_clone(skb, prio));
        } else
                dccp_transmit_skb(sk, skb);
-
-       ccid_hc_rx_exit(dp->dccps_hc_rx_ccid, sk);
-       ccid_hc_tx_exit(dp->dccps_hc_tx_ccid, sk);
 }
index 15e1134da1b2af9e8aba4e110078455d4739894a..c10e4435e3b1258a9ca0b5615be8af6d103142a2 100644 (file)
@@ -485,11 +485,6 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
        TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
        buff->tstamp = skb->tstamp;
 
-       if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
-               tp->lost_out -= tcp_skb_pcount(skb);
-               tp->left_out -= tcp_skb_pcount(skb);
-       }
-
        old_factor = tcp_skb_pcount(skb);
 
        /* Fix up tso_factor for both original and new SKB.  */
index 47122728212ab570b5eef20d45117bd33419fbc0..922549581abc266100d6a3a28676665dd509f858 100644 (file)
@@ -406,8 +406,7 @@ ipv6_invert_rthdr(struct sock *sk, struct ipv6_rt_hdr *hdr)
 
        memcpy(opt->srcrt, hdr, sizeof(*hdr));
        irthdr = (struct rt0_hdr*)opt->srcrt;
-       /* Obsolete field, MBZ, when originated by us */
-       irthdr->bitmap = 0;
+       irthdr->reserved = 0;
        opt->srcrt->segments_left = n;
        for (i=0; i<n; i++)
                memcpy(irthdr->addr+i, rthdr->addr+(n-1-i), 16);
index a9526b773d284d1e9fca90aa719ddd26d2c22866..2bb670037df3a9de75bf1b7580d6e015aece6927 100644 (file)
@@ -161,8 +161,8 @@ match(const struct sk_buff *skb,
                            ((rtinfo->hdrlen == hdrlen) ^
                            !!(rtinfo->invflags & IP6T_RT_INV_LEN))));
        DEBUGP("res %02X %02X %02X ", 
-                       (rtinfo->flags & IP6T_RT_RES), ((struct rt0_hdr *)rh)->bitmap,
-                       !((rtinfo->flags & IP6T_RT_RES) && (((struct rt0_hdr *)rh)->bitmap)));
+                       (rtinfo->flags & IP6T_RT_RES), ((struct rt0_hdr *)rh)->reserved,
+                       !((rtinfo->flags & IP6T_RT_RES) && (((struct rt0_hdr *)rh)->reserved)));
 
        ret = (rh != NULL)
                        &&
@@ -179,12 +179,12 @@ match(const struct sk_buff *skb,
                            !!(rtinfo->invflags & IP6T_RT_INV_TYP)));
 
        if (ret && (rtinfo->flags & IP6T_RT_RES)) {
-               u_int32_t *bp, _bitmap;
-               bp = skb_header_pointer(skb,
-                                       ptr + offsetof(struct rt0_hdr, bitmap),
-                                       sizeof(_bitmap), &_bitmap);
+               u_int32_t *rp, _reserved;
+               rp = skb_header_pointer(skb,
+                                       ptr + offsetof(struct rt0_hdr, reserved),
+                                       sizeof(_reserved), &_reserved);
 
-               ret = (*bp == 0);
+               ret = (*rp == 0);
        }
 
        DEBUGP("#%d ",rtinfo->addrnr);
index 9087273abf91665e078ea43884cbea00d8e3ea07..db3c708e546b81cfac87bd206998d87848252a70 100644 (file)
@@ -49,6 +49,9 @@ build := -f $(if $(KBUILD_SRC),$(srctree)/)scripts/Makefile.build obj
 cmd = @$(if $($(quiet)cmd_$(1)),\
       echo '  $(subst ','\'',$($(quiet)cmd_$(1)))' &&) $(cmd_$(1))
 
+# Add $(obj)/ for paths that is not absolute
+objectify = $(foreach o,$(1),$(if $(filter /%,$(o)),$(o),$(obj)/$(o)))
+
 ###
 # if_changed      - execute command if any prerequisite is newer than 
 #                   target, or command line has changed
index f04f6273685161dc45c98e11f8d1065660ca0a0f..c2d54148a91f403eb74b8b81f53a117d7100ef70 100644 (file)
@@ -91,12 +91,7 @@ foreach $object (keys(%object)) {
                     $from !~ /\.exit\.data$/ &&
                     $from !~ /\.altinstructions$/ &&
                     $from !~ /\.pdr$/ &&
-                    $from !~ /\.debug_info$/ &&
-                    $from !~ /\.debug_aranges$/ &&
-                    $from !~ /\.debug_ranges$/ &&
-                    $from !~ /\.debug_line$/ &&
-                    $from !~ /\.debug_frame$/ &&
-                    $from !~ /\.debug_loc$/ &&
+                    $from !~ /\.debug_.*$/ &&
                     $from !~ /\.exitcall\.exit$/ &&
                     $from !~ /\.eh_frame$/ &&
                     $from !~ /\.stab$/)) {
index b62920eead3db8cbad173905ca27dff380b3be9c..d64790bcd831ad8104ec2e50c8e05ca228424fd3 100644 (file)
@@ -42,8 +42,6 @@ MODULE_LICENSE("GPL");
 #else
 #define CSP_HDR_VALUE(a,b,c,d) ((d) | ((c)<<8) | ((b)<<16) | ((a)<<24))
 #endif
-#define LE_SHORT(v)            le16_to_cpu(v)
-#define LE_INT(v)              le32_to_cpu(v)
 
 #define RIFF_HEADER    CSP_HDR_VALUE('R', 'I', 'F', 'F')
 #define CSP__HEADER    CSP_HDR_VALUE('C', 'S', 'P', ' ')
@@ -56,20 +54,20 @@ MODULE_LICENSE("GPL");
 /*
  * RIFF data format
  */
-typedef struct riff_header {
+struct riff_header {
        __u32 name;
        __u32 len;
-} riff_header_t;
+};
 
-typedef struct desc_header {
-       riff_header_t info;
+struct desc_header {
+       struct riff_header info;
        __u16 func_nr;
        __u16 VOC_type;
        __u16 flags_play_rec;
        __u16 flags_16bit_8bit;
        __u16 flags_stereo_mono;
        __u16 flags_rates;
-} desc_header_t;
+};
 
 /*
  * prototypes
@@ -302,9 +300,9 @@ static int snd_sb_csp_riff_load(snd_sb_csp_t * p, snd_sb_csp_microcode_t __user
        unsigned char __user *data_end;
        unsigned short func_nr = 0;
 
-       riff_header_t file_h, item_h, code_h;
+       struct riff_header file_h, item_h, code_h;
        __u32 item_type;
-       desc_header_t funcdesc_h;
+       struct desc_header funcdesc_h;
 
        unsigned long flags;
        int err;
@@ -316,12 +314,12 @@ static int snd_sb_csp_riff_load(snd_sb_csp_t * p, snd_sb_csp_microcode_t __user
        if (copy_from_user(&file_h, data_ptr, sizeof(file_h)))
                return -EFAULT;
        if ((file_h.name != RIFF_HEADER) ||
-           (LE_INT(file_h.len) >= SNDRV_SB_CSP_MAX_MICROCODE_FILE_SIZE - sizeof(file_h))) {
+           (le32_to_cpu(file_h.len) >= SNDRV_SB_CSP_MAX_MICROCODE_FILE_SIZE - sizeof(file_h))) {
                snd_printd("%s: Invalid RIFF header\n", __FUNCTION__);
                return -EINVAL;
        }
        data_ptr += sizeof(file_h);
-       data_end = data_ptr + LE_INT(file_h.len);
+       data_end = data_ptr + le32_to_cpu(file_h.len);
 
        if (copy_from_user(&item_type, data_ptr, sizeof(item_type)))
                return -EFAULT;
@@ -331,7 +329,7 @@ static int snd_sb_csp_riff_load(snd_sb_csp_t * p, snd_sb_csp_microcode_t __user
        }
        data_ptr += sizeof (item_type);
 
-       for (; data_ptr < data_end; data_ptr += LE_INT(item_h.len)) {
+       for (; data_ptr < data_end; data_ptr += le32_to_cpu(item_h.len)) {
                if (copy_from_user(&item_h, data_ptr, sizeof(item_h)))
                        return -EFAULT;
                data_ptr += sizeof(item_h);
@@ -344,7 +342,7 @@ static int snd_sb_csp_riff_load(snd_sb_csp_t * p, snd_sb_csp_microcode_t __user
                case FUNC_HEADER:
                        if (copy_from_user(&funcdesc_h, data_ptr + sizeof(item_type), sizeof(funcdesc_h)))
                                return -EFAULT;
-                       func_nr = LE_SHORT(funcdesc_h.func_nr);
+                       func_nr = le16_to_cpu(funcdesc_h.func_nr);
                        break;
                case CODE_HEADER:
                        if (func_nr != info.func_req)
@@ -370,11 +368,11 @@ static int snd_sb_csp_riff_load(snd_sb_csp_t * p, snd_sb_csp_microcode_t __user
                                if (code_h.name != INIT_HEADER)
                                        break;
                                data_ptr += sizeof(code_h);
-                               err = snd_sb_csp_load_user(p, data_ptr, LE_INT(code_h.len),
+                               err = snd_sb_csp_load_user(p, data_ptr, le32_to_cpu(code_h.len),
                                                      SNDRV_SB_CSP_LOAD_INITBLOCK);
                                if (err)
                                        return err;
-                               data_ptr += LE_INT(code_h.len);
+                               data_ptr += le32_to_cpu(code_h.len);
                        }
                        /* main microcode block */
                        if (copy_from_user(&code_h, data_ptr, sizeof(code_h)))
@@ -386,17 +384,17 @@ static int snd_sb_csp_riff_load(snd_sb_csp_t * p, snd_sb_csp_microcode_t __user
                        }
                        data_ptr += sizeof(code_h);
                        err = snd_sb_csp_load_user(p, data_ptr,
-                                                  LE_INT(code_h.len), 0);
+                                                  le32_to_cpu(code_h.len), 0);
                        if (err)
                                return err;
 
                        /* fill in codec header */
                        strlcpy(p->codec_name, info.codec_name, sizeof(p->codec_name));
                        p->func_nr = func_nr;
-                       p->mode = LE_SHORT(funcdesc_h.flags_play_rec);
-                       switch (LE_SHORT(funcdesc_h.VOC_type)) {
+                       p->mode = le16_to_cpu(funcdesc_h.flags_play_rec);
+                       switch (le16_to_cpu(funcdesc_h.VOC_type)) {
                        case 0x0001:    /* QSound decoder */
-                               if (LE_SHORT(funcdesc_h.flags_play_rec) == SNDRV_SB_CSP_MODE_DSP_WRITE) {
+                               if (le16_to_cpu(funcdesc_h.flags_play_rec) == SNDRV_SB_CSP_MODE_DSP_WRITE) {
                                        if (snd_sb_qsound_build(p) == 0)
                                                /* set QSound flag and clear all other mode flags */
                                                p->mode = SNDRV_SB_CSP_MODE_QSOUND;
@@ -426,12 +424,12 @@ static int snd_sb_csp_riff_load(snd_sb_csp_t * p, snd_sb_csp_microcode_t __user
                                p->mode = 0;
                                snd_printd("%s: Unsupported CSP codec type: 0x%04x\n",
                                           __FUNCTION__,
-                                          LE_SHORT(funcdesc_h.VOC_type));
+                                          le16_to_cpu(funcdesc_h.VOC_type));
                                return -EINVAL;
                        }
-                       p->acc_channels = LE_SHORT(funcdesc_h.flags_stereo_mono);
-                       p->acc_width = LE_SHORT(funcdesc_h.flags_16bit_8bit);
-                       p->acc_rates = LE_SHORT(funcdesc_h.flags_rates);
+                       p->acc_channels = le16_to_cpu(funcdesc_h.flags_stereo_mono);
+                       p->acc_width = le16_to_cpu(funcdesc_h.flags_16bit_8bit);
+                       p->acc_rates = le16_to_cpu(funcdesc_h.flags_rates);
 
                        /* Decouple CSP from IRQ and DMAREQ lines */
                        spin_lock_irqsave(&p->chip->reg_lock, flags);
diff --git a/sound/oss/skeleton.c b/sound/oss/skeleton.c
deleted file mode 100644 (file)
index 8fea783..0000000
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- *     PCI sound skeleton example
- *
- *     (c) 1998 Red Hat Software
- *
- *     This software may be used and distributed according to the 
- *     terms of the GNU General Public License, incorporated herein by 
- *     reference.
- *
- *     This example is designed to be built in the linux/drivers/sound
- *     directory as part of a kernel build. The example is modular only
- *     drop me a note once you have a working modular driver and want
- *     to integrate it with the main code.
- *             -- Alan <alan@redhat.com>
- *
- *     This is a first draft. Please report any errors, corrections or
- *     improvements to me.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-
-#include <asm/io.h>
-
-#include "sound_config.h"
-
-/*
- *     Define our PCI vendor ID here
- */
-#ifndef PCI_VENDOR_MYIDENT
-#define PCI_VENDOR_MYIDENT                     0x125D
-
-/*
- *     PCI identity for the card.
- */
-#define PCI_DEVICE_ID_MYIDENT_MYCARD1          0x1969
-#endif
-
-#define CARD_NAME      "ExampleWave 3D Pro Ultra ThingyWotsit"
-
-#define MAX_CARDS      8
-
-/*
- *     Each address_info object holds the information about one of
- *     our card resources. In this case the MSS emulation of our
- *     ficticious card. Its used to manage and attach things.
- */
-static struct address_info     mss_data[MAX_CARDS];
-static int                     cards;
-
-/*
- *     Install the actual card. This is an example
- */
-
-static int mycard_install(struct pci_dev *pcidev)
-{
-       int iobase;
-       int mssbase;
-       int mpubase;
-       u8 x;
-       u16 w;
-       u32 v;
-       int i;
-       int dma;
-
-       /*
-        *      Our imaginary code has its I/O on PCI address 0, a
-        *      MSS on PCI address 1 and an MPU on address 2
-        *
-        *      For the example we will only initialise the MSS
-        */
-               
-       iobase = pci_resource_start(pcidev, 0);
-       mssbase = pci_resource_start(pcidev, 1);
-       mpubase = pci_resource_start(pcidev, 2);
-       
-       /*
-        *      Reset the board
-        */
-        
-       /*
-        *      Wait for completion. udelay() waits in microseconds
-        */
-        
-       udelay(100);
-       
-       /*
-        *      Ok card ready. Begin setup proper. You might for example
-        *      load the firmware here
-        */
-       
-       dma = card_specific_magic(ioaddr);
-       
-       /*
-        *      Turn on legacy mode (example), There are also byte and
-        *      dword (32bit) PCI configuration function calls
-        */
-
-       pci_read_config_word(pcidev, 0x40, &w);
-       w&=~(1<<15);                    /* legacy decode on */
-       w|=(1<<14);                     /* Reserved write as 1 in this case */
-       w|=(1<<3)|(1<<1)|(1<<0);        /* SB on , FM on, MPU on */
-       pci_write_config_word(pcidev, 0x40, w);
-       
-       /*
-        *      Let the user know we found his toy.
-        */
-        
-       printk(KERN_INFO "Programmed "CARD_NAME" at 0x%X to legacy mode.\n",
-               iobase);
-               
-       /*
-        *      Now set it up the description of the card
-        */
-        
-       mss_data[cards].io_base = mssbase;
-       mss_data[cards].irq = pcidev->irq;
-       mss_data[cards].dma = dma;
-       
-       /*
-        *      Check there is an MSS present
-        */
-
-       if(ad1848_detect(mssbase, NULL, mss_data[cards].osp)==0)
-               return 0;
-               
-       /*
-        *      Initialize it
-        */
-        
-       mss_data[cards].slots[3] = ad1848_init("MyCard MSS 16bit", 
-                       mssbase,
-                       mss_data[cards].irq,
-                       mss_data[cards].dma,
-                       mss_data[cards].dma,
-                       0,
-                       0,
-                       THIS_MODULE);
-
-       cards++;        
-       return 1;
-}
-
-
-/*
- *     This loop walks the PCI configuration database and finds where
- *     the sound cards are.
- */
-int init_mycard(void)
-{
-       struct pci_dev *pcidev=NULL;
-       int count=0;
-               
-       while((pcidev = pci_find_device(PCI_VENDOR_MYIDENT, PCI_DEVICE_ID_MYIDENT_MYCARD1, pcidev))!=NULL)
-       {
-               if (pci_enable_device(pcidev))
-                       continue;
-               count+=mycard_install(pcidev);
-               if(count)
-                       return 0;
-               if(count==MAX_CARDS)
-                       break;
-       }
-       
-       if(count==0)
-               return -ENODEV;
-       return 0;
-}
-
-/*
- *     This function is called when the user or kernel loads the 
- *     module into memory.
- */
-
-
-int init_module(void)
-{
-       if(init_mycard()<0)
-       {
-               printk(KERN_ERR "No "CARD_NAME" cards found.\n");
-               return -ENODEV;
-       }
-
-       return 0;
-}
-
-/*
- *     This is called when it is removed. It will only be removed 
- *     when its use count is 0.
- */
-void cleanup_module(void)
-{
-       for(i=0;i< cards; i++)
-       {
-               /*
-                *      Free attached resources
-                */
-                
-               ad1848_unload(mss_data[i].io_base,
-                             mss_data[i].irq,
-                             mss_data[i].dma,
-                             mss_data[i].dma,
-                             0);
-               /*
-                *      And disconnect the device from the kernel
-                */
-               sound_unload_audiodevice(mss_data[i].slots[3]);
-       }
-}
-