tty: stop using "delayed_work" in the tty layer
author Linus Torvalds <torvalds@linux-foundation.org>
Tue, 22 Mar 2011 23:17:32 +0000 (16:17 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 22 Mar 2011 23:17:32 +0000 (16:17 -0700)
Using delayed-work for tty flip buffers ends up causing us to wait for
the next tick to complete some actions.  That's usually not all that
noticeable, but for certain latency-critical workloads it ends up being
totally unacceptable.

As an extreme case of this, passing a token back-and-forth over a pty
will take two ticks per iteration, so even just a thousand iterations
will take 8 seconds assuming a common 250Hz configuration.

Avoiding the whole delayed work issue brings that ping-pong test-case
down to 0.009s on my machine.

In more practical terms, this latency has been a performance problem for
things like dive computer simulators (simulating the serial interface
using the ptys) and for other environments (Alan mentions a CP/M emulator).

Reported-by: Jef Driesen <jefdriesen@telenet.be>
Acked-by: Greg KH <gregkh@suse.de>
Acked-by: Alan Cox <alan@lxorguk.ukuu.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
drivers/tty/tty_buffer.c
drivers/tty/tty_ldisc.c
include/linux/kbd_kern.h
include/linux/tty.h

index d8210ca007206628163efea3b2eb6dff473fadf7..b9451219528b76bf01a900b0873dea751b27e9d5 100644 (file)
@@ -322,7 +322,7 @@ void tty_schedule_flip(struct tty_struct *tty)
        if (tty->buf.tail != NULL)
                tty->buf.tail->commit = tty->buf.tail->used;
        spin_unlock_irqrestore(&tty->buf.lock, flags);
-       schedule_delayed_work(&tty->buf.work, 1);
+       schedule_work(&tty->buf.work);
 }
 EXPORT_SYMBOL(tty_schedule_flip);
 
@@ -402,7 +402,7 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string_flags);
 static void flush_to_ldisc(struct work_struct *work)
 {
        struct tty_struct *tty =
-               container_of(work, struct tty_struct, buf.work.work);
+               container_of(work, struct tty_struct, buf.work);
        unsigned long   flags;
        struct tty_ldisc *disc;
 
@@ -443,7 +443,7 @@ static void flush_to_ldisc(struct work_struct *work)
                        if (test_bit(TTY_FLUSHPENDING, &tty->flags))
                                break;
                        if (!tty->receive_room || seen_tail) {
-                               schedule_delayed_work(&tty->buf.work, 1);
+                               schedule_work(&tty->buf.work);
                                break;
                        }
                        if (count > tty->receive_room)
@@ -481,7 +481,7 @@ static void flush_to_ldisc(struct work_struct *work)
  */
 void tty_flush_to_ldisc(struct tty_struct *tty)
 {
-       flush_delayed_work(&tty->buf.work);
+       flush_work(&tty->buf.work);
 }
 
 /**
@@ -506,9 +506,9 @@ void tty_flip_buffer_push(struct tty_struct *tty)
        spin_unlock_irqrestore(&tty->buf.lock, flags);
 
        if (tty->low_latency)
-               flush_to_ldisc(&tty->buf.work.work);
+               flush_to_ldisc(&tty->buf.work);
        else
-               schedule_delayed_work(&tty->buf.work, 1);
+               schedule_work(&tty->buf.work);
 }
 EXPORT_SYMBOL(tty_flip_buffer_push);
 
@@ -529,6 +529,6 @@ void tty_buffer_init(struct tty_struct *tty)
        tty->buf.tail = NULL;
        tty->buf.free = NULL;
        tty->buf.memory_used = 0;
-       INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc);
+       INIT_WORK(&tty->buf.work, flush_to_ldisc);
 }
 
index 0fc564a977065a24cf0bb377d54be5e9049cb039..e19e136471165dd163509750bddd6382ce70bcd2 100644 (file)
@@ -529,7 +529,7 @@ static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
 static int tty_ldisc_halt(struct tty_struct *tty)
 {
        clear_bit(TTY_LDISC, &tty->flags);
-       return cancel_delayed_work_sync(&tty->buf.work);
+       return cancel_work_sync(&tty->buf.work);
 }
 
 /**
@@ -542,7 +542,7 @@ static void tty_ldisc_flush_works(struct tty_struct *tty)
 {
        flush_work_sync(&tty->hangup_work);
        flush_work_sync(&tty->SAK_work);
-       flush_delayed_work_sync(&tty->buf.work);
+       flush_work_sync(&tty->buf.work);
 }
 
 /**
@@ -722,9 +722,9 @@ enable:
        /* Restart the work queue in case no characters kick it off. Safe if
           already running */
        if (work)
-               schedule_delayed_work(&tty->buf.work, 1);
+               schedule_work(&tty->buf.work);
        if (o_work)
-               schedule_delayed_work(&o_tty->buf.work, 1);
+               schedule_work(&o_tty->buf.work);
        mutex_unlock(&tty->ldisc_mutex);
        tty_unlock();
        return retval;
@@ -830,12 +830,12 @@ void tty_ldisc_hangup(struct tty_struct *tty)
 
        /*
         * this is like tty_ldisc_halt, but we need to give up
-        * the BTM before calling cancel_delayed_work_sync,
-        * which may need to wait for another function taking the BTM
+        * the BTM before calling cancel_work_sync, which may
+        * need to wait for another function taking the BTM
         */
        clear_bit(TTY_LDISC, &tty->flags);
        tty_unlock();
-       cancel_delayed_work_sync(&tty->buf.work);
+       cancel_work_sync(&tty->buf.work);
        mutex_unlock(&tty->ldisc_mutex);
 
        tty_lock();
index 4b0761cc7dd9d448ee22d6a9252405b83e1ef315..ec2d17bc1f1eaeccc4d7971d0643fba6d380b737 100644 (file)
@@ -159,7 +159,7 @@ static inline void con_schedule_flip(struct tty_struct *t)
        if (t->buf.tail != NULL)
                t->buf.tail->commit = t->buf.tail->used;
        spin_unlock_irqrestore(&t->buf.lock, flags);
-       schedule_delayed_work(&t->buf.work, 0);
+       schedule_work(&t->buf.work);
 }
 
 #endif
index 4e53d4641b388c965903af977d33b53f5fb1e20b..9f469c700550e79dc7fc3949e34c94096c2078ff 100644 (file)
@@ -82,7 +82,7 @@ struct tty_buffer {
 
 
 struct tty_bufhead {
-       struct delayed_work work;
+       struct work_struct work;
        spinlock_t lock;
        struct tty_buffer *head;        /* Queue head */
        struct tty_buffer *tail;        /* Active buffer */