}
EXPORT_SYMBOL(tty_free_termios);
+/**
+ * tty_flush_works - flush all works of a tty
+ * @tty: tty device to flush works for
+ *
+ * Sync flush all works belonging to @tty.
+ */
+static void tty_flush_works(struct tty_struct *tty)
+{
+ flush_work(&tty->SAK_work);
+ flush_work(&tty->hangup_work);
+}
/**
* release_one_tty - release tty structure memory
* Ask the line discipline code to release its structures
*/
tty_ldisc_release(tty, o_tty);
+
+ /* Wait for pending work before tty destruction commences */
+ tty_flush_works(tty);
+ if (o_tty)
+ tty_flush_works(o_tty);
+
/*
* The release_tty function takes care of the details of clearing
* the slots and preserving the termios structure. The tty_unlock_pair
}
}
-/**
- * tty_ldisc_flush_works - flush all works of a tty
- * @tty: tty device to flush works for
- *
- * Sync flush all works belonging to @tty.
- */
-static void tty_ldisc_flush_works(struct tty_struct *tty)
-{
- flush_work(&tty->SAK_work);
- flush_work(&tty->hangup_work);
-}
-
/**
* tty_ldisc_wait_idle - wait for the ldisc to become idle
* @tty: tty to wait for
retval = tty_ldisc_halt(tty, o_tty, 5 * HZ);
/*
- * Wait for ->hangup_work and ->buf.work handlers to terminate.
+ * Wait for hangup to complete, if pending.
* We must drop the mutex here in case a hangup is also in process.
*/
mutex_unlock(&tty->ldisc_mutex);
- tty_ldisc_flush_works(tty);
+ flush_work(&tty->hangup_work);
tty_lock(tty);
mutex_lock(&tty->ldisc_mutex);
void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
{
/*
- * Prevent flush_to_ldisc() from rescheduling the work for later. Then
- * kill any delayed work. As this is the final close it does not
- * race with the set_ldisc code path.
+ * Shut down this line discipline. As this is the final close,
+ * it does not race with the set_ldisc code path.
*/
tty_ldisc_halt(tty, o_tty, MAX_SCHEDULE_TIMEOUT);
- tty_ldisc_flush_works(tty);
- if (o_tty)
- tty_ldisc_flush_works(o_tty);
tty_lock_pair(tty, o_tty);
/* This will need doing differently if we need to lock */