void tty_buffer_free_all(struct tty_port *port)
{
struct tty_bufhead *buf = &port->buf;
- struct tty_buffer *p;
+ struct tty_buffer *p, *next;
+ struct llist_node *llist;
while ((p = buf->head) != NULL) {
buf->head = p->next;
kfree(p);
}
- while ((p = buf->free) != NULL) {
- buf->free = p->next;
+ llist = llist_del_all(&buf->free);
+ llist_for_each_entry_safe(p, next, llist, free)
kfree(p);
- }
+
buf->tail = NULL;
buf->memory_used = 0;
}
* allocation behaviour.
* Return NULL if out of memory or the allocation would exceed the
* per device queue
- *
- * Locking: Caller must hold tty->buf.lock
*/
static struct tty_buffer *tty_buffer_alloc(struct tty_port *port, size_t size)
{
- struct tty_buffer **tbh = &port->buf.free;
+ struct llist_node *free;
struct tty_buffer *p;
/* Round the buffer size out */
size = __ALIGN_MASK(size, TTYB_ALIGN_MASK);
if (size <= MIN_TTYB_SIZE) {
- if (*tbh) {
- p = *tbh;
- *tbh = p->next;
+ free = llist_del_first(&port->buf.free);
+ if (free) {
+ p = llist_entry(free, struct tty_buffer, free);
goto found;
}
}
*
* Free a tty buffer, or add it to the free list according to our
* internal strategy
- *
- * Locking: Caller must hold tty->buf.lock
*/
static void tty_buffer_free(struct tty_port *port, struct tty_buffer *b)
if (b->size > MIN_TTYB_SIZE)
kfree(b);
- else {
- b->next = buf->free;
- buf->free = b;
- }
+ else
+ llist_add(&b->free, &buf->free);
}
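Taken together, the two hunks above turn the free queue into a lock-less, multi-producer stack: tty_buffer_free() pushes with llist_add() and tty_buffer_alloc() pops with llist_del_first(), which is presumably why the old "Locking: Caller must hold tty->buf.lock" notes go away. A minimal standalone sketch of that pattern, with illustrative names (demo_buf, demo_buf_free and demo_buf_alloc are not part of the patch):

#include <linux/llist.h>

/* Illustrative only: a trimmed-down free list in the style of tty_bufhead. */
struct demo_buf {
	struct llist_node free;		/* link while parked on the free list */
	int size;
};

static LLIST_HEAD(demo_free_list);

/* Producers may push concurrently without any lock. */
static void demo_buf_free(struct demo_buf *b)
{
	llist_add(&b->free, &demo_free_list);
}

/* llist_del_first() must not race with other llist_del_first() or
 * llist_del_all() callers, so it needs a single consumer or external
 * serialization among consumers. */
static struct demo_buf *demo_buf_alloc(void)
{
	struct llist_node *node = llist_del_first(&demo_free_list);

	return node ? llist_entry(node, struct demo_buf, free) : NULL;
}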
/**
spin_lock_init(&buf->lock);
buf->head = NULL;
buf->tail = NULL;
- buf->free = NULL;
+ init_llist_head(&buf->free);
buf->memory_used = 0;
INIT_WORK(&buf->work, flush_to_ldisc);
}
&(pos)->member != NULL; \
(pos) = llist_entry((pos)->member.next, typeof(*(pos)), member))
+/**
+ * llist_for_each_entry_safe - iterate over some deleted entries of lock-less list of given type
+ * safe against removal of list entry
+ * @pos: the type * to use as a loop cursor.
+ * @n: another type * to use as temporary storage
+ * @node: the first entry of deleted list entries.
+ * @member: the name of the llist_node within the struct.
+ *
+ * In general, some entries of the lock-less list can be traversed
+ * safely only after being removed from list, so start with an entry
+ * instead of list head.
+ *
+ * If being used on entries deleted from lock-less list directly, the
+ * traverse order is from the newest to the oldest added entry. If
+ * you want to traverse from the oldest to the newest, you must
+ * reverse the order by yourself before traversing.
+ */
+#define llist_for_each_entry_safe(pos, n, node, member) \
+ for (pos = llist_entry((node), typeof(*pos), member); \
+ &pos->member != NULL && \
+ (n = llist_entry(pos->member.next, typeof(*n), member), true); \
+ pos = n)
+
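A hedged usage sketch of the new iterator, mirroring how tty_buffer_free_all() drains the free list above; the item and drain_and_free names are illustrative, not from the patch:

#include <linux/llist.h>
#include <linux/slab.h>

struct item {
	struct llist_node node;
	int value;
};

/* llist_del_all() atomically detaches the whole chain, and the _safe
 * iterator caches the next pointer before the loop body runs, so each
 * entry may be kfree()d while walking.  Entries are visited newest
 * first, i.e. in reverse order of llist_add(). */
static void drain_and_free(struct llist_head *head)
{
	struct item *pos, *n;
	struct llist_node *first = llist_del_all(head);

	llist_for_each_entry_safe(pos, n, first, node)
		kfree(pos);
}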
/**
* llist_empty - tests whether a lock-less list is empty
* @head: the list to test
#include <linux/tty_flags.h>
#include <uapi/linux/tty.h>
#include <linux/rwsem.h>
+#include <linux/llist.h>
#define __DISABLED_CHAR '\0'
struct tty_buffer {
- struct tty_buffer *next;
+ union {
+ struct tty_buffer *next;
+ struct llist_node free;
+ };
int used;
int size;
int commit;
spinlock_t lock;
struct tty_buffer *head; /* Queue head */
struct tty_buffer *tail; /* Active buffer */
- struct tty_buffer *free; /* Free queue head */
+ struct llist_head free; /* Free queue head */
int memory_used; /* Buffer space used excluding
free queue */
};
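A plausible reading of the union, not stated in the patch itself: a tty_buffer is only ever linked through one field at a time, ->next while it sits on the head/tail flip queue and ->free while it is parked on the lock-less free list, so the two links can overlay the same storage. A minimal, hypothetical illustration of that shape:

#include <linux/llist.h>

/* Hypothetical node with two mutually exclusive linkages sharing storage,
 * in the style of the tty_buffer change above. */
struct two_list_node {
	union {
		struct two_list_node *next;	/* while on the active queue */
		struct llist_node free;		/* while on the free llist */
	};
	int payload;
};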