int tty_buffer_space_avail(struct tty_port *port)
{
- int space = port->buf.mem_limit - atomic_read(&port->buf.memory_used);
+ int space = port->buf.mem_limit - atomic_read(&port->buf.mem_used);
return max(space, 0);
}
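/*
 * Context, not part of the hunk above: the two fields read by
 * tty_buffer_space_avail() are assumed to be declared in struct tty_bufhead
 * (include/linux/tty.h); only the accounting members are sketched here, the
 * remaining members are elided.
 */
struct tty_bufhead {
	/* ... head/tail pointers, work item, lock, sentinel, free list ... */
	atomic_t	mem_used;	/* bytes held by queued flip buffers */
	int		mem_limit;	/* cap; max(space, 0) clamps transient overshoot */
};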
buf->head = &buf->sentinel;
buf->tail = &buf->sentinel;
- atomic_set(&buf->memory_used, 0);
+ atomic_set(&buf->mem_used, 0);
}
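/*
 * The fragment above appears to be the tail of tty_buffer_free_all(): once
 * every queued and free buffer has been released, head and tail are pointed
 * back at the embedded sentinel buffer and the accounting counter is reset.
 */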
/* Should possibly check if this fails for the largest buffer we
have queued and recycle that ? */
- if (atomic_read(&port->buf.memory_used) > port->buf.mem_limit)
+ if (atomic_read(&port->buf.mem_used) > port->buf.mem_limit)
return NULL;
p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
if (p == NULL)
return NULL;
found:
tty_buffer_reset(p, size);
- atomic_add(size, &port->buf.memory_used);
+ atomic_add(size, &port->buf.mem_used);
return p;
}
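/*
 * Allocation accounting in tty_buffer_alloc(): the check above refuses a new
 * buffer once mem_used has passed mem_limit; kmalloc() asks for 2 * size
 * because each flip buffer stores a data byte plus a flag byte per character;
 * and only `size' is charged to mem_used on success, so the counter tracks
 * requested characters rather than exact bytes allocated.
 */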
struct tty_bufhead *buf = &port->buf;
/* Dumb strategy for now - should keep some stats */
- WARN_ON(atomic_sub_return(b->size, &buf->memory_used) < 0);
+ WARN_ON(atomic_sub_return(b->size, &buf->mem_used) < 0);
if (b->size > MIN_TTYB_SIZE)
kfree(b);
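/*
 * tty_buffer_free() is the inverse: the buffer's size is subtracted from
 * mem_used (the WARN_ON flags accounting underflow) and buffers larger than
 * MIN_TTYB_SIZE are handed back to the allocator; smaller buffers are
 * presumably recycled onto the free llist initialised below, but that branch
 * is outside this excerpt.
 */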
buf->head = &buf->sentinel;
buf->tail = &buf->sentinel;
init_llist_head(&buf->free);
- atomic_set(&buf->memory_used, 0);
+ atomic_set(&buf->mem_used, 0);
atomic_set(&buf->priority, 0);
INIT_WORK(&buf->work, flush_to_ldisc);
buf->mem_limit = TTYB_DEFAULT_MEM_LIMIT;
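/*
 * tty_buffer_init() starts each port with the sentinel as both head and
 * tail, an empty free list, zeroed accounting and the default cap
 * TTYB_DEFAULT_MEM_LIMIT (64 KiB in mainline at the time of this rename).
 */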