* @max_qlen_log - log_2 of maximal queued SYNs/REQUESTs
*/
struct listen_sock {
- int qlen_inc; /* protected by listener lock */
- int young_inc;/* protected by listener lock */
-
- /* following fields can be updated by timer */
- atomic_t qlen_dec; /* qlen = qlen_inc - qlen_dec */
- atomic_t young_dec;
-
- u32 max_qlen_log ____cacheline_aligned_in_smp;
+ u32 max_qlen_log;
u32 synflood_warned;
u32 hash_rnd;
u32 nr_table_entries;
struct request_sock_queue {
spinlock_t rskq_lock;
u8 rskq_defer_accept;
+ atomic_t qlen;
+ atomic_t young;
+
struct request_sock *rskq_accept_head;
struct request_sock *rskq_accept_tail;
struct listen_sock *listen_opt;
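The two struct hunks above are from include/net/request_sock.h: the first strips the split accounting fields out of struct listen_sock (and drops the ____cacheline_aligned_in_smp annotation on max_qlen_log), the second adds a single atomic_t qlen and atomic_t young to struct request_sock_queue. Under the old scheme the queue length had to be reconstructed as qlen_inc - qlen_dec, because qlen_inc was only touched under the listener lock while qlen_dec was atomic so the request timer could update it. A minimal sketch of the before/after accounting, using C11 atomics in place of the kernel's atomic_t; old_acct/new_acct and their fields are illustrative names, not kernel types:

#include <stdatomic.h>

/* Old scheme (toy model): the length is split across two counters. */
struct old_acct {
        int qlen_inc;         /* only touched under the listener lock */
        atomic_int qlen_dec;  /* atomic, may be updated by the timer  */
};

static int old_qlen(struct old_acct *a)
{
        return a->qlen_inc - atomic_load(&a->qlen_dec);
}

/* New scheme (toy model): one per-queue atomic counter. */
struct new_acct {
        atomic_int qlen;
};

static int new_qlen(struct new_acct *a)
{
        return atomic_load(&a->qlen);
}

The young/young_dec pair collapses the same way.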
static inline void reqsk_queue_removed(struct request_sock_queue *queue,
const struct request_sock *req)
{
- struct listen_sock *lopt = queue->listen_opt;
-
if (req->num_timeout == 0)
- atomic_inc(&lopt->young_dec);
- atomic_inc(&lopt->qlen_dec);
+ atomic_dec(&queue->young);
+ atomic_dec(&queue->qlen);
}
static inline void reqsk_queue_added(struct request_sock_queue *queue)
{
- struct listen_sock *lopt = queue->listen_opt;
-
- lopt->young_inc++;
- lopt->qlen_inc++;
-}
-
-static inline int listen_sock_qlen(const struct listen_sock *lopt)
-{
- return lopt->qlen_inc - atomic_read(&lopt->qlen_dec);
-}
-
-static inline int listen_sock_young(const struct listen_sock *lopt)
-{
- return lopt->young_inc - atomic_read(&lopt->young_dec);
+ atomic_inc(&queue->young);
+ atomic_inc(&queue->qlen);
}
static inline int reqsk_queue_len(const struct request_sock_queue *queue)
{
- const struct listen_sock *lopt = queue->listen_opt;
-
- return lopt ? listen_sock_qlen(lopt) : 0;
+ return atomic_read(&queue->qlen);
}
static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
{
- return listen_sock_young(queue->listen_opt);
+ return atomic_read(&queue->young);
}
static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
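Two things fall out of moving the counters into struct request_sock_queue. First, reqsk_queue_len() no longer needs the lopt ? ... : 0 guard, because it no longer has to chase the separately allocated listen_opt pointer. Second, the helpers now pair up as plain atomic increments and decrements: reqsk_queue_added() when a request is queued, reqsk_queue_removed() when it leaves the SYN queue, and the _len helpers for lockless reads from paths such as the timer or a queue-is-full style check. A small self-contained model of that pairing, with hypothetical names (toy_queue, toy_added, toy_removed, toy_is_full) and C11 atomics standing in for atomic_t:

#include <stdatomic.h>
#include <stdbool.h>

struct toy_queue {
        atomic_int qlen;    /* all queued requests                         */
        atomic_int young;   /* requests not yet retransmitted              */
        int max_qlen;
};

/* Called when a request is queued (cf. reqsk_queue_added()). */
static void toy_added(struct toy_queue *q)
{
        atomic_fetch_add(&q->young, 1);
        atomic_fetch_add(&q->qlen, 1);
}

/* Called when a request leaves the queue (cf. reqsk_queue_removed()). */
static void toy_removed(struct toy_queue *q, bool was_young)
{
        if (was_young)
                atomic_fetch_sub(&q->young, 1);
        atomic_fetch_sub(&q->qlen, 1);
}

/* Lockless read, e.g. for an "is the backlog full?" check. */
static bool toy_is_full(struct toy_queue *q)
{
        return atomic_load(&q->qlen) >= q->max_qlen;
}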
/* make all the listen_opt local to us */
struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue);
- if (listen_sock_qlen(lopt) != 0) {
+ if (reqsk_queue_len(queue) != 0) {
unsigned int i;
for (i = 0; i < lopt->nr_table_entries; i++) {
* or risk a dead lock.
*/
spin_unlock_bh(&queue->syn_wait_lock);
- atomic_inc(&lopt->qlen_dec);
+ atomic_dec(&queue->qlen);
if (del_timer_sync(&req->rsk_timer))
reqsk_put(req);
reqsk_put(req);
}
}
- if (WARN_ON(listen_sock_qlen(lopt) != 0))
- pr_err("qlen %u\n", listen_sock_qlen(lopt));
+ if (WARN_ON(reqsk_queue_len(queue) != 0))
+ pr_err("qlen %u\n", reqsk_queue_len(queue));
kvfree(lopt);
}
* embrions; and abort old ones without pity, if old
* ones are about to clog our table.
*/
- qlen = listen_sock_qlen(lopt);
+ qlen = reqsk_queue_len(queue);
if (qlen >> (lopt->max_qlen_log - 1)) {
- int young = listen_sock_young(lopt) << 1;
+ int young = reqsk_queue_len_young(queue) << 1;
while (thresh > 2) {
if (qlen < young)
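This hunk is in reqsk_timer_handler() and only swaps the accessors; the heuristic itself is unchanged. Since max_qlen_log is the log2 of the maximum queue length, qlen >> (max_qlen_log - 1) is non-zero exactly when the queue is at least half full; the loop then lowers the retransmit threshold one step each time qlen is still at least 2*young, 4*young, 8*young, ..., bottoming out at 2. A toy replica with a worked example, assuming the default of 5 SYN-ACK retransmits; pruned_thresh is an illustrative name, not a kernel function:

/* Toy replica of the pruning heuristic above; illustrative only. */
static int pruned_thresh(int qlen, int young, int max_qlen_log, int thresh)
{
        if (qlen >> (max_qlen_log - 1)) {  /* queue at least half full */
                young <<= 1;
                while (thresh > 2) {
                        if (qlen < young)
                                break;
                        thresh--;
                        young <<= 1;
                }
        }
        return thresh;
}

/* Example: pruned_thresh(160, 30, 8, 5) == 3. With a 256-slot table
 * more than half full and only 30 entries still young, old requests
 * are given up on after 3 SYN-ACK retransmits instead of 5.
 */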
unsigned long timeo;
if (req->num_timeout++ == 0)
- atomic_inc(&lopt->young_dec);
+ atomic_dec(&queue->young);
timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
mod_timer_pinned(&req->rsk_timer, jiffies + timeo);
return;
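The final hunk is the re-arm path for requests that are kept: apart from decrementing queue->young with an atomic op once a request stops being young, the backoff is untouched. Assuming the usual definitions of TCP_TIMEOUT_INIT as 1*HZ and TCP_RTO_MAX as 120*HZ, and noting that num_timeout has just been post-incremented, the rearm intervals come out as 2s, 4s, 8s, ... capped at 120s (the first 1s expiry is scheduled separately when the request is created). A hypothetical helper spelling that out; synack_backoff is not a kernel function:

/* Illustrative only. */
static unsigned long synack_backoff(int num_timeout, unsigned long hz)
{
        unsigned long init = 1 * hz;    /* TCP_TIMEOUT_INIT, assumed 1s  */
        unsigned long max  = 120 * hz;  /* TCP_RTO_MAX, assumed 120s     */
        unsigned long timeo = init << num_timeout;

        return timeo < max ? timeo : max;
}

/* e.g. with HZ == 1000: synack_backoff(1, 1000) == 2000 jiffies (2s),
 * synack_backoff(7, 1000) == 120000 jiffies (capped at 120s).
 */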