Revert "knfsd: avoid overloading the CPU scheduler with enormous load averages"
author: J. Bruce Fields <bfields@citi.umich.edu>
Thu, 6 Aug 2009 19:41:34 +0000 (15:41 -0400)
committer: J. Bruce Fields <bfields@citi.umich.edu>
Mon, 23 Nov 2009 17:34:05 +0000 (12:34 -0500)
This reverts commit 59a252ff8c0f2fa32c896f69d56ae33e641ce7ad.

This helps in an entirely cached workload but not necessarily in
workloads that require waiting on disk.

Conflicts:

include/linux/sunrpc/svc.h
net/sunrpc/svc_xprt.c

Reported-by: Simon Kirby <sim@hostway.ca>
Tested-by: Jesper Krogh <jesper@krogh.cc>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
include/linux/sunrpc/svc.h
net/sunrpc/svc_xprt.c

index 52e8cb0a756993e0f095372b9e200d663d9da1ab..d1567d627557d84ded024ce5dd9b3a488586caf8 100644 (file)
@@ -29,7 +29,6 @@ struct svc_pool_stats {
        unsigned long   packets;
        unsigned long   sockets_queued;
        unsigned long   threads_woken;
-       unsigned long   overloads_avoided;
        unsigned long   threads_timedout;
 };
 
@@ -50,7 +49,6 @@ struct svc_pool {
        struct list_head        sp_sockets;     /* pending sockets */
        unsigned int            sp_nrthreads;   /* # of threads in pool */
        struct list_head        sp_all_threads; /* all server threads */
-       int                     sp_nwaking;     /* number of threads woken but not yet active */
        struct svc_pool_stats   sp_stats;       /* statistics on pool operation */
 } ____cacheline_aligned_in_smp;
 
@@ -284,7 +282,6 @@ struct svc_rqst {
                                                 * cache pages */
        wait_queue_head_t       rq_wait;        /* synchronization */
        struct task_struct      *rq_task;       /* service thread */
-       int                     rq_waking;      /* 1 if thread is being woken */
 };
 
 /*
index df124f78ee489762f4a9028af3b95f38a90f34d3..2c58b75a236f1db692ca8c368546381f1ea510ac 100644 (file)
@@ -16,8 +16,6 @@
 
 #define RPCDBG_FACILITY        RPCDBG_SVCXPRT
 
-#define SVC_MAX_WAKING 5
-
 static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
 static int svc_deferred_recv(struct svc_rqst *rqstp);
 static struct cache_deferred_req *svc_defer(struct cache_req *req);
@@ -306,7 +304,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
        struct svc_pool *pool;
        struct svc_rqst *rqstp;
        int cpu;
-       int thread_avail;
 
        if (!(xprt->xpt_flags &
              ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED))))
@@ -318,6 +315,12 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
 
        spin_lock_bh(&pool->sp_lock);
 
+       if (!list_empty(&pool->sp_threads) &&
+           !list_empty(&pool->sp_sockets))
+               printk(KERN_ERR
+                      "svc_xprt_enqueue: "
+                      "threads and transports both waiting??\n");
+
        if (test_bit(XPT_DEAD, &xprt->xpt_flags)) {
                /* Don't enqueue dead transports */
                dprintk("svc: transport %p is dead, not enqueued\n", xprt);
@@ -358,15 +361,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
        }
 
  process:
-       /* Work out whether threads are available */
-       thread_avail = !list_empty(&pool->sp_threads);  /* threads are asleep */
-       if (pool->sp_nwaking >= SVC_MAX_WAKING) {
-               /* too many threads are runnable and trying to wake up */
-               thread_avail = 0;
-               pool->sp_stats.overloads_avoided++;
-       }
-
-       if (thread_avail) {
+       if (!list_empty(&pool->sp_threads)) {
                rqstp = list_entry(pool->sp_threads.next,
                                   struct svc_rqst,
                                   rq_list);
@@ -381,8 +376,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
                svc_xprt_get(xprt);
                rqstp->rq_reserved = serv->sv_max_mesg;
                atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
-               rqstp->rq_waking = 1;
-               pool->sp_nwaking++;
                pool->sp_stats.threads_woken++;
                BUG_ON(xprt->xpt_pool != pool);
                wake_up(&rqstp->rq_wait);
@@ -651,11 +644,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
                return -EINTR;
 
        spin_lock_bh(&pool->sp_lock);
-       if (rqstp->rq_waking) {
-               rqstp->rq_waking = 0;
-               pool->sp_nwaking--;
-               BUG_ON(pool->sp_nwaking < 0);
-       }
        xprt = svc_xprt_dequeue(pool);
        if (xprt) {
                rqstp->rq_xprt = xprt;
@@ -1204,16 +1192,15 @@ static int svc_pool_stats_show(struct seq_file *m, void *p)
        struct svc_pool *pool = p;
 
        if (p == SEQ_START_TOKEN) {
-               seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken overloads-avoided threads-timedout\n");
+               seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken threads-timedout\n");
                return 0;
        }
 
-       seq_printf(m, "%u %lu %lu %lu %lu %lu\n",
+       seq_printf(m, "%u %lu %lu %lu %lu\n",
                pool->sp_id,
                pool->sp_stats.packets,
                pool->sp_stats.sockets_queued,
                pool->sp_stats.threads_woken,
-               pool->sp_stats.overloads_avoided,
                pool->sp_stats.threads_timedout);
 
        return 0;