		weight = n->weight;
-		work = n->poll(n, weight);
+		/* This NAPI_STATE_SCHED test is for avoiding a race
+		 * with netpoll's poll_napi(). Only the entity which
+		 * obtains the lock and sees NAPI_STATE_SCHED set will
+		 * actually make the ->poll() call. Therefore we avoid
+		 * accidentally calling ->poll() when NAPI is not scheduled.
+		 */
+		work = 0;
+		if (test_bit(NAPI_STATE_SCHED, &n->state))
+			work = n->poll(n, weight);
		WARN_ON_ONCE(work > weight);
* network adapter, forcing superfluous retries and possibly timeouts.
* Thus, we set our budget to greater than 1.
*/
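+/* Poll one device's NAPI instance on behalf of netpoll, processing at most
+ * "budget" packets.  Returns the budget left over after the ->poll() call.
+ */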
+static int poll_one_napi(struct netpoll_info *npinfo,
+			 struct napi_struct *napi, int budget)
+{
+	int work;
+
+	/* net_rx_action's ->poll() invocations and ours are
+	 * synchronized by this test which is only made while
+	 * holding the napi->poll_lock.
+	 */
+	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
+		return budget;
+
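+	/* For the duration of the ->poll() call, flag NETPOLL_RX_DROP and
+	 * bump the trapped count so the netpoll RX hooks can tell that this
+	 * poll is running from netpoll context; both are cleared afterwards.
+	 */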
+	npinfo->rx_flags |= NETPOLL_RX_DROP;
+	atomic_inc(&trapped);
+
+	work = napi->poll(napi, budget);
+
+	atomic_dec(&trapped);
+	npinfo->rx_flags &= ~NETPOLL_RX_DROP;
+
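+	/* Hand back whatever budget this device's poll did not use. */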
+	return budget - work;
+}
+
static void poll_napi(struct netpoll *np)
{
	struct netpoll_info *npinfo = np->dev->npinfo;
	struct napi_struct *napi;
	int budget = 16;

	list_for_each_entry(napi, &np->dev->napi_list, dev_list) {
-		if (test_bit(NAPI_STATE_SCHED, &napi->state) &&
-		    napi->poll_owner != smp_processor_id() &&
+		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
-			npinfo->rx_flags |= NETPOLL_RX_DROP;
-			atomic_inc(&trapped);
-
-			napi->poll(napi, budget);
-
-			atomic_dec(&trapped);
-			npinfo->rx_flags &= ~NETPOLL_RX_DROP;
+			budget = poll_one_napi(npinfo, napi, budget);
			spin_unlock(&napi->poll_lock);
+
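+			/* No budget left: skip the remaining devices on this pass. */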
+			if (!budget)
+				break;
		}
	}
}