From: Eric Dumazet
Date: Tue, 15 Nov 2016 18:15:13 +0000 (-0800)
Subject: net: busy-poll: return busypolling status to drivers
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=364b6055738b4c752c30ccaaf25c624e69d76195;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git

net: busy-poll: return busypolling status to drivers

NAPI drivers use napi_complete_done() or napi_complete() when they have
drained the RX ring, right before re-enabling device interrupts.

In busy polling, we can avoid interrupts being delivered, since we are
polling the RX ring in a controlled loop.

Drivers can choose to use the napi_complete_done() return value to
reduce interrupt overhead while busy polling is active.

This is optional; legacy drivers should work fine even if not updated.

Signed-off-by: Eric Dumazet
Cc: Willem de Bruijn
Cc: Adam Belay
Cc: Tariq Toukan
Cc: Yuval Mintz
Cc: Ariel Elior
Signed-off-by: David S. Miller
---

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e71de66e3792..bcddf951ccee 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -463,16 +463,17 @@ static inline bool napi_reschedule(struct napi_struct *napi)
 	return false;
 }
 
-void __napi_complete(struct napi_struct *n);
-void napi_complete_done(struct napi_struct *n, int work_done);
+bool __napi_complete(struct napi_struct *n);
+bool napi_complete_done(struct napi_struct *n, int work_done);
 /**
  *	napi_complete - NAPI processing complete
  *	@n: NAPI context
  *
  * Mark NAPI processing as complete.
  * Consider using napi_complete_done() instead.
+ * Return false if device should avoid rearming interrupts.
  */
-static inline void napi_complete(struct napi_struct *n)
+static inline bool napi_complete(struct napi_struct *n)
 {
 	return napi_complete_done(n, 0);
 }
diff --git a/net/core/dev.c b/net/core/dev.c
index 369dcc8efc01..edba9efeb2e9 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4898,7 +4898,7 @@ void __napi_schedule_irqoff(struct napi_struct *n)
 }
 EXPORT_SYMBOL(__napi_schedule_irqoff);
 
-void __napi_complete(struct napi_struct *n)
+bool __napi_complete(struct napi_struct *n)
 {
 	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
 
@@ -4906,15 +4906,16 @@ void __napi_complete(struct napi_struct *n)
 	 * napi_complete_done().
 	 */
 	if (unlikely(test_bit(NAPI_STATE_IN_BUSY_POLL, &n->state)))
-		return;
+		return false;
 
 	list_del_init(&n->poll_list);
 	smp_mb__before_atomic();
 	clear_bit(NAPI_STATE_SCHED, &n->state);
+	return true;
 }
 EXPORT_SYMBOL(__napi_complete);
 
-void napi_complete_done(struct napi_struct *n, int work_done)
+bool napi_complete_done(struct napi_struct *n, int work_done)
 {
 	unsigned long flags;
 
@@ -4926,7 +4927,7 @@ void napi_complete_done(struct napi_struct *n, int work_done)
 	 */
 	if (unlikely(n->state & (NAPIF_STATE_NPSVC |
 				 NAPIF_STATE_IN_BUSY_POLL)))
-		return;
+		return false;
 
 	if (n->gro_list) {
 		unsigned long timeout = 0;
@@ -4948,6 +4949,7 @@ void napi_complete_done(struct napi_struct *n, int work_done)
 		__napi_complete(n);
 		local_irq_restore(flags);
 	}
+	return true;
 }
 EXPORT_SYMBOL(napi_complete_done);
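
Below is a minimal sketch, not part of the patch, of how an updated driver's
NAPI poll callback could consume the new return value. mydrv_priv,
mydrv_clean_rx() and mydrv_enable_irq() are hypothetical placeholders for
driver-specific state and helpers.

#include <linux/netdevice.h>

/* Hypothetical driver-private state; only the napi context is shown. */
struct mydrv_priv {
	struct napi_struct napi;
	/* ... real drivers keep ring pointers, register mappings, etc. here ... */
};

/* Placeholder helpers standing in for real RX-ring and IRQ handling. */
static int mydrv_clean_rx(struct mydrv_priv *priv, int budget)
{
	return 0;	/* pretend no packets were processed */
}

static void mydrv_enable_irq(struct mydrv_priv *priv)
{
	/* a real driver would unmask its RX interrupt in device registers */
}

static int mydrv_poll(struct napi_struct *napi, int budget)
{
	struct mydrv_priv *priv = container_of(napi, struct mydrv_priv, napi);
	int work_done = mydrv_clean_rx(priv, budget);

	if (work_done < budget) {
		/* RX ring drained.  napi_complete_done() now reports whether
		 * NAPI really completed: it returns false while busy polling
		 * owns this context, so the driver can skip re-arming the
		 * device interrupt and avoid the extra IRQ overhead.
		 */
		if (napi_complete_done(napi, work_done))
			mydrv_enable_irq(priv);
	}
	return work_done;
}

Legacy drivers that ignore the return value keep their old behaviour of
unconditionally re-enabling interrupts, which remains correct, just less
efficient while busy polling is active.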