}
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
- int budget, int polling)
+ int budget)
{
struct be_adapter *adapter = rxo->adapter;
struct be_queue_info *rx_cq = &rxo->cq;
goto loop_continue;
}
- /* Don't do gro when we're busy_polling */
- if (do_gro(rxcp) && polling != BUSY_POLLING)
+ if (do_gro(rxcp))
be_rx_compl_process_gro(rxo, napi, rxcp);
else
be_rx_compl_process(rxo, napi, rxcp);
}
}
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static inline bool be_lock_napi(struct be_eq_obj *eqo)
-{
- bool status = true;
-
- spin_lock(&eqo->lock); /* BH is already disabled */
- if (eqo->state & BE_EQ_LOCKED) {
- WARN_ON(eqo->state & BE_EQ_NAPI);
- eqo->state |= BE_EQ_NAPI_YIELD;
- status = false;
- } else {
- eqo->state = BE_EQ_NAPI;
- }
- spin_unlock(&eqo->lock);
- return status;
-}
-
-static inline void be_unlock_napi(struct be_eq_obj *eqo)
-{
- spin_lock(&eqo->lock); /* BH is already disabled */
-
- WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
- eqo->state = BE_EQ_IDLE;
-
- spin_unlock(&eqo->lock);
-}
-
-static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
-{
- bool status = true;
-
- spin_lock_bh(&eqo->lock);
- if (eqo->state & BE_EQ_LOCKED) {
- eqo->state |= BE_EQ_POLL_YIELD;
- status = false;
- } else {
- eqo->state |= BE_EQ_POLL;
- }
- spin_unlock_bh(&eqo->lock);
- return status;
-}
-
-static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
-{
- spin_lock_bh(&eqo->lock);
-
- WARN_ON(eqo->state & (BE_EQ_NAPI));
- eqo->state = BE_EQ_IDLE;
-
- spin_unlock_bh(&eqo->lock);
-}
-
-static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
-{
- spin_lock_init(&eqo->lock);
- eqo->state = BE_EQ_IDLE;
-}
-
-static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
-{
- local_bh_disable();
-
- /* It's enough to just acquire napi lock on the eqo to stop
- * be_busy_poll() from processing any queueus.
- */
- while (!be_lock_napi(eqo))
- mdelay(1);
-
- local_bh_enable();
-}
-
-#else /* CONFIG_NET_RX_BUSY_POLL */
-
-static inline bool be_lock_napi(struct be_eq_obj *eqo)
-{
- return true;
-}
-
-static inline void be_unlock_napi(struct be_eq_obj *eqo)
-{
-}
-
-static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
-{
- return false;
-}
-
-static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
-{
-}
-
-static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
-{
-}
-
-static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
-{
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
int be_poll(struct napi_struct *napi, int budget)
{
struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
for_all_tx_queues_on_eq(adapter, eqo, txo, i)
be_process_tx(adapter, txo, i);
- if (be_lock_napi(eqo)) {
- /* This loop will iterate twice for EQ0 in which
- * completions of the last RXQ (default one) are also processed
- * For other EQs the loop iterates only once
- */
- for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
- work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
- max_work = max(work, max_work);
- }
- be_unlock_napi(eqo);
- } else {
- max_work = budget;
+ /* This loop will iterate twice for EQ0, in which
+ * completions of the last RXQ (the default one) are also processed.
+ * For other EQs the loop iterates only once.
+ */
+ for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
+ work = be_process_rx(rxo, napi, budget);
+ max_work = max(work, max_work);
}
if (is_mcc_eqo(eqo))
return max_work;
}
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static int be_busy_poll(struct napi_struct *napi)
-{
- struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
- struct be_adapter *adapter = eqo->adapter;
- struct be_rx_obj *rxo;
- int i, work = 0;
-
- if (!be_lock_busy_poll(eqo))
- return LL_FLUSH_BUSY;
-
- for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
- work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
- if (work)
- break;
- }
-
- be_unlock_busy_poll(eqo);
- return work;
-}
-#endif
-
void be_detect_error(struct be_adapter *adapter)
{
u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
for_all_evt_queues(adapter, eqo, i) {
napi_disable(&eqo->napi);
- be_disable_busy_poll(eqo);
}
adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
}
for_all_evt_queues(adapter, eqo, i) {
napi_enable(&eqo->napi);
- be_enable_busy_poll(eqo);
be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
}
adapter->flags |= BE_FLAGS_NAPI_ENABLED;
#endif
.ndo_bridge_setlink = be_ndo_bridge_setlink,
.ndo_bridge_getlink = be_ndo_bridge_getlink,
-#ifdef CONFIG_NET_RX_BUSY_POLL
- .ndo_busy_poll = be_busy_poll,
-#endif
.ndo_udp_tunnel_add = be_add_vxlan_port,
.ndo_udp_tunnel_del = be_del_vxlan_port,
.ndo_features_check = be_features_check,