/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 */

#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/prefetch.h>

static DEFINE_MUTEX(bnad_fwimg_mutex);

static uint bnad_msix_disable;
module_param(bnad_msix_disable, uint, 0444);
MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");

static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");

u32 bnad_rxqs_per_cq = 2;

static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

#define BNAD_TX_UNMAPQ_DEPTH	(bnad->txq_depth * 2)

#define BNAD_RX_UNMAPQ_DEPTH	(bnad->rxq_depth)

#define BNAD_GET_MBOX_IRQ(_bnad)				\
	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
	 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
	 ((_bnad)->pcidev->irq))

#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth)	\
do {								\
	(_res_info)->res_type = BNA_RES_T_MEM;			\
	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
	(_res_info)->res_u.mem_info.num = (_num);		\
	(_res_info)->res_u.mem_info.len =			\
		sizeof(struct bnad_unmap_q) +			\
		(sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \
} while (0)

#define BNAD_TXRX_SYNC_MDELAY	250	/* 250 msecs */
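
/*
 * Note: the unmap-queue length computed by BNAD_FILL_UNMAPQ_MEM_REQ
 * assumes struct bnad_unmap_q ends with a one-element array of
 * struct bnad_skb_unmap (the usual trailing-array idiom), which is why
 * one element's worth of space is subtracted from (_depth).
 */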

/*
 * Reinitialize completions in CQ, once Rx is taken down
 */
static void
bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bna_cq_entry *cmpl, *next_cmpl;
	unsigned int wi_range, wis = 0, ccb_prod = 0;
	int i;

	BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
			    wi_range);

	for (i = 0; i < ccb->q_depth; i++) {
		wis++;
		if (likely(--wi_range))
			next_cmpl = cmpl + 1;
		else {
			BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
			wis = 0;
			BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
					    next_cmpl, wi_range);
		}
		cmpl->valid = 0;
		cmpl = next_cmpl;
	}
}
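
/*
 * The loop above only clears each completion's 'valid' bit.  It is called
 * from the Rx post path (bnad_cb_rx_post()) so that completions left over
 * from before an Rx teardown are not mistaken for fresh ones once the
 * queue is restarted.
 */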

/*
 * Frees all pending Tx Bufs
 * At this point no activity is expected on the Q,
 * so DMA unmap & freeing is fine.
 */
static void
bnad_free_all_txbufs(struct bnad *bnad,
		 struct bna_tcb *tcb)
{
	u32		unmap_cons;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff		*skb = NULL;
	int			i;

	unmap_array = unmap_q->unmap_array;

	unmap_cons = 0;
	while (unmap_cons < unmap_q->q_depth) {
		skb = unmap_array[unmap_cons].skb;
		if (!skb) {
			unmap_cons++;
			continue;
		}
		unmap_array[unmap_cons].skb = NULL;

		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr), skb_headlen(skb),
				 DMA_TO_DEVICE);

		dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
		if (++unmap_cons >= unmap_q->q_depth)
			break;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			dma_unmap_page(&bnad->pcidev->dev,
				       dma_unmap_addr(&unmap_array[unmap_cons],
						      dma_addr),
				       skb_shinfo(skb)->frags[i].size,
				       DMA_TO_DEVICE);
			dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
					   0);
			if (++unmap_cons >= unmap_q->q_depth)
				break;
		}
		dev_kfree_skb_any(skb);
	}
}
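
/*
 * bnad_free_all_txbufs() above walks the whole unmap queue and is only
 * used on teardown paths (tcb destroy / tx resume); the completion
 * handler below, bnad_free_txbufs(), frees just the entries the hardware
 * has already consumed.
 */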

/* Data Path Handlers */

/*
 * bnad_free_txbufs : Frees the Tx bufs on Tx completion
 * Can be called in a) Interrupt context
 *		    b) Sending context
 *		    c) Tasklet context
 */
static u32
bnad_free_txbufs(struct bnad *bnad,
		 struct bna_tcb *tcb)
{
	u32		sent_packets = 0, sent_bytes = 0;
	u16		wis, unmap_cons, updated_hw_cons;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff		*skb;
	int i;

	/*
	 * Just return if TX is stopped. This check is useful
	 * when bnad_free_txbufs() runs out of a tasklet scheduled
	 * before bnad_cb_tx_cleanup() cleared BNAD_TXQ_TX_STARTED bit
	 * but this routine runs actually after the cleanup has been
	 * executed.
	 */
	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
		return 0;

	updated_hw_cons = *(tcb->hw_consumer_index);

	wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
				 updated_hw_cons, tcb->q_depth);

	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));

	unmap_array = unmap_q->unmap_array;
	unmap_cons = unmap_q->consumer_index;

	prefetch(&unmap_array[unmap_cons + 1]);
	while (wis) {
		skb = unmap_array[unmap_cons].skb;

		unmap_array[unmap_cons].skb = NULL;

		sent_packets++;
		sent_bytes += skb->len;
		wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);

		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr), skb_headlen(skb),
				 DMA_TO_DEVICE);
		dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
		BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);

		prefetch(&unmap_array[unmap_cons + 1]);
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			prefetch(&unmap_array[unmap_cons + 1]);

			dma_unmap_page(&bnad->pcidev->dev,
				       dma_unmap_addr(&unmap_array[unmap_cons],
						      dma_addr),
				       skb_shinfo(skb)->frags[i].size,
				       DMA_TO_DEVICE);
			dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
					   0);
			BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
		}
		dev_kfree_skb_any(skb);
	}

	/* Update consumer pointers. */
	tcb->consumer_index = updated_hw_cons;
	unmap_q->consumer_index = unmap_cons;

	tcb->txq->tx_packets += sent_packets;
	tcb->txq->tx_bytes += sent_bytes;

	return sent_packets;
}
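
/*
 * The count returned by bnad_free_txbufs() is what its callers (the free
 * tasklet, bnad_tx() and the xmit path) feed to bna_ib_ack(), so the
 * interrupt block doorbell reflects how many completions were consumed.
 */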

/* Tx Free Tasklet function */
/* Frees for all the tcb's in all the Tx's */
/*
 * Scheduled from sending context, so that
 * the fat Tx lock is not held for too long
 * in the sending context.
 */
static void
bnad_tx_free_tasklet(unsigned long bnad_ptr)
{
	struct bnad *bnad = (struct bnad *)bnad_ptr;
	struct bna_tcb *tcb;
	u32		acked = 0;
	int			i, j;

	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			tcb = bnad->tx_info[i].tcb[j];
			if (!tcb)
				continue;
			if (((u16) (*tcb->hw_consumer_index) !=
				tcb->consumer_index) &&
				(!test_and_set_bit(BNAD_TXQ_FREE_SENT,
						   &tcb->flags))) {
				acked = bnad_free_txbufs(bnad, tcb);
				if (likely(test_bit(BNAD_TXQ_TX_STARTED,
					&tcb->flags)))
					bna_ib_ack(tcb->i_dbell, acked);
				smp_mb__before_clear_bit();
				clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
			}
			if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
						&tcb->flags)))
				continue;
			if (netif_queue_stopped(bnad->netdev)) {
				if (acked && netif_carrier_ok(bnad->netdev) &&
					BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
						BNAD_NETIF_WAKE_THRESHOLD) {
					netif_wake_queue(bnad->netdev);
					/* Counters for individual TxQs? */
					BNAD_UPDATE_CTR(bnad,
						netif_queue_wakeup);
				}
			}
		}
	}
}

static void
bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct net_device *netdev = bnad->netdev;
	u32 sent = 0;

	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		return;

	sent = bnad_free_txbufs(bnad, tcb);

	if (netif_queue_stopped(netdev) &&
	    netif_carrier_ok(netdev) &&
	    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
		    BNAD_NETIF_WAKE_THRESHOLD) {
		if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
			netif_wake_queue(netdev);
			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
		}
	}

	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		bna_ib_ack(tcb->i_dbell, sent);

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
}
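
/*
 * Note that both bnad_tx() and the free tasklet re-enable the stack's
 * queue only once BNA_QE_FREE_CNT() reports at least
 * BNAD_NETIF_WAKE_THRESHOLD free entries, which avoids waking the queue
 * just to stop it again a few transmits later.
 */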

/* MSIX Tx Completion Handler */
static irqreturn_t
bnad_msix_tx(int irq, void *data)
{
	struct bna_tcb *tcb = (struct bna_tcb *)data;
	struct bnad *bnad = tcb->bnad;

	bnad_tx(bnad, tcb);

	return IRQ_HANDLED;
}

static void
bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	rcb->producer_index = 0;
	rcb->consumer_index = 0;

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
}

static void
bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb;
	int unmap_cons;

	unmap_q = rcb->unmap_q;
	unmap_array = unmap_q->unmap_array;
	for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
		skb = unmap_array[unmap_cons].skb;
		if (!skb)
			continue;
		unmap_array[unmap_cons].skb = NULL;
		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr),
				 rcb->rxq->buffer_size,
				 DMA_FROM_DEVICE);
		dev_kfree_skb(skb);
	}
	bnad_reset_rcb(bnad, rcb);
}

static void
bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
	u16 to_alloc, alloced, unmap_prod, wi_range;
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct bna_rxq_entry *rxent;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	alloced = 0;
	to_alloc =
		BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);

	unmap_array = unmap_q->unmap_array;
	unmap_prod = unmap_q->producer_index;

	BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);

	while (to_alloc--) {
		if (!wi_range)
			BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
					     wi_range);
		skb = netdev_alloc_skb_ip_align(bnad->netdev,
						rcb->rxq->buffer_size);
		if (unlikely(!skb)) {
			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
			goto finishing;
		}
		unmap_array[unmap_prod].skb = skb;
		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
					  rcb->rxq->buffer_size,
					  DMA_FROM_DEVICE);
		dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
				   dma_addr);
		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);

		rxent++;
		wi_range--;
		alloced++;
	}

finishing:
	if (likely(alloced)) {
		unmap_q->producer_index = unmap_prod;
		rcb->producer_index = unmap_prod;
		smp_mb();

		if (likely(test_bit(BNAD_RXQ_STARTED, &rcb->flags)))
			bna_rxq_prod_indx_doorbell(rcb);
	}
}

static void
bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
		if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
			 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
			bnad_alloc_n_post_rxbufs(bnad, rcb);
		smp_mb__before_clear_bit();
		clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
	}
}
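
/*
 * The BNAD_RXQ_REFILL bit taken here is the same one used in
 * bnad_cb_rx_post(), so only one context at a time allocates and posts
 * buffers for a given RCB; the threshold shift means a refill is only
 * attempted once a reasonable number of entries have been freed.
 */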
435 bnad_poll_cq(struct bnad
*bnad
, struct bna_ccb
*ccb
, int budget
)
437 struct bna_cq_entry
*cmpl
, *next_cmpl
;
438 struct bna_rcb
*rcb
= NULL
;
439 unsigned int wi_range
, packets
= 0, wis
= 0;
440 struct bnad_unmap_q
*unmap_q
;
441 struct bnad_skb_unmap
*unmap_array
;
443 u32 flags
, unmap_cons
;
444 struct bna_pkt_rate
*pkt_rt
= &ccb
->pkt_rate
;
445 struct bnad_rx_ctrl
*rx_ctrl
= (struct bnad_rx_ctrl
*)(ccb
->ctrl
);
447 set_bit(BNAD_FP_IN_RX_PATH
, &rx_ctrl
->flags
);
449 if (!test_bit(BNAD_RXQ_STARTED
, &ccb
->rcb
[0]->flags
)) {
450 clear_bit(BNAD_FP_IN_RX_PATH
, &rx_ctrl
->flags
);
454 prefetch(bnad
->netdev
);
455 BNA_CQ_QPGE_PTR_GET(ccb
->producer_index
, ccb
->sw_qpt
, cmpl
,
457 BUG_ON(!(wi_range
<= ccb
->q_depth
));
458 while (cmpl
->valid
&& packets
< budget
) {
460 BNA_UPDATE_PKT_CNT(pkt_rt
, ntohs(cmpl
->length
));
462 if (bna_is_small_rxq(cmpl
->rxq_id
))
467 unmap_q
= rcb
->unmap_q
;
468 unmap_array
= unmap_q
->unmap_array
;
469 unmap_cons
= unmap_q
->consumer_index
;
471 skb
= unmap_array
[unmap_cons
].skb
;
473 unmap_array
[unmap_cons
].skb
= NULL
;
474 dma_unmap_single(&bnad
->pcidev
->dev
,
475 dma_unmap_addr(&unmap_array
[unmap_cons
],
477 rcb
->rxq
->buffer_size
,
479 BNA_QE_INDX_ADD(unmap_q
->consumer_index
, 1, unmap_q
->q_depth
);
481 /* Should be more efficient ? Performance ? */
482 BNA_QE_INDX_ADD(rcb
->consumer_index
, 1, rcb
->q_depth
);
485 if (likely(--wi_range
))
486 next_cmpl
= cmpl
+ 1;
488 BNA_QE_INDX_ADD(ccb
->producer_index
, wis
, ccb
->q_depth
);
490 BNA_CQ_QPGE_PTR_GET(ccb
->producer_index
, ccb
->sw_qpt
,
491 next_cmpl
, wi_range
);
492 BUG_ON(!(wi_range
<= ccb
->q_depth
));
496 flags
= ntohl(cmpl
->flags
);
499 (BNA_CQ_EF_MAC_ERROR
| BNA_CQ_EF_FCS_ERROR
|
500 BNA_CQ_EF_TOO_LONG
))) {
501 dev_kfree_skb_any(skb
);
502 rcb
->rxq
->rx_packets_with_error
++;
506 skb_put(skb
, ntohs(cmpl
->length
));
508 ((bnad
->netdev
->features
& NETIF_F_RXCSUM
) &&
509 (((flags
& BNA_CQ_EF_IPV4
) &&
510 (flags
& BNA_CQ_EF_L3_CKSUM_OK
)) ||
511 (flags
& BNA_CQ_EF_IPV6
)) &&
512 (flags
& (BNA_CQ_EF_TCP
| BNA_CQ_EF_UDP
)) &&
513 (flags
& BNA_CQ_EF_L4_CKSUM_OK
)))
514 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
516 skb_checksum_none_assert(skb
);
518 rcb
->rxq
->rx_packets
++;
519 rcb
->rxq
->rx_bytes
+= skb
->len
;
520 skb
->protocol
= eth_type_trans(skb
, bnad
->netdev
);
522 if (flags
& BNA_CQ_EF_VLAN
)
523 __vlan_hwaccel_put_tag(skb
, ntohs(cmpl
->vlan_tag
));
525 if (skb
->ip_summed
== CHECKSUM_UNNECESSARY
)
526 napi_gro_receive(&rx_ctrl
->napi
, skb
);
528 netif_receive_skb(skb
);
536 BNA_QE_INDX_ADD(ccb
->producer_index
, wis
, ccb
->q_depth
);
539 if (likely(test_bit(BNAD_RXQ_STARTED
, &ccb
->rcb
[0]->flags
)))
540 bna_ib_ack(ccb
->i_dbell
, packets
);
541 bnad_refill_rxq(bnad
, ccb
->rcb
[0]);
543 bnad_refill_rxq(bnad
, ccb
->rcb
[1]);
545 if (likely(test_bit(BNAD_RXQ_STARTED
, &ccb
->rcb
[0]->flags
)))
546 bna_ib_ack(ccb
->i_dbell
, 0);
549 clear_bit(BNAD_FP_IN_RX_PATH
, &rx_ctrl
->flags
);

static void
bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
{
	if (unlikely(!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
		return;

	bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
	bna_ib_ack(ccb->i_dbell, 0);
}

static void
bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
{
	unsigned long flags;

	/* Because of polling context */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_enable_rx_irq_unsafe(ccb);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
	struct napi_struct *napi = &rx_ctrl->napi;

	if (likely(napi_schedule_prep(napi))) {
		bnad_disable_rx_irq(bnad, ccb);
		__napi_schedule(napi);
	}
	BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
}

/* MSIX Rx Path Handler */
static irqreturn_t
bnad_msix_rx(int irq, void *data)
{
	struct bna_ccb *ccb = (struct bna_ccb *)data;
	struct bnad *bnad = ccb->bnad;

	bnad_netif_rx_schedule_poll(bnad, ccb);

	return IRQ_HANDLED;
}
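
/*
 * Standard NAPI hand-off: the MSIX Rx vector only quiets further CQ
 * interrupts (bnad_disable_rx_irq()) and schedules the poll routine;
 * the actual completion processing happens later in bnad_napi_poll_rx()
 * -> bnad_poll_cq() in softirq context.
 */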
600 /* Interrupt handlers */
602 /* Mbox Interrupt Handlers */
604 bnad_msix_mbox_handler(int irq
, void *data
)
608 struct bnad
*bnad
= (struct bnad
*)data
;
610 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED
, &bnad
->run_flags
)))
613 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
615 bna_intr_status_get(&bnad
->bna
, intr_status
);
617 if (BNA_IS_MBOX_ERR_INTR(&bnad
->bna
, intr_status
))
618 bna_mbox_handler(&bnad
->bna
, intr_status
);
620 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
626 bnad_isr(int irq
, void *data
)
631 struct bnad
*bnad
= (struct bnad
*)data
;
632 struct bnad_rx_info
*rx_info
;
633 struct bnad_rx_ctrl
*rx_ctrl
;
634 struct bna_tcb
*tcb
= NULL
;
636 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED
, &bnad
->run_flags
)))
639 bna_intr_status_get(&bnad
->bna
, intr_status
);
641 if (unlikely(!intr_status
))
644 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
646 if (BNA_IS_MBOX_ERR_INTR(&bnad
->bna
, intr_status
))
647 bna_mbox_handler(&bnad
->bna
, intr_status
);
649 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
651 if (!BNA_IS_INTX_DATA_INTR(intr_status
))
654 /* Process data interrupts */
656 for (i
= 0; i
< bnad
->num_tx
; i
++) {
657 for (j
= 0; j
< bnad
->num_txq_per_tx
; j
++) {
658 tcb
= bnad
->tx_info
[i
].tcb
[j
];
659 if (tcb
&& test_bit(BNAD_TXQ_TX_STARTED
, &tcb
->flags
))
660 bnad_tx(bnad
, bnad
->tx_info
[i
].tcb
[j
]);
664 for (i
= 0; i
< bnad
->num_rx
; i
++) {
665 rx_info
= &bnad
->rx_info
[i
];
668 for (j
= 0; j
< bnad
->num_rxp_per_rx
; j
++) {
669 rx_ctrl
= &rx_info
->rx_ctrl
[j
];
671 bnad_netif_rx_schedule_poll(bnad
,
679 * Called in interrupt / callback context
680 * with bna_lock held, so cfg_flags access is OK
683 bnad_enable_mbox_irq(struct bnad
*bnad
)
685 clear_bit(BNAD_RF_MBOX_IRQ_DISABLED
, &bnad
->run_flags
);
687 BNAD_UPDATE_CTR(bnad
, mbox_intr_enabled
);
691 * Called with bnad->bna_lock held b'cos of
692 * bnad->cfg_flags access.
695 bnad_disable_mbox_irq(struct bnad
*bnad
)
697 set_bit(BNAD_RF_MBOX_IRQ_DISABLED
, &bnad
->run_flags
);
699 BNAD_UPDATE_CTR(bnad
, mbox_intr_disabled
);
703 bnad_set_netdev_perm_addr(struct bnad
*bnad
)
705 struct net_device
*netdev
= bnad
->netdev
;
707 memcpy(netdev
->perm_addr
, &bnad
->perm_addr
, netdev
->addr_len
);
708 if (is_zero_ether_addr(netdev
->dev_addr
))
709 memcpy(netdev
->dev_addr
, &bnad
->perm_addr
, netdev
->addr_len
);
712 /* Control Path Handlers */
716 bnad_cb_mbox_intr_enable(struct bnad
*bnad
)
718 bnad_enable_mbox_irq(bnad
);
722 bnad_cb_mbox_intr_disable(struct bnad
*bnad
)
724 bnad_disable_mbox_irq(bnad
);
728 bnad_cb_ioceth_ready(struct bnad
*bnad
)
730 bnad
->bnad_completions
.ioc_comp_status
= BNA_CB_SUCCESS
;
731 complete(&bnad
->bnad_completions
.ioc_comp
);
735 bnad_cb_ioceth_failed(struct bnad
*bnad
)
737 bnad
->bnad_completions
.ioc_comp_status
= BNA_CB_FAIL
;
738 complete(&bnad
->bnad_completions
.ioc_comp
);
742 bnad_cb_ioceth_disabled(struct bnad
*bnad
)
744 bnad
->bnad_completions
.ioc_comp_status
= BNA_CB_SUCCESS
;
745 complete(&bnad
->bnad_completions
.ioc_comp
);
749 bnad_cb_enet_disabled(void *arg
)
751 struct bnad
*bnad
= (struct bnad
*)arg
;
753 netif_carrier_off(bnad
->netdev
);
754 complete(&bnad
->bnad_completions
.enet_comp
);
758 bnad_cb_ethport_link_status(struct bnad
*bnad
,
759 enum bna_link_status link_status
)
763 link_up
= (link_status
== BNA_LINK_UP
) || (link_status
== BNA_CEE_UP
);
765 if (link_status
== BNA_CEE_UP
) {
766 if (!test_bit(BNAD_RF_CEE_RUNNING
, &bnad
->run_flags
))
767 BNAD_UPDATE_CTR(bnad
, cee_toggle
);
768 set_bit(BNAD_RF_CEE_RUNNING
, &bnad
->run_flags
);
770 if (test_bit(BNAD_RF_CEE_RUNNING
, &bnad
->run_flags
))
771 BNAD_UPDATE_CTR(bnad
, cee_toggle
);
772 clear_bit(BNAD_RF_CEE_RUNNING
, &bnad
->run_flags
);
776 if (!netif_carrier_ok(bnad
->netdev
)) {
778 printk(KERN_WARNING
"bna: %s link up\n",
780 netif_carrier_on(bnad
->netdev
);
781 BNAD_UPDATE_CTR(bnad
, link_toggle
);
782 for (tx_id
= 0; tx_id
< bnad
->num_tx
; tx_id
++) {
783 for (tcb_id
= 0; tcb_id
< bnad
->num_txq_per_tx
;
785 struct bna_tcb
*tcb
=
786 bnad
->tx_info
[tx_id
].tcb
[tcb_id
];
793 if (test_bit(BNAD_TXQ_TX_STARTED
,
797 * Transmit Schedule */
798 printk(KERN_INFO
"bna: %s %d "
805 BNAD_UPDATE_CTR(bnad
,
811 BNAD_UPDATE_CTR(bnad
,
818 if (netif_carrier_ok(bnad
->netdev
)) {
819 printk(KERN_WARNING
"bna: %s link down\n",
821 netif_carrier_off(bnad
->netdev
);
822 BNAD_UPDATE_CTR(bnad
, link_toggle
);
828 bnad_cb_tx_disabled(void *arg
, struct bna_tx
*tx
)
830 struct bnad
*bnad
= (struct bnad
*)arg
;
832 complete(&bnad
->bnad_completions
.tx_comp
);
836 bnad_cb_tcb_setup(struct bnad
*bnad
, struct bna_tcb
*tcb
)
838 struct bnad_tx_info
*tx_info
=
839 (struct bnad_tx_info
*)tcb
->txq
->tx
->priv
;
840 struct bnad_unmap_q
*unmap_q
= tcb
->unmap_q
;
842 tx_info
->tcb
[tcb
->id
] = tcb
;
843 unmap_q
->producer_index
= 0;
844 unmap_q
->consumer_index
= 0;
845 unmap_q
->q_depth
= BNAD_TX_UNMAPQ_DEPTH
;
849 bnad_cb_tcb_destroy(struct bnad
*bnad
, struct bna_tcb
*tcb
)
851 struct bnad_tx_info
*tx_info
=
852 (struct bnad_tx_info
*)tcb
->txq
->tx
->priv
;
853 struct bnad_unmap_q
*unmap_q
= tcb
->unmap_q
;
855 while (test_and_set_bit(BNAD_TXQ_FREE_SENT
, &tcb
->flags
))
858 bnad_free_all_txbufs(bnad
, tcb
);
860 unmap_q
->producer_index
= 0;
861 unmap_q
->consumer_index
= 0;
863 smp_mb__before_clear_bit();
864 clear_bit(BNAD_TXQ_FREE_SENT
, &tcb
->flags
);
866 tx_info
->tcb
[tcb
->id
] = NULL
;
870 bnad_cb_rcb_setup(struct bnad
*bnad
, struct bna_rcb
*rcb
)
872 struct bnad_unmap_q
*unmap_q
= rcb
->unmap_q
;
874 unmap_q
->producer_index
= 0;
875 unmap_q
->consumer_index
= 0;
876 unmap_q
->q_depth
= BNAD_RX_UNMAPQ_DEPTH
;
880 bnad_cb_rcb_destroy(struct bnad
*bnad
, struct bna_rcb
*rcb
)
882 bnad_free_all_rxbufs(bnad
, rcb
);
886 bnad_cb_ccb_setup(struct bnad
*bnad
, struct bna_ccb
*ccb
)
888 struct bnad_rx_info
*rx_info
=
889 (struct bnad_rx_info
*)ccb
->cq
->rx
->priv
;
891 rx_info
->rx_ctrl
[ccb
->id
].ccb
= ccb
;
892 ccb
->ctrl
= &rx_info
->rx_ctrl
[ccb
->id
];
896 bnad_cb_ccb_destroy(struct bnad
*bnad
, struct bna_ccb
*ccb
)
898 struct bnad_rx_info
*rx_info
=
899 (struct bnad_rx_info
*)ccb
->cq
->rx
->priv
;
901 rx_info
->rx_ctrl
[ccb
->id
].ccb
= NULL
;
905 bnad_cb_tx_stall(struct bnad
*bnad
, struct bna_tx
*tx
)
907 struct bnad_tx_info
*tx_info
=
908 (struct bnad_tx_info
*)tx
->priv
;
913 for (i
= 0; i
< BNAD_MAX_TXQ_PER_TX
; i
++) {
914 tcb
= tx_info
->tcb
[i
];
918 clear_bit(BNAD_TXQ_TX_STARTED
, &tcb
->flags
);
919 netif_stop_subqueue(bnad
->netdev
, txq_id
);
920 printk(KERN_INFO
"bna: %s %d TXQ_STOPPED\n",
921 bnad
->netdev
->name
, txq_id
);
926 bnad_cb_tx_resume(struct bnad
*bnad
, struct bna_tx
*tx
)
928 struct bnad_tx_info
*tx_info
= (struct bnad_tx_info
*)tx
->priv
;
930 struct bnad_unmap_q
*unmap_q
;
934 for (i
= 0; i
< BNAD_MAX_TXQ_PER_TX
; i
++) {
935 tcb
= tx_info
->tcb
[i
];
940 unmap_q
= tcb
->unmap_q
;
942 if (test_bit(BNAD_TXQ_TX_STARTED
, &tcb
->flags
))
945 while (test_and_set_bit(BNAD_TXQ_FREE_SENT
, &tcb
->flags
))
948 bnad_free_all_txbufs(bnad
, tcb
);
950 unmap_q
->producer_index
= 0;
951 unmap_q
->consumer_index
= 0;
953 smp_mb__before_clear_bit();
954 clear_bit(BNAD_TXQ_FREE_SENT
, &tcb
->flags
);
956 set_bit(BNAD_TXQ_TX_STARTED
, &tcb
->flags
);
958 if (netif_carrier_ok(bnad
->netdev
)) {
959 printk(KERN_INFO
"bna: %s %d TXQ_STARTED\n",
960 bnad
->netdev
->name
, txq_id
);
961 netif_wake_subqueue(bnad
->netdev
, txq_id
);
962 BNAD_UPDATE_CTR(bnad
, netif_queue_wakeup
);
967 * Workaround for first ioceth enable failure & we
968 * get a 0 MAC address. We try to get the MAC address
971 if (is_zero_ether_addr(&bnad
->perm_addr
.mac
[0])) {
972 bna_enet_perm_mac_get(&bnad
->bna
.enet
, &bnad
->perm_addr
);
973 bnad_set_netdev_perm_addr(bnad
);
978 bnad_cb_tx_cleanup(struct bnad
*bnad
, struct bna_tx
*tx
)
980 struct bnad_tx_info
*tx_info
= (struct bnad_tx_info
*)tx
->priv
;
984 for (i
= 0; i
< BNAD_MAX_TXQ_PER_TX
; i
++) {
985 tcb
= tx_info
->tcb
[i
];
990 mdelay(BNAD_TXRX_SYNC_MDELAY
);
991 bna_tx_cleanup_complete(tx
);
995 bnad_cb_rx_cleanup(struct bnad
*bnad
, struct bna_rx
*rx
)
997 struct bnad_rx_info
*rx_info
= (struct bnad_rx_info
*)rx
->priv
;
999 struct bnad_rx_ctrl
*rx_ctrl
;
1002 mdelay(BNAD_TXRX_SYNC_MDELAY
);
1004 for (i
= 0; i
< BNAD_MAX_RXPS_PER_RX
; i
++) {
1005 rx_ctrl
= &rx_info
->rx_ctrl
[i
];
1010 clear_bit(BNAD_RXQ_STARTED
, &ccb
->rcb
[0]->flags
);
1013 clear_bit(BNAD_RXQ_STARTED
, &ccb
->rcb
[1]->flags
);
1015 while (test_bit(BNAD_FP_IN_RX_PATH
, &rx_ctrl
->flags
))
1019 bna_rx_cleanup_complete(rx
);
1023 bnad_cb_rx_post(struct bnad
*bnad
, struct bna_rx
*rx
)
1025 struct bnad_rx_info
*rx_info
= (struct bnad_rx_info
*)rx
->priv
;
1026 struct bna_ccb
*ccb
;
1027 struct bna_rcb
*rcb
;
1028 struct bnad_rx_ctrl
*rx_ctrl
;
1029 struct bnad_unmap_q
*unmap_q
;
1033 for (i
= 0; i
< BNAD_MAX_RXPS_PER_RX
; i
++) {
1034 rx_ctrl
= &rx_info
->rx_ctrl
[i
];
1039 bnad_cq_cmpl_init(bnad
, ccb
);
1041 for (j
= 0; j
< BNAD_MAX_RXQ_PER_RXP
; j
++) {
1045 bnad_free_all_rxbufs(bnad
, rcb
);
1047 set_bit(BNAD_RXQ_STARTED
, &rcb
->flags
);
1048 unmap_q
= rcb
->unmap_q
;
1050 /* Now allocate & post buffers for this RCB */
1051 /* !!Allocation in callback context */
1052 if (!test_and_set_bit(BNAD_RXQ_REFILL
, &rcb
->flags
)) {
1053 if (BNA_QE_FREE_CNT(unmap_q
, unmap_q
->q_depth
)
1054 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT
)
1055 bnad_alloc_n_post_rxbufs(bnad
, rcb
);
1056 smp_mb__before_clear_bit();
1057 clear_bit(BNAD_RXQ_REFILL
, &rcb
->flags
);
1064 bnad_cb_rx_disabled(void *arg
, struct bna_rx
*rx
)
1066 struct bnad
*bnad
= (struct bnad
*)arg
;
1068 complete(&bnad
->bnad_completions
.rx_comp
);
1072 bnad_cb_rx_mcast_add(struct bnad
*bnad
, struct bna_rx
*rx
)
1074 bnad
->bnad_completions
.mcast_comp_status
= BNA_CB_SUCCESS
;
1075 complete(&bnad
->bnad_completions
.mcast_comp
);
1079 bnad_cb_stats_get(struct bnad
*bnad
, enum bna_cb_status status
,
1080 struct bna_stats
*stats
)
1082 if (status
== BNA_CB_SUCCESS
)
1083 BNAD_UPDATE_CTR(bnad
, hw_stats_updates
);
1085 if (!netif_running(bnad
->netdev
) ||
1086 !test_bit(BNAD_RF_STATS_TIMER_RUNNING
, &bnad
->run_flags
))
1089 mod_timer(&bnad
->stats_timer
,
1090 jiffies
+ msecs_to_jiffies(BNAD_STATS_TIMER_FREQ
));
1094 bnad_cb_enet_mtu_set(struct bnad
*bnad
)
1096 bnad
->bnad_completions
.mtu_comp_status
= BNA_CB_SUCCESS
;
1097 complete(&bnad
->bnad_completions
.mtu_comp
);
1100 /* Resource allocation, free functions */
1103 bnad_mem_free(struct bnad
*bnad
,
1104 struct bna_mem_info
*mem_info
)
1109 if (mem_info
->mdl
== NULL
)
1112 for (i
= 0; i
< mem_info
->num
; i
++) {
1113 if (mem_info
->mdl
[i
].kva
!= NULL
) {
1114 if (mem_info
->mem_type
== BNA_MEM_T_DMA
) {
1115 BNA_GET_DMA_ADDR(&(mem_info
->mdl
[i
].dma
),
1117 dma_free_coherent(&bnad
->pcidev
->dev
,
1118 mem_info
->mdl
[i
].len
,
1119 mem_info
->mdl
[i
].kva
, dma_pa
);
1121 kfree(mem_info
->mdl
[i
].kva
);
1124 kfree(mem_info
->mdl
);
1125 mem_info
->mdl
= NULL
;
1129 bnad_mem_alloc(struct bnad
*bnad
,
1130 struct bna_mem_info
*mem_info
)
1135 if ((mem_info
->num
== 0) || (mem_info
->len
== 0)) {
1136 mem_info
->mdl
= NULL
;
1140 mem_info
->mdl
= kcalloc(mem_info
->num
, sizeof(struct bna_mem_descr
),
1142 if (mem_info
->mdl
== NULL
)
1145 if (mem_info
->mem_type
== BNA_MEM_T_DMA
) {
1146 for (i
= 0; i
< mem_info
->num
; i
++) {
1147 mem_info
->mdl
[i
].len
= mem_info
->len
;
1148 mem_info
->mdl
[i
].kva
=
1149 dma_alloc_coherent(&bnad
->pcidev
->dev
,
1150 mem_info
->len
, &dma_pa
,
1153 if (mem_info
->mdl
[i
].kva
== NULL
)
1156 BNA_SET_DMA_ADDR(dma_pa
,
1157 &(mem_info
->mdl
[i
].dma
));
1160 for (i
= 0; i
< mem_info
->num
; i
++) {
1161 mem_info
->mdl
[i
].len
= mem_info
->len
;
1162 mem_info
->mdl
[i
].kva
= kzalloc(mem_info
->len
,
1164 if (mem_info
->mdl
[i
].kva
== NULL
)
1172 bnad_mem_free(bnad
, mem_info
);
1176 /* Free IRQ for Mailbox */
1178 bnad_mbox_irq_free(struct bnad
*bnad
)
1181 unsigned long flags
;
1183 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1184 bnad_disable_mbox_irq(bnad
);
1185 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1187 irq
= BNAD_GET_MBOX_IRQ(bnad
);
1188 free_irq(irq
, bnad
);
1192 * Allocates IRQ for Mailbox, but keep it disabled
1193 * This will be enabled once we get the mbox enable callback
1197 bnad_mbox_irq_alloc(struct bnad
*bnad
)
1200 unsigned long irq_flags
, flags
;
1202 irq_handler_t irq_handler
;
1204 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1205 if (bnad
->cfg_flags
& BNAD_CF_MSIX
) {
1206 irq_handler
= (irq_handler_t
)bnad_msix_mbox_handler
;
1207 irq
= bnad
->msix_table
[BNAD_MAILBOX_MSIX_INDEX
].vector
;
1210 irq_handler
= (irq_handler_t
)bnad_isr
;
1211 irq
= bnad
->pcidev
->irq
;
1212 irq_flags
= IRQF_SHARED
;
1215 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1216 sprintf(bnad
->mbox_irq_name
, "%s", BNAD_NAME
);
1219 * Set the Mbox IRQ disable flag, so that the IRQ handler
1220 * called from request_irq() for SHARED IRQs do not execute
1222 set_bit(BNAD_RF_MBOX_IRQ_DISABLED
, &bnad
->run_flags
);
1224 BNAD_UPDATE_CTR(bnad
, mbox_intr_disabled
);
1226 err
= request_irq(irq
, irq_handler
, irq_flags
,
1227 bnad
->mbox_irq_name
, bnad
);
1233 bnad_txrx_irq_free(struct bnad
*bnad
, struct bna_intr_info
*intr_info
)
1235 kfree(intr_info
->idl
);
1236 intr_info
->idl
= NULL
;
1239 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1241 bnad_txrx_irq_alloc(struct bnad
*bnad
, enum bnad_intr_source src
,
1242 u32 txrx_id
, struct bna_intr_info
*intr_info
)
1244 int i
, vector_start
= 0;
1246 unsigned long flags
;
1248 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1249 cfg_flags
= bnad
->cfg_flags
;
1250 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1252 if (cfg_flags
& BNAD_CF_MSIX
) {
1253 intr_info
->intr_type
= BNA_INTR_T_MSIX
;
1254 intr_info
->idl
= kcalloc(intr_info
->num
,
1255 sizeof(struct bna_intr_descr
),
1257 if (!intr_info
->idl
)
1262 vector_start
= BNAD_MAILBOX_MSIX_VECTORS
+ txrx_id
;
1266 vector_start
= BNAD_MAILBOX_MSIX_VECTORS
+
1267 (bnad
->num_tx
* bnad
->num_txq_per_tx
) +
1275 for (i
= 0; i
< intr_info
->num
; i
++)
1276 intr_info
->idl
[i
].vector
= vector_start
+ i
;
1278 intr_info
->intr_type
= BNA_INTR_T_INTX
;
1280 intr_info
->idl
= kcalloc(intr_info
->num
,
1281 sizeof(struct bna_intr_descr
),
1283 if (!intr_info
->idl
)
1288 intr_info
->idl
[0].vector
= BNAD_INTX_TX_IB_BITMASK
;
1292 intr_info
->idl
[0].vector
= BNAD_INTX_RX_IB_BITMASK
;
1300 * NOTE: Should be called for MSIX only
1301 * Unregisters Tx MSIX vector(s) from the kernel
1304 bnad_tx_msix_unregister(struct bnad
*bnad
, struct bnad_tx_info
*tx_info
,
1310 for (i
= 0; i
< num_txqs
; i
++) {
1311 if (tx_info
->tcb
[i
] == NULL
)
1314 vector_num
= tx_info
->tcb
[i
]->intr_vector
;
1315 free_irq(bnad
->msix_table
[vector_num
].vector
, tx_info
->tcb
[i
]);
1320 * NOTE: Should be called for MSIX only
1321 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1324 bnad_tx_msix_register(struct bnad
*bnad
, struct bnad_tx_info
*tx_info
,
1325 u32 tx_id
, int num_txqs
)
1331 for (i
= 0; i
< num_txqs
; i
++) {
1332 vector_num
= tx_info
->tcb
[i
]->intr_vector
;
1333 sprintf(tx_info
->tcb
[i
]->name
, "%s TXQ %d", bnad
->netdev
->name
,
1334 tx_id
+ tx_info
->tcb
[i
]->id
);
1335 err
= request_irq(bnad
->msix_table
[vector_num
].vector
,
1336 (irq_handler_t
)bnad_msix_tx
, 0,
1337 tx_info
->tcb
[i
]->name
,
1347 bnad_tx_msix_unregister(bnad
, tx_info
, (i
- 1));
1352 * NOTE: Should be called for MSIX only
1353 * Unregisters Rx MSIX vector(s) from the kernel
1356 bnad_rx_msix_unregister(struct bnad
*bnad
, struct bnad_rx_info
*rx_info
,
1362 for (i
= 0; i
< num_rxps
; i
++) {
1363 if (rx_info
->rx_ctrl
[i
].ccb
== NULL
)
1366 vector_num
= rx_info
->rx_ctrl
[i
].ccb
->intr_vector
;
1367 free_irq(bnad
->msix_table
[vector_num
].vector
,
1368 rx_info
->rx_ctrl
[i
].ccb
);
1373 * NOTE: Should be called for MSIX only
1374 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
1377 bnad_rx_msix_register(struct bnad
*bnad
, struct bnad_rx_info
*rx_info
,
1378 u32 rx_id
, int num_rxps
)
1384 for (i
= 0; i
< num_rxps
; i
++) {
1385 vector_num
= rx_info
->rx_ctrl
[i
].ccb
->intr_vector
;
1386 sprintf(rx_info
->rx_ctrl
[i
].ccb
->name
, "%s CQ %d",
1388 rx_id
+ rx_info
->rx_ctrl
[i
].ccb
->id
);
1389 err
= request_irq(bnad
->msix_table
[vector_num
].vector
,
1390 (irq_handler_t
)bnad_msix_rx
, 0,
1391 rx_info
->rx_ctrl
[i
].ccb
->name
,
1392 rx_info
->rx_ctrl
[i
].ccb
);
1401 bnad_rx_msix_unregister(bnad
, rx_info
, (i
- 1));
1405 /* Free Tx object Resources */
1407 bnad_tx_res_free(struct bnad
*bnad
, struct bna_res_info
*res_info
)
1411 for (i
= 0; i
< BNA_TX_RES_T_MAX
; i
++) {
1412 if (res_info
[i
].res_type
== BNA_RES_T_MEM
)
1413 bnad_mem_free(bnad
, &res_info
[i
].res_u
.mem_info
);
1414 else if (res_info
[i
].res_type
== BNA_RES_T_INTR
)
1415 bnad_txrx_irq_free(bnad
, &res_info
[i
].res_u
.intr_info
);
1419 /* Allocates memory and interrupt resources for Tx object */
1421 bnad_tx_res_alloc(struct bnad
*bnad
, struct bna_res_info
*res_info
,
1426 for (i
= 0; i
< BNA_TX_RES_T_MAX
; i
++) {
1427 if (res_info
[i
].res_type
== BNA_RES_T_MEM
)
1428 err
= bnad_mem_alloc(bnad
,
1429 &res_info
[i
].res_u
.mem_info
);
1430 else if (res_info
[i
].res_type
== BNA_RES_T_INTR
)
1431 err
= bnad_txrx_irq_alloc(bnad
, BNAD_INTR_TX
, tx_id
,
1432 &res_info
[i
].res_u
.intr_info
);
1439 bnad_tx_res_free(bnad
, res_info
);
1443 /* Free Rx object Resources */
1445 bnad_rx_res_free(struct bnad
*bnad
, struct bna_res_info
*res_info
)
1449 for (i
= 0; i
< BNA_RX_RES_T_MAX
; i
++) {
1450 if (res_info
[i
].res_type
== BNA_RES_T_MEM
)
1451 bnad_mem_free(bnad
, &res_info
[i
].res_u
.mem_info
);
1452 else if (res_info
[i
].res_type
== BNA_RES_T_INTR
)
1453 bnad_txrx_irq_free(bnad
, &res_info
[i
].res_u
.intr_info
);
1457 /* Allocates memory and interrupt resources for Rx object */
1459 bnad_rx_res_alloc(struct bnad
*bnad
, struct bna_res_info
*res_info
,
1464 /* All memory needs to be allocated before setup_ccbs */
1465 for (i
= 0; i
< BNA_RX_RES_T_MAX
; i
++) {
1466 if (res_info
[i
].res_type
== BNA_RES_T_MEM
)
1467 err
= bnad_mem_alloc(bnad
,
1468 &res_info
[i
].res_u
.mem_info
);
1469 else if (res_info
[i
].res_type
== BNA_RES_T_INTR
)
1470 err
= bnad_txrx_irq_alloc(bnad
, BNAD_INTR_RX
, rx_id
,
1471 &res_info
[i
].res_u
.intr_info
);
1478 bnad_rx_res_free(bnad
, res_info
);
1482 /* Timer callbacks */
1485 bnad_ioc_timeout(unsigned long data
)
1487 struct bnad
*bnad
= (struct bnad
*)data
;
1488 unsigned long flags
;
1490 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1491 bfa_nw_ioc_timeout((void *) &bnad
->bna
.ioceth
.ioc
);
1492 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1496 bnad_ioc_hb_check(unsigned long data
)
1498 struct bnad
*bnad
= (struct bnad
*)data
;
1499 unsigned long flags
;
1501 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1502 bfa_nw_ioc_hb_check((void *) &bnad
->bna
.ioceth
.ioc
);
1503 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1507 bnad_iocpf_timeout(unsigned long data
)
1509 struct bnad
*bnad
= (struct bnad
*)data
;
1510 unsigned long flags
;
1512 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1513 bfa_nw_iocpf_timeout((void *) &bnad
->bna
.ioceth
.ioc
);
1514 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1518 bnad_iocpf_sem_timeout(unsigned long data
)
1520 struct bnad
*bnad
= (struct bnad
*)data
;
1521 unsigned long flags
;
1523 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1524 bfa_nw_iocpf_sem_timeout((void *) &bnad
->bna
.ioceth
.ioc
);
1525 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1529 * All timer routines use bnad->bna_lock to protect against
1530 * the following race, which may occur in case of no locking:
1538 /* b) Dynamic Interrupt Moderation Timer */
1540 bnad_dim_timeout(unsigned long data
)
1542 struct bnad
*bnad
= (struct bnad
*)data
;
1543 struct bnad_rx_info
*rx_info
;
1544 struct bnad_rx_ctrl
*rx_ctrl
;
1546 unsigned long flags
;
1548 if (!netif_carrier_ok(bnad
->netdev
))
1551 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1552 for (i
= 0; i
< bnad
->num_rx
; i
++) {
1553 rx_info
= &bnad
->rx_info
[i
];
1556 for (j
= 0; j
< bnad
->num_rxp_per_rx
; j
++) {
1557 rx_ctrl
= &rx_info
->rx_ctrl
[j
];
1560 bna_rx_dim_update(rx_ctrl
->ccb
);
1564 /* Check for BNAD_CF_DIM_ENABLED; this does not eliminate the race */
1565 if (test_bit(BNAD_RF_DIM_TIMER_RUNNING
, &bnad
->run_flags
))
1566 mod_timer(&bnad
->dim_timer
,
1567 jiffies
+ msecs_to_jiffies(BNAD_DIM_TIMER_FREQ
));
1568 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1571 /* c) Statistics Timer */
1573 bnad_stats_timeout(unsigned long data
)
1575 struct bnad
*bnad
= (struct bnad
*)data
;
1576 unsigned long flags
;
1578 if (!netif_running(bnad
->netdev
) ||
1579 !test_bit(BNAD_RF_STATS_TIMER_RUNNING
, &bnad
->run_flags
))
1582 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1583 bna_hw_stats_get(&bnad
->bna
);
1584 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1588 * Set up timer for DIM
1589 * Called with bnad->bna_lock held
1592 bnad_dim_timer_start(struct bnad
*bnad
)
1594 if (bnad
->cfg_flags
& BNAD_CF_DIM_ENABLED
&&
1595 !test_bit(BNAD_RF_DIM_TIMER_RUNNING
, &bnad
->run_flags
)) {
1596 setup_timer(&bnad
->dim_timer
, bnad_dim_timeout
,
1597 (unsigned long)bnad
);
1598 set_bit(BNAD_RF_DIM_TIMER_RUNNING
, &bnad
->run_flags
);
1599 mod_timer(&bnad
->dim_timer
,
1600 jiffies
+ msecs_to_jiffies(BNAD_DIM_TIMER_FREQ
));
1605 * Set up timer for statistics
1606 * Called with mutex_lock(&bnad->conf_mutex) held
1609 bnad_stats_timer_start(struct bnad
*bnad
)
1611 unsigned long flags
;
1613 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1614 if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING
, &bnad
->run_flags
)) {
1615 setup_timer(&bnad
->stats_timer
, bnad_stats_timeout
,
1616 (unsigned long)bnad
);
1617 mod_timer(&bnad
->stats_timer
,
1618 jiffies
+ msecs_to_jiffies(BNAD_STATS_TIMER_FREQ
));
1620 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1624 * Stops the stats timer
1625 * Called with mutex_lock(&bnad->conf_mutex) held
1628 bnad_stats_timer_stop(struct bnad
*bnad
)
1631 unsigned long flags
;
1633 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1634 if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING
, &bnad
->run_flags
))
1636 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1638 del_timer_sync(&bnad
->stats_timer
);
1644 bnad_netdev_mc_list_get(struct net_device
*netdev
, u8
*mc_list
)
1646 int i
= 1; /* Index 0 has broadcast address */
1647 struct netdev_hw_addr
*mc_addr
;
1649 netdev_for_each_mc_addr(mc_addr
, netdev
) {
1650 memcpy(&mc_list
[i
* ETH_ALEN
], &mc_addr
->addr
[0],
1657 bnad_napi_poll_rx(struct napi_struct
*napi
, int budget
)
1659 struct bnad_rx_ctrl
*rx_ctrl
=
1660 container_of(napi
, struct bnad_rx_ctrl
, napi
);
1661 struct bna_ccb
*ccb
;
1669 if (!netif_carrier_ok(bnad
->netdev
))
1672 rcvd
= bnad_poll_cq(bnad
, ccb
, budget
);
1677 napi_complete((napi
));
1679 BNAD_UPDATE_CTR(bnad
, netif_rx_complete
);
1681 bnad_enable_rx_irq(bnad
, ccb
);
1686 bnad_napi_enable(struct bnad
*bnad
, u32 rx_id
)
1688 struct bnad_rx_ctrl
*rx_ctrl
;
1691 /* Initialize & enable NAPI */
1692 for (i
= 0; i
< bnad
->num_rxp_per_rx
; i
++) {
1693 rx_ctrl
= &bnad
->rx_info
[rx_id
].rx_ctrl
[i
];
1695 netif_napi_add(bnad
->netdev
, &rx_ctrl
->napi
,
1696 bnad_napi_poll_rx
, 64);
1698 napi_enable(&rx_ctrl
->napi
);
1703 bnad_napi_disable(struct bnad
*bnad
, u32 rx_id
)
1707 /* First disable and then clean up */
1708 for (i
= 0; i
< bnad
->num_rxp_per_rx
; i
++) {
1709 napi_disable(&bnad
->rx_info
[rx_id
].rx_ctrl
[i
].napi
);
1710 netif_napi_del(&bnad
->rx_info
[rx_id
].rx_ctrl
[i
].napi
);
1714 /* Should be held with conf_lock held */
1716 bnad_cleanup_tx(struct bnad
*bnad
, u32 tx_id
)
1718 struct bnad_tx_info
*tx_info
= &bnad
->tx_info
[tx_id
];
1719 struct bna_res_info
*res_info
= &bnad
->tx_res_info
[tx_id
].res_info
[0];
1720 unsigned long flags
;
1725 init_completion(&bnad
->bnad_completions
.tx_comp
);
1726 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1727 bna_tx_disable(tx_info
->tx
, BNA_HARD_CLEANUP
, bnad_cb_tx_disabled
);
1728 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1729 wait_for_completion(&bnad
->bnad_completions
.tx_comp
);
1731 if (tx_info
->tcb
[0]->intr_type
== BNA_INTR_T_MSIX
)
1732 bnad_tx_msix_unregister(bnad
, tx_info
,
1733 bnad
->num_txq_per_tx
);
1735 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1736 bna_tx_destroy(tx_info
->tx
);
1737 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1743 tasklet_kill(&bnad
->tx_free_tasklet
);
1745 bnad_tx_res_free(bnad
, res_info
);
1748 /* Should be held with conf_lock held */
1750 bnad_setup_tx(struct bnad
*bnad
, u32 tx_id
)
1753 struct bnad_tx_info
*tx_info
= &bnad
->tx_info
[tx_id
];
1754 struct bna_res_info
*res_info
= &bnad
->tx_res_info
[tx_id
].res_info
[0];
1755 struct bna_intr_info
*intr_info
=
1756 &res_info
[BNA_TX_RES_INTR_T_TXCMPL
].res_u
.intr_info
;
1757 struct bna_tx_config
*tx_config
= &bnad
->tx_config
[tx_id
];
1758 struct bna_tx_event_cbfn tx_cbfn
;
1760 unsigned long flags
;
1762 tx_info
->tx_id
= tx_id
;
1764 /* Initialize the Tx object configuration */
1765 tx_config
->num_txq
= bnad
->num_txq_per_tx
;
1766 tx_config
->txq_depth
= bnad
->txq_depth
;
1767 tx_config
->tx_type
= BNA_TX_T_REGULAR
;
1768 tx_config
->coalescing_timeo
= bnad
->tx_coalescing_timeo
;
1770 /* Initialize the tx event handlers */
1771 tx_cbfn
.tcb_setup_cbfn
= bnad_cb_tcb_setup
;
1772 tx_cbfn
.tcb_destroy_cbfn
= bnad_cb_tcb_destroy
;
1773 tx_cbfn
.tx_stall_cbfn
= bnad_cb_tx_stall
;
1774 tx_cbfn
.tx_resume_cbfn
= bnad_cb_tx_resume
;
1775 tx_cbfn
.tx_cleanup_cbfn
= bnad_cb_tx_cleanup
;
1777 /* Get BNA's resource requirement for one tx object */
1778 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1779 bna_tx_res_req(bnad
->num_txq_per_tx
,
1780 bnad
->txq_depth
, res_info
);
1781 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1783 /* Fill Unmap Q memory requirements */
1784 BNAD_FILL_UNMAPQ_MEM_REQ(
1785 &res_info
[BNA_TX_RES_MEM_T_UNMAPQ
],
1786 bnad
->num_txq_per_tx
,
1787 BNAD_TX_UNMAPQ_DEPTH
);
1789 /* Allocate resources */
1790 err
= bnad_tx_res_alloc(bnad
, res_info
, tx_id
);
1794 /* Ask BNA to create one Tx object, supplying required resources */
1795 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1796 tx
= bna_tx_create(&bnad
->bna
, bnad
, tx_config
, &tx_cbfn
, res_info
,
1798 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1803 /* Register ISR for the Tx object */
1804 if (intr_info
->intr_type
== BNA_INTR_T_MSIX
) {
1805 err
= bnad_tx_msix_register(bnad
, tx_info
,
1806 tx_id
, bnad
->num_txq_per_tx
);
1811 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1813 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1818 bnad_tx_res_free(bnad
, res_info
);
1822 /* Setup the rx config for bna_rx_create */
1823 /* bnad decides the configuration */
1825 bnad_init_rx_config(struct bnad
*bnad
, struct bna_rx_config
*rx_config
)
1827 rx_config
->rx_type
= BNA_RX_T_REGULAR
;
1828 rx_config
->num_paths
= bnad
->num_rxp_per_rx
;
1829 rx_config
->coalescing_timeo
= bnad
->rx_coalescing_timeo
;
1831 if (bnad
->num_rxp_per_rx
> 1) {
1832 rx_config
->rss_status
= BNA_STATUS_T_ENABLED
;
1833 rx_config
->rss_config
.hash_type
=
1834 (BFI_ENET_RSS_IPV6
|
1835 BFI_ENET_RSS_IPV6_TCP
|
1837 BFI_ENET_RSS_IPV4_TCP
);
1838 rx_config
->rss_config
.hash_mask
=
1839 bnad
->num_rxp_per_rx
- 1;
1840 get_random_bytes(rx_config
->rss_config
.toeplitz_hash_key
,
1841 sizeof(rx_config
->rss_config
.toeplitz_hash_key
));
1843 rx_config
->rss_status
= BNA_STATUS_T_DISABLED
;
1844 memset(&rx_config
->rss_config
, 0,
1845 sizeof(rx_config
->rss_config
));
1847 rx_config
->rxp_type
= BNA_RXP_SLR
;
1848 rx_config
->q_depth
= bnad
->rxq_depth
;
1850 rx_config
->small_buff_size
= BFI_SMALL_RXBUF_SIZE
;
1852 rx_config
->vlan_strip_status
= BNA_STATUS_T_ENABLED
;
1855 /* Called with mutex_lock(&bnad->conf_mutex) held */
1857 bnad_cleanup_rx(struct bnad
*bnad
, u32 rx_id
)
1859 struct bnad_rx_info
*rx_info
= &bnad
->rx_info
[rx_id
];
1860 struct bna_rx_config
*rx_config
= &bnad
->rx_config
[rx_id
];
1861 struct bna_res_info
*res_info
= &bnad
->rx_res_info
[rx_id
].res_info
[0];
1862 unsigned long flags
;
1863 int dim_timer_del
= 0;
1869 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1870 dim_timer_del
= bnad_dim_timer_running(bnad
);
1872 clear_bit(BNAD_RF_DIM_TIMER_RUNNING
, &bnad
->run_flags
);
1873 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1875 del_timer_sync(&bnad
->dim_timer
);
1878 bnad_napi_disable(bnad
, rx_id
);
1880 init_completion(&bnad
->bnad_completions
.rx_comp
);
1881 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1882 bna_rx_disable(rx_info
->rx
, BNA_HARD_CLEANUP
, bnad_cb_rx_disabled
);
1883 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1884 wait_for_completion(&bnad
->bnad_completions
.rx_comp
);
1886 if (rx_info
->rx_ctrl
[0].ccb
->intr_type
== BNA_INTR_T_MSIX
)
1887 bnad_rx_msix_unregister(bnad
, rx_info
, rx_config
->num_paths
);
1889 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1890 bna_rx_destroy(rx_info
->rx
);
1891 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1895 bnad_rx_res_free(bnad
, res_info
);
1898 /* Called with mutex_lock(&bnad->conf_mutex) held */
1900 bnad_setup_rx(struct bnad
*bnad
, u32 rx_id
)
1903 struct bnad_rx_info
*rx_info
= &bnad
->rx_info
[rx_id
];
1904 struct bna_res_info
*res_info
= &bnad
->rx_res_info
[rx_id
].res_info
[0];
1905 struct bna_intr_info
*intr_info
=
1906 &res_info
[BNA_RX_RES_T_INTR
].res_u
.intr_info
;
1907 struct bna_rx_config
*rx_config
= &bnad
->rx_config
[rx_id
];
1908 struct bna_rx_event_cbfn rx_cbfn
;
1910 unsigned long flags
;
1912 rx_info
->rx_id
= rx_id
;
1914 /* Initialize the Rx object configuration */
1915 bnad_init_rx_config(bnad
, rx_config
);
1917 /* Initialize the Rx event handlers */
1918 rx_cbfn
.rcb_setup_cbfn
= bnad_cb_rcb_setup
;
1919 rx_cbfn
.rcb_destroy_cbfn
= bnad_cb_rcb_destroy
;
1920 rx_cbfn
.ccb_setup_cbfn
= bnad_cb_ccb_setup
;
1921 rx_cbfn
.ccb_destroy_cbfn
= bnad_cb_ccb_destroy
;
1922 rx_cbfn
.rx_cleanup_cbfn
= bnad_cb_rx_cleanup
;
1923 rx_cbfn
.rx_post_cbfn
= bnad_cb_rx_post
;
1925 /* Get BNA's resource requirement for one Rx object */
1926 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1927 bna_rx_res_req(rx_config
, res_info
);
1928 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1930 /* Fill Unmap Q memory requirements */
1931 BNAD_FILL_UNMAPQ_MEM_REQ(
1932 &res_info
[BNA_RX_RES_MEM_T_UNMAPQ
],
1933 rx_config
->num_paths
+
1934 ((rx_config
->rxp_type
== BNA_RXP_SINGLE
) ? 0 :
1935 rx_config
->num_paths
), BNAD_RX_UNMAPQ_DEPTH
);
1937 /* Allocate resource */
1938 err
= bnad_rx_res_alloc(bnad
, res_info
, rx_id
);
1942 /* Ask BNA to create one Rx object, supplying required resources */
1943 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1944 rx
= bna_rx_create(&bnad
->bna
, bnad
, rx_config
, &rx_cbfn
, res_info
,
1946 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1951 /* Register ISR for the Rx object */
1952 if (intr_info
->intr_type
== BNA_INTR_T_MSIX
) {
1953 err
= bnad_rx_msix_register(bnad
, rx_info
, rx_id
,
1954 rx_config
->num_paths
);
1960 bnad_napi_enable(bnad
, rx_id
);
1962 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1964 /* Set up Dynamic Interrupt Moderation Vector */
1965 if (bnad
->cfg_flags
& BNAD_CF_DIM_ENABLED
)
1966 bna_rx_dim_reconfig(&bnad
->bna
, bna_napi_dim_vector
);
1968 /* Enable VLAN filtering only on the default Rx */
1969 bna_rx_vlanfilter_enable(rx
);
1971 /* Start the DIM timer */
1972 bnad_dim_timer_start(bnad
);
1976 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1981 bnad_cleanup_rx(bnad
, rx_id
);
1985 /* Called with conf_lock & bnad->bna_lock held */
1987 bnad_tx_coalescing_timeo_set(struct bnad
*bnad
)
1989 struct bnad_tx_info
*tx_info
;
1991 tx_info
= &bnad
->tx_info
[0];
1995 bna_tx_coalescing_timeo_set(tx_info
->tx
, bnad
->tx_coalescing_timeo
);
1998 /* Called with conf_lock & bnad->bna_lock held */
2000 bnad_rx_coalescing_timeo_set(struct bnad
*bnad
)
2002 struct bnad_rx_info
*rx_info
;
2005 for (i
= 0; i
< bnad
->num_rx
; i
++) {
2006 rx_info
= &bnad
->rx_info
[i
];
2009 bna_rx_coalescing_timeo_set(rx_info
->rx
,
2010 bnad
->rx_coalescing_timeo
);
2015 * Called with bnad->bna_lock held
2018 bnad_mac_addr_set_locked(struct bnad
*bnad
, u8
*mac_addr
)
2022 if (!is_valid_ether_addr(mac_addr
))
2023 return -EADDRNOTAVAIL
;
2025 /* If datapath is down, pretend everything went through */
2026 if (!bnad
->rx_info
[0].rx
)
2029 ret
= bna_rx_ucast_set(bnad
->rx_info
[0].rx
, mac_addr
, NULL
);
2030 if (ret
!= BNA_CB_SUCCESS
)
2031 return -EADDRNOTAVAIL
;
2036 /* Should be called with conf_lock held */
2038 bnad_enable_default_bcast(struct bnad
*bnad
)
2040 struct bnad_rx_info
*rx_info
= &bnad
->rx_info
[0];
2042 unsigned long flags
;
2044 init_completion(&bnad
->bnad_completions
.mcast_comp
);
2046 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
2047 ret
= bna_rx_mcast_add(rx_info
->rx
, (u8
*)bnad_bcast_addr
,
2048 bnad_cb_rx_mcast_add
);
2049 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2051 if (ret
== BNA_CB_SUCCESS
)
2052 wait_for_completion(&bnad
->bnad_completions
.mcast_comp
);
2056 if (bnad
->bnad_completions
.mcast_comp_status
!= BNA_CB_SUCCESS
)
2062 /* Called with bnad_conf_lock() held */
2064 bnad_restore_vlans(struct bnad
*bnad
, u32 rx_id
)
2067 unsigned long flags
;
2069 BUG_ON(!(VLAN_N_VID
== BFI_ENET_VLAN_ID_MAX
));
2071 for_each_set_bit(vid
, bnad
->active_vlans
, VLAN_N_VID
) {
2072 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
2073 bna_rx_vlan_add(bnad
->rx_info
[rx_id
].rx
, vid
);
2074 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2078 /* Statistics utilities */
2080 bnad_netdev_qstats_fill(struct bnad
*bnad
, struct rtnl_link_stats64
*stats
)
2084 for (i
= 0; i
< bnad
->num_rx
; i
++) {
2085 for (j
= 0; j
< bnad
->num_rxp_per_rx
; j
++) {
2086 if (bnad
->rx_info
[i
].rx_ctrl
[j
].ccb
) {
2087 stats
->rx_packets
+= bnad
->rx_info
[i
].
2088 rx_ctrl
[j
].ccb
->rcb
[0]->rxq
->rx_packets
;
2089 stats
->rx_bytes
+= bnad
->rx_info
[i
].
2090 rx_ctrl
[j
].ccb
->rcb
[0]->rxq
->rx_bytes
;
2091 if (bnad
->rx_info
[i
].rx_ctrl
[j
].ccb
->rcb
[1] &&
2092 bnad
->rx_info
[i
].rx_ctrl
[j
].ccb
->
2094 stats
->rx_packets
+=
2095 bnad
->rx_info
[i
].rx_ctrl
[j
].
2096 ccb
->rcb
[1]->rxq
->rx_packets
;
2098 bnad
->rx_info
[i
].rx_ctrl
[j
].
2099 ccb
->rcb
[1]->rxq
->rx_bytes
;
2104 for (i
= 0; i
< bnad
->num_tx
; i
++) {
2105 for (j
= 0; j
< bnad
->num_txq_per_tx
; j
++) {
2106 if (bnad
->tx_info
[i
].tcb
[j
]) {
2107 stats
->tx_packets
+=
2108 bnad
->tx_info
[i
].tcb
[j
]->txq
->tx_packets
;
2110 bnad
->tx_info
[i
].tcb
[j
]->txq
->tx_bytes
;
2117 * Must be called with the bna_lock held.
2120 bnad_netdev_hwstats_fill(struct bnad
*bnad
, struct rtnl_link_stats64
*stats
)
2122 struct bfi_enet_stats_mac
*mac_stats
;
2126 mac_stats
= &bnad
->stats
.bna_stats
->hw_stats
.mac_stats
;
2128 mac_stats
->rx_fcs_error
+ mac_stats
->rx_alignment_error
+
2129 mac_stats
->rx_frame_length_error
+ mac_stats
->rx_code_error
+
2130 mac_stats
->rx_undersize
;
2131 stats
->tx_errors
= mac_stats
->tx_fcs_error
+
2132 mac_stats
->tx_undersize
;
2133 stats
->rx_dropped
= mac_stats
->rx_drop
;
2134 stats
->tx_dropped
= mac_stats
->tx_drop
;
2135 stats
->multicast
= mac_stats
->rx_multicast
;
2136 stats
->collisions
= mac_stats
->tx_total_collision
;
2138 stats
->rx_length_errors
= mac_stats
->rx_frame_length_error
;
2140 /* receive ring buffer overflow ?? */
2142 stats
->rx_crc_errors
= mac_stats
->rx_fcs_error
;
2143 stats
->rx_frame_errors
= mac_stats
->rx_alignment_error
;
2144 /* recv'r fifo overrun */
2145 bmap
= bna_rx_rid_mask(&bnad
->bna
);
2146 for (i
= 0; bmap
; i
++) {
2148 stats
->rx_fifo_errors
+=
2149 bnad
->stats
.bna_stats
->
2150 hw_stats
.rxf_stats
[i
].frame_drops
;
2158 bnad_mbox_irq_sync(struct bnad
*bnad
)
2161 unsigned long flags
;
2163 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
2164 if (bnad
->cfg_flags
& BNAD_CF_MSIX
)
2165 irq
= bnad
->msix_table
[BNAD_MAILBOX_MSIX_INDEX
].vector
;
2167 irq
= bnad
->pcidev
->irq
;
2168 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2170 synchronize_irq(irq
);
2173 /* Utility used by bnad_start_xmit, for doing TSO */
2175 bnad_tso_prepare(struct bnad
*bnad
, struct sk_buff
*skb
)
2179 /* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 is defined since 2.6.18. */
2180 BUG_ON(!(skb_shinfo(skb
)->gso_type
== SKB_GSO_TCPV4
||
2181 skb_shinfo(skb
)->gso_type
== SKB_GSO_TCPV6
));
2182 if (skb_header_cloned(skb
)) {
2183 err
= pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
);
2185 BNAD_UPDATE_CTR(bnad
, tso_err
);
2191 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2192 * excluding the length field.
2194 if (skb
->protocol
== htons(ETH_P_IP
)) {
2195 struct iphdr
*iph
= ip_hdr(skb
);
2197 /* Do we really need these? */
2201 tcp_hdr(skb
)->check
=
2202 ~csum_tcpudp_magic(iph
->saddr
, iph
->daddr
, 0,
2204 BNAD_UPDATE_CTR(bnad
, tso4
);
2206 struct ipv6hdr
*ipv6h
= ipv6_hdr(skb
);
2208 BUG_ON(!(skb
->protocol
== htons(ETH_P_IPV6
)));
2209 ipv6h
->payload_len
= 0;
2210 tcp_hdr(skb
)->check
=
2211 ~csum_ipv6_magic(&ipv6h
->saddr
, &ipv6h
->daddr
, 0,
2213 BNAD_UPDATE_CTR(bnad
, tso6
);
2220 * Initialize Q numbers depending on Rx Paths
2221 * Called with bnad->bna_lock held, because of cfg_flags
2225 bnad_q_num_init(struct bnad
*bnad
)
2229 rxps
= min((uint
)num_online_cpus(),
2230 (uint
)(BNAD_MAX_RXS
* BNAD_MAX_RXPS_PER_RX
));
2232 if (!(bnad
->cfg_flags
& BNAD_CF_MSIX
))
2233 rxps
= 1; /* INTx */
2237 bnad
->num_rxp_per_rx
= rxps
;
2238 bnad
->num_txq_per_tx
= BNAD_TXQ_NUM
;
2242 * Adjusts the Q numbers, given a number of msix vectors
2243 * Give preference to RSS as opposed to Tx priority Queues,
2244 * in such a case, just use 1 Tx Q
2245 * Called with bnad->bna_lock held b'cos of cfg_flags access
2248 bnad_q_num_adjust(struct bnad
*bnad
, int msix_vectors
, int temp
)
2250 bnad
->num_txq_per_tx
= 1;
2251 if ((msix_vectors
>= (bnad
->num_tx
* bnad
->num_txq_per_tx
) +
2252 bnad_rxqs_per_cq
+ BNAD_MAILBOX_MSIX_VECTORS
) &&
2253 (bnad
->cfg_flags
& BNAD_CF_MSIX
)) {
2254 bnad
->num_rxp_per_rx
= msix_vectors
-
2255 (bnad
->num_tx
* bnad
->num_txq_per_tx
) -
2256 BNAD_MAILBOX_MSIX_VECTORS
;
2258 bnad
->num_rxp_per_rx
= 1;
2261 /* Enable / disable ioceth */
2263 bnad_ioceth_disable(struct bnad
*bnad
)
2265 unsigned long flags
;
2268 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
2269 init_completion(&bnad
->bnad_completions
.ioc_comp
);
2270 bna_ioceth_disable(&bnad
->bna
.ioceth
, BNA_HARD_CLEANUP
);
2271 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2273 wait_for_completion_timeout(&bnad
->bnad_completions
.ioc_comp
,
2274 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT
));
2276 err
= bnad
->bnad_completions
.ioc_comp_status
;
2281 bnad_ioceth_enable(struct bnad
*bnad
)
2284 unsigned long flags
;
2286 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
2287 init_completion(&bnad
->bnad_completions
.ioc_comp
);
2288 bnad
->bnad_completions
.ioc_comp_status
= BNA_CB_WAITING
;
2289 bna_ioceth_enable(&bnad
->bna
.ioceth
);
2290 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2292 wait_for_completion_timeout(&bnad
->bnad_completions
.ioc_comp
,
2293 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT
));
2295 err
= bnad
->bnad_completions
.ioc_comp_status
;
2300 /* Free BNA resources */
2302 bnad_res_free(struct bnad
*bnad
, struct bna_res_info
*res_info
,
2307 for (i
= 0; i
< res_val_max
; i
++)
2308 bnad_mem_free(bnad
, &res_info
[i
].res_u
.mem_info
);
2311 /* Allocates memory and interrupt resources for BNA */
2313 bnad_res_alloc(struct bnad
*bnad
, struct bna_res_info
*res_info
,
2318 for (i
= 0; i
< res_val_max
; i
++) {
2319 err
= bnad_mem_alloc(bnad
, &res_info
[i
].res_u
.mem_info
);
2326 bnad_res_free(bnad
, res_info
, res_val_max
);
2330 /* Interrupt enable / disable */
2332 bnad_enable_msix(struct bnad
*bnad
)
2335 unsigned long flags
;
2337 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
2338 if (!(bnad
->cfg_flags
& BNAD_CF_MSIX
)) {
2339 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2342 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2344 if (bnad
->msix_table
)
2348 kcalloc(bnad
->msix_num
, sizeof(struct msix_entry
), GFP_KERNEL
);
2350 if (!bnad
->msix_table
)
2353 for (i
= 0; i
< bnad
->msix_num
; i
++)
2354 bnad
->msix_table
[i
].entry
= i
;
2356 ret
= pci_enable_msix(bnad
->pcidev
, bnad
->msix_table
, bnad
->msix_num
);
2358 /* Not enough MSI-X vectors. */
2360 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
2361 /* ret = #of vectors that we got */
2362 bnad_q_num_adjust(bnad
, ret
, 0);
2363 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2365 bnad
->msix_num
= (bnad
->num_tx
* bnad
->num_txq_per_tx
)
2367 * bnad
->num_rxp_per_rx
) +
2368 BNAD_MAILBOX_MSIX_VECTORS
;
2370 if (bnad
->msix_num
> ret
)
2373 /* Try once more with adjusted numbers */
2374 /* If this fails, fall back to INTx */
2375 ret
= pci_enable_msix(bnad
->pcidev
, bnad
->msix_table
,
2383 pci_intx(bnad
->pcidev
, 0);
2389 kfree(bnad
->msix_table
);
2390 bnad
->msix_table
= NULL
;
2392 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
2393 bnad
->cfg_flags
&= ~BNAD_CF_MSIX
;
2394 bnad_q_num_init(bnad
);
2395 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2399 bnad_disable_msix(struct bnad
*bnad
)
2402 unsigned long flags
;
2404 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
2405 cfg_flags
= bnad
->cfg_flags
;
2406 if (bnad
->cfg_flags
& BNAD_CF_MSIX
)
2407 bnad
->cfg_flags
&= ~BNAD_CF_MSIX
;
2408 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2410 if (cfg_flags
& BNAD_CF_MSIX
) {
2411 pci_disable_msix(bnad
->pcidev
);
2412 kfree(bnad
->msix_table
);
2413 bnad
->msix_table
= NULL
;
2417 /* Netdev entry points */
2419 bnad_open(struct net_device
*netdev
)
2422 struct bnad
*bnad
= netdev_priv(netdev
);
2423 struct bna_pause_config pause_config
;
2425 unsigned long flags
;
2427 mutex_lock(&bnad
->conf_mutex
);
2430 err
= bnad_setup_tx(bnad
, 0);
2435 err
= bnad_setup_rx(bnad
, 0);
2440 pause_config
.tx_pause
= 0;
2441 pause_config
.rx_pause
= 0;
2443 mtu
= ETH_HLEN
+ VLAN_HLEN
+ bnad
->netdev
->mtu
+ ETH_FCS_LEN
;
2445 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
2446 bna_enet_mtu_set(&bnad
->bna
.enet
, mtu
, NULL
);
2447 bna_enet_pause_config(&bnad
->bna
.enet
, &pause_config
, NULL
);
2448 bna_enet_enable(&bnad
->bna
.enet
);
2449 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2451 /* Enable broadcast */
2452 bnad_enable_default_bcast(bnad
);
2454 /* Restore VLANs, if any */
2455 bnad_restore_vlans(bnad
, 0);
2457 /* Set the UCAST address */
2458 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
2459 bnad_mac_addr_set_locked(bnad
, netdev
->dev_addr
);
2460 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2462 /* Start the stats timer */
2463 bnad_stats_timer_start(bnad
);
2465 mutex_unlock(&bnad
->conf_mutex
);
2470 bnad_cleanup_tx(bnad
, 0);
2473 mutex_unlock(&bnad
->conf_mutex
);
static int
bnad_stop(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	mutex_lock(&bnad->conf_mutex);

	/* Stop the stats timer */
	bnad_stats_timer_stop(bnad);

	init_completion(&bnad->bnad_completions.enet_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
			bnad_cb_enet_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion(&bnad->bnad_completions.enet_comp);

	bnad_cleanup_tx(bnad, 0);
	bnad_cleanup_rx(bnad, 0);

	/* Synchronize mailbox IRQ */
	bnad_mbox_irq_sync(bnad);

	mutex_unlock(&bnad->conf_mutex);

	return 0;
}
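
/*
 * Note: bna_enet_disable() is asynchronous; bnad_cb_enet_disabled()
 * signals enet_comp, so bnad_stop() blocks in wait_for_completion()
 * until the hardware has quiesced before tearing down the Tx/Rx paths
 * and synchronizing the mailbox IRQ.
 */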
/*
 * bnad_start_xmit : Netdev entry point for Transmit
 *		     Called under lock held by net_device
 */
static netdev_tx_t
bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	u32 txq_id = 0;
	struct bna_tcb *tcb = bnad->tx_info[0].tcb[txq_id];

	u16 txq_prod, vlan_tag = 0;
	u32 unmap_prod, wis, wis_used, wi_range;
	u32 vectors, vect_id, i, acked;
	int err;

	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	dma_addr_t dma_addr;
	struct bna_txq_entry *txqent;
	u16 flags;

	if (unlikely
	    (skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/*
	 * Takes care of the Tx that is scheduled between clearing the flag
	 * and the netif_stop_all_queue() call.
	 */
	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	vectors = 1 + skb_shinfo(skb)->nr_frags;
	if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	wis = BNA_TXQ_WI_NEEDED(vectors);	/* 4 vectors per work item */
	acked = 0;
	if (unlikely(wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
		     vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
		if ((u16) (*tcb->hw_consumer_index) !=
		    tcb->consumer_index &&
		    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
			acked = bnad_free_txbufs(bnad, tcb);
			if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
				bna_ib_ack(tcb->i_dbell, acked);
			smp_mb__before_clear_bit();
			clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
		} else {
			netif_stop_queue(netdev);
			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
		}

		smp_mb();
		/*
		 * Check again to deal with race condition between
		 * netif_stop_queue here, and netif_wake_queue in
		 * interrupt handler which is not inside netif tx lock.
		 */
		if (likely
		    (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
		     vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
			return NETDEV_TX_BUSY;
		} else {
			netif_wake_queue(netdev);
			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
		}
	}

	unmap_prod = unmap_q->producer_index;
	wis_used = 1;
	vect_id = 0;
	flags = 0;

	txq_prod = tcb->producer_index;
	BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
	BUG_ON(!(wi_range <= tcb->q_depth));
	txqent->hdr.wi.reserved = 0;
	txqent->hdr.wi.num_vectors = vectors;
	txqent->hdr.wi.opcode =
		htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
		       BNA_TXQ_WI_SEND));

	if (vlan_tx_tag_present(skb)) {
		vlan_tag = (u16) vlan_tx_tag_get(skb);
		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
	}
	if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
		vlan_tag =
			(tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
	}

	txqent->hdr.wi.vlan_tag = htons(vlan_tag);

	if (skb_is_gso(skb)) {
		err = bnad_tso_prepare(bnad, skb);
		if (err) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		txqent->hdr.wi.lso_mss = htons(skb_is_gso(skb));
		flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
		txqent->hdr.wi.l4_hdr_size_n_offset =
			htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
			      (tcp_hdrlen(skb) >> 2,
			       skb_transport_offset(skb)));
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 proto = 0;

		txqent->hdr.wi.lso_mss = 0;

		if (skb->protocol == htons(ETH_P_IP))
			proto = ip_hdr(skb)->protocol;
		else if (skb->protocol == htons(ETH_P_IPV6)) {
			/* nexthdr may not be TCP immediately. */
			proto = ipv6_hdr(skb)->nexthdr;
		}
		if (proto == IPPROTO_TCP) {
			flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
			txqent->hdr.wi.l4_hdr_size_n_offset =
				htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
				      (0, skb_transport_offset(skb)));

			BNAD_UPDATE_CTR(bnad, tcpcsum_offload);

			BUG_ON(!(skb_headlen(skb) >=
				 skb_transport_offset(skb) + tcp_hdrlen(skb)));

		} else if (proto == IPPROTO_UDP) {
			flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
			txqent->hdr.wi.l4_hdr_size_n_offset =
				htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
				      (0, skb_transport_offset(skb)));

			BNAD_UPDATE_CTR(bnad, udpcsum_offload);

			BUG_ON(!(skb_headlen(skb) >=
				 skb_transport_offset(skb) +
				 sizeof(struct udphdr)));
		} else {
			err = skb_checksum_help(skb);
			BNAD_UPDATE_CTR(bnad, csum_help);
			if (err) {
				dev_kfree_skb(skb);
				BNAD_UPDATE_CTR(bnad, csum_help_err);
				return NETDEV_TX_OK;
			}
		}
	} else {
		txqent->hdr.wi.lso_mss = 0;
		txqent->hdr.wi.l4_hdr_size_n_offset = 0;
	}

	txqent->hdr.wi.flags = htons(flags);

	txqent->hdr.wi.frame_length = htonl(skb->len);

	unmap_q->unmap_array[unmap_prod].skb = skb;
	BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
	txqent->vector[vect_id].length = htons(skb_headlen(skb));
	dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
				  skb_headlen(skb), DMA_TO_DEVICE);
	dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
			   dma_addr);

	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
	BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		u16 size = frag->size;

		if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
			vect_id = 0;
			if (--wi_range)
				txqent++;
			else {
				BNA_QE_INDX_ADD(txq_prod, wis_used,
						tcb->q_depth);
				wis_used = 0;
				BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
						     txqent, wi_range);
				BUG_ON(!(wi_range <= tcb->q_depth));
			}
			wis_used++;
			txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
		}

		BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
		txqent->vector[vect_id].length = htons(size);
		dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page,
					frag->page_offset, size, DMA_TO_DEVICE);
		dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
				   dma_addr);
		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
	}

	unmap_q->producer_index = unmap_prod;
	BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
	tcb->producer_index = txq_prod;

	smp_mb();

	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		return NETDEV_TX_OK;

	bna_txq_prod_indx_doorbell(tcb);

	if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
		tasklet_schedule(&bnad->tx_free_tasklet);

	return NETDEV_TX_OK;
}
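
/*
 * Transmit-path note: a frame consumes one Tx vector for the linear
 * part plus one per page fragment, and BNA_TXQ_WI_NEEDED() converts
 * that into work items at up to four vectors per WI (continuation WIs
 * carry the BNA_TXQ_WI_EXTENSION opcode). If either the TxQ or the
 * unmap queue lacks room, completed buffers are reclaimed first; only
 * when space is still short is the queue stopped and NETDEV_TX_BUSY
 * returned. The doorbell is rung only after producer_index has been
 * published and TX is still started, hence the smp_mb() before the
 * final checks.
 */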
/*
 * Used spin_lock to synchronize reading of stats structures, which
 * is written by BNA under the same lock.
 */
static struct rtnl_link_stats64 *
bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	bnad_netdev_qstats_fill(bnad, stats);
	bnad_netdev_hwstats_fill(bnad, stats);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return stats;
}
void
bnad_set_rx_mode(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	u32 new_mask, valid_mask;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	new_mask = valid_mask = 0;

	if (netdev->flags & IFF_PROMISC) {
		if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
			new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
			valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
			bnad->cfg_flags |= BNAD_CF_PROMISC;
		}
	} else {
		if (bnad->cfg_flags & BNAD_CF_PROMISC) {
			new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
			valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
			bnad->cfg_flags &= ~BNAD_CF_PROMISC;
		}
	}

	if (netdev->flags & IFF_ALLMULTI) {
		if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
			new_mask |= BNA_RXMODE_ALLMULTI;
			valid_mask |= BNA_RXMODE_ALLMULTI;
			bnad->cfg_flags |= BNAD_CF_ALLMULTI;
		}
	} else {
		if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
			new_mask &= ~BNA_RXMODE_ALLMULTI;
			valid_mask |= BNA_RXMODE_ALLMULTI;
			bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
		}
	}

	bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);

	if (!netdev_mc_empty(netdev)) {
		u8 *mcaddr_list;
		int mc_count = netdev_mc_count(netdev);

		/* Index 0 holds the broadcast address */
		mcaddr_list =
			kzalloc((mc_count + 1) * ETH_ALEN,
				GFP_ATOMIC);
		if (!mcaddr_list)
			goto unlock;

		memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);

		/* Copy rest of the MC addresses */
		bnad_netdev_mc_list_get(netdev, mcaddr_list);

		bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
				     mcaddr_list, NULL);

		/* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
		kfree(mcaddr_list);
	}
unlock:
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
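
/*
 * Rx-mode note: new_mask carries the desired values of the
 * promiscuous/all-multicast mode bits while valid_mask appears to
 * select which of those bits bna_rx_mode_set() should act on, so only
 * modes whose state actually changed are touched. The multicast list
 * is rebuilt with the broadcast address in slot 0 followed by the
 * addresses taken from the netdev MC list.
 */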
/*
 * bna_lock is used to sync writes to netdev->addr
 * conf_lock cannot be used since this call may be made
 * in a non-blocking context.
 */
static int
bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
{
	int err;
	struct bnad *bnad = netdev_priv(netdev);
	struct sockaddr *sa = (struct sockaddr *)mac_addr;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	err = bnad_mac_addr_set_locked(bnad, sa->sa_data);

	if (!err)
		memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return err;
}
static int
bnad_mtu_set(struct bnad *bnad, int mtu)
{
	unsigned long flags;

	init_completion(&bnad->bnad_completions.mtu_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion(&bnad->bnad_completions.mtu_comp);

	return bnad->bnad_completions.mtu_comp_status;
}
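
/*
 * MTU note: the value handed to bna_enet_mtu_set() is the full frame
 * size on the wire, i.e. ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN as
 * computed by the callers, and the operation completes asynchronously
 * through bnad_cb_enet_mtu_set()/mtu_comp.
 */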
static int
bnad_change_mtu(struct net_device *netdev, int new_mtu)
{
	int err, mtu = netdev->mtu;
	struct bnad *bnad = netdev_priv(netdev);

	if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
		return -EINVAL;

	mutex_lock(&bnad->conf_mutex);

	netdev->mtu = new_mtu;

	mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
	err = bnad_mtu_set(bnad, mtu);
	if (err)
		err = -EBUSY;

	mutex_unlock(&bnad->conf_mutex);
	return err;
}
static void
bnad_vlan_rx_add_vid(struct net_device *netdev,
		     unsigned short vid)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	if (!bnad->rx_info[0].rx)
		return;

	mutex_lock(&bnad->conf_mutex);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
	set_bit(vid, bnad->active_vlans);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
}
static void
bnad_vlan_rx_kill_vid(struct net_device *netdev,
		      unsigned short vid)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	if (!bnad->rx_info[0].rx)
		return;

	mutex_lock(&bnad->conf_mutex);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	clear_bit(vid, bnad->active_vlans);
	bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void
bnad_netpoll(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	u32 curr_mask;
	int i, j;

	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
		bna_intx_disable(&bnad->bna, curr_mask);
		bnad_isr(bnad->pcidev->irq, netdev);
		bna_intx_enable(&bnad->bna, curr_mask);
	} else {
		for (i = 0; i < bnad->num_rx; i++) {
			rx_info = &bnad->rx_info[i];
			if (!rx_info->rx)
				continue;
			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
				rx_ctrl = &rx_info->rx_ctrl[j];
				if (rx_ctrl->ccb) {
					bnad_disable_rx_irq(bnad,
							    rx_ctrl->ccb);
					bnad_netif_rx_schedule_poll(bnad,
							    rx_ctrl->ccb);
				}
			}
		}
	}
}
#endif
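
/*
 * Netpoll note: without MSI-X the controller is polled by masking INTx,
 * invoking bnad_isr() directly and unmasking again; with MSI-X each Rx
 * path's interrupt is disabled and its NAPI poll is scheduled instead.
 */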
static const struct net_device_ops bnad_netdev_ops = {
	.ndo_open		= bnad_open,
	.ndo_stop		= bnad_stop,
	.ndo_start_xmit		= bnad_start_xmit,
	.ndo_get_stats64	= bnad_get_stats64,
	.ndo_set_rx_mode	= bnad_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnad_set_mac_address,
	.ndo_change_mtu		= bnad_change_mtu,
	.ndo_vlan_rx_add_vid	= bnad_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= bnad_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bnad_netpoll
#endif
};
static void
bnad_netdev_init(struct bnad *bnad, bool using_dac)
{
	struct net_device *netdev = bnad->netdev;

	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;

	netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->mem_start = bnad->mmio_start;
	netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;

	netdev->netdev_ops = &bnad_netdev_ops;
	bnad_set_ethtool_ops(netdev);
}
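
/*
 * Feature-flag note: hw_features holds the toggleable offloads (SG,
 * checksum, TSO, Tx VLAN insertion); Rx VLAN acceleration and filtering
 * are added to netdev->features as always-on bits, and NETIF_F_HIGHDMA
 * is advertised only when the 64-bit DMA mask was accepted (using_dac).
 */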
/*
 * 1. Initialize the bnad structure
 * 2. Setup netdev pointer in pci_dev
 * 3. Initialize Tx free tasklet
 * 4. Initialize no. of TxQ & CQs & MSIX vectors
 */
static int
bnad_init(struct bnad *bnad,
	  struct pci_dev *pdev, struct net_device *netdev)
{
	unsigned long flags;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);

	bnad->netdev = netdev;
	bnad->pcidev = pdev;
	bnad->mmio_start = pci_resource_start(pdev, 0);
	bnad->mmio_len = pci_resource_len(pdev, 0);
	bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
	if (!bnad->bar0) {
		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
		pci_set_drvdata(pdev, NULL);
		return -ENOMEM;
	}
	pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
	       (unsigned long long) bnad->mmio_len);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!bnad_msix_disable)
		bnad->cfg_flags = BNAD_CF_MSIX;

	bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;

	bnad_q_num_init(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
		(bnad->num_rx * bnad->num_rxp_per_rx) +
		BNAD_MAILBOX_MSIX_VECTORS;

	bnad->txq_depth = BNAD_TXQ_DEPTH;
	bnad->rxq_depth = BNAD_RXQ_DEPTH;

	bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
	bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;

	tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
		     (unsigned long)bnad);

	return 0;
}
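
/*
 * Defaults set up here: queue counts come from bnad_q_num_init(), the
 * MSI-X budget is derived from them plus the mailbox vector, TxQ/RxQ
 * depths default to BNAD_TXQ_DEPTH/BNAD_RXQ_DEPTH, interrupt coalescing
 * timers to the BFI defaults, and the Tx free tasklet is prepared for
 * deferred buffer reclamation.
 */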
/*
 * Must be called after bnad_pci_uninit()
 * so that iounmap() and pci_set_drvdata(NULL)
 * happens only after PCI uninitialization.
 */
static void
bnad_uninit(struct bnad *bnad)
{
	if (bnad->bar0)
		iounmap(bnad->bar0);
	pci_set_drvdata(bnad->pcidev, NULL);
}
/*
 * Initialize locks
 *	a) Per-ioceth mutex used for serializing configuration
 *	   changes from the OS interface
 *	b) spin lock used to protect the bna state machine
 */
static void
bnad_lock_init(struct bnad *bnad)
{
	spin_lock_init(&bnad->bna_lock);
	mutex_init(&bnad->conf_mutex);
}

static void
bnad_lock_uninit(struct bnad *bnad)
{
	mutex_destroy(&bnad->conf_mutex);
}
/* PCI Initialization */
static int
bnad_pci_init(struct bnad *bnad,
	      struct pci_dev *pdev, bool *using_dac)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;
	err = pci_request_regions(pdev, BNAD_NAME);
	if (err)
		goto disable_device;
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		*using_dac = true;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err)
				goto release_regions;
		}
		*using_dac = false;
	}
	pci_set_master(pdev);
	return 0;

release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);

	return err;
}

static void
bnad_pci_uninit(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
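
/*
 * DMA note: a 64-bit streaming and coherent mask is tried first and
 * *using_dac reports whether it was accepted; otherwise the driver
 * falls back to 32-bit masks and fails the probe if even those cannot
 * be set. Errors release regions and disable the device in reverse
 * order of acquisition.
 */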
static int __devinit
bnad_pci_probe(struct pci_dev *pdev,
		const struct pci_device_id *pcidev_id)
{
	bool	using_dac = false;
	int	err;
	struct bnad *bnad;
	struct bna *bna;
	struct net_device *netdev;
	struct bfa_pcidev pcidev_info;
	unsigned long flags;

	pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
	       pdev, pcidev_id, PCI_FUNC(pdev->devfn));

	mutex_lock(&bnad_fwimg_mutex);
	if (!cna_get_firmware_buf(pdev)) {
		mutex_unlock(&bnad_fwimg_mutex);
		pr_warn("Failed to load Firmware Image!\n");
		return -ENODEV;
	}
	mutex_unlock(&bnad_fwimg_mutex);

	/*
	 * Allocates sizeof(struct net_device + struct bnad)
	 * bnad = netdev->priv
	 */
	netdev = alloc_etherdev(sizeof(struct bnad));
	if (!netdev) {
		dev_err(&pdev->dev, "netdev allocation failed\n");
		err = -ENOMEM;
		return err;
	}
	bnad = netdev_priv(netdev);

	bnad_lock_init(bnad);

	mutex_lock(&bnad->conf_mutex);
	/*
	 * PCI initialization
	 *	Output : using_dac = 1 for 64 bit DMA
	 *			   = 0 for 32 bit DMA
	 */
	err = bnad_pci_init(bnad, pdev, &using_dac);
	if (err)
		goto unlock_mutex;

	/*
	 * Initialize bnad structure
	 * Setup relation between pci_dev & netdev
	 * Init Tx free tasklet
	 */
	err = bnad_init(bnad, pdev, netdev);
	if (err)
		goto pci_uninit;

	/* Initialize netdev structure, set up ethtool ops */
	bnad_netdev_init(bnad, using_dac);

	/* Set link to down state */
	netif_carrier_off(netdev);

	/* Get resource requirement from bna */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_res_req(&bnad->res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Allocate resources from bna */
	err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
	if (err)
		goto drv_uninit;

	bna = &bnad->bna;

	/* Setup pcidev_info for bna_init() */
	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
	pcidev_info.device_id = bnad->pcidev->device;
	pcidev_info.pci_bar_kva = bnad->bar0;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad->stats.bna_stats = &bna->stats;

	bnad_enable_msix(bnad);
	err = bnad_mbox_irq_alloc(bnad);
	if (err)
		goto res_free;

	/* Set up timers */
	setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
				((unsigned long)bnad));
	setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
				((unsigned long)bnad));
	setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
				((unsigned long)bnad));
	setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
				((unsigned long)bnad));

	/* Now start the timer before calling IOC */
	mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
		  jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));

	/*
	 * Start the chip.
	 * If the callback comes with error, we bail out.
	 * This is a catastrophic error.
	 */
	err = bnad_ioceth_enable(bnad);
	if (err) {
		pr_err("BNA: Initialization failed err=%d\n",
		       err);
		goto probe_success;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
		bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
		bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
			bna_attr(bna)->num_rxp - 1);
		if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
			bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
			err = -EIO;
	}
	bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
	if (err)
		goto disable_ioceth;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Get the burnt-in mac */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
	bnad_set_netdev_perm_addr(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Finally, register with net_device layer */
	err = register_netdev(netdev);
	if (err) {
		pr_err("BNA : Registering with netdev failed\n");
		goto probe_uninit;
	}
	set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);

probe_success:
	mutex_unlock(&bnad->conf_mutex);
	return 0;

probe_uninit:
	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
disable_ioceth:
	bnad_ioceth_disable(bnad);
	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_uninit(bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	bnad_mbox_irq_free(bnad);
	bnad_disable_msix(bnad);
res_free:
	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
drv_uninit:
	bnad_uninit(bnad);
pci_uninit:
	bnad_pci_uninit(pdev);
unlock_mutex:
	mutex_unlock(&bnad->conf_mutex);
	bnad_lock_uninit(bnad);
	free_netdev(netdev);
	return err;
}
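
/*
 * Error-unwind note: the probe failure labels (probe_uninit,
 * disable_ioceth, res_free, drv_uninit, pci_uninit, unlock_mutex)
 * cascade so that each stage releases exactly what was acquired after
 * the previous label, mirroring bnad_pci_remove() in reverse order of
 * initialization.
 */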
static void __devexit
bnad_pci_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnad *bnad;
	struct bna *bna;
	unsigned long flags;

	if (!netdev)
		return;

	pr_info("%s bnad_pci_remove\n", netdev->name);
	bnad = netdev_priv(netdev);
	bna = &bnad->bna;

	if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
		unregister_netdev(netdev);

	mutex_lock(&bnad->conf_mutex);
	bnad_ioceth_disable(bnad);
	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_uninit(bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
	bnad_mbox_irq_free(bnad);
	bnad_disable_msix(bnad);
	bnad_pci_uninit(pdev);
	mutex_unlock(&bnad->conf_mutex);
	bnad_lock_uninit(bnad);
	bnad_uninit(bnad);
	free_netdev(netdev);
}
static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
			PCI_DEVICE_ID_BROCADE_CT),
		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
		.class_mask =  0xffff00
	},
	{0,  },
};

MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);

static struct pci_driver bnad_pci_driver = {
	.name = BNAD_NAME,
	.id_table = bnad_pci_id_table,
	.probe = bnad_pci_probe,
	.remove = __devexit_p(bnad_pci_remove),
};

static int __init
bnad_module_init(void)
{
	int err;

	pr_info("Brocade 10G Ethernet driver - version: %s\n",
			BNAD_VERSION);

	bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);

	err = pci_register_driver(&bnad_pci_driver);
	if (err < 0) {
		pr_err("bna : PCI registration failed in module init "
		       "(%d)\n", err);
		return err;
	}

	return 0;
}

static void __exit
bnad_module_exit(void)
{
	pci_unregister_driver(&bnad_pci_driver);

	if (bfi_fw)
		release_firmware(bfi_fw);
}

module_init(bnad_module_init);
module_exit(bnad_module_exit);

MODULE_AUTHOR("Brocade");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
MODULE_VERSION(BNAD_VERSION);
MODULE_FIRMWARE(CNA_FW_FILE_CT);