/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 */
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/prefetch.h>
#include <linux/module.h>
static DEFINE_MUTEX(bnad_fwimg_mutex);

static uint bnad_msix_disable;
module_param(bnad_msix_disable, uint, 0444);
MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");

static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");

static uint bna_debugfs_enable = 1;
module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
		 " Range[false:0|true:1]");

u32 bnad_rxqs_per_cq = 2;
static struct mutex bnad_list_mutex;
static LIST_HEAD(bnad_list);
static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
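/*
 * Usage note (added, not part of the original source): these parameters can
 * be supplied at module load time, e.g. a hypothetical
 * "modprobe bna bnad_msix_disable=1 bnad_ioc_auto_recover=0" would force
 * INTx interrupts and turn off IOC auto recovery, assuming the driver is
 * built as bna.ko.
 */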
#define BNAD_GET_MBOX_IRQ(_bnad)				\
	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
	 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
	 ((_bnad)->pcidev->irq))

#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size)	\
do {								\
	(_res_info)->res_type = BNA_RES_T_MEM;			\
	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
	(_res_info)->res_u.mem_info.num = (_num);		\
	(_res_info)->res_u.mem_info.len = (_size);		\
} while (0)
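/*
 * Illustrative sketch (added, not from the original source): callers use
 * BNAD_FILL_UNMAPQ_MEM_REQ() to describe the kernel-VA memory needed for
 * the per-queue unmap arrays before resource allocation, roughly as
 * bnad_setup_tx()/bnad_setup_rx() do later in this file, e.g.
 *
 *	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
 *				 bnad->num_txq_per_tx,
 *				 bnad->txq_depth * sizeof(struct bnad_tx_unmap));
 *
 * The size expression is an approximation of what the driver computes at
 * those call sites.
 */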
static void
bnad_add_to_list(struct bnad *bnad)
{
	mutex_lock(&bnad_list_mutex);
	list_add_tail(&bnad->list_entry, &bnad_list);
	mutex_unlock(&bnad_list_mutex);
}

static void
bnad_remove_from_list(struct bnad *bnad)
{
	mutex_lock(&bnad_list_mutex);
	list_del(&bnad->list_entry);
	mutex_unlock(&bnad_list_mutex);
}
/*
 * Reinitialize completions in CQ, once Rx is taken down.
 */
static void
bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bna_cq_entry *cmpl;
	int i;

	for (i = 0; i < ccb->q_depth; i++) {
		cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
		cmpl->valid = 0;
	}
}
/* Tx Datapath functions */

/* Caller should ensure that the entry at unmap_q[index] is valid */
static u32
bnad_tx_buff_unmap(struct bnad *bnad,
		   struct bnad_tx_unmap *unmap_q,
		   u32 q_depth, u32 index)
{
	struct bnad_tx_unmap *unmap;
	struct sk_buff *skb;
	int vector, nvecs;

	unmap = &unmap_q[index];
	nvecs = unmap->nvecs;

	skb = unmap->skb;
	unmap->skb = NULL;
	unmap->nvecs = 0;
	dma_unmap_single(&bnad->pcidev->dev,
		dma_unmap_addr(&unmap->vectors[0], dma_addr),
		skb_headlen(skb), DMA_TO_DEVICE);
	dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
	nvecs--;

	vector = 0;
	while (nvecs) {
		vector++;
		if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
			vector = 0;
			BNA_QE_INDX_INC(index, q_depth);
			unmap = &unmap_q[index];
		}

		dma_unmap_page(&bnad->pcidev->dev,
			dma_unmap_addr(&unmap->vectors[vector], dma_addr),
			skb_shinfo(skb)->frags[nvecs].size, DMA_TO_DEVICE);
		dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
		nvecs--;
	}

	BNA_QE_INDX_INC(index, q_depth);

	return index;
}
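/*
 * Note (added, not part of the original source): an skb with a linear head
 * and N page fragments occupies nvecs = N + 1 Tx vectors; when that exceeds
 * BFI_TX_MAX_VECTORS_PER_WI, the remaining vectors spill into the following
 * work item, which is why the unmap walk above may advance "index" mid-skb.
 */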
/*
 * Frees all pending Tx Bufs
 * At this point no activity is expected on the Q,
 * so DMA unmap & freeing is fine.
 */
static void
bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
	struct sk_buff *skb;
	int i;

	for (i = 0; i < tcb->q_depth; i++) {
		skb = unmap_q[i].skb;
		if (!skb)
			continue;

		bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);
		dev_kfree_skb_any(skb);
	}
}
/*
 * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
 * Can be called in a) Interrupt context
 *		    b) Sending context
 */
static u32
bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
{
	u32 sent_packets = 0, sent_bytes = 0;
	u32 wis, unmap_wis, hw_cons, cons, q_depth;
	struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
	struct bnad_tx_unmap *unmap;
	struct sk_buff *skb;

	/* Just return if TX is stopped */
	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
		return 0;

	hw_cons = *(tcb->hw_consumer_index);
	cons = tcb->consumer_index;
	q_depth = tcb->q_depth;

	wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));

	while (wis) {
		unmap = &unmap_q[cons];

		skb = unmap->skb;

		sent_packets++;
		sent_bytes += skb->len;

		unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
		wis -= unmap_wis;

		cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
		dev_kfree_skb_any(skb);
	}

	/* Update consumer pointers. */
	tcb->consumer_index = hw_cons;

	tcb->txq->tx_packets += sent_packets;
	tcb->txq->tx_bytes += sent_bytes;

	return sent_packets;
}
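/*
 * Note (added, not part of the original source): assuming BNA_Q_INDEX_CHANGE()
 * is the usual masked ring subtraction with a power-of-two q_depth, e.g.
 * q_depth = 8, cons = 6, hw_cons = 2 gives (2 - 6) & 7 = 4 work items to
 * reclaim before the consumer index is snapped to the hardware value.
 */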
static u32
bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct net_device *netdev = bnad->netdev;
	u32 sent = 0;

	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		return 0;

	sent = bnad_txcmpl_process(bnad, tcb);
	if (sent) {
		if (netif_queue_stopped(netdev) &&
		    netif_carrier_ok(netdev) &&
		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
				    BNAD_NETIF_WAKE_THRESHOLD) {
			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
				netif_wake_queue(netdev);
				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
			}
		}
	}

	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		bna_ib_ack(tcb->i_dbell, sent);

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	return sent;
}
/* MSIX Tx Completion Handler */
static irqreturn_t
bnad_msix_tx(int irq, void *data)
{
	struct bna_tcb *tcb = (struct bna_tcb *)data;
	struct bnad *bnad = tcb->bnad;

	bnad_tx_complete(bnad, tcb);

	return IRQ_HANDLED;
}
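/*
 * Illustrative sketch (added, not part of the original source): in MSI-X
 * mode this handler is registered once per TxQ, along the lines of
 * bnad_tx_msix_register() further down in this file, e.g.
 *
 *	err = request_irq(bnad->msix_table[vector_num].vector,
 *			  (irq_handler_t)bnad_msix_tx, 0,
 *			  tx_info->tcb[i]->name, tx_info->tcb[i]);
 *
 * so the "data" cookie handed back here is the tcb itself.
 */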
static void
bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;

	unmap_q->reuse_pi = -1;
	unmap_q->alloc_order = -1;
	unmap_q->map_size = 0;
	unmap_q->type = BNAD_RXBUF_NONE;
}
/* Default is page-based allocation. Multi-buffer support - TBD */
static void
bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	int mtu, order;

	bnad_rxq_alloc_uninit(bnad, rcb);

	mtu = bna_enet_mtu_get(&bnad->bna.enet);
	order = get_order(mtu);

	if (bna_is_small_rxq(rcb->id)) {
		unmap_q->alloc_order = 0;
		unmap_q->map_size = rcb->rxq->buffer_size;
	} else {
		unmap_q->alloc_order = order;
		unmap_q->map_size =
			(rcb->rxq->buffer_size > 2048) ?
			PAGE_SIZE << order : 2048;
	}

	BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size));

	unmap_q->type = BNAD_RXBUF_PAGE;
}
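/*
 * Worked example (added, not part of the original source): with 4 KiB pages
 * and a 9000-byte MTU, get_order(9000) = 2, so the large RxQ maps the whole
 * 16 KiB compound page per buffer (buffer_size > 2048); with a standard
 * 1500-byte MTU the order is 0 and buffers are carved out of the page in
 * 2048-byte map_size slices, so PAGE_SIZE << order stays an exact multiple
 * of map_size as the BUG_ON() above asserts.
 */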
static void
bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
{
	if (!unmap->page)
		return;

	dma_unmap_page(&bnad->pcidev->dev,
			dma_unmap_addr(&unmap->vector, dma_addr),
			unmap->vector.len, DMA_FROM_DEVICE);
	put_page(unmap->page);
	unmap->page = NULL;
	dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
	unmap->vector.len = 0;
}
static void
bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
{
	if (!unmap->skb)
		return;

	dma_unmap_single(&bnad->pcidev->dev,
			dma_unmap_addr(&unmap->vector, dma_addr),
			unmap->vector.len, DMA_FROM_DEVICE);
	dev_kfree_skb_any(unmap->skb);
	unmap->skb = NULL;
	dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
	unmap->vector.len = 0;
}
static void
bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	int i;

	for (i = 0; i < rcb->q_depth; i++) {
		struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];

		if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
			bnad_rxq_cleanup_page(bnad, unmap);
		else
			bnad_rxq_cleanup_skb(bnad, unmap);
	}
	bnad_rxq_alloc_uninit(bnad, rcb);
}
358 bnad_rxq_refill_page(struct bnad
*bnad
, struct bna_rcb
*rcb
, u32 nalloc
)
360 u32 alloced
, prod
, q_depth
;
361 struct bnad_rx_unmap_q
*unmap_q
= rcb
->unmap_q
;
362 struct bnad_rx_unmap
*unmap
, *prev
;
363 struct bna_rxq_entry
*rxent
;
365 u32 page_offset
, alloc_size
;
368 prod
= rcb
->producer_index
;
369 q_depth
= rcb
->q_depth
;
371 alloc_size
= PAGE_SIZE
<< unmap_q
->alloc_order
;
375 unmap
= &unmap_q
->unmap
[prod
];
377 if (unmap_q
->reuse_pi
< 0) {
378 page
= alloc_pages(GFP_ATOMIC
| __GFP_COMP
,
379 unmap_q
->alloc_order
);
382 prev
= &unmap_q
->unmap
[unmap_q
->reuse_pi
];
384 page_offset
= prev
->page_offset
+ unmap_q
->map_size
;
388 if (unlikely(!page
)) {
389 BNAD_UPDATE_CTR(bnad
, rxbuf_alloc_failed
);
390 rcb
->rxq
->rxbuf_alloc_failed
++;
394 dma_addr
= dma_map_page(&bnad
->pcidev
->dev
, page
, page_offset
,
395 unmap_q
->map_size
, DMA_FROM_DEVICE
);
398 unmap
->page_offset
= page_offset
;
399 dma_unmap_addr_set(&unmap
->vector
, dma_addr
, dma_addr
);
400 unmap
->vector
.len
= unmap_q
->map_size
;
401 page_offset
+= unmap_q
->map_size
;
403 if (page_offset
< alloc_size
)
404 unmap_q
->reuse_pi
= prod
;
406 unmap_q
->reuse_pi
= -1;
408 rxent
= &((struct bna_rxq_entry
*)rcb
->sw_q
)[prod
];
409 BNA_SET_DMA_ADDR(dma_addr
, &rxent
->host_addr
);
410 BNA_QE_INDX_INC(prod
, q_depth
);
415 if (likely(alloced
)) {
416 rcb
->producer_index
= prod
;
418 if (likely(test_bit(BNAD_RXQ_POST_OK
, &rcb
->flags
)))
419 bna_rxq_prod_indx_doorbell(rcb
);
426 bnad_rxq_refill_skb(struct bnad
*bnad
, struct bna_rcb
*rcb
, u32 nalloc
)
428 u32 alloced
, prod
, q_depth
, buff_sz
;
429 struct bnad_rx_unmap_q
*unmap_q
= rcb
->unmap_q
;
430 struct bnad_rx_unmap
*unmap
;
431 struct bna_rxq_entry
*rxent
;
435 buff_sz
= rcb
->rxq
->buffer_size
;
436 prod
= rcb
->producer_index
;
437 q_depth
= rcb
->q_depth
;
441 unmap
= &unmap_q
->unmap
[prod
];
443 skb
= netdev_alloc_skb_ip_align(bnad
->netdev
, buff_sz
);
445 if (unlikely(!skb
)) {
446 BNAD_UPDATE_CTR(bnad
, rxbuf_alloc_failed
);
447 rcb
->rxq
->rxbuf_alloc_failed
++;
450 dma_addr
= dma_map_single(&bnad
->pcidev
->dev
, skb
->data
,
451 buff_sz
, DMA_FROM_DEVICE
);
454 dma_unmap_addr_set(&unmap
->vector
, dma_addr
, dma_addr
);
455 unmap
->vector
.len
= buff_sz
;
457 rxent
= &((struct bna_rxq_entry
*)rcb
->sw_q
)[prod
];
458 BNA_SET_DMA_ADDR(dma_addr
, &rxent
->host_addr
);
459 BNA_QE_INDX_INC(prod
, q_depth
);
464 if (likely(alloced
)) {
465 rcb
->producer_index
= prod
;
467 if (likely(test_bit(BNAD_RXQ_POST_OK
, &rcb
->flags
)))
468 bna_rxq_prod_indx_doorbell(rcb
);
475 bnad_rxq_post(struct bnad
*bnad
, struct bna_rcb
*rcb
)
477 struct bnad_rx_unmap_q
*unmap_q
= rcb
->unmap_q
;
480 to_alloc
= BNA_QE_FREE_CNT(rcb
, rcb
->q_depth
);
481 if (!(to_alloc
>> BNAD_RXQ_REFILL_THRESHOLD_SHIFT
))
484 if (BNAD_RXBUF_IS_PAGE(unmap_q
->type
))
485 bnad_rxq_refill_page(bnad
, rcb
, to_alloc
);
487 bnad_rxq_refill_skb(bnad
, rcb
, to_alloc
);
#define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
				BNA_CQ_EF_IPV6 | \
				BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
				BNA_CQ_EF_L4_CKSUM_OK)

#define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
				BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
#define flags_tcp6 (BNA_CQ_EF_IPV6 | \
				BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
#define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
				BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
#define flags_udp6 (BNA_CQ_EF_IPV6 | \
				BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
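/*
 * Note (added, not part of the original source): bnad_cq_process() below
 * masks the completion flags with flags_cksum_prot_mask and sets
 * CHECKSUM_UNNECESSARY only when the masked value matches one of the four
 * patterns above exactly, i.e. the L4 (and, for IPv4, the L3) checksum-OK
 * bits must be present for the declared protocol; otherwise it falls back
 * to skb_checksum_none_assert().
 */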
504 static inline struct sk_buff
*
505 bnad_cq_prepare_skb(struct bnad_rx_ctrl
*rx_ctrl
,
506 struct bnad_rx_unmap_q
*unmap_q
,
507 struct bnad_rx_unmap
*unmap
,
508 u32 length
, u32 flags
)
510 struct bnad
*bnad
= rx_ctrl
->bnad
;
513 if (BNAD_RXBUF_IS_PAGE(unmap_q
->type
)) {
514 skb
= napi_get_frags(&rx_ctrl
->napi
);
518 dma_unmap_page(&bnad
->pcidev
->dev
,
519 dma_unmap_addr(&unmap
->vector
, dma_addr
),
520 unmap
->vector
.len
, DMA_FROM_DEVICE
);
521 skb_fill_page_desc(skb
, skb_shinfo(skb
)->nr_frags
,
522 unmap
->page
, unmap
->page_offset
, length
);
524 skb
->data_len
+= length
;
525 skb
->truesize
+= length
;
528 unmap
->vector
.len
= 0;
536 dma_unmap_single(&bnad
->pcidev
->dev
,
537 dma_unmap_addr(&unmap
->vector
, dma_addr
),
538 unmap
->vector
.len
, DMA_FROM_DEVICE
);
540 skb_put(skb
, length
);
542 skb
->protocol
= eth_type_trans(skb
, bnad
->netdev
);
545 unmap
->vector
.len
= 0;
550 bnad_cq_process(struct bnad
*bnad
, struct bna_ccb
*ccb
, int budget
)
552 struct bna_cq_entry
*cq
, *cmpl
;
553 struct bna_rcb
*rcb
= NULL
;
554 struct bnad_rx_unmap_q
*unmap_q
;
555 struct bnad_rx_unmap
*unmap
;
557 struct bna_pkt_rate
*pkt_rt
= &ccb
->pkt_rate
;
558 struct bnad_rx_ctrl
*rx_ctrl
= ccb
->ctrl
;
559 u32 packets
= 0, length
= 0, flags
, masked_flags
;
561 prefetch(bnad
->netdev
);
564 cmpl
= &cq
[ccb
->producer_index
];
566 while (cmpl
->valid
&& (packets
< budget
)) {
568 flags
= ntohl(cmpl
->flags
);
569 length
= ntohs(cmpl
->length
);
570 BNA_UPDATE_PKT_CNT(pkt_rt
, ntohs(cmpl
->length
));
572 if (bna_is_small_rxq(cmpl
->rxq_id
))
577 unmap_q
= rcb
->unmap_q
;
578 unmap
= &unmap_q
->unmap
[rcb
->consumer_index
];
580 if (unlikely(flags
& (BNA_CQ_EF_MAC_ERROR
|
581 BNA_CQ_EF_FCS_ERROR
|
582 BNA_CQ_EF_TOO_LONG
))) {
583 if (BNAD_RXBUF_IS_PAGE(unmap_q
->type
))
584 bnad_rxq_cleanup_page(bnad
, unmap
);
586 bnad_rxq_cleanup_skb(bnad
, unmap
);
588 rcb
->rxq
->rx_packets_with_error
++;
592 skb
= bnad_cq_prepare_skb(ccb
->ctrl
, unmap_q
, unmap
,
598 masked_flags
= flags
& flags_cksum_prot_mask
;
601 ((bnad
->netdev
->features
& NETIF_F_RXCSUM
) &&
602 ((masked_flags
== flags_tcp4
) ||
603 (masked_flags
== flags_udp4
) ||
604 (masked_flags
== flags_tcp6
) ||
605 (masked_flags
== flags_udp6
))))
606 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
608 skb_checksum_none_assert(skb
);
610 rcb
->rxq
->rx_packets
++;
611 rcb
->rxq
->rx_bytes
+= length
;
613 if (flags
& BNA_CQ_EF_VLAN
)
614 __vlan_hwaccel_put_tag(skb
, htons(ETH_P_8021Q
), ntohs(cmpl
->vlan_tag
));
616 if (BNAD_RXBUF_IS_PAGE(unmap_q
->type
))
617 napi_gro_frags(&rx_ctrl
->napi
);
619 netif_receive_skb(skb
);
623 BNA_QE_INDX_INC(rcb
->consumer_index
, rcb
->q_depth
);
624 BNA_QE_INDX_INC(ccb
->producer_index
, ccb
->q_depth
);
625 cmpl
= &cq
[ccb
->producer_index
];
628 napi_gro_flush(&rx_ctrl
->napi
, false);
629 if (likely(test_bit(BNAD_RXQ_STARTED
, &ccb
->rcb
[0]->flags
)))
630 bna_ib_ack_disable_irq(ccb
->i_dbell
, packets
);
632 bnad_rxq_post(bnad
, ccb
->rcb
[0]);
634 bnad_rxq_post(bnad
, ccb
->rcb
[1]);
640 bnad_netif_rx_schedule_poll(struct bnad
*bnad
, struct bna_ccb
*ccb
)
642 struct bnad_rx_ctrl
*rx_ctrl
= (struct bnad_rx_ctrl
*)(ccb
->ctrl
);
643 struct napi_struct
*napi
= &rx_ctrl
->napi
;
645 if (likely(napi_schedule_prep(napi
))) {
646 __napi_schedule(napi
);
647 rx_ctrl
->rx_schedule
++;
651 /* MSIX Rx Path Handler */
653 bnad_msix_rx(int irq
, void *data
)
655 struct bna_ccb
*ccb
= (struct bna_ccb
*)data
;
658 ((struct bnad_rx_ctrl
*)(ccb
->ctrl
))->rx_intr_ctr
++;
659 bnad_netif_rx_schedule_poll(ccb
->bnad
, ccb
);
665 /* Interrupt handlers */
667 /* Mbox Interrupt Handlers */
669 bnad_msix_mbox_handler(int irq
, void *data
)
673 struct bnad
*bnad
= (struct bnad
*)data
;
675 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
676 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED
, &bnad
->run_flags
))) {
677 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
681 bna_intr_status_get(&bnad
->bna
, intr_status
);
683 if (BNA_IS_MBOX_ERR_INTR(&bnad
->bna
, intr_status
))
684 bna_mbox_handler(&bnad
->bna
, intr_status
);
686 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
692 bnad_isr(int irq
, void *data
)
697 struct bnad
*bnad
= (struct bnad
*)data
;
698 struct bnad_rx_info
*rx_info
;
699 struct bnad_rx_ctrl
*rx_ctrl
;
700 struct bna_tcb
*tcb
= NULL
;
702 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
703 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED
, &bnad
->run_flags
))) {
704 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
708 bna_intr_status_get(&bnad
->bna
, intr_status
);
710 if (unlikely(!intr_status
)) {
711 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
715 if (BNA_IS_MBOX_ERR_INTR(&bnad
->bna
, intr_status
))
716 bna_mbox_handler(&bnad
->bna
, intr_status
);
718 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
720 if (!BNA_IS_INTX_DATA_INTR(intr_status
))
723 /* Process data interrupts */
725 for (i
= 0; i
< bnad
->num_tx
; i
++) {
726 for (j
= 0; j
< bnad
->num_txq_per_tx
; j
++) {
727 tcb
= bnad
->tx_info
[i
].tcb
[j
];
728 if (tcb
&& test_bit(BNAD_TXQ_TX_STARTED
, &tcb
->flags
))
729 bnad_tx_complete(bnad
, bnad
->tx_info
[i
].tcb
[j
]);
733 for (i
= 0; i
< bnad
->num_rx
; i
++) {
734 rx_info
= &bnad
->rx_info
[i
];
737 for (j
= 0; j
< bnad
->num_rxp_per_rx
; j
++) {
738 rx_ctrl
= &rx_info
->rx_ctrl
[j
];
740 bnad_netif_rx_schedule_poll(bnad
,
748 * Called in interrupt / callback context
749 * with bna_lock held, so cfg_flags access is OK
752 bnad_enable_mbox_irq(struct bnad
*bnad
)
754 clear_bit(BNAD_RF_MBOX_IRQ_DISABLED
, &bnad
->run_flags
);
756 BNAD_UPDATE_CTR(bnad
, mbox_intr_enabled
);
760 * Called with bnad->bna_lock held b'cos of
761 * bnad->cfg_flags access.
764 bnad_disable_mbox_irq(struct bnad
*bnad
)
766 set_bit(BNAD_RF_MBOX_IRQ_DISABLED
, &bnad
->run_flags
);
768 BNAD_UPDATE_CTR(bnad
, mbox_intr_disabled
);
772 bnad_set_netdev_perm_addr(struct bnad
*bnad
)
774 struct net_device
*netdev
= bnad
->netdev
;
776 memcpy(netdev
->perm_addr
, &bnad
->perm_addr
, netdev
->addr_len
);
777 if (is_zero_ether_addr(netdev
->dev_addr
))
778 memcpy(netdev
->dev_addr
, &bnad
->perm_addr
, netdev
->addr_len
);
781 /* Control Path Handlers */
785 bnad_cb_mbox_intr_enable(struct bnad
*bnad
)
787 bnad_enable_mbox_irq(bnad
);
791 bnad_cb_mbox_intr_disable(struct bnad
*bnad
)
793 bnad_disable_mbox_irq(bnad
);
797 bnad_cb_ioceth_ready(struct bnad
*bnad
)
799 bnad
->bnad_completions
.ioc_comp_status
= BNA_CB_SUCCESS
;
800 complete(&bnad
->bnad_completions
.ioc_comp
);
804 bnad_cb_ioceth_failed(struct bnad
*bnad
)
806 bnad
->bnad_completions
.ioc_comp_status
= BNA_CB_FAIL
;
807 complete(&bnad
->bnad_completions
.ioc_comp
);
811 bnad_cb_ioceth_disabled(struct bnad
*bnad
)
813 bnad
->bnad_completions
.ioc_comp_status
= BNA_CB_SUCCESS
;
814 complete(&bnad
->bnad_completions
.ioc_comp
);
818 bnad_cb_enet_disabled(void *arg
)
820 struct bnad
*bnad
= (struct bnad
*)arg
;
822 netif_carrier_off(bnad
->netdev
);
823 complete(&bnad
->bnad_completions
.enet_comp
);
827 bnad_cb_ethport_link_status(struct bnad
*bnad
,
828 enum bna_link_status link_status
)
830 bool link_up
= false;
832 link_up
= (link_status
== BNA_LINK_UP
) || (link_status
== BNA_CEE_UP
);
834 if (link_status
== BNA_CEE_UP
) {
835 if (!test_bit(BNAD_RF_CEE_RUNNING
, &bnad
->run_flags
))
836 BNAD_UPDATE_CTR(bnad
, cee_toggle
);
837 set_bit(BNAD_RF_CEE_RUNNING
, &bnad
->run_flags
);
839 if (test_bit(BNAD_RF_CEE_RUNNING
, &bnad
->run_flags
))
840 BNAD_UPDATE_CTR(bnad
, cee_toggle
);
841 clear_bit(BNAD_RF_CEE_RUNNING
, &bnad
->run_flags
);
845 if (!netif_carrier_ok(bnad
->netdev
)) {
847 printk(KERN_WARNING
"bna: %s link up\n",
849 netif_carrier_on(bnad
->netdev
);
850 BNAD_UPDATE_CTR(bnad
, link_toggle
);
851 for (tx_id
= 0; tx_id
< bnad
->num_tx
; tx_id
++) {
852 for (tcb_id
= 0; tcb_id
< bnad
->num_txq_per_tx
;
854 struct bna_tcb
*tcb
=
855 bnad
->tx_info
[tx_id
].tcb
[tcb_id
];
862 if (test_bit(BNAD_TXQ_TX_STARTED
,
866 * Transmit Schedule */
867 printk(KERN_INFO
"bna: %s %d "
874 BNAD_UPDATE_CTR(bnad
,
880 BNAD_UPDATE_CTR(bnad
,
887 if (netif_carrier_ok(bnad
->netdev
)) {
888 printk(KERN_WARNING
"bna: %s link down\n",
890 netif_carrier_off(bnad
->netdev
);
891 BNAD_UPDATE_CTR(bnad
, link_toggle
);
897 bnad_cb_tx_disabled(void *arg
, struct bna_tx
*tx
)
899 struct bnad
*bnad
= (struct bnad
*)arg
;
901 complete(&bnad
->bnad_completions
.tx_comp
);
905 bnad_cb_tcb_setup(struct bnad
*bnad
, struct bna_tcb
*tcb
)
907 struct bnad_tx_info
*tx_info
=
908 (struct bnad_tx_info
*)tcb
->txq
->tx
->priv
;
911 tx_info
->tcb
[tcb
->id
] = tcb
;
915 bnad_cb_tcb_destroy(struct bnad
*bnad
, struct bna_tcb
*tcb
)
917 struct bnad_tx_info
*tx_info
=
918 (struct bnad_tx_info
*)tcb
->txq
->tx
->priv
;
920 tx_info
->tcb
[tcb
->id
] = NULL
;
925 bnad_cb_ccb_setup(struct bnad
*bnad
, struct bna_ccb
*ccb
)
927 struct bnad_rx_info
*rx_info
=
928 (struct bnad_rx_info
*)ccb
->cq
->rx
->priv
;
930 rx_info
->rx_ctrl
[ccb
->id
].ccb
= ccb
;
931 ccb
->ctrl
= &rx_info
->rx_ctrl
[ccb
->id
];
935 bnad_cb_ccb_destroy(struct bnad
*bnad
, struct bna_ccb
*ccb
)
937 struct bnad_rx_info
*rx_info
=
938 (struct bnad_rx_info
*)ccb
->cq
->rx
->priv
;
940 rx_info
->rx_ctrl
[ccb
->id
].ccb
= NULL
;
944 bnad_cb_tx_stall(struct bnad
*bnad
, struct bna_tx
*tx
)
946 struct bnad_tx_info
*tx_info
=
947 (struct bnad_tx_info
*)tx
->priv
;
952 for (i
= 0; i
< BNAD_MAX_TXQ_PER_TX
; i
++) {
953 tcb
= tx_info
->tcb
[i
];
957 clear_bit(BNAD_TXQ_TX_STARTED
, &tcb
->flags
);
958 netif_stop_subqueue(bnad
->netdev
, txq_id
);
959 printk(KERN_INFO
"bna: %s %d TXQ_STOPPED\n",
960 bnad
->netdev
->name
, txq_id
);
965 bnad_cb_tx_resume(struct bnad
*bnad
, struct bna_tx
*tx
)
967 struct bnad_tx_info
*tx_info
= (struct bnad_tx_info
*)tx
->priv
;
972 for (i
= 0; i
< BNAD_MAX_TXQ_PER_TX
; i
++) {
973 tcb
= tx_info
->tcb
[i
];
978 BUG_ON(test_bit(BNAD_TXQ_TX_STARTED
, &tcb
->flags
));
979 set_bit(BNAD_TXQ_TX_STARTED
, &tcb
->flags
);
980 BUG_ON(*(tcb
->hw_consumer_index
) != 0);
982 if (netif_carrier_ok(bnad
->netdev
)) {
983 printk(KERN_INFO
"bna: %s %d TXQ_STARTED\n",
984 bnad
->netdev
->name
, txq_id
);
985 netif_wake_subqueue(bnad
->netdev
, txq_id
);
986 BNAD_UPDATE_CTR(bnad
, netif_queue_wakeup
);
991 * Workaround for first ioceth enable failure & we
992 * get a 0 MAC address. We try to get the MAC address
995 if (is_zero_ether_addr(&bnad
->perm_addr
.mac
[0])) {
996 bna_enet_perm_mac_get(&bnad
->bna
.enet
, &bnad
->perm_addr
);
997 bnad_set_netdev_perm_addr(bnad
);
1002 * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
1005 bnad_tx_cleanup(struct delayed_work
*work
)
1007 struct bnad_tx_info
*tx_info
=
1008 container_of(work
, struct bnad_tx_info
, tx_cleanup_work
);
1009 struct bnad
*bnad
= NULL
;
1010 struct bna_tcb
*tcb
;
1011 unsigned long flags
;
1014 for (i
= 0; i
< BNAD_MAX_TXQ_PER_TX
; i
++) {
1015 tcb
= tx_info
->tcb
[i
];
1021 if (test_and_set_bit(BNAD_TXQ_FREE_SENT
, &tcb
->flags
)) {
1026 bnad_txq_cleanup(bnad
, tcb
);
1028 smp_mb__before_clear_bit();
1029 clear_bit(BNAD_TXQ_FREE_SENT
, &tcb
->flags
);
1033 queue_delayed_work(bnad
->work_q
, &tx_info
->tx_cleanup_work
,
1034 msecs_to_jiffies(1));
1038 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1039 bna_tx_cleanup_complete(tx_info
->tx
);
1040 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1044 bnad_cb_tx_cleanup(struct bnad
*bnad
, struct bna_tx
*tx
)
1046 struct bnad_tx_info
*tx_info
= (struct bnad_tx_info
*)tx
->priv
;
1047 struct bna_tcb
*tcb
;
1050 for (i
= 0; i
< BNAD_MAX_TXQ_PER_TX
; i
++) {
1051 tcb
= tx_info
->tcb
[i
];
1056 queue_delayed_work(bnad
->work_q
, &tx_info
->tx_cleanup_work
, 0);
1060 bnad_cb_rx_stall(struct bnad
*bnad
, struct bna_rx
*rx
)
1062 struct bnad_rx_info
*rx_info
= (struct bnad_rx_info
*)rx
->priv
;
1063 struct bna_ccb
*ccb
;
1064 struct bnad_rx_ctrl
*rx_ctrl
;
1067 for (i
= 0; i
< BNAD_MAX_RXP_PER_RX
; i
++) {
1068 rx_ctrl
= &rx_info
->rx_ctrl
[i
];
1073 clear_bit(BNAD_RXQ_POST_OK
, &ccb
->rcb
[0]->flags
);
1076 clear_bit(BNAD_RXQ_POST_OK
, &ccb
->rcb
[1]->flags
);
1081 * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
1084 bnad_rx_cleanup(void *work
)
1086 struct bnad_rx_info
*rx_info
=
1087 container_of(work
, struct bnad_rx_info
, rx_cleanup_work
);
1088 struct bnad_rx_ctrl
*rx_ctrl
;
1089 struct bnad
*bnad
= NULL
;
1090 unsigned long flags
;
1093 for (i
= 0; i
< BNAD_MAX_RXP_PER_RX
; i
++) {
1094 rx_ctrl
= &rx_info
->rx_ctrl
[i
];
1099 bnad
= rx_ctrl
->ccb
->bnad
;
1102 * Wait till the poll handler has exited
1103 * and nothing can be scheduled anymore
1105 napi_disable(&rx_ctrl
->napi
);
1107 bnad_cq_cleanup(bnad
, rx_ctrl
->ccb
);
1108 bnad_rxq_cleanup(bnad
, rx_ctrl
->ccb
->rcb
[0]);
1109 if (rx_ctrl
->ccb
->rcb
[1])
1110 bnad_rxq_cleanup(bnad
, rx_ctrl
->ccb
->rcb
[1]);
1113 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1114 bna_rx_cleanup_complete(rx_info
->rx
);
1115 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1119 bnad_cb_rx_cleanup(struct bnad
*bnad
, struct bna_rx
*rx
)
1121 struct bnad_rx_info
*rx_info
= (struct bnad_rx_info
*)rx
->priv
;
1122 struct bna_ccb
*ccb
;
1123 struct bnad_rx_ctrl
*rx_ctrl
;
1126 for (i
= 0; i
< BNAD_MAX_RXP_PER_RX
; i
++) {
1127 rx_ctrl
= &rx_info
->rx_ctrl
[i
];
1132 clear_bit(BNAD_RXQ_STARTED
, &ccb
->rcb
[0]->flags
);
1135 clear_bit(BNAD_RXQ_STARTED
, &ccb
->rcb
[1]->flags
);
1138 queue_work(bnad
->work_q
, &rx_info
->rx_cleanup_work
);
1142 bnad_cb_rx_post(struct bnad
*bnad
, struct bna_rx
*rx
)
1144 struct bnad_rx_info
*rx_info
= (struct bnad_rx_info
*)rx
->priv
;
1145 struct bna_ccb
*ccb
;
1146 struct bna_rcb
*rcb
;
1147 struct bnad_rx_ctrl
*rx_ctrl
;
1150 for (i
= 0; i
< BNAD_MAX_RXP_PER_RX
; i
++) {
1151 rx_ctrl
= &rx_info
->rx_ctrl
[i
];
1156 napi_enable(&rx_ctrl
->napi
);
1158 for (j
= 0; j
< BNAD_MAX_RXQ_PER_RXP
; j
++) {
1163 bnad_rxq_alloc_init(bnad
, rcb
);
1164 set_bit(BNAD_RXQ_STARTED
, &rcb
->flags
);
1165 set_bit(BNAD_RXQ_POST_OK
, &rcb
->flags
);
1166 bnad_rxq_post(bnad
, rcb
);
1172 bnad_cb_rx_disabled(void *arg
, struct bna_rx
*rx
)
1174 struct bnad
*bnad
= (struct bnad
*)arg
;
1176 complete(&bnad
->bnad_completions
.rx_comp
);
1180 bnad_cb_rx_mcast_add(struct bnad
*bnad
, struct bna_rx
*rx
)
1182 bnad
->bnad_completions
.mcast_comp_status
= BNA_CB_SUCCESS
;
1183 complete(&bnad
->bnad_completions
.mcast_comp
);
1187 bnad_cb_stats_get(struct bnad
*bnad
, enum bna_cb_status status
,
1188 struct bna_stats
*stats
)
1190 if (status
== BNA_CB_SUCCESS
)
1191 BNAD_UPDATE_CTR(bnad
, hw_stats_updates
);
1193 if (!netif_running(bnad
->netdev
) ||
1194 !test_bit(BNAD_RF_STATS_TIMER_RUNNING
, &bnad
->run_flags
))
1197 mod_timer(&bnad
->stats_timer
,
1198 jiffies
+ msecs_to_jiffies(BNAD_STATS_TIMER_FREQ
));
1202 bnad_cb_enet_mtu_set(struct bnad
*bnad
)
1204 bnad
->bnad_completions
.mtu_comp_status
= BNA_CB_SUCCESS
;
1205 complete(&bnad
->bnad_completions
.mtu_comp
);
1209 bnad_cb_completion(void *arg
, enum bfa_status status
)
1211 struct bnad_iocmd_comp
*iocmd_comp
=
1212 (struct bnad_iocmd_comp
*)arg
;
1214 iocmd_comp
->comp_status
= (u32
) status
;
1215 complete(&iocmd_comp
->comp
);
1218 /* Resource allocation, free functions */
1221 bnad_mem_free(struct bnad
*bnad
,
1222 struct bna_mem_info
*mem_info
)
1227 if (mem_info
->mdl
== NULL
)
1230 for (i
= 0; i
< mem_info
->num
; i
++) {
1231 if (mem_info
->mdl
[i
].kva
!= NULL
) {
1232 if (mem_info
->mem_type
== BNA_MEM_T_DMA
) {
1233 BNA_GET_DMA_ADDR(&(mem_info
->mdl
[i
].dma
),
1235 dma_free_coherent(&bnad
->pcidev
->dev
,
1236 mem_info
->mdl
[i
].len
,
1237 mem_info
->mdl
[i
].kva
, dma_pa
);
1239 kfree(mem_info
->mdl
[i
].kva
);
1242 kfree(mem_info
->mdl
);
1243 mem_info
->mdl
= NULL
;
1247 bnad_mem_alloc(struct bnad
*bnad
,
1248 struct bna_mem_info
*mem_info
)
1253 if ((mem_info
->num
== 0) || (mem_info
->len
== 0)) {
1254 mem_info
->mdl
= NULL
;
1258 mem_info
->mdl
= kcalloc(mem_info
->num
, sizeof(struct bna_mem_descr
),
1260 if (mem_info
->mdl
== NULL
)
1263 if (mem_info
->mem_type
== BNA_MEM_T_DMA
) {
1264 for (i
= 0; i
< mem_info
->num
; i
++) {
1265 mem_info
->mdl
[i
].len
= mem_info
->len
;
1266 mem_info
->mdl
[i
].kva
=
1267 dma_alloc_coherent(&bnad
->pcidev
->dev
,
1268 mem_info
->len
, &dma_pa
,
1270 if (mem_info
->mdl
[i
].kva
== NULL
)
1273 BNA_SET_DMA_ADDR(dma_pa
,
1274 &(mem_info
->mdl
[i
].dma
));
1277 for (i
= 0; i
< mem_info
->num
; i
++) {
1278 mem_info
->mdl
[i
].len
= mem_info
->len
;
1279 mem_info
->mdl
[i
].kva
= kzalloc(mem_info
->len
,
1281 if (mem_info
->mdl
[i
].kva
== NULL
)
1289 bnad_mem_free(bnad
, mem_info
);
1293 /* Free IRQ for Mailbox */
1295 bnad_mbox_irq_free(struct bnad
*bnad
)
1298 unsigned long flags
;
1300 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1301 bnad_disable_mbox_irq(bnad
);
1302 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1304 irq
= BNAD_GET_MBOX_IRQ(bnad
);
1305 free_irq(irq
, bnad
);
1309 * Allocates IRQ for Mailbox, but keep it disabled
1310 * This will be enabled once we get the mbox enable callback
1314 bnad_mbox_irq_alloc(struct bnad
*bnad
)
1317 unsigned long irq_flags
, flags
;
1319 irq_handler_t irq_handler
;
1321 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1322 if (bnad
->cfg_flags
& BNAD_CF_MSIX
) {
1323 irq_handler
= (irq_handler_t
)bnad_msix_mbox_handler
;
1324 irq
= bnad
->msix_table
[BNAD_MAILBOX_MSIX_INDEX
].vector
;
1327 irq_handler
= (irq_handler_t
)bnad_isr
;
1328 irq
= bnad
->pcidev
->irq
;
1329 irq_flags
= IRQF_SHARED
;
1332 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1333 sprintf(bnad
->mbox_irq_name
, "%s", BNAD_NAME
);
1336 * Set the Mbox IRQ disable flag, so that the IRQ handler
1337 * called from request_irq() for SHARED IRQs do not execute
1339 set_bit(BNAD_RF_MBOX_IRQ_DISABLED
, &bnad
->run_flags
);
1341 BNAD_UPDATE_CTR(bnad
, mbox_intr_disabled
);
1343 err
= request_irq(irq
, irq_handler
, irq_flags
,
1344 bnad
->mbox_irq_name
, bnad
);
1350 bnad_txrx_irq_free(struct bnad
*bnad
, struct bna_intr_info
*intr_info
)
1352 kfree(intr_info
->idl
);
1353 intr_info
->idl
= NULL
;
1356 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1358 bnad_txrx_irq_alloc(struct bnad
*bnad
, enum bnad_intr_source src
,
1359 u32 txrx_id
, struct bna_intr_info
*intr_info
)
1361 int i
, vector_start
= 0;
1363 unsigned long flags
;
1365 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1366 cfg_flags
= bnad
->cfg_flags
;
1367 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1369 if (cfg_flags
& BNAD_CF_MSIX
) {
1370 intr_info
->intr_type
= BNA_INTR_T_MSIX
;
1371 intr_info
->idl
= kcalloc(intr_info
->num
,
1372 sizeof(struct bna_intr_descr
),
1374 if (!intr_info
->idl
)
1379 vector_start
= BNAD_MAILBOX_MSIX_VECTORS
+ txrx_id
;
1383 vector_start
= BNAD_MAILBOX_MSIX_VECTORS
+
1384 (bnad
->num_tx
* bnad
->num_txq_per_tx
) +
1392 for (i
= 0; i
< intr_info
->num
; i
++)
1393 intr_info
->idl
[i
].vector
= vector_start
+ i
;
1395 intr_info
->intr_type
= BNA_INTR_T_INTX
;
1397 intr_info
->idl
= kcalloc(intr_info
->num
,
1398 sizeof(struct bna_intr_descr
),
1400 if (!intr_info
->idl
)
1405 intr_info
->idl
[0].vector
= BNAD_INTX_TX_IB_BITMASK
;
1409 intr_info
->idl
[0].vector
= BNAD_INTX_RX_IB_BITMASK
;
1416 /* NOTE: Should be called for MSIX only
1417 * Unregisters Tx MSIX vector(s) from the kernel
1420 bnad_tx_msix_unregister(struct bnad
*bnad
, struct bnad_tx_info
*tx_info
,
1426 for (i
= 0; i
< num_txqs
; i
++) {
1427 if (tx_info
->tcb
[i
] == NULL
)
1430 vector_num
= tx_info
->tcb
[i
]->intr_vector
;
1431 free_irq(bnad
->msix_table
[vector_num
].vector
, tx_info
->tcb
[i
]);
1435 /* NOTE: Should be called for MSIX only
1436 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1439 bnad_tx_msix_register(struct bnad
*bnad
, struct bnad_tx_info
*tx_info
,
1440 u32 tx_id
, int num_txqs
)
1446 for (i
= 0; i
< num_txqs
; i
++) {
1447 vector_num
= tx_info
->tcb
[i
]->intr_vector
;
1448 sprintf(tx_info
->tcb
[i
]->name
, "%s TXQ %d", bnad
->netdev
->name
,
1449 tx_id
+ tx_info
->tcb
[i
]->id
);
1450 err
= request_irq(bnad
->msix_table
[vector_num
].vector
,
1451 (irq_handler_t
)bnad_msix_tx
, 0,
1452 tx_info
->tcb
[i
]->name
,
1462 bnad_tx_msix_unregister(bnad
, tx_info
, (i
- 1));
1466 /* NOTE: Should be called for MSIX only
1467 * Unregisters Rx MSIX vector(s) from the kernel
1470 bnad_rx_msix_unregister(struct bnad
*bnad
, struct bnad_rx_info
*rx_info
,
1476 for (i
= 0; i
< num_rxps
; i
++) {
1477 if (rx_info
->rx_ctrl
[i
].ccb
== NULL
)
1480 vector_num
= rx_info
->rx_ctrl
[i
].ccb
->intr_vector
;
1481 free_irq(bnad
->msix_table
[vector_num
].vector
,
1482 rx_info
->rx_ctrl
[i
].ccb
);
1486 /* NOTE: Should be called for MSIX only
1487 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1490 bnad_rx_msix_register(struct bnad
*bnad
, struct bnad_rx_info
*rx_info
,
1491 u32 rx_id
, int num_rxps
)
1497 for (i
= 0; i
< num_rxps
; i
++) {
1498 vector_num
= rx_info
->rx_ctrl
[i
].ccb
->intr_vector
;
1499 sprintf(rx_info
->rx_ctrl
[i
].ccb
->name
, "%s CQ %d",
1501 rx_id
+ rx_info
->rx_ctrl
[i
].ccb
->id
);
1502 err
= request_irq(bnad
->msix_table
[vector_num
].vector
,
1503 (irq_handler_t
)bnad_msix_rx
, 0,
1504 rx_info
->rx_ctrl
[i
].ccb
->name
,
1505 rx_info
->rx_ctrl
[i
].ccb
);
1514 bnad_rx_msix_unregister(bnad
, rx_info
, (i
- 1));
1518 /* Free Tx object Resources */
1520 bnad_tx_res_free(struct bnad
*bnad
, struct bna_res_info
*res_info
)
1524 for (i
= 0; i
< BNA_TX_RES_T_MAX
; i
++) {
1525 if (res_info
[i
].res_type
== BNA_RES_T_MEM
)
1526 bnad_mem_free(bnad
, &res_info
[i
].res_u
.mem_info
);
1527 else if (res_info
[i
].res_type
== BNA_RES_T_INTR
)
1528 bnad_txrx_irq_free(bnad
, &res_info
[i
].res_u
.intr_info
);
1532 /* Allocates memory and interrupt resources for Tx object */
1534 bnad_tx_res_alloc(struct bnad
*bnad
, struct bna_res_info
*res_info
,
1539 for (i
= 0; i
< BNA_TX_RES_T_MAX
; i
++) {
1540 if (res_info
[i
].res_type
== BNA_RES_T_MEM
)
1541 err
= bnad_mem_alloc(bnad
,
1542 &res_info
[i
].res_u
.mem_info
);
1543 else if (res_info
[i
].res_type
== BNA_RES_T_INTR
)
1544 err
= bnad_txrx_irq_alloc(bnad
, BNAD_INTR_TX
, tx_id
,
1545 &res_info
[i
].res_u
.intr_info
);
1552 bnad_tx_res_free(bnad
, res_info
);
1556 /* Free Rx object Resources */
1558 bnad_rx_res_free(struct bnad
*bnad
, struct bna_res_info
*res_info
)
1562 for (i
= 0; i
< BNA_RX_RES_T_MAX
; i
++) {
1563 if (res_info
[i
].res_type
== BNA_RES_T_MEM
)
1564 bnad_mem_free(bnad
, &res_info
[i
].res_u
.mem_info
);
1565 else if (res_info
[i
].res_type
== BNA_RES_T_INTR
)
1566 bnad_txrx_irq_free(bnad
, &res_info
[i
].res_u
.intr_info
);
1570 /* Allocates memory and interrupt resources for Rx object */
1572 bnad_rx_res_alloc(struct bnad
*bnad
, struct bna_res_info
*res_info
,
1577 /* All memory needs to be allocated before setup_ccbs */
1578 for (i
= 0; i
< BNA_RX_RES_T_MAX
; i
++) {
1579 if (res_info
[i
].res_type
== BNA_RES_T_MEM
)
1580 err
= bnad_mem_alloc(bnad
,
1581 &res_info
[i
].res_u
.mem_info
);
1582 else if (res_info
[i
].res_type
== BNA_RES_T_INTR
)
1583 err
= bnad_txrx_irq_alloc(bnad
, BNAD_INTR_RX
, rx_id
,
1584 &res_info
[i
].res_u
.intr_info
);
1591 bnad_rx_res_free(bnad
, res_info
);
1595 /* Timer callbacks */
1598 bnad_ioc_timeout(unsigned long data
)
1600 struct bnad
*bnad
= (struct bnad
*)data
;
1601 unsigned long flags
;
1603 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1604 bfa_nw_ioc_timeout((void *) &bnad
->bna
.ioceth
.ioc
);
1605 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1609 bnad_ioc_hb_check(unsigned long data
)
1611 struct bnad
*bnad
= (struct bnad
*)data
;
1612 unsigned long flags
;
1614 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1615 bfa_nw_ioc_hb_check((void *) &bnad
->bna
.ioceth
.ioc
);
1616 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1620 bnad_iocpf_timeout(unsigned long data
)
1622 struct bnad
*bnad
= (struct bnad
*)data
;
1623 unsigned long flags
;
1625 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1626 bfa_nw_iocpf_timeout((void *) &bnad
->bna
.ioceth
.ioc
);
1627 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1631 bnad_iocpf_sem_timeout(unsigned long data
)
1633 struct bnad
*bnad
= (struct bnad
*)data
;
1634 unsigned long flags
;
1636 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1637 bfa_nw_iocpf_sem_timeout((void *) &bnad
->bna
.ioceth
.ioc
);
1638 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1642 * All timer routines use bnad->bna_lock to protect against
1643 * the following race, which may occur in case of no locking:
1651 /* b) Dynamic Interrupt Moderation Timer */
1653 bnad_dim_timeout(unsigned long data
)
1655 struct bnad
*bnad
= (struct bnad
*)data
;
1656 struct bnad_rx_info
*rx_info
;
1657 struct bnad_rx_ctrl
*rx_ctrl
;
1659 unsigned long flags
;
1661 if (!netif_carrier_ok(bnad
->netdev
))
1664 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1665 for (i
= 0; i
< bnad
->num_rx
; i
++) {
1666 rx_info
= &bnad
->rx_info
[i
];
1669 for (j
= 0; j
< bnad
->num_rxp_per_rx
; j
++) {
1670 rx_ctrl
= &rx_info
->rx_ctrl
[j
];
1673 bna_rx_dim_update(rx_ctrl
->ccb
);
1677 /* Check for BNAD_CF_DIM_ENABLED, does not eleminate a race */
1678 if (test_bit(BNAD_RF_DIM_TIMER_RUNNING
, &bnad
->run_flags
))
1679 mod_timer(&bnad
->dim_timer
,
1680 jiffies
+ msecs_to_jiffies(BNAD_DIM_TIMER_FREQ
));
1681 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1684 /* c) Statistics Timer */
1686 bnad_stats_timeout(unsigned long data
)
1688 struct bnad
*bnad
= (struct bnad
*)data
;
1689 unsigned long flags
;
1691 if (!netif_running(bnad
->netdev
) ||
1692 !test_bit(BNAD_RF_STATS_TIMER_RUNNING
, &bnad
->run_flags
))
1695 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1696 bna_hw_stats_get(&bnad
->bna
);
1697 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1701 * Set up timer for DIM
1702 * Called with bnad->bna_lock held
1705 bnad_dim_timer_start(struct bnad
*bnad
)
1707 if (bnad
->cfg_flags
& BNAD_CF_DIM_ENABLED
&&
1708 !test_bit(BNAD_RF_DIM_TIMER_RUNNING
, &bnad
->run_flags
)) {
1709 setup_timer(&bnad
->dim_timer
, bnad_dim_timeout
,
1710 (unsigned long)bnad
);
1711 set_bit(BNAD_RF_DIM_TIMER_RUNNING
, &bnad
->run_flags
);
1712 mod_timer(&bnad
->dim_timer
,
1713 jiffies
+ msecs_to_jiffies(BNAD_DIM_TIMER_FREQ
));
1718 * Set up timer for statistics
1719 * Called with mutex_lock(&bnad->conf_mutex) held
1722 bnad_stats_timer_start(struct bnad
*bnad
)
1724 unsigned long flags
;
1726 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1727 if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING
, &bnad
->run_flags
)) {
1728 setup_timer(&bnad
->stats_timer
, bnad_stats_timeout
,
1729 (unsigned long)bnad
);
1730 mod_timer(&bnad
->stats_timer
,
1731 jiffies
+ msecs_to_jiffies(BNAD_STATS_TIMER_FREQ
));
1733 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1737 * Stops the stats timer
1738 * Called with mutex_lock(&bnad->conf_mutex) held
1741 bnad_stats_timer_stop(struct bnad
*bnad
)
1744 unsigned long flags
;
1746 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1747 if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING
, &bnad
->run_flags
))
1749 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1751 del_timer_sync(&bnad
->stats_timer
);
1757 bnad_netdev_mc_list_get(struct net_device
*netdev
, u8
*mc_list
)
1759 int i
= 1; /* Index 0 has broadcast address */
1760 struct netdev_hw_addr
*mc_addr
;
1762 netdev_for_each_mc_addr(mc_addr
, netdev
) {
1763 memcpy(&mc_list
[i
* ETH_ALEN
], &mc_addr
->addr
[0],
1770 bnad_napi_poll_rx(struct napi_struct
*napi
, int budget
)
1772 struct bnad_rx_ctrl
*rx_ctrl
=
1773 container_of(napi
, struct bnad_rx_ctrl
, napi
);
1774 struct bnad
*bnad
= rx_ctrl
->bnad
;
1777 rx_ctrl
->rx_poll_ctr
++;
1779 if (!netif_carrier_ok(bnad
->netdev
))
1782 rcvd
= bnad_cq_process(bnad
, rx_ctrl
->ccb
, budget
);
1787 napi_complete(napi
);
1789 rx_ctrl
->rx_complete
++;
1792 bnad_enable_rx_irq_unsafe(rx_ctrl
->ccb
);
1797 #define BNAD_NAPI_POLL_QUOTA 64
1799 bnad_napi_add(struct bnad
*bnad
, u32 rx_id
)
1801 struct bnad_rx_ctrl
*rx_ctrl
;
1804 /* Initialize & enable NAPI */
1805 for (i
= 0; i
< bnad
->num_rxp_per_rx
; i
++) {
1806 rx_ctrl
= &bnad
->rx_info
[rx_id
].rx_ctrl
[i
];
1807 netif_napi_add(bnad
->netdev
, &rx_ctrl
->napi
,
1808 bnad_napi_poll_rx
, BNAD_NAPI_POLL_QUOTA
);
1813 bnad_napi_delete(struct bnad
*bnad
, u32 rx_id
)
1817 /* First disable and then clean up */
1818 for (i
= 0; i
< bnad
->num_rxp_per_rx
; i
++)
1819 netif_napi_del(&bnad
->rx_info
[rx_id
].rx_ctrl
[i
].napi
);
1822 /* Should be held with conf_lock held */
1824 bnad_destroy_tx(struct bnad
*bnad
, u32 tx_id
)
1826 struct bnad_tx_info
*tx_info
= &bnad
->tx_info
[tx_id
];
1827 struct bna_res_info
*res_info
= &bnad
->tx_res_info
[tx_id
].res_info
[0];
1828 unsigned long flags
;
1833 init_completion(&bnad
->bnad_completions
.tx_comp
);
1834 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1835 bna_tx_disable(tx_info
->tx
, BNA_HARD_CLEANUP
, bnad_cb_tx_disabled
);
1836 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1837 wait_for_completion(&bnad
->bnad_completions
.tx_comp
);
1839 if (tx_info
->tcb
[0]->intr_type
== BNA_INTR_T_MSIX
)
1840 bnad_tx_msix_unregister(bnad
, tx_info
,
1841 bnad
->num_txq_per_tx
);
1843 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1844 bna_tx_destroy(tx_info
->tx
);
1845 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1850 bnad_tx_res_free(bnad
, res_info
);
1853 /* Should be held with conf_lock held */
1855 bnad_setup_tx(struct bnad
*bnad
, u32 tx_id
)
1858 struct bnad_tx_info
*tx_info
= &bnad
->tx_info
[tx_id
];
1859 struct bna_res_info
*res_info
= &bnad
->tx_res_info
[tx_id
].res_info
[0];
1860 struct bna_intr_info
*intr_info
=
1861 &res_info
[BNA_TX_RES_INTR_T_TXCMPL
].res_u
.intr_info
;
1862 struct bna_tx_config
*tx_config
= &bnad
->tx_config
[tx_id
];
1863 static const struct bna_tx_event_cbfn tx_cbfn
= {
1864 .tcb_setup_cbfn
= bnad_cb_tcb_setup
,
1865 .tcb_destroy_cbfn
= bnad_cb_tcb_destroy
,
1866 .tx_stall_cbfn
= bnad_cb_tx_stall
,
1867 .tx_resume_cbfn
= bnad_cb_tx_resume
,
1868 .tx_cleanup_cbfn
= bnad_cb_tx_cleanup
,
1872 unsigned long flags
;
1874 tx_info
->tx_id
= tx_id
;
1876 /* Initialize the Tx object configuration */
1877 tx_config
->num_txq
= bnad
->num_txq_per_tx
;
1878 tx_config
->txq_depth
= bnad
->txq_depth
;
1879 tx_config
->tx_type
= BNA_TX_T_REGULAR
;
1880 tx_config
->coalescing_timeo
= bnad
->tx_coalescing_timeo
;
1882 /* Get BNA's resource requirement for one tx object */
1883 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1884 bna_tx_res_req(bnad
->num_txq_per_tx
,
1885 bnad
->txq_depth
, res_info
);
1886 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1888 /* Fill Unmap Q memory requirements */
1889 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info
[BNA_TX_RES_MEM_T_UNMAPQ
],
1890 bnad
->num_txq_per_tx
, (sizeof(struct bnad_tx_unmap
) *
1893 /* Allocate resources */
1894 err
= bnad_tx_res_alloc(bnad
, res_info
, tx_id
);
1898 /* Ask BNA to create one Tx object, supplying required resources */
1899 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1900 tx
= bna_tx_create(&bnad
->bna
, bnad
, tx_config
, &tx_cbfn
, res_info
,
1902 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1907 INIT_DELAYED_WORK(&tx_info
->tx_cleanup_work
,
1908 (work_func_t
)bnad_tx_cleanup
);
1910 /* Register ISR for the Tx object */
1911 if (intr_info
->intr_type
== BNA_INTR_T_MSIX
) {
1912 err
= bnad_tx_msix_register(bnad
, tx_info
,
1913 tx_id
, bnad
->num_txq_per_tx
);
1918 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1920 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1925 bnad_tx_res_free(bnad
, res_info
);
1929 /* Setup the rx config for bna_rx_create */
1930 /* bnad decides the configuration */
1932 bnad_init_rx_config(struct bnad
*bnad
, struct bna_rx_config
*rx_config
)
1934 rx_config
->rx_type
= BNA_RX_T_REGULAR
;
1935 rx_config
->num_paths
= bnad
->num_rxp_per_rx
;
1936 rx_config
->coalescing_timeo
= bnad
->rx_coalescing_timeo
;
1938 if (bnad
->num_rxp_per_rx
> 1) {
1939 rx_config
->rss_status
= BNA_STATUS_T_ENABLED
;
1940 rx_config
->rss_config
.hash_type
=
1941 (BFI_ENET_RSS_IPV6
|
1942 BFI_ENET_RSS_IPV6_TCP
|
1944 BFI_ENET_RSS_IPV4_TCP
);
1945 rx_config
->rss_config
.hash_mask
=
1946 bnad
->num_rxp_per_rx
- 1;
1947 get_random_bytes(rx_config
->rss_config
.toeplitz_hash_key
,
1948 sizeof(rx_config
->rss_config
.toeplitz_hash_key
));
1950 rx_config
->rss_status
= BNA_STATUS_T_DISABLED
;
1951 memset(&rx_config
->rss_config
, 0,
1952 sizeof(rx_config
->rss_config
));
1954 rx_config
->rxp_type
= BNA_RXP_SLR
;
1955 rx_config
->q_depth
= bnad
->rxq_depth
;
1957 rx_config
->small_buff_size
= BFI_SMALL_RXBUF_SIZE
;
1959 rx_config
->vlan_strip_status
= BNA_STATUS_T_ENABLED
;
1963 bnad_rx_ctrl_init(struct bnad
*bnad
, u32 rx_id
)
1965 struct bnad_rx_info
*rx_info
= &bnad
->rx_info
[rx_id
];
1968 for (i
= 0; i
< bnad
->num_rxp_per_rx
; i
++)
1969 rx_info
->rx_ctrl
[i
].bnad
= bnad
;
1972 /* Called with mutex_lock(&bnad->conf_mutex) held */
1974 bnad_destroy_rx(struct bnad
*bnad
, u32 rx_id
)
1976 struct bnad_rx_info
*rx_info
= &bnad
->rx_info
[rx_id
];
1977 struct bna_rx_config
*rx_config
= &bnad
->rx_config
[rx_id
];
1978 struct bna_res_info
*res_info
= &bnad
->rx_res_info
[rx_id
].res_info
[0];
1979 unsigned long flags
;
1986 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1987 if (bnad
->cfg_flags
& BNAD_CF_DIM_ENABLED
&&
1988 test_bit(BNAD_RF_DIM_TIMER_RUNNING
, &bnad
->run_flags
)) {
1989 clear_bit(BNAD_RF_DIM_TIMER_RUNNING
, &bnad
->run_flags
);
1992 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1994 del_timer_sync(&bnad
->dim_timer
);
1997 init_completion(&bnad
->bnad_completions
.rx_comp
);
1998 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1999 bna_rx_disable(rx_info
->rx
, BNA_HARD_CLEANUP
, bnad_cb_rx_disabled
);
2000 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2001 wait_for_completion(&bnad
->bnad_completions
.rx_comp
);
2003 if (rx_info
->rx_ctrl
[0].ccb
->intr_type
== BNA_INTR_T_MSIX
)
2004 bnad_rx_msix_unregister(bnad
, rx_info
, rx_config
->num_paths
);
2006 bnad_napi_delete(bnad
, rx_id
);
2008 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
2009 bna_rx_destroy(rx_info
->rx
);
2013 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2015 bnad_rx_res_free(bnad
, res_info
);
2018 /* Called with mutex_lock(&bnad->conf_mutex) held */
2020 bnad_setup_rx(struct bnad
*bnad
, u32 rx_id
)
2023 struct bnad_rx_info
*rx_info
= &bnad
->rx_info
[rx_id
];
2024 struct bna_res_info
*res_info
= &bnad
->rx_res_info
[rx_id
].res_info
[0];
2025 struct bna_intr_info
*intr_info
=
2026 &res_info
[BNA_RX_RES_T_INTR
].res_u
.intr_info
;
2027 struct bna_rx_config
*rx_config
= &bnad
->rx_config
[rx_id
];
2028 static const struct bna_rx_event_cbfn rx_cbfn
= {
2029 .rcb_setup_cbfn
= NULL
,
2030 .rcb_destroy_cbfn
= NULL
,
2031 .ccb_setup_cbfn
= bnad_cb_ccb_setup
,
2032 .ccb_destroy_cbfn
= bnad_cb_ccb_destroy
,
2033 .rx_stall_cbfn
= bnad_cb_rx_stall
,
2034 .rx_cleanup_cbfn
= bnad_cb_rx_cleanup
,
2035 .rx_post_cbfn
= bnad_cb_rx_post
,
2038 unsigned long flags
;
2040 rx_info
->rx_id
= rx_id
;
2042 /* Initialize the Rx object configuration */
2043 bnad_init_rx_config(bnad
, rx_config
);
2045 /* Get BNA's resource requirement for one Rx object */
2046 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
2047 bna_rx_res_req(rx_config
, res_info
);
2048 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2050 /* Fill Unmap Q memory requirements */
2051 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info
[BNA_RX_RES_MEM_T_UNMAPQ
],
2052 rx_config
->num_paths
+
2053 ((rx_config
->rxp_type
== BNA_RXP_SINGLE
) ?
2054 0 : rx_config
->num_paths
),
2055 ((bnad
->rxq_depth
* sizeof(struct bnad_rx_unmap
)) +
2056 sizeof(struct bnad_rx_unmap_q
)));
2058 /* Allocate resource */
2059 err
= bnad_rx_res_alloc(bnad
, res_info
, rx_id
);
2063 bnad_rx_ctrl_init(bnad
, rx_id
);
2065 /* Ask BNA to create one Rx object, supplying required resources */
2066 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
2067 rx
= bna_rx_create(&bnad
->bna
, bnad
, rx_config
, &rx_cbfn
, res_info
,
2071 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2075 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2077 INIT_WORK(&rx_info
->rx_cleanup_work
,
2078 (work_func_t
)(bnad_rx_cleanup
));
2081 * Init NAPI, so that state is set to NAPI_STATE_SCHED,
2082 * so that IRQ handler cannot schedule NAPI at this point.
2084 bnad_napi_add(bnad
, rx_id
);
2086 /* Register ISR for the Rx object */
2087 if (intr_info
->intr_type
== BNA_INTR_T_MSIX
) {
2088 err
= bnad_rx_msix_register(bnad
, rx_info
, rx_id
,
2089 rx_config
->num_paths
);
2094 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
2096 /* Set up Dynamic Interrupt Moderation Vector */
2097 if (bnad
->cfg_flags
& BNAD_CF_DIM_ENABLED
)
2098 bna_rx_dim_reconfig(&bnad
->bna
, bna_napi_dim_vector
);
2100 /* Enable VLAN filtering only on the default Rx */
2101 bna_rx_vlanfilter_enable(rx
);
2103 /* Start the DIM timer */
2104 bnad_dim_timer_start(bnad
);
2108 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2113 bnad_destroy_rx(bnad
, rx_id
);
2117 /* Called with conf_lock & bnad->bna_lock held */
2119 bnad_tx_coalescing_timeo_set(struct bnad
*bnad
)
2121 struct bnad_tx_info
*tx_info
;
2123 tx_info
= &bnad
->tx_info
[0];
2127 bna_tx_coalescing_timeo_set(tx_info
->tx
, bnad
->tx_coalescing_timeo
);
2130 /* Called with conf_lock & bnad->bna_lock held */
2132 bnad_rx_coalescing_timeo_set(struct bnad
*bnad
)
2134 struct bnad_rx_info
*rx_info
;
2137 for (i
= 0; i
< bnad
->num_rx
; i
++) {
2138 rx_info
= &bnad
->rx_info
[i
];
2141 bna_rx_coalescing_timeo_set(rx_info
->rx
,
2142 bnad
->rx_coalescing_timeo
);
2147 * Called with bnad->bna_lock held
2150 bnad_mac_addr_set_locked(struct bnad
*bnad
, u8
*mac_addr
)
2154 if (!is_valid_ether_addr(mac_addr
))
2155 return -EADDRNOTAVAIL
;
2157 /* If datapath is down, pretend everything went through */
2158 if (!bnad
->rx_info
[0].rx
)
2161 ret
= bna_rx_ucast_set(bnad
->rx_info
[0].rx
, mac_addr
, NULL
);
2162 if (ret
!= BNA_CB_SUCCESS
)
2163 return -EADDRNOTAVAIL
;
2168 /* Should be called with conf_lock held */
2170 bnad_enable_default_bcast(struct bnad
*bnad
)
2172 struct bnad_rx_info
*rx_info
= &bnad
->rx_info
[0];
2174 unsigned long flags
;
2176 init_completion(&bnad
->bnad_completions
.mcast_comp
);
2178 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
2179 ret
= bna_rx_mcast_add(rx_info
->rx
, (u8
*)bnad_bcast_addr
,
2180 bnad_cb_rx_mcast_add
);
2181 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2183 if (ret
== BNA_CB_SUCCESS
)
2184 wait_for_completion(&bnad
->bnad_completions
.mcast_comp
);
2188 if (bnad
->bnad_completions
.mcast_comp_status
!= BNA_CB_SUCCESS
)
2194 /* Called with mutex_lock(&bnad->conf_mutex) held */
2196 bnad_restore_vlans(struct bnad
*bnad
, u32 rx_id
)
2199 unsigned long flags
;
2201 for_each_set_bit(vid
, bnad
->active_vlans
, VLAN_N_VID
) {
2202 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
2203 bna_rx_vlan_add(bnad
->rx_info
[rx_id
].rx
, vid
);
2204 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2208 /* Statistics utilities */
2210 bnad_netdev_qstats_fill(struct bnad
*bnad
, struct rtnl_link_stats64
*stats
)
2214 for (i
= 0; i
< bnad
->num_rx
; i
++) {
2215 for (j
= 0; j
< bnad
->num_rxp_per_rx
; j
++) {
2216 if (bnad
->rx_info
[i
].rx_ctrl
[j
].ccb
) {
2217 stats
->rx_packets
+= bnad
->rx_info
[i
].
2218 rx_ctrl
[j
].ccb
->rcb
[0]->rxq
->rx_packets
;
2219 stats
->rx_bytes
+= bnad
->rx_info
[i
].
2220 rx_ctrl
[j
].ccb
->rcb
[0]->rxq
->rx_bytes
;
2221 if (bnad
->rx_info
[i
].rx_ctrl
[j
].ccb
->rcb
[1] &&
2222 bnad
->rx_info
[i
].rx_ctrl
[j
].ccb
->
2224 stats
->rx_packets
+=
2225 bnad
->rx_info
[i
].rx_ctrl
[j
].
2226 ccb
->rcb
[1]->rxq
->rx_packets
;
2228 bnad
->rx_info
[i
].rx_ctrl
[j
].
2229 ccb
->rcb
[1]->rxq
->rx_bytes
;
2234 for (i
= 0; i
< bnad
->num_tx
; i
++) {
2235 for (j
= 0; j
< bnad
->num_txq_per_tx
; j
++) {
2236 if (bnad
->tx_info
[i
].tcb
[j
]) {
2237 stats
->tx_packets
+=
2238 bnad
->tx_info
[i
].tcb
[j
]->txq
->tx_packets
;
2240 bnad
->tx_info
[i
].tcb
[j
]->txq
->tx_bytes
;
2247 * Must be called with the bna_lock held.
2250 bnad_netdev_hwstats_fill(struct bnad
*bnad
, struct rtnl_link_stats64
*stats
)
2252 struct bfi_enet_stats_mac
*mac_stats
;
2256 mac_stats
= &bnad
->stats
.bna_stats
->hw_stats
.mac_stats
;
2258 mac_stats
->rx_fcs_error
+ mac_stats
->rx_alignment_error
+
2259 mac_stats
->rx_frame_length_error
+ mac_stats
->rx_code_error
+
2260 mac_stats
->rx_undersize
;
2261 stats
->tx_errors
= mac_stats
->tx_fcs_error
+
2262 mac_stats
->tx_undersize
;
2263 stats
->rx_dropped
= mac_stats
->rx_drop
;
2264 stats
->tx_dropped
= mac_stats
->tx_drop
;
2265 stats
->multicast
= mac_stats
->rx_multicast
;
2266 stats
->collisions
= mac_stats
->tx_total_collision
;
2268 stats
->rx_length_errors
= mac_stats
->rx_frame_length_error
;
2270 /* receive ring buffer overflow ?? */
2272 stats
->rx_crc_errors
= mac_stats
->rx_fcs_error
;
2273 stats
->rx_frame_errors
= mac_stats
->rx_alignment_error
;
2274 /* recv'r fifo overrun */
2275 bmap
= bna_rx_rid_mask(&bnad
->bna
);
2276 for (i
= 0; bmap
; i
++) {
2278 stats
->rx_fifo_errors
+=
2279 bnad
->stats
.bna_stats
->
2280 hw_stats
.rxf_stats
[i
].frame_drops
;
2288 bnad_mbox_irq_sync(struct bnad
*bnad
)
2291 unsigned long flags
;
2293 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
2294 if (bnad
->cfg_flags
& BNAD_CF_MSIX
)
2295 irq
= bnad
->msix_table
[BNAD_MAILBOX_MSIX_INDEX
].vector
;
2297 irq
= bnad
->pcidev
->irq
;
2298 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2300 synchronize_irq(irq
);
2303 /* Utility used by bnad_start_xmit, for doing TSO */
2305 bnad_tso_prepare(struct bnad
*bnad
, struct sk_buff
*skb
)
2309 if (skb_header_cloned(skb
)) {
2310 err
= pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
);
2312 BNAD_UPDATE_CTR(bnad
, tso_err
);
2318 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2319 * excluding the length field.
2321 if (skb
->protocol
== htons(ETH_P_IP
)) {
2322 struct iphdr
*iph
= ip_hdr(skb
);
2324 /* Do we really need these? */
2328 tcp_hdr(skb
)->check
=
2329 ~csum_tcpudp_magic(iph
->saddr
, iph
->daddr
, 0,
2331 BNAD_UPDATE_CTR(bnad
, tso4
);
2333 struct ipv6hdr
*ipv6h
= ipv6_hdr(skb
);
2335 ipv6h
->payload_len
= 0;
2336 tcp_hdr(skb
)->check
=
2337 ~csum_ipv6_magic(&ipv6h
->saddr
, &ipv6h
->daddr
, 0,
2339 BNAD_UPDATE_CTR(bnad
, tso6
);
2346 * Initialize Q numbers depending on Rx Paths
2347 * Called with bnad->bna_lock held, because of cfg_flags
2351 bnad_q_num_init(struct bnad
*bnad
)
2355 rxps
= min((uint
)num_online_cpus(),
2356 (uint
)(BNAD_MAX_RX
* BNAD_MAX_RXP_PER_RX
));
2358 if (!(bnad
->cfg_flags
& BNAD_CF_MSIX
))
2359 rxps
= 1; /* INTx */
2363 bnad
->num_rxp_per_rx
= rxps
;
2364 bnad
->num_txq_per_tx
= BNAD_TXQ_NUM
;
2368 * Adjusts the Q numbers, given a number of msix vectors
2369 * Give preference to RSS as opposed to Tx priority Queues,
2370 * in such a case, just use 1 Tx Q
2371 * Called with bnad->bna_lock held b'cos of cfg_flags access
2374 bnad_q_num_adjust(struct bnad
*bnad
, int msix_vectors
, int temp
)
2376 bnad
->num_txq_per_tx
= 1;
2377 if ((msix_vectors
>= (bnad
->num_tx
* bnad
->num_txq_per_tx
) +
2378 bnad_rxqs_per_cq
+ BNAD_MAILBOX_MSIX_VECTORS
) &&
2379 (bnad
->cfg_flags
& BNAD_CF_MSIX
)) {
2380 bnad
->num_rxp_per_rx
= msix_vectors
-
2381 (bnad
->num_tx
* bnad
->num_txq_per_tx
) -
2382 BNAD_MAILBOX_MSIX_VECTORS
;
2384 bnad
->num_rxp_per_rx
= 1;
2387 /* Enable / disable ioceth */
2389 bnad_ioceth_disable(struct bnad
*bnad
)
2391 unsigned long flags
;
2394 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
2395 init_completion(&bnad
->bnad_completions
.ioc_comp
);
2396 bna_ioceth_disable(&bnad
->bna
.ioceth
, BNA_HARD_CLEANUP
);
2397 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2399 wait_for_completion_timeout(&bnad
->bnad_completions
.ioc_comp
,
2400 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT
));
2402 err
= bnad
->bnad_completions
.ioc_comp_status
;
2407 bnad_ioceth_enable(struct bnad
*bnad
)
2410 unsigned long flags
;
2412 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
2413 init_completion(&bnad
->bnad_completions
.ioc_comp
);
2414 bnad
->bnad_completions
.ioc_comp_status
= BNA_CB_WAITING
;
2415 bna_ioceth_enable(&bnad
->bna
.ioceth
);
2416 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2418 wait_for_completion_timeout(&bnad
->bnad_completions
.ioc_comp
,
2419 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT
));
2421 err
= bnad
->bnad_completions
.ioc_comp_status
;
2426 /* Free BNA resources */
2428 bnad_res_free(struct bnad
*bnad
, struct bna_res_info
*res_info
,
2433 for (i
= 0; i
< res_val_max
; i
++)
2434 bnad_mem_free(bnad
, &res_info
[i
].res_u
.mem_info
);
2437 /* Allocates memory and interrupt resources for BNA */
2439 bnad_res_alloc(struct bnad
*bnad
, struct bna_res_info
*res_info
,
2444 for (i
= 0; i
< res_val_max
; i
++) {
2445 err
= bnad_mem_alloc(bnad
, &res_info
[i
].res_u
.mem_info
);
2452 bnad_res_free(bnad
, res_info
, res_val_max
);
/* Interrupt enable / disable */
static void
bnad_enable_msix(struct bnad *bnad)
{
	int i, ret;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (bnad->msix_table)
		return;

	bnad->msix_table =
		kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);

	if (!bnad->msix_table)
		goto intx_mode;

	for (i = 0; i < bnad->msix_num; i++)
		bnad->msix_table[i].entry = i;

	ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
	if (ret > 0) {
		/* Not enough MSI-X vectors. */
		pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
			ret, bnad->msix_num);

		spin_lock_irqsave(&bnad->bna_lock, flags);
		/* ret = #of vectors that we got */
		bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
			(ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);

		bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
			BNAD_MAILBOX_MSIX_VECTORS;

		if (bnad->msix_num > ret)
			goto intx_mode;

		/* Try once more with adjusted numbers */
		/* If this fails, fall back to INTx */
		ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
				      bnad->msix_num);
		if (ret)
			goto intx_mode;

	} else if (ret < 0)
		goto intx_mode;

	pci_intx(bnad->pcidev, 0);

	return;

intx_mode:
	pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");

	kfree(bnad->msix_table);
	bnad->msix_table = NULL;
	bnad->msix_num = 0;
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad->cfg_flags &= ~BNAD_CF_MSIX;
	bnad_q_num_init(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
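/*
 * Summary of the fallback above: if the full MSI-X request cannot be
 * granted, the queue counts are shrunk to fit the vectors actually
 * available and the request is retried once; any further failure drops
 * the adapter back to INTx and re-derives the queue counts for that mode.
 */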
static void
bnad_disable_msix(struct bnad *bnad)
{
	u32 cfg_flags;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	cfg_flags = bnad->cfg_flags;
	if (bnad->cfg_flags & BNAD_CF_MSIX)
		bnad->cfg_flags &= ~BNAD_CF_MSIX;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (cfg_flags & BNAD_CF_MSIX) {
		pci_disable_msix(bnad->pcidev);
		kfree(bnad->msix_table);
		bnad->msix_table = NULL;
	}
}
/* Netdev entry points */
static int
bnad_open(struct net_device *netdev)
{
	int err;
	struct bnad *bnad = netdev_priv(netdev);
	struct bna_pause_config pause_config;
	u32 mtu;
	unsigned long flags;

	mutex_lock(&bnad->conf_mutex);

	/* Tx */
	err = bnad_setup_tx(bnad, 0);
	if (err)
		goto err_return;

	/* Rx */
	err = bnad_setup_rx(bnad, 0);
	if (err)
		goto cleanup_tx;

	/* Port */
	pause_config.tx_pause = 0;
	pause_config.rx_pause = 0;

	mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL);
	bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
	bna_enet_enable(&bnad->bna.enet);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Enable broadcast */
	bnad_enable_default_bcast(bnad);

	/* Restore VLANs, if any */
	bnad_restore_vlans(bnad, 0);

	/* Set the UCAST address */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Start the stats timer */
	bnad_stats_timer_start(bnad);

	mutex_unlock(&bnad->conf_mutex);

	return 0;

cleanup_tx:
	bnad_destroy_tx(bnad, 0);

err_return:
	mutex_unlock(&bnad->conf_mutex);
	return err;
}
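/*
 * Error unwinding in bnad_open() mirrors the setup order: a failed Rx
 * setup tears down the Tx side that was already created, and both error
 * paths release conf_mutex before returning the error to the stack.
 */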
static int
bnad_stop(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	mutex_lock(&bnad->conf_mutex);

	/* Stop the stats timer */
	bnad_stats_timer_stop(bnad);

	init_completion(&bnad->bnad_completions.enet_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
			bnad_cb_enet_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion(&bnad->bnad_completions.enet_comp);

	bnad_destroy_tx(bnad, 0);
	bnad_destroy_rx(bnad, 0);

	/* Synchronize mailbox IRQ */
	bnad_mbox_irq_sync(bnad);

	mutex_unlock(&bnad->conf_mutex);

	return 0;
}
/* TX */
/* Returns 0 for success */
static int
bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
		    struct sk_buff *skb, struct bna_txq_entry *txqent)
{
	u16 flags = 0;
	u32 gso_size;
	u16 vlan_tag = 0;

	if (vlan_tx_tag_present(skb)) {
		vlan_tag = (u16)vlan_tx_tag_get(skb);
		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
	}
	if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
		vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
				| (vlan_tag & 0x1fff);
		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
	}
	txqent->hdr.wi.vlan_tag = htons(vlan_tag);

	if (skb_is_gso(skb)) {
		gso_size = skb_shinfo(skb)->gso_size;
		if (unlikely(gso_size > bnad->netdev->mtu)) {
			BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
			return -EINVAL;
		}
		if (unlikely((gso_size + skb_transport_offset(skb) +
			      tcp_hdrlen(skb)) >= skb->len)) {
			txqent->hdr.wi.opcode =
				__constant_htons(BNA_TXQ_WI_SEND);
			txqent->hdr.wi.lso_mss = 0;
			BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
		} else {
			txqent->hdr.wi.opcode =
				__constant_htons(BNA_TXQ_WI_SEND_LSO);
			txqent->hdr.wi.lso_mss = htons(gso_size);
		}

		if (bnad_tso_prepare(bnad, skb)) {
			BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
			return -EINVAL;
		}

		flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
		txqent->hdr.wi.l4_hdr_size_n_offset =
			htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
			      tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
	} else {
		txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
		txqent->hdr.wi.lso_mss = 0;

		if (unlikely(skb->len > (bnad->netdev->mtu + ETH_HLEN))) {
			BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
			return -EINVAL;
		}

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			u8 proto = 0;

			if (skb->protocol == __constant_htons(ETH_P_IP))
				proto = ip_hdr(skb)->protocol;
#ifdef NETIF_F_IPV6_CSUM
			else if (skb->protocol ==
				 __constant_htons(ETH_P_IPV6)) {
				/* nexthdr may not be TCP immediately. */
				proto = ipv6_hdr(skb)->nexthdr;
			}
#endif
			if (proto == IPPROTO_TCP) {
				flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
				txqent->hdr.wi.l4_hdr_size_n_offset =
					htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
					      (0, skb_transport_offset(skb)));

				BNAD_UPDATE_CTR(bnad, tcpcsum_offload);

				if (unlikely(skb_headlen(skb) <
					     skb_transport_offset(skb) +
					     tcp_hdrlen(skb))) {
					BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
					return -EINVAL;
				}
			} else if (proto == IPPROTO_UDP) {
				flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
				txqent->hdr.wi.l4_hdr_size_n_offset =
					htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
					      (0, skb_transport_offset(skb)));

				BNAD_UPDATE_CTR(bnad, udpcsum_offload);
				if (unlikely(skb_headlen(skb) <
					     skb_transport_offset(skb) +
					     sizeof(struct udphdr))) {
					BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
					return -EINVAL;
				}
			} else {
				BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
				return -EINVAL;
			}
		} else
			txqent->hdr.wi.l4_hdr_size_n_offset = 0;
	}

	txqent->hdr.wi.flags = htons(flags);
	txqent->hdr.wi.frame_length = htonl(skb->len);

	return 0;
}
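/*
 * The work item (WI) header filled in above carries the per-packet
 * metadata for the hardware: opcode (plain send vs. LSO), VLAN tag, LSO
 * MSS, checksum-offload flags, the L4 header size/offset pair and the
 * total frame length. The caller (bnad_start_xmit) still programs the
 * data vectors and the vector count.
 */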
/*
 * bnad_start_xmit : Netdev entry point for Transmit
 *		     Called under lock held by net_device
 */
static netdev_tx_t
bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	u32 txq_id = 0;
	struct bna_tcb *tcb = NULL;
	struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
	u32 prod, q_depth, vect_id;
	u32 wis, vectors, len;
	int i;
	dma_addr_t dma_addr;
	struct bna_txq_entry *txqent;

	len = skb_headlen(skb);

	/* Sanity checks for the skb */

	if (unlikely(skb->len <= ETH_HLEN)) {
		dev_kfree_skb(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
		return NETDEV_TX_OK;
	}
	if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
		dev_kfree_skb(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
		return NETDEV_TX_OK;
	}
	if (unlikely(len == 0)) {
		dev_kfree_skb(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
		return NETDEV_TX_OK;
	}

	tcb = bnad->tx_info[0].tcb[txq_id];
	q_depth = tcb->q_depth;
	prod = tcb->producer_index;

	unmap_q = tcb->unmap_q;

	/*
	 * Takes care of the Tx that is scheduled between clearing the flag
	 * and the netif_tx_stop_all_queues() call.
	 */
	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
		dev_kfree_skb(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
		return NETDEV_TX_OK;
	}

	vectors = 1 + skb_shinfo(skb)->nr_frags;
	wis = BNA_TXQ_WI_NEEDED(vectors);	/* 4 vectors per work item */

	if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
		dev_kfree_skb(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
		return NETDEV_TX_OK;
	}

	/* Check for available TxQ resources */
	if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
		if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
		    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
			u32 sent;

			sent = bnad_txcmpl_process(bnad, tcb);
			if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
				bna_ib_ack(tcb->i_dbell, sent);
			smp_mb__before_clear_bit();
			clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
		} else {
			netif_stop_queue(netdev);
			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
		}

		smp_mb();
		/*
		 * Check again to deal with race condition between
		 * netif_stop_queue here, and netif_wake_queue in
		 * interrupt handler which is not inside netif tx lock.
		 */
		if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
			return NETDEV_TX_BUSY;
		} else {
			netif_wake_queue(netdev);
			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
		}
	}

	txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
	head_unmap = &unmap_q[prod];

	/* Program the opcode, flags, frame_len, num_vectors in WI */
	if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	txqent->hdr.wi.reserved = 0;
	txqent->hdr.wi.num_vectors = vectors;

	head_unmap->skb = skb;
	head_unmap->nvecs = 0;

	/* Program the vectors */
	unmap = head_unmap;
	dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
				  len, DMA_TO_DEVICE);
	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
	txqent->vector[0].length = htons(len);
	dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
	head_unmap->nvecs++;

	for (i = 0, vect_id = 0; i < vectors - 1; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		u16 size = skb_frag_size(frag);

		if (unlikely(size == 0)) {
			/* Undo the changes starting at tcb->producer_index */
			bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
				tcb->producer_index);
			dev_kfree_skb(skb);
			BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
			return NETDEV_TX_OK;
		}

		len += size;

		vect_id++;
		if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
			vect_id = 0;
			BNA_QE_INDX_INC(prod, q_depth);
			txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
			txqent->hdr.wi_ext.opcode =
				__constant_htons(BNA_TXQ_WI_EXTENSION);
			unmap = &unmap_q[prod];
		}

		dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
					    0, size, DMA_TO_DEVICE);
		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
		txqent->vector[vect_id].length = htons(size);
		dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
				   dma_addr);
		head_unmap->nvecs++;
	}

	if (unlikely(len != skb->len)) {
		/* Undo the changes starting at tcb->producer_index */
		bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
		dev_kfree_skb(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
		return NETDEV_TX_OK;
	}

	BNA_QE_INDX_INC(prod, q_depth);
	tcb->producer_index = prod;

	smp_mb();

	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		return NETDEV_TX_OK;

	bna_txq_prod_indx_doorbell(tcb);

	return NETDEV_TX_OK;
}
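/*
 * Transmit flow in short: validate the skb, reserve enough work items
 * for all vectors, DMA-map the linear part and each fragment, and only
 * then advance tcb->producer_index and ring the TxQ doorbell. Any error
 * after mapping starts is undone with bnad_tx_buff_unmap() so the
 * producer index never exposes a half-built work item to the hardware.
 */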
/*
 * A spin lock is used to synchronize reading of the stats structures,
 * which are written by BNA under the same lock.
 */
static struct rtnl_link_stats64 *
bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	bnad_netdev_qstats_fill(bnad, stats);
	bnad_netdev_hwstats_fill(bnad, stats);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return stats;
}
static void
bnad_set_rx_mode(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	u32 new_mask, valid_mask;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	new_mask = valid_mask = 0;

	if (netdev->flags & IFF_PROMISC) {
		if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
			new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
			valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
			bnad->cfg_flags |= BNAD_CF_PROMISC;
		}
	} else {
		if (bnad->cfg_flags & BNAD_CF_PROMISC) {
			new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
			valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
			bnad->cfg_flags &= ~BNAD_CF_PROMISC;
		}
	}

	if (netdev->flags & IFF_ALLMULTI) {
		if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
			new_mask |= BNA_RXMODE_ALLMULTI;
			valid_mask |= BNA_RXMODE_ALLMULTI;
			bnad->cfg_flags |= BNAD_CF_ALLMULTI;
		}
	} else {
		if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
			new_mask &= ~BNA_RXMODE_ALLMULTI;
			valid_mask |= BNA_RXMODE_ALLMULTI;
			bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
		}
	}

	if (bnad->rx_info[0].rx == NULL)
		goto unlock;

	bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);

	if (!netdev_mc_empty(netdev)) {
		u8 *mcaddr_list;
		int mc_count = netdev_mc_count(netdev);

		/* Index 0 holds the broadcast address */
		mcaddr_list =
			kzalloc((mc_count + 1) * ETH_ALEN,
				GFP_ATOMIC);
		if (!mcaddr_list)
			goto unlock;

		memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);

		/* Copy rest of the MC addresses */
		bnad_netdev_mc_list_get(netdev, mcaddr_list);

		bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
				mcaddr_list, NULL);

		/* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
		kfree(mcaddr_list);
	}
unlock:
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
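/*
 * new_mask/valid_mask semantics above: valid_mask marks which rxmode
 * bits are being changed in this call, while new_mask carries their new
 * values, so promiscuous and all-multicast can be toggled independently
 * without disturbing the other mode bits.
 */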
/*
 * bna_lock is used to sync writes to netdev->addr
 * conf_lock cannot be used since this call may be made
 * in a non-blocking context.
 */
static int
bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
{
	int err;
	struct bnad *bnad = netdev_priv(netdev);
	struct sockaddr *sa = (struct sockaddr *)mac_addr;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
	if (!err)
		memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return err;
}
static int
bnad_mtu_set(struct bnad *bnad, int mtu)
{
	unsigned long flags;

	init_completion(&bnad->bnad_completions.mtu_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion(&bnad->bnad_completions.mtu_comp);

	return bnad->bnad_completions.mtu_comp_status;
}
static int
bnad_change_mtu(struct net_device *netdev, int new_mtu)
{
	int err, mtu = netdev->mtu;
	struct bnad *bnad = netdev_priv(netdev);

	if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
		return -EINVAL;

	mutex_lock(&bnad->conf_mutex);

	netdev->mtu = new_mtu;

	mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
	err = bnad_mtu_set(bnad, mtu);
	if (err)
		err = -EBUSY;

	mutex_unlock(&bnad->conf_mutex);

	return err;
}
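/*
 * The wire MTU passed to the enet layer includes the L2 overhead on top
 * of the IP MTU: ETH_HLEN (14) + VLAN_HLEN (4) + ETH_FCS_LEN (4). For
 * the default 1500-byte MTU that is 1500 + 14 + 4 + 4 = 1522 bytes on
 * the wire.
 */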
static int
bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	if (!bnad->rx_info[0].rx)
		return 0;

	mutex_lock(&bnad->conf_mutex);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
	set_bit(vid, bnad->active_vlans);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);

	return 0;
}
static int
bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	if (!bnad->rx_info[0].rx)
		return 0;

	mutex_lock(&bnad->conf_mutex);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	clear_bit(vid, bnad->active_vlans);
	bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);

	return 0;
}
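/*
 * The active_vlans bitmap mirrors the VLAN filters programmed into the
 * hardware; bnad_restore_vlans() (called from bnad_open) is expected to
 * walk this bitmap to reprogram the filters after the Rx path is rebuilt.
 */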
#ifdef CONFIG_NET_POLL_CONTROLLER
static void
bnad_netpoll(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	u32 curr_mask;
	int i, j;

	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
		bna_intx_disable(&bnad->bna, curr_mask);
		bnad_isr(bnad->pcidev->irq, netdev);
		bna_intx_enable(&bnad->bna, curr_mask);
	} else {
		/*
		 * Tx processing may happen in sending context, so no need
		 * to explicitly process completions here
		 */

		/* Rx processing */
		for (i = 0; i < bnad->num_rx; i++) {
			rx_info = &bnad->rx_info[i];
			if (!rx_info->rx)
				continue;
			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
				rx_ctrl = &rx_info->rx_ctrl[j];
				if (rx_ctrl->ccb)
					bnad_netif_rx_schedule_poll(bnad,
							rx_ctrl->ccb);
			}
		}
	}
}
#endif
static const struct net_device_ops bnad_netdev_ops = {
	.ndo_open		= bnad_open,
	.ndo_stop		= bnad_stop,
	.ndo_start_xmit		= bnad_start_xmit,
	.ndo_get_stats64	= bnad_get_stats64,
	.ndo_set_rx_mode	= bnad_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnad_set_mac_address,
	.ndo_change_mtu		= bnad_change_mtu,
	.ndo_vlan_rx_add_vid	= bnad_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= bnad_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bnad_netpoll
#endif
};
static void
bnad_netdev_init(struct bnad *bnad, bool using_dac)
{
	struct net_device *netdev = bnad->netdev;

	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX;

	netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->mem_start = bnad->mmio_start;
	netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;

	netdev->netdev_ops = &bnad_netdev_ops;
	bnad_set_ethtool_ops(netdev);
}
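/*
 * hw_features lists what ethtool may toggle at runtime; features is the
 * currently-enabled set and additionally carries VLAN RX stripping and
 * filtering, which this driver does not expose as toggleable.
 */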
/*
 * 1. Initialize the bnad structure
 * 2. Setup netdev pointer in pci_dev
 * 3. Initialize no. of TxQ & CQs & MSIX vectors
 * 4. Initialize work queue.
 */
static int
bnad_init(struct bnad *bnad,
	  struct pci_dev *pdev, struct net_device *netdev)
{
	unsigned long flags;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);

	bnad->netdev = netdev;
	bnad->pcidev = pdev;
	bnad->mmio_start = pci_resource_start(pdev, 0);
	bnad->mmio_len = pci_resource_len(pdev, 0);
	bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
	if (!bnad->bar0) {
		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
		pci_set_drvdata(pdev, NULL);
		return -ENOMEM;
	}
	pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
		(unsigned long long) bnad->mmio_len);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!bnad_msix_disable)
		bnad->cfg_flags = BNAD_CF_MSIX;

	bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;

	bnad_q_num_init(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
		(bnad->num_rx * bnad->num_rxp_per_rx) +
		BNAD_MAILBOX_MSIX_VECTORS;

	bnad->txq_depth = BNAD_TXQ_DEPTH;
	bnad->rxq_depth = BNAD_RXQ_DEPTH;

	bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
	bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;

	sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
	bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
	if (!bnad->work_q) {
		iounmap(bnad->bar0);
		return -ENOMEM;
	}

	return 0;
}
/*
 * Must be called after bnad_pci_uninit()
 * so that iounmap() and pci_set_drvdata(NULL)
 * happen only after PCI uninitialization.
 */
static void
bnad_uninit(struct bnad *bnad)
{
	if (bnad->work_q) {
		flush_workqueue(bnad->work_q);
		destroy_workqueue(bnad->work_q);
		bnad->work_q = NULL;
	}

	if (bnad->bar0)
		iounmap(bnad->bar0);
	pci_set_drvdata(bnad->pcidev, NULL);
}
/*
 * Initialize locks:
 *	a) Per-ioceth mutex used for serializing configuration
 *	   changes from the OS interface
 *	b) spin lock used to protect the bna state machine
 */
static void
bnad_lock_init(struct bnad *bnad)
{
	spin_lock_init(&bnad->bna_lock);
	mutex_init(&bnad->conf_mutex);
	mutex_init(&bnad_list_mutex);
}

static void
bnad_lock_uninit(struct bnad *bnad)
{
	mutex_destroy(&bnad->conf_mutex);
	mutex_destroy(&bnad_list_mutex);
}
/* PCI Initialization */
static int
bnad_pci_init(struct bnad *bnad,
	      struct pci_dev *pdev, bool *using_dac)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;
	err = pci_request_regions(pdev, BNAD_NAME);
	if (err)
		goto disable_device;
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		*using_dac = true;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err)
				goto release_regions;
		}
		*using_dac = false;
	}
	pci_set_master(pdev);
	return 0;

release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);

	return err;
}
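/*
 * DMA mask selection above: try 64-bit streaming and coherent masks
 * first (using_dac = true lets bnad_netdev_init advertise
 * NETIF_F_HIGHDMA); otherwise fall back to 32-bit masks, or fail the
 * probe if even those cannot be set.
 */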
static void
bnad_pci_uninit(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
static int
bnad_pci_probe(struct pci_dev *pdev,
		const struct pci_device_id *pcidev_id)
{
	bool	using_dac;
	int	err;
	struct bnad *bnad;
	struct bna *bna;
	struct net_device *netdev;
	struct bfa_pcidev pcidev_info;
	unsigned long flags;

	pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
	       pdev, pcidev_id, PCI_FUNC(pdev->devfn));

	mutex_lock(&bnad_fwimg_mutex);
	if (!cna_get_firmware_buf(pdev)) {
		mutex_unlock(&bnad_fwimg_mutex);
		pr_warn("Failed to load Firmware Image!\n");
		return -ENODEV;
	}
	mutex_unlock(&bnad_fwimg_mutex);

	/*
	 * Allocates sizeof(struct net_device + struct bnad)
	 * bnad = netdev->priv
	 */
	netdev = alloc_etherdev(sizeof(struct bnad));
	if (!netdev)
		return -ENOMEM;
	bnad = netdev_priv(netdev);
	bnad_lock_init(bnad);
	bnad_add_to_list(bnad);

	mutex_lock(&bnad->conf_mutex);
	/*
	 * PCI initialization
	 *	Output : using_dac = 1 for 64 bit DMA
	 *			   = 0 for 32 bit DMA
	 */
	err = bnad_pci_init(bnad, pdev, &using_dac);
	if (err)
		goto unlock_mutex;

	/*
	 * Initialize bnad structure
	 * Setup relation between pci_dev & netdev
	 */
	err = bnad_init(bnad, pdev, netdev);
	if (err)
		goto pci_uninit;

	/* Initialize netdev structure, set up ethtool ops */
	bnad_netdev_init(bnad, using_dac);

	/* Set link to down state */
	netif_carrier_off(netdev);

	/* Setup the debugfs node for this bfad */
	if (bna_debugfs_enable)
		bnad_debugfs_init(bnad);

	/* Get resource requirement from bna */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_res_req(&bnad->res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Allocate resources from bna */
	err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
	if (err)
		goto drv_uninit;

	bna = &bnad->bna;

	/* Setup pcidev_info for bna_init() */
	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
	pcidev_info.device_id = bnad->pcidev->device;
	pcidev_info.pci_bar_kva = bnad->bar0;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad->stats.bna_stats = &bna->stats;

	bnad_enable_msix(bnad);
	err = bnad_mbox_irq_alloc(bnad);
	if (err)
		goto res_free;

	/* Set up timers */
	setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
			((unsigned long)bnad));
	setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
			((unsigned long)bnad));
	setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
			((unsigned long)bnad));
	setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
			((unsigned long)bnad));

	/* Now start the timer before calling IOC */
	mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
		  jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));

	/*
	 * Start the chip.
	 * If the call back comes with error, we bail out.
	 * This is a catastrophic error.
	 */
	err = bnad_ioceth_enable(bnad);
	if (err) {
		pr_err("BNA: Initialization failed err=%d\n",
		       err);
		goto probe_success;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
		bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
		bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
			bna_attr(bna)->num_rxp - 1);
		if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
			bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
			err = -EIO;
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (err)
		goto disable_ioceth;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
	if (err)
		goto disable_ioceth;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Get the burnt-in mac */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
	bnad_set_netdev_perm_addr(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);

	/* Finally, register with the net_device layer */
	err = register_netdev(netdev);
	if (err) {
		pr_err("BNA : Registering with netdev failed\n");
		goto probe_uninit;
	}
	set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);

	return 0;

probe_success:
	mutex_unlock(&bnad->conf_mutex);
	return 0;

probe_uninit:
	mutex_lock(&bnad->conf_mutex);
	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
disable_ioceth:
	bnad_ioceth_disable(bnad);
	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_uninit(bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	bnad_mbox_irq_free(bnad);
	bnad_disable_msix(bnad);
res_free:
	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
drv_uninit:
	/* Remove the debugfs node for this bnad */
	kfree(bnad->regdata);
	bnad_debugfs_uninit(bnad);
	bnad_uninit(bnad);
pci_uninit:
	bnad_pci_uninit(pdev);
unlock_mutex:
	mutex_unlock(&bnad->conf_mutex);
	bnad_remove_from_list(bnad);
	bnad_lock_uninit(bnad);
	free_netdev(netdev);
	return err;
}
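/*
 * The probe error labels above unwind in reverse order of setup: module
 * resources, ioceth/timers/IRQs, bna resources, debugfs and bnad state,
 * PCI, and finally the locks and the net_device allocation itself.
 */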
static void
bnad_pci_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnad *bnad;
	struct bna *bna;
	unsigned long flags;

	if (!netdev)
		return;

	pr_info("%s bnad_pci_remove\n", netdev->name);
	bnad = netdev_priv(netdev);
	bna = &bnad->bna;

	if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
		unregister_netdev(netdev);

	mutex_lock(&bnad->conf_mutex);
	bnad_ioceth_disable(bnad);
	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_uninit(bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
	bnad_mbox_irq_free(bnad);
	bnad_disable_msix(bnad);
	bnad_pci_uninit(pdev);
	mutex_unlock(&bnad->conf_mutex);
	bnad_remove_from_list(bnad);
	bnad_lock_uninit(bnad);
	/* Remove the debugfs node for this bnad */
	kfree(bnad->regdata);
	bnad_debugfs_uninit(bnad);
	bnad_uninit(bnad);
	free_netdev(netdev);
}
static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
			PCI_DEVICE_ID_BROCADE_CT),
		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
		.class_mask =  0xffff00
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
			BFA_PCI_DEVICE_ID_CT2),
		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
		.class_mask =  0xffff00
	},
	{0,  },
};

MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
static struct pci_driver bnad_pci_driver = {
	.name = BNAD_NAME,
	.id_table = bnad_pci_id_table,
	.probe = bnad_pci_probe,
	.remove = bnad_pci_remove,
};
static int __init
bnad_module_init(void)
{
	int err;

	pr_info("Brocade 10G Ethernet driver - version: %s\n",
			BNAD_VERSION);

	bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);

	err = pci_register_driver(&bnad_pci_driver);
	if (err < 0) {
		pr_err("bna : PCI registration failed in module init "
		       "(%d)\n", err);
		return err;
	}

	return 0;
}

static void __exit
bnad_module_exit(void)
{
	pci_unregister_driver(&bnad_pci_driver);
	release_firmware(bfi_fw);
}

module_init(bnad_module_init);
module_exit(bnad_module_exit);
MODULE_AUTHOR("Brocade");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
MODULE_VERSION(BNAD_VERSION);
MODULE_FIRMWARE(CNA_FW_FILE_CT);
MODULE_FIRMWARE(CNA_FW_FILE_CT2);