Commit | Line | Data |
---|---|---|
8b230ed8 RM |
1 | /* |
2 | * Linux network driver for Brocade Converged Network Adapter. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify it | |
5 | * under the terms of the GNU General Public License (GPL) Version 2 as | |
6 | * published by the Free Software Foundation | |
7 | * | |
8 | * This program is distributed in the hope that it will be useful, but | |
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
11 | * General Public License for more details. | |
12 | */ | |
13 | /* | |
14 | * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. | |
15 | * All rights reserved | |
16 | * www.brocade.com | |
17 | */ | |
f859d7cb | 18 | #include <linux/bitops.h> |
8b230ed8 RM |
19 | #include <linux/netdevice.h> |
20 | #include <linux/skbuff.h> | |
21 | #include <linux/etherdevice.h> | |
22 | #include <linux/in.h> | |
23 | #include <linux/ethtool.h> | |
24 | #include <linux/if_vlan.h> | |
25 | #include <linux/if_ether.h> | |
26 | #include <linux/ip.h> | |
70c71606 | 27 | #include <linux/prefetch.h> |
9d9779e7 | 28 | #include <linux/module.h> |
8b230ed8 RM |
29 | |
30 | #include "bnad.h" | |
31 | #include "bna.h" | |
32 | #include "cna.h" | |
33 | ||
b7ee31c5 | 34 | static DEFINE_MUTEX(bnad_fwimg_mutex); |
8b230ed8 RM |
35 | |
36 | /* | |
37 | * Module params | |
38 | */ | |
39 | static uint bnad_msix_disable; | |
40 | module_param(bnad_msix_disable, uint, 0444); | |
41 | MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode"); | |
42 | ||
43 | static uint bnad_ioc_auto_recover = 1; | |
44 | module_param(bnad_ioc_auto_recover, uint, 0444); | |
45 | MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery"); | |
46 | ||
7afc5dbd KG |
47 | static uint bna_debugfs_enable = 1; |
48 | module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR); | |
49 | MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1," | |
50 | " Range[false:0|true:1]"); | |
51 | ||
8b230ed8 RM |
52 | /* |
53 | * Global variables | |
54 | */ | |
55 | u32 bnad_rxqs_per_cq = 2; | |
e1e0918f | 56 | static u32 bna_id; |
57 | static struct mutex bnad_list_mutex; | |
58 | static LIST_HEAD(bnad_list); | |
b7ee31c5 | 59 | static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; |
8b230ed8 RM |
60 | |
61 | /* | |
62 | * Local MACROS | |
63 | */ | |
8b230ed8 RM |
64 | #define BNAD_GET_MBOX_IRQ(_bnad) \ |
65 | (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \ | |
8811e267 | 66 | ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \ |
8b230ed8 RM |
67 | ((_bnad)->pcidev->irq)) |
68 | ||
5216562a | 69 | #define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size) \ |
8b230ed8 RM |
70 | do { \ |
71 | (_res_info)->res_type = BNA_RES_T_MEM; \ | |
72 | (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \ | |
73 | (_res_info)->res_u.mem_info.num = (_num); \ | |
5216562a | 74 | (_res_info)->res_u.mem_info.len = (_size); \ |
8b230ed8 RM |
75 | } while (0) |
76 | ||
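A hypothetical usage sketch of BNAD_FILL_UNMAPQ_MEM_REQ (the res_info slot and the txq_depth variable below are illustrative, not taken from this file): the macro simply stamps a kernel-virtual-memory (KVA) request for _num blocks of _size bytes into a bna_res_info entry, which bnad_mem_alloc() further down in this file later satisfies.

	/* Illustrative only: ask for one unmap array per TxQ, sized to the queue depth. */
	struct bna_res_info *ri = &res_info[BNA_TX_RES_MEM_T_UNMAPQ];	/* hypothetical slot */
	BNAD_FILL_UNMAPQ_MEM_REQ(ri, bnad->num_txq_per_tx,
				 txq_depth * sizeof(struct bnad_tx_unmap));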
72a9730b KG |
77 | static void |
78 | bnad_add_to_list(struct bnad *bnad) | |
79 | { | |
80 | mutex_lock(&bnad_list_mutex); | |
81 | list_add_tail(&bnad->list_entry, &bnad_list); | |
82 | bnad->id = bna_id++; | |
83 | mutex_unlock(&bnad_list_mutex); | |
84 | } | |
85 | ||
86 | static void | |
87 | bnad_remove_from_list(struct bnad *bnad) | |
88 | { | |
89 | mutex_lock(&bnad_list_mutex); | |
90 | list_del(&bnad->list_entry); | |
91 | mutex_unlock(&bnad_list_mutex); | |
92 | } | |
93 | ||
8b230ed8 RM |
94 | /* |
95 | * Reinitialize completions in CQ, once Rx is taken down | |
96 | */ | |
97 | static void | |
b3cc6e88 | 98 | bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb) |
8b230ed8 | 99 | { |
5216562a | 100 | struct bna_cq_entry *cmpl; |
8b230ed8 RM |
101 | int i; |
102 | ||
8b230ed8 | 103 | for (i = 0; i < ccb->q_depth; i++) { |
5216562a | 104 | cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i]; |
8b230ed8 | 105 | cmpl->valid = 0; |
8b230ed8 RM |
106 | } |
107 | } | |
108 | ||
5216562a RM |
109 | /* Tx Datapath functions */ |
110 | ||
111 | ||
112 | /* Caller should ensure that the entry at unmap_q[index] is valid */ | |
271e8b79 | 113 | static u32 |
5216562a RM |
114 | bnad_tx_buff_unmap(struct bnad *bnad, |
115 | struct bnad_tx_unmap *unmap_q, | |
116 | u32 q_depth, u32 index) | |
271e8b79 | 117 | { |
5216562a RM |
118 | struct bnad_tx_unmap *unmap; |
119 | struct sk_buff *skb; | |
120 | int vector, nvecs; | |
121 | ||
122 | unmap = &unmap_q[index]; | |
123 | nvecs = unmap->nvecs; | |
124 | ||
125 | skb = unmap->skb; | |
126 | unmap->skb = NULL; | |
127 | unmap->nvecs = 0; | |
128 | dma_unmap_single(&bnad->pcidev->dev, | |
129 | dma_unmap_addr(&unmap->vectors[0], dma_addr), | |
130 | skb_headlen(skb), DMA_TO_DEVICE); | |
131 | dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0); | |
132 | nvecs--; | |
133 | ||
134 | vector = 0; | |
135 | while (nvecs) { | |
136 | vector++; | |
137 | if (vector == BFI_TX_MAX_VECTORS_PER_WI) { | |
138 | vector = 0; | |
139 | BNA_QE_INDX_INC(index, q_depth); | |
140 | unmap = &unmap_q[index]; | |
141 | } | |
271e8b79 | 142 | |
5216562a RM |
143 | dma_unmap_page(&bnad->pcidev->dev, |
144 | dma_unmap_addr(&unmap->vectors[vector], dma_addr), | |
145 | skb_shinfo(skb)->frags[nvecs].size, DMA_TO_DEVICE); | |
146 | dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0); | |
147 | nvecs--; | |
271e8b79 RM |
148 | } |
149 | ||
5216562a RM |
150 | BNA_QE_INDX_INC(index, q_depth); |
151 | ||
271e8b79 RM |
152 | return index; |
153 | } | |
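Reading the unmap loop above: the skb's linear area occupies vectors[0] of the first unmap entry, the next BFI_TX_MAX_VECTORS_PER_WI - 1 fragments fill the remaining vectors of that entry, and each later group of up to BFI_TX_MAX_VECTORS_PER_WI fragments consumes one more entry; the value returned is the queue index one past the last entry touched, which the completion path uses as the new consumer index.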
154 | ||
8b230ed8 RM |
155 | /* |
156 | * Frees all pending Tx Bufs | |
157 | * At this point no activity is expected on the Q, | |
158 | * so DMA unmap & freeing is fine. | |
159 | */ | |
160 | static void | |
5216562a | 161 | bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb) |
8b230ed8 | 162 | { |
5216562a RM |
163 | struct bnad_tx_unmap *unmap_q = tcb->unmap_q; |
164 | struct sk_buff *skb; | |
165 | int i; | |
8b230ed8 | 166 | |
5216562a RM |
167 | for (i = 0; i < tcb->q_depth; i++) { |
168 | skb = unmap_q[i].skb; | |
938fa488 | 169 | if (!skb) |
8b230ed8 | 170 | continue; |
5216562a | 171 | bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i); |
938fa488 | 172 | |
8b230ed8 RM |
173 | dev_kfree_skb_any(skb); |
174 | } | |
175 | } | |
176 | ||
8b230ed8 | 177 | /* |
b3cc6e88 | 178 | * bnad_txcmpl_process : Frees the Tx bufs on Tx completion |
8b230ed8 RM |
179 | * Can be called in a) Interrupt context |
180 | * b) Sending context | |
8b230ed8 RM |
181 | */ |
182 | static u32 | |
5216562a | 183 | bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb) |
8b230ed8 | 184 | { |
5216562a RM |
185 | u32 sent_packets = 0, sent_bytes = 0; |
186 | u32 wis, unmap_wis, hw_cons, cons, q_depth; | |
187 | struct bnad_tx_unmap *unmap_q = tcb->unmap_q; | |
188 | struct bnad_tx_unmap *unmap; | |
189 | struct sk_buff *skb; | |
8b230ed8 | 190 | |
d95d1081 | 191 | /* Just return if TX is stopped */ |
be7fa326 | 192 | if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) |
8b230ed8 RM |
193 | return 0; |
194 | ||
5216562a | 195 | hw_cons = *(tcb->hw_consumer_index); |
0570afff | 196 | rmb(); |
5216562a RM |
197 | cons = tcb->consumer_index; |
198 | q_depth = tcb->q_depth; | |
8b230ed8 | 199 | |
5216562a | 200 | wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth); |
8b230ed8 RM |
201 | BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth))); |
202 | ||
8b230ed8 | 203 | while (wis) { |
5216562a RM |
204 | unmap = &unmap_q[cons]; |
205 | ||
206 | skb = unmap->skb; | |
8b230ed8 | 207 | |
8b230ed8 RM |
208 | sent_packets++; |
209 | sent_bytes += skb->len; | |
8b230ed8 | 210 | |
5216562a RM |
211 | unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs); |
212 | wis -= unmap_wis; | |
8b230ed8 | 213 | |
5216562a | 214 | cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons); |
8b230ed8 RM |
215 | dev_kfree_skb_any(skb); |
216 | } | |
217 | ||
218 | /* Update consumer pointers. */ | |
5216562a | 219 | tcb->consumer_index = hw_cons; |
8b230ed8 RM |
220 | |
221 | tcb->txq->tx_packets += sent_packets; | |
222 | tcb->txq->tx_bytes += sent_bytes; | |
223 | ||
224 | return sent_packets; | |
225 | } | |
226 | ||
8b230ed8 | 227 | static u32 |
b3cc6e88 | 228 | bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb) |
8b230ed8 RM |
229 | { |
230 | struct net_device *netdev = bnad->netdev; | |
be7fa326 | 231 | u32 sent = 0; |
8b230ed8 RM |
232 | |
233 | if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) | |
234 | return 0; | |
235 | ||
b3cc6e88 | 236 | sent = bnad_txcmpl_process(bnad, tcb); |
8b230ed8 RM |
237 | if (sent) { |
238 | if (netif_queue_stopped(netdev) && | |
239 | netif_carrier_ok(netdev) && | |
240 | BNA_QE_FREE_CNT(tcb, tcb->q_depth) >= | |
241 | BNAD_NETIF_WAKE_THRESHOLD) { | |
be7fa326 RM |
242 | if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) { |
243 | netif_wake_queue(netdev); | |
244 | BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); | |
245 | } | |
8b230ed8 | 246 | } |
be7fa326 RM |
247 | } |
248 | ||
249 | if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) | |
8b230ed8 | 250 | bna_ib_ack(tcb->i_dbell, sent); |
8b230ed8 RM |
251 | |
252 | smp_mb__before_clear_bit(); | |
253 | clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); | |
254 | ||
255 | return sent; | |
256 | } | |
257 | ||
258 | /* MSIX Tx Completion Handler */ | |
259 | static irqreturn_t | |
260 | bnad_msix_tx(int irq, void *data) | |
261 | { | |
262 | struct bna_tcb *tcb = (struct bna_tcb *)data; | |
263 | struct bnad *bnad = tcb->bnad; | |
264 | ||
b3cc6e88 | 265 | bnad_tx_complete(bnad, tcb); |
8b230ed8 RM |
266 | |
267 | return IRQ_HANDLED; | |
268 | } | |
269 | ||
30f9fc94 RM |
270 | static inline void |
271 | bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb) | |
272 | { | |
273 | struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; | |
274 | ||
275 | unmap_q->reuse_pi = -1; | |
276 | unmap_q->alloc_order = -1; | |
277 | unmap_q->map_size = 0; | |
278 | unmap_q->type = BNAD_RXBUF_NONE; | |
279 | } | |
280 | ||
281 | /* Default is page-based allocation. Multi-buffer support - TBD */ | |
282 | static int | |
283 | bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb) | |
284 | { | |
285 | struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; | |
286 | int mtu, order; | |
287 | ||
288 | bnad_rxq_alloc_uninit(bnad, rcb); | |
289 | ||
290 | mtu = bna_enet_mtu_get(&bnad->bna.enet); | |
291 | order = get_order(mtu); | |
292 | ||
293 | if (bna_is_small_rxq(rcb->id)) { | |
294 | unmap_q->alloc_order = 0; | |
295 | unmap_q->map_size = rcb->rxq->buffer_size; | |
296 | } else { | |
297 | unmap_q->alloc_order = order; | |
298 | unmap_q->map_size = | |
299 | (rcb->rxq->buffer_size > 2048) ? | |
300 | PAGE_SIZE << order : 2048; | |
301 | } | |
302 | ||
303 | BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size)); | |
304 | ||
305 | unmap_q->type = BNAD_RXBUF_PAGE; | |
306 | ||
307 | return 0; | |
308 | } | |
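A worked example of the sizing above (assuming 4 KB pages and a buffer_size that roughly tracks the MTU, which this function does not itself guarantee): with a standard 1500-byte MTU, get_order() returns 0 and buffer_size stays at or below 2048, so each 4 KB page is carved into 2 KB map_size chunks handed out one after another via reuse_pi; with a 9000-byte jumbo MTU, get_order(9000) = 2, so the large RxQ allocates 16 KB compound pages and, because buffer_size exceeds 2048, map_size becomes the full 16 KB. In both cases (PAGE_SIZE << order) is a multiple of map_size, so the BUG_ON() holds.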
309 | ||
310 | static inline void | |
311 | bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap) | |
312 | { | |
313 | if (!unmap->page) | |
314 | return; | |
315 | ||
316 | dma_unmap_page(&bnad->pcidev->dev, | |
317 | dma_unmap_addr(&unmap->vector, dma_addr), | |
318 | unmap->vector.len, DMA_FROM_DEVICE); | |
319 | put_page(unmap->page); | |
320 | unmap->page = NULL; | |
321 | dma_unmap_addr_set(&unmap->vector, dma_addr, 0); | |
322 | unmap->vector.len = 0; | |
323 | } | |
324 | ||
325 | static inline void | |
326 | bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap) | |
327 | { | |
328 | if (!unmap->skb) | |
329 | return; | |
330 | ||
331 | dma_unmap_single(&bnad->pcidev->dev, | |
332 | dma_unmap_addr(&unmap->vector, dma_addr), | |
333 | unmap->vector.len, DMA_FROM_DEVICE); | |
334 | dev_kfree_skb_any(unmap->skb); | |
335 | unmap->skb = NULL; | |
336 | dma_unmap_addr_set(&unmap->vector, dma_addr, 0); | |
337 | unmap->vector.len = 0; | |
338 | } | |
339 | ||
8b230ed8 | 340 | static void |
b3cc6e88 | 341 | bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb) |
8b230ed8 | 342 | { |
30f9fc94 | 343 | struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; |
5216562a RM |
344 | int i; |
345 | ||
346 | for (i = 0; i < rcb->q_depth; i++) { | |
30f9fc94 | 347 | struct bnad_rx_unmap *unmap = &unmap_q->unmap[i]; |
8b230ed8 | 348 | |
30f9fc94 RM |
349 | if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) |
350 | bnad_rxq_cleanup_page(bnad, unmap); | |
351 | else | |
352 | bnad_rxq_cleanup_skb(bnad, unmap); | |
353 | } | |
354 | bnad_rxq_alloc_uninit(bnad, rcb); | |
355 | } | |
5216562a | 356 | |
30f9fc94 RM |
357 | static u32 |
358 | bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc) | |
359 | { | |
360 | u32 alloced, prod, q_depth; | |
361 | struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; | |
362 | struct bnad_rx_unmap *unmap, *prev; | |
363 | struct bna_rxq_entry *rxent; | |
364 | struct page *page; | |
365 | u32 page_offset, alloc_size; | |
366 | dma_addr_t dma_addr; | |
367 | ||
368 | prod = rcb->producer_index; | |
369 | q_depth = rcb->q_depth; | |
370 | ||
371 | alloc_size = PAGE_SIZE << unmap_q->alloc_order; | |
372 | alloced = 0; | |
373 | ||
374 | while (nalloc--) { | |
375 | unmap = &unmap_q->unmap[prod]; | |
376 | ||
377 | if (unmap_q->reuse_pi < 0) { | |
378 | page = alloc_pages(GFP_ATOMIC | __GFP_COMP, | |
379 | unmap_q->alloc_order); | |
380 | page_offset = 0; | |
381 | } else { | |
382 | prev = &unmap_q->unmap[unmap_q->reuse_pi]; | |
383 | page = prev->page; | |
384 | page_offset = prev->page_offset + unmap_q->map_size; | |
385 | get_page(page); | |
386 | } | |
387 | ||
388 | if (unlikely(!page)) { | |
389 | BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed); | |
390 | rcb->rxq->rxbuf_alloc_failed++; | |
391 | goto finishing; | |
392 | } | |
393 | ||
394 | dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset, | |
395 | unmap_q->map_size, DMA_FROM_DEVICE); | |
396 | ||
397 | unmap->page = page; | |
398 | unmap->page_offset = page_offset; | |
399 | dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr); | |
400 | unmap->vector.len = unmap_q->map_size; | |
401 | page_offset += unmap_q->map_size; | |
402 | ||
403 | if (page_offset < alloc_size) | |
404 | unmap_q->reuse_pi = prod; | |
405 | else | |
406 | unmap_q->reuse_pi = -1; | |
407 | ||
408 | rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod]; | |
409 | BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr); | |
410 | BNA_QE_INDX_INC(prod, q_depth); | |
411 | alloced++; | |
412 | } | |
413 | ||
414 | finishing: | |
415 | if (likely(alloced)) { | |
416 | rcb->producer_index = prod; | |
417 | smp_mb(); | |
418 | if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags))) | |
419 | bna_rxq_prod_indx_doorbell(rcb); | |
8b230ed8 | 420 | } |
30f9fc94 RM |
421 | |
422 | return alloced; | |
8b230ed8 RM |
423 | } |
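Note on the refill loop above: unmap_q->reuse_pi remembers the producer slot whose compound page still has an unused map_size chunk. On the next pass the driver takes an extra reference with get_page() and maps the following chunk of the same page instead of allocating a new one; once page_offset reaches alloc_size the page is exhausted and reuse_pi drops back to -1, forcing a fresh alloc_pages() call.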
424 | ||
30f9fc94 RM |
425 | static u32 |
426 | bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc) | |
8b230ed8 | 427 | { |
30f9fc94 RM |
428 | u32 alloced, prod, q_depth, buff_sz; |
429 | struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; | |
5216562a | 430 | struct bnad_rx_unmap *unmap; |
8b230ed8 RM |
431 | struct bna_rxq_entry *rxent; |
432 | struct sk_buff *skb; | |
433 | dma_addr_t dma_addr; | |
434 | ||
5216562a | 435 | buff_sz = rcb->rxq->buffer_size; |
5216562a RM |
436 | prod = rcb->producer_index; |
437 | q_depth = rcb->q_depth; | |
8b230ed8 | 438 | |
30f9fc94 RM |
439 | alloced = 0; |
440 | while (nalloc--) { | |
441 | unmap = &unmap_q->unmap[prod]; | |
442 | ||
443 | skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz); | |
444 | ||
8b230ed8 RM |
445 | if (unlikely(!skb)) { |
446 | BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed); | |
3caa1e95 | 447 | rcb->rxq->rxbuf_alloc_failed++; |
8b230ed8 RM |
448 | goto finishing; |
449 | } | |
5ea74318 | 450 | dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, |
5216562a | 451 | buff_sz, DMA_FROM_DEVICE); |
8b230ed8 | 452 | |
5216562a RM |
453 | unmap->skb = skb; |
454 | dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr); | |
455 | unmap->vector.len = buff_sz; | |
30f9fc94 RM |
456 | |
457 | rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod]; | |
458 | BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr); | |
5216562a | 459 | BNA_QE_INDX_INC(prod, q_depth); |
8b230ed8 RM |
460 | alloced++; |
461 | } | |
462 | ||
463 | finishing: | |
464 | if (likely(alloced)) { | |
5216562a | 465 | rcb->producer_index = prod; |
8b230ed8 | 466 | smp_mb(); |
5bcf6ac0 | 467 | if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags))) |
be7fa326 | 468 | bna_rxq_prod_indx_doorbell(rcb); |
8b230ed8 | 469 | } |
30f9fc94 RM |
470 | |
471 | return alloced; | |
472 | } | |
473 | ||
474 | static inline void | |
475 | bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb) | |
476 | { | |
477 | struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; | |
478 | u32 to_alloc; | |
479 | ||
480 | to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth); | |
481 | if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)) | |
482 | return; | |
483 | ||
484 | if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) | |
485 | bnad_rxq_refill_page(bnad, rcb, to_alloc); | |
486 | else | |
487 | bnad_rxq_refill_skb(bnad, rcb, to_alloc); | |
8b230ed8 RM |
488 | } |
489 | ||
5e46631f RM |
490 | #define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \ |
491 | BNA_CQ_EF_IPV6 | \ | |
492 | BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \ | |
493 | BNA_CQ_EF_L4_CKSUM_OK) | |
494 | ||
495 | #define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \ | |
496 | BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK) | |
497 | #define flags_tcp6 (BNA_CQ_EF_IPV6 | \ | |
498 | BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK) | |
499 | #define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \ | |
500 | BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK) | |
501 | #define flags_udp6 (BNA_CQ_EF_IPV6 | \ | |
502 | BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK) | |
503 | ||
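For example, a received IPv4/TCP frame whose IP and TCP checksums were both verified by the hardware carries exactly BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK in the masked flags, i.e. masked_flags == flags_tcp4, so bnad_cq_process() below marks the skb CHECKSUM_UNNECESSARY; any other combination falls through to skb_checksum_none_assert().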
30f9fc94 RM |
504 | static inline struct sk_buff * |
505 | bnad_cq_prepare_skb(struct bnad_rx_ctrl *rx_ctrl, | |
506 | struct bnad_rx_unmap_q *unmap_q, | |
507 | struct bnad_rx_unmap *unmap, | |
508 | u32 length, u32 flags) | |
509 | { | |
510 | struct bnad *bnad = rx_ctrl->bnad; | |
511 | struct sk_buff *skb; | |
512 | ||
513 | if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) { | |
514 | skb = napi_get_frags(&rx_ctrl->napi); | |
515 | if (unlikely(!skb)) | |
516 | return NULL; | |
517 | ||
518 | dma_unmap_page(&bnad->pcidev->dev, | |
519 | dma_unmap_addr(&unmap->vector, dma_addr), | |
520 | unmap->vector.len, DMA_FROM_DEVICE); | |
521 | skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, | |
522 | unmap->page, unmap->page_offset, length); | |
523 | skb->len += length; | |
524 | skb->data_len += length; | |
525 | skb->truesize += length; | |
526 | ||
527 | unmap->page = NULL; | |
528 | unmap->vector.len = 0; | |
529 | ||
530 | return skb; | |
531 | } | |
532 | ||
533 | skb = unmap->skb; | |
534 | BUG_ON(!skb); | |
535 | ||
536 | dma_unmap_single(&bnad->pcidev->dev, | |
537 | dma_unmap_addr(&unmap->vector, dma_addr), | |
538 | unmap->vector.len, DMA_FROM_DEVICE); | |
539 | ||
540 | skb_put(skb, length); | |
541 | ||
542 | skb->protocol = eth_type_trans(skb, bnad->netdev); | |
543 | ||
544 | unmap->skb = NULL; | |
545 | unmap->vector.len = 0; | |
546 | return skb; | |
547 | } | |
548 | ||
8b230ed8 | 549 | static u32 |
b3cc6e88 | 550 | bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) |
8b230ed8 | 551 | { |
30f9fc94 | 552 | struct bna_cq_entry *cq, *cmpl; |
8b230ed8 | 553 | struct bna_rcb *rcb = NULL; |
30f9fc94 RM |
554 | struct bnad_rx_unmap_q *unmap_q; |
555 | struct bnad_rx_unmap *unmap; | |
8b230ed8 | 556 | struct sk_buff *skb; |
8b230ed8 | 557 | struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate; |
30f9fc94 RM |
558 | struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl; |
559 | u32 packets = 0, length = 0, flags, masked_flags; | |
078086f3 | 560 | |
8b230ed8 | 561 | prefetch(bnad->netdev); |
5216562a RM |
562 | |
563 | cq = ccb->sw_q; | |
564 | cmpl = &cq[ccb->producer_index]; | |
565 | ||
566 | while (cmpl->valid && (packets < budget)) { | |
8b230ed8 | 567 | packets++; |
30f9fc94 RM |
568 | flags = ntohl(cmpl->flags); |
569 | length = ntohs(cmpl->length); | |
8b230ed8 RM |
570 | BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length)); |
571 | ||
078086f3 | 572 | if (bna_is_small_rxq(cmpl->rxq_id)) |
8b230ed8 | 573 | rcb = ccb->rcb[1]; |
078086f3 RM |
574 | else |
575 | rcb = ccb->rcb[0]; | |
8b230ed8 RM |
576 | |
577 | unmap_q = rcb->unmap_q; | |
30f9fc94 | 578 | unmap = &unmap_q->unmap[rcb->consumer_index]; |
8b230ed8 | 579 | |
30f9fc94 RM |
580 | if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR | |
581 | BNA_CQ_EF_FCS_ERROR | | |
582 | BNA_CQ_EF_TOO_LONG))) { | |
583 | if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) | |
584 | bnad_rxq_cleanup_page(bnad, unmap); | |
585 | else | |
586 | bnad_rxq_cleanup_skb(bnad, unmap); | |
5216562a | 587 | |
8b230ed8 RM |
588 | rcb->rxq->rx_packets_with_error++; |
589 | goto next; | |
590 | } | |
591 | ||
30f9fc94 RM |
592 | skb = bnad_cq_prepare_skb(ccb->ctrl, unmap_q, unmap, |
593 | length, flags); | |
594 | ||
595 | if (unlikely(!skb)) | |
596 | break; | |
5e46631f RM |
597 | |
598 | masked_flags = flags & flags_cksum_prot_mask; | |
599 | ||
8b230ed8 | 600 | if (likely |
e5ee20e7 | 601 | ((bnad->netdev->features & NETIF_F_RXCSUM) && |
5e46631f RM |
602 | ((masked_flags == flags_tcp4) || |
603 | (masked_flags == flags_udp4) || | |
604 | (masked_flags == flags_tcp6) || | |
605 | (masked_flags == flags_udp6)))) | |
8b230ed8 RM |
606 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
607 | else | |
bc8acf2c | 608 | skb_checksum_none_assert(skb); |
8b230ed8 RM |
609 | |
610 | rcb->rxq->rx_packets++; | |
30f9fc94 | 611 | rcb->rxq->rx_bytes += length; |
8b230ed8 | 612 | |
f859d7cb | 613 | if (flags & BNA_CQ_EF_VLAN) |
86a9bad3 | 614 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag)); |
f859d7cb | 615 | |
30f9fc94 RM |
616 | if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) |
617 | napi_gro_frags(&rx_ctrl->napi); | |
01b54b14 | 618 | else |
f859d7cb | 619 | netif_receive_skb(skb); |
8b230ed8 RM |
620 | |
621 | next: | |
622 | cmpl->valid = 0; | |
30f9fc94 RM |
623 | BNA_QE_INDX_INC(rcb->consumer_index, rcb->q_depth); |
624 | BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth); | |
625 | cmpl = &cq[ccb->producer_index]; | |
8b230ed8 RM |
626 | } |
627 | ||
30f9fc94 | 628 | napi_gro_flush(&rx_ctrl->napi, false); |
2be67144 | 629 | if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))) |
271e8b79 RM |
630 | bna_ib_ack_disable_irq(ccb->i_dbell, packets); |
631 | ||
5216562a | 632 | bnad_rxq_post(bnad, ccb->rcb[0]); |
2be67144 | 633 | if (ccb->rcb[1]) |
5216562a | 634 | bnad_rxq_post(bnad, ccb->rcb[1]); |
078086f3 | 635 | |
8b230ed8 RM |
636 | return packets; |
637 | } | |
638 | ||
8b230ed8 RM |
639 | static void |
640 | bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb) | |
641 | { | |
642 | struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl); | |
be7fa326 RM |
643 | struct napi_struct *napi = &rx_ctrl->napi; |
644 | ||
645 | if (likely(napi_schedule_prep(napi))) { | |
be7fa326 | 646 | __napi_schedule(napi); |
271e8b79 | 647 | rx_ctrl->rx_schedule++; |
8b230ed8 | 648 | } |
8b230ed8 RM |
649 | } |
650 | ||
651 | /* MSIX Rx Path Handler */ | |
652 | static irqreturn_t | |
653 | bnad_msix_rx(int irq, void *data) | |
654 | { | |
655 | struct bna_ccb *ccb = (struct bna_ccb *)data; | |
8b230ed8 | 656 | |
271e8b79 RM |
657 | if (ccb) { |
658 | ((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++; | |
2be67144 | 659 | bnad_netif_rx_schedule_poll(ccb->bnad, ccb); |
271e8b79 | 660 | } |
8b230ed8 RM |
661 | |
662 | return IRQ_HANDLED; | |
663 | } | |
664 | ||
665 | /* Interrupt handlers */ | |
666 | ||
667 | /* Mbox Interrupt Handlers */ | |
668 | static irqreturn_t | |
669 | bnad_msix_mbox_handler(int irq, void *data) | |
670 | { | |
671 | u32 intr_status; | |
e2fa6f2e | 672 | unsigned long flags; |
be7fa326 | 673 | struct bnad *bnad = (struct bnad *)data; |
8b230ed8 | 674 | |
8b230ed8 | 675 | spin_lock_irqsave(&bnad->bna_lock, flags); |
dfee325a RM |
676 | if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) { |
677 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
678 | return IRQ_HANDLED; | |
679 | } | |
8b230ed8 RM |
680 | |
681 | bna_intr_status_get(&bnad->bna, intr_status); | |
682 | ||
078086f3 | 683 | if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status)) |
8b230ed8 RM |
684 | bna_mbox_handler(&bnad->bna, intr_status); |
685 | ||
686 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
687 | ||
8b230ed8 RM |
688 | return IRQ_HANDLED; |
689 | } | |
690 | ||
691 | static irqreturn_t | |
692 | bnad_isr(int irq, void *data) | |
693 | { | |
694 | int i, j; | |
695 | u32 intr_status; | |
696 | unsigned long flags; | |
be7fa326 | 697 | struct bnad *bnad = (struct bnad *)data; |
8b230ed8 RM |
698 | struct bnad_rx_info *rx_info; |
699 | struct bnad_rx_ctrl *rx_ctrl; | |
078086f3 | 700 | struct bna_tcb *tcb = NULL; |
8b230ed8 | 701 | |
dfee325a RM |
702 | spin_lock_irqsave(&bnad->bna_lock, flags); |
703 | if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) { | |
704 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
e2fa6f2e | 705 | return IRQ_NONE; |
dfee325a | 706 | } |
8b230ed8 RM |
707 | |
708 | bna_intr_status_get(&bnad->bna, intr_status); | |
e2fa6f2e | 709 | |
dfee325a RM |
710 | if (unlikely(!intr_status)) { |
711 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
8b230ed8 | 712 | return IRQ_NONE; |
dfee325a | 713 | } |
8b230ed8 | 714 | |
078086f3 | 715 | if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status)) |
8b230ed8 | 716 | bna_mbox_handler(&bnad->bna, intr_status); |
be7fa326 | 717 | |
8b230ed8 RM |
718 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
719 | ||
be7fa326 RM |
720 | if (!BNA_IS_INTX_DATA_INTR(intr_status)) |
721 | return IRQ_HANDLED; | |
722 | ||
8b230ed8 | 723 | /* Process data interrupts */ |
be7fa326 RM |
724 | /* Tx processing */ |
725 | for (i = 0; i < bnad->num_tx; i++) { | |
078086f3 RM |
726 | for (j = 0; j < bnad->num_txq_per_tx; j++) { |
727 | tcb = bnad->tx_info[i].tcb[j]; | |
728 | if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) | |
b3cc6e88 | 729 | bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]); |
078086f3 | 730 | } |
be7fa326 RM |
731 | } |
732 | /* Rx processing */ | |
8b230ed8 RM |
733 | for (i = 0; i < bnad->num_rx; i++) { |
734 | rx_info = &bnad->rx_info[i]; | |
735 | if (!rx_info->rx) | |
736 | continue; | |
737 | for (j = 0; j < bnad->num_rxp_per_rx; j++) { | |
738 | rx_ctrl = &rx_info->rx_ctrl[j]; | |
739 | if (rx_ctrl->ccb) | |
740 | bnad_netif_rx_schedule_poll(bnad, | |
741 | rx_ctrl->ccb); | |
742 | } | |
743 | } | |
8b230ed8 RM |
744 | return IRQ_HANDLED; |
745 | } | |
746 | ||
747 | /* | |
748 | * Called in interrupt / callback context | |
749 | * with bna_lock held, so cfg_flags access is OK | |
750 | */ | |
751 | static void | |
752 | bnad_enable_mbox_irq(struct bnad *bnad) | |
753 | { | |
be7fa326 | 754 | clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); |
e2fa6f2e | 755 | |
8b230ed8 RM |
756 | BNAD_UPDATE_CTR(bnad, mbox_intr_enabled); |
757 | } | |
758 | ||
759 | /* | |
760 | * Called with bnad->bna_lock held because of |
761 | * bnad->cfg_flags access. | |
762 | */ | |
b7ee31c5 | 763 | static void |
8b230ed8 RM |
764 | bnad_disable_mbox_irq(struct bnad *bnad) |
765 | { | |
be7fa326 | 766 | set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); |
8b230ed8 | 767 | |
be7fa326 RM |
768 | BNAD_UPDATE_CTR(bnad, mbox_intr_disabled); |
769 | } | |
8b230ed8 | 770 | |
be7fa326 RM |
771 | static void |
772 | bnad_set_netdev_perm_addr(struct bnad *bnad) | |
773 | { | |
774 | struct net_device *netdev = bnad->netdev; | |
e2fa6f2e | 775 | |
be7fa326 RM |
776 | memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len); |
777 | if (is_zero_ether_addr(netdev->dev_addr)) | |
778 | memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len); | |
8b230ed8 RM |
779 | } |
780 | ||
781 | /* Control Path Handlers */ | |
782 | ||
783 | /* Callbacks */ | |
784 | void | |
078086f3 | 785 | bnad_cb_mbox_intr_enable(struct bnad *bnad) |
8b230ed8 RM |
786 | { |
787 | bnad_enable_mbox_irq(bnad); | |
788 | } | |
789 | ||
790 | void | |
078086f3 | 791 | bnad_cb_mbox_intr_disable(struct bnad *bnad) |
8b230ed8 RM |
792 | { |
793 | bnad_disable_mbox_irq(bnad); | |
794 | } | |
795 | ||
796 | void | |
078086f3 RM |
797 | bnad_cb_ioceth_ready(struct bnad *bnad) |
798 | { | |
799 | bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS; | |
800 | complete(&bnad->bnad_completions.ioc_comp); | |
801 | } | |
802 | ||
803 | void | |
804 | bnad_cb_ioceth_failed(struct bnad *bnad) | |
8b230ed8 | 805 | { |
078086f3 | 806 | bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL; |
8b230ed8 | 807 | complete(&bnad->bnad_completions.ioc_comp); |
8b230ed8 RM |
808 | } |
809 | ||
810 | void | |
078086f3 | 811 | bnad_cb_ioceth_disabled(struct bnad *bnad) |
8b230ed8 | 812 | { |
078086f3 | 813 | bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS; |
8b230ed8 | 814 | complete(&bnad->bnad_completions.ioc_comp); |
8b230ed8 RM |
815 | } |
816 | ||
817 | static void | |
078086f3 | 818 | bnad_cb_enet_disabled(void *arg) |
8b230ed8 RM |
819 | { |
820 | struct bnad *bnad = (struct bnad *)arg; | |
821 | ||
8b230ed8 | 822 | netif_carrier_off(bnad->netdev); |
078086f3 | 823 | complete(&bnad->bnad_completions.enet_comp); |
8b230ed8 RM |
824 | } |
825 | ||
826 | void | |
078086f3 | 827 | bnad_cb_ethport_link_status(struct bnad *bnad, |
8b230ed8 RM |
828 | enum bna_link_status link_status) |
829 | { | |
3db1cd5c | 830 | bool link_up = false; |
8b230ed8 RM |
831 | |
832 | link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP); | |
833 | ||
834 | if (link_status == BNA_CEE_UP) { | |
078086f3 RM |
835 | if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) |
836 | BNAD_UPDATE_CTR(bnad, cee_toggle); | |
8b230ed8 | 837 | set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags); |
078086f3 RM |
838 | } else { |
839 | if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) | |
840 | BNAD_UPDATE_CTR(bnad, cee_toggle); | |
8b230ed8 | 841 | clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags); |
078086f3 | 842 | } |
8b230ed8 RM |
843 | |
844 | if (link_up) { | |
845 | if (!netif_carrier_ok(bnad->netdev)) { | |
078086f3 RM |
846 | uint tx_id, tcb_id; |
847 | printk(KERN_WARNING "bna: %s link up\n", | |
8b230ed8 RM |
848 | bnad->netdev->name); |
849 | netif_carrier_on(bnad->netdev); | |
850 | BNAD_UPDATE_CTR(bnad, link_toggle); | |
078086f3 RM |
851 | for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) { |
852 | for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx; | |
853 | tcb_id++) { | |
854 | struct bna_tcb *tcb = | |
855 | bnad->tx_info[tx_id].tcb[tcb_id]; | |
856 | u32 txq_id; | |
857 | if (!tcb) | |
858 | continue; | |
859 | ||
860 | txq_id = tcb->id; | |
861 | ||
862 | if (test_bit(BNAD_TXQ_TX_STARTED, | |
863 | &tcb->flags)) { | |
864 | /* | |
865 | * Force an immediate | |
866 | * Transmit Schedule */ | |
867 | printk(KERN_INFO "bna: %s %d " | |
868 | "TXQ_STARTED\n", | |
869 | bnad->netdev->name, | |
870 | txq_id); | |
871 | netif_wake_subqueue( | |
872 | bnad->netdev, | |
873 | txq_id); | |
874 | BNAD_UPDATE_CTR(bnad, | |
875 | netif_queue_wakeup); | |
876 | } else { | |
877 | netif_stop_subqueue( | |
878 | bnad->netdev, | |
879 | txq_id); | |
880 | BNAD_UPDATE_CTR(bnad, | |
881 | netif_queue_stop); | |
882 | } | |
883 | } | |
8b230ed8 RM |
884 | } |
885 | } | |
886 | } else { | |
887 | if (netif_carrier_ok(bnad->netdev)) { | |
078086f3 | 888 | printk(KERN_WARNING "bna: %s link down\n", |
8b230ed8 RM |
889 | bnad->netdev->name); |
890 | netif_carrier_off(bnad->netdev); | |
891 | BNAD_UPDATE_CTR(bnad, link_toggle); | |
892 | } | |
893 | } | |
894 | } | |
895 | ||
896 | static void | |
078086f3 | 897 | bnad_cb_tx_disabled(void *arg, struct bna_tx *tx) |
8b230ed8 RM |
898 | { |
899 | struct bnad *bnad = (struct bnad *)arg; | |
900 | ||
901 | complete(&bnad->bnad_completions.tx_comp); | |
902 | } | |
903 | ||
904 | static void | |
905 | bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb) | |
906 | { | |
907 | struct bnad_tx_info *tx_info = | |
908 | (struct bnad_tx_info *)tcb->txq->tx->priv; | |
8b230ed8 | 909 | |
5216562a | 910 | tcb->priv = tcb; |
8b230ed8 | 911 | tx_info->tcb[tcb->id] = tcb; |
8b230ed8 RM |
912 | } |
913 | ||
914 | static void | |
915 | bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb) | |
916 | { | |
917 | struct bnad_tx_info *tx_info = | |
918 | (struct bnad_tx_info *)tcb->txq->tx->priv; | |
919 | ||
920 | tx_info->tcb[tcb->id] = NULL; | |
01b54b14 | 921 | tcb->priv = NULL; |
8b230ed8 RM |
922 | } |
923 | ||
8b230ed8 RM |
924 | static void |
925 | bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb) | |
926 | { | |
927 | struct bnad_rx_info *rx_info = | |
928 | (struct bnad_rx_info *)ccb->cq->rx->priv; | |
929 | ||
930 | rx_info->rx_ctrl[ccb->id].ccb = ccb; | |
931 | ccb->ctrl = &rx_info->rx_ctrl[ccb->id]; | |
932 | } | |
933 | ||
934 | static void | |
935 | bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb) | |
936 | { | |
937 | struct bnad_rx_info *rx_info = | |
938 | (struct bnad_rx_info *)ccb->cq->rx->priv; | |
939 | ||
940 | rx_info->rx_ctrl[ccb->id].ccb = NULL; | |
941 | } | |
942 | ||
943 | static void | |
078086f3 | 944 | bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx) |
8b230ed8 RM |
945 | { |
946 | struct bnad_tx_info *tx_info = | |
078086f3 RM |
947 | (struct bnad_tx_info *)tx->priv; |
948 | struct bna_tcb *tcb; | |
949 | u32 txq_id; | |
950 | int i; | |
8b230ed8 | 951 | |
078086f3 RM |
952 | for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) { |
953 | tcb = tx_info->tcb[i]; | |
954 | if (!tcb) | |
955 | continue; | |
956 | txq_id = tcb->id; | |
957 | clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags); | |
958 | netif_stop_subqueue(bnad->netdev, txq_id); | |
959 | printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n", | |
960 | bnad->netdev->name, txq_id); | |
961 | } | |
8b230ed8 RM |
962 | } |
963 | ||
964 | static void | |
078086f3 | 965 | bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx) |
8b230ed8 | 966 | { |
078086f3 RM |
967 | struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv; |
968 | struct bna_tcb *tcb; | |
078086f3 RM |
969 | u32 txq_id; |
970 | int i; | |
8b230ed8 | 971 | |
078086f3 RM |
972 | for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) { |
973 | tcb = tx_info->tcb[i]; | |
974 | if (!tcb) | |
975 | continue; | |
976 | txq_id = tcb->id; | |
8b230ed8 | 977 | |
01b54b14 | 978 | BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)); |
078086f3 | 979 | set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags); |
01b54b14 | 980 | BUG_ON(*(tcb->hw_consumer_index) != 0); |
078086f3 RM |
981 | |
982 | if (netif_carrier_ok(bnad->netdev)) { | |
983 | printk(KERN_INFO "bna: %s %d TXQ_STARTED\n", | |
984 | bnad->netdev->name, txq_id); | |
985 | netif_wake_subqueue(bnad->netdev, txq_id); | |
986 | BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); | |
987 | } | |
988 | } | |
be7fa326 RM |
989 | |
990 | /* | |
078086f3 | 991 | * Workaround for the first ioceth enable failure, where we
be7fa326 RM |
992 | * get a 0 MAC address. We try to get the MAC address |
993 | * again here. | |
994 | */ | |
995 | if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) { | |
078086f3 | 996 | bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr); |
be7fa326 RM |
997 | bnad_set_netdev_perm_addr(bnad); |
998 | } | |
be7fa326 RM |
999 | } |
1000 | ||
01b54b14 JH |
1001 | /* |
1002 | * Free all TxQ buffers and then notify TX_E_CLEANUP_DONE to the Tx fsm. |
1003 | */ | |
1004 | static void | |
1005 | bnad_tx_cleanup(struct delayed_work *work) | |
1006 | { | |
1007 | struct bnad_tx_info *tx_info = | |
1008 | container_of(work, struct bnad_tx_info, tx_cleanup_work); | |
1009 | struct bnad *bnad = NULL; | |
01b54b14 JH |
1010 | struct bna_tcb *tcb; |
1011 | unsigned long flags; | |
5216562a | 1012 | u32 i, pending = 0; |
01b54b14 JH |
1013 | |
1014 | for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) { | |
1015 | tcb = tx_info->tcb[i]; | |
1016 | if (!tcb) | |
1017 | continue; | |
1018 | ||
1019 | bnad = tcb->bnad; | |
1020 | ||
1021 | if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) { | |
1022 | pending++; | |
1023 | continue; | |
1024 | } | |
1025 | ||
b3cc6e88 | 1026 | bnad_txq_cleanup(bnad, tcb); |
01b54b14 | 1027 | |
01b54b14 JH |
1028 | smp_mb__before_clear_bit(); |
1029 | clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); | |
1030 | } | |
1031 | ||
1032 | if (pending) { | |
1033 | queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, | |
1034 | msecs_to_jiffies(1)); | |
1035 | return; | |
1036 | } | |
1037 | ||
1038 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
1039 | bna_tx_cleanup_complete(tx_info->tx); | |
1040 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
1041 | } | |
1042 | ||
be7fa326 | 1043 | static void |
078086f3 | 1044 | bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx) |
be7fa326 | 1045 | { |
078086f3 RM |
1046 | struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv; |
1047 | struct bna_tcb *tcb; | |
1048 | int i; | |
1049 | ||
1050 | for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) { | |
1051 | tcb = tx_info->tcb[i]; | |
1052 | if (!tcb) | |
1053 | continue; | |
1054 | } | |
1055 | ||
01b54b14 | 1056 | queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0); |
8b230ed8 RM |
1057 | } |
1058 | ||
5bcf6ac0 RM |
1059 | static void |
1060 | bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx) | |
1061 | { | |
1062 | struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv; | |
1063 | struct bna_ccb *ccb; | |
1064 | struct bnad_rx_ctrl *rx_ctrl; | |
1065 | int i; | |
1066 | ||
1067 | for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { | |
1068 | rx_ctrl = &rx_info->rx_ctrl[i]; | |
1069 | ccb = rx_ctrl->ccb; | |
1070 | if (!ccb) | |
1071 | continue; | |
1072 | ||
1073 | clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags); | |
1074 | ||
1075 | if (ccb->rcb[1]) | |
1076 | clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags); | |
1077 | } | |
1078 | } | |
1079 | ||
01b54b14 JH |
1080 | /* |
1081 | * Free all RxQ buffers and then notify RX_E_CLEANUP_DONE to the Rx fsm. |
1082 | */ | |
1083 | static void | |
1084 | bnad_rx_cleanup(void *work) | |
1085 | { | |
1086 | struct bnad_rx_info *rx_info = | |
1087 | container_of(work, struct bnad_rx_info, rx_cleanup_work); | |
1088 | struct bnad_rx_ctrl *rx_ctrl; | |
1089 | struct bnad *bnad = NULL; | |
1090 | unsigned long flags; | |
5216562a | 1091 | u32 i; |
01b54b14 JH |
1092 | |
1093 | for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { | |
1094 | rx_ctrl = &rx_info->rx_ctrl[i]; | |
1095 | ||
1096 | if (!rx_ctrl->ccb) | |
1097 | continue; | |
1098 | ||
1099 | bnad = rx_ctrl->ccb->bnad; | |
1100 | ||
1101 | /* | |
1102 | * Wait till the poll handler has exited | |
1103 | * and nothing can be scheduled anymore | |
1104 | */ | |
1105 | napi_disable(&rx_ctrl->napi); | |
1106 | ||
b3cc6e88 JH |
1107 | bnad_cq_cleanup(bnad, rx_ctrl->ccb); |
1108 | bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]); | |
01b54b14 | 1109 | if (rx_ctrl->ccb->rcb[1]) |
b3cc6e88 | 1110 | bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]); |
01b54b14 JH |
1111 | } |
1112 | ||
1113 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
1114 | bna_rx_cleanup_complete(rx_info->rx); | |
1115 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
1116 | } | |
1117 | ||
8b230ed8 | 1118 | static void |
078086f3 | 1119 | bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx) |
8b230ed8 | 1120 | { |
078086f3 RM |
1121 | struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv; |
1122 | struct bna_ccb *ccb; | |
1123 | struct bnad_rx_ctrl *rx_ctrl; | |
1124 | int i; | |
1125 | ||
772b5235 | 1126 | for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { |
078086f3 RM |
1127 | rx_ctrl = &rx_info->rx_ctrl[i]; |
1128 | ccb = rx_ctrl->ccb; | |
1129 | if (!ccb) | |
1130 | continue; | |
1131 | ||
1132 | clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags); | |
1133 | ||
1134 | if (ccb->rcb[1]) | |
1135 | clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags); | |
078086f3 | 1136 | } |
be7fa326 | 1137 | |
01b54b14 | 1138 | queue_work(bnad->work_q, &rx_info->rx_cleanup_work); |
8b230ed8 RM |
1139 | } |
1140 | ||
1141 | static void | |
078086f3 | 1142 | bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx) |
8b230ed8 | 1143 | { |
078086f3 RM |
1144 | struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv; |
1145 | struct bna_ccb *ccb; | |
1146 | struct bna_rcb *rcb; | |
1147 | struct bnad_rx_ctrl *rx_ctrl; | |
30f9fc94 | 1148 | int i, j; |
be7fa326 | 1149 | |
772b5235 | 1150 | for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { |
078086f3 RM |
1151 | rx_ctrl = &rx_info->rx_ctrl[i]; |
1152 | ccb = rx_ctrl->ccb; | |
1153 | if (!ccb) | |
1154 | continue; | |
be7fa326 | 1155 | |
01b54b14 | 1156 | napi_enable(&rx_ctrl->napi); |
8b230ed8 | 1157 | |
078086f3 RM |
1158 | for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) { |
1159 | rcb = ccb->rcb[j]; | |
1160 | if (!rcb) | |
1161 | continue; | |
078086f3 | 1162 | |
30f9fc94 | 1163 | bnad_rxq_alloc_init(bnad, rcb); |
078086f3 | 1164 | set_bit(BNAD_RXQ_STARTED, &rcb->flags); |
5bcf6ac0 | 1165 | set_bit(BNAD_RXQ_POST_OK, &rcb->flags); |
5216562a | 1166 | bnad_rxq_post(bnad, rcb); |
078086f3 | 1167 | } |
8b230ed8 RM |
1168 | } |
1169 | } | |
1170 | ||
1171 | static void | |
078086f3 | 1172 | bnad_cb_rx_disabled(void *arg, struct bna_rx *rx) |
8b230ed8 RM |
1173 | { |
1174 | struct bnad *bnad = (struct bnad *)arg; | |
1175 | ||
1176 | complete(&bnad->bnad_completions.rx_comp); | |
1177 | } | |
1178 | ||
1179 | static void | |
078086f3 | 1180 | bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx) |
8b230ed8 | 1181 | { |
078086f3 | 1182 | bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS; |
8b230ed8 RM |
1183 | complete(&bnad->bnad_completions.mcast_comp); |
1184 | } | |
1185 | ||
1186 | void | |
1187 | bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status, | |
1188 | struct bna_stats *stats) | |
1189 | { | |
1190 | if (status == BNA_CB_SUCCESS) | |
1191 | BNAD_UPDATE_CTR(bnad, hw_stats_updates); | |
1192 | ||
1193 | if (!netif_running(bnad->netdev) || | |
1194 | !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) | |
1195 | return; | |
1196 | ||
1197 | mod_timer(&bnad->stats_timer, | |
1198 | jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ)); | |
1199 | } | |
1200 | ||
078086f3 RM |
1201 | static void |
1202 | bnad_cb_enet_mtu_set(struct bnad *bnad) | |
1203 | { | |
1204 | bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS; | |
1205 | complete(&bnad->bnad_completions.mtu_comp); | |
1206 | } | |
1207 | ||
72a9730b KG |
1208 | void |
1209 | bnad_cb_completion(void *arg, enum bfa_status status) | |
1210 | { | |
1211 | struct bnad_iocmd_comp *iocmd_comp = | |
1212 | (struct bnad_iocmd_comp *)arg; | |
1213 | ||
1214 | iocmd_comp->comp_status = (u32) status; | |
1215 | complete(&iocmd_comp->comp); | |
1216 | } | |
1217 | ||
8b230ed8 RM |
1218 | /* Resource allocation, free functions */ |
1219 | ||
1220 | static void | |
1221 | bnad_mem_free(struct bnad *bnad, | |
1222 | struct bna_mem_info *mem_info) | |
1223 | { | |
1224 | int i; | |
1225 | dma_addr_t dma_pa; | |
1226 | ||
1227 | if (mem_info->mdl == NULL) | |
1228 | return; | |
1229 | ||
1230 | for (i = 0; i < mem_info->num; i++) { | |
1231 | if (mem_info->mdl[i].kva != NULL) { | |
1232 | if (mem_info->mem_type == BNA_MEM_T_DMA) { | |
1233 | BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma), | |
1234 | dma_pa); | |
5ea74318 IV |
1235 | dma_free_coherent(&bnad->pcidev->dev, |
1236 | mem_info->mdl[i].len, | |
1237 | mem_info->mdl[i].kva, dma_pa); | |
8b230ed8 RM |
1238 | } else |
1239 | kfree(mem_info->mdl[i].kva); | |
1240 | } | |
1241 | } | |
1242 | kfree(mem_info->mdl); | |
1243 | mem_info->mdl = NULL; | |
1244 | } | |
1245 | ||
1246 | static int | |
1247 | bnad_mem_alloc(struct bnad *bnad, | |
1248 | struct bna_mem_info *mem_info) | |
1249 | { | |
1250 | int i; | |
1251 | dma_addr_t dma_pa; | |
1252 | ||
1253 | if ((mem_info->num == 0) || (mem_info->len == 0)) { | |
1254 | mem_info->mdl = NULL; | |
1255 | return 0; | |
1256 | } | |
1257 | ||
1258 | mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr), | |
1259 | GFP_KERNEL); | |
1260 | if (mem_info->mdl == NULL) | |
1261 | return -ENOMEM; | |
1262 | ||
1263 | if (mem_info->mem_type == BNA_MEM_T_DMA) { | |
1264 | for (i = 0; i < mem_info->num; i++) { | |
1265 | mem_info->mdl[i].len = mem_info->len; | |
1266 | mem_info->mdl[i].kva = | |
5ea74318 | 1267 | dma_alloc_coherent(&bnad->pcidev->dev, |
1f9061d2 JP |
1268 | mem_info->len, &dma_pa, |
1269 | GFP_KERNEL); | |
8b230ed8 RM |
1270 | if (mem_info->mdl[i].kva == NULL) |
1271 | goto err_return; | |
1272 | ||
1273 | BNA_SET_DMA_ADDR(dma_pa, | |
1274 | &(mem_info->mdl[i].dma)); | |
1275 | } | |
1276 | } else { | |
1277 | for (i = 0; i < mem_info->num; i++) { | |
1278 | mem_info->mdl[i].len = mem_info->len; | |
1279 | mem_info->mdl[i].kva = kzalloc(mem_info->len, | |
1280 | GFP_KERNEL); | |
1281 | if (mem_info->mdl[i].kva == NULL) | |
1282 | goto err_return; | |
1283 | } | |
1284 | } | |
1285 | ||
1286 | return 0; | |
1287 | ||
1288 | err_return: | |
1289 | bnad_mem_free(bnad, mem_info); | |
1290 | return -ENOMEM; | |
1291 | } | |
1292 | ||
1293 | /* Free IRQ for Mailbox */ | |
1294 | static void | |
078086f3 | 1295 | bnad_mbox_irq_free(struct bnad *bnad) |
8b230ed8 RM |
1296 | { |
1297 | int irq; | |
1298 | unsigned long flags; | |
1299 | ||
8b230ed8 | 1300 | spin_lock_irqsave(&bnad->bna_lock, flags); |
8b230ed8 | 1301 | bnad_disable_mbox_irq(bnad); |
e2fa6f2e | 1302 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
8b230ed8 RM |
1303 | |
1304 | irq = BNAD_GET_MBOX_IRQ(bnad); | |
be7fa326 | 1305 | free_irq(irq, bnad); |
8b230ed8 RM |
1306 | } |
1307 | ||
1308 | /* | |
1309 | * Allocates IRQ for Mailbox, but keeps it disabled | |
1310 | * This will be enabled once we get the mbox enable callback | |
1311 | * from bna | |
1312 | */ | |
1313 | static int | |
078086f3 | 1314 | bnad_mbox_irq_alloc(struct bnad *bnad) |
8b230ed8 | 1315 | { |
0120b99c RM |
1316 | int err = 0; |
1317 | unsigned long irq_flags, flags; | |
8b230ed8 | 1318 | u32 irq; |
0120b99c | 1319 | irq_handler_t irq_handler; |
8b230ed8 | 1320 | |
8b230ed8 RM |
1321 | spin_lock_irqsave(&bnad->bna_lock, flags); |
1322 | if (bnad->cfg_flags & BNAD_CF_MSIX) { | |
1323 | irq_handler = (irq_handler_t)bnad_msix_mbox_handler; | |
8811e267 | 1324 | irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector; |
8279171a | 1325 | irq_flags = 0; |
8b230ed8 RM |
1326 | } else { |
1327 | irq_handler = (irq_handler_t)bnad_isr; | |
1328 | irq = bnad->pcidev->irq; | |
5f77898d | 1329 | irq_flags = IRQF_SHARED; |
8b230ed8 | 1330 | } |
8811e267 | 1331 | |
8b230ed8 | 1332 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
8b230ed8 RM |
1333 | sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME); |
1334 | ||
e2fa6f2e RM |
1335 | /* |
1336 | * Set the Mbox IRQ disable flag, so that the IRQ handler | |
1337 | * called from request_irq() for SHARED IRQs does not execute | |
1338 | */ | |
1339 | set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); | |
1340 | ||
be7fa326 RM |
1341 | BNAD_UPDATE_CTR(bnad, mbox_intr_disabled); |
1342 | ||
8279171a | 1343 | err = request_irq(irq, irq_handler, irq_flags, |
be7fa326 | 1344 | bnad->mbox_irq_name, bnad); |
e2fa6f2e | 1345 | |
be7fa326 | 1346 | return err; |
8b230ed8 RM |
1347 | } |
1348 | ||
1349 | static void | |
1350 | bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info) | |
1351 | { | |
1352 | kfree(intr_info->idl); | |
1353 | intr_info->idl = NULL; | |
1354 | } | |
1355 | ||
1356 | /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */ | |
1357 | static int | |
1358 | bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src, | |
078086f3 | 1359 | u32 txrx_id, struct bna_intr_info *intr_info) |
8b230ed8 RM |
1360 | { |
1361 | int i, vector_start = 0; | |
1362 | u32 cfg_flags; | |
1363 | unsigned long flags; | |
1364 | ||
1365 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
1366 | cfg_flags = bnad->cfg_flags; | |
1367 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
1368 | ||
1369 | if (cfg_flags & BNAD_CF_MSIX) { | |
1370 | intr_info->intr_type = BNA_INTR_T_MSIX; | |
1371 | intr_info->idl = kcalloc(intr_info->num, | |
1372 | sizeof(struct bna_intr_descr), | |
1373 | GFP_KERNEL); | |
1374 | if (!intr_info->idl) | |
1375 | return -ENOMEM; | |
1376 | ||
1377 | switch (src) { | |
1378 | case BNAD_INTR_TX: | |
8811e267 | 1379 | vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id; |
8b230ed8 RM |
1380 | break; |
1381 | ||
1382 | case BNAD_INTR_RX: | |
8811e267 RM |
1383 | vector_start = BNAD_MAILBOX_MSIX_VECTORS + |
1384 | (bnad->num_tx * bnad->num_txq_per_tx) + | |
8b230ed8 RM |
1385 | txrx_id; |
1386 | break; | |
1387 | ||
1388 | default: | |
1389 | BUG(); | |
1390 | } | |
1391 | ||
1392 | for (i = 0; i < intr_info->num; i++) | |
1393 | intr_info->idl[i].vector = vector_start + i; | |
1394 | } else { | |
1395 | intr_info->intr_type = BNA_INTR_T_INTX; | |
1396 | intr_info->num = 1; | |
1397 | intr_info->idl = kcalloc(intr_info->num, | |
1398 | sizeof(struct bna_intr_descr), | |
1399 | GFP_KERNEL); | |
1400 | if (!intr_info->idl) | |
1401 | return -ENOMEM; | |
1402 | ||
1403 | switch (src) { | |
1404 | case BNAD_INTR_TX: | |
8811e267 | 1405 | intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK; |
8b230ed8 RM |
1406 | break; |
1407 | ||
1408 | case BNAD_INTR_RX: | |
8811e267 | 1409 | intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK; |
8b230ed8 RM |
1410 | break; |
1411 | } | |
1412 | } | |
1413 | return 0; | |
1414 | } | |
1415 | ||
1aa8b471 | 1416 | /* NOTE: Should be called for MSIX only |
8b230ed8 RM |
1417 | * Unregisters Tx MSIX vector(s) from the kernel |
1418 | */ | |
1419 | static void | |
1420 | bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info, | |
1421 | int num_txqs) | |
1422 | { | |
1423 | int i; | |
1424 | int vector_num; | |
1425 | ||
1426 | for (i = 0; i < num_txqs; i++) { | |
1427 | if (tx_info->tcb[i] == NULL) | |
1428 | continue; | |
1429 | ||
1430 | vector_num = tx_info->tcb[i]->intr_vector; | |
1431 | free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]); | |
1432 | } | |
1433 | } | |
1434 | ||
1aa8b471 | 1435 | /* NOTE: Should be called for MSIX only |
8b230ed8 RM |
1436 | * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel |
1437 | */ | |
1438 | static int | |
1439 | bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info, | |
078086f3 | 1440 | u32 tx_id, int num_txqs) |
8b230ed8 RM |
1441 | { |
1442 | int i; | |
1443 | int err; | |
1444 | int vector_num; | |
1445 | ||
1446 | for (i = 0; i < num_txqs; i++) { | |
1447 | vector_num = tx_info->tcb[i]->intr_vector; | |
1448 | sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name, | |
1449 | tx_id + tx_info->tcb[i]->id); | |
1450 | err = request_irq(bnad->msix_table[vector_num].vector, | |
1451 | (irq_handler_t)bnad_msix_tx, 0, | |
1452 | tx_info->tcb[i]->name, | |
1453 | tx_info->tcb[i]); | |
1454 | if (err) | |
1455 | goto err_return; | |
1456 | } | |
1457 | ||
1458 | return 0; | |
1459 | ||
1460 | err_return: | |
1461 | if (i > 0) | |
1462 | bnad_tx_msix_unregister(bnad, tx_info, (i - 1)); | |
1463 | return -1; | |
1464 | } | |
1465 | ||
1aa8b471 | 1466 | /* NOTE: Should be called for MSIX only |
8b230ed8 RM |
1467 | * Unregisters Rx MSIX vector(s) from the kernel |
1468 | */ | |
1469 | static void | |
1470 | bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info, | |
1471 | int num_rxps) | |
1472 | { | |
1473 | int i; | |
1474 | int vector_num; | |
1475 | ||
1476 | for (i = 0; i < num_rxps; i++) { | |
1477 | if (rx_info->rx_ctrl[i].ccb == NULL) | |
1478 | continue; | |
1479 | ||
1480 | vector_num = rx_info->rx_ctrl[i].ccb->intr_vector; | |
1481 | free_irq(bnad->msix_table[vector_num].vector, | |
1482 | rx_info->rx_ctrl[i].ccb); | |
1483 | } | |
1484 | } | |
1485 | ||
1aa8b471 | 1486 | /* NOTE: Should be called for MSIX only |
8b230ed8 RM |
1488 | * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel | |
1488 | */ | |
1489 | static int | |
1490 | bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info, | |
078086f3 | 1491 | u32 rx_id, int num_rxps) |
8b230ed8 RM |
1492 | { |
1493 | int i; | |
1494 | int err; | |
1495 | int vector_num; | |
1496 | ||
1497 | for (i = 0; i < num_rxps; i++) { | |
1498 | vector_num = rx_info->rx_ctrl[i].ccb->intr_vector; | |
1499 | sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d", | |
1500 | bnad->netdev->name, | |
1501 | rx_id + rx_info->rx_ctrl[i].ccb->id); | |
1502 | err = request_irq(bnad->msix_table[vector_num].vector, | |
1503 | (irq_handler_t)bnad_msix_rx, 0, | |
1504 | rx_info->rx_ctrl[i].ccb->name, | |
1505 | rx_info->rx_ctrl[i].ccb); | |
1506 | if (err) | |
1507 | goto err_return; | |
1508 | } | |
1509 | ||
1510 | return 0; | |
1511 | ||
1512 | err_return: | |
1513 | if (i > 0) | |
1514 | bnad_rx_msix_unregister(bnad, rx_info, (i - 1)); | |
1515 | return -1; | |
1516 | } | |
1517 | ||
1518 | /* Free Tx object Resources */ | |
1519 | static void | |
1520 | bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info) | |
1521 | { | |
1522 | int i; | |
1523 | ||
1524 | for (i = 0; i < BNA_TX_RES_T_MAX; i++) { | |
1525 | if (res_info[i].res_type == BNA_RES_T_MEM) | |
1526 | bnad_mem_free(bnad, &res_info[i].res_u.mem_info); | |
1527 | else if (res_info[i].res_type == BNA_RES_T_INTR) | |
1528 | bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info); | |
1529 | } | |
1530 | } | |
1531 | ||
1532 | /* Allocates memory and interrupt resources for Tx object */ | |
1533 | static int | |
1534 | bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info, | |
078086f3 | 1535 | u32 tx_id) |
8b230ed8 RM |
1536 | { |
1537 | int i, err = 0; | |
1538 | ||
1539 | for (i = 0; i < BNA_TX_RES_T_MAX; i++) { | |
1540 | if (res_info[i].res_type == BNA_RES_T_MEM) | |
1541 | err = bnad_mem_alloc(bnad, | |
1542 | &res_info[i].res_u.mem_info); | |
1543 | else if (res_info[i].res_type == BNA_RES_T_INTR) | |
1544 | err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id, | |
1545 | &res_info[i].res_u.intr_info); | |
1546 | if (err) | |
1547 | goto err_return; | |
1548 | } | |
1549 | return 0; | |
1550 | ||
1551 | err_return: | |
1552 | bnad_tx_res_free(bnad, res_info); | |
1553 | return err; | |
1554 | } | |
1555 | ||
1556 | /* Free Rx object Resources */ | |
1557 | static void | |
1558 | bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info) | |
1559 | { | |
1560 | int i; | |
1561 | ||
1562 | for (i = 0; i < BNA_RX_RES_T_MAX; i++) { | |
1563 | if (res_info[i].res_type == BNA_RES_T_MEM) | |
1564 | bnad_mem_free(bnad, &res_info[i].res_u.mem_info); | |
1565 | else if (res_info[i].res_type == BNA_RES_T_INTR) | |
1566 | bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info); | |
1567 | } | |
1568 | } | |
1569 | ||
1570 | /* Allocates memory and interrupt resources for Rx object */ | |
1571 | static int | |
1572 | bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info, | |
1573 | uint rx_id) | |
1574 | { | |
1575 | int i, err = 0; | |
1576 | ||
1577 | /* All memory needs to be allocated before setup_ccbs */ | |
1578 | for (i = 0; i < BNA_RX_RES_T_MAX; i++) { | |
1579 | if (res_info[i].res_type == BNA_RES_T_MEM) | |
1580 | err = bnad_mem_alloc(bnad, | |
1581 | &res_info[i].res_u.mem_info); | |
1582 | else if (res_info[i].res_type == BNA_RES_T_INTR) | |
1583 | err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id, | |
1584 | &res_info[i].res_u.intr_info); | |
1585 | if (err) | |
1586 | goto err_return; | |
1587 | } | |
1588 | return 0; | |
1589 | ||
1590 | err_return: | |
1591 | bnad_rx_res_free(bnad, res_info); | |
1592 | return err; | |
1593 | } | |
1594 | ||
1595 | /* Timer callbacks */ | |
1596 | /* a) IOC timer */ | |
1597 | static void | |
1598 | bnad_ioc_timeout(unsigned long data) | |
1599 | { | |
1600 | struct bnad *bnad = (struct bnad *)data; | |
1601 | unsigned long flags; | |
1602 | ||
1603 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
078086f3 | 1604 | bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc); |
8b230ed8 RM |
1605 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
1606 | } | |
1607 | ||
1608 | static void | |
1609 | bnad_ioc_hb_check(unsigned long data) | |
1610 | { | |
1611 | struct bnad *bnad = (struct bnad *)data; | |
1612 | unsigned long flags; | |
1613 | ||
1614 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
078086f3 | 1615 | bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc); |
8b230ed8 RM |
1616 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
1617 | } | |
1618 | ||
1619 | static void | |
1d32f769 | 1620 | bnad_iocpf_timeout(unsigned long data) |
8b230ed8 RM |
1621 | { |
1622 | struct bnad *bnad = (struct bnad *)data; | |
1623 | unsigned long flags; | |
1624 | ||
1625 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
078086f3 | 1626 | bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc); |
1d32f769 RM |
1627 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
1628 | } | |
1629 | ||
1630 | static void | |
1631 | bnad_iocpf_sem_timeout(unsigned long data) | |
1632 | { | |
1633 | struct bnad *bnad = (struct bnad *)data; | |
1634 | unsigned long flags; | |
1635 | ||
1636 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
078086f3 | 1637 | bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc); |
8b230ed8 RM |
1638 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
1639 | } | |
1640 | ||
1641 | /* | |
1642 | * All timer routines use bnad->bna_lock to protect against | |
1643 | * the following race, which may occur in case of no locking: | |
0120b99c | 1644 | * Time 0: CPU m sees the timer-running bit set (1 = test_bit) |
8b230ed8 RM |
1645 | * Time 1: CPU n clear_bit |
1646 | * Time 2: CPU n del_timer_sync |
1647 | * Time 3: CPU m mod_timer |
1648 | * i.e. CPU m re-arms a timer that CPU n has just stopped. |
1649 | */ | |
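| /* |
| * Hence each timer callback tests its RUNNING bit and re-arms the timer |
| * under bna_lock, and the stop path clears that bit under the same lock |
| * before calling del_timer_sync(). |
| */ |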
1650 | ||
1651 | /* b) Dynamic Interrupt Moderation Timer */ | |
1652 | static void | |
1653 | bnad_dim_timeout(unsigned long data) | |
1654 | { | |
1655 | struct bnad *bnad = (struct bnad *)data; | |
1656 | struct bnad_rx_info *rx_info; | |
1657 | struct bnad_rx_ctrl *rx_ctrl; | |
1658 | int i, j; | |
1659 | unsigned long flags; | |
1660 | ||
1661 | if (!netif_carrier_ok(bnad->netdev)) | |
1662 | return; | |
1663 | ||
1664 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
1665 | for (i = 0; i < bnad->num_rx; i++) { | |
1666 | rx_info = &bnad->rx_info[i]; | |
1667 | if (!rx_info->rx) | |
1668 | continue; | |
1669 | for (j = 0; j < bnad->num_rxp_per_rx; j++) { | |
1670 | rx_ctrl = &rx_info->rx_ctrl[j]; | |
1671 | if (!rx_ctrl->ccb) | |
1672 | continue; | |
1673 | bna_rx_dim_update(rx_ctrl->ccb); | |
1674 | } | |
1675 | } | |
1676 | ||
1677 | /* Check for BNAD_CF_DIM_ENABLED; this does not eliminate the race */ |
1678 | if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) | |
1679 | mod_timer(&bnad->dim_timer, | |
1680 | jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ)); | |
1681 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
1682 | } | |
1683 | ||
1684 | /* c) Statistics Timer */ | |
1685 | static void | |
1686 | bnad_stats_timeout(unsigned long data) | |
1687 | { | |
1688 | struct bnad *bnad = (struct bnad *)data; | |
1689 | unsigned long flags; | |
1690 | ||
1691 | if (!netif_running(bnad->netdev) || | |
1692 | !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) | |
1693 | return; | |
1694 | ||
1695 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
078086f3 | 1696 | bna_hw_stats_get(&bnad->bna); |
8b230ed8 RM |
1697 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
1698 | } | |
1699 | ||
1700 | /* | |
1701 | * Set up timer for DIM | |
1702 | * Called with bnad->bna_lock held | |
1703 | */ | |
1704 | void | |
1705 | bnad_dim_timer_start(struct bnad *bnad) | |
1706 | { | |
1707 | if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED && | |
1708 | !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) { | |
1709 | setup_timer(&bnad->dim_timer, bnad_dim_timeout, | |
1710 | (unsigned long)bnad); | |
1711 | set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags); | |
1712 | mod_timer(&bnad->dim_timer, | |
1713 | jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ)); | |
1714 | } | |
1715 | } | |
1716 | ||
1717 | /* | |
1718 | * Set up timer for statistics | |
1719 | * Called with mutex_lock(&bnad->conf_mutex) held | |
1720 | */ | |
1721 | static void | |
1722 | bnad_stats_timer_start(struct bnad *bnad) | |
1723 | { | |
1724 | unsigned long flags; | |
1725 | ||
1726 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
1727 | if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) { | |
1728 | setup_timer(&bnad->stats_timer, bnad_stats_timeout, | |
1729 | (unsigned long)bnad); | |
1730 | mod_timer(&bnad->stats_timer, | |
1731 | jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ)); | |
1732 | } | |
1733 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
8b230ed8 RM |
1734 | } |
1735 | ||
1736 | /* | |
1737 | * Stops the stats timer | |
1738 | * Called with mutex_lock(&bnad->conf_mutex) held | |
1739 | */ | |
1740 | static void | |
1741 | bnad_stats_timer_stop(struct bnad *bnad) | |
1742 | { | |
1743 | int to_del = 0; | |
1744 | unsigned long flags; | |
1745 | ||
1746 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
1747 | if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) | |
1748 | to_del = 1; | |
1749 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
1750 | if (to_del) | |
1751 | del_timer_sync(&bnad->stats_timer); | |
1752 | } | |
1753 | ||
1754 | /* Utilities */ | |
1755 | ||
1756 | static void | |
1757 | bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list) | |
1758 | { | |
1759 | int i = 1; /* Index 0 has broadcast address */ | |
1760 | struct netdev_hw_addr *mc_addr; | |
1761 | ||
1762 | netdev_for_each_mc_addr(mc_addr, netdev) { | |
1763 | memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0], | |
1764 | ETH_ALEN); | |
1765 | i++; | |
1766 | } | |
1767 | } | |
1768 | ||
1769 | static int | |
1770 | bnad_napi_poll_rx(struct napi_struct *napi, int budget) | |
1771 | { | |
1772 | struct bnad_rx_ctrl *rx_ctrl = | |
1773 | container_of(napi, struct bnad_rx_ctrl, napi); | |
2be67144 | 1774 | struct bnad *bnad = rx_ctrl->bnad; |
8b230ed8 RM |
1775 | int rcvd = 0; |
1776 | ||
271e8b79 | 1777 | rx_ctrl->rx_poll_ctr++; |
8b230ed8 RM |
1778 | |
1779 | if (!netif_carrier_ok(bnad->netdev)) | |
1780 | goto poll_exit; | |
1781 | ||
b3cc6e88 | 1782 | rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget); |
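| /* Budget exhausted: more work is pending, so stay in polling mode */ |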
271e8b79 | 1783 | if (rcvd >= budget) |
8b230ed8 RM |
1784 | return rcvd; |
1785 | ||
1786 | poll_exit: | |
19dbff9f | 1787 | napi_complete(napi); |
8b230ed8 | 1788 | |
271e8b79 | 1789 | rx_ctrl->rx_complete++; |
2be67144 RM |
1790 | |
1791 | if (rx_ctrl->ccb) | |
271e8b79 RM |
1792 | bnad_enable_rx_irq_unsafe(rx_ctrl->ccb); |
1793 | ||
8b230ed8 RM |
1794 | return rcvd; |
1795 | } | |
1796 | ||
2be67144 | 1797 | #define BNAD_NAPI_POLL_QUOTA 64 |
8b230ed8 | 1798 | static void |
01b54b14 | 1799 | bnad_napi_add(struct bnad *bnad, u32 rx_id) |
8b230ed8 | 1800 | { |
8b230ed8 RM |
1801 | struct bnad_rx_ctrl *rx_ctrl; |
1802 | int i; | |
8b230ed8 RM |
1803 | |
1804 | /* Initialize NAPI for each Rx path */ |
1805 | for (i = 0; i < bnad->num_rxp_per_rx; i++) { | |
1806 | rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i]; | |
1807 | netif_napi_add(bnad->netdev, &rx_ctrl->napi, | |
2be67144 RM |
1808 | bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA); |
1809 | } | |
1810 | } | |
1811 | ||
1812 | static void | |
01b54b14 | 1813 | bnad_napi_delete(struct bnad *bnad, u32 rx_id) |
8b230ed8 RM |
1814 | { |
1815 | int i; | |
1816 | ||
1817 | /* Delete the NAPI context of each Rx path */ |
01b54b14 | 1818 | for (i = 0; i < bnad->num_rxp_per_rx; i++) |
8b230ed8 | 1819 | netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi); |
8b230ed8 RM |
1820 | } |
1821 | ||
1822 | /* Must be called with conf_lock held */ |
1823 | void | |
b3cc6e88 | 1824 | bnad_destroy_tx(struct bnad *bnad, u32 tx_id) |
8b230ed8 RM |
1825 | { |
1826 | struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id]; | |
1827 | struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0]; | |
1828 | unsigned long flags; | |
1829 | ||
1830 | if (!tx_info->tx) | |
1831 | return; | |
1832 | ||
1833 | init_completion(&bnad->bnad_completions.tx_comp); | |
1834 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
1835 | bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled); | |
1836 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
1837 | wait_for_completion(&bnad->bnad_completions.tx_comp); | |
1838 | ||
1839 | if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX) | |
1840 | bnad_tx_msix_unregister(bnad, tx_info, | |
1841 | bnad->num_txq_per_tx); | |
1842 | ||
1843 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
1844 | bna_tx_destroy(tx_info->tx); | |
1845 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
1846 | ||
1847 | tx_info->tx = NULL; | |
078086f3 | 1848 | tx_info->tx_id = 0; |
8b230ed8 | 1849 | |
8b230ed8 RM |
1850 | bnad_tx_res_free(bnad, res_info); |
1851 | } | |
1852 | ||
1853 | /* Must be called with conf_lock held */ |
1854 | int | |
078086f3 | 1855 | bnad_setup_tx(struct bnad *bnad, u32 tx_id) |
8b230ed8 RM |
1856 | { |
1857 | int err; | |
1858 | struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id]; | |
1859 | struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0]; | |
1860 | struct bna_intr_info *intr_info = | |
1861 | &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info; | |
1862 | struct bna_tx_config *tx_config = &bnad->tx_config[tx_id]; | |
d91d25d5 | 1863 | static const struct bna_tx_event_cbfn tx_cbfn = { |
1864 | .tcb_setup_cbfn = bnad_cb_tcb_setup, | |
1865 | .tcb_destroy_cbfn = bnad_cb_tcb_destroy, | |
1866 | .tx_stall_cbfn = bnad_cb_tx_stall, | |
1867 | .tx_resume_cbfn = bnad_cb_tx_resume, | |
1868 | .tx_cleanup_cbfn = bnad_cb_tx_cleanup, | |
1869 | }; | |
1870 | ||
8b230ed8 RM |
1871 | struct bna_tx *tx; |
1872 | unsigned long flags; | |
1873 | ||
078086f3 RM |
1874 | tx_info->tx_id = tx_id; |
1875 | ||
8b230ed8 RM |
1876 | /* Initialize the Tx object configuration */ |
1877 | tx_config->num_txq = bnad->num_txq_per_tx; | |
1878 | tx_config->txq_depth = bnad->txq_depth; | |
1879 | tx_config->tx_type = BNA_TX_T_REGULAR; | |
078086f3 | 1880 | tx_config->coalescing_timeo = bnad->tx_coalescing_timeo; |
8b230ed8 | 1881 | |
8b230ed8 RM |
1882 | /* Get BNA's resource requirement for one tx object */ |
1883 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
1884 | bna_tx_res_req(bnad->num_txq_per_tx, | |
1885 | bnad->txq_depth, res_info); | |
1886 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
1887 | ||
1888 | /* Fill Unmap Q memory requirements */ | |
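| /* One bnad_tx_unmap entry is reserved per descriptor of each TxQ */ |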
5216562a RM |
1889 | BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ], |
1890 | bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) * | |
1891 | bnad->txq_depth)); | |
8b230ed8 RM |
1892 | |
1893 | /* Allocate resources */ | |
1894 | err = bnad_tx_res_alloc(bnad, res_info, tx_id); | |
1895 | if (err) | |
1896 | return err; | |
1897 | ||
1898 | /* Ask BNA to create one Tx object, supplying required resources */ | |
1899 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
1900 | tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info, | |
1901 | tx_info); | |
1902 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
1903 | if (!tx) | |
1904 | goto err_return; | |
1905 | tx_info->tx = tx; | |
1906 | ||
01b54b14 JH |
1907 | INIT_DELAYED_WORK(&tx_info->tx_cleanup_work, |
1908 | (work_func_t)bnad_tx_cleanup); | |
1909 | ||
8b230ed8 RM |
1910 | /* Register ISR for the Tx object */ |
1911 | if (intr_info->intr_type == BNA_INTR_T_MSIX) { | |
1912 | err = bnad_tx_msix_register(bnad, tx_info, | |
1913 | tx_id, bnad->num_txq_per_tx); | |
1914 | if (err) | |
1915 | goto err_return; | |
1916 | } | |
1917 | ||
1918 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
1919 | bna_tx_enable(tx); | |
1920 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
1921 | ||
1922 | return 0; | |
1923 | ||
1924 | err_return: | |
1925 | bnad_tx_res_free(bnad, res_info); | |
1926 | return err; | |
1927 | } | |
1928 | ||
1929 | /* Setup the rx config for bna_rx_create */ | |
1930 | /* bnad decides the configuration */ | |
1931 | static void | |
1932 | bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config) | |
1933 | { | |
1934 | rx_config->rx_type = BNA_RX_T_REGULAR; | |
1935 | rx_config->num_paths = bnad->num_rxp_per_rx; | |
078086f3 | 1936 | rx_config->coalescing_timeo = bnad->rx_coalescing_timeo; |
8b230ed8 RM |
1937 | |
1938 | if (bnad->num_rxp_per_rx > 1) { | |
1939 | rx_config->rss_status = BNA_STATUS_T_ENABLED; | |
1940 | rx_config->rss_config.hash_type = | |
078086f3 RM |
1941 | (BFI_ENET_RSS_IPV6 | |
1942 | BFI_ENET_RSS_IPV6_TCP | | |
1943 | BFI_ENET_RSS_IPV4 | | |
1944 | BFI_ENET_RSS_IPV4_TCP); | |
8b230ed8 RM |
1945 | rx_config->rss_config.hash_mask = |
1946 | bnad->num_rxp_per_rx - 1; | |
1947 | get_random_bytes(rx_config->rss_config.toeplitz_hash_key, | |
1948 | sizeof(rx_config->rss_config.toeplitz_hash_key)); | |
1949 | } else { | |
1950 | rx_config->rss_status = BNA_STATUS_T_DISABLED; | |
1951 | memset(&rx_config->rss_config, 0, | |
1952 | sizeof(rx_config->rss_config)); | |
1953 | } | |
1954 | rx_config->rxp_type = BNA_RXP_SLR; | |
1955 | rx_config->q_depth = bnad->rxq_depth; | |
1956 | ||
1957 | rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE; | |
1958 | ||
1959 | rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED; | |
1960 | } | |
1961 | ||
2be67144 RM |
1962 | static void |
1963 | bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id) | |
1964 | { | |
1965 | struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; | |
1966 | int i; | |
1967 | ||
1968 | for (i = 0; i < bnad->num_rxp_per_rx; i++) | |
1969 | rx_info->rx_ctrl[i].bnad = bnad; | |
1970 | } | |
1971 | ||
8b230ed8 RM |
1972 | /* Called with mutex_lock(&bnad->conf_mutex) held */ |
1973 | void | |
b3cc6e88 | 1974 | bnad_destroy_rx(struct bnad *bnad, u32 rx_id) |
8b230ed8 RM |
1975 | { |
1976 | struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; | |
1977 | struct bna_rx_config *rx_config = &bnad->rx_config[rx_id]; | |
1978 | struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0]; | |
1979 | unsigned long flags; | |
271e8b79 | 1980 | int to_del = 0; |
8b230ed8 RM |
1981 | |
1982 | if (!rx_info->rx) | |
1983 | return; | |
1984 | ||
1985 | if (0 == rx_id) { | |
1986 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
271e8b79 RM |
1987 | if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED && |
1988 | test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) { | |
8b230ed8 | 1989 | clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags); |
271e8b79 RM |
1990 | to_del = 1; |
1991 | } | |
8b230ed8 | 1992 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
271e8b79 | 1993 | if (to_del) |
8b230ed8 RM |
1994 | del_timer_sync(&bnad->dim_timer); |
1995 | } | |
1996 | ||
8b230ed8 RM |
1997 | init_completion(&bnad->bnad_completions.rx_comp); |
1998 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
1999 | bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled); | |
2000 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2001 | wait_for_completion(&bnad->bnad_completions.rx_comp); | |
2002 | ||
2003 | if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX) | |
2004 | bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths); | |
2005 | ||
01b54b14 | 2006 | bnad_napi_delete(bnad, rx_id); |
2be67144 | 2007 | |
8b230ed8 RM |
2008 | spin_lock_irqsave(&bnad->bna_lock, flags); |
2009 | bna_rx_destroy(rx_info->rx); | |
8b230ed8 RM |
2010 | |
2011 | rx_info->rx = NULL; | |
3caa1e95 | 2012 | rx_info->rx_id = 0; |
b9fa1fbf | 2013 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
8b230ed8 RM |
2014 | |
2015 | bnad_rx_res_free(bnad, res_info); | |
2016 | } | |
2017 | ||
2018 | /* Called with mutex_lock(&bnad->conf_mutex) held */ | |
2019 | int | |
078086f3 | 2020 | bnad_setup_rx(struct bnad *bnad, u32 rx_id) |
8b230ed8 RM |
2021 | { |
2022 | int err; | |
2023 | struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; | |
2024 | struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0]; | |
2025 | struct bna_intr_info *intr_info = | |
2026 | &res_info[BNA_RX_RES_T_INTR].res_u.intr_info; | |
2027 | struct bna_rx_config *rx_config = &bnad->rx_config[rx_id]; | |
d91d25d5 | 2028 | static const struct bna_rx_event_cbfn rx_cbfn = { |
5216562a | 2029 | .rcb_setup_cbfn = NULL, |
01b54b14 | 2030 | .rcb_destroy_cbfn = NULL, |
d91d25d5 | 2031 | .ccb_setup_cbfn = bnad_cb_ccb_setup, |
2032 | .ccb_destroy_cbfn = bnad_cb_ccb_destroy, | |
5bcf6ac0 | 2033 | .rx_stall_cbfn = bnad_cb_rx_stall, |
d91d25d5 | 2034 | .rx_cleanup_cbfn = bnad_cb_rx_cleanup, |
2035 | .rx_post_cbfn = bnad_cb_rx_post, | |
2036 | }; | |
8b230ed8 RM |
2037 | struct bna_rx *rx; |
2038 | unsigned long flags; | |
2039 | ||
078086f3 RM |
2040 | rx_info->rx_id = rx_id; |
2041 | ||
8b230ed8 RM |
2042 | /* Initialize the Rx object configuration */ |
2043 | bnad_init_rx_config(bnad, rx_config); | |
2044 | ||
8b230ed8 RM |
2045 | /* Get BNA's resource requirement for one Rx object */ |
2046 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
2047 | bna_rx_res_req(rx_config, res_info); | |
2048 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2049 | ||
2050 | /* Fill Unmap Q memory requirements */ | |
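| /* Non-SINGLE rxp types drive two RxQs per path, hence twice the unmap queues */ |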
5216562a | 2051 | BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPQ], |
30f9fc94 RM |
2052 | rx_config->num_paths + |
2053 | ((rx_config->rxp_type == BNA_RXP_SINGLE) ? | |
2054 | 0 : rx_config->num_paths), | |
2055 | ((bnad->rxq_depth * sizeof(struct bnad_rx_unmap)) + | |
2056 | sizeof(struct bnad_rx_unmap_q))); | |
8b230ed8 RM |
2057 | |
2058 | /* Allocate resource */ | |
2059 | err = bnad_rx_res_alloc(bnad, res_info, rx_id); | |
2060 | if (err) | |
2061 | return err; | |
2062 | ||
2be67144 RM |
2063 | bnad_rx_ctrl_init(bnad, rx_id); |
2064 | ||
8b230ed8 RM |
2065 | /* Ask BNA to create one Rx object, supplying required resources */ |
2066 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
2067 | rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info, | |
2068 | rx_info); | |
3caa1e95 RM |
2069 | if (!rx) { |
2070 | err = -ENOMEM; | |
b9fa1fbf | 2071 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
8b230ed8 | 2072 | goto err_return; |
3caa1e95 | 2073 | } |
8b230ed8 | 2074 | rx_info->rx = rx; |
b9fa1fbf | 2075 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
8b230ed8 | 2076 | |
01b54b14 JH |
2077 | INIT_WORK(&rx_info->rx_cleanup_work, |
2078 | (work_func_t)(bnad_rx_cleanup)); | |
2079 | ||
2be67144 RM |
2080 | /* |
2081 | * Init NAPI, so that state is set to NAPI_STATE_SCHED, | |
2082 | * so that IRQ handler cannot schedule NAPI at this point. | |
2083 | */ | |
01b54b14 | 2084 | bnad_napi_add(bnad, rx_id); |
2be67144 | 2085 | |
8b230ed8 RM |
2086 | /* Register ISR for the Rx object */ |
2087 | if (intr_info->intr_type == BNA_INTR_T_MSIX) { | |
2088 | err = bnad_rx_msix_register(bnad, rx_info, rx_id, | |
2089 | rx_config->num_paths); | |
2090 | if (err) | |
2091 | goto err_return; | |
2092 | } | |
2093 | ||
8b230ed8 RM |
2094 | spin_lock_irqsave(&bnad->bna_lock, flags); |
2095 | if (0 == rx_id) { | |
2096 | /* Set up Dynamic Interrupt Moderation Vector */ | |
2097 | if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) | |
2098 | bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector); | |
2099 | ||
2100 | /* Enable VLAN filtering only on the default Rx */ | |
2101 | bna_rx_vlanfilter_enable(rx); | |
2102 | ||
2103 | /* Start the DIM timer */ | |
2104 | bnad_dim_timer_start(bnad); | |
2105 | } | |
2106 | ||
2107 | bna_rx_enable(rx); | |
2108 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2109 | ||
2110 | return 0; | |
2111 | ||
2112 | err_return: | |
b3cc6e88 | 2113 | bnad_destroy_rx(bnad, rx_id); |
8b230ed8 RM |
2114 | return err; |
2115 | } | |
2116 | ||
2117 | /* Called with conf_lock & bnad->bna_lock held */ | |
2118 | void | |
2119 | bnad_tx_coalescing_timeo_set(struct bnad *bnad) | |
2120 | { | |
2121 | struct bnad_tx_info *tx_info; | |
2122 | ||
2123 | tx_info = &bnad->tx_info[0]; | |
2124 | if (!tx_info->tx) | |
2125 | return; | |
2126 | ||
2127 | bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo); | |
2128 | } | |
2129 | ||
2130 | /* Called with conf_lock & bnad->bna_lock held */ | |
2131 | void | |
2132 | bnad_rx_coalescing_timeo_set(struct bnad *bnad) | |
2133 | { | |
2134 | struct bnad_rx_info *rx_info; | |
0120b99c | 2135 | int i; |
8b230ed8 RM |
2136 | |
2137 | for (i = 0; i < bnad->num_rx; i++) { | |
2138 | rx_info = &bnad->rx_info[i]; | |
2139 | if (!rx_info->rx) | |
2140 | continue; | |
2141 | bna_rx_coalescing_timeo_set(rx_info->rx, | |
2142 | bnad->rx_coalescing_timeo); | |
2143 | } | |
2144 | } | |
2145 | ||
2146 | /* | |
2147 | * Called with bnad->bna_lock held | |
2148 | */ | |
a2122d95 | 2149 | int |
8b230ed8 RM |
2150 | bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr) |
2151 | { | |
2152 | int ret; | |
2153 | ||
2154 | if (!is_valid_ether_addr(mac_addr)) | |
2155 | return -EADDRNOTAVAIL; | |
2156 | ||
2157 | /* If datapath is down, pretend everything went through */ | |
2158 | if (!bnad->rx_info[0].rx) | |
2159 | return 0; | |
2160 | ||
2161 | ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL); | |
2162 | if (ret != BNA_CB_SUCCESS) | |
2163 | return -EADDRNOTAVAIL; | |
2164 | ||
2165 | return 0; | |
2166 | } | |
2167 | ||
2168 | /* Should be called with conf_lock held */ | |
a2122d95 | 2169 | int |
8b230ed8 RM |
2170 | bnad_enable_default_bcast(struct bnad *bnad) |
2171 | { | |
2172 | struct bnad_rx_info *rx_info = &bnad->rx_info[0]; | |
2173 | int ret; | |
2174 | unsigned long flags; | |
2175 | ||
2176 | init_completion(&bnad->bnad_completions.mcast_comp); | |
2177 | ||
2178 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
2179 | ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr, | |
2180 | bnad_cb_rx_mcast_add); | |
2181 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2182 | ||
2183 | if (ret == BNA_CB_SUCCESS) | |
2184 | wait_for_completion(&bnad->bnad_completions.mcast_comp); | |
2185 | else | |
2186 | return -ENODEV; | |
2187 | ||
2188 | if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS) | |
2189 | return -ENODEV; | |
2190 | ||
2191 | return 0; | |
2192 | } | |
2193 | ||
19dbff9f | 2194 | /* Called with mutex_lock(&bnad->conf_mutex) held */ |
a2122d95 | 2195 | void |
aad75b66 RM |
2196 | bnad_restore_vlans(struct bnad *bnad, u32 rx_id) |
2197 | { | |
f859d7cb | 2198 | u16 vid; |
aad75b66 RM |
2199 | unsigned long flags; |
2200 | ||
f859d7cb | 2201 | for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) { |
aad75b66 | 2202 | spin_lock_irqsave(&bnad->bna_lock, flags); |
f859d7cb | 2203 | bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid); |
aad75b66 RM |
2204 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
2205 | } | |
2206 | } | |
2207 | ||
8b230ed8 RM |
2208 | /* Statistics utilities */ |
2209 | void | |
250e061e | 2210 | bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats) |
8b230ed8 | 2211 | { |
8b230ed8 RM |
2212 | int i, j; |
2213 | ||
2214 | for (i = 0; i < bnad->num_rx; i++) { | |
2215 | for (j = 0; j < bnad->num_rxp_per_rx; j++) { | |
2216 | if (bnad->rx_info[i].rx_ctrl[j].ccb) { | |
250e061e | 2217 | stats->rx_packets += bnad->rx_info[i]. |
8b230ed8 | 2218 | rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets; |
250e061e | 2219 | stats->rx_bytes += bnad->rx_info[i]. |
8b230ed8 RM |
2220 | rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes; |
2221 | if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] && | |
2222 | bnad->rx_info[i].rx_ctrl[j].ccb-> | |
2223 | rcb[1]->rxq) { | |
250e061e | 2224 | stats->rx_packets += |
8b230ed8 RM |
2225 | bnad->rx_info[i].rx_ctrl[j]. |
2226 | ccb->rcb[1]->rxq->rx_packets; | |
250e061e | 2227 | stats->rx_bytes += |
8b230ed8 RM |
2228 | bnad->rx_info[i].rx_ctrl[j]. |
2229 | ccb->rcb[1]->rxq->rx_bytes; | |
2230 | } | |
2231 | } | |
2232 | } | |
2233 | } | |
2234 | for (i = 0; i < bnad->num_tx; i++) { | |
2235 | for (j = 0; j < bnad->num_txq_per_tx; j++) { | |
2236 | if (bnad->tx_info[i].tcb[j]) { | |
250e061e | 2237 | stats->tx_packets += |
8b230ed8 | 2238 | bnad->tx_info[i].tcb[j]->txq->tx_packets; |
250e061e | 2239 | stats->tx_bytes += |
8b230ed8 RM |
2240 | bnad->tx_info[i].tcb[j]->txq->tx_bytes; |
2241 | } | |
2242 | } | |
2243 | } | |
2244 | } | |
2245 | ||
2246 | /* | |
2247 | * Must be called with the bna_lock held. | |
2248 | */ | |
2249 | void | |
250e061e | 2250 | bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats) |
8b230ed8 | 2251 | { |
078086f3 RM |
2252 | struct bfi_enet_stats_mac *mac_stats; |
2253 | u32 bmap; | |
8b230ed8 RM |
2254 | int i; |
2255 | ||
078086f3 | 2256 | mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats; |
250e061e | 2257 | stats->rx_errors = |
8b230ed8 RM |
2258 | mac_stats->rx_fcs_error + mac_stats->rx_alignment_error + |
2259 | mac_stats->rx_frame_length_error + mac_stats->rx_code_error + | |
2260 | mac_stats->rx_undersize; | |
250e061e | 2261 | stats->tx_errors = mac_stats->tx_fcs_error + |
8b230ed8 | 2262 | mac_stats->tx_undersize; |
250e061e ED |
2263 | stats->rx_dropped = mac_stats->rx_drop; |
2264 | stats->tx_dropped = mac_stats->tx_drop; | |
2265 | stats->multicast = mac_stats->rx_multicast; | |
2266 | stats->collisions = mac_stats->tx_total_collision; | |
8b230ed8 | 2267 | |
250e061e | 2268 | stats->rx_length_errors = mac_stats->rx_frame_length_error; |
8b230ed8 RM |
2269 | |
2270 | /* receive ring buffer overflow ?? */ | |
2271 | ||
250e061e ED |
2272 | stats->rx_crc_errors = mac_stats->rx_fcs_error; |
2273 | stats->rx_frame_errors = mac_stats->rx_alignment_error; | |
8b230ed8 | 2274 | /* receiver FIFO overrun */ |
078086f3 RM |
2275 | bmap = bna_rx_rid_mask(&bnad->bna); |
2276 | for (i = 0; bmap; i++) { | |
8b230ed8 | 2277 | if (bmap & 1) { |
250e061e | 2278 | stats->rx_fifo_errors += |
8b230ed8 | 2279 | bnad->stats.bna_stats-> |
078086f3 | 2280 | hw_stats.rxf_stats[i].frame_drops; |
8b230ed8 RM |
2281 | break; |
2282 | } | |
2283 | bmap >>= 1; | |
2284 | } | |
2285 | } | |
2286 | ||
2287 | static void | |
2288 | bnad_mbox_irq_sync(struct bnad *bnad) | |
2289 | { | |
2290 | u32 irq; | |
2291 | unsigned long flags; | |
2292 | ||
2293 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
2294 | if (bnad->cfg_flags & BNAD_CF_MSIX) | |
8811e267 | 2295 | irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector; |
8b230ed8 RM |
2296 | else |
2297 | irq = bnad->pcidev->irq; | |
2298 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2299 | ||
2300 | synchronize_irq(irq); | |
2301 | } | |
2302 | ||
2303 | /* Utility used by bnad_start_xmit, for doing TSO */ | |
2304 | static int | |
2305 | bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb) | |
2306 | { | |
2307 | int err; | |
2308 | ||
8b230ed8 RM |
2309 | if (skb_header_cloned(skb)) { |
2310 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | |
2311 | if (err) { | |
2312 | BNAD_UPDATE_CTR(bnad, tso_err); | |
2313 | return err; | |
2314 | } | |
2315 | } | |
2316 | ||
2317 | /* | |
2318 | * For TSO, the TCP checksum field is seeded with pseudo-header sum | |
2319 | * excluding the length field. | |
2320 | */ | |
2321 | if (skb->protocol == htons(ETH_P_IP)) { | |
2322 | struct iphdr *iph = ip_hdr(skb); | |
2323 | ||
2324 | /* Cleared here; the hardware recomputes these for every TSO segment */ |
2325 | iph->tot_len = 0; | |
2326 | iph->check = 0; | |
2327 | ||
2328 | tcp_hdr(skb)->check = | |
2329 | ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, | |
2330 | IPPROTO_TCP, 0); | |
2331 | BNAD_UPDATE_CTR(bnad, tso4); | |
2332 | } else { | |
2333 | struct ipv6hdr *ipv6h = ipv6_hdr(skb); | |
2334 | ||
8b230ed8 RM |
2335 | ipv6h->payload_len = 0; |
2336 | tcp_hdr(skb)->check = | |
2337 | ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0, | |
2338 | IPPROTO_TCP, 0); | |
2339 | BNAD_UPDATE_CTR(bnad, tso6); | |
2340 | } | |
2341 | ||
2342 | return 0; | |
2343 | } | |
2344 | ||
2345 | /* | |
2346 | * Initialize Q numbers depending on Rx Paths | |
2347 | * Called with bnad->bna_lock held, because of cfg_flags | |
2348 | * access. | |
2349 | */ | |
2350 | static void | |
2351 | bnad_q_num_init(struct bnad *bnad) | |
2352 | { | |
2353 | int rxps; | |
2354 | ||
2355 | rxps = min((uint)num_online_cpus(), | |
772b5235 | 2356 | (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX)); |
8b230ed8 RM |
2357 | |
2358 | if (!(bnad->cfg_flags & BNAD_CF_MSIX)) | |
2359 | rxps = 1; /* INTx */ | |
2360 | ||
2361 | bnad->num_rx = 1; | |
2362 | bnad->num_tx = 1; | |
2363 | bnad->num_rxp_per_rx = rxps; | |
2364 | bnad->num_txq_per_tx = BNAD_TXQ_NUM; | |
2365 | } | |
2366 | ||
2367 | /* | |
2368 | * Adjusts the Q numbers, given the number of MSI-X vectors. |
2369 | * Preference is given to RSS over Tx priority queues, so in |
2370 | * that case only 1 Tx Q is used. |
2371 | * Called with bnad->bna_lock held because of cfg_flags access. |
2372 | */ | |
2373 | static void | |
078086f3 | 2374 | bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp) |
8b230ed8 RM |
2375 | { |
2376 | bnad->num_txq_per_tx = 1; | |
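| /* Vectors left after the Tx queues and the mailbox go to the Rx paths */ |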
2377 | if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) + | |
2378 | bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) && | |
2379 | (bnad->cfg_flags & BNAD_CF_MSIX)) { | |
2380 | bnad->num_rxp_per_rx = msix_vectors - | |
2381 | (bnad->num_tx * bnad->num_txq_per_tx) - | |
2382 | BNAD_MAILBOX_MSIX_VECTORS; | |
2383 | } else | |
2384 | bnad->num_rxp_per_rx = 1; | |
2385 | } | |
2386 | ||
078086f3 RM |
2387 | /* Enable / disable ioceth */ |
2388 | static int | |
2389 | bnad_ioceth_disable(struct bnad *bnad) | |
8b230ed8 RM |
2390 | { |
2391 | unsigned long flags; | |
078086f3 | 2392 | int err = 0; |
8b230ed8 RM |
2393 | |
2394 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
078086f3 RM |
2395 | init_completion(&bnad->bnad_completions.ioc_comp); |
2396 | bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP); | |
8b230ed8 RM |
2397 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
2398 | ||
078086f3 RM |
2399 | wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp, |
2400 | msecs_to_jiffies(BNAD_IOCETH_TIMEOUT)); | |
2401 | ||
2402 | err = bnad->bnad_completions.ioc_comp_status; | |
2403 | return err; | |
8b230ed8 RM |
2404 | } |
2405 | ||
2406 | static int | |
078086f3 | 2407 | bnad_ioceth_enable(struct bnad *bnad) |
8b230ed8 RM |
2408 | { |
2409 | int err = 0; | |
2410 | unsigned long flags; | |
2411 | ||
8b230ed8 | 2412 | spin_lock_irqsave(&bnad->bna_lock, flags); |
078086f3 RM |
2413 | init_completion(&bnad->bnad_completions.ioc_comp); |
2414 | bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING; | |
2415 | bna_ioceth_enable(&bnad->bna.ioceth); | |
8b230ed8 RM |
2416 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
2417 | ||
078086f3 RM |
2418 | wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp, |
2419 | msecs_to_jiffies(BNAD_IOCETH_TIMEOUT)); | |
8b230ed8 | 2420 | |
078086f3 | 2421 | err = bnad->bnad_completions.ioc_comp_status; |
8b230ed8 RM |
2422 | |
2423 | return err; | |
2424 | } | |
2425 | ||
2426 | /* Free BNA resources */ | |
2427 | static void | |
078086f3 RM |
2428 | bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info, |
2429 | u32 res_val_max) | |
8b230ed8 RM |
2430 | { |
2431 | int i; | |
8b230ed8 | 2432 | |
078086f3 RM |
2433 | for (i = 0; i < res_val_max; i++) |
2434 | bnad_mem_free(bnad, &res_info[i].res_u.mem_info); | |
8b230ed8 RM |
2435 | } |
2436 | ||
2437 | /* Allocates memory and interrupt resources for BNA */ | |
2438 | static int | |
078086f3 RM |
2439 | bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info, |
2440 | u32 res_val_max) | |
8b230ed8 RM |
2441 | { |
2442 | int i, err; | |
8b230ed8 | 2443 | |
078086f3 RM |
2444 | for (i = 0; i < res_val_max; i++) { |
2445 | err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info); | |
8b230ed8 RM |
2446 | if (err) |
2447 | goto err_return; | |
2448 | } | |
2449 | return 0; | |
2450 | ||
2451 | err_return: | |
078086f3 | 2452 | bnad_res_free(bnad, res_info, res_val_max); |
8b230ed8 RM |
2453 | return err; |
2454 | } | |
2455 | ||
2456 | /* Interrupt enable / disable */ | |
2457 | static void | |
2458 | bnad_enable_msix(struct bnad *bnad) | |
2459 | { | |
2460 | int i, ret; | |
8b230ed8 RM |
2461 | unsigned long flags; |
2462 | ||
2463 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
2464 | if (!(bnad->cfg_flags & BNAD_CF_MSIX)) { | |
2465 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2466 | return; | |
2467 | } | |
2468 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2469 | ||
2470 | if (bnad->msix_table) | |
2471 | return; | |
2472 | ||
8b230ed8 | 2473 | bnad->msix_table = |
b7ee31c5 | 2474 | kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL); |
8b230ed8 RM |
2475 | |
2476 | if (!bnad->msix_table) | |
2477 | goto intx_mode; | |
2478 | ||
b7ee31c5 | 2479 | for (i = 0; i < bnad->msix_num; i++) |
8b230ed8 RM |
2480 | bnad->msix_table[i].entry = i; |
2481 | ||
b7ee31c5 | 2482 | ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num); |
8b230ed8 RM |
2483 | if (ret > 0) { |
2484 | /* Not enough MSI-X vectors. */ | |
19dbff9f RM |
2485 | pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n", |
2486 | ret, bnad->msix_num); | |
8b230ed8 RM |
2487 | |
2488 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
2489 | /* ret = #of vectors that we got */ | |
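| /* Re-derive the queue counts from the vectors actually granted */ |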
271e8b79 RM |
2490 | bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2, |
2491 | (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2); | |
8b230ed8 RM |
2492 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
2493 | ||
271e8b79 | 2494 | bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP + |
8b230ed8 | 2495 | BNAD_MAILBOX_MSIX_VECTORS; |
8b230ed8 | 2496 | |
078086f3 RM |
2497 | if (bnad->msix_num > ret) |
2498 | goto intx_mode; | |
2499 | ||
8b230ed8 RM |
2500 | /* Try once more with adjusted numbers */ |
2501 | /* If this fails, fall back to INTx */ | |
2502 | ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, | |
b7ee31c5 | 2503 | bnad->msix_num); |
8b230ed8 RM |
2504 | if (ret) |
2505 | goto intx_mode; | |
2506 | ||
2507 | } else if (ret < 0) | |
2508 | goto intx_mode; | |
078086f3 RM |
2509 | |
2510 | pci_intx(bnad->pcidev, 0); | |
2511 | ||
8b230ed8 RM |
2512 | return; |
2513 | ||
2514 | intx_mode: | |
19dbff9f | 2515 | pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n"); |
8b230ed8 RM |
2516 | |
2517 | kfree(bnad->msix_table); | |
2518 | bnad->msix_table = NULL; | |
2519 | bnad->msix_num = 0; | |
8b230ed8 RM |
2520 | spin_lock_irqsave(&bnad->bna_lock, flags); |
2521 | bnad->cfg_flags &= ~BNAD_CF_MSIX; | |
2522 | bnad_q_num_init(bnad); | |
2523 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2524 | } | |
2525 | ||
2526 | static void | |
2527 | bnad_disable_msix(struct bnad *bnad) | |
2528 | { | |
2529 | u32 cfg_flags; | |
2530 | unsigned long flags; | |
2531 | ||
2532 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
2533 | cfg_flags = bnad->cfg_flags; | |
2534 | if (bnad->cfg_flags & BNAD_CF_MSIX) | |
2535 | bnad->cfg_flags &= ~BNAD_CF_MSIX; | |
2536 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2537 | ||
2538 | if (cfg_flags & BNAD_CF_MSIX) { | |
2539 | pci_disable_msix(bnad->pcidev); | |
2540 | kfree(bnad->msix_table); | |
2541 | bnad->msix_table = NULL; | |
2542 | } | |
2543 | } | |
2544 | ||
2545 | /* Netdev entry points */ | |
2546 | static int | |
2547 | bnad_open(struct net_device *netdev) | |
2548 | { | |
2549 | int err; | |
2550 | struct bnad *bnad = netdev_priv(netdev); | |
2551 | struct bna_pause_config pause_config; | |
2552 | int mtu; | |
2553 | unsigned long flags; | |
2554 | ||
2555 | mutex_lock(&bnad->conf_mutex); | |
2556 | ||
2557 | /* Tx */ | |
2558 | err = bnad_setup_tx(bnad, 0); | |
2559 | if (err) | |
2560 | goto err_return; | |
2561 | ||
2562 | /* Rx */ | |
2563 | err = bnad_setup_rx(bnad, 0); | |
2564 | if (err) | |
2565 | goto cleanup_tx; | |
2566 | ||
2567 | /* Port */ | |
2568 | pause_config.tx_pause = 0; | |
2569 | pause_config.rx_pause = 0; | |
2570 | ||
078086f3 | 2571 | mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN; |
8b230ed8 RM |
2572 | |
2573 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
078086f3 RM |
2574 | bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL); |
2575 | bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL); | |
2576 | bna_enet_enable(&bnad->bna.enet); | |
8b230ed8 RM |
2577 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
2578 | ||
2579 | /* Enable broadcast */ | |
2580 | bnad_enable_default_bcast(bnad); | |
2581 | ||
aad75b66 RM |
2582 | /* Restore VLANs, if any */ |
2583 | bnad_restore_vlans(bnad, 0); | |
2584 | ||
8b230ed8 RM |
2585 | /* Set the UCAST address */ |
2586 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
2587 | bnad_mac_addr_set_locked(bnad, netdev->dev_addr); | |
2588 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2589 | ||
2590 | /* Start the stats timer */ | |
2591 | bnad_stats_timer_start(bnad); | |
2592 | ||
2593 | mutex_unlock(&bnad->conf_mutex); | |
2594 | ||
2595 | return 0; | |
2596 | ||
2597 | cleanup_tx: | |
b3cc6e88 | 2598 | bnad_destroy_tx(bnad, 0); |
8b230ed8 RM |
2599 | |
2600 | err_return: | |
2601 | mutex_unlock(&bnad->conf_mutex); | |
2602 | return err; | |
2603 | } | |
2604 | ||
2605 | static int | |
2606 | bnad_stop(struct net_device *netdev) | |
2607 | { | |
2608 | struct bnad *bnad = netdev_priv(netdev); | |
2609 | unsigned long flags; | |
2610 | ||
2611 | mutex_lock(&bnad->conf_mutex); | |
2612 | ||
2613 | /* Stop the stats timer */ | |
2614 | bnad_stats_timer_stop(bnad); | |
2615 | ||
078086f3 | 2616 | init_completion(&bnad->bnad_completions.enet_comp); |
8b230ed8 RM |
2617 | |
2618 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
078086f3 RM |
2619 | bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP, |
2620 | bnad_cb_enet_disabled); | |
8b230ed8 RM |
2621 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
2622 | ||
078086f3 | 2623 | wait_for_completion(&bnad->bnad_completions.enet_comp); |
8b230ed8 | 2624 | |
b3cc6e88 JH |
2625 | bnad_destroy_tx(bnad, 0); |
2626 | bnad_destroy_rx(bnad, 0); | |
8b230ed8 RM |
2627 | |
2628 | /* Synchronize mailbox IRQ */ | |
2629 | bnad_mbox_irq_sync(bnad); | |
2630 | ||
2631 | mutex_unlock(&bnad->conf_mutex); | |
2632 | ||
2633 | return 0; | |
2634 | } | |
2635 | ||
2636 | /* TX */ | |
5216562a RM |
2637 | /* Returns 0 for success */ |
2638 | static int | |
2639 | bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb, | |
2640 | struct sk_buff *skb, struct bna_txq_entry *txqent) | |
8b230ed8 | 2641 | { |
5216562a RM |
2642 | u16 flags = 0; |
2643 | u32 gso_size; | |
2644 | u16 vlan_tag = 0; | |
8b230ed8 | 2645 | |
eab6d18d | 2646 | if (vlan_tx_tag_present(skb)) { |
5216562a | 2647 | vlan_tag = (u16)vlan_tx_tag_get(skb); |
8b230ed8 RM |
2648 | flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN); |
2649 | } | |
2650 | if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) { | |
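| /* CEE is running: force the 802.1p priority bits to the TxQ priority */ |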
5216562a RM |
2651 | vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT) |
2652 | | (vlan_tag & 0x1fff); | |
8b230ed8 RM |
2653 | flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN); |
2654 | } | |
8b230ed8 RM |
2655 | txqent->hdr.wi.vlan_tag = htons(vlan_tag); |
2656 | ||
2657 | if (skb_is_gso(skb)) { | |
271e8b79 | 2658 | gso_size = skb_shinfo(skb)->gso_size; |
5216562a | 2659 | if (unlikely(gso_size > bnad->netdev->mtu)) { |
271e8b79 | 2660 | BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long); |
5216562a | 2661 | return -EINVAL; |
271e8b79 RM |
2662 | } |
2663 | if (unlikely((gso_size + skb_transport_offset(skb) + | |
5216562a | 2664 | tcp_hdrlen(skb)) >= skb->len)) { |
271e8b79 RM |
2665 | txqent->hdr.wi.opcode = |
2666 | __constant_htons(BNA_TXQ_WI_SEND); | |
2667 | txqent->hdr.wi.lso_mss = 0; | |
2668 | BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short); | |
2669 | } else { | |
2670 | txqent->hdr.wi.opcode = | |
2671 | __constant_htons(BNA_TXQ_WI_SEND_LSO); | |
2672 | txqent->hdr.wi.lso_mss = htons(gso_size); | |
2673 | } | |
2674 | ||
5216562a | 2675 | if (bnad_tso_prepare(bnad, skb)) { |
271e8b79 | 2676 | BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare); |
5216562a | 2677 | return -EINVAL; |
8b230ed8 | 2678 | } |
5216562a | 2679 | |
8b230ed8 RM |
2680 | flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM); |
2681 | txqent->hdr.wi.l4_hdr_size_n_offset = | |
5216562a RM |
2682 | htons(BNA_TXQ_WI_L4_HDR_N_OFFSET( |
2683 | tcp_hdrlen(skb) >> 2, skb_transport_offset(skb))); | |
2684 | } else { | |
271e8b79 | 2685 | txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND); |
8b230ed8 RM |
2686 | txqent->hdr.wi.lso_mss = 0; |
2687 | ||
5216562a | 2688 | if (unlikely(skb->len > (bnad->netdev->mtu + ETH_HLEN))) { |
271e8b79 | 2689 | BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long); |
5216562a | 2690 | return -EINVAL; |
8b230ed8 | 2691 | } |
8b230ed8 | 2692 | |
271e8b79 RM |
2693 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
2694 | u8 proto = 0; | |
8b230ed8 | 2695 | |
271e8b79 RM |
2696 | if (skb->protocol == __constant_htons(ETH_P_IP)) |
2697 | proto = ip_hdr(skb)->protocol; | |
5216562a | 2698 | #ifdef NETIF_F_IPV6_CSUM |
271e8b79 RM |
2699 | else if (skb->protocol == |
2700 | __constant_htons(ETH_P_IPV6)) { | |
2701 | /* nexthdr may not be TCP immediately. */ | |
2702 | proto = ipv6_hdr(skb)->nexthdr; | |
2703 | } | |
5216562a | 2704 | #endif |
271e8b79 RM |
2705 | if (proto == IPPROTO_TCP) { |
2706 | flags |= BNA_TXQ_WI_CF_TCP_CKSUM; | |
2707 | txqent->hdr.wi.l4_hdr_size_n_offset = | |
2708 | htons(BNA_TXQ_WI_L4_HDR_N_OFFSET | |
2709 | (0, skb_transport_offset(skb))); | |
2710 | ||
2711 | BNAD_UPDATE_CTR(bnad, tcpcsum_offload); | |
2712 | ||
2713 | if (unlikely(skb_headlen(skb) < | |
5216562a RM |
2714 | skb_transport_offset(skb) + |
2715 | tcp_hdrlen(skb))) { | |
271e8b79 | 2716 | BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr); |
5216562a | 2717 | return -EINVAL; |
271e8b79 | 2718 | } |
271e8b79 RM |
2719 | } else if (proto == IPPROTO_UDP) { |
2720 | flags |= BNA_TXQ_WI_CF_UDP_CKSUM; | |
2721 | txqent->hdr.wi.l4_hdr_size_n_offset = | |
2722 | htons(BNA_TXQ_WI_L4_HDR_N_OFFSET | |
2723 | (0, skb_transport_offset(skb))); | |
2724 | ||
2725 | BNAD_UPDATE_CTR(bnad, udpcsum_offload); | |
2726 | if (unlikely(skb_headlen(skb) < | |
5216562a | 2727 | skb_transport_offset(skb) + |
271e8b79 | 2728 | sizeof(struct udphdr))) { |
271e8b79 | 2729 | BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr); |
5216562a | 2730 | return -EINVAL; |
271e8b79 RM |
2731 | } |
2732 | } else { | |
5216562a | 2733 | |
271e8b79 | 2734 | BNAD_UPDATE_CTR(bnad, tx_skb_csum_err); |
5216562a | 2735 | return -EINVAL; |
8b230ed8 | 2736 | } |
5216562a | 2737 | } else |
271e8b79 | 2738 | txqent->hdr.wi.l4_hdr_size_n_offset = 0; |
8b230ed8 RM |
2739 | } |
2740 | ||
2741 | txqent->hdr.wi.flags = htons(flags); | |
8b230ed8 RM |
2742 | txqent->hdr.wi.frame_length = htonl(skb->len); |
2743 | ||
5216562a RM |
2744 | return 0; |
2745 | } | |
2746 | ||
2747 | /* | |
2748 | * bnad_start_xmit : Netdev entry point for Transmit | |
2749 | * Called under lock held by net_device | |
2750 | */ | |
2751 | static netdev_tx_t | |
2752 | bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |
2753 | { | |
2754 | struct bnad *bnad = netdev_priv(netdev); | |
2755 | u32 txq_id = 0; | |
2756 | struct bna_tcb *tcb = NULL; | |
2757 | struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap; | |
2758 | u32 prod, q_depth, vect_id; | |
2759 | u32 wis, vectors, len; | |
2760 | int i; | |
2761 | dma_addr_t dma_addr; | |
2762 | struct bna_txq_entry *txqent; | |
2763 | ||
271e8b79 | 2764 | len = skb_headlen(skb); |
8b230ed8 | 2765 | |
5216562a RM |
2766 | /* Sanity checks for the skb */ |
2767 | ||
2768 | if (unlikely(skb->len <= ETH_HLEN)) { | |
2769 | dev_kfree_skb(skb); | |
2770 | BNAD_UPDATE_CTR(bnad, tx_skb_too_short); | |
2771 | return NETDEV_TX_OK; | |
2772 | } | |
2773 | if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) { | |
2774 | dev_kfree_skb(skb); | |
2775 | BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero); | |
2776 | return NETDEV_TX_OK; | |
2777 | } | |
2778 | if (unlikely(len == 0)) { | |
2779 | dev_kfree_skb(skb); | |
2780 | BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero); | |
2781 | return NETDEV_TX_OK; | |
2782 | } | |
2783 | ||
2784 | tcb = bnad->tx_info[0].tcb[txq_id]; | |
2785 | q_depth = tcb->q_depth; | |
2786 | prod = tcb->producer_index; | |
8b230ed8 | 2787 | |
5216562a | 2788 | unmap_q = tcb->unmap_q; |
271e8b79 | 2789 | |
5216562a RM |
2790 | /* |
2791 | * Takes care of the Tx that is scheduled between clearing the flag | |
2792 | * and the netif_tx_stop_all_queues() call. | |
2793 | */ | |
2794 | if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) { | |
2795 | dev_kfree_skb(skb); | |
2796 | BNAD_UPDATE_CTR(bnad, tx_skb_stopping); | |
2797 | return NETDEV_TX_OK; | |
2798 | } | |
2799 | ||
2800 | vectors = 1 + skb_shinfo(skb)->nr_frags; | |
2801 | wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */ | |
2802 | ||
2803 | if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) { | |
2804 | dev_kfree_skb(skb); | |
2805 | BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors); | |
2806 | return NETDEV_TX_OK; | |
2807 | } | |
2808 | ||
2809 | /* Check for available TxQ resources */ | |
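| /* Try to reclaim completed descriptors before stopping the queue */ |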
2810 | if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) { | |
2811 | if ((*tcb->hw_consumer_index != tcb->consumer_index) && | |
2812 | !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) { | |
2813 | u32 sent; | |
2814 | sent = bnad_txcmpl_process(bnad, tcb); | |
2815 | if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) | |
2816 | bna_ib_ack(tcb->i_dbell, sent); | |
2817 | smp_mb__before_clear_bit(); | |
2818 | clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); | |
2819 | } else { | |
2820 | netif_stop_queue(netdev); | |
2821 | BNAD_UPDATE_CTR(bnad, netif_queue_stop); | |
2822 | } | |
2823 | ||
2824 | smp_mb(); | |
2825 | /* | |
2826 | * Check again to deal with race condition between | |
2827 | * netif_stop_queue here, and netif_wake_queue in | |
2828 | * interrupt handler which is not inside netif tx lock. | |
2829 | */ | |
2830 | if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) { | |
2831 | BNAD_UPDATE_CTR(bnad, netif_queue_stop); | |
2832 | return NETDEV_TX_BUSY; | |
2833 | } else { | |
2834 | netif_wake_queue(netdev); | |
2835 | BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); | |
2836 | } | |
2837 | } | |
2838 | ||
2839 | txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod]; | |
2840 | head_unmap = &unmap_q[prod]; | |
2841 | ||
2842 | /* Program the opcode, flags, frame_len, num_vectors in WI */ | |
2843 | if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) { | |
2844 | dev_kfree_skb(skb); | |
2845 | return NETDEV_TX_OK; | |
2846 | } | |
2847 | txqent->hdr.wi.reserved = 0; | |
2848 | txqent->hdr.wi.num_vectors = vectors; | |
2849 | ||
2850 | head_unmap->skb = skb; | |
2851 | head_unmap->nvecs = 0; | |
2852 | ||
2853 | /* Program the vectors */ | |
2854 | unmap = head_unmap; | |
2855 | dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, | |
2856 | len, DMA_TO_DEVICE); | |
2857 | BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr); | |
2858 | txqent->vector[0].length = htons(len); | |
2859 | dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr); | |
2860 | head_unmap->nvecs++; | |
2861 | ||
2862 | for (i = 0, vect_id = 0; i < vectors - 1; i++) { | |
9e903e08 ED |
2863 | const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; |
2864 | u16 size = skb_frag_size(frag); | |
8b230ed8 | 2865 | |
271e8b79 | 2866 | if (unlikely(size == 0)) { |
5216562a RM |
2867 | /* Undo the changes starting at tcb->producer_index */ |
2868 | bnad_tx_buff_unmap(bnad, unmap_q, q_depth, | |
2869 | tcb->producer_index); | |
271e8b79 RM |
2870 | dev_kfree_skb(skb); |
2871 | BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero); | |
2872 | return NETDEV_TX_OK; | |
2873 | } | |
2874 | ||
2875 | len += size; | |
2876 | ||
5216562a RM |
2877 | vect_id++; |
2878 | if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) { | |
8b230ed8 | 2879 | vect_id = 0; |
5216562a RM |
2880 | BNA_QE_INDX_INC(prod, q_depth); |
2881 | txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod]; | |
271e8b79 RM |
2882 | txqent->hdr.wi_ext.opcode = |
2883 | __constant_htons(BNA_TXQ_WI_EXTENSION); | |
5216562a | 2884 | unmap = &unmap_q[prod]; |
8b230ed8 RM |
2885 | } |
2886 | ||
4d5b1a67 IC |
2887 | dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag, |
2888 | 0, size, DMA_TO_DEVICE); | |
8b230ed8 | 2889 | BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr); |
5216562a RM |
2890 | txqent->vector[vect_id].length = htons(size); |
2891 | dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr, | |
2892 | dma_addr); | |
2893 | head_unmap->nvecs++; | |
8b230ed8 RM |
2894 | } |
2895 | ||
271e8b79 | 2896 | if (unlikely(len != skb->len)) { |
5216562a RM |
2897 | /* Undo the changes starting at tcb->producer_index */ |
2898 | bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index); | |
271e8b79 RM |
2899 | dev_kfree_skb(skb); |
2900 | BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch); | |
2901 | return NETDEV_TX_OK; | |
2902 | } | |
2903 | ||
5216562a RM |
2904 | BNA_QE_INDX_INC(prod, q_depth); |
2905 | tcb->producer_index = prod; | |
8b230ed8 | 2906 | |
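| /* Ensure the work items are visible to the device before the doorbell */ |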
0570afff | 2907 | wmb(); |
be7fa326 RM |
2908 | |
2909 | if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) | |
2910 | return NETDEV_TX_OK; | |
2911 | ||
8b230ed8 RM |
2912 | bna_txq_prod_indx_doorbell(tcb); |
2913 | ||
8b230ed8 RM |
2914 | return NETDEV_TX_OK; |
2915 | } | |
2916 | ||
2917 | /* | |
2918 | * spin_lock is used to synchronize reading of the stats structures, |
2919 | * which are written by BNA under the same lock. |
2920 | */ | |
250e061e ED |
2921 | static struct rtnl_link_stats64 * |
2922 | bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) | |
8b230ed8 RM |
2923 | { |
2924 | struct bnad *bnad = netdev_priv(netdev); | |
2925 | unsigned long flags; | |
2926 | ||
2927 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
2928 | ||
250e061e ED |
2929 | bnad_netdev_qstats_fill(bnad, stats); |
2930 | bnad_netdev_hwstats_fill(bnad, stats); | |
8b230ed8 RM |
2931 | |
2932 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2933 | ||
250e061e | 2934 | return stats; |
8b230ed8 RM |
2935 | } |
2936 | ||
a2122d95 | 2937 | void |
8b230ed8 RM |
2938 | bnad_set_rx_mode(struct net_device *netdev) |
2939 | { | |
2940 | struct bnad *bnad = netdev_priv(netdev); | |
2941 | u32 new_mask, valid_mask; | |
2942 | unsigned long flags; | |
2943 | ||
2944 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
2945 | ||
2946 | new_mask = valid_mask = 0; | |
2947 | ||
2948 | if (netdev->flags & IFF_PROMISC) { | |
2949 | if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) { | |
2950 | new_mask = BNAD_RXMODE_PROMISC_DEFAULT; | |
2951 | valid_mask = BNAD_RXMODE_PROMISC_DEFAULT; | |
2952 | bnad->cfg_flags |= BNAD_CF_PROMISC; | |
2953 | } | |
2954 | } else { | |
2955 | if (bnad->cfg_flags & BNAD_CF_PROMISC) { | |
2956 | new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT; | |
2957 | valid_mask = BNAD_RXMODE_PROMISC_DEFAULT; | |
2958 | bnad->cfg_flags &= ~BNAD_CF_PROMISC; | |
2959 | } | |
2960 | } | |
2961 | ||
2962 | if (netdev->flags & IFF_ALLMULTI) { | |
2963 | if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) { | |
2964 | new_mask |= BNA_RXMODE_ALLMULTI; | |
2965 | valid_mask |= BNA_RXMODE_ALLMULTI; | |
2966 | bnad->cfg_flags |= BNAD_CF_ALLMULTI; | |
2967 | } | |
2968 | } else { | |
2969 | if (bnad->cfg_flags & BNAD_CF_ALLMULTI) { | |
2970 | new_mask &= ~BNA_RXMODE_ALLMULTI; | |
2971 | valid_mask |= BNA_RXMODE_ALLMULTI; | |
2972 | bnad->cfg_flags &= ~BNAD_CF_ALLMULTI; | |
2973 | } | |
2974 | } | |
2975 | ||
271e8b79 RM |
2976 | if (bnad->rx_info[0].rx == NULL) |
2977 | goto unlock; | |
2978 | ||
8b230ed8 RM |
2979 | bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL); |
2980 | ||
2981 | if (!netdev_mc_empty(netdev)) { | |
2982 | u8 *mcaddr_list; | |
2983 | int mc_count = netdev_mc_count(netdev); | |
2984 | ||
2985 | /* Index 0 holds the broadcast address */ | |
2986 | mcaddr_list = | |
2987 | kzalloc((mc_count + 1) * ETH_ALEN, | |
2988 | GFP_ATOMIC); | |
2989 | if (!mcaddr_list) | |
ca1cef3a | 2990 | goto unlock; |
8b230ed8 RM |
2991 | |
2992 | memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN); | |
2993 | ||
2994 | /* Copy rest of the MC addresses */ | |
2995 | bnad_netdev_mc_list_get(netdev, mcaddr_list); | |
2996 | ||
2997 | bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, | |
2998 | mcaddr_list, NULL); | |
2999 | ||
3000 | /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */ | |
3001 | kfree(mcaddr_list); | |
3002 | } | |
ca1cef3a | 3003 | unlock: |
8b230ed8 RM |
3004 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
3005 | } | |
3006 | ||
3007 | /* | |
3008 | * bna_lock is used to sync writes to netdev->addr | |
3009 | * conf_lock cannot be used since this call may be made | |
3010 | * in a non-blocking context. | |
3011 | */ | |
3012 | static int | |
3013 | bnad_set_mac_address(struct net_device *netdev, void *mac_addr) | |
3014 | { | |
3015 | int err; | |
3016 | struct bnad *bnad = netdev_priv(netdev); | |
3017 | struct sockaddr *sa = (struct sockaddr *)mac_addr; | |
3018 | unsigned long flags; | |
3019 | ||
3020 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
3021 | ||
3022 | err = bnad_mac_addr_set_locked(bnad, sa->sa_data); | |
3023 | ||
3024 | if (!err) | |
3025 | memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len); | |
3026 | ||
3027 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
3028 | ||
3029 | return err; | |
3030 | } | |
3031 | ||
3032 | static int | |
078086f3 | 3033 | bnad_mtu_set(struct bnad *bnad, int mtu) |
8b230ed8 | 3034 | { |
8b230ed8 RM |
3035 | unsigned long flags; |
3036 | ||
078086f3 RM |
3037 | init_completion(&bnad->bnad_completions.mtu_comp); |
3038 | ||
3039 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
3040 | bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set); | |
3041 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
3042 | ||
3043 | wait_for_completion(&bnad->bnad_completions.mtu_comp); | |
3044 | ||
3045 | return bnad->bnad_completions.mtu_comp_status; | |
3046 | } | |
3047 | ||
3048 | static int | |
3049 | bnad_change_mtu(struct net_device *netdev, int new_mtu) | |
3050 | { | |
3051 | int err, mtu = netdev->mtu; | |
8b230ed8 RM |
3052 | struct bnad *bnad = netdev_priv(netdev); |
3053 | ||
3054 | if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU) | |
3055 | return -EINVAL; | |
3056 | ||
3057 | mutex_lock(&bnad->conf_mutex); | |
3058 | ||
3059 | netdev->mtu = new_mtu; | |
3060 | ||
078086f3 RM |
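| /* On-wire frame size: Ethernet header + VLAN tag + MTU + FCS */ |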
3061 | mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN; |
3062 | err = bnad_mtu_set(bnad, mtu); | |
3063 | if (err) | |
3064 | err = -EBUSY; | |
8b230ed8 RM |
3065 | |
3066 | mutex_unlock(&bnad->conf_mutex); | |
3067 | return err; | |
3068 | } | |
3069 | ||
8e586137 | 3070 | static int |
80d5c368 | 3071 | bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) |
8b230ed8 RM |
3072 | { |
3073 | struct bnad *bnad = netdev_priv(netdev); | |
3074 | unsigned long flags; | |
3075 | ||
3076 | if (!bnad->rx_info[0].rx) | |
8e586137 | 3077 | return 0; |
8b230ed8 RM |
3078 | |
3079 | mutex_lock(&bnad->conf_mutex); | |
3080 | ||
3081 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
3082 | bna_rx_vlan_add(bnad->rx_info[0].rx, vid); | |
f859d7cb | 3083 | set_bit(vid, bnad->active_vlans); |
8b230ed8 RM |
3084 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
3085 | ||
3086 | mutex_unlock(&bnad->conf_mutex); | |
8e586137 JP |
3087 | |
3088 | return 0; | |
8b230ed8 RM |
3089 | } |
3090 | ||
8e586137 | 3091 | static int |
80d5c368 | 3092 | bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) |
8b230ed8 RM |
3093 | { |
3094 | struct bnad *bnad = netdev_priv(netdev); | |
3095 | unsigned long flags; | |
3096 | ||
3097 | if (!bnad->rx_info[0].rx) | |
8e586137 | 3098 | return 0; |
8b230ed8 RM |
3099 | |
3100 | mutex_lock(&bnad->conf_mutex); | |
3101 | ||
3102 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
f859d7cb | 3103 | clear_bit(vid, bnad->active_vlans); |
8b230ed8 RM |
3104 | bna_rx_vlan_del(bnad->rx_info[0].rx, vid); |
3105 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
3106 | ||
3107 | mutex_unlock(&bnad->conf_mutex); | |
8e586137 JP |
3108 | |
3109 | return 0; | |
8b230ed8 RM |
3110 | } |
3111 | ||
3112 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
3113 | static void | |
3114 | bnad_netpoll(struct net_device *netdev) | |
3115 | { | |
3116 | struct bnad *bnad = netdev_priv(netdev); | |
3117 | struct bnad_rx_info *rx_info; | |
3118 | struct bnad_rx_ctrl *rx_ctrl; | |
3119 | u32 curr_mask; | |
3120 | int i, j; | |
3121 | ||
3122 | if (!(bnad->cfg_flags & BNAD_CF_MSIX)) { | |
3123 | bna_intx_disable(&bnad->bna, curr_mask); | |
3124 | bnad_isr(bnad->pcidev->irq, netdev); | |
3125 | bna_intx_enable(&bnad->bna, curr_mask); | |
3126 | } else { | |
19dbff9f RM |
3127 | /* |
3128 | * Tx processing may happen in sending context, so no need | |
3129 | * to explicitly process completions here | |
3130 | */ | |
3131 | ||
3132 | /* Rx processing */ | |
8b230ed8 RM |
3133 | for (i = 0; i < bnad->num_rx; i++) { |
3134 | rx_info = &bnad->rx_info[i]; | |
3135 | if (!rx_info->rx) | |
3136 | continue; | |
3137 | for (j = 0; j < bnad->num_rxp_per_rx; j++) { | |
3138 | rx_ctrl = &rx_info->rx_ctrl[j]; | |
271e8b79 | 3139 | if (rx_ctrl->ccb) |
8b230ed8 RM |
3140 | bnad_netif_rx_schedule_poll(bnad, |
3141 | rx_ctrl->ccb); | |
8b230ed8 RM |
3142 | } |
3143 | } | |
3144 | } | |
3145 | } | |
3146 | #endif | |
3147 | ||
3148 | static const struct net_device_ops bnad_netdev_ops = { | |
3149 | .ndo_open = bnad_open, | |
3150 | .ndo_stop = bnad_stop, | |
3151 | .ndo_start_xmit = bnad_start_xmit, | |
250e061e | 3152 | .ndo_get_stats64 = bnad_get_stats64, |
8b230ed8 | 3153 | .ndo_set_rx_mode = bnad_set_rx_mode, |
8b230ed8 RM |
3154 | .ndo_validate_addr = eth_validate_addr, |
3155 | .ndo_set_mac_address = bnad_set_mac_address, | |
3156 | .ndo_change_mtu = bnad_change_mtu, | |
8b230ed8 RM |
3157 | .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid, |
3158 | .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid, | |
3159 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
3160 | .ndo_poll_controller = bnad_netpoll | |
3161 | #endif | |
3162 | }; | |
3163 | ||
3164 | static void | |
3165 | bnad_netdev_init(struct bnad *bnad, bool using_dac) | |
3166 | { | |
3167 | struct net_device *netdev = bnad->netdev; | |
3168 | ||
e5ee20e7 MM |
3169 | netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | |
3170 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | |
f646968f | 3171 | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX; |
8b230ed8 | 3172 | |
e5ee20e7 MM |
3173 | netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA | |
3174 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | |
3175 | NETIF_F_TSO | NETIF_F_TSO6; | |
8b230ed8 | 3176 | |
e5ee20e7 | 3177 | netdev->features |= netdev->hw_features | |
f646968f | 3178 | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; |
8b230ed8 RM |
3179 | |
3180 | if (using_dac) | |
3181 | netdev->features |= NETIF_F_HIGHDMA; | |
3182 | ||
8b230ed8 RM |
3183 | netdev->mem_start = bnad->mmio_start; |
3184 | netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1; | |
3185 | ||
3186 | netdev->netdev_ops = &bnad_netdev_ops; | |
3187 | bnad_set_ethtool_ops(netdev); | |
3188 | } | |
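/*
 * Feature-flag note (based on generic netdev semantics, not on anything
 * driver-specific): hw_features is the set the user may toggle at run
 * time via "ethtool -K", while netdev->features is what is currently
 * enabled.  Because NETIF_F_HW_VLAN_CTAG_RX and _FILTER are added to
 * features but not to hw_features above, VLAN stripping and filtering
 * remain fixed on.
 */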
3189 | ||
3190 | /* | |
3191 | * 1. Initialize the bnad structure | |
3192 | * 2. Set up netdev pointer in pci_dev | |
d95d1081 JH |
3193 | * 3. Initialize no. of TxQ & CQs & MSIX vectors |
3194 | * 4. Initialize work queue. | |
8b230ed8 RM |
3195 | */ |
3196 | static int | |
3197 | bnad_init(struct bnad *bnad, | |
3198 | struct pci_dev *pdev, struct net_device *netdev) | |
3199 | { | |
3200 | unsigned long flags; | |
3201 | ||
3202 | SET_NETDEV_DEV(netdev, &pdev->dev); | |
3203 | pci_set_drvdata(pdev, netdev); | |
3204 | ||
3205 | bnad->netdev = netdev; | |
3206 | bnad->pcidev = pdev; | |
3207 | bnad->mmio_start = pci_resource_start(pdev, 0); | |
3208 | bnad->mmio_len = pci_resource_len(pdev, 0); | |
3209 | bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len); | |
3210 | if (!bnad->bar0) { | |
3211 | dev_err(&pdev->dev, "ioremap for bar0 failed\n"); | |
3212 | pci_set_drvdata(pdev, NULL); | |
3213 | return -ENOMEM; | |
3214 | } | |
3215 | pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0, | |
3216 | (unsigned long long) bnad->mmio_len); | |
3217 | ||
3218 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
3219 | if (!bnad_msix_disable) | |
3220 | bnad->cfg_flags = BNAD_CF_MSIX; | |
3221 | ||
3222 | bnad->cfg_flags |= BNAD_CF_DIM_ENABLED; | |
3223 | ||
3224 | bnad_q_num_init(bnad); | |
3225 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
3226 | ||
3227 | bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) + | |
3228 | (bnad->num_rx * bnad->num_rxp_per_rx) + | |
3229 | BNAD_MAILBOX_MSIX_VECTORS; | |
8b230ed8 RM |
3230 | |
3231 | bnad->txq_depth = BNAD_TXQ_DEPTH; | |
3232 | bnad->rxq_depth = BNAD_RXQ_DEPTH; | |
8b230ed8 RM |
3233 | |
3234 | bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO; | |
3235 | bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO; | |
3236 | ||
01b54b14 JH |
3237 | sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id); |
3238 | bnad->work_q = create_singlethread_workqueue(bnad->wq_name); | |
ba21fc69 WY |
3239 | if (!bnad->work_q) { |
3240 | iounmap(bnad->bar0); | |
01b54b14 | 3241 | return -ENOMEM; |
ba21fc69 | 3242 | } |
01b54b14 | 3243 | |
8b230ed8 RM |
3244 | return 0; |
3245 | } | |
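/*
 * MSI-X sizing example (hypothetical numbers, for illustration only):
 * msix_num above is one vector per TxQ, one per Rx path (CQ), plus the
 * mailbox vector(s).  A setup with 1 TxQ, 2 Rx paths and 1 mailbox vector
 * would therefore request 4 MSI-X vectors from bnad_enable_msix().
 */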
3246 | ||
3247 | /* | |
3248 | * Must be called after bnad_pci_uninit() | |
3249 | * so that iounmap() and pci_set_drvdata(NULL) | |
3250 | * happen only after PCI uninitialization. | |
3251 | */ | |
3252 | static void | |
3253 | bnad_uninit(struct bnad *bnad) | |
3254 | { | |
01b54b14 JH |
3255 | if (bnad->work_q) { |
3256 | flush_workqueue(bnad->work_q); | |
3257 | destroy_workqueue(bnad->work_q); | |
3258 | bnad->work_q = NULL; | |
3259 | } | |
3260 | ||
8b230ed8 RM |
3261 | if (bnad->bar0) |
3262 | iounmap(bnad->bar0); | |
3263 | pci_set_drvdata(bnad->pcidev, NULL); | |
3264 | } | |
3265 | ||
3266 | /* | |
3267 | * Initialize locks | |
078086f3 | 3268 | a) Per ioceth mutex used for serializing configuration | |
8b230ed8 RM |
3269 | changes from OS interface |
3270 | b) spin lock used to protect bna state machine | |
3271 | */ | |
3272 | static void | |
3273 | bnad_lock_init(struct bnad *bnad) | |
3274 | { | |
3275 | spin_lock_init(&bnad->bna_lock); | |
3276 | mutex_init(&bnad->conf_mutex); | |
72a9730b | 3277 | mutex_init(&bnad_list_mutex); |
8b230ed8 RM |
3278 | } |
3279 | ||
3280 | static void | |
3281 | bnad_lock_uninit(struct bnad *bnad) | |
3282 | { | |
3283 | mutex_destroy(&bnad->conf_mutex); | |
72a9730b | 3284 | mutex_destroy(&bnad_list_mutex); |
8b230ed8 RM |
3285 | } |
3286 | ||
3287 | /* PCI Initialization */ | |
3288 | static int | |
3289 | bnad_pci_init(struct bnad *bnad, | |
3290 | struct pci_dev *pdev, bool *using_dac) | |
3291 | { | |
3292 | int err; | |
3293 | ||
3294 | err = pci_enable_device(pdev); | |
3295 | if (err) | |
3296 | return err; | |
3297 | err = pci_request_regions(pdev, BNAD_NAME); | |
3298 | if (err) | |
3299 | goto disable_device; | |
5ea74318 IV |
3300 | if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && |
3301 | !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { | |
3db1cd5c | 3302 | *using_dac = true; |
8b230ed8 | 3303 | } else { |
5ea74318 | 3304 | err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); |
8b230ed8 | 3305 | if (err) { |
5ea74318 IV |
3306 | err = dma_set_coherent_mask(&pdev->dev, |
3307 | DMA_BIT_MASK(32)); | |
8b230ed8 RM |
3308 | if (err) |
3309 | goto release_regions; | |
3310 | } | |
3db1cd5c | 3311 | *using_dac = false; |
8b230ed8 RM |
3312 | } |
3313 | pci_set_master(pdev); | |
3314 | return 0; | |
3315 | ||
3316 | release_regions: | |
3317 | pci_release_regions(pdev); | |
3318 | disable_device: | |
3319 | pci_disable_device(pdev); | |
3320 | ||
3321 | return err; | |
3322 | } | |
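/*
 * DMA mask note (restating the logic above): a 64-bit streaming and
 * coherent mask is tried first; if either call fails the driver falls
 * back to 32-bit masks and reports *using_dac = false, which in turn
 * keeps NETIF_F_HIGHDMA off in bnad_netdev_init().
 */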
3323 | ||
3324 | static void | |
3325 | bnad_pci_uninit(struct pci_dev *pdev) | |
3326 | { | |
3327 | pci_release_regions(pdev); | |
3328 | pci_disable_device(pdev); | |
3329 | } | |
3330 | ||
c4eef189 | 3331 | static int |
8b230ed8 RM |
3332 | bnad_pci_probe(struct pci_dev *pdev, |
3333 | const struct pci_device_id *pcidev_id) | |
3334 | { | |
3caa1e95 | 3335 | bool using_dac; |
0120b99c | 3336 | int err; |
8b230ed8 RM |
3337 | struct bnad *bnad; |
3338 | struct bna *bna; | |
3339 | struct net_device *netdev; | |
3340 | struct bfa_pcidev pcidev_info; | |
3341 | unsigned long flags; | |
3342 | ||
3343 | pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n", | |
3344 | pdev, pcidev_id, PCI_FUNC(pdev->devfn)); | |
3345 | ||
3346 | mutex_lock(&bnad_fwimg_mutex); | |
3347 | if (!cna_get_firmware_buf(pdev)) { | |
3348 | mutex_unlock(&bnad_fwimg_mutex); | |
3349 | pr_warn("Failed to load Firmware Image!\n"); | |
3350 | return -ENODEV; | |
3351 | } | |
3352 | mutex_unlock(&bnad_fwimg_mutex); | |
3353 | ||
3354 | /* | |
3355 | * Allocates sizeof(struct net_device) + sizeof(struct bnad); | |
3356 | * bnad = netdev_priv(netdev) | |
3357 | */ | |
3358 | netdev = alloc_etherdev(sizeof(struct bnad)); | |
3359 | if (!netdev) { | |
8b230ed8 RM |
3360 | err = -ENOMEM; |
3361 | return err; | |
3362 | } | |
3363 | bnad = netdev_priv(netdev); | |
078086f3 | 3364 | bnad_lock_init(bnad); |
72a9730b | 3365 | bnad_add_to_list(bnad); |
078086f3 RM |
3366 | |
3367 | mutex_lock(&bnad->conf_mutex); | |
8b230ed8 RM |
3368 | /* |
3369 | * PCI initialization | |
0120b99c | 3370 | * Output : using_dac = 1 for 64 bit DMA |
be7fa326 | 3371 | * = 0 for 32 bit DMA |
8b230ed8 | 3372 | */ |
e905ed57 | 3373 | using_dac = false; |
8b230ed8 RM |
3374 | err = bnad_pci_init(bnad, pdev, &using_dac); |
3375 | if (err) | |
44861f44 | 3376 | goto unlock_mutex; |
8b230ed8 | 3377 | |
8b230ed8 RM |
3378 | /* |
3379 | * Initialize bnad structure | |
3380 | * Setup relation between pci_dev & netdev | |
8b230ed8 RM |
3381 | */ |
3382 | err = bnad_init(bnad, pdev, netdev); | |
3383 | if (err) | |
3384 | goto pci_uninit; | |
078086f3 | 3385 | |
8b230ed8 RM |
3386 | /* Initialize netdev structure, set up ethtool ops */ |
3387 | bnad_netdev_init(bnad, using_dac); | |
3388 | ||
815f41e7 RM |
3389 | /* Set link to down state */ |
3390 | netif_carrier_off(netdev); | |
3391 | ||
7afc5dbd KG |
3392 | /* Set up the debugfs node for this bnad */ | |
3393 | if (bna_debugfs_enable) | |
3394 | bnad_debugfs_init(bnad); | |
3395 | ||
8b230ed8 | 3396 | /* Get resource requirements from bna */ |
078086f3 | 3397 | spin_lock_irqsave(&bnad->bna_lock, flags); |
8b230ed8 | 3398 | bna_res_req(&bnad->res_info[0]); |
078086f3 | 3399 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
8b230ed8 RM |
3400 | |
3401 | /* Allocate resources from bna */ | |
078086f3 | 3402 | err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX); |
8b230ed8 | 3403 | if (err) |
078086f3 | 3404 | goto drv_uninit; |
8b230ed8 RM |
3405 | |
3406 | bna = &bnad->bna; | |
3407 | ||
3408 | /* Setup pcidev_info for bna_init() */ | |
3409 | pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn); | |
3410 | pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn); | |
3411 | pcidev_info.device_id = bnad->pcidev->device; | |
3412 | pcidev_info.pci_bar_kva = bnad->bar0; | |
3413 | ||
8b230ed8 RM |
3414 | spin_lock_irqsave(&bnad->bna_lock, flags); |
3415 | bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]); | |
8b230ed8 RM |
3416 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
3417 | ||
3418 | bnad->stats.bna_stats = &bna->stats; | |
3419 | ||
078086f3 RM |
3420 | bnad_enable_msix(bnad); |
3421 | err = bnad_mbox_irq_alloc(bnad); | |
3422 | if (err) | |
3423 | goto res_free; | |
3424 | ||
8b230ed8 | 3425 | /* Set up timers */ |
078086f3 | 3426 | setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout, |
8b230ed8 | 3427 | ((unsigned long)bnad)); |
078086f3 | 3428 | setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check, |
8b230ed8 | 3429 | ((unsigned long)bnad)); |
078086f3 | 3430 | setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout, |
1d32f769 | 3431 | ((unsigned long)bnad)); |
078086f3 | 3432 | setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout, |
8b230ed8 RM |
3433 | ((unsigned long)bnad)); |
3434 | ||
3435 | /* Now start the timer before calling IOC */ | |
078086f3 | 3436 | mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer, |
8b230ed8 RM |
3437 | jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ)); |
3438 | ||
3439 | /* | |
3440 | * Start the chip | |
078086f3 RM |
3441 | * If the callback comes back with an error, we bail out. | |
3442 | * This is a catastrophic error. | |
8b230ed8 | 3443 | */ |
078086f3 RM |
3444 | err = bnad_ioceth_enable(bnad); |
3445 | if (err) { | |
3446 | pr_err("BNA: Initialization failed err=%d\n", | |
3447 | err); | |
3448 | goto probe_success; | |
3449 | } | |
3450 | ||
3451 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
3452 | if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) || | |
3453 | bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) { | |
3454 | bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1, | |
3455 | bna_attr(bna)->num_rxp - 1); | |
3456 | if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) || | |
3457 | bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) | |
3458 | err = -EIO; | |
3459 | } | |
3caa1e95 RM |
3460 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
3461 | if (err) | |
3462 | goto disable_ioceth; | |
3463 | ||
3464 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
078086f3 RM |
3465 | bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]); |
3466 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
3467 | ||
3468 | err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX); | |
0caa9aae RM |
3469 | if (err) { |
3470 | err = -EIO; | |
078086f3 | 3471 | goto disable_ioceth; |
0caa9aae | 3472 | } |
078086f3 RM |
3473 | |
3474 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
3475 | bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]); | |
3476 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
8b230ed8 RM |
3477 | |
3478 | /* Get the burnt-in mac */ | |
3479 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
078086f3 | 3480 | bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr); |
8b230ed8 RM |
3481 | bnad_set_netdev_perm_addr(bnad); |
3482 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
3483 | ||
0caa9aae RM |
3484 | mutex_unlock(&bnad->conf_mutex); |
3485 | ||
8b230ed8 RM |
3486 | /* Finally, register with the net_device layer */ | |
3487 | err = register_netdev(netdev); | |
3488 | if (err) { | |
3489 | pr_err("BNA : Registering with netdev failed\n"); | |
078086f3 | 3490 | goto probe_uninit; |
8b230ed8 | 3491 | } |
078086f3 | 3492 | set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags); |
8b230ed8 | 3493 | |
0caa9aae RM |
3494 | return 0; |
3495 | ||
078086f3 RM |
3496 | probe_success: |
3497 | mutex_unlock(&bnad->conf_mutex); | |
8b230ed8 RM |
3498 | return 0; |
3499 | ||
078086f3 | 3500 | probe_uninit: |
3fc72370 | 3501 | mutex_lock(&bnad->conf_mutex); |
078086f3 RM |
3502 | bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX); |
3503 | disable_ioceth: | |
3504 | bnad_ioceth_disable(bnad); | |
3505 | del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer); | |
3506 | del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer); | |
3507 | del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer); | |
8b230ed8 RM |
3508 | spin_lock_irqsave(&bnad->bna_lock, flags); |
3509 | bna_uninit(bna); | |
3510 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
078086f3 | 3511 | bnad_mbox_irq_free(bnad); |
8b230ed8 | 3512 | bnad_disable_msix(bnad); |
078086f3 RM |
3513 | res_free: |
3514 | bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX); | |
3515 | drv_uninit: | |
7afc5dbd KG |
3516 | /* Remove the debugfs node for this bnad */ |
3517 | kfree(bnad->regdata); | |
3518 | bnad_debugfs_uninit(bnad); | |
078086f3 | 3519 | bnad_uninit(bnad); |
8b230ed8 RM |
3520 | pci_uninit: |
3521 | bnad_pci_uninit(pdev); | |
44861f44 | 3522 | unlock_mutex: |
078086f3 | 3523 | mutex_unlock(&bnad->conf_mutex); |
72a9730b | 3524 | bnad_remove_from_list(bnad); |
8b230ed8 | 3525 | bnad_lock_uninit(bnad); |
8b230ed8 RM |
3526 | free_netdev(netdev); |
3527 | return err; | |
3528 | } | |
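/*
 * Probe flow summary (derived from the function above): firmware load ->
 * alloc_etherdev -> PCI enable/map -> bnad/netdev init -> bna resource
 * allocation -> MSI-X and mailbox IRQ setup -> IOC enable -> TxQ/RxP
 * sizing -> module resource allocation -> burnt-in MAC read ->
 * register_netdev.  Each error label unwinds only the steps that had
 * completed before the failure.
 */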
3529 | ||
c4eef189 | 3530 | static void |
8b230ed8 RM |
3531 | bnad_pci_remove(struct pci_dev *pdev) |
3532 | { | |
3533 | struct net_device *netdev = pci_get_drvdata(pdev); | |
3534 | struct bnad *bnad; | |
3535 | struct bna *bna; | |
3536 | unsigned long flags; | |
3537 | ||
3538 | if (!netdev) | |
3539 | return; | |
3540 | ||
3541 | pr_info("%s bnad_pci_remove\n", netdev->name); | |
3542 | bnad = netdev_priv(netdev); | |
3543 | bna = &bnad->bna; | |
3544 | ||
078086f3 RM |
3545 | if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags)) |
3546 | unregister_netdev(netdev); | |
8b230ed8 RM |
3547 | |
3548 | mutex_lock(&bnad->conf_mutex); | |
078086f3 RM |
3549 | bnad_ioceth_disable(bnad); |
3550 | del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer); | |
3551 | del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer); | |
3552 | del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer); | |
8b230ed8 RM |
3553 | spin_lock_irqsave(&bnad->bna_lock, flags); |
3554 | bna_uninit(bna); | |
3555 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
8b230ed8 | 3556 | |
078086f3 RM |
3557 | bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX); |
3558 | bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX); | |
3559 | bnad_mbox_irq_free(bnad); | |
8b230ed8 RM |
3560 | bnad_disable_msix(bnad); |
3561 | bnad_pci_uninit(pdev); | |
078086f3 | 3562 | mutex_unlock(&bnad->conf_mutex); |
72a9730b | 3563 | bnad_remove_from_list(bnad); |
8b230ed8 | 3564 | bnad_lock_uninit(bnad); |
7afc5dbd KG |
3565 | /* Remove the debugfs node for this bnad */ |
3566 | kfree(bnad->regdata); | |
3567 | bnad_debugfs_uninit(bnad); | |
8b230ed8 RM |
3568 | bnad_uninit(bnad); |
3569 | free_netdev(netdev); | |
3570 | } | |
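/*
 * Teardown mirrors the probe error path: unregister the netdev, disable
 * the IOC and delete its timers, uninitialize the bna layer, release bna
 * and module resources, free the mailbox IRQ and MSI-X vectors, undo the
 * PCI setup, and finally drop the debugfs node, the bnad state and the
 * netdev itself.
 */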
3571 | ||
0120b99c | 3572 | static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = { |
8b230ed8 RM |
3573 | { |
3574 | PCI_DEVICE(PCI_VENDOR_ID_BROCADE, | |
3575 | PCI_DEVICE_ID_BROCADE_CT), | |
3576 | .class = PCI_CLASS_NETWORK_ETHERNET << 8, | |
3577 | .class_mask = 0xffff00 | |
586b2816 RM |
3578 | }, |
3579 | { | |
3580 | PCI_DEVICE(PCI_VENDOR_ID_BROCADE, | |
3581 | BFA_PCI_DEVICE_ID_CT2), | |
3582 | .class = PCI_CLASS_NETWORK_ETHERNET << 8, | |
3583 | .class_mask = 0xffff00 | |
3584 | }, | |
3585 | {0, }, | |
8b230ed8 RM |
3586 | }; |
3587 | ||
3588 | MODULE_DEVICE_TABLE(pci, bnad_pci_id_table); | |
3589 | ||
3590 | static struct pci_driver bnad_pci_driver = { | |
3591 | .name = BNAD_NAME, | |
3592 | .id_table = bnad_pci_id_table, | |
3593 | .probe = bnad_pci_probe, | |
c4eef189 | 3594 | .remove = bnad_pci_remove, |
8b230ed8 RM |
3595 | }; |
3596 | ||
3597 | static int __init | |
3598 | bnad_module_init(void) | |
3599 | { | |
3600 | int err; | |
3601 | ||
5aad0011 RM |
3602 | pr_info("Brocade 10G Ethernet driver - version: %s\n", |
3603 | BNAD_VERSION); | |
8b230ed8 | 3604 | |
8a891429 | 3605 | bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover); |
8b230ed8 RM |
3606 | |
3607 | err = pci_register_driver(&bnad_pci_driver); | |
3608 | if (err < 0) { | |
3609 | pr_err("bna : PCI registration failed in module init " | |
3610 | "(%d)\n", err); | |
3611 | return err; | |
3612 | } | |
3613 | ||
3614 | return 0; | |
3615 | } | |
3616 | ||
3617 | static void __exit | |
3618 | bnad_module_exit(void) | |
3619 | { | |
3620 | pci_unregister_driver(&bnad_pci_driver); | |
294ca868 | 3621 | release_firmware(bfi_fw); |
8b230ed8 RM |
3622 | } |
3623 | ||
3624 | module_init(bnad_module_init); | |
3625 | module_exit(bnad_module_exit); | |
3626 | ||
3627 | MODULE_AUTHOR("Brocade"); | |
3628 | MODULE_LICENSE("GPL"); | |
3629 | MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver"); | |
3630 | MODULE_VERSION(BNAD_VERSION); | |
3631 | MODULE_FIRMWARE(CNA_FW_FILE_CT); | |
1bf9fd70 | 3632 | MODULE_FIRMWARE(CNA_FW_FILE_CT2); |