[SK_BUFF]: Introduce skb_transport_offset()
drivers/net/cxgb3/sge.c (GitHub/mt8127/android_kernel_alcatel_ttab.git)
1 /*
2 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32 #include <linux/skbuff.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/dma-mapping.h>
39 #include "common.h"
40 #include "regs.h"
41 #include "sge_defs.h"
42 #include "t3_cpl.h"
43 #include "firmware_exports.h"
44
45 #define USE_GTS 0
46
47 #define SGE_RX_SM_BUF_SIZE 1536
48
49 /*
50 * If USE_RX_PAGE is defined, the small freelist is populated with (partial)
51 * pages instead of skbs. Pages are carved up into RX_PAGE_SIZE chunks (must
52 * be a multiple of the host page size).
53 */
54 #define USE_RX_PAGE
55 #define RX_PAGE_SIZE 2048
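/*
 * Worked example (assuming a 4 KB host page size): refill_fl() below carves
 * each page into two RX_PAGE_SIZE chunks; get_page() keeps the page alive
 * while earlier chunks are still in flight, and a fresh page is allocated
 * once frag.page_offset reaches PAGE_SIZE.
 */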
56
57 /*
58 * skb freelist packets are copied into a new skb (and the freelist one is
59 * reused) if their len is <=
60 */
61 #define SGE_RX_COPY_THRES 256
62
63 /*
64 * Minimum number of freelist entries before we start dropping TUNNEL frames.
65 */
66 #define SGE_RX_DROP_THRES 16
67
68 /*
69 * Period of the Tx buffer reclaim timer. This timer does not need to run
70 * frequently as Tx buffers are usually reclaimed by new Tx packets.
71 */
72 #define TX_RECLAIM_PERIOD (HZ / 4)
73
74 /* WR size in bytes; a flit is one 64-bit (8-byte) word */
75 #define WR_LEN (WR_FLITS * 8)
76
77 /*
78 * Types of Tx queues in each queue set. Order here matters, do not change.
79 */
80 enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
81
82 /* Values for sge_txq.flags */
83 enum {
84 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
85 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
86 };
87
88 struct tx_desc {
89 u64 flit[TX_DESC_FLITS];
90 };
91
92 struct rx_desc {
93 __be32 addr_lo;
94 __be32 len_gen;
95 __be32 gen2;
96 __be32 addr_hi;
97 };
98
99 struct tx_sw_desc { /* SW state per Tx descriptor */
100 struct sk_buff *skb;
101 };
102
103 struct rx_sw_desc { /* SW state per Rx descriptor */
104 union {
105 struct sk_buff *skb;
106 struct sge_fl_page page;
107 } t;
108 DECLARE_PCI_UNMAP_ADDR(dma_addr);
109 };
110
111 struct rsp_desc { /* response queue descriptor */
112 struct rss_header rss_hdr;
113 __be32 flags;
114 __be32 len_cq;
115 u8 imm_data[47];
116 u8 intr_gen;
117 };
118
119 struct unmap_info { /* packet unmapping info, overlays skb->cb */
120 int sflit; /* start flit of first SGL entry in Tx descriptor */
121 u16 fragidx; /* first page fragment in current Tx descriptor */
122 u16 addr_idx; /* buffer index of first SGL entry in descriptor */
123 u32 len; /* mapped length of skb main body */
124 };
125
126 /*
127 * Holds unmapping information for Tx packets that need deferred unmapping.
128 * This structure lives at skb->head and must be allocated by callers.
129 */
130 struct deferred_unmap_info {
131 struct pci_dev *pdev;
132 dma_addr_t addr[MAX_SKB_FRAGS + 1];
133 };
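/*
 * Note for offload senders: setup_deferred_unmapping() below reads this
 * structure straight from skb->head, so the WR must be built with
 * sizeof(struct deferred_unmap_info) bytes of otherwise unused space at the
 * very start of the skb's buffer.
 */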
134
135 /*
136 * Maps a number of flits to the number of Tx descriptors that can hold them.
137 * The formula is
138 *
139 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
140 *
141 * HW allows up to 4 descriptors to be combined into a WR.
142 */
143 static u8 flit_desc_map[] = {
144 0,
145 #if SGE_NUM_GENBITS == 1
146 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
147 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
148 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
149 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
150 #elif SGE_NUM_GENBITS == 2
151 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
152 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
153 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
154 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
155 #else
156 # error "SGE_NUM_GENBITS must be 1 or 2"
157 #endif
158 };
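/*
 * Worked example, assuming WR_FLITS == 15 when SGE_NUM_GENBITS == 2 (one
 * flit per descriptor is then taken by the generation word written in
 * wr_gen2()): flit_desc_map[16] == 2 == 1 + (16 - 2) / (15 - 1), and 29
 * flits still fit in 2 descriptors while 30 flits need 3.
 */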
159
160 static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
161 {
162 return container_of(q, struct sge_qset, fl[qidx]);
163 }
164
165 static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
166 {
167 return container_of(q, struct sge_qset, rspq);
168 }
169
170 static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
171 {
172 return container_of(q, struct sge_qset, txq[qidx]);
173 }
174
175 /**
176 * refill_rspq - replenish an SGE response queue
177 * @adapter: the adapter
178 * @q: the response queue to replenish
179 * @credits: how many new responses to make available
180 *
181 * Replenishes a response queue by making the supplied number of responses
182 * available to HW.
183 */
184 static inline void refill_rspq(struct adapter *adapter,
185 const struct sge_rspq *q, unsigned int credits)
186 {
187 t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
188 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
189 }
190
191 /**
192 * need_skb_unmap - does the platform need unmapping of sk_buffs?
193 *
194 * Returns true if the platform needs sk_buff unmapping. The compiler
195 * optimizes away the unmapping code when this evaluates to a constant false.
196 */
197 static inline int need_skb_unmap(void)
198 {
199 /*
200 * This structure is used to tell if the platform needs buffer
201 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
202 */
203 struct dummy {
204 DECLARE_PCI_UNMAP_ADDR(addr);
205 };
206
207 return sizeof(struct dummy) != 0;
208 }
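/*
 * Typical use, as in free_tx_desc() below; the branch is compiled out on
 * platforms where DECLARE_PCI_UNMAP_ADDR() expands to nothing:
 *
 *	if (need_skb_unmap())
 *		unmap_skb(skb, q, cidx, pdev);
 */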
209
210 /**
211 * unmap_skb - unmap a packet main body and its page fragments
212 * @skb: the packet
213 * @q: the Tx queue containing Tx descriptors for the packet
214 * @cidx: index of Tx descriptor
215 * @pdev: the PCI device
216 *
217 * Unmap the main body of an sk_buff and its page fragments, if any.
218 * Because of the fairly complicated structure of our SGLs and the desire
219 * to conserve space for metadata, we keep the information necessary to
220 * unmap an sk_buff partly in the sk_buff itself (in its cb), and partly
221 * in the Tx descriptors (the physical addresses of the various data
222 * buffers). The send functions initialize the state in skb->cb so we
223 * can unmap the buffers held in the first Tx descriptor here, and we
224 * have enough information at this point to update the state for the next
225 * Tx descriptor.
226 */
227 static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
228 unsigned int cidx, struct pci_dev *pdev)
229 {
230 const struct sg_ent *sgp;
231 struct unmap_info *ui = (struct unmap_info *)skb->cb;
232 int nfrags, frag_idx, curflit, j = ui->addr_idx;
233
234 sgp = (struct sg_ent *)&q->desc[cidx].flit[ui->sflit];
235
236 if (ui->len) {
237 pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]), ui->len,
238 PCI_DMA_TODEVICE);
239 ui->len = 0; /* so we know for next descriptor for this skb */
240 j = 1;
241 }
242
243 frag_idx = ui->fragidx;
244 curflit = ui->sflit + 1 + j;
245 nfrags = skb_shinfo(skb)->nr_frags;
246
247 while (frag_idx < nfrags && curflit < WR_FLITS) {
248 pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
249 skb_shinfo(skb)->frags[frag_idx].size,
250 PCI_DMA_TODEVICE);
251 j ^= 1;
252 if (j == 0) {
253 sgp++;
254 curflit++;
255 }
256 curflit++;
257 frag_idx++;
258 }
259
260 if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
261 ui->fragidx = frag_idx;
262 ui->addr_idx = j;
263 ui->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
264 }
265 }
266
267 /**
268 * free_tx_desc - reclaims Tx descriptors and their buffers
269 * @adapter: the adapter
270 * @q: the Tx queue to reclaim descriptors from
271 * @n: the number of descriptors to reclaim
272 *
273 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
274 * Tx buffers. Called with the Tx queue lock held.
275 */
276 static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
277 unsigned int n)
278 {
279 struct tx_sw_desc *d;
280 struct pci_dev *pdev = adapter->pdev;
281 unsigned int cidx = q->cidx;
282
283 const int need_unmap = need_skb_unmap() &&
284 q->cntxt_id >= FW_TUNNEL_SGEEC_START;
285
286 d = &q->sdesc[cidx];
287 while (n--) {
288 if (d->skb) { /* an SGL is present */
289 if (need_unmap)
290 unmap_skb(d->skb, q, cidx, pdev);
291 if (d->skb->priority == cidx)
292 kfree_skb(d->skb);
293 }
294 ++d;
295 if (++cidx == q->size) {
296 cidx = 0;
297 d = q->sdesc;
298 }
299 }
300 q->cidx = cidx;
301 }
302
303 /**
304 * reclaim_completed_tx - reclaims completed Tx descriptors
305 * @adapter: the adapter
306 * @q: the Tx queue to reclaim completed descriptors from
307 *
308 * Reclaims Tx descriptors that the SGE has indicated it has processed,
309 * and frees the associated buffers if possible. Called with the Tx
310 * queue's lock held.
311 */
312 static inline void reclaim_completed_tx(struct adapter *adapter,
313 struct sge_txq *q)
314 {
315 unsigned int reclaim = q->processed - q->cleaned;
316
317 if (reclaim) {
318 free_tx_desc(adapter, q, reclaim);
319 q->cleaned += reclaim;
320 q->in_use -= reclaim;
321 }
322 }
323
324 /**
325 * should_restart_tx - are there enough resources to restart a Tx queue?
326 * @q: the Tx queue
327 *
328 * Checks if there are enough descriptors to restart a suspended Tx queue.
329 */
330 static inline int should_restart_tx(const struct sge_txq *q)
331 {
332 unsigned int r = q->processed - q->cleaned;
333
334 return q->in_use - r < (q->size >> 1);
335 }
336
337 /**
338 * free_rx_bufs - free the Rx buffers on an SGE free list
339 * @pdev: the PCI device associated with the adapter
340 * @rxq: the SGE free list to clean up
341 *
342 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
343 * this queue should be stopped before calling this function.
344 */
345 static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
346 {
347 unsigned int cidx = q->cidx;
348
349 while (q->credits--) {
350 struct rx_sw_desc *d = &q->sdesc[cidx];
351
352 pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
353 q->buf_size, PCI_DMA_FROMDEVICE);
354
355 if (q->buf_size != RX_PAGE_SIZE) {
356 kfree_skb(d->t.skb);
357 d->t.skb = NULL;
358 } else {
359 if (d->t.page.frag.page)
360 put_page(d->t.page.frag.page);
361 d->t.page.frag.page = NULL;
362 }
363 if (++cidx == q->size)
364 cidx = 0;
365 }
366
367 if (q->page.frag.page)
368 put_page(q->page.frag.page);
369 q->page.frag.page = NULL;
370 }
371
372 /**
373 * add_one_rx_buf - add a packet buffer to a free-buffer list
374 * @va: va of the buffer to add
375 * @len: the buffer length
376 * @d: the HW Rx descriptor to write
377 * @sd: the SW Rx descriptor to write
378 * @gen: the generation bit value
379 * @pdev: the PCI device associated with the adapter
380 *
381 * Add a buffer of the given length to the supplied HW and SW Rx
382 * descriptors.
383 */
384 static inline void add_one_rx_buf(unsigned char *va, unsigned int len,
385 struct rx_desc *d, struct rx_sw_desc *sd,
386 unsigned int gen, struct pci_dev *pdev)
387 {
388 dma_addr_t mapping;
389
390 mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
391 pci_unmap_addr_set(sd, dma_addr, mapping);
392
393 d->addr_lo = cpu_to_be32(mapping);
394 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
395 wmb();
396 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
397 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
398 }
399
400 /**
401 * refill_fl - refill an SGE free-buffer list
402 * @adapter: the adapter
403 * @q: the free-list to refill
404 * @n: the number of new buffers to allocate
405 * @gfp: the gfp flags for allocating new buffers
406 *
407 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
408 * allocated with the supplied gfp flags. The caller must assure that
409 * @n does not exceed the queue's capacity.
410 */
411 static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
412 {
413 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
414 struct rx_desc *d = &q->desc[q->pidx];
415 struct sge_fl_page *p = &q->page;
416
417 while (n--) {
418 unsigned char *va;
419
420 if (unlikely(q->buf_size != RX_PAGE_SIZE)) {
421 struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
422
423 if (!skb) {
424 q->alloc_failed++;
425 break;
426 }
427 va = skb->data;
428 sd->t.skb = skb;
429 } else {
430 if (!p->frag.page) {
431 p->frag.page = alloc_pages(gfp, 0);
432 if (unlikely(!p->frag.page)) {
433 q->alloc_failed++;
434 break;
435 } else {
436 p->frag.size = RX_PAGE_SIZE;
437 p->frag.page_offset = 0;
438 p->va = page_address(p->frag.page);
439 }
440 }
441
442 memcpy(&sd->t, p, sizeof(*p));
443 va = p->va;
444
445 p->frag.page_offset += RX_PAGE_SIZE;
446 BUG_ON(p->frag.page_offset > PAGE_SIZE);
447 p->va += RX_PAGE_SIZE;
448 if (p->frag.page_offset == PAGE_SIZE)
449 p->frag.page = NULL;
450 else
451 get_page(p->frag.page);
452 }
453
454 add_one_rx_buf(va, q->buf_size, d, sd, q->gen, adap->pdev);
455
456 d++;
457 sd++;
458 if (++q->pidx == q->size) {
459 q->pidx = 0;
460 q->gen ^= 1;
461 sd = q->sdesc;
462 d = q->desc;
463 }
464 q->credits++;
465 }
466
467 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
468 }
469
470 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
471 {
472 refill_fl(adap, fl, min(16U, fl->size - fl->credits), GFP_ATOMIC);
473 }
474
475 /**
476 * recycle_rx_buf - recycle a receive buffer
477 * @adapter: the adapter
478 * @q: the SGE free list
479 * @idx: index of buffer to recycle
480 *
481 * Recycles the specified buffer on the given free list by adding it at
482 * the next available slot on the list.
483 */
484 static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
485 unsigned int idx)
486 {
487 struct rx_desc *from = &q->desc[idx];
488 struct rx_desc *to = &q->desc[q->pidx];
489
490 memcpy(&q->sdesc[q->pidx], &q->sdesc[idx], sizeof(struct rx_sw_desc));
491 to->addr_lo = from->addr_lo; /* already big endian */
492 to->addr_hi = from->addr_hi; /* likewise */
493 wmb();
494 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
495 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
496 q->credits++;
497
498 if (++q->pidx == q->size) {
499 q->pidx = 0;
500 q->gen ^= 1;
501 }
502 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
503 }
504
505 /**
506 * alloc_ring - allocate resources for an SGE descriptor ring
507 * @pdev: the PCI device
508 * @nelem: the number of descriptors
509 * @elem_size: the size of each descriptor
510 * @sw_size: the size of the SW state associated with each ring element
511 * @phys: the physical address of the allocated ring
512 * @metadata: address of the array holding the SW state for the ring
513 *
514 * Allocates resources for an SGE descriptor ring, such as Tx queues,
515 * free buffer lists, or response queues. Each SGE ring requires
516 * space for its HW descriptors plus, optionally, space for the SW state
517 * associated with each HW entry (the metadata). The function returns
518 * three values: the virtual address for the HW ring (the return value
519 * of the function), the physical address of the HW ring, and the address
520 * of the SW ring.
521 */
522 static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
523 size_t sw_size, dma_addr_t * phys, void *metadata)
524 {
525 size_t len = nelem * elem_size;
526 void *s = NULL;
527 void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
528
529 if (!p)
530 return NULL;
531 if (sw_size) {
532 s = kcalloc(nelem, sw_size, GFP_KERNEL);
533
534 if (!s) {
535 dma_free_coherent(&pdev->dev, len, p, *phys);
536 return NULL;
537 }
538 }
539 if (metadata)
540 *(void **)metadata = s;
541 memset(p, 0, len);
542 return p;
543 }
544
545 /**
546 * free_qset - free the resources of an SGE queue set
547 * @adapter: the adapter owning the queue set
548 * @q: the queue set
549 *
550 * Release the HW and SW resources associated with an SGE queue set, such
551 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
552 * queue set must be quiesced prior to calling this.
553 */
554 void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
555 {
556 int i;
557 struct pci_dev *pdev = adapter->pdev;
558
559 if (q->tx_reclaim_timer.function)
560 del_timer_sync(&q->tx_reclaim_timer);
561
562 for (i = 0; i < SGE_RXQ_PER_SET; ++i)
563 if (q->fl[i].desc) {
564 spin_lock(&adapter->sge.reg_lock);
565 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
566 spin_unlock(&adapter->sge.reg_lock);
567 free_rx_bufs(pdev, &q->fl[i]);
568 kfree(q->fl[i].sdesc);
569 dma_free_coherent(&pdev->dev,
570 q->fl[i].size *
571 sizeof(struct rx_desc), q->fl[i].desc,
572 q->fl[i].phys_addr);
573 }
574
575 for (i = 0; i < SGE_TXQ_PER_SET; ++i)
576 if (q->txq[i].desc) {
577 spin_lock(&adapter->sge.reg_lock);
578 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
579 spin_unlock(&adapter->sge.reg_lock);
580 if (q->txq[i].sdesc) {
581 free_tx_desc(adapter, &q->txq[i],
582 q->txq[i].in_use);
583 kfree(q->txq[i].sdesc);
584 }
585 dma_free_coherent(&pdev->dev,
586 q->txq[i].size *
587 sizeof(struct tx_desc),
588 q->txq[i].desc, q->txq[i].phys_addr);
589 __skb_queue_purge(&q->txq[i].sendq);
590 }
591
592 if (q->rspq.desc) {
593 spin_lock(&adapter->sge.reg_lock);
594 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
595 spin_unlock(&adapter->sge.reg_lock);
596 dma_free_coherent(&pdev->dev,
597 q->rspq.size * sizeof(struct rsp_desc),
598 q->rspq.desc, q->rspq.phys_addr);
599 }
600
601 if (q->netdev)
602 q->netdev->atalk_ptr = NULL;
603
604 memset(q, 0, sizeof(*q));
605 }
606
607 /**
608 * init_qset_cntxt - initialize an SGE queue set context info
609 * @qs: the queue set
610 * @id: the queue set id
611 *
612 * Initializes the TIDs and context ids for the queues of a queue set.
613 */
614 static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
615 {
616 qs->rspq.cntxt_id = id;
617 qs->fl[0].cntxt_id = 2 * id;
618 qs->fl[1].cntxt_id = 2 * id + 1;
619 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
620 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
621 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
622 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
623 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
624 }
625
626 /**
627 * sgl_len - calculates the size of an SGL of the given capacity
628 * @n: the number of SGL entries
629 *
630 * Calculates the number of flits needed for a scatter/gather list that
631 * can hold the given number of entries.
632 */
633 static inline unsigned int sgl_len(unsigned int n)
634 {
635 /* alternatively: 3 * (n / 2) + 2 * (n & 1) */
636 return (3 * n) / 2 + (n & 1);
637 }
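/*
 * Each struct sg_ent packs two 64-bit addresses and two 32-bit lengths into
 * 3 flits, and a trailing odd entry takes 2 more, so for example
 * sgl_len(3) == 5 and sgl_len(4) == 6.
 */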
638
639 /**
640 * flits_to_desc - returns the num of Tx descriptors for the given flits
641 * @n: the number of flits
642 *
643 * Calculates the number of Tx descriptors needed for the supplied number
644 * of flits.
645 */
646 static inline unsigned int flits_to_desc(unsigned int n)
647 {
648 BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
649 return flit_desc_map[n];
650 }
651
652 /**
653 * get_imm_packet - return the next ingress packet buffer from a response
654 * @resp: the response descriptor containing the packet data
655 *
656 * Return a packet containing the immediate data of the given response.
657 */
658 static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
659 {
660 struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
661
662 if (skb) {
663 __skb_put(skb, IMMED_PKT_SIZE);
664 memcpy(skb->data, resp->imm_data, IMMED_PKT_SIZE);
665 }
666 return skb;
667 }
668
669 /**
670 * calc_tx_descs - calculate the number of Tx descriptors for a packet
671 * @skb: the packet
672 *
673 * Returns the number of Tx descriptors needed for the given Ethernet
674 * packet. Ethernet packets require addition of WR and CPL headers.
675 */
676 static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
677 {
678 unsigned int flits;
679
680 if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
681 return 1;
682
683 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
684 if (skb_shinfo(skb)->gso_size)
685 flits++;
686 return flits_to_desc(flits);
687 }
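/*
 * Example: a non-GSO packet with a linear body and two page fragments needs
 * sgl_len(2 + 1) + 2 = 7 flits, which flits_to_desc() maps to a single Tx
 * descriptor; only packets with many fragments span several descriptors.
 */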
688
689 /**
690 * make_sgl - populate a scatter/gather list for a packet
691 * @skb: the packet
692 * @sgp: the SGL to populate
693 * @start: start address of skb main body data to include in the SGL
694 * @len: length of skb main body data to include in the SGL
695 * @pdev: the PCI device
696 *
697 * Generates a scatter/gather list for the buffers that make up a packet
698 * and returns the SGL size in 8-byte words. The caller must size the SGL
699 * appropriately.
700 */
701 static inline unsigned int make_sgl(const struct sk_buff *skb,
702 struct sg_ent *sgp, unsigned char *start,
703 unsigned int len, struct pci_dev *pdev)
704 {
705 dma_addr_t mapping;
706 unsigned int i, j = 0, nfrags;
707
708 if (len) {
709 mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
710 sgp->len[0] = cpu_to_be32(len);
711 sgp->addr[0] = cpu_to_be64(mapping);
712 j = 1;
713 }
714
715 nfrags = skb_shinfo(skb)->nr_frags;
716 for (i = 0; i < nfrags; i++) {
717 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
718
719 mapping = pci_map_page(pdev, frag->page, frag->page_offset,
720 frag->size, PCI_DMA_TODEVICE);
721 sgp->len[j] = cpu_to_be32(frag->size);
722 sgp->addr[j] = cpu_to_be64(mapping);
723 j ^= 1;
724 if (j == 0)
725 ++sgp;
726 }
727 if (j)
728 sgp->len[j] = 0;
729 return ((nfrags + (len != 0)) * 3) / 2 + j;
730 }
731
732 /**
733 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
734 * @adap: the adapter
735 * @q: the Tx queue
736 *
737 * Ring the doorbell if a Tx queue is asleep. There is a natural race
738 * where the HW may go to sleep just after we check; in that case the
739 * interrupt handler will detect the outstanding TX packet and ring the
740 * doorbell for us.
741 *
742 * When GTS is disabled we unconditionally ring the doorbell.
743 */
744 static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
745 {
746 #if USE_GTS
747 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
748 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
749 set_bit(TXQ_LAST_PKT_DB, &q->flags);
750 t3_write_reg(adap, A_SG_KDOORBELL,
751 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
752 }
753 #else
754 wmb(); /* write descriptors before telling HW */
755 t3_write_reg(adap, A_SG_KDOORBELL,
756 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
757 #endif
758 }
759
760 static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
761 {
762 #if SGE_NUM_GENBITS == 2
763 d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
764 #endif
765 }
766
767 /**
768 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
769 * @ndesc: number of Tx descriptors spanned by the SGL
770 * @skb: the packet corresponding to the WR
771 * @d: first Tx descriptor to be written
772 * @pidx: index of above descriptors
773 * @q: the SGE Tx queue
774 * @sgl: the SGL
775 * @flits: number of flits to the start of the SGL in the first descriptor
776 * @sgl_flits: the SGL size in flits
777 * @gen: the Tx descriptor generation
778 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
779 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
780 *
781 * Write a work request header and an associated SGL. If the SGL is
782 * small enough to fit into one Tx descriptor it has already been written
783 * and we just need to write the WR header. Otherwise we distribute the
784 * SGL across the number of descriptors it spans.
785 */
786 static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
787 struct tx_desc *d, unsigned int pidx,
788 const struct sge_txq *q,
789 const struct sg_ent *sgl,
790 unsigned int flits, unsigned int sgl_flits,
791 unsigned int gen, unsigned int wr_hi,
792 unsigned int wr_lo)
793 {
794 struct work_request_hdr *wrp = (struct work_request_hdr *)d;
795 struct tx_sw_desc *sd = &q->sdesc[pidx];
796
797 sd->skb = skb;
798 if (need_skb_unmap()) {
799 struct unmap_info *ui = (struct unmap_info *)skb->cb;
800
801 ui->fragidx = 0;
802 ui->addr_idx = 0;
803 ui->sflit = flits;
804 }
805
806 if (likely(ndesc == 1)) {
807 skb->priority = pidx;
808 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
809 V_WR_SGLSFLT(flits)) | wr_hi;
810 wmb();
811 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
812 V_WR_GEN(gen)) | wr_lo;
813 wr_gen2(d, gen);
814 } else {
815 unsigned int ogen = gen;
816 const u64 *fp = (const u64 *)sgl;
817 struct work_request_hdr *wp = wrp;
818
819 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
820 V_WR_SGLSFLT(flits)) | wr_hi;
821
822 while (sgl_flits) {
823 unsigned int avail = WR_FLITS - flits;
824
825 if (avail > sgl_flits)
826 avail = sgl_flits;
827 memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
828 sgl_flits -= avail;
829 ndesc--;
830 if (!sgl_flits)
831 break;
832
833 fp += avail;
834 d++;
835 sd++;
836 if (++pidx == q->size) {
837 pidx = 0;
838 gen ^= 1;
839 d = q->desc;
840 sd = q->sdesc;
841 }
842
843 sd->skb = skb;
844 wrp = (struct work_request_hdr *)d;
845 wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
846 V_WR_SGLSFLT(1)) | wr_hi;
847 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
848 sgl_flits + 1)) |
849 V_WR_GEN(gen)) | wr_lo;
850 wr_gen2(d, gen);
851 flits = 1;
852 }
853 skb->priority = pidx;
854 wrp->wr_hi |= htonl(F_WR_EOP);
855 wmb();
856 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
857 wr_gen2((struct tx_desc *)wp, ogen);
858 WARN_ON(ndesc != 0);
859 }
860 }
861
862 /**
863 * write_tx_pkt_wr - write a TX_PKT work request
864 * @adap: the adapter
865 * @skb: the packet to send
866 * @pi: the egress interface
867 * @pidx: index of the first Tx descriptor to write
868 * @gen: the generation value to use
869 * @q: the Tx queue
870 * @ndesc: number of descriptors the packet will occupy
871 * @compl: the value of the COMPL bit to use
872 *
873 * Generate a TX_PKT work request to send the supplied packet.
874 */
875 static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
876 const struct port_info *pi,
877 unsigned int pidx, unsigned int gen,
878 struct sge_txq *q, unsigned int ndesc,
879 unsigned int compl)
880 {
881 unsigned int flits, sgl_flits, cntrl, tso_info;
882 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
883 struct tx_desc *d = &q->desc[pidx];
884 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
885
886 cpl->len = htonl(skb->len | 0x80000000);
887 cntrl = V_TXPKT_INTF(pi->port_id);
888
889 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
890 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
891
892 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
893 if (tso_info) {
894 int eth_type;
895 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
896
897 d->flit[2] = 0;
898 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
899 hdr->cntrl = htonl(cntrl);
900 eth_type = skb_network_offset(skb) == ETH_HLEN ?
901 CPL_ETH_II : CPL_ETH_II_VLAN;
902 tso_info |= V_LSO_ETH_TYPE(eth_type) |
903 V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
904 V_LSO_TCPHDR_WORDS(skb->h.th->doff);
905 hdr->lso_info = htonl(tso_info);
906 flits = 3;
907 } else {
908 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
909 cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
910 cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
911 cpl->cntrl = htonl(cntrl);
912
913 if (skb->len <= WR_LEN - sizeof(*cpl)) {
914 q->sdesc[pidx].skb = NULL;
915 if (!skb->data_len)
916 memcpy(&d->flit[2], skb->data, skb->len);
917 else
918 skb_copy_bits(skb, 0, &d->flit[2], skb->len);
919
920 flits = (skb->len + 7) / 8 + 2;
921 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
922 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
923 | F_WR_SOP | F_WR_EOP | compl);
924 wmb();
925 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
926 V_WR_TID(q->token));
927 wr_gen2(d, gen);
928 kfree_skb(skb);
929 return;
930 }
931
932 flits = 2;
933 }
934
935 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
936 sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
937 if (need_skb_unmap())
938 ((struct unmap_info *)skb->cb)->len = skb_headlen(skb);
939
940 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
941 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
942 htonl(V_WR_TID(q->token)));
943 }
944
945 /**
946 * eth_xmit - add a packet to the Ethernet Tx queue
947 * @skb: the packet
948 * @dev: the egress net device
949 *
950 * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
951 */
952 int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
953 {
954 unsigned int ndesc, pidx, credits, gen, compl;
955 const struct port_info *pi = netdev_priv(dev);
956 struct adapter *adap = dev->priv;
957 struct sge_qset *qs = dev2qset(dev);
958 struct sge_txq *q = &qs->txq[TXQ_ETH];
959
960 /*
961 * The chip min packet length is 9 octets but play safe and reject
962 * anything shorter than an Ethernet header.
963 */
964 if (unlikely(skb->len < ETH_HLEN)) {
965 dev_kfree_skb(skb);
966 return NETDEV_TX_OK;
967 }
968
969 spin_lock(&q->lock);
970 reclaim_completed_tx(adap, q);
971
972 credits = q->size - q->in_use;
973 ndesc = calc_tx_descs(skb);
974
975 if (unlikely(credits < ndesc)) {
976 if (!netif_queue_stopped(dev)) {
977 netif_stop_queue(dev);
978 set_bit(TXQ_ETH, &qs->txq_stopped);
979 q->stops++;
980 dev_err(&adap->pdev->dev,
981 "%s: Tx ring %u full while queue awake!\n",
982 dev->name, q->cntxt_id & 7);
983 }
984 spin_unlock(&q->lock);
985 return NETDEV_TX_BUSY;
986 }
987
988 q->in_use += ndesc;
989 if (unlikely(credits - ndesc < q->stop_thres)) {
990 q->stops++;
991 netif_stop_queue(dev);
992 set_bit(TXQ_ETH, &qs->txq_stopped);
993 #if !USE_GTS
994 if (should_restart_tx(q) &&
995 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
996 q->restarts++;
997 netif_wake_queue(dev);
998 }
999 #endif
1000 }
1001
1002 gen = q->gen;
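	/*
	 * Request a Tx completion roughly every 8 descriptors: bit 3 of the
	 * running unacked count is shifted into the WR completion bit via
	 * S_WR_COMPL, and the count is then folded back below 8.
	 */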
1003 q->unacked += ndesc;
1004 compl = (q->unacked & 8) << (S_WR_COMPL - 3);
1005 q->unacked &= 7;
1006 pidx = q->pidx;
1007 q->pidx += ndesc;
1008 if (q->pidx >= q->size) {
1009 q->pidx -= q->size;
1010 q->gen ^= 1;
1011 }
1012
1013 /* update port statistics */
1014 if (skb->ip_summed == CHECKSUM_COMPLETE)
1015 qs->port_stats[SGE_PSTAT_TX_CSUM]++;
1016 if (skb_shinfo(skb)->gso_size)
1017 qs->port_stats[SGE_PSTAT_TSO]++;
1018 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
1019 qs->port_stats[SGE_PSTAT_VLANINS]++;
1020
1021 dev->trans_start = jiffies;
1022 spin_unlock(&q->lock);
1023
1024 /*
1025 * We do not use Tx completion interrupts to free DMAd Tx packets.
1026 * This is good for performance but means that we rely on new Tx
1027 * packets arriving to run the destructors of completed packets,
1028 * which open up space in their sockets' send queues. Sometimes
1029 * we do not get such new packets causing Tx to stall. A single
1030 * UDP transmitter is a good example of this situation. We have
1031 * a clean up timer that periodically reclaims completed packets
1032 * but it doesn't run often enough (nor do we want it to) to prevent
1033 * lengthy stalls. A solution to this problem is to run the
1034 * destructor early, after the packet is queued but before it's DMAd.
1035 * A drawback is that we lie to socket memory accounting, but the amount
1036 * of extra memory is reasonable (limited by the number of Tx
1037 * descriptors), the packets do actually get freed quickly by new
1038 * packets almost always, and for protocols like TCP that wait for
1039 * acks to really free up the data the extra memory is even less.
1040 * On the positive side we run the destructors on the sending CPU
1041 * rather than on a potentially different completing CPU, usually a
1042 * good thing. We also run them without holding our Tx queue lock,
1043 * unlike what reclaim_completed_tx() would otherwise do.
1044 *
1045 * Run the destructor before telling the DMA engine about the packet
1046 * to make sure it doesn't complete and get freed prematurely.
1047 */
1048 if (likely(!skb_shared(skb)))
1049 skb_orphan(skb);
1050
1051 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
1052 check_ring_tx_db(adap, q);
1053 return NETDEV_TX_OK;
1054 }
1055
1056 /**
1057 * write_imm - write a packet into a Tx descriptor as immediate data
1058 * @d: the Tx descriptor to write
1059 * @skb: the packet
1060 * @len: the length of packet data to write as immediate data
1061 * @gen: the generation bit value to write
1062 *
1063 * Writes a packet as immediate data into a Tx descriptor. The packet
1064 * contains a work request at its beginning. We must write the packet
1065 * carefully so the SGE doesn't read accidentally before it's written in
1066 * its entirety.
1067 */
1068 static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1069 unsigned int len, unsigned int gen)
1070 {
1071 struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1072 struct work_request_hdr *to = (struct work_request_hdr *)d;
1073
1074 memcpy(&to[1], &from[1], len - sizeof(*from));
1075 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1076 V_WR_BCNTLFLT(len & 7));
1077 wmb();
1078 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1079 V_WR_LEN((len + 7) / 8));
1080 wr_gen2(d, gen);
1081 kfree_skb(skb);
1082 }
1083
1084 /**
1085 * check_desc_avail - check descriptor availability on a send queue
1086 * @adap: the adapter
1087 * @q: the send queue
1088 * @skb: the packet needing the descriptors
1089 * @ndesc: the number of Tx descriptors needed
1090 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1091 *
1092 * Checks if the requested number of Tx descriptors is available on an
1093 * SGE send queue. If the queue is already suspended or not enough
1094 * descriptors are available the packet is queued for later transmission.
1095 * Must be called with the Tx queue locked.
1096 *
1097 * Returns 0 if enough descriptors are available, 1 if there aren't
1098 * enough descriptors and the packet has been queued, and 2 if the caller
1099 * needs to retry because there weren't enough descriptors at the
1100 * beginning of the call but some freed up in the meantime.
1101 */
1102 static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1103 struct sk_buff *skb, unsigned int ndesc,
1104 unsigned int qid)
1105 {
1106 if (unlikely(!skb_queue_empty(&q->sendq))) {
1107 addq_exit:__skb_queue_tail(&q->sendq, skb);
1108 return 1;
1109 }
1110 if (unlikely(q->size - q->in_use < ndesc)) {
1111 struct sge_qset *qs = txq_to_qset(q, qid);
1112
1113 set_bit(qid, &qs->txq_stopped);
1114 smp_mb__after_clear_bit();
1115
1116 if (should_restart_tx(q) &&
1117 test_and_clear_bit(qid, &qs->txq_stopped))
1118 return 2;
1119
1120 q->stops++;
1121 goto addq_exit;
1122 }
1123 return 0;
1124 }
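/*
 * Callers (ctrl_xmit() and ofld_xmit() below) jump back to their reclaim step
 * when this returns 2, and return NET_XMIT_CN when it returns 1, the packet
 * having been parked on q->sendq for the queue's restart tasklet.
 */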
1125
1126 /**
1127 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1128 * @q: the SGE control Tx queue
1129 *
1130 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1131 * that send only immediate data (presently just the control queues) and
1132 * thus do not have any sk_buffs to release.
1133 */
1134 static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1135 {
1136 unsigned int reclaim = q->processed - q->cleaned;
1137
1138 q->in_use -= reclaim;
1139 q->cleaned += reclaim;
1140 }
1141
1142 static inline int immediate(const struct sk_buff *skb)
1143 {
1144 return skb->len <= WR_LEN && !skb->data_len;
1145 }
1146
1147 /**
1148 * ctrl_xmit - send a packet through an SGE control Tx queue
1149 * @adap: the adapter
1150 * @q: the control queue
1151 * @skb: the packet
1152 *
1153 * Send a packet through an SGE control Tx queue. Packets sent through
1154 * a control queue must fit entirely as immediate data in a single Tx
1155 * descriptor and have no page fragments.
1156 */
1157 static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1158 struct sk_buff *skb)
1159 {
1160 int ret;
1161 struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1162
1163 if (unlikely(!immediate(skb))) {
1164 WARN_ON(1);
1165 dev_kfree_skb(skb);
1166 return NET_XMIT_SUCCESS;
1167 }
1168
1169 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1170 wrp->wr_lo = htonl(V_WR_TID(q->token));
1171
1172 spin_lock(&q->lock);
1173 again:reclaim_completed_tx_imm(q);
1174
1175 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1176 if (unlikely(ret)) {
1177 if (ret == 1) {
1178 spin_unlock(&q->lock);
1179 return NET_XMIT_CN;
1180 }
1181 goto again;
1182 }
1183
1184 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1185
1186 q->in_use++;
1187 if (++q->pidx >= q->size) {
1188 q->pidx = 0;
1189 q->gen ^= 1;
1190 }
1191 spin_unlock(&q->lock);
1192 wmb();
1193 t3_write_reg(adap, A_SG_KDOORBELL,
1194 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1195 return NET_XMIT_SUCCESS;
1196 }
1197
1198 /**
1199 * restart_ctrlq - restart a suspended control queue
1200 * @qs: the queue set containing the control queue
1201 *
1202 * Resumes transmission on a suspended Tx control queue.
1203 */
1204 static void restart_ctrlq(unsigned long data)
1205 {
1206 struct sk_buff *skb;
1207 struct sge_qset *qs = (struct sge_qset *)data;
1208 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1209 struct adapter *adap = qs->netdev->priv;
1210
1211 spin_lock(&q->lock);
1212 again:reclaim_completed_tx_imm(q);
1213
1214 while (q->in_use < q->size && (skb = __skb_dequeue(&q->sendq)) != NULL) {
1215
1216 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1217
1218 if (++q->pidx >= q->size) {
1219 q->pidx = 0;
1220 q->gen ^= 1;
1221 }
1222 q->in_use++;
1223 }
1224
1225 if (!skb_queue_empty(&q->sendq)) {
1226 set_bit(TXQ_CTRL, &qs->txq_stopped);
1227 smp_mb__after_clear_bit();
1228
1229 if (should_restart_tx(q) &&
1230 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1231 goto again;
1232 q->stops++;
1233 }
1234
1235 spin_unlock(&q->lock);
1236 t3_write_reg(adap, A_SG_KDOORBELL,
1237 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1238 }
1239
1240 /*
1241 * Send a management message through control queue 0
1242 */
1243 int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1244 {
1245 return ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1246 }
1247
1248 /**
1249 * deferred_unmap_destructor - unmap a packet when it is freed
1250 * @skb: the packet
1251 *
1252 * This is the packet destructor used for Tx packets that need to remain
1253 * mapped until they are freed rather than until their Tx descriptors are
1254 * freed.
1255 */
1256 static void deferred_unmap_destructor(struct sk_buff *skb)
1257 {
1258 int i;
1259 const dma_addr_t *p;
1260 const struct skb_shared_info *si;
1261 const struct deferred_unmap_info *dui;
1262 const struct unmap_info *ui = (struct unmap_info *)skb->cb;
1263
1264 dui = (struct deferred_unmap_info *)skb->head;
1265 p = dui->addr;
1266
1267 if (ui->len)
1268 pci_unmap_single(dui->pdev, *p++, ui->len, PCI_DMA_TODEVICE);
1269
1270 si = skb_shinfo(skb);
1271 for (i = 0; i < si->nr_frags; i++)
1272 pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
1273 PCI_DMA_TODEVICE);
1274 }
1275
1276 static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
1277 const struct sg_ent *sgl, int sgl_flits)
1278 {
1279 dma_addr_t *p;
1280 struct deferred_unmap_info *dui;
1281
1282 dui = (struct deferred_unmap_info *)skb->head;
1283 dui->pdev = pdev;
1284 for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
1285 *p++ = be64_to_cpu(sgl->addr[0]);
1286 *p++ = be64_to_cpu(sgl->addr[1]);
1287 }
1288 if (sgl_flits)
1289 *p = be64_to_cpu(sgl->addr[0]);
1290 }
1291
1292 /**
1293 * write_ofld_wr - write an offload work request
1294 * @adap: the adapter
1295 * @skb: the packet to send
1296 * @q: the Tx queue
1297 * @pidx: index of the first Tx descriptor to write
1298 * @gen: the generation value to use
1299 * @ndesc: number of descriptors the packet will occupy
1300 *
1301 * Write an offload work request to send the supplied packet. The packet
1302 * data already carry the work request with most fields populated.
1303 */
1304 static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1305 struct sge_txq *q, unsigned int pidx,
1306 unsigned int gen, unsigned int ndesc)
1307 {
1308 unsigned int sgl_flits, flits;
1309 struct work_request_hdr *from;
1310 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1311 struct tx_desc *d = &q->desc[pidx];
1312
1313 if (immediate(skb)) {
1314 q->sdesc[pidx].skb = NULL;
1315 write_imm(d, skb, skb->len, gen);
1316 return;
1317 }
1318
1319 /* Only TX_DATA builds SGLs */
1320
1321 from = (struct work_request_hdr *)skb->data;
1322 memcpy(&d->flit[1], &from[1],
1323 skb_transport_offset(skb) - sizeof(*from));
1324
1325 flits = skb_transport_offset(skb) / 8;
1326 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1327 sgl_flits = make_sgl(skb, sgp, skb->h.raw, skb->tail - skb->h.raw,
1328 adap->pdev);
1329 if (need_skb_unmap()) {
1330 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1331 skb->destructor = deferred_unmap_destructor;
1332 ((struct unmap_info *)skb->cb)->len = skb->tail - skb->h.raw;
1333 }
1334
1335 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1336 gen, from->wr_hi, from->wr_lo);
1337 }
1338
1339 /**
1340 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1341 * @skb: the packet
1342 *
1343 * Returns the number of Tx descriptors needed for the given offload
1344 * packet. These packets are already fully constructed.
1345 */
1346 static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1347 {
1348 unsigned int flits, cnt = skb_shinfo(skb)->nr_frags;
1349
1350 if (skb->len <= WR_LEN && cnt == 0)
1351 return 1; /* packet fits as immediate data */
1352
1353 flits = skb_transport_offset(skb) / 8; /* headers */
1354 if (skb->tail != skb->h.raw)
1355 cnt++;
1356 return flits_to_desc(flits + sgl_len(cnt));
1357 }
1358
1359 /**
1360 * ofld_xmit - send a packet through an offload queue
1361 * @adap: the adapter
1362 * @q: the Tx offload queue
1363 * @skb: the packet
1364 *
1365 * Send an offload packet through an SGE offload queue.
1366 */
1367 static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1368 struct sk_buff *skb)
1369 {
1370 int ret;
1371 unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1372
1373 spin_lock(&q->lock);
1374 again:reclaim_completed_tx(adap, q);
1375
1376 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1377 if (unlikely(ret)) {
1378 if (ret == 1) {
1379 skb->priority = ndesc; /* save for restart */
1380 spin_unlock(&q->lock);
1381 return NET_XMIT_CN;
1382 }
1383 goto again;
1384 }
1385
1386 gen = q->gen;
1387 q->in_use += ndesc;
1388 pidx = q->pidx;
1389 q->pidx += ndesc;
1390 if (q->pidx >= q->size) {
1391 q->pidx -= q->size;
1392 q->gen ^= 1;
1393 }
1394 spin_unlock(&q->lock);
1395
1396 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1397 check_ring_tx_db(adap, q);
1398 return NET_XMIT_SUCCESS;
1399 }
1400
1401 /**
1402 * restart_offloadq - restart a suspended offload queue
1403 * @qs: the queue set containing the offload queue
1404 *
1405 * Resumes transmission on a suspended Tx offload queue.
1406 */
1407 static void restart_offloadq(unsigned long data)
1408 {
1409 struct sk_buff *skb;
1410 struct sge_qset *qs = (struct sge_qset *)data;
1411 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1412 struct adapter *adap = qs->netdev->priv;
1413
1414 spin_lock(&q->lock);
1415 again:reclaim_completed_tx(adap, q);
1416
1417 while ((skb = skb_peek(&q->sendq)) != NULL) {
1418 unsigned int gen, pidx;
1419 unsigned int ndesc = skb->priority;
1420
1421 if (unlikely(q->size - q->in_use < ndesc)) {
1422 set_bit(TXQ_OFLD, &qs->txq_stopped);
1423 smp_mb__after_clear_bit();
1424
1425 if (should_restart_tx(q) &&
1426 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1427 goto again;
1428 q->stops++;
1429 break;
1430 }
1431
1432 gen = q->gen;
1433 q->in_use += ndesc;
1434 pidx = q->pidx;
1435 q->pidx += ndesc;
1436 if (q->pidx >= q->size) {
1437 q->pidx -= q->size;
1438 q->gen ^= 1;
1439 }
1440 __skb_unlink(skb, &q->sendq);
1441 spin_unlock(&q->lock);
1442
1443 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1444 spin_lock(&q->lock);
1445 }
1446 spin_unlock(&q->lock);
1447
1448 #if USE_GTS
1449 set_bit(TXQ_RUNNING, &q->flags);
1450 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1451 #endif
1452 t3_write_reg(adap, A_SG_KDOORBELL,
1453 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1454 }
1455
1456 /**
1457 * queue_set - return the queue set a packet should use
1458 * @skb: the packet
1459 *
1460 * Maps a packet to the SGE queue set it should use. The desired queue
1461 * set is carried in bits 1-3 in the packet's priority.
1462 */
1463 static inline int queue_set(const struct sk_buff *skb)
1464 {
1465 return skb->priority >> 1;
1466 }
1467
1468 /**
1469 * is_ctrl_pkt - return whether an offload packet is a control packet
1470 * @skb: the packet
1471 *
1472 * Determines whether an offload packet should use an OFLD or a CTRL
1473 * Tx queue. This is indicated by bit 0 in the packet's priority.
1474 */
1475 static inline int is_ctrl_pkt(const struct sk_buff *skb)
1476 {
1477 return skb->priority & 1;
1478 }
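/*
 * Example: an offload packet with skb->priority == 5 (binary 101) is sent on
 * queue set 2's CTRL queue, while priority == 4 selects the same queue set's
 * OFLD queue.
 */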
1479
1480 /**
1481 * t3_offload_tx - send an offload packet
1482 * @tdev: the offload device to send to
1483 * @skb: the packet
1484 *
1485 * Sends an offload packet. We use the packet priority to select the
1486 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1487 * should be sent as regular or control, bits 1-3 select the queue set.
1488 */
1489 int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1490 {
1491 struct adapter *adap = tdev2adap(tdev);
1492 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1493
1494 if (unlikely(is_ctrl_pkt(skb)))
1495 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1496
1497 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1498 }
1499
1500 /**
1501 * offload_enqueue - add an offload packet to an SGE offload receive queue
1502 * @q: the SGE response queue
1503 * @skb: the packet
1504 *
1505 * Add a new offload packet to an SGE response queue's offload packet
1506 * queue. If the packet is the first on the queue it schedules the RX
1507 * softirq to process the queue.
1508 */
1509 static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1510 {
1511 skb->next = skb->prev = NULL;
1512 if (q->rx_tail)
1513 q->rx_tail->next = skb;
1514 else {
1515 struct sge_qset *qs = rspq_to_qset(q);
1516
1517 if (__netif_rx_schedule_prep(qs->netdev))
1518 __netif_rx_schedule(qs->netdev);
1519 q->rx_head = skb;
1520 }
1521 q->rx_tail = skb;
1522 }
1523
1524 /**
1525 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1526 * @tdev: the offload device that will be receiving the packets
1527 * @q: the SGE response queue that assembled the bundle
1528 * @skbs: the partial bundle
1529 * @n: the number of packets in the bundle
1530 *
1531 * Delivers a (partial) bundle of Rx offload packets to an offload device.
1532 */
1533 static inline void deliver_partial_bundle(struct t3cdev *tdev,
1534 struct sge_rspq *q,
1535 struct sk_buff *skbs[], int n)
1536 {
1537 if (n) {
1538 q->offload_bundles++;
1539 tdev->recv(tdev, skbs, n);
1540 }
1541 }
1542
1543 /**
1544 * ofld_poll - NAPI handler for offload packets in interrupt mode
1545 * @dev: the network device doing the polling
1546 * @budget: polling budget
1547 *
1548 * The NAPI handler for offload packets when a response queue is serviced
1549 * by the hard interrupt handler, i.e., when it's operating in non-polling
1550 * mode. Creates small packet batches and sends them through the offload
1551 * receive handler. Batches need to be of modest size as we do prefetches
1552 * on the packets in each.
1553 */
1554 static int ofld_poll(struct net_device *dev, int *budget)
1555 {
1556 struct adapter *adapter = dev->priv;
1557 struct sge_qset *qs = dev2qset(dev);
1558 struct sge_rspq *q = &qs->rspq;
1559 int work_done, limit = min(*budget, dev->quota), avail = limit;
1560
1561 while (avail) {
1562 struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE];
1563 int ngathered;
1564
1565 spin_lock_irq(&q->lock);
1566 head = q->rx_head;
1567 if (!head) {
1568 work_done = limit - avail;
1569 *budget -= work_done;
1570 dev->quota -= work_done;
1571 __netif_rx_complete(dev);
1572 spin_unlock_irq(&q->lock);
1573 return 0;
1574 }
1575
1576 tail = q->rx_tail;
1577 q->rx_head = q->rx_tail = NULL;
1578 spin_unlock_irq(&q->lock);
1579
1580 for (ngathered = 0; avail && head; avail--) {
1581 prefetch(head->data);
1582 skbs[ngathered] = head;
1583 head = head->next;
1584 skbs[ngathered]->next = NULL;
1585 if (++ngathered == RX_BUNDLE_SIZE) {
1586 q->offload_bundles++;
1587 adapter->tdev.recv(&adapter->tdev, skbs,
1588 ngathered);
1589 ngathered = 0;
1590 }
1591 }
1592 if (head) { /* splice remaining packets back onto Rx queue */
1593 spin_lock_irq(&q->lock);
1594 tail->next = q->rx_head;
1595 if (!q->rx_head)
1596 q->rx_tail = tail;
1597 q->rx_head = head;
1598 spin_unlock_irq(&q->lock);
1599 }
1600 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1601 }
1602 work_done = limit - avail;
1603 *budget -= work_done;
1604 dev->quota -= work_done;
1605 return 1;
1606 }
1607
1608 /**
1609 * rx_offload - process a received offload packet
1610 * @tdev: the offload device receiving the packet
1611 * @rq: the response queue that received the packet
1612 * @skb: the packet
1613 * @rx_gather: a gather list of packets if we are building a bundle
1614 * @gather_idx: index of the next available slot in the bundle
1615 *
1616 * Process an ingress offload packet and add it to the offload ingress
1617 * queue. Returns the index of the next available slot in the bundle.
1618 */
1619 static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1620 struct sk_buff *skb, struct sk_buff *rx_gather[],
1621 unsigned int gather_idx)
1622 {
1623 rq->offload_pkts++;
1624 skb_reset_mac_header(skb);
1625 skb_reset_network_header(skb);
1626 skb_reset_transport_header(skb);
1627
1628 if (rq->polling) {
1629 rx_gather[gather_idx++] = skb;
1630 if (gather_idx == RX_BUNDLE_SIZE) {
1631 tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1632 gather_idx = 0;
1633 rq->offload_bundles++;
1634 }
1635 } else
1636 offload_enqueue(rq, skb);
1637
1638 return gather_idx;
1639 }
1640
1641 /**
1642 * restart_tx - check whether to restart suspended Tx queues
1643 * @qs: the queue set to resume
1644 *
1645 * Restarts suspended Tx queues of an SGE queue set if they have enough
1646 * free resources to resume operation.
1647 */
1648 static void restart_tx(struct sge_qset *qs)
1649 {
1650 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1651 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1652 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1653 qs->txq[TXQ_ETH].restarts++;
1654 if (netif_running(qs->netdev))
1655 netif_wake_queue(qs->netdev);
1656 }
1657
1658 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1659 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1660 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1661 qs->txq[TXQ_OFLD].restarts++;
1662 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1663 }
1664 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
1665 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1666 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
1667 qs->txq[TXQ_CTRL].restarts++;
1668 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
1669 }
1670 }
1671
1672 /**
1673 * rx_eth - process an ingress ethernet packet
1674 * @adap: the adapter
1675 * @rq: the response queue that received the packet
1676 * @skb: the packet
1677 * @pad: amount of padding at the start of the buffer
1678 *
1679 * Process an ingress ethernet packet and deliver it to the stack.
1680 * The padding is 2 if the packet was delivered in an Rx buffer and 0
1681 * if it was immediate data in a response.
1682 */
1683 static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1684 struct sk_buff *skb, int pad)
1685 {
1686 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
1687 struct port_info *pi;
1688
1689 skb_pull(skb, sizeof(*p) + pad);
1690 skb->dev->last_rx = jiffies;
1691 skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
1692 pi = netdev_priv(skb->dev);
1693 if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff &&
1694 !p->fragment) {
1695 rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
1696 skb->ip_summed = CHECKSUM_UNNECESSARY;
1697 } else
1698 skb->ip_summed = CHECKSUM_NONE;
1699
1700 if (unlikely(p->vlan_valid)) {
1701 struct vlan_group *grp = pi->vlan_grp;
1702
1703 rspq_to_qset(rq)->port_stats[SGE_PSTAT_VLANEX]++;
1704 if (likely(grp))
1705 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
1706 rq->polling);
1707 else
1708 dev_kfree_skb_any(skb);
1709 } else if (rq->polling)
1710 netif_receive_skb(skb);
1711 else
1712 netif_rx(skb);
1713 }
1714
1715 #define SKB_DATA_SIZE 128
1716
1717 static void skb_data_init(struct sk_buff *skb, struct sge_fl_page *p,
1718 unsigned int len)
1719 {
1720 skb->len = len;
1721 if (len <= SKB_DATA_SIZE) {
1722 memcpy(skb->data, p->va, len);
1723 skb->tail += len;
1724 put_page(p->frag.page);
1725 } else {
1726 memcpy(skb->data, p->va, SKB_DATA_SIZE);
1727 skb_shinfo(skb)->frags[0].page = p->frag.page;
1728 skb_shinfo(skb)->frags[0].page_offset =
1729 p->frag.page_offset + SKB_DATA_SIZE;
1730 skb_shinfo(skb)->frags[0].size = len - SKB_DATA_SIZE;
1731 skb_shinfo(skb)->nr_frags = 1;
1732 skb->data_len = len - SKB_DATA_SIZE;
1733 skb->tail += SKB_DATA_SIZE;
1734 skb->truesize += skb->data_len;
1735 }
1736 }
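/*
 * Example: for a 1500-byte packet received in a page chunk, the first
 * SKB_DATA_SIZE (128) bytes are copied into the skb's linear area and the
 * remaining 1372 bytes stay in the original page as frags[0].
 */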
1737
1738 /**
1739 * get_packet - return the next ingress packet buffer from a free list
1740 * @adap: the adapter that received the packet
1741 * @fl: the SGE free list holding the packet
1742 * @len: the packet length including any SGE padding
1743 * @drop_thres: # of remaining buffers before we start dropping packets
1744 *
1745 * Get the next packet from a free list and complete setup of the
1746 * sk_buff. If the packet is small we make a copy and recycle the
1747 * original buffer, otherwise we use the original buffer itself. If a
1748 * positive drop threshold is supplied packets are dropped and their
1749 * buffers recycled if (a) the number of remaining buffers is under the
1750 * threshold and the packet is too big to copy, or (b) the packet should
1751 * be copied but there is no memory for the copy.
1752 */
1753 static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
1754 unsigned int len, unsigned int drop_thres)
1755 {
1756 struct sk_buff *skb = NULL;
1757 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
1758
1759 prefetch(sd->t.skb->data);
1760
1761 if (len <= SGE_RX_COPY_THRES) {
1762 skb = alloc_skb(len, GFP_ATOMIC);
1763 if (likely(skb != NULL)) {
1764 struct rx_desc *d = &fl->desc[fl->cidx];
1765 dma_addr_t mapping =
1766 (dma_addr_t)((u64) be32_to_cpu(d->addr_hi) << 32 |
1767 be32_to_cpu(d->addr_lo));
1768
1769 __skb_put(skb, len);
1770 pci_dma_sync_single_for_cpu(adap->pdev, mapping, len,
1771 PCI_DMA_FROMDEVICE);
1772 memcpy(skb->data, sd->t.skb->data, len);
1773 pci_dma_sync_single_for_device(adap->pdev, mapping, len,
1774 PCI_DMA_FROMDEVICE);
1775 } else if (!drop_thres)
1776 goto use_orig_buf;
1777 recycle:
1778 recycle_rx_buf(adap, fl, fl->cidx);
1779 return skb;
1780 }
1781
1782 if (unlikely(fl->credits < drop_thres))
1783 goto recycle;
1784
1785 use_orig_buf:
1786 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
1787 fl->buf_size, PCI_DMA_FROMDEVICE);
1788 skb = sd->t.skb;
1789 skb_put(skb, len);
1790 __refill_fl(adap, fl);
1791 return skb;
1792 }
1793
1794 /**
1795 * handle_rsp_cntrl_info - handles control information in a response
1796 * @qs: the queue set corresponding to the response
1797 * @flags: the response control flags
1798 *
1799 * Handles the control information of an SGE response, such as GTS
1800 * indications and completion credits for the queue set's Tx queues.
1801 * HW coalesces credits, we don't do any extra SW coalescing.
1802 */
1803 static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
1804 {
1805 unsigned int credits;
1806
1807 #if USE_GTS
1808 if (flags & F_RSPD_TXQ0_GTS)
1809 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
1810 #endif
1811
1812 credits = G_RSPD_TXQ0_CR(flags);
1813 if (credits)
1814 qs->txq[TXQ_ETH].processed += credits;
1815
1816 credits = G_RSPD_TXQ2_CR(flags);
1817 if (credits)
1818 qs->txq[TXQ_CTRL].processed += credits;
1819
1820 #if USE_GTS
1821 if (flags & F_RSPD_TXQ1_GTS)
1822 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
1823 #endif
1824 credits = G_RSPD_TXQ1_CR(flags);
1825 if (credits)
1826 qs->txq[TXQ_OFLD].processed += credits;
1827 }
1828
1829 /**
1830 * check_ring_db - check if we need to ring any doorbells
1831 * @adapter: the adapter
1832 * @qs: the queue set whose Tx queues are to be examined
1833 * @sleeping: indicates which Tx queue sent GTS
1834 *
1835 * Checks if some of a queue set's Tx queues need to ring their doorbells
1836 * to resume transmission after idling while they still have unprocessed
1837 * descriptors.
1838 */
1839 static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
1840 unsigned int sleeping)
1841 {
1842 if (sleeping & F_RSPD_TXQ0_GTS) {
1843 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1844
1845 if (txq->cleaned + txq->in_use != txq->processed &&
1846 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1847 set_bit(TXQ_RUNNING, &txq->flags);
1848 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1849 V_EGRCNTX(txq->cntxt_id));
1850 }
1851 }
1852
1853 if (sleeping & F_RSPD_TXQ1_GTS) {
1854 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
1855
1856 if (txq->cleaned + txq->in_use != txq->processed &&
1857 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1858 set_bit(TXQ_RUNNING, &txq->flags);
1859 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1860 V_EGRCNTX(txq->cntxt_id));
1861 }
1862 }
1863 }
1864
1865 /**
1866 * is_new_response - check if a response is newly written
1867 * @r: the response descriptor
1868 * @q: the response queue
1869 *
1870 * Returns true if a response descriptor contains a yet unprocessed
1871 * response.
1872 */
1873 static inline int is_new_response(const struct rsp_desc *r,
1874 const struct sge_rspq *q)
1875 {
1876 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
1877 }
1878
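/* Response flags that require Tx housekeeping: GTS indications and credit returns */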
1879 #define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
1880 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
1881 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
1882 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
1883 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
1884
1885 /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
1886 #define NOMEM_INTR_DELAY 2500
1887
1888 /**
1889 * process_responses - process responses from an SGE response queue
1890 * @adap: the adapter
1891 * @qs: the queue set to which the response queue belongs
1892 * @budget: how many responses can be processed in this round
1893 *
1894 * Process responses from an SGE response queue up to the supplied budget.
1895 * Responses include received packets as well as credits and other events
1896 * for the queues that belong to the response queue's queue set.
1897 * A negative budget is effectively unlimited.
1898 *
1899 * Additionally, choose the interrupt holdoff time for the next interrupt
1900 * on this queue. If the system is under memory shortage, use a fairly
1901 * long delay to help recovery.
1902 */
1903 static int process_responses(struct adapter *adap, struct sge_qset *qs,
1904 int budget)
1905 {
1906 struct sge_rspq *q = &qs->rspq;
1907 struct rsp_desc *r = &q->desc[q->cidx];
1908 int budget_left = budget;
1909 unsigned int sleeping = 0;
1910 struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
1911 int ngathered = 0;
1912
1913 q->next_holdoff = q->holdoff_tmr;
1914
1915 while (likely(budget_left && is_new_response(r, q))) {
1916 int eth, ethpad = 2;
1917 struct sk_buff *skb = NULL;
1918 u32 len, flags = ntohl(r->flags);
1919 u32 rss_hi = *(const u32 *)r, rss_lo = r->rss_hdr.rss_hash_val;
1920
1921 eth = r->rss_hdr.opcode == CPL_RX_PKT;
1922
1923 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
1924 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
1925 if (!skb)
1926 goto no_mem;
1927
1928 memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
1929 skb->data[0] = CPL_ASYNC_NOTIF;
1930 rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
1931 q->async_notif++;
1932 } else if (flags & F_RSPD_IMM_DATA_VALID) {
1933 skb = get_imm_packet(r);
1934 if (unlikely(!skb)) {
1935 no_mem:
1936 q->next_holdoff = NOMEM_INTR_DELAY;
1937 q->nomem++;
1938 /* consume one credit since we tried */
1939 budget_left--;
1940 break;
1941 }
1942 q->imm_data++;
1943 ethpad = 0;
1944 } else if ((len = ntohl(r->len_cq)) != 0) {
1945 struct sge_fl *fl =
1946 (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
1947
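/*
 * Page-mode free list: the buffer is a page chunk, so the skb is
 * built with skb_data_init() (partial copy plus page fragment).
 */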
1948 if (fl->buf_size == RX_PAGE_SIZE) {
1949 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
1950 struct sge_fl_page *p = &sd->t.page;
1951
1952 prefetch(p->va);
1953 prefetch(p->va + L1_CACHE_BYTES);
1954
1955 __refill_fl(adap, fl);
1956
1957 pci_unmap_single(adap->pdev,
1958 pci_unmap_addr(sd, dma_addr),
1959 fl->buf_size,
1960 PCI_DMA_FROMDEVICE);
1961
1962 if (eth) {
1963 if (unlikely(fl->credits <
1964 SGE_RX_DROP_THRES))
1965 goto eth_recycle;
1966
1967 skb = alloc_skb(SKB_DATA_SIZE,
1968 GFP_ATOMIC);
1969 if (unlikely(!skb)) {
1970 eth_recycle:
1971 q->rx_drops++;
1972 recycle_rx_buf(adap, fl,
1973 fl->cidx);
1974 goto eth_done;
1975 }
1976 } else {
1977 skb = alloc_skb(SKB_DATA_SIZE,
1978 GFP_ATOMIC);
1979 if (unlikely(!skb))
1980 goto no_mem;
1981 }
1982
1983 skb_data_init(skb, p, G_RSPD_LEN(len));
1984 eth_done:
1985 fl->credits--;
1986 q->eth_pkts++;
1987 } else {
1988 fl->credits--;
1989 skb = get_packet(adap, fl, G_RSPD_LEN(len),
1990 eth ? SGE_RX_DROP_THRES : 0);
1991 }
1992
1993 if (++fl->cidx == fl->size)
1994 fl->cidx = 0;
1995 } else
1996 q->pure_rsps++;
1997
1998 if (flags & RSPD_CTRL_MASK) {
1999 sleeping |= flags & RSPD_GTS_MASK;
2000 handle_rsp_cntrl_info(qs, flags);
2001 }
2002
2003 r++;
2004 if (unlikely(++q->cidx == q->size)) {
2005 q->cidx = 0;
2006 q->gen ^= 1;
2007 r = q->desc;
2008 }
2009 prefetch(r);
2010
2011 if (++q->credits >= (q->size / 4)) {
2012 refill_rspq(adap, q, q->credits);
2013 q->credits = 0;
2014 }
2015
2016 if (skb) {
2017 /* Preserve the RSS info in csum & priority */
2018 skb->csum = rss_hi;
2019 skb->priority = rss_lo;
2020
2021 if (eth)
2022 rx_eth(adap, q, skb, ethpad);
2023 else {
2024 if (unlikely(r->rss_hdr.opcode ==
2025 CPL_TRACE_PKT))
2026 __skb_pull(skb, ethpad);
2027
2028 ngathered = rx_offload(&adap->tdev, q,
2029 skb, offload_skbs,
2030 ngathered);
2031 }
2032 }
2033 --budget_left;
2034 }
2035
2036 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
2037 if (sleeping)
2038 check_ring_db(adap, qs, sleeping);
2039
2040 smp_mb(); /* commit Tx queue .processed updates */
2041 if (unlikely(qs->txq_stopped != 0))
2042 restart_tx(qs);
2043
2044 budget -= budget_left;
2045 return budget;
2046 }
2047
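/*
 * A response is "pure" if it carries no data: no async notification, no
 * immediate data, and no free-list buffer (len_cq is 0).
 */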
2048 static inline int is_pure_response(const struct rsp_desc *r)
2049 {
2050 u32 n = ntohl(r->flags) & (F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
2051
2052 return (n | r->len_cq) == 0;
2053 }
2054
2055 /**
2056 * napi_rx_handler - the NAPI handler for Rx processing
2057 * @dev: the net device
2058 * @budget: how many packets we can process in this round
2059 *
2060 * Handler for new data events when using NAPI.
2061 */
2062 static int napi_rx_handler(struct net_device *dev, int *budget)
2063 {
2064 struct adapter *adap = dev->priv;
2065 struct sge_qset *qs = dev2qset(dev);
2066 int effective_budget = min(*budget, dev->quota);
2067
2068 int work_done = process_responses(adap, qs, effective_budget);
2069 *budget -= work_done;
2070 dev->quota -= work_done;
2071
2072 if (work_done >= effective_budget)
2073 return 1;
2074
2075 netif_rx_complete(dev);
2076
2077 /*
2078 * Because we don't atomically flush the following write it is
2079 * possible that in very rare cases it can reach the device in a way
2080 * that races with a new response being written plus an error interrupt
2081 * causing the NAPI interrupt handler below to return unhandled status
2082 * to the OS. To protect against this would require flushing the write
2083 * and doing both the write and the flush with interrupts off. Way too
2084 * expensive and unjustifiable given the rarity of the race.
2085 *
2086 * The race cannot happen at all with MSI-X.
2087 */
2088 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2089 V_NEWTIMER(qs->rspq.next_holdoff) |
2090 V_NEWINDEX(qs->rspq.cidx));
2091 return 0;
2092 }
2093
2094 /*
2095 * Returns true if the device is already scheduled for polling.
2096 */
2097 static inline int napi_is_scheduled(struct net_device *dev)
2098 {
2099 return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
2100 }
2101
2102 /**
2103 * process_pure_responses - process pure responses from a response queue
2104 * @adap: the adapter
2105 * @qs: the queue set owning the response queue
2106 * @r: the first pure response to process
2107 *
2108 * A simpler version of process_responses() that handles only pure (i.e.,
2109 * non-data-carrying) responses. Such responses are too lightweight to
2110 * justify calling a softirq under NAPI, so we handle them specially in
2111 * the interrupt handler. The function is called with a pointer to a
2112 * response, which the caller must ensure is a valid pure response.
2113 *
2114 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2115 */
2116 static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2117 struct rsp_desc *r)
2118 {
2119 struct sge_rspq *q = &qs->rspq;
2120 unsigned int sleeping = 0;
2121
2122 do {
2123 u32 flags = ntohl(r->flags);
2124
2125 r++;
2126 if (unlikely(++q->cidx == q->size)) {
2127 q->cidx = 0;
2128 q->gen ^= 1;
2129 r = q->desc;
2130 }
2131 prefetch(r);
2132
2133 if (flags & RSPD_CTRL_MASK) {
2134 sleeping |= flags & RSPD_GTS_MASK;
2135 handle_rsp_cntrl_info(qs, flags);
2136 }
2137
2138 q->pure_rsps++;
2139 if (++q->credits >= (q->size / 4)) {
2140 refill_rspq(adap, q, q->credits);
2141 q->credits = 0;
2142 }
2143 } while (is_new_response(r, q) && is_pure_response(r));
2144
2145 if (sleeping)
2146 check_ring_db(adap, qs, sleeping);
2147
2148 smp_mb(); /* commit Tx queue .processed updates */
2149 if (unlikely(qs->txq_stopped != 0))
2150 restart_tx(qs);
2151
2152 return is_new_response(r, q);
2153 }
2154
2155 /**
2156 * handle_responses - decide what to do with new responses in NAPI mode
2157 * @adap: the adapter
2158 * @q: the response queue
2159 *
2160 * This is used by the NAPI interrupt handlers to decide what to do with
2161 * new SGE responses. If there are no new responses, it returns -1. If
2162 * there are new responses and they are pure (i.e., non-data-carrying),
2163 * it handles them straight in hard interrupt context as they are very
2164 * cheap and don't deliver any packets. Finally, if there are any
2165 * data-signaling responses, it schedules the NAPI handler. Returns 1 if it
2166 * schedules NAPI, 0 if all new responses were pure.
2167 *
2168 * The caller must ascertain NAPI is not already running.
2169 */
2170 static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2171 {
2172 struct sge_qset *qs = rspq_to_qset(q);
2173 struct rsp_desc *r = &q->desc[q->cidx];
2174
2175 if (!is_new_response(r, q))
2176 return -1;
2177 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2178 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2179 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2180 return 0;
2181 }
2182 if (likely(__netif_rx_schedule_prep(qs->netdev)))
2183 __netif_rx_schedule(qs->netdev);
2184 return 1;
2185 }
2186
2187 /*
2188 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2189 * (i.e., response queue serviced in hard interrupt).
2190 */
2191 irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2192 {
2193 struct sge_qset *qs = cookie;
2194 struct adapter *adap = qs->netdev->priv;
2195 struct sge_rspq *q = &qs->rspq;
2196
2197 spin_lock(&q->lock);
2198 if (process_responses(adap, qs, -1) == 0)
2199 q->unhandled_irqs++;
2200 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2201 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2202 spin_unlock(&q->lock);
2203 return IRQ_HANDLED;
2204 }
2205
2206 /*
2207 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2208 * (i.e., response queue serviced by NAPI polling).
2209 */
2210 irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2211 {
2212 struct sge_qset *qs = cookie;
2213 struct adapter *adap = qs->netdev->priv;
2214 struct sge_rspq *q = &qs->rspq;
2215
2216 spin_lock(&q->lock);
2217 BUG_ON(napi_is_scheduled(qs->netdev));
2218
2219 if (handle_responses(adap, q) < 0)
2220 q->unhandled_irqs++;
2221 spin_unlock(&q->lock);
2222 return IRQ_HANDLED;
2223 }
2224
2225 /*
2226 * The non-NAPI MSI interrupt handler. This needs to handle data events from
2227 * SGE response queues as well as error and other async events as they all use
2228 * the same MSI vector. We use one SGE response queue per port in this mode
2229 * and protect all response queues with queue 0's lock.
2230 */
2231 static irqreturn_t t3_intr_msi(int irq, void *cookie)
2232 {
2233 int new_packets = 0;
2234 struct adapter *adap = cookie;
2235 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2236
2237 spin_lock(&q->lock);
2238
2239 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2240 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2241 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2242 new_packets = 1;
2243 }
2244
2245 if (adap->params.nports == 2 &&
2246 process_responses(adap, &adap->sge.qs[1], -1)) {
2247 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2248
2249 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2250 V_NEWTIMER(q1->next_holdoff) |
2251 V_NEWINDEX(q1->cidx));
2252 new_packets = 1;
2253 }
2254
2255 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2256 q->unhandled_irqs++;
2257
2258 spin_unlock(&q->lock);
2259 return IRQ_HANDLED;
2260 }
2261
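/*
 * Schedule NAPI for a response queue that has new responses and is not
 * already being polled.  Returns 1 if NAPI was scheduled, 0 otherwise.
 */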
2262 static int rspq_check_napi(struct net_device *dev, struct sge_rspq *q)
2263 {
2264 if (!napi_is_scheduled(dev) && is_new_response(&q->desc[q->cidx], q)) {
2265 if (likely(__netif_rx_schedule_prep(dev)))
2266 __netif_rx_schedule(dev);
2267 return 1;
2268 }
2269 return 0;
2270 }
2271
2272 /*
2273 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2274 * by NAPI polling). Handles data events from SGE response queues as well as
2275 * error and other async events as they all use the same MSI vector. We use
2276 * one SGE response queue per port in this mode and protect all response
2277 * queues with queue 0's lock.
2278 */
2279 irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
2280 {
2281 int new_packets;
2282 struct adapter *adap = cookie;
2283 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2284
2285 spin_lock(&q->lock);
2286
2287 new_packets = rspq_check_napi(adap->sge.qs[0].netdev, q);
2288 if (adap->params.nports == 2)
2289 new_packets += rspq_check_napi(adap->sge.qs[1].netdev,
2290 &adap->sge.qs[1].rspq);
2291 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2292 q->unhandled_irqs++;
2293
2294 spin_unlock(&q->lock);
2295 return IRQ_HANDLED;
2296 }
2297
2298 /*
2299 * A helper function that processes responses and issues GTS.
2300 */
2301 static inline int process_responses_gts(struct adapter *adap,
2302 struct sge_rspq *rq)
2303 {
2304 int work;
2305
2306 work = process_responses(adap, rspq_to_qset(rq), -1);
2307 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2308 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2309 return work;
2310 }
2311
2312 /*
2313 * The legacy INTx interrupt handler. This needs to handle data events from
2314 * SGE response queues as well as error and other async events as they all use
2315 * the same interrupt pin. We use one SGE response queue per port in this mode
2316 * and protect all response queues with queue 0's lock.
2317 */
2318 static irqreturn_t t3_intr(int irq, void *cookie)
2319 {
2320 int work_done, w0, w1;
2321 struct adapter *adap = cookie;
2322 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2323 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2324
2325 spin_lock(&q0->lock);
2326
2327 w0 = is_new_response(&q0->desc[q0->cidx], q0);
2328 w1 = adap->params.nports == 2 &&
2329 is_new_response(&q1->desc[q1->cidx], q1);
2330
2331 if (likely(w0 | w1)) {
2332 t3_write_reg(adap, A_PL_CLI, 0);
2333 t3_read_reg(adap, A_PL_CLI); /* flush */
2334
2335 if (likely(w0))
2336 process_responses_gts(adap, q0);
2337
2338 if (w1)
2339 process_responses_gts(adap, q1);
2340
2341 work_done = w0 | w1;
2342 } else
2343 work_done = t3_slow_intr_handler(adap);
2344
2345 spin_unlock(&q0->lock);
2346 return IRQ_RETVAL(work_done != 0);
2347 }
2348
2349 /*
2350 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2351 * Handles data events from SGE response queues as well as error and other
2352 * async events as they all use the same interrupt pin. We use one SGE
2353 * response queue per port in this mode and protect all response queues with
2354 * queue 0's lock.
2355 */
2356 static irqreturn_t t3b_intr(int irq, void *cookie)
2357 {
2358 u32 map;
2359 struct adapter *adap = cookie;
2360 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2361
2362 t3_write_reg(adap, A_PL_CLI, 0);
2363 map = t3_read_reg(adap, A_SG_DATA_INTR);
2364
2365 if (unlikely(!map)) /* shared interrupt, most likely */
2366 return IRQ_NONE;
2367
2368 spin_lock(&q0->lock);
2369
2370 if (unlikely(map & F_ERRINTR))
2371 t3_slow_intr_handler(adap);
2372
2373 if (likely(map & 1))
2374 process_responses_gts(adap, q0);
2375
2376 if (map & 2)
2377 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2378
2379 spin_unlock(&q0->lock);
2380 return IRQ_HANDLED;
2381 }
2382
2383 /*
2384 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2385 * Handles data events from SGE response queues as well as error and other
2386 * async events as they all use the same interrupt pin. We use one SGE
2387 * response queue per port in this mode and protect all response queues with
2388 * queue 0's lock.
2389 */
2390 static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2391 {
2392 u32 map;
2393 struct net_device *dev;
2394 struct adapter *adap = cookie;
2395 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2396
2397 t3_write_reg(adap, A_PL_CLI, 0);
2398 map = t3_read_reg(adap, A_SG_DATA_INTR);
2399
2400 if (unlikely(!map)) /* shared interrupt, most likely */
2401 return IRQ_NONE;
2402
2403 spin_lock(&q0->lock);
2404
2405 if (unlikely(map & F_ERRINTR))
2406 t3_slow_intr_handler(adap);
2407
2408 if (likely(map & 1)) {
2409 dev = adap->sge.qs[0].netdev;
2410
2411 if (likely(__netif_rx_schedule_prep(dev)))
2412 __netif_rx_schedule(dev);
2413 }
2414 if (map & 2) {
2415 dev = adap->sge.qs[1].netdev;
2416
2417 if (likely(__netif_rx_schedule_prep(dev)))
2418 __netif_rx_schedule(dev);
2419 }
2420
2421 spin_unlock(&q0->lock);
2422 return IRQ_HANDLED;
2423 }
2424
2425 /**
2426 * t3_intr_handler - select the top-level interrupt handler
2427 * @adap: the adapter
2428 * @polling: whether using NAPI to service response queues
2429 *
2430 * Selects the top-level interrupt handler based on the type of interrupts
2431 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the
2432 * response queues.
2433 */
2434 intr_handler_t t3_intr_handler(struct adapter *adap, int polling)
2435 {
2436 if (adap->flags & USING_MSIX)
2437 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2438 if (adap->flags & USING_MSI)
2439 return polling ? t3_intr_msi_napi : t3_intr_msi;
2440 if (adap->params.rev > 0)
2441 return polling ? t3b_intr_napi : t3b_intr;
2442 return t3_intr;
2443 }
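
/*
 * Example (sketch, outside the scope of this file): the driver's setup path
 * would typically register the selected handler roughly as follows, where
 * "polling" reflects whether the response queues use NAPI:
 *
 *	err = request_irq(adap->pdev->irq, t3_intr_handler(adap, polling),
 *			  IRQF_SHARED, adap->name, adap);
 */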
2444
2445 /**
2446 * t3_sge_err_intr_handler - SGE async event interrupt handler
2447 * @adapter: the adapter
2448 *
2449 * Interrupt handler for SGE asynchronous (non-data) events.
2450 */
2451 void t3_sge_err_intr_handler(struct adapter *adapter)
2452 {
2453 unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2454
2455 if (status & F_RSPQCREDITOVERFOW)
2456 CH_ALERT(adapter, "SGE response queue credit overflow\n");
2457
2458 if (status & F_RSPQDISABLED) {
2459 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2460
2461 CH_ALERT(adapter,
2462 "packet delivered to disabled response queue "
2463 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2464 }
2465
2466 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2467 if (status & (F_RSPQCREDITOVERFOW | F_RSPQDISABLED))
2468 t3_fatal_err(adapter);
2469 }
2470
2471 /**
2472 * sge_timer_cb - perform periodic maintenance of an SGE qset
2473 * @data: the SGE queue set to maintain
2474 *
2475 * Runs periodically from a timer to perform maintenance of an SGE queue
2476 * set. It performs two tasks:
2477 *
2478 * a) Cleans up any completed Tx descriptors that may still be pending.
2479 * Normal descriptor cleanup happens when new packets are added to a Tx
2480 * queue, so this timer is relatively infrequent and does any cleanup only
2481 * if the Tx queue has not seen any new packets in a while. We make a
2482 * best effort attempt to reclaim descriptors, in that we don't wait
2483 * around if we cannot get a queue's lock (which most likely is because
2484 * someone else is queueing new packets and so will also handle the
2485 * cleanup). Since control queues use immediate data exclusively, we don't
2486 * bother cleaning them up here.
2487 *
2488 * b) Replenishes Rx queues that have run out due to memory shortage.
2489 * Normally new Rx buffers are added when existing ones are consumed, but
2490 * when out of memory a queue can become empty. We try to add only a few
2491 * buffers here; the queue will be replenished fully as these new buffers
2492 * are used up if memory shortage has subsided.
2493 */
2494 static void sge_timer_cb(unsigned long data)
2495 {
2496 spinlock_t *lock;
2497 struct sge_qset *qs = (struct sge_qset *)data;
2498 struct adapter *adap = qs->netdev->priv;
2499
2500 if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
2501 reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
2502 spin_unlock(&qs->txq[TXQ_ETH].lock);
2503 }
2504 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2505 reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD]);
2506 spin_unlock(&qs->txq[TXQ_OFLD].lock);
2507 }
2508 lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
2509 &adap->sge.qs[0].rspq.lock;
2510 if (spin_trylock_irq(lock)) {
2511 if (!napi_is_scheduled(qs->netdev)) {
2512 u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
2513
2514 if (qs->fl[0].credits < qs->fl[0].size)
2515 __refill_fl(adap, &qs->fl[0]);
2516 if (qs->fl[1].credits < qs->fl[1].size)
2517 __refill_fl(adap, &qs->fl[1]);
2518
2519 if (status & (1 << qs->rspq.cntxt_id)) {
2520 qs->rspq.starved++;
2521 if (qs->rspq.credits) {
2522 refill_rspq(adap, &qs->rspq, 1);
2523 qs->rspq.credits--;
2524 qs->rspq.restarted++;
2525 t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
2526 1 << qs->rspq.cntxt_id);
2527 }
2528 }
2529 }
2530 spin_unlock_irq(lock);
2531 }
2532 mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2533 }
2534
2535 /**
2536 * t3_update_qset_coalesce - update coalescing settings for a queue set
2537 * @qs: the SGE queue set
2538 * @p: new queue set parameters
2539 *
2540 * Update the coalescing settings for an SGE queue set. Nothing is done
2541 * if the queue set is not initialized yet.
2542 */
2543 void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
2544 {
2545 if (!qs->netdev)
2546 return;
2547
2548 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
2549 qs->rspq.polling = p->polling;
2550 qs->netdev->poll = p->polling ? napi_rx_handler : ofld_poll;
2551 }
2552
2553 /**
2554 * t3_sge_alloc_qset - initialize an SGE queue set
2555 * @adapter: the adapter
2556 * @id: the queue set id
2557 * @nports: how many Ethernet ports will be using this queue set
2558 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2559 * @p: configuration parameters for this queue set
2560 * @ntxq: number of Tx queues for the queue set
2561 * @netdev: net device associated with this queue set
2562 *
2563 * Allocate resources and initialize an SGE queue set. A queue set
2564 * comprises a response queue, two Rx free-buffer queues, and up to 3
2565 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2566 * queue, offload queue, and control queue.
2567 */
2568 int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2569 int irq_vec_idx, const struct qset_params *p,
2570 int ntxq, struct net_device *netdev)
2571 {
2572 int i, ret = -ENOMEM;
2573 struct sge_qset *q = &adapter->sge.qs[id];
2574
2575 init_qset_cntxt(q, id);
2576 init_timer(&q->tx_reclaim_timer);
2577 q->tx_reclaim_timer.data = (unsigned long)q;
2578 q->tx_reclaim_timer.function = sge_timer_cb;
2579
2580 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
2581 sizeof(struct rx_desc),
2582 sizeof(struct rx_sw_desc),
2583 &q->fl[0].phys_addr, &q->fl[0].sdesc);
2584 if (!q->fl[0].desc)
2585 goto err;
2586
2587 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
2588 sizeof(struct rx_desc),
2589 sizeof(struct rx_sw_desc),
2590 &q->fl[1].phys_addr, &q->fl[1].sdesc);
2591 if (!q->fl[1].desc)
2592 goto err;
2593
2594 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
2595 sizeof(struct rsp_desc), 0,
2596 &q->rspq.phys_addr, NULL);
2597 if (!q->rspq.desc)
2598 goto err;
2599
2600 for (i = 0; i < ntxq; ++i) {
2601 /*
2602 * The control queue always uses immediate data, so it does not
2603 * need to keep track of any sk_buffs.
2604 */
2605 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2606
2607 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
2608 sizeof(struct tx_desc), sz,
2609 &q->txq[i].phys_addr,
2610 &q->txq[i].sdesc);
2611 if (!q->txq[i].desc)
2612 goto err;
2613
2614 q->txq[i].gen = 1;
2615 q->txq[i].size = p->txq_size[i];
2616 spin_lock_init(&q->txq[i].lock);
2617 skb_queue_head_init(&q->txq[i].sendq);
2618 }
2619
2620 tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
2621 (unsigned long)q);
2622 tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
2623 (unsigned long)q);
2624
2625 q->fl[0].gen = q->fl[1].gen = 1;
2626 q->fl[0].size = p->fl_size;
2627 q->fl[1].size = p->jumbo_size;
2628
2629 q->rspq.gen = 1;
2630 q->rspq.size = p->rspq_size;
2631 spin_lock_init(&q->rspq.lock);
2632
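/*
 * Stop the Ethernet Tx queue while fewer descriptors remain than the worst
 * case needed by one maximally fragmented packet per port.
 */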
2633 q->txq[TXQ_ETH].stop_thres = nports *
2634 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
2635
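/*
 * Choose free-list buffer sizes: with USE_RX_PAGE the small free list is
 * populated with RX_PAGE_SIZE page chunks, otherwise with small skbs; the
 * second free list holds buffers large enough for jumbo frames.
 */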
2636 if (!is_offload(adapter)) {
2637 #ifdef USE_RX_PAGE
2638 q->fl[0].buf_size = RX_PAGE_SIZE;
2639 #else
2640 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + 2 +
2641 sizeof(struct cpl_rx_pkt);
2642 #endif
2643 q->fl[1].buf_size = MAX_FRAME_SIZE + 2 +
2644 sizeof(struct cpl_rx_pkt);
2645 } else {
2646 #ifdef USE_RX_PAGE
2647 q->fl[0].buf_size = RX_PAGE_SIZE;
2648 #else
2649 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE +
2650 sizeof(struct cpl_rx_data);
2651 #endif
2652 q->fl[1].buf_size = (16 * 1024) -
2653 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2654 }
2655
2656 spin_lock(&adapter->sge.reg_lock);
2657
2658 /* FL threshold comparison uses < */
2659 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
2660 q->rspq.phys_addr, q->rspq.size,
2661 q->fl[0].buf_size, 1, 0);
2662 if (ret)
2663 goto err_unlock;
2664
2665 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2666 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
2667 q->fl[i].phys_addr, q->fl[i].size,
2668 q->fl[i].buf_size, p->cong_thres, 1,
2669 0);
2670 if (ret)
2671 goto err_unlock;
2672 }
2673
2674 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2675 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2676 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2677 1, 0);
2678 if (ret)
2679 goto err_unlock;
2680
2681 if (ntxq > 1) {
2682 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
2683 USE_GTS, SGE_CNTXT_OFLD, id,
2684 q->txq[TXQ_OFLD].phys_addr,
2685 q->txq[TXQ_OFLD].size, 0, 1, 0);
2686 if (ret)
2687 goto err_unlock;
2688 }
2689
2690 if (ntxq > 2) {
2691 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
2692 SGE_CNTXT_CTRL, id,
2693 q->txq[TXQ_CTRL].phys_addr,
2694 q->txq[TXQ_CTRL].size,
2695 q->txq[TXQ_CTRL].token, 1, 0);
2696 if (ret)
2697 goto err_unlock;
2698 }
2699
2700 spin_unlock(&adapter->sge.reg_lock);
2701 q->netdev = netdev;
2702 t3_update_qset_coalesce(q, p);
2703
2704 /*
2705 * We use atalk_ptr as a backpointer to a qset. If a device is
2706 * associated with multiple queue sets, only the first one sets
2707 * atalk_ptr.
2708 */
2709 if (netdev->atalk_ptr == NULL)
2710 netdev->atalk_ptr = q;
2711
2712 refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
2713 refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
2714 refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
2715
2716 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2717 V_NEWTIMER(q->rspq.holdoff_tmr));
2718
2719 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2720 return 0;
2721
2722 err_unlock:
2723 spin_unlock(&adapter->sge.reg_lock);
2724 err:
2725 t3_free_qset(adapter, q);
2726 return ret;
2727 }
2728
2729 /**
2730 * t3_free_sge_resources - free SGE resources
2731 * @adap: the adapter
2732 *
2733 * Frees resources used by the SGE queue sets.
2734 */
2735 void t3_free_sge_resources(struct adapter *adap)
2736 {
2737 int i;
2738
2739 for (i = 0; i < SGE_QSETS; ++i)
2740 t3_free_qset(adap, &adap->sge.qs[i]);
2741 }
2742
2743 /**
2744 * t3_sge_start - enable SGE
2745 * @adap: the adapter
2746 *
2747 * Enables the SGE for DMAs. This is the last step in starting packet
2748 * transfers.
2749 */
2750 void t3_sge_start(struct adapter *adap)
2751 {
2752 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
2753 }
2754
2755 /**
2756 * t3_sge_stop - disable SGE operation
2757 * @adap: the adapter
2758 *
2759 * Disables the DMA engine. This can be called in emergencies (e.g.,
2760 * from error interrupts) or from normal process context. In the latter
2761 * case it also disables any pending queue restart tasklets. Note that
2762 * if it is called in interrupt context it cannot disable the restart
2763 * tasklets as it cannot wait; however, the tasklets will have no effect
2764 * since the doorbells are disabled and the driver will call this again
2765 * later from process context, at which time the tasklets will be stopped
2766 * if they are still running.
2767 */
2768 void t3_sge_stop(struct adapter *adap)
2769 {
2770 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
2771 if (!in_interrupt()) {
2772 int i;
2773
2774 for (i = 0; i < SGE_QSETS; ++i) {
2775 struct sge_qset *qs = &adap->sge.qs[i];
2776
2777 tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
2778 tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
2779 }
2780 }
2781 }
2782
2783 /**
2784 * t3_sge_init - initialize SGE
2785 * @adap: the adapter
2786 * @p: the SGE parameters
2787 *
2788 * Performs SGE initialization needed every time after a chip reset.
2789 * We do not initialize any of the queue sets here, instead the driver
2790 * top-level must request those individually. We also do not enable DMA
2791 * here, that should be done after the queues have been set up.
2792 */
2793 void t3_sge_init(struct adapter *adap, struct sge_params *p)
2794 {
2795 unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
2796
2797 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
2798 F_CQCRDTCTRL |
2799 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
2800 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
2801 #if SGE_NUM_GENBITS == 1
2802 ctrl |= F_EGRGENCTRL;
2803 #endif
2804 if (adap->params.rev > 0) {
2805 if (!(adap->flags & (USING_MSIX | USING_MSI)))
2806 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
2807 ctrl |= F_CQCRDTCTRL | F_AVOIDCQOVFL;
2808 }
2809 t3_write_reg(adap, A_SG_CONTROL, ctrl);
2810 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
2811 V_LORCQDRBTHRSH(512));
2812 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
2813 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
2814 V_TIMEOUT(200 * core_ticks_per_usec(adap)));
2815 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, 1000);
2816 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
2817 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
2818 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
2819 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
2820 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
2821 }
2822
2823 /**
2824 * t3_sge_prep - one-time SGE initialization
2825 * @adap: the associated adapter
2826 * @p: SGE parameters
2827 *
2828 * Performs one-time initialization of SGE SW state, including determining
2829 * defaults for the various SGE parameters, which administrators can change
2830 * until the parameters are used to initialize the SGE.
2831 */
2832 void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p)
2833 {
2834 int i;
2835
2836 p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
2837 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2838
2839 for (i = 0; i < SGE_QSETS; ++i) {
2840 struct qset_params *q = p->qset + i;
2841
2842 q->polling = adap->params.rev > 0;
2843 q->coalesce_usecs = 5;
2844 q->rspq_size = 1024;
2845 q->fl_size = 1024;
2846 q->jumbo_size = 512;
2847 q->txq_size[TXQ_ETH] = 1024;
2848 q->txq_size[TXQ_OFLD] = 1024;
2849 q->txq_size[TXQ_CTRL] = 256;
2850 q->cong_thres = 0;
2851 }
2852
2853 spin_lock_init(&adap->sge.reg_lock);
2854 }
2855
2856 /**
2857 * t3_get_desc - dump an SGE descriptor for debugging purposes
2858 * @qs: the queue set
2859 * @qnum: identifies the specific queue (0..2: Tx, 3:response, 4..5: Rx)
2860 * @idx: the descriptor index in the queue
2861 * @data: where to dump the descriptor contents
2862 *
2863 * Dumps the contents of a HW descriptor of an SGE queue. Returns the
2864 * size of the descriptor.
2865 */
2866 int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
2867 unsigned char *data)
2868 {
2869 if (qnum >= 6)
2870 return -EINVAL;
2871
2872 if (qnum < 3) {
2873 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
2874 return -EINVAL;
2875 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
2876 return sizeof(struct tx_desc);
2877 }
2878
2879 if (qnum == 3) {
2880 if (!qs->rspq.desc || idx >= qs->rspq.size)
2881 return -EINVAL;
2882 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
2883 return sizeof(struct rsp_desc);
2884 }
2885
2886 qnum -= 4;
2887 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
2888 return -EINVAL;
2889 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
2890 return sizeof(struct rx_desc);
2891 }