[SK_BUFF]: Introduce skb_reset_transport_header(skb)
drivers/net/cxgb3/sge.c
1 /*
2 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32 #include <linux/skbuff.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/dma-mapping.h>
39 #include "common.h"
40 #include "regs.h"
41 #include "sge_defs.h"
42 #include "t3_cpl.h"
43 #include "firmware_exports.h"
44
45 #define USE_GTS 0
46
47 #define SGE_RX_SM_BUF_SIZE 1536
48
49 /*
50 * If USE_RX_PAGE is defined, the small freelist is populated with (partial)
51 * pages instead of skbs. Pages are carved up into RX_PAGE_SIZE chunks (must
52 * be a multiple of the host page size).
53 */
54 #define USE_RX_PAGE
55 #define RX_PAGE_SIZE 2048
56
57 /*
58 * skb freelist packets are copied into a new skb (and the freelist one is
59 * reused) if their len is <=
60 */
61 #define SGE_RX_COPY_THRES 256
62
63 /*
64 * Minimum number of freelist entries before we start dropping TUNNEL frames.
65 */
66 #define SGE_RX_DROP_THRES 16
67
68 /*
69 * Period of the Tx buffer reclaim timer. This timer does not need to run
70 * frequently as Tx buffers are usually reclaimed by new Tx packets.
71 */
72 #define TX_RECLAIM_PERIOD (HZ / 4)
73
74 /* WR size in bytes */
75 #define WR_LEN (WR_FLITS * 8)
76
77 /*
78 * Types of Tx queues in each queue set. Order here matters, do not change.
79 */
80 enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
81
82 /* Values for sge_txq.flags */
83 enum {
84 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
85 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
86 };
87
88 struct tx_desc {
89 u64 flit[TX_DESC_FLITS];
90 };
91
92 struct rx_desc {
93 __be32 addr_lo;
94 __be32 len_gen;
95 __be32 gen2;
96 __be32 addr_hi;
97 };
98
99 struct tx_sw_desc { /* SW state per Tx descriptor */
100 struct sk_buff *skb;
101 };
102
103 struct rx_sw_desc { /* SW state per Rx descriptor */
104 union {
105 struct sk_buff *skb;
106 struct sge_fl_page page;
107 } t;
108 DECLARE_PCI_UNMAP_ADDR(dma_addr);
109 };
110
111 struct rsp_desc { /* response queue descriptor */
112 struct rss_header rss_hdr;
113 __be32 flags;
114 __be32 len_cq;
115 u8 imm_data[47];
116 u8 intr_gen;
117 };
118
119 struct unmap_info { /* packet unmapping info, overlays skb->cb */
120 int sflit; /* start flit of first SGL entry in Tx descriptor */
121 u16 fragidx; /* first page fragment in current Tx descriptor */
122 u16 addr_idx; /* buffer index of first SGL entry in descriptor */
123 u32 len; /* mapped length of skb main body */
124 };
125
126 /*
127 * Holds unmapping information for Tx packets that need deferred unmapping.
128 * This structure lives at skb->head and must be allocated by callers.
129 */
130 struct deferred_unmap_info {
131 struct pci_dev *pdev;
132 dma_addr_t addr[MAX_SKB_FRAGS + 1];
133 };
134
135 /*
136 * Maps a number of flits to the number of Tx descriptors that can hold them.
137 * The formula is
138 *
139 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
140 *
141 * HW allows up to 4 descriptors to be combined into a WR.
142 */
143 static u8 flit_desc_map[] = {
144 0,
145 #if SGE_NUM_GENBITS == 1
146 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
147 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
148 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
149 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
150 #elif SGE_NUM_GENBITS == 2
151 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
152 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
153 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
154 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
155 #else
156 # error "SGE_NUM_GENBITS must be 1 or 2"
157 #endif
158 };
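/*
 * Worked example, assuming WR_FLITS == 15 when SGE_NUM_GENBITS == 2 (as
 * the row lengths above imply): an SGL of 20 flits needs
 * 1 + (20 - 2) / (15 - 1) = 2 descriptors, matching flit_desc_map[20].
 */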
159
160 static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
161 {
162 return container_of(q, struct sge_qset, fl[qidx]);
163 }
164
165 static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
166 {
167 return container_of(q, struct sge_qset, rspq);
168 }
169
170 static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
171 {
172 return container_of(q, struct sge_qset, txq[qidx]);
173 }
174
175 /**
176 * refill_rspq - replenish an SGE response queue
177 * @adapter: the adapter
178 * @q: the response queue to replenish
179 * @credits: how many new responses to make available
180 *
181 * Replenishes a response queue by making the supplied number of responses
182 * available to HW.
183 */
184 static inline void refill_rspq(struct adapter *adapter,
185 const struct sge_rspq *q, unsigned int credits)
186 {
187 t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
188 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
189 }
190
191 /**
192 * need_skb_unmap - does the platform need unmapping of sk_buffs?
193 *
194 * Returns true if the platform needs sk_buff unmapping. The compiler
195 * optimizes away the unnecessary unmapping code when this returns false.
196 */
197 static inline int need_skb_unmap(void)
198 {
199 /*
200 * This structure is used to tell if the platfrom needs buffer
201 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
202 */
203 struct dummy {
204 DECLARE_PCI_UNMAP_ADDR(addr);
205 };
206
207 return sizeof(struct dummy) != 0;
208 }
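/*
 * Sketch of why the sizeof() trick above works, assuming the usual shape
 * of DECLARE_PCI_UNMAP_ADDR (the exact config symbol varies by kernel
 * version and architecture):
 *
 *	#ifdef CONFIG_NEED_PCI_UNMAP_STATE
 *	#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME;
 *	#else
 *	#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
 *	#endif
 *
 * Where the macro expands to nothing, struct dummy is empty, its sizeof
 * is 0 under gcc, and need_skb_unmap() folds to a constant 0.
 */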
209
210 /**
211 * unmap_skb - unmap a packet main body and its page fragments
212 * @skb: the packet
213 * @q: the Tx queue containing Tx descriptors for the packet
214 * @cidx: index of Tx descriptor
215 * @pdev: the PCI device
216 *
217 * Unmap the main body of an sk_buff and its page fragments, if any.
218 * Because of the fairly complicated structure of our SGLs and the desire
219 * to conserve space for metadata, we keep the information necessary to
220 * unmap an sk_buff partly in the sk_buff itself (in its cb), and partly
221 * in the Tx descriptors (the physical addresses of the various data
222 * buffers). The send functions initialize the state in skb->cb so we
223 * can unmap the buffers held in the first Tx descriptor here, and we
224 * have enough information at this point to update the state for the next
225 * Tx descriptor.
226 */
227 static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
228 unsigned int cidx, struct pci_dev *pdev)
229 {
230 const struct sg_ent *sgp;
231 struct unmap_info *ui = (struct unmap_info *)skb->cb;
232 int nfrags, frag_idx, curflit, j = ui->addr_idx;
233
234 sgp = (struct sg_ent *)&q->desc[cidx].flit[ui->sflit];
235
236 if (ui->len) {
237 pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]), ui->len,
238 PCI_DMA_TODEVICE);
239 ui->len = 0; /* so we know for next descriptor for this skb */
240 j = 1;
241 }
242
243 frag_idx = ui->fragidx;
244 curflit = ui->sflit + 1 + j;
245 nfrags = skb_shinfo(skb)->nr_frags;
246
247 while (frag_idx < nfrags && curflit < WR_FLITS) {
248 pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
249 skb_shinfo(skb)->frags[frag_idx].size,
250 PCI_DMA_TODEVICE);
251 j ^= 1;
252 if (j == 0) {
253 sgp++;
254 curflit++;
255 }
256 curflit++;
257 frag_idx++;
258 }
259
260 if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
261 ui->fragidx = frag_idx;
262 ui->addr_idx = j;
263 ui->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
264 }
265 }
266
267 /**
268 * free_tx_desc - reclaims Tx descriptors and their buffers
269 * @adapter: the adapter
270 * @q: the Tx queue to reclaim descriptors from
271 * @n: the number of descriptors to reclaim
272 *
273 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
274 * Tx buffers. Called with the Tx queue lock held.
275 */
276 static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
277 unsigned int n)
278 {
279 struct tx_sw_desc *d;
280 struct pci_dev *pdev = adapter->pdev;
281 unsigned int cidx = q->cidx;
282
283 const int need_unmap = need_skb_unmap() &&
284 q->cntxt_id >= FW_TUNNEL_SGEEC_START;
285
286 d = &q->sdesc[cidx];
287 while (n--) {
288 if (d->skb) { /* an SGL is present */
289 if (need_unmap)
290 unmap_skb(d->skb, q, cidx, pdev);
291 if (d->skb->priority == cidx)
292 kfree_skb(d->skb);
293 }
294 ++d;
295 if (++cidx == q->size) {
296 cidx = 0;
297 d = q->sdesc;
298 }
299 }
300 q->cidx = cidx;
301 }
302
303 /**
304 * reclaim_completed_tx - reclaims completed Tx descriptors
305 * @adapter: the adapter
306 * @q: the Tx queue to reclaim completed descriptors from
307 *
308 * Reclaims Tx descriptors that the SGE has indicated it has processed,
309 * and frees the associated buffers if possible. Called with the Tx
310 * queue's lock held.
311 */
312 static inline void reclaim_completed_tx(struct adapter *adapter,
313 struct sge_txq *q)
314 {
315 unsigned int reclaim = q->processed - q->cleaned;
316
317 if (reclaim) {
318 free_tx_desc(adapter, q, reclaim);
319 q->cleaned += reclaim;
320 q->in_use -= reclaim;
321 }
322 }
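/*
 * Note that q->processed and q->cleaned are free-running counters, so
 * the subtraction above yields the pending reclaim count correctly even
 * across 32-bit wraparound.
 */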
323
324 /**
325 * should_restart_tx - are there enough resources to restart a Tx queue?
326 * @q: the Tx queue
327 *
328 * Checks if there are enough descriptors to restart a suspended Tx queue.
329 */
330 static inline int should_restart_tx(const struct sge_txq *q)
331 {
332 unsigned int r = q->processed - q->cleaned;
333
334 return q->in_use - r < (q->size >> 1);
335 }
336
337 /**
338 * free_rx_bufs - free the Rx buffers on an SGE free list
339 * @pdev: the PCI device associated with the adapter
340 * @q: the SGE free list to clean up
341 *
342 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
343 * this queue should be stopped before calling this function.
344 */
345 static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
346 {
347 unsigned int cidx = q->cidx;
348
349 while (q->credits--) {
350 struct rx_sw_desc *d = &q->sdesc[cidx];
351
352 pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
353 q->buf_size, PCI_DMA_FROMDEVICE);
354
355 if (q->buf_size != RX_PAGE_SIZE) {
356 kfree_skb(d->t.skb);
357 d->t.skb = NULL;
358 } else {
359 if (d->t.page.frag.page)
360 put_page(d->t.page.frag.page);
361 d->t.page.frag.page = NULL;
362 }
363 if (++cidx == q->size)
364 cidx = 0;
365 }
366
367 if (q->page.frag.page)
368 put_page(q->page.frag.page);
369 q->page.frag.page = NULL;
370 }
371
372 /**
373 * add_one_rx_buf - add a packet buffer to a free-buffer list
374 * @va: va of the buffer to add
375 * @len: the buffer length
376 * @d: the HW Rx descriptor to write
377 * @sd: the SW Rx descriptor to write
378 * @gen: the generation bit value
379 * @pdev: the PCI device associated with the adapter
380 *
381 * Add a buffer of the given length to the supplied HW and SW Rx
382 * descriptors.
383 */
384 static inline void add_one_rx_buf(unsigned char *va, unsigned int len,
385 struct rx_desc *d, struct rx_sw_desc *sd,
386 unsigned int gen, struct pci_dev *pdev)
387 {
388 dma_addr_t mapping;
389
390 mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
391 pci_unmap_addr_set(sd, dma_addr, mapping);
392
393 d->addr_lo = cpu_to_be32(mapping);
394 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
395 wmb();
396 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
397 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
398 }
399
400 /**
401 * refill_fl - refill an SGE free-buffer list
402 * @adap: the adapter
403 * @q: the free-list to refill
404 * @n: the number of new buffers to allocate
405 * @gfp: the gfp flags for allocating new buffers
406 *
407 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
408 * allocated with the supplied gfp flags. The caller must ensure that
409 * @n does not exceed the queue's capacity.
410 */
411 static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
412 {
413 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
414 struct rx_desc *d = &q->desc[q->pidx];
415 struct sge_fl_page *p = &q->page;
416
417 while (n--) {
418 unsigned char *va;
419
420 if (unlikely(q->buf_size != RX_PAGE_SIZE)) {
421 struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
422
423 if (!skb) {
424 q->alloc_failed++;
425 break;
426 }
427 va = skb->data;
428 sd->t.skb = skb;
429 } else {
430 if (!p->frag.page) {
431 p->frag.page = alloc_pages(gfp, 0);
432 if (unlikely(!p->frag.page)) {
433 q->alloc_failed++;
434 break;
435 } else {
436 p->frag.size = RX_PAGE_SIZE;
437 p->frag.page_offset = 0;
438 p->va = page_address(p->frag.page);
439 }
440 }
441
442 memcpy(&sd->t, p, sizeof(*p));
443 va = p->va;
444
445 p->frag.page_offset += RX_PAGE_SIZE;
446 BUG_ON(p->frag.page_offset > PAGE_SIZE);
447 p->va += RX_PAGE_SIZE;
448 if (p->frag.page_offset == PAGE_SIZE)
449 p->frag.page = NULL;
450 else
451 get_page(p->frag.page);
452 }
453
454 add_one_rx_buf(va, q->buf_size, d, sd, q->gen, adap->pdev);
455
456 d++;
457 sd++;
458 if (++q->pidx == q->size) {
459 q->pidx = 0;
460 q->gen ^= 1;
461 sd = q->sdesc;
462 d = q->desc;
463 }
464 q->credits++;
465 }
466
467 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
468 }
469
470 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
471 {
472 refill_fl(adap, fl, min(16U, fl->size - fl->credits), GFP_ATOMIC);
473 }
474
475 /**
476 * recycle_rx_buf - recycle a receive buffer
477 * @adap: the adapter
478 * @q: the SGE free list
479 * @idx: index of buffer to recycle
480 *
481 * Recycles the specified buffer on the given free list by adding it at
482 * the next available slot on the list.
483 */
484 static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
485 unsigned int idx)
486 {
487 struct rx_desc *from = &q->desc[idx];
488 struct rx_desc *to = &q->desc[q->pidx];
489
490 memcpy(&q->sdesc[q->pidx], &q->sdesc[idx], sizeof(struct rx_sw_desc));
491 to->addr_lo = from->addr_lo; /* already big endian */
492 to->addr_hi = from->addr_hi; /* likewise */
493 wmb();
494 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
495 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
496 q->credits++;
497
498 if (++q->pidx == q->size) {
499 q->pidx = 0;
500 q->gen ^= 1;
501 }
502 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
503 }
504
505 /**
506 * alloc_ring - allocate resources for an SGE descriptor ring
507 * @pdev: the PCI device
508 * @nelem: the number of descriptors
509 * @elem_size: the size of each descriptor
510 * @sw_size: the size of the SW state associated with each ring element
511 * @phys: the physical address of the allocated ring
512 * @metadata: address of the array holding the SW state for the ring
513 *
514 * Allocates resources for an SGE descriptor ring, such as Tx queues,
515 * free buffer lists, or response queues. Each SGE ring requires
516 * space for its HW descriptors plus, optionally, space for the SW state
517 * associated with each HW entry (the metadata). The function returns
518 * three values: the virtual address for the HW ring (the return value
519 * of the function), the physical address of the HW ring, and the address
520 * of the SW ring.
521 */
522 static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
523 size_t sw_size, dma_addr_t * phys, void *metadata)
524 {
525 size_t len = nelem * elem_size;
526 void *s = NULL;
527 void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
528
529 if (!p)
530 return NULL;
531 if (sw_size) {
532 s = kcalloc(nelem, sw_size, GFP_KERNEL);
533
534 if (!s) {
535 dma_free_coherent(&pdev->dev, len, p, *phys);
536 return NULL;
537 }
538 }
539 if (metadata)
540 *(void **)metadata = s;
541 memset(p, 0, len);
542 return p;
543 }
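/*
 * Typical use, as in the queue-set setup code elsewhere in this file
 * (shown for illustration only; names are taken from that context):
 *
 *	q->desc = alloc_ring(adap->pdev, q->size, sizeof(struct rx_desc),
 *			     sizeof(struct rx_sw_desc), &q->phys_addr,
 *			     &q->sdesc);
 *
 * A single call hands back the HW ring, its bus address, and the SW ring.
 */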
544
545 /**
546 * free_qset - free the resources of an SGE queue set
547 * @adapter: the adapter owning the queue set
548 * @q: the queue set
549 *
550 * Release the HW and SW resources associated with an SGE queue set, such
551 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
552 * queue set must be quiesced prior to calling this.
553 */
554 void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
555 {
556 int i;
557 struct pci_dev *pdev = adapter->pdev;
558
559 if (q->tx_reclaim_timer.function)
560 del_timer_sync(&q->tx_reclaim_timer);
561
562 for (i = 0; i < SGE_RXQ_PER_SET; ++i)
563 if (q->fl[i].desc) {
564 spin_lock(&adapter->sge.reg_lock);
565 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
566 spin_unlock(&adapter->sge.reg_lock);
567 free_rx_bufs(pdev, &q->fl[i]);
568 kfree(q->fl[i].sdesc);
569 dma_free_coherent(&pdev->dev,
570 q->fl[i].size *
571 sizeof(struct rx_desc), q->fl[i].desc,
572 q->fl[i].phys_addr);
573 }
574
575 for (i = 0; i < SGE_TXQ_PER_SET; ++i)
576 if (q->txq[i].desc) {
577 spin_lock(&adapter->sge.reg_lock);
578 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
579 spin_unlock(&adapter->sge.reg_lock);
580 if (q->txq[i].sdesc) {
581 free_tx_desc(adapter, &q->txq[i],
582 q->txq[i].in_use);
583 kfree(q->txq[i].sdesc);
584 }
585 dma_free_coherent(&pdev->dev,
586 q->txq[i].size *
587 sizeof(struct tx_desc),
588 q->txq[i].desc, q->txq[i].phys_addr);
589 __skb_queue_purge(&q->txq[i].sendq);
590 }
591
592 if (q->rspq.desc) {
593 spin_lock(&adapter->sge.reg_lock);
594 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
595 spin_unlock(&adapter->sge.reg_lock);
596 dma_free_coherent(&pdev->dev,
597 q->rspq.size * sizeof(struct rsp_desc),
598 q->rspq.desc, q->rspq.phys_addr);
599 }
600
601 if (q->netdev)
602 q->netdev->atalk_ptr = NULL;
603
604 memset(q, 0, sizeof(*q));
605 }
606
607 /**
608 * init_qset_cntxt - initialize an SGE queue set context info
609 * @qs: the queue set
610 * @id: the queue set id
611 *
612 * Initializes the TIDs and context ids for the queues of a queue set.
613 */
614 static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
615 {
616 qs->rspq.cntxt_id = id;
617 qs->fl[0].cntxt_id = 2 * id;
618 qs->fl[1].cntxt_id = 2 * id + 1;
619 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
620 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
621 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
622 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
623 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
624 }
625
626 /**
627 * sgl_len - calculates the size of an SGL of the given capacity
628 * @n: the number of SGL entries
629 *
630 * Calculates the number of flits needed for a scatter/gather list that
631 * can hold the given number of entries.
632 */
633 static inline unsigned int sgl_len(unsigned int n)
634 {
635 /* alternatively: 3 * (n / 2) + 2 * (n & 1) */
636 return (3 * n) / 2 + (n & 1);
637 }
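/*
 * Example: each struct sg_ent packs two 32-bit lengths and two 64-bit
 * addresses into 3 flits, so sgl_len(2) == 3, while an odd final entry
 * adds its length flit plus one address flit: sgl_len(3) == 5.
 */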
638
639 /**
640 * flits_to_desc - returns the num of Tx descriptors for the given flits
641 * @n: the number of flits
642 *
643 * Calculates the number of Tx descriptors needed for the supplied number
644 * of flits.
645 */
646 static inline unsigned int flits_to_desc(unsigned int n)
647 {
648 BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
649 return flit_desc_map[n];
650 }
651
652 /**
653 * get_imm_packet - return the next ingress packet buffer from a response
654 * @resp: the response descriptor containing the packet data
655 *
656 * Return a packet containing the immediate data of the given response.
657 */
658 static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
659 {
660 struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
661
662 if (skb) {
663 __skb_put(skb, IMMED_PKT_SIZE);
664 memcpy(skb->data, resp->imm_data, IMMED_PKT_SIZE);
665 }
666 return skb;
667 }
668
669 /**
670 * calc_tx_descs - calculate the number of Tx descriptors for a packet
671 * @skb: the packet
672 *
673 * Returns the number of Tx descriptors needed for the given Ethernet
674 * packet. Ethernet packets require addition of WR and CPL headers.
675 */
676 static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
677 {
678 unsigned int flits;
679
680 if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
681 return 1;
682
683 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
684 if (skb_shinfo(skb)->gso_size)
685 flits++;
686 return flits_to_desc(flits);
687 }
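/*
 * Example: a TSO packet with a linear part and 3 page fragments needs
 * sgl_len(3 + 1) + 2 + 1 = 9 flits, and flits_to_desc(9) == 1, so the
 * whole work request still fits in one Tx descriptor.
 */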
688
689 /**
690 * make_sgl - populate a scatter/gather list for a packet
691 * @skb: the packet
692 * @sgp: the SGL to populate
693 * @start: start address of skb main body data to include in the SGL
694 * @len: length of skb main body data to include in the SGL
695 * @pdev: the PCI device
696 *
697 * Generates a scatter/gather list for the buffers that make up a packet
698 * and returns the SGL size in 8-byte words. The caller must size the SGL
699 * appropriately.
700 */
701 static inline unsigned int make_sgl(const struct sk_buff *skb,
702 struct sg_ent *sgp, unsigned char *start,
703 unsigned int len, struct pci_dev *pdev)
704 {
705 dma_addr_t mapping;
706 unsigned int i, j = 0, nfrags;
707
708 if (len) {
709 mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
710 sgp->len[0] = cpu_to_be32(len);
711 sgp->addr[0] = cpu_to_be64(mapping);
712 j = 1;
713 }
714
715 nfrags = skb_shinfo(skb)->nr_frags;
716 for (i = 0; i < nfrags; i++) {
717 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
718
719 mapping = pci_map_page(pdev, frag->page, frag->page_offset,
720 frag->size, PCI_DMA_TODEVICE);
721 sgp->len[j] = cpu_to_be32(frag->size);
722 sgp->addr[j] = cpu_to_be64(mapping);
723 j ^= 1;
724 if (j == 0)
725 ++sgp;
726 }
727 if (j)
728 sgp->len[j] = 0;
729 return ((nfrags + (len != 0)) * 3) / 2 + j;
730 }
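/*
 * The value returned above equals sgl_len(nfrags + (len != 0)); after
 * the loop j is 1 exactly when the total entry count is odd.
 */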
731
732 /**
733 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
734 * @adap: the adapter
735 * @q: the Tx queue
736 *
737 * Ring the doorbell if a Tx queue is asleep. There is a natural race:
738 * the HW may go to sleep just after we check, but in that case the
739 * interrupt handler will detect the outstanding TX packet and ring the
740 * doorbell for us.
741 *
742 * When GTS is disabled we unconditionally ring the doorbell.
743 */
744 static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
745 {
746 #if USE_GTS
747 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
748 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
749 set_bit(TXQ_LAST_PKT_DB, &q->flags);
750 t3_write_reg(adap, A_SG_KDOORBELL,
751 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
752 }
753 #else
754 wmb(); /* write descriptors before telling HW */
755 t3_write_reg(adap, A_SG_KDOORBELL,
756 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
757 #endif
758 }
759
760 static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
761 {
762 #if SGE_NUM_GENBITS == 2
763 d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
764 #endif
765 }
766
767 /**
768 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
769 * @ndesc: number of Tx descriptors spanned by the SGL
770 * @skb: the packet corresponding to the WR
771 * @d: first Tx descriptor to be written
772 * @pidx: index of above descriptors
773 * @q: the SGE Tx queue
774 * @sgl: the SGL
775 * @flits: number of flits to the start of the SGL in the first descriptor
776 * @sgl_flits: the SGL size in flits
777 * @gen: the Tx descriptor generation
778 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
779 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
780 *
781 * Write a work request header and an associated SGL. If the SGL is
782 * small enough to fit into one Tx descriptor it has already been written
783 * and we just need to write the WR header. Otherwise we distribute the
784 * SGL across the number of descriptors it spans.
785 */
786 static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
787 struct tx_desc *d, unsigned int pidx,
788 const struct sge_txq *q,
789 const struct sg_ent *sgl,
790 unsigned int flits, unsigned int sgl_flits,
791 unsigned int gen, unsigned int wr_hi,
792 unsigned int wr_lo)
793 {
794 struct work_request_hdr *wrp = (struct work_request_hdr *)d;
795 struct tx_sw_desc *sd = &q->sdesc[pidx];
796
797 sd->skb = skb;
798 if (need_skb_unmap()) {
799 struct unmap_info *ui = (struct unmap_info *)skb->cb;
800
801 ui->fragidx = 0;
802 ui->addr_idx = 0;
803 ui->sflit = flits;
804 }
805
806 if (likely(ndesc == 1)) {
807 skb->priority = pidx;
808 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
809 V_WR_SGLSFLT(flits)) | wr_hi;
810 wmb();
811 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
812 V_WR_GEN(gen)) | wr_lo;
813 wr_gen2(d, gen);
814 } else {
815 unsigned int ogen = gen;
816 const u64 *fp = (const u64 *)sgl;
817 struct work_request_hdr *wp = wrp;
818
819 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
820 V_WR_SGLSFLT(flits)) | wr_hi;
821
822 while (sgl_flits) {
823 unsigned int avail = WR_FLITS - flits;
824
825 if (avail > sgl_flits)
826 avail = sgl_flits;
827 memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
828 sgl_flits -= avail;
829 ndesc--;
830 if (!sgl_flits)
831 break;
832
833 fp += avail;
834 d++;
835 sd++;
836 if (++pidx == q->size) {
837 pidx = 0;
838 gen ^= 1;
839 d = q->desc;
840 sd = q->sdesc;
841 }
842
843 sd->skb = skb;
844 wrp = (struct work_request_hdr *)d;
845 wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
846 V_WR_SGLSFLT(1)) | wr_hi;
847 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
848 sgl_flits + 1)) |
849 V_WR_GEN(gen)) | wr_lo;
850 wr_gen2(d, gen);
851 flits = 1;
852 }
853 skb->priority = pidx;
854 wrp->wr_hi |= htonl(F_WR_EOP);
855 wmb();
856 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
857 wr_gen2((struct tx_desc *)wp, ogen);
858 WARN_ON(ndesc != 0);
859 }
860 }
861
862 /**
863 * write_tx_pkt_wr - write a TX_PKT work request
864 * @adap: the adapter
865 * @skb: the packet to send
866 * @pi: the egress interface
867 * @pidx: index of the first Tx descriptor to write
868 * @gen: the generation value to use
869 * @q: the Tx queue
870 * @ndesc: number of descriptors the packet will occupy
871 * @compl: the value of the COMPL bit to use
872 *
873 * Generate a TX_PKT work request to send the supplied packet.
874 */
875 static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
876 const struct port_info *pi,
877 unsigned int pidx, unsigned int gen,
878 struct sge_txq *q, unsigned int ndesc,
879 unsigned int compl)
880 {
881 unsigned int flits, sgl_flits, cntrl, tso_info;
882 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
883 struct tx_desc *d = &q->desc[pidx];
884 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
885
886 cpl->len = htonl(skb->len | 0x80000000);
887 cntrl = V_TXPKT_INTF(pi->port_id);
888
889 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
890 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
891
892 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
893 if (tso_info) {
894 int eth_type;
895 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
896
897 d->flit[2] = 0;
898 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
899 hdr->cntrl = htonl(cntrl);
900 eth_type = skb_network_offset(skb) == ETH_HLEN ?
901 CPL_ETH_II : CPL_ETH_II_VLAN;
902 tso_info |= V_LSO_ETH_TYPE(eth_type) |
903 V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
904 V_LSO_TCPHDR_WORDS(skb->h.th->doff);
905 hdr->lso_info = htonl(tso_info);
906 flits = 3;
907 } else {
908 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
909 cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
910 cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
911 cpl->cntrl = htonl(cntrl);
912
913 if (skb->len <= WR_LEN - sizeof(*cpl)) {
914 q->sdesc[pidx].skb = NULL;
915 if (!skb->data_len)
916 memcpy(&d->flit[2], skb->data, skb->len);
917 else
918 skb_copy_bits(skb, 0, &d->flit[2], skb->len);
919
920 flits = (skb->len + 7) / 8 + 2;
921 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
922 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
923 | F_WR_SOP | F_WR_EOP | compl);
924 wmb();
925 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
926 V_WR_TID(q->token));
927 wr_gen2(d, gen);
928 kfree_skb(skb);
929 return;
930 }
931
932 flits = 2;
933 }
934
935 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
936 sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
937 if (need_skb_unmap())
938 ((struct unmap_info *)skb->cb)->len = skb_headlen(skb);
939
940 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
941 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
942 htonl(V_WR_TID(q->token)));
943 }
944
945 /**
946 * t3_eth_xmit - add a packet to the Ethernet Tx queue
947 * @skb: the packet
948 * @dev: the egress net device
949 *
950 * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
951 */
952 int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
953 {
954 unsigned int ndesc, pidx, credits, gen, compl;
955 const struct port_info *pi = netdev_priv(dev);
956 struct adapter *adap = dev->priv;
957 struct sge_qset *qs = dev2qset(dev);
958 struct sge_txq *q = &qs->txq[TXQ_ETH];
959
960 /*
961 * The chip min packet length is 9 octets but play safe and reject
962 * anything shorter than an Ethernet header.
963 */
964 if (unlikely(skb->len < ETH_HLEN)) {
965 dev_kfree_skb(skb);
966 return NETDEV_TX_OK;
967 }
968
969 spin_lock(&q->lock);
970 reclaim_completed_tx(adap, q);
971
972 credits = q->size - q->in_use;
973 ndesc = calc_tx_descs(skb);
974
975 if (unlikely(credits < ndesc)) {
976 if (!netif_queue_stopped(dev)) {
977 netif_stop_queue(dev);
978 set_bit(TXQ_ETH, &qs->txq_stopped);
979 q->stops++;
980 dev_err(&adap->pdev->dev,
981 "%s: Tx ring %u full while queue awake!\n",
982 dev->name, q->cntxt_id & 7);
983 }
984 spin_unlock(&q->lock);
985 return NETDEV_TX_BUSY;
986 }
987
988 q->in_use += ndesc;
989 if (unlikely(credits - ndesc < q->stop_thres)) {
990 q->stops++;
991 netif_stop_queue(dev);
992 set_bit(TXQ_ETH, &qs->txq_stopped);
993 #if !USE_GTS
994 if (should_restart_tx(q) &&
995 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
996 q->restarts++;
997 netif_wake_queue(dev);
998 }
999 #endif
1000 }
1001
1002 gen = q->gen;
1003 q->unacked += ndesc;
1004 compl = (q->unacked & 8) << (S_WR_COMPL - 3);
1005 q->unacked &= 7;
1006 pidx = q->pidx;
1007 q->pidx += ndesc;
1008 if (q->pidx >= q->size) {
1009 q->pidx -= q->size;
1010 q->gen ^= 1;
1011 }
1012
1013 /* update port statistics */
1014 if (skb->ip_summed == CHECKSUM_PARTIAL)
1015 qs->port_stats[SGE_PSTAT_TX_CSUM]++;
1016 if (skb_shinfo(skb)->gso_size)
1017 qs->port_stats[SGE_PSTAT_TSO]++;
1018 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
1019 qs->port_stats[SGE_PSTAT_VLANINS]++;
1020
1021 dev->trans_start = jiffies;
1022 spin_unlock(&q->lock);
1023
1024 /*
1025 * We do not use Tx completion interrupts to free DMAd Tx packets.
1026 * This is good for performance but means that we rely on new Tx
1027 * packets arriving to run the destructors of completed packets,
1028 * which open up space in their sockets' send queues. Sometimes
1029 * we do not get such new packets causing Tx to stall. A single
1030 * UDP transmitter is a good example of this situation. We have
1031 * a clean up timer that periodically reclaims completed packets
1032 * but it doesn't run often enough (nor do we want it to) to prevent
1033 * lengthy stalls. A solution to this problem is to run the
1034 * destructor early, after the packet is queued but before it's DMAd.
1035 * A drawback is that we lie to socket memory accounting, but the amount
1036 * of extra memory is reasonable (limited by the number of Tx
1037 * descriptors), the packets do actually get freed quickly by new
1038 * packets almost always, and for protocols like TCP that wait for
1039 * acks to really free up the data the extra memory is even less.
1040 * On the positive side we run the destructors on the sending CPU
1041 * rather than on a potentially different completing CPU, usually a
1042 * good thing. We also run them without holding our Tx queue lock,
1043 * unlike what reclaim_completed_tx() would otherwise do.
1044 *
1045 * Run the destructor before telling the DMA engine about the packet
1046 * to make sure it doesn't complete and get freed prematurely.
1047 */
1048 if (likely(!skb_shared(skb)))
1049 skb_orphan(skb);
1050
1051 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
1052 check_ring_tx_db(adap, q);
1053 return NETDEV_TX_OK;
1054 }
1055
1056 /**
1057 * write_imm - write a packet into a Tx descriptor as immediate data
1058 * @d: the Tx descriptor to write
1059 * @skb: the packet
1060 * @len: the length of packet data to write as immediate data
1061 * @gen: the generation bit value to write
1062 *
1063 * Writes a packet as immediate data into a Tx descriptor. The packet
1064 * contains a work request at its beginning. We must write the packet
1065 * carefully so the SGE doesn't read accidentally before it's written in
1066 * its entirety.
1067 */
1068 static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1069 unsigned int len, unsigned int gen)
1070 {
1071 struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1072 struct work_request_hdr *to = (struct work_request_hdr *)d;
1073
1074 memcpy(&to[1], &from[1], len - sizeof(*from));
1075 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1076 V_WR_BCNTLFLT(len & 7));
1077 wmb();
1078 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1079 V_WR_LEN((len + 7) / 8));
1080 wr_gen2(d, gen);
1081 kfree_skb(skb);
1082 }
1083
1084 /**
1085 * check_desc_avail - check descriptor availability on a send queue
1086 * @adap: the adapter
1087 * @q: the send queue
1088 * @skb: the packet needing the descriptors
1089 * @ndesc: the number of Tx descriptors needed
1090 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1091 *
1092 * Checks if the requested number of Tx descriptors is available on an
1093 * SGE send queue. If the queue is already suspended or not enough
1094 * descriptors are available the packet is queued for later transmission.
1095 * Must be called with the Tx queue locked.
1096 *
1097 * Returns 0 if enough descriptors are available, 1 if there aren't
1098 * enough descriptors and the packet has been queued, and 2 if the caller
1099 * needs to retry because there weren't enough descriptors at the
1100 * beginning of the call but some freed up in the meantime.
1101 */
1102 static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1103 struct sk_buff *skb, unsigned int ndesc,
1104 unsigned int qid)
1105 {
1106 if (unlikely(!skb_queue_empty(&q->sendq))) {
1107 addq_exit:__skb_queue_tail(&q->sendq, skb);
1108 return 1;
1109 }
1110 if (unlikely(q->size - q->in_use < ndesc)) {
1111 struct sge_qset *qs = txq_to_qset(q, qid);
1112
1113 set_bit(qid, &qs->txq_stopped);
1114 smp_mb__after_clear_bit();
1115
1116 if (should_restart_tx(q) &&
1117 test_and_clear_bit(qid, &qs->txq_stopped))
1118 return 2;
1119
1120 q->stops++;
1121 goto addq_exit;
1122 }
1123 return 0;
1124 }
1125
1126 /**
1127 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1128 * @q: the SGE control Tx queue
1129 *
1130 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1131 * that send only immediate data (presently just the control queues) and
1132 * thus do not have any sk_buffs to release.
1133 */
1134 static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1135 {
1136 unsigned int reclaim = q->processed - q->cleaned;
1137
1138 q->in_use -= reclaim;
1139 q->cleaned += reclaim;
1140 }
1141
1142 static inline int immediate(const struct sk_buff *skb)
1143 {
1144 return skb->len <= WR_LEN && !skb->data_len;
1145 }
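/*
 * With WR_LEN == WR_FLITS * 8 this caps immediate packets at 120 bytes
 * (two generation bits) or 128 bytes (one), assuming the WR_FLITS
 * values implied by flit_desc_map above; the limit includes the work
 * request header already at the front of such packets.
 */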
1146
1147 /**
1148 * ctrl_xmit - send a packet through an SGE control Tx queue
1149 * @adap: the adapter
1150 * @q: the control queue
1151 * @skb: the packet
1152 *
1153 * Send a packet through an SGE control Tx queue. Packets sent through
1154 * a control queue must fit entirely as immediate data in a single Tx
1155 * descriptor and have no page fragments.
1156 */
1157 static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1158 struct sk_buff *skb)
1159 {
1160 int ret;
1161 struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1162
1163 if (unlikely(!immediate(skb))) {
1164 WARN_ON(1);
1165 dev_kfree_skb(skb);
1166 return NET_XMIT_SUCCESS;
1167 }
1168
1169 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1170 wrp->wr_lo = htonl(V_WR_TID(q->token));
1171
1172 spin_lock(&q->lock);
1173 again:reclaim_completed_tx_imm(q);
1174
1175 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1176 if (unlikely(ret)) {
1177 if (ret == 1) {
1178 spin_unlock(&q->lock);
1179 return NET_XMIT_CN;
1180 }
1181 goto again;
1182 }
1183
1184 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1185
1186 q->in_use++;
1187 if (++q->pidx >= q->size) {
1188 q->pidx = 0;
1189 q->gen ^= 1;
1190 }
1191 spin_unlock(&q->lock);
1192 wmb();
1193 t3_write_reg(adap, A_SG_KDOORBELL,
1194 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1195 return NET_XMIT_SUCCESS;
1196 }
1197
1198 /**
1199 * restart_ctrlq - restart a suspended control queue
1200 * @qs: the queue set containing the control queue
1201 *
1202 * Resumes transmission on a suspended Tx control queue.
1203 */
1204 static void restart_ctrlq(unsigned long data)
1205 {
1206 struct sk_buff *skb;
1207 struct sge_qset *qs = (struct sge_qset *)data;
1208 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1209 struct adapter *adap = qs->netdev->priv;
1210
1211 spin_lock(&q->lock);
1212 again:reclaim_completed_tx_imm(q);
1213
1214 while (q->in_use < q->size && (skb = __skb_dequeue(&q->sendq)) != NULL) {
1215
1216 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1217
1218 if (++q->pidx >= q->size) {
1219 q->pidx = 0;
1220 q->gen ^= 1;
1221 }
1222 q->in_use++;
1223 }
1224
1225 if (!skb_queue_empty(&q->sendq)) {
1226 set_bit(TXQ_CTRL, &qs->txq_stopped);
1227 smp_mb__after_clear_bit();
1228
1229 if (should_restart_tx(q) &&
1230 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1231 goto again;
1232 q->stops++;
1233 }
1234
1235 spin_unlock(&q->lock);
1236 t3_write_reg(adap, A_SG_KDOORBELL,
1237 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1238 }
1239
1240 /*
1241 * Send a management message through control queue 0
1242 */
1243 int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1244 {
1245 return ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1246 }
1247
1248 /**
1249 * deferred_unmap_destructor - unmap a packet when it is freed
1250 * @skb: the packet
1251 *
1252 * This is the packet destructor used for Tx packets that need to remain
1253 * mapped until they are freed rather than until their Tx descriptors are
1254 * freed.
1255 */
1256 static void deferred_unmap_destructor(struct sk_buff *skb)
1257 {
1258 int i;
1259 const dma_addr_t *p;
1260 const struct skb_shared_info *si;
1261 const struct deferred_unmap_info *dui;
1262 const struct unmap_info *ui = (struct unmap_info *)skb->cb;
1263
1264 dui = (struct deferred_unmap_info *)skb->head;
1265 p = dui->addr;
1266
1267 if (ui->len)
1268 pci_unmap_single(dui->pdev, *p++, ui->len, PCI_DMA_TODEVICE);
1269
1270 si = skb_shinfo(skb);
1271 for (i = 0; i < si->nr_frags; i++)
1272 pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
1273 PCI_DMA_TODEVICE);
1274 }
1275
1276 static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
1277 const struct sg_ent *sgl, int sgl_flits)
1278 {
1279 dma_addr_t *p;
1280 struct deferred_unmap_info *dui;
1281
1282 dui = (struct deferred_unmap_info *)skb->head;
1283 dui->pdev = pdev;
1284 for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
1285 *p++ = be64_to_cpu(sgl->addr[0]);
1286 *p++ = be64_to_cpu(sgl->addr[1]);
1287 }
1288 if (sgl_flits)
1289 *p = be64_to_cpu(sgl->addr[0]);
1290 }
1291
1292 /**
1293 * write_ofld_wr - write an offload work request
1294 * @adap: the adapter
1295 * @skb: the packet to send
1296 * @q: the Tx queue
1297 * @pidx: index of the first Tx descriptor to write
1298 * @gen: the generation value to use
1299 * @ndesc: number of descriptors the packet will occupy
1300 *
1301 * Write an offload work request to send the supplied packet. The packet
1302 * data already carry the work request with most fields populated.
1303 */
1304 static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1305 struct sge_txq *q, unsigned int pidx,
1306 unsigned int gen, unsigned int ndesc)
1307 {
1308 unsigned int sgl_flits, flits;
1309 struct work_request_hdr *from;
1310 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1311 struct tx_desc *d = &q->desc[pidx];
1312
1313 if (immediate(skb)) {
1314 q->sdesc[pidx].skb = NULL;
1315 write_imm(d, skb, skb->len, gen);
1316 return;
1317 }
1318
1319 /* Only TX_DATA builds SGLs */
1320
1321 from = (struct work_request_hdr *)skb->data;
1322 memcpy(&d->flit[1], &from[1], skb->h.raw - skb->data - sizeof(*from));
1323
1324 flits = (skb->h.raw - skb->data) / 8;
1325 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1326 sgl_flits = make_sgl(skb, sgp, skb->h.raw, skb->tail - skb->h.raw,
1327 adap->pdev);
1328 if (need_skb_unmap()) {
1329 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1330 skb->destructor = deferred_unmap_destructor;
1331 ((struct unmap_info *)skb->cb)->len = skb->tail - skb->h.raw;
1332 }
1333
1334 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1335 gen, from->wr_hi, from->wr_lo);
1336 }
1337
1338 /**
1339 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1340 * @skb: the packet
1341 *
1342 * Returns the number of Tx descriptors needed for the given offload
1343 * packet. These packets are already fully constructed.
1344 */
1345 static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1346 {
1347 unsigned int flits, cnt = skb_shinfo(skb)->nr_frags;
1348
1349 if (skb->len <= WR_LEN && cnt == 0)
1350 return 1; /* packet fits as immediate data */
1351
1352 flits = (skb->h.raw - skb->data) / 8; /* headers */
1353 if (skb->tail != skb->h.raw)
1354 cnt++;
1355 return flits_to_desc(flits + sgl_len(cnt));
1356 }
1357
1358 /**
1359 * ofld_xmit - send a packet through an offload queue
1360 * @adap: the adapter
1361 * @q: the Tx offload queue
1362 * @skb: the packet
1363 *
1364 * Send an offload packet through an SGE offload queue.
1365 */
1366 static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1367 struct sk_buff *skb)
1368 {
1369 int ret;
1370 unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1371
1372 spin_lock(&q->lock);
1373 again:reclaim_completed_tx(adap, q);
1374
1375 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1376 if (unlikely(ret)) {
1377 if (ret == 1) {
1378 skb->priority = ndesc; /* save for restart */
1379 spin_unlock(&q->lock);
1380 return NET_XMIT_CN;
1381 }
1382 goto again;
1383 }
1384
1385 gen = q->gen;
1386 q->in_use += ndesc;
1387 pidx = q->pidx;
1388 q->pidx += ndesc;
1389 if (q->pidx >= q->size) {
1390 q->pidx -= q->size;
1391 q->gen ^= 1;
1392 }
1393 spin_unlock(&q->lock);
1394
1395 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1396 check_ring_tx_db(adap, q);
1397 return NET_XMIT_SUCCESS;
1398 }
1399
1400 /**
1401 * restart_offloadq - restart a suspended offload queue
1402 * @qs: the queue set containing the offload queue
1403 *
1404 * Resumes transmission on a suspended Tx offload queue.
1405 */
1406 static void restart_offloadq(unsigned long data)
1407 {
1408 struct sk_buff *skb;
1409 struct sge_qset *qs = (struct sge_qset *)data;
1410 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1411 struct adapter *adap = qs->netdev->priv;
1412
1413 spin_lock(&q->lock);
1414 again:reclaim_completed_tx(adap, q);
1415
1416 while ((skb = skb_peek(&q->sendq)) != NULL) {
1417 unsigned int gen, pidx;
1418 unsigned int ndesc = skb->priority;
1419
1420 if (unlikely(q->size - q->in_use < ndesc)) {
1421 set_bit(TXQ_OFLD, &qs->txq_stopped);
1422 smp_mb__after_clear_bit();
1423
1424 if (should_restart_tx(q) &&
1425 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1426 goto again;
1427 q->stops++;
1428 break;
1429 }
1430
1431 gen = q->gen;
1432 q->in_use += ndesc;
1433 pidx = q->pidx;
1434 q->pidx += ndesc;
1435 if (q->pidx >= q->size) {
1436 q->pidx -= q->size;
1437 q->gen ^= 1;
1438 }
1439 __skb_unlink(skb, &q->sendq);
1440 spin_unlock(&q->lock);
1441
1442 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1443 spin_lock(&q->lock);
1444 }
1445 spin_unlock(&q->lock);
1446
1447 #if USE_GTS
1448 set_bit(TXQ_RUNNING, &q->flags);
1449 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1450 #endif
1451 t3_write_reg(adap, A_SG_KDOORBELL,
1452 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1453 }
1454
1455 /**
1456 * queue_set - return the queue set a packet should use
1457 * @skb: the packet
1458 *
1459 * Maps a packet to the SGE queue set it should use. The desired queue
1460 * set is carried in bits 1-3 in the packet's priority.
1461 */
1462 static inline int queue_set(const struct sk_buff *skb)
1463 {
1464 return skb->priority >> 1;
1465 }
1466
1467 /**
1468 * is_ctrl_pkt - return whether an offload packet is a control packet
1469 * @skb: the packet
1470 *
1471 * Determines whether an offload packet should use an OFLD or a CTRL
1472 * Tx queue. This is indicated by bit 0 in the packet's priority.
1473 */
1474 static inline int is_ctrl_pkt(const struct sk_buff *skb)
1475 {
1476 return skb->priority & 1;
1477 }
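/*
 * Example: a sender targeting queue set 2 with a control message sets
 * skb->priority = (2 << 1) | 1, so queue_set() returns 2 and
 * is_ctrl_pkt() returns 1 (see t3_offload_tx() below).
 */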
1478
1479 /**
1480 * t3_offload_tx - send an offload packet
1481 * @tdev: the offload device to send to
1482 * @skb: the packet
1483 *
1484 * Sends an offload packet. We use the packet priority to select the
1485 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1486 * should be sent as regular or control, bits 1-3 select the queue set.
1487 */
1488 int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1489 {
1490 struct adapter *adap = tdev2adap(tdev);
1491 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1492
1493 if (unlikely(is_ctrl_pkt(skb)))
1494 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1495
1496 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1497 }
1498
1499 /**
1500 * offload_enqueue - add an offload packet to an SGE offload receive queue
1501 * @q: the SGE response queue
1502 * @skb: the packet
1503 *
1504 * Add a new offload packet to an SGE response queue's offload packet
1505 * queue. If the packet is the first on the queue it schedules the RX
1506 * softirq to process the queue.
1507 */
1508 static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1509 {
1510 skb->next = skb->prev = NULL;
1511 if (q->rx_tail)
1512 q->rx_tail->next = skb;
1513 else {
1514 struct sge_qset *qs = rspq_to_qset(q);
1515
1516 if (__netif_rx_schedule_prep(qs->netdev))
1517 __netif_rx_schedule(qs->netdev);
1518 q->rx_head = skb;
1519 }
1520 q->rx_tail = skb;
1521 }
1522
1523 /**
1524 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1525 * @tdev: the offload device that will be receiving the packets
1526 * @q: the SGE response queue that assembled the bundle
1527 * @skbs: the partial bundle
1528 * @n: the number of packets in the bundle
1529 *
1530 * Delivers a (partial) bundle of Rx offload packets to an offload device.
1531 */
1532 static inline void deliver_partial_bundle(struct t3cdev *tdev,
1533 struct sge_rspq *q,
1534 struct sk_buff *skbs[], int n)
1535 {
1536 if (n) {
1537 q->offload_bundles++;
1538 tdev->recv(tdev, skbs, n);
1539 }
1540 }
1541
1542 /**
1543 * ofld_poll - NAPI handler for offload packets in interrupt mode
1544 * @dev: the network device doing the polling
1545 * @budget: polling budget
1546 *
1547 * The NAPI handler for offload packets when a response queue is serviced
1548 * by the hard interrupt handler, i.e., when it's operating in non-polling
1549 * mode. Creates small packet batches and sends them through the offload
1550 * receive handler. Batches need to be of modest size as we do prefetches
1551 * on the packets in each.
1552 */
1553 static int ofld_poll(struct net_device *dev, int *budget)
1554 {
1555 struct adapter *adapter = dev->priv;
1556 struct sge_qset *qs = dev2qset(dev);
1557 struct sge_rspq *q = &qs->rspq;
1558 int work_done, limit = min(*budget, dev->quota), avail = limit;
1559
1560 while (avail) {
1561 struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE];
1562 int ngathered;
1563
1564 spin_lock_irq(&q->lock);
1565 head = q->rx_head;
1566 if (!head) {
1567 work_done = limit - avail;
1568 *budget -= work_done;
1569 dev->quota -= work_done;
1570 __netif_rx_complete(dev);
1571 spin_unlock_irq(&q->lock);
1572 return 0;
1573 }
1574
1575 tail = q->rx_tail;
1576 q->rx_head = q->rx_tail = NULL;
1577 spin_unlock_irq(&q->lock);
1578
1579 for (ngathered = 0; avail && head; avail--) {
1580 prefetch(head->data);
1581 skbs[ngathered] = head;
1582 head = head->next;
1583 skbs[ngathered]->next = NULL;
1584 if (++ngathered == RX_BUNDLE_SIZE) {
1585 q->offload_bundles++;
1586 adapter->tdev.recv(&adapter->tdev, skbs,
1587 ngathered);
1588 ngathered = 0;
1589 }
1590 }
1591 if (head) { /* splice remaining packets back onto Rx queue */
1592 spin_lock_irq(&q->lock);
1593 tail->next = q->rx_head;
1594 if (!q->rx_head)
1595 q->rx_tail = tail;
1596 q->rx_head = head;
1597 spin_unlock_irq(&q->lock);
1598 }
1599 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1600 }
1601 work_done = limit - avail;
1602 *budget -= work_done;
1603 dev->quota -= work_done;
1604 return 1;
1605 }
1606
1607 /**
1608 * rx_offload - process a received offload packet
1609 * @tdev: the offload device receiving the packet
1610 * @rq: the response queue that received the packet
1611 * @skb: the packet
1612 * @rx_gather: a gather list of packets if we are building a bundle
1613 * @gather_idx: index of the next available slot in the bundle
1614 *
1615 * Process an ingress offload packet and add it to the offload ingress
1616 * queue. Returns the index of the next available slot in the bundle.
1617 */
1618 static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1619 struct sk_buff *skb, struct sk_buff *rx_gather[],
1620 unsigned int gather_idx)
1621 {
1622 rq->offload_pkts++;
1623 skb_reset_mac_header(skb);
1624 skb_reset_network_header(skb);
1625 skb_reset_transport_header(skb);
1626
1627 if (rq->polling) {
1628 rx_gather[gather_idx++] = skb;
1629 if (gather_idx == RX_BUNDLE_SIZE) {
1630 tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1631 gather_idx = 0;
1632 rq->offload_bundles++;
1633 }
1634 } else
1635 offload_enqueue(rq, skb);
1636
1637 return gather_idx;
1638 }
1639
1640 /**
1641 * restart_tx - check whether to restart suspended Tx queues
1642 * @qs: the queue set to resume
1643 *
1644 * Restarts suspended Tx queues of an SGE queue set if they have enough
1645 * free resources to resume operation.
1646 */
1647 static void restart_tx(struct sge_qset *qs)
1648 {
1649 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1650 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1651 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1652 qs->txq[TXQ_ETH].restarts++;
1653 if (netif_running(qs->netdev))
1654 netif_wake_queue(qs->netdev);
1655 }
1656
1657 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1658 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1659 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1660 qs->txq[TXQ_OFLD].restarts++;
1661 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1662 }
1663 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
1664 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1665 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
1666 qs->txq[TXQ_CTRL].restarts++;
1667 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
1668 }
1669 }
1670
1671 /**
1672 * rx_eth - process an ingress ethernet packet
1673 * @adap: the adapter
1674 * @rq: the response queue that received the packet
1675 * @skb: the packet
1676 * @pad: amount of padding at the start of the buffer
1677 *
1678 * Process an ingress ethernet packet and deliver it to the stack.
1679 * The padding is 2 if the packet was delivered in an Rx buffer and 0
1680 * if it was immediate data in a response.
1681 */
1682 static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1683 struct sk_buff *skb, int pad)
1684 {
1685 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
1686 struct port_info *pi;
1687
1688 skb_pull(skb, sizeof(*p) + pad);
1689 skb->dev->last_rx = jiffies;
1690 skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
1691 pi = netdev_priv(skb->dev);
1692 if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff &&
1693 !p->fragment) {
1694 rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
1695 skb->ip_summed = CHECKSUM_UNNECESSARY;
1696 } else
1697 skb->ip_summed = CHECKSUM_NONE;
1698
1699 if (unlikely(p->vlan_valid)) {
1700 struct vlan_group *grp = pi->vlan_grp;
1701
1702 rspq_to_qset(rq)->port_stats[SGE_PSTAT_VLANEX]++;
1703 if (likely(grp))
1704 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
1705 rq->polling);
1706 else
1707 dev_kfree_skb_any(skb);
1708 } else if (rq->polling)
1709 netif_receive_skb(skb);
1710 else
1711 netif_rx(skb);
1712 }
1713
1714 #define SKB_DATA_SIZE 128
1715
1716 static void skb_data_init(struct sk_buff *skb, struct sge_fl_page *p,
1717 unsigned int len)
1718 {
1719 skb->len = len;
1720 if (len <= SKB_DATA_SIZE) {
1721 memcpy(skb->data, p->va, len);
1722 skb->tail += len;
1723 put_page(p->frag.page);
1724 } else {
1725 memcpy(skb->data, p->va, SKB_DATA_SIZE);
1726 skb_shinfo(skb)->frags[0].page = p->frag.page;
1727 skb_shinfo(skb)->frags[0].page_offset =
1728 p->frag.page_offset + SKB_DATA_SIZE;
1729 skb_shinfo(skb)->frags[0].size = len - SKB_DATA_SIZE;
1730 skb_shinfo(skb)->nr_frags = 1;
1731 skb->data_len = len - SKB_DATA_SIZE;
1732 skb->tail += SKB_DATA_SIZE;
1733 skb->truesize += skb->data_len;
1734 }
1735 }
1736
1737 /**
1738 * get_packet - return the next ingress packet buffer from a free list
1739 * @adap: the adapter that received the packet
1740 * @fl: the SGE free list holding the packet
1741 * @len: the packet length including any SGE padding
1742 * @drop_thres: # of remaining buffers before we start dropping packets
1743 *
1744 * Get the next packet from a free list and complete setup of the
1745 * sk_buff. If the packet is small we make a copy and recycle the
1746 * original buffer, otherwise we use the original buffer itself. If a
1747 * positive drop threshold is supplied packets are dropped and their
1748 * buffers recycled if (a) the number of remaining buffers is under the
1749 * threshold and the packet is too big to copy, or (b) the packet should
1750 * be copied but there is no memory for the copy.
1751 */
1752 static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
1753 unsigned int len, unsigned int drop_thres)
1754 {
1755 struct sk_buff *skb = NULL;
1756 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
1757
1758 prefetch(sd->t.skb->data);
1759
1760 if (len <= SGE_RX_COPY_THRES) {
1761 skb = alloc_skb(len, GFP_ATOMIC);
1762 if (likely(skb != NULL)) {
1763 struct rx_desc *d = &fl->desc[fl->cidx];
1764 dma_addr_t mapping =
1765 (dma_addr_t)((u64) be32_to_cpu(d->addr_hi) << 32 |
1766 be32_to_cpu(d->addr_lo));
1767
1768 __skb_put(skb, len);
1769 pci_dma_sync_single_for_cpu(adap->pdev, mapping, len,
1770 PCI_DMA_FROMDEVICE);
1771 memcpy(skb->data, sd->t.skb->data, len);
1772 pci_dma_sync_single_for_device(adap->pdev, mapping, len,
1773 PCI_DMA_FROMDEVICE);
1774 } else if (!drop_thres)
1775 goto use_orig_buf;
1776 recycle:
1777 recycle_rx_buf(adap, fl, fl->cidx);
1778 return skb;
1779 }
1780
1781 if (unlikely(fl->credits < drop_thres))
1782 goto recycle;
1783
1784 use_orig_buf:
1785 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
1786 fl->buf_size, PCI_DMA_FROMDEVICE);
1787 skb = sd->t.skb;
1788 skb_put(skb, len);
1789 __refill_fl(adap, fl);
1790 return skb;
1791 }
1792
1793 /**
1794 * handle_rsp_cntrl_info - handles control information in a response
1795 * @qs: the queue set corresponding to the response
1796 * @flags: the response control flags
1797 *
1798 * Handles the control information of an SGE response, such as GTS
1799 * indications and completion credits for the queue set's Tx queues.
1800 * HW coalesces credits; we don't do any extra SW coalescing.
1801 */
1802 static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
1803 {
1804 unsigned int credits;
1805
1806 #if USE_GTS
1807 if (flags & F_RSPD_TXQ0_GTS)
1808 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
1809 #endif
1810
1811 credits = G_RSPD_TXQ0_CR(flags);
1812 if (credits)
1813 qs->txq[TXQ_ETH].processed += credits;
1814
1815 credits = G_RSPD_TXQ2_CR(flags);
1816 if (credits)
1817 qs->txq[TXQ_CTRL].processed += credits;
1818
1819 # if USE_GTS
1820 if (flags & F_RSPD_TXQ1_GTS)
1821 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
1822 # endif
1823 credits = G_RSPD_TXQ1_CR(flags);
1824 if (credits)
1825 qs->txq[TXQ_OFLD].processed += credits;
1826 }
1827
1828 /**
1829 * check_ring_db - check if we need to ring any doorbells
1830 * @adapter: the adapter
1831 * @qs: the queue set whose Tx queues are to be examined
1832 * @sleeping: indicates which Tx queue sent GTS
1833 *
1834 * Checks if some of a queue set's Tx queues need to ring their doorbells
1835 * to resume transmission after idling while they still have unprocessed
1836 * descriptors.
1837 */
1838 static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
1839 unsigned int sleeping)
1840 {
1841 if (sleeping & F_RSPD_TXQ0_GTS) {
1842 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1843
1844 if (txq->cleaned + txq->in_use != txq->processed &&
1845 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1846 set_bit(TXQ_RUNNING, &txq->flags);
1847 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1848 V_EGRCNTX(txq->cntxt_id));
1849 }
1850 }
1851
1852 if (sleeping & F_RSPD_TXQ1_GTS) {
1853 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
1854
1855 if (txq->cleaned + txq->in_use != txq->processed &&
1856 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1857 set_bit(TXQ_RUNNING, &txq->flags);
1858 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1859 V_EGRCNTX(txq->cntxt_id));
1860 }
1861 }
1862 }
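
/*
 * Editor's note: txq->cleaned + txq->in_use is the total number of
 * descriptors ever handed to a queue, while txq->processed counts those
 * the HW has completed.  The inequality above therefore means the queue
 * signalled GTS (went to sleep) while descriptors were still
 * outstanding, and the doorbell write restarts the fetch engine for
 * that egress context.
 */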
1863
1864 /**
1865 * is_new_response - check if a response is newly written
1866 * @r: the response descriptor
1867 * @q: the response queue
1868 *
1869 * Returns true if a response descriptor contains a yet unprocessed
1870 * response.
1871 */
1872 static inline int is_new_response(const struct rsp_desc *r,
1873 const struct sge_rspq *q)
1874 {
1875 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
1876 }
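
/*
 * Editor's note: a short walk-through of the generation-bit scheme.
 * The queue starts with gen == 1 (see t3_sge_alloc_qset()) and the HW
 * tags every response it writes with the current generation.  Whenever
 * SW wraps cidx past the end of the ring it flips q->gen (see
 * process_responses()), so entries left over from the previous pass no
 * longer compare equal and are not mistaken for new responses until the
 * HW overwrites them.
 */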
1877
1878 #define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
1879 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
1880 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
1881 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
1882 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
1883
1884 /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
1885 #define NOMEM_INTR_DELAY 2500
1886
1887 /**
1888 * process_responses - process responses from an SGE response queue
1889 * @adap: the adapter
1890 * @qs: the queue set to which the response queue belongs
1891 * @budget: how many responses can be processed in this round
1892 *
1893 * Process responses from an SGE response queue up to the supplied budget.
1894 * Responses include received packets as well as credits and other events
1895 * for the queues that belong to the response queue's queue set.
1896 * A negative budget is effectively unlimited.
1897 *
1898 * Additionally choose the interrupt holdoff time for the next interrupt
1899 * on this queue. If the system is under memory shortage use a fairly
1900 * long delay to help recovery.
1901 */
1902 static int process_responses(struct adapter *adap, struct sge_qset *qs,
1903 int budget)
1904 {
1905 struct sge_rspq *q = &qs->rspq;
1906 struct rsp_desc *r = &q->desc[q->cidx];
1907 int budget_left = budget;
1908 unsigned int sleeping = 0;
1909 struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
1910 int ngathered = 0;
1911
1912 q->next_holdoff = q->holdoff_tmr;
1913
1914 while (likely(budget_left && is_new_response(r, q))) {
1915 int eth, ethpad = 2;
1916 struct sk_buff *skb = NULL;
1917 u32 len, flags = ntohl(r->flags);
1918 u32 rss_hi = *(const u32 *)r, rss_lo = r->rss_hdr.rss_hash_val;
1919
1920 eth = r->rss_hdr.opcode == CPL_RX_PKT;
1921
1922 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
1923 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
1924 if (!skb)
1925 goto no_mem;
1926
1927 memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
1928 skb->data[0] = CPL_ASYNC_NOTIF;
1929 rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
1930 q->async_notif++;
1931 } else if (flags & F_RSPD_IMM_DATA_VALID) {
1932 skb = get_imm_packet(r);
1933 if (unlikely(!skb)) {
1934 no_mem:
1935 q->next_holdoff = NOMEM_INTR_DELAY;
1936 q->nomem++;
1937 /* consume one credit since we tried */
1938 budget_left--;
1939 break;
1940 }
1941 q->imm_data++;
1942 ethpad = 0;
1943 } else if ((len = ntohl(r->len_cq)) != 0) {
1944 struct sge_fl *fl =
1945 (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
1946
1947 if (fl->buf_size == RX_PAGE_SIZE) {
1948 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
1949 struct sge_fl_page *p = &sd->t.page;
1950
1951 prefetch(p->va);
1952 prefetch(p->va + L1_CACHE_BYTES);
1953
1954 __refill_fl(adap, fl);
1955
1956 pci_unmap_single(adap->pdev,
1957 pci_unmap_addr(sd, dma_addr),
1958 fl->buf_size,
1959 PCI_DMA_FROMDEVICE);
1960
1961 if (eth) {
1962 if (unlikely(fl->credits <
1963 SGE_RX_DROP_THRES))
1964 goto eth_recycle;
1965
1966 skb = alloc_skb(SKB_DATA_SIZE,
1967 GFP_ATOMIC);
1968 if (unlikely(!skb)) {
1969 eth_recycle:
1970 q->rx_drops++;
1971 recycle_rx_buf(adap, fl,
1972 fl->cidx);
1973 goto eth_done;
1974 }
1975 } else {
1976 skb = alloc_skb(SKB_DATA_SIZE,
1977 GFP_ATOMIC);
1978 if (unlikely(!skb))
1979 goto no_mem;
1980 }
1981
1982 skb_data_init(skb, p, G_RSPD_LEN(len));
1983 eth_done:
1984 fl->credits--;
1985 q->eth_pkts++;
1986 } else {
1987 fl->credits--;
1988 skb = get_packet(adap, fl, G_RSPD_LEN(len),
1989 eth ? SGE_RX_DROP_THRES : 0);
1990 }
1991
1992 if (++fl->cidx == fl->size)
1993 fl->cidx = 0;
1994 } else
1995 q->pure_rsps++;
1996
1997 if (flags & RSPD_CTRL_MASK) {
1998 sleeping |= flags & RSPD_GTS_MASK;
1999 handle_rsp_cntrl_info(qs, flags);
2000 }
2001
2002 r++;
2003 if (unlikely(++q->cidx == q->size)) {
2004 q->cidx = 0;
2005 q->gen ^= 1;
2006 r = q->desc;
2007 }
2008 prefetch(r);
2009
2010 if (++q->credits >= (q->size / 4)) {
2011 refill_rspq(adap, q, q->credits);
2012 q->credits = 0;
2013 }
2014
2015 if (skb) {
2016 /* Preserve the RSS info in csum & priority */
2017 skb->csum = rss_hi;
2018 skb->priority = rss_lo;
2019
2020 if (eth)
2021 rx_eth(adap, q, skb, ethpad);
2022 else {
2023 if (unlikely(r->rss_hdr.opcode ==
2024 CPL_TRACE_PKT))
2025 __skb_pull(skb, ethpad);
2026
2027 ngathered = rx_offload(&adap->tdev, q,
2028 skb, offload_skbs,
2029 ngathered);
2030 }
2031 }
2032 --budget_left;
2033 }
2034
2035 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
2036 if (sleeping)
2037 check_ring_db(adap, qs, sleeping);
2038
2039 smp_mb(); /* commit Tx queue .processed updates */
2040 if (unlikely(qs->txq_stopped != 0))
2041 restart_tx(qs);
2042
2043 budget -= budget_left;
2044 return budget;
2045 }
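
/*
 * Editor's note: the value returned above is the number of responses
 * actually consumed (budget minus what was left).  Note also that
 * response-queue credits are handed back to the HW in quarter-ring
 * batches through refill_rspq() rather than once per response.
 */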
2046
2047 static inline int is_pure_response(const struct rsp_desc *r)
2048 {
2049 u32 n = ntohl(r->flags) & (F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
2050
2051 return (n | r->len_cq) == 0;
2052 }
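
/*
 * Editor's note: a response is "pure" when it carries no data at all:
 * neither the async-notification nor the immediate-data flag is set and
 * len_cq is zero.  len_cq is big-endian, but since it is only compared
 * against zero no byte swap is needed before OR-ing it in.
 */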
2053
2054 /**
2055 * napi_rx_handler - the NAPI handler for Rx processing
2056 * @dev: the net device
2057 * @budget: how many packets we can process in this round
2058 *
2059 * Handler for new data events when using NAPI.
2060 */
2061 static int napi_rx_handler(struct net_device *dev, int *budget)
2062 {
2063 struct adapter *adap = dev->priv;
2064 struct sge_qset *qs = dev2qset(dev);
2065 int effective_budget = min(*budget, dev->quota);
2066
2067 int work_done = process_responses(adap, qs, effective_budget);
2068 *budget -= work_done;
2069 dev->quota -= work_done;
2070
2071 if (work_done >= effective_budget)
2072 return 1;
2073
2074 netif_rx_complete(dev);
2075
2076 /*
2077 * Because we don't atomically flush the following write it is
2078 * possible that in very rare cases it can reach the device in a way
2079 * that races with a new response being written plus an error interrupt
2080 * causing the NAPI interrupt handler below to return unhandled status
2081 * to the OS. To protect against this would require flushing the write
2082 * and doing both the write and the flush with interrupts off. Way too
2083 * expensive and unjustifiable given the rarity of the race.
2084 *
2085 * The race cannot happen at all with MSI-X.
2086 */
2087 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2088 V_NEWTIMER(qs->rspq.next_holdoff) |
2089 V_NEWINDEX(qs->rspq.cidx));
2090 return 0;
2091 }
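
/*
 * Editor's note: this follows the old (pre-2.6.24) NAPI contract: the
 * poll handler decrements both *budget and dev->quota by the work done,
 * returns 1 to remain on the poll list when its budget was exhausted,
 * and returns 0 after netif_rx_complete() once the queue is drained.
 */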
2092
2093 /*
2094 * Returns true if the device is already scheduled for polling.
2095 */
2096 static inline int napi_is_scheduled(struct net_device *dev)
2097 {
2098 return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
2099 }
2100
2101 /**
2102 * process_pure_responses - process pure responses from a response queue
2103 * @adap: the adapter
2104 * @qs: the queue set owning the response queue
2105 * @r: the first pure response to process
2106 *
2107 * A simpler version of process_responses() that handles only pure (i.e.,
2108 * non-data-carrying) responses. Such responses are too light-weight to
2109 * justify calling a softirq under NAPI, so we handle them specially in
2110 * the interrupt handler. The function is called with a pointer to a
2111 * response, which the caller must ensure is a valid pure response.
2112 *
2113 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2114 */
2115 static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2116 struct rsp_desc *r)
2117 {
2118 struct sge_rspq *q = &qs->rspq;
2119 unsigned int sleeping = 0;
2120
2121 do {
2122 u32 flags = ntohl(r->flags);
2123
2124 r++;
2125 if (unlikely(++q->cidx == q->size)) {
2126 q->cidx = 0;
2127 q->gen ^= 1;
2128 r = q->desc;
2129 }
2130 prefetch(r);
2131
2132 if (flags & RSPD_CTRL_MASK) {
2133 sleeping |= flags & RSPD_GTS_MASK;
2134 handle_rsp_cntrl_info(qs, flags);
2135 }
2136
2137 q->pure_rsps++;
2138 if (++q->credits >= (q->size / 4)) {
2139 refill_rspq(adap, q, q->credits);
2140 q->credits = 0;
2141 }
2142 } while (is_new_response(r, q) && is_pure_response(r));
2143
2144 if (sleeping)
2145 check_ring_db(adap, qs, sleeping);
2146
2147 smp_mb(); /* commit Tx queue .processed updates */
2148 if (unlikely(qs->txq_stopped != 0))
2149 restart_tx(qs);
2150
2151 return is_new_response(r, q);
2152 }
2153
2154 /**
2155 * handle_responses - decide what to do with new responses in NAPI mode
2156 * @adap: the adapter
2157 * @q: the response queue
2158 *
2159 * This is used by the NAPI interrupt handlers to decide what to do with
2160 * new SGE responses. If there are no new responses it returns -1. If
2161 * there are new responses and they are pure (i.e., non-data-carrying)
2162 * it handles them straight in hard interrupt context as they are very
2163 * cheap and don't deliver any packets. Finally, if there are any data
2164 * signaling responses it schedules the NAPI handler. Returns 1 if it
2165 * schedules NAPI, 0 if all new responses were pure.
2166 *
2167 * The caller must ascertain NAPI is not already running.
2168 */
2169 static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2170 {
2171 struct sge_qset *qs = rspq_to_qset(q);
2172 struct rsp_desc *r = &q->desc[q->cidx];
2173
2174 if (!is_new_response(r, q))
2175 return -1;
2176 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2177 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2178 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2179 return 0;
2180 }
2181 if (likely(__netif_rx_schedule_prep(qs->netdev)))
2182 __netif_rx_schedule(qs->netdev);
2183 return 1;
2184 }
2185
2186 /*
2187 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2188 * (i.e., response queue serviced in hard interrupt).
2189 */
2190 irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2191 {
2192 struct sge_qset *qs = cookie;
2193 struct adapter *adap = qs->netdev->priv;
2194 struct sge_rspq *q = &qs->rspq;
2195
2196 spin_lock(&q->lock);
2197 if (process_responses(adap, qs, -1) == 0)
2198 q->unhandled_irqs++;
2199 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2200 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2201 spin_unlock(&q->lock);
2202 return IRQ_HANDLED;
2203 }
2204
2205 /*
2206 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2207 * (i.e., response queue serviced by NAPI polling).
2208 */
2209 irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2210 {
2211 struct sge_qset *qs = cookie;
2212 struct adapter *adap = qs->netdev->priv;
2213 struct sge_rspq *q = &qs->rspq;
2214
2215 spin_lock(&q->lock);
2216 BUG_ON(napi_is_scheduled(qs->netdev));
2217
2218 if (handle_responses(adap, q) < 0)
2219 q->unhandled_irqs++;
2220 spin_unlock(&q->lock);
2221 return IRQ_HANDLED;
2222 }
2223
2224 /*
2225 * The non-NAPI MSI interrupt handler. This needs to handle data events from
2226 * SGE response queues as well as error and other async events as they all use
2227 * the same MSI vector. We use one SGE response queue per port in this mode
2228 * and protect all response queues with queue 0's lock.
2229 */
2230 static irqreturn_t t3_intr_msi(int irq, void *cookie)
2231 {
2232 int new_packets = 0;
2233 struct adapter *adap = cookie;
2234 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2235
2236 spin_lock(&q->lock);
2237
2238 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2239 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2240 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2241 new_packets = 1;
2242 }
2243
2244 if (adap->params.nports == 2 &&
2245 process_responses(adap, &adap->sge.qs[1], -1)) {
2246 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2247
2248 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2249 V_NEWTIMER(q1->next_holdoff) |
2250 V_NEWINDEX(q1->cidx));
2251 new_packets = 1;
2252 }
2253
2254 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2255 q->unhandled_irqs++;
2256
2257 spin_unlock(&q->lock);
2258 return IRQ_HANDLED;
2259 }
2260
2261 static int rspq_check_napi(struct net_device *dev, struct sge_rspq *q)
2262 {
2263 if (!napi_is_scheduled(dev) && is_new_response(&q->desc[q->cidx], q)) {
2264 if (likely(__netif_rx_schedule_prep(dev)))
2265 __netif_rx_schedule(dev);
2266 return 1;
2267 }
2268 return 0;
2269 }
2270
2271 /*
2272 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2273 * by NAPI polling). Handles data events from SGE response queues as well as
2274 * error and other async events as they all use the same MSI vector. We use
2275 * one SGE response queue per port in this mode and protect all response
2276 * queues with queue 0's lock.
2277 */
2278 irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
2279 {
2280 int new_packets;
2281 struct adapter *adap = cookie;
2282 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2283
2284 spin_lock(&q->lock);
2285
2286 new_packets = rspq_check_napi(adap->sge.qs[0].netdev, q);
2287 if (adap->params.nports == 2)
2288 new_packets += rspq_check_napi(adap->sge.qs[1].netdev,
2289 &adap->sge.qs[1].rspq);
2290 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2291 q->unhandled_irqs++;
2292
2293 spin_unlock(&q->lock);
2294 return IRQ_HANDLED;
2295 }
2296
2297 /*
2298 * A helper function that processes responses and issues GTS.
2299 */
2300 static inline int process_responses_gts(struct adapter *adap,
2301 struct sge_rspq *rq)
2302 {
2303 int work;
2304
2305 work = process_responses(adap, rspq_to_qset(rq), -1);
2306 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2307 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2308 return work;
2309 }
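
/*
 * Editor's note: the A_SG_GTS write is the response-queue doorbell.  It
 * packs the queue context (V_RSPQ), the holdoff timer to arm for the
 * next interrupt (V_NEWTIMER, in 0.1 us ticks, see t3_sge_init()), and
 * the new consumer index (V_NEWINDEX) acknowledging how far SW has
 * processed the ring.
 */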
2310
2311 /*
2312 * The legacy INTx interrupt handler. This needs to handle data events from
2313 * SGE response queues as well as error and other async events as they all use
2314 * the same interrupt pin. We use one SGE response queue per port in this mode
2315 * and protect all response queues with queue 0's lock.
2316 */
2317 static irqreturn_t t3_intr(int irq, void *cookie)
2318 {
2319 int work_done, w0, w1;
2320 struct adapter *adap = cookie;
2321 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2322 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2323
2324 spin_lock(&q0->lock);
2325
2326 w0 = is_new_response(&q0->desc[q0->cidx], q0);
2327 w1 = adap->params.nports == 2 &&
2328 is_new_response(&q1->desc[q1->cidx], q1);
2329
2330 if (likely(w0 | w1)) {
2331 t3_write_reg(adap, A_PL_CLI, 0);
2332 t3_read_reg(adap, A_PL_CLI); /* flush */
2333
2334 if (likely(w0))
2335 process_responses_gts(adap, q0);
2336
2337 if (w1)
2338 process_responses_gts(adap, q1);
2339
2340 work_done = w0 | w1;
2341 } else
2342 work_done = t3_slow_intr_handler(adap);
2343
2344 spin_unlock(&q0->lock);
2345 return IRQ_RETVAL(work_done != 0);
2346 }
2347
2348 /*
2349 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2350 * Handles data events from SGE response queues as well as error and other
2351 * async events as they all use the same interrupt pin. We use one SGE
2352 * response queue per port in this mode and protect all response queues with
2353 * queue 0's lock.
2354 */
2355 static irqreturn_t t3b_intr(int irq, void *cookie)
2356 {
2357 u32 map;
2358 struct adapter *adap = cookie;
2359 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2360
2361 t3_write_reg(adap, A_PL_CLI, 0);
2362 map = t3_read_reg(adap, A_SG_DATA_INTR);
2363
2364 if (unlikely(!map)) /* shared interrupt, most likely */
2365 return IRQ_NONE;
2366
2367 spin_lock(&q0->lock);
2368
2369 if (unlikely(map & F_ERRINTR))
2370 t3_slow_intr_handler(adap);
2371
2372 if (likely(map & 1))
2373 process_responses_gts(adap, q0);
2374
2375 if (map & 2)
2376 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2377
2378 spin_unlock(&q0->lock);
2379 return IRQ_HANDLED;
2380 }
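
/*
 * Editor's note: on T3B parts the A_SG_DATA_INTR read reports which
 * response queues have new entries (bit 0 for qs[0], bit 1 for qs[1])
 * along with F_ERRINTR for slow-path events; a zero map means the
 * interrupt came from another device sharing the INTx line, hence
 * IRQ_NONE.
 */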
2381
2382 /*
2383 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2384 * Handles data events from SGE response queues as well as error and other
2385 * async events as they all use the same interrupt pin. We use one SGE
2386 * response queue per port in this mode and protect all response queues with
2387 * queue 0's lock.
2388 */
2389 static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2390 {
2391 u32 map;
2392 struct net_device *dev;
2393 struct adapter *adap = cookie;
2394 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2395
2396 t3_write_reg(adap, A_PL_CLI, 0);
2397 map = t3_read_reg(adap, A_SG_DATA_INTR);
2398
2399 if (unlikely(!map)) /* shared interrupt, most likely */
2400 return IRQ_NONE;
2401
2402 spin_lock(&q0->lock);
2403
2404 if (unlikely(map & F_ERRINTR))
2405 t3_slow_intr_handler(adap);
2406
2407 if (likely(map & 1)) {
2408 dev = adap->sge.qs[0].netdev;
2409
2410 if (likely(__netif_rx_schedule_prep(dev)))
2411 __netif_rx_schedule(dev);
2412 }
2413 if (map & 2) {
2414 dev = adap->sge.qs[1].netdev;
2415
2416 if (likely(__netif_rx_schedule_prep(dev)))
2417 __netif_rx_schedule(dev);
2418 }
2419
2420 spin_unlock(&q0->lock);
2421 return IRQ_HANDLED;
2422 }
2423
2424 /**
2425 * t3_intr_handler - select the top-level interrupt handler
2426 * @adap: the adapter
2427 * @polling: whether using NAPI to service response queues
2428 *
2429 * Selects the top-level interrupt handler based on the type of interrupts
2430 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the
2431 * response queues.
2432 */
2433 intr_handler_t t3_intr_handler(struct adapter *adap, int polling)
2434 {
2435 if (adap->flags & USING_MSIX)
2436 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2437 if (adap->flags & USING_MSI)
2438 return polling ? t3_intr_msi_napi : t3_intr_msi;
2439 if (adap->params.rev > 0)
2440 return polling ? t3b_intr_napi : t3b_intr;
2441 return t3_intr;
2442 }
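
/*
 * Editor's summary of the selection above:
 *
 *	MSI-X:		t3_sge_intr_msix_napi / t3_sge_intr_msix
 *			(one vector and handler per queue set)
 *	MSI:		t3_intr_msi_napi / t3_intr_msi
 *	INTx, rev > 0:	t3b_intr_napi / t3b_intr
 *	INTx, rev 0:	t3_intr (rev 0 parts default to non-NAPI
 *			operation, see t3_sge_prep())
 */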
2443
2444 /**
2445 * t3_sge_err_intr_handler - SGE async event interrupt handler
2446 * @adapter: the adapter
2447 *
2448 * Interrupt handler for SGE asynchronous (non-data) events.
2449 */
2450 void t3_sge_err_intr_handler(struct adapter *adapter)
2451 {
2452 unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2453
2454 if (status & F_RSPQCREDITOVERFOW)
2455 CH_ALERT(adapter, "SGE response queue credit overflow\n");
2456
2457 if (status & F_RSPQDISABLED) {
2458 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2459
2460 CH_ALERT(adapter,
2461 "packet delivered to disabled response queue "
2462 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2463 }
2464
2465 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2466 if (status & (F_RSPQCREDITOVERFOW | F_RSPQDISABLED))
2467 t3_fatal_err(adapter);
2468 }
2469
2470 /**
2471 * sge_timer_cb - perform periodic maintenance of an SGE qset
2472 * @data: the SGE queue set to maintain
2473 *
2474 * Runs periodically from a timer to perform maintenance of an SGE queue
2475 * set. It performs two tasks:
2476 *
2477 * a) Cleans up any completed Tx descriptors that may still be pending.
2478 * Normal descriptor cleanup happens when new packets are added to a Tx
2479 * queue so this timer is relatively infrequent and does any cleanup only
2480 * if the Tx queue has not seen any new packets in a while. We make a
2481 * best effort attempt to reclaim descriptors, in that we don't wait
2482 * around if we cannot get a queue's lock (which most likely is because
2483 * someone else is queueing new packets and so will also handle the clean
2484 * up). Since control queues use immediate data exclusively we don't
2485 * bother cleaning them up here.
2486 *
2487 * b) Replenishes Rx queues that have run out due to memory shortage.
2488 * Normally new Rx buffers are added when existing ones are consumed but
2489 * when out of memory a queue can become empty. We try to add only a few
2490 * buffers here, the queue will be replenished fully as these new buffers
2491 * are used up if memory shortage has subsided.
2492 */
2493 static void sge_timer_cb(unsigned long data)
2494 {
2495 spinlock_t *lock;
2496 struct sge_qset *qs = (struct sge_qset *)data;
2497 struct adapter *adap = qs->netdev->priv;
2498
2499 if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
2500 reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
2501 spin_unlock(&qs->txq[TXQ_ETH].lock);
2502 }
2503 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2504 reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD]);
2505 spin_unlock(&qs->txq[TXQ_OFLD].lock);
2506 }
2507 lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
2508 &adap->sge.qs[0].rspq.lock;
2509 if (spin_trylock_irq(lock)) {
2510 if (!napi_is_scheduled(qs->netdev)) {
2511 u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
2512
2513 if (qs->fl[0].credits < qs->fl[0].size)
2514 __refill_fl(adap, &qs->fl[0]);
2515 if (qs->fl[1].credits < qs->fl[1].size)
2516 __refill_fl(adap, &qs->fl[1]);
2517
2518 if (status & (1 << qs->rspq.cntxt_id)) {
2519 qs->rspq.starved++;
2520 if (qs->rspq.credits) {
2521 refill_rspq(adap, &qs->rspq, 1);
2522 qs->rspq.credits--;
2523 qs->rspq.restarted++;
2524 t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
2525 1 << qs->rspq.cntxt_id);
2526 }
2527 }
2528 }
2529 spin_unlock_irq(lock);
2530 }
2531 mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2532 }
2533
2534 /**
2535 * t3_update_qset_coalesce - update coalescing settings for a queue set
2536 * @qs: the SGE queue set
2537 * @p: new queue set parameters
2538 *
2539 * Update the coalescing settings for an SGE queue set. Nothing is done
2540 * if the queue set is not initialized yet.
2541 */
2542 void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
2543 {
2544 if (!qs->netdev)
2545 return;
2546
2547 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U); /* can't be 0 */
2548 qs->rspq.polling = p->polling;
2549 qs->netdev->poll = p->polling ? napi_rx_handler : ofld_poll;
2550 }
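
/*
 * Editor's note: holdoff_tmr is kept in 0.1 us SGE timer ticks (see
 * t3_sge_init(), which programs A_SG_TIMER_TICK to a tenth of a
 * microsecond), hence the multiplication by 10: coalesce_usecs == 5
 * becomes a holdoff of 50 ticks.  Zero is clamped to 1 because a zero
 * holdoff value is not allowed.
 */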
2551
2552 /**
2553 * t3_sge_alloc_qset - initialize an SGE queue set
2554 * @adapter: the adapter
2555 * @id: the queue set id
2556 * @nports: how many Ethernet ports will be using this queue set
2557 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2558 * @p: configuration parameters for this queue set
2559 * @ntxq: number of Tx queues for the queue set
2560 * @netdev: net device associated with this queue set
2561 *
2562 * Allocate resources and initialize an SGE queue set. A queue set
2563 * comprises a response queue, two Rx free-buffer queues, and up to 3
2564 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2565 * queue, offload queue, and control queue.
2566 */
2567 int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2568 int irq_vec_idx, const struct qset_params *p,
2569 int ntxq, struct net_device *netdev)
2570 {
2571 int i, ret = -ENOMEM;
2572 struct sge_qset *q = &adapter->sge.qs[id];
2573
2574 init_qset_cntxt(q, id);
2575 init_timer(&q->tx_reclaim_timer);
2576 q->tx_reclaim_timer.data = (unsigned long)q;
2577 q->tx_reclaim_timer.function = sge_timer_cb;
2578
2579 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
2580 sizeof(struct rx_desc),
2581 sizeof(struct rx_sw_desc),
2582 &q->fl[0].phys_addr, &q->fl[0].sdesc);
2583 if (!q->fl[0].desc)
2584 goto err;
2585
2586 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
2587 sizeof(struct rx_desc),
2588 sizeof(struct rx_sw_desc),
2589 &q->fl[1].phys_addr, &q->fl[1].sdesc);
2590 if (!q->fl[1].desc)
2591 goto err;
2592
2593 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
2594 sizeof(struct rsp_desc), 0,
2595 &q->rspq.phys_addr, NULL);
2596 if (!q->rspq.desc)
2597 goto err;
2598
2599 for (i = 0; i < ntxq; ++i) {
2600 /*
2601 * The control queue always uses immediate data so does not
2602 * need to keep track of any sk_buffs.
2603 */
2604 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2605
2606 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
2607 sizeof(struct tx_desc), sz,
2608 &q->txq[i].phys_addr,
2609 &q->txq[i].sdesc);
2610 if (!q->txq[i].desc)
2611 goto err;
2612
2613 q->txq[i].gen = 1;
2614 q->txq[i].size = p->txq_size[i];
2615 spin_lock_init(&q->txq[i].lock);
2616 skb_queue_head_init(&q->txq[i].sendq);
2617 }
2618
2619 tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
2620 (unsigned long)q);
2621 tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
2622 (unsigned long)q);
2623
2624 q->fl[0].gen = q->fl[1].gen = 1;
2625 q->fl[0].size = p->fl_size;
2626 q->fl[1].size = p->jumbo_size;
2627
2628 q->rspq.gen = 1;
2629 q->rspq.size = p->rspq_size;
2630 spin_lock_init(&q->rspq.lock);
2631
2632 q->txq[TXQ_ETH].stop_thres = nports *
2633 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
2634
2635 if (!is_offload(adapter)) {
2636 #ifdef USE_RX_PAGE
2637 q->fl[0].buf_size = RX_PAGE_SIZE;
2638 #else
2639 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + 2 +
2640 sizeof(struct cpl_rx_pkt);
2641 #endif
2642 q->fl[1].buf_size = MAX_FRAME_SIZE + 2 +
2643 sizeof(struct cpl_rx_pkt);
2644 } else {
2645 #ifdef USE_RX_PAGE
2646 q->fl[0].buf_size = RX_PAGE_SIZE;
2647 #else
2648 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE +
2649 sizeof(struct cpl_rx_data);
2650 #endif
2651 q->fl[1].buf_size = (16 * 1024) -
2652 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2653 }
2654
2655 spin_lock(&adapter->sge.reg_lock);
2656
2657 /* FL threshold comparison uses < */
2658 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
2659 q->rspq.phys_addr, q->rspq.size,
2660 q->fl[0].buf_size, 1, 0);
2661 if (ret)
2662 goto err_unlock;
2663
2664 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2665 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
2666 q->fl[i].phys_addr, q->fl[i].size,
2667 q->fl[i].buf_size, p->cong_thres, 1,
2668 0);
2669 if (ret)
2670 goto err_unlock;
2671 }
2672
2673 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2674 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2675 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2676 1, 0);
2677 if (ret)
2678 goto err_unlock;
2679
2680 if (ntxq > 1) {
2681 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
2682 USE_GTS, SGE_CNTXT_OFLD, id,
2683 q->txq[TXQ_OFLD].phys_addr,
2684 q->txq[TXQ_OFLD].size, 0, 1, 0);
2685 if (ret)
2686 goto err_unlock;
2687 }
2688
2689 if (ntxq > 2) {
2690 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
2691 SGE_CNTXT_CTRL, id,
2692 q->txq[TXQ_CTRL].phys_addr,
2693 q->txq[TXQ_CTRL].size,
2694 q->txq[TXQ_CTRL].token, 1, 0);
2695 if (ret)
2696 goto err_unlock;
2697 }
2698
2699 spin_unlock(&adapter->sge.reg_lock);
2700 q->netdev = netdev;
2701 t3_update_qset_coalesce(q, p);
2702
2703 /*
2704 * We use atalk_ptr as a backpointer to a qset. In case a device is
2705 * associated with multiple queue sets, only the first one sets
2706 * atalk_ptr.
2707 */
2708 if (netdev->atalk_ptr == NULL)
2709 netdev->atalk_ptr = q;
2710
2711 refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
2712 refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
2713 refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
2714
2715 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2716 V_NEWTIMER(q->rspq.holdoff_tmr));
2717
2718 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2719 return 0;
2720
2721 err_unlock:
2722 spin_unlock(&adapter->sge.reg_lock);
2723 err:
2724 t3_free_qset(adapter, q);
2725 return ret;
2726 }
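
/*
 * Editor's note: the setup above follows an allocate-then-register
 * pattern: the rings are carved out of coherent DMA memory first, each
 * HW context is then programmed under sge.reg_lock, and only once
 * everything has succeeded are the free lists filled and the reclaim
 * timer armed.  On any failure t3_free_qset() tears down whatever was
 * built, so a hypothetical caller only needs to check the return value:
 *
 *	if (t3_sge_alloc_qset(adap, 0, 1, 0, &params, 3, dev))
 *		goto out_free;	(the qset is already cleaned up)
 */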
2727
2728 /**
2729 * t3_free_sge_resources - free SGE resources
2730 * @adap: the adapter
2731 *
2732 * Frees resources used by the SGE queue sets.
2733 */
2734 void t3_free_sge_resources(struct adapter *adap)
2735 {
2736 int i;
2737
2738 for (i = 0; i < SGE_QSETS; ++i)
2739 t3_free_qset(adap, &adap->sge.qs[i]);
2740 }
2741
2742 /**
2743 * t3_sge_start - enable SGE
2744 * @adap: the adapter
2745 *
2746 * Enables the SGE for DMAs. This is the last step in starting packet
2747 * transfers.
2748 */
2749 void t3_sge_start(struct adapter *adap)
2750 {
2751 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
2752 }
2753
2754 /**
2755 * t3_sge_stop - disable SGE operation
2756 * @adap: the adapter
2757 *
2758 * Disables the DMA engine. This can be called in emergencies (e.g.,
2759 * from error interrupts) or from normal process context. In the latter
2760 * case it also disables any pending queue restart tasklets. Note that
2761 * if it is called in interrupt context it cannot disable the restart
2762 * tasklets as it cannot wait, however the tasklets will have no effect
2763 * since the doorbells are disabled and the driver will call this again
2764 * later from process context, at which time the tasklets will be stopped
2765 * if they are still running.
2766 */
2767 void t3_sge_stop(struct adapter *adap)
2768 {
2769 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
2770 if (!in_interrupt()) {
2771 int i;
2772
2773 for (i = 0; i < SGE_QSETS; ++i) {
2774 struct sge_qset *qs = &adap->sge.qs[i];
2775
2776 tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
2777 tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
2778 }
2779 }
2780 }
2781
2782 /**
2783 * t3_sge_init - initialize SGE
2784 * @adap: the adapter
2785 * @p: the SGE parameters
2786 *
2787 * Performs SGE initialization needed every time after a chip reset.
2788 * We do not initialize any of the queue sets here, instead the driver
2789 * top-level must request those individually. We also do not enable DMA
2790 * here, that should be done after the queues have been set up.
2791 */
2792 void t3_sge_init(struct adapter *adap, struct sge_params *p)
2793 {
2794 unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
2795
2796 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
2797 F_CQCRDTCTRL |
2798 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
2799 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
2800 #if SGE_NUM_GENBITS == 1
2801 ctrl |= F_EGRGENCTRL;
2802 #endif
2803 if (adap->params.rev > 0) {
2804 if (!(adap->flags & (USING_MSIX | USING_MSI)))
2805 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
2806 ctrl |= F_CQCRDTCTRL | F_AVOIDCQOVFL;
2807 }
2808 t3_write_reg(adap, A_SG_CONTROL, ctrl);
2809 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
2810 V_LORCQDRBTHRSH(512));
2811 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
2812 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
2813 V_TIMEOUT(200 * core_ticks_per_usec(adap)));
2814 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, 1000);
2815 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
2816 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
2817 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
2818 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
2819 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
2820 }
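
/*
 * Editor's note: V_HOSTPAGESIZE(PAGE_SHIFT - 11) encodes the host page
 * size as log2(PAGE_SIZE) - 11, i.e. 0 for 2 KB pages and 1 for the
 * common 4 KB case, while "ups" above is derived from the length of
 * BAR 2 in 4 KB units (ffs of the size shifted down by 12) to size the
 * user-space region.  A_SG_TIMER_TICK is programmed to a tenth of a
 * microsecond, which is why holdoff values throughout this file are
 * expressed in 0.1 us units.
 */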
2821
2822 /**
2823 * t3_sge_prep - one-time SGE initialization
2824 * @adap: the associated adapter
2825 * @p: SGE parameters
2826 *
2827 * Performs one-time initialization of SGE SW state. Includes determining
2828 * defaults for the assorted SGE parameters, which admins can change until
2829 * they are used to initialize the SGE.
2830 */
2831 void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p)
2832 {
2833 int i;
2834
2835 p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
2836 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2837
2838 for (i = 0; i < SGE_QSETS; ++i) {
2839 struct qset_params *q = p->qset + i;
2840
2841 q->polling = adap->params.rev > 0;
2842 q->coalesce_usecs = 5;
2843 q->rspq_size = 1024;
2844 q->fl_size = 1024;
2845 q->jumbo_size = 512;
2846 q->txq_size[TXQ_ETH] = 1024;
2847 q->txq_size[TXQ_OFLD] = 1024;
2848 q->txq_size[TXQ_CTRL] = 256;
2849 q->cong_thres = 0;
2850 }
2851
2852 spin_lock_init(&adap->sge.reg_lock);
2853 }
2854
2855 /**
2856 * t3_get_desc - dump an SGE descriptor for debugging purposes
2857 * @qs: the queue set
2858 * @qnum: identifies the specific queue (0..2: Tx, 3:response, 4..5: Rx)
2859 * @idx: the descriptor index in the queue
2860 * @data: where to dump the descriptor contents
2861 *
2862 * Dumps the contents of a HW descriptor of an SGE queue. Returns the
2863 * size of the descriptor.
2864 */
2865 int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
2866 unsigned char *data)
2867 {
2868 if (qnum >= 6)
2869 return -EINVAL;
2870
2871 if (qnum < 3) {
2872 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
2873 return -EINVAL;
2874 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
2875 return sizeof(struct tx_desc);
2876 }
2877
2878 if (qnum == 3) {
2879 if (!qs->rspq.desc || idx >= qs->rspq.size)
2880 return -EINVAL;
2881 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
2882 return sizeof(struct rsp_desc);
2883 }
2884
2885 qnum -= 4;
2886 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
2887 return -EINVAL;
2888 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
2889 return sizeof(struct rx_desc);
2890 }
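
/*
 * Editor's note: a minimal, hypothetical debug caller for t3_get_desc().
 * qnum selects the queue within the set (0..2 Tx, 3 response, 4..5 free
 * list) and the destination buffer must be large enough for the selected
 * descriptor type; here the first response-queue entry is dumped.
 */
static inline int example_dump_first_response(const struct sge_qset *qs,
					      struct rsp_desc *buf)
{
	/* returns sizeof(struct rsp_desc) on success, -EINVAL otherwise */
	return t3_get_desc(qs, 3, 0, (unsigned char *)buf);
}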