/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi is kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;
}

int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	/* unmap first bd */
	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
	}

	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

/* Set Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe)
{
	/* Set Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
		return le32_to_cpu(cqe->rss_hash_result);
	return 0;
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
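	/* For GRO, full_page is the SGE area (SGE_PAGE_SIZE * PAGES_PER_SGE)
	 * rounded down to a whole number of gro_size-sized segments; it is
	 * used below and in bnx2x_fill_frag_skb() to cut the aggregation
	 * into MSS-aligned frags.
	 */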
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
		tpa_info->full_page =
			SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
 *
 * @bp:			driver handle
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 */
static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
			     u16 len_on_bd)
{
	/*
	 * TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6)
		hdrs_len += sizeof(struct ipv6hdr);
	else /* IPv4 */
		hdrs_len += sizeof(struct iphdr);

	/* Check if there was a TCP timestamp; if there was, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	return len_on_bd - hdrs_len;
}

1191cb83
ED
388static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
389 struct bnx2x_fastpath *fp, u16 index)
390{
391 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
392 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
393 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
394 dma_addr_t mapping;
395
396 if (unlikely(page == NULL)) {
397 BNX2X_ERR("Can't alloc sge\n");
398 return -ENOMEM;
399 }
400
401 mapping = dma_map_page(&bp->pdev->dev, page, 0,
402 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
403 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
404 __free_pages(page, PAGES_PER_SGE_SHIFT);
405 BNX2X_ERR("Can't map sge\n");
406 return -ENOMEM;
407 }
408
409 sw_buf->page = page;
410 dma_unmap_addr_set(sw_buf, mapping, mapping);
411
412 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
413 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
414
415 return 0;
416}
417
9f6c9258 418static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
621b4d66
DK
419 struct bnx2x_agg_info *tpa_info,
420 u16 pages,
421 struct sk_buff *skb,
619c5cb6
VZ
422 struct eth_end_agg_rx_cqe *cqe,
423 u16 cqe_idx)
9f6c9258
DK
424{
425 struct sw_rx_page *rx_pg, old_rx_pg;
621b4d66
DK
426 u32 i, frag_len, frag_size;
427 int err, j, frag_id = 0;
619c5cb6 428 u16 len_on_bd = tpa_info->len_on_bd;
621b4d66 429 u16 full_page = 0, gro_size = 0;
9f6c9258 430
619c5cb6 431 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
621b4d66
DK
432
433 if (fp->mode == TPA_MODE_GRO) {
434 gro_size = tpa_info->gro_size;
435 full_page = tpa_info->full_page;
436 }
9f6c9258
DK
437
438 /* This is needed in order to enable forwarding support */
621b4d66 439 if (frag_size) {
619c5cb6
VZ
440 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
441 tpa_info->parsing_flags, len_on_bd);
9f6c9258 442
621b4d66
DK
443 /* set for GRO */
444 if (fp->mode == TPA_MODE_GRO)
445 skb_shinfo(skb)->gso_type =
446 (GET_FLAG(tpa_info->parsing_flags,
447 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
448 PRS_FLAG_OVERETH_IPV6) ?
449 SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
450 }
451
452
9f6c9258
DK
453#ifdef BNX2X_STOP_ON_ERROR
454 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
455 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
456 pages, cqe_idx);
619c5cb6 457 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
9f6c9258
DK
458 bnx2x_panic();
459 return -EINVAL;
460 }
461#endif
462
463 /* Run through the SGL and compose the fragmented skb */
464 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
619c5cb6 465 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
9f6c9258
DK
466
467 /* FW gives the indices of the SGE as if the ring is an array
468 (meaning that "next" element will consume 2 indices) */
621b4d66
DK
469 if (fp->mode == TPA_MODE_GRO)
470 frag_len = min_t(u32, frag_size, (u32)full_page);
471 else /* LRO */
472 frag_len = min_t(u32, frag_size,
473 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));
474
9f6c9258
DK
475 rx_pg = &fp->rx_page_ring[sge_idx];
476 old_rx_pg = *rx_pg;
477
478 /* If we fail to allocate a substitute page, we simply stop
479 where we are and drop the whole packet */
480 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
481 if (unlikely(err)) {
482 fp->eth_q_stats.rx_skb_alloc_failed++;
483 return err;
484 }
485
		/* Unmap the page as we are going to pass it to the stack */
487 dma_unmap_page(&bp->pdev->dev,
488 dma_unmap_addr(&old_rx_pg, mapping),
489 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
9f6c9258 490 /* Add one frag and update the appropriate fields in the skb */
621b4d66
DK
491 if (fp->mode == TPA_MODE_LRO)
492 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
493 else { /* GRO */
494 int rem;
495 int offset = 0;
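			/* A page reference is consumed by every frag attached
			 * to the skb; the buffer's original reference covers
			 * the first frag, so take an extra one for each
			 * additional gro_size-sized chunk of the same page.
			 */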
496 for (rem = frag_len; rem > 0; rem -= gro_size) {
497 int len = rem > gro_size ? gro_size : rem;
498 skb_fill_page_desc(skb, frag_id++,
499 old_rx_pg.page, offset, len);
500 if (offset)
501 get_page(old_rx_pg.page);
502 offset += len;
503 }
504 }
9f6c9258
DK
505
506 skb->data_len += frag_len;
e1ac50f6 507 skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
9f6c9258
DK
508 skb->len += frag_len;
509
510 frag_size -= frag_len;
511 }
512
513 return 0;
514}
515
1191cb83
ED
516static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
517 struct bnx2x_agg_info *tpa_info,
518 u16 pages,
519 struct eth_end_agg_rx_cqe *cqe,
520 u16 cqe_idx)
9f6c9258 521{
619c5cb6 522 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
621b4d66 523 u8 pad = tpa_info->placement_offset;
619c5cb6 524 u16 len = tpa_info->len_on_bd;
e52fcb24 525 struct sk_buff *skb = NULL;
621b4d66 526 u8 *new_data, *data = rx_buf->data;
619c5cb6
VZ
527 u8 old_tpa_state = tpa_info->tpa_state;
528
529 tpa_info->tpa_state = BNX2X_TPA_STOP;
530
	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
534 if (old_tpa_state == BNX2X_TPA_ERROR)
535 goto drop;
536
e52fcb24
ED
537 /* Try to allocate the new data */
538 new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
9f6c9258
DK
539
540 /* Unmap skb in the pool anyway, as we are going to change
541 pool entry status to BNX2X_TPA_STOP even if new skb allocation
542 fails. */
543 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
a8c94b91 544 fp->rx_buf_size, DMA_FROM_DEVICE);
e52fcb24 545 if (likely(new_data))
d3836f21 546 skb = build_skb(data, 0);
9f6c9258 547
e52fcb24 548 if (likely(skb)) {
9f6c9258 549#ifdef BNX2X_STOP_ON_ERROR
a8c94b91 550 if (pad + len > fp->rx_buf_size) {
51c1a580 551 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
a8c94b91 552 pad, len, fp->rx_buf_size);
9f6c9258
DK
553 bnx2x_panic();
554 return;
555 }
556#endif
557
e52fcb24 558 skb_reserve(skb, pad + NET_SKB_PAD);
9f6c9258 559 skb_put(skb, len);
e52fcb24 560 skb->rxhash = tpa_info->rxhash;
9f6c9258
DK
561
562 skb->protocol = eth_type_trans(skb, bp->dev);
563 skb->ip_summed = CHECKSUM_UNNECESSARY;
564
621b4d66
DK
565 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
566 skb, cqe, cqe_idx)) {
619c5cb6
VZ
567 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
568 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
9bcc0893 569 napi_gro_receive(&fp->napi, skb);
9f6c9258 570 } else {
51c1a580
MS
571 DP(NETIF_MSG_RX_STATUS,
572 "Failed to allocate new pages - dropping packet!\n");
40955532 573 dev_kfree_skb_any(skb);
9f6c9258
DK
574 }
575
576
e52fcb24
ED
577 /* put new data in bin */
578 rx_buf->data = new_data;
9f6c9258 579
619c5cb6 580 return;
9f6c9258 581 }
3f61cd87 582 kfree(new_data);
619c5cb6
VZ
583drop:
584 /* drop the packet and keep the buffer in the bin */
585 DP(NETIF_MSG_RX_STATUS,
586 "Failed to allocate or map a new skb - dropping packet!\n");
587 fp->eth_q_stats.rx_skb_alloc_failed++;
9f6c9258
DK
588}
589
1191cb83
ED
590static int bnx2x_alloc_rx_data(struct bnx2x *bp,
591 struct bnx2x_fastpath *fp, u16 index)
592{
593 u8 *data;
594 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
595 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
596 dma_addr_t mapping;
597
598 data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
599 if (unlikely(data == NULL))
600 return -ENOMEM;
601
602 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
603 fp->rx_buf_size,
604 DMA_FROM_DEVICE);
605 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
606 kfree(data);
607 BNX2X_ERR("Can't map rx data\n");
608 return -ENOMEM;
609 }
610
611 rx_buf->data = data;
612 dma_unmap_addr_set(rx_buf, mapping, mapping);
613
614 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
615 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
616
617 return 0;
618}
619
d6cb3e41
ED
620static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
621 struct bnx2x_fastpath *fp)
622{
623 /* Do nothing if no IP/L4 csum validation was done */
624
625 if (cqe->fast_path_cqe.status_flags &
626 (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG |
627 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
628 return;
629
630 /* If both IP/L4 validation were done, check if an error was found. */
631
632 if (cqe->fast_path_cqe.type_error_flags &
633 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
634 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
635 fp->eth_q_stats.hw_csum_err++;
636 else
637 skb->ip_summed = CHECKSUM_UNNECESSARY;
638}
9f6c9258
DK
639
640int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
641{
642 struct bnx2x *bp = fp->bp;
643 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
644 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
645 int rx_pkt = 0;
646
647#ifdef BNX2X_STOP_ON_ERROR
648 if (unlikely(bp->panic))
649 return 0;
650#endif
651
652 /* CQ "next element" is of the size of the regular element,
653 that's why it's ok here */
654 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
655 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
656 hw_comp_cons++;
657
658 bd_cons = fp->rx_bd_cons;
659 bd_prod = fp->rx_bd_prod;
660 bd_prod_fw = bd_prod;
661 sw_comp_cons = fp->rx_comp_cons;
662 sw_comp_prod = fp->rx_comp_prod;
663
664 /* Memory barrier necessary as speculative reads of the rx
665 * buffer can be ahead of the index in the status block
666 */
667 rmb();
668
669 DP(NETIF_MSG_RX_STATUS,
670 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
671 fp->index, hw_comp_cons, sw_comp_cons);
672
673 while (sw_comp_cons != hw_comp_cons) {
674 struct sw_rx_bd *rx_buf = NULL;
675 struct sk_buff *skb;
676 union eth_rx_cqe *cqe;
619c5cb6 677 struct eth_fast_path_rx_cqe *cqe_fp;
9f6c9258 678 u8 cqe_fp_flags;
619c5cb6 679 enum eth_rx_cqe_type cqe_fp_type;
621b4d66 680 u16 len, pad, queue;
e52fcb24 681 u8 *data;
9f6c9258 682
619c5cb6
VZ
683#ifdef BNX2X_STOP_ON_ERROR
684 if (unlikely(bp->panic))
685 return 0;
686#endif
687
9f6c9258
DK
688 comp_ring_cons = RCQ_BD(sw_comp_cons);
689 bd_prod = RX_BD(bd_prod);
690 bd_cons = RX_BD(bd_cons);
691
9f6c9258 692 cqe = &fp->rx_comp_ring[comp_ring_cons];
619c5cb6
VZ
693 cqe_fp = &cqe->fast_path_cqe;
694 cqe_fp_flags = cqe_fp->type_error_flags;
695 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
9f6c9258 696
51c1a580
MS
697 DP(NETIF_MSG_RX_STATUS,
698 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
699 CQE_TYPE(cqe_fp_flags),
619c5cb6
VZ
700 cqe_fp_flags, cqe_fp->status_flags,
701 le32_to_cpu(cqe_fp->rss_hash_result),
621b4d66
DK
702 le16_to_cpu(cqe_fp->vlan_tag),
703 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
9f6c9258
DK
704
705 /* is this a slowpath msg? */
619c5cb6 706 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
9f6c9258
DK
707 bnx2x_sp_event(fp, cqe);
708 goto next_cqe;
e52fcb24 709 }
621b4d66 710
e52fcb24
ED
711 rx_buf = &fp->rx_buf_ring[bd_cons];
712 data = rx_buf->data;
9f6c9258 713
e52fcb24 714 if (!CQE_TYPE_FAST(cqe_fp_type)) {
621b4d66
DK
715 struct bnx2x_agg_info *tpa_info;
716 u16 frag_size, pages;
619c5cb6 717#ifdef BNX2X_STOP_ON_ERROR
e52fcb24
ED
718 /* sanity check */
719 if (fp->disable_tpa &&
720 (CQE_TYPE_START(cqe_fp_type) ||
721 CQE_TYPE_STOP(cqe_fp_type)))
51c1a580 722 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
e52fcb24 723 CQE_TYPE(cqe_fp_type));
619c5cb6 724#endif
9f6c9258 725
e52fcb24
ED
726 if (CQE_TYPE_START(cqe_fp_type)) {
727 u16 queue = cqe_fp->queue_index;
728 DP(NETIF_MSG_RX_STATUS,
729 "calling tpa_start on queue %d\n",
730 queue);
9f6c9258 731
e52fcb24
ED
732 bnx2x_tpa_start(fp, queue,
733 bd_cons, bd_prod,
734 cqe_fp);
621b4d66 735
e52fcb24 736 goto next_rx;
e52fcb24 737
621b4d66
DK
738 }
739 queue = cqe->end_agg_cqe.queue_index;
740 tpa_info = &fp->tpa_info[queue];
741 DP(NETIF_MSG_RX_STATUS,
742 "calling tpa_stop on queue %d\n",
743 queue);
744
745 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
746 tpa_info->len_on_bd;
747
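			/* Number of SGEs spanned by the aggregated payload:
			 * in GRO mode each SGE carries at most full_page bytes
			 * (a whole number of gro_size segments), in LRO mode a
			 * full SGE page.
			 */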
748 if (fp->mode == TPA_MODE_GRO)
749 pages = (frag_size + tpa_info->full_page - 1) /
750 tpa_info->full_page;
751 else
752 pages = SGE_PAGE_ALIGN(frag_size) >>
753 SGE_PAGE_SHIFT;
754
755 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
756 &cqe->end_agg_cqe, comp_ring_cons);
9f6c9258 757#ifdef BNX2X_STOP_ON_ERROR
621b4d66
DK
758 if (bp->panic)
759 return 0;
9f6c9258
DK
760#endif
761
621b4d66
DK
762 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
763 goto next_cqe;
e52fcb24
ED
764 }
765 /* non TPA */
621b4d66 766 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
e52fcb24
ED
767 pad = cqe_fp->placement_offset;
768 dma_sync_single_for_cpu(&bp->pdev->dev,
9f6c9258 769 dma_unmap_addr(rx_buf, mapping),
e52fcb24
ED
770 pad + RX_COPY_THRESH,
771 DMA_FROM_DEVICE);
772 pad += NET_SKB_PAD;
773 prefetch(data + pad); /* speedup eth_type_trans() */
774 /* is this an error packet? */
775 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
51c1a580 776 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
e52fcb24
ED
777 "ERROR flags %x rx packet %u\n",
778 cqe_fp_flags, sw_comp_cons);
779 fp->eth_q_stats.rx_err_discard_pkt++;
780 goto reuse_rx;
781 }
9f6c9258 782
e52fcb24
ED
783 /* Since we don't have a jumbo ring
784 * copy small packets if mtu > 1500
785 */
786 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
787 (len <= RX_COPY_THRESH)) {
788 skb = netdev_alloc_skb_ip_align(bp->dev, len);
789 if (skb == NULL) {
51c1a580 790 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
e52fcb24
ED
791 "ERROR packet dropped because of alloc failure\n");
792 fp->eth_q_stats.rx_skb_alloc_failed++;
9f6c9258
DK
793 goto reuse_rx;
794 }
e52fcb24
ED
795 memcpy(skb->data, data + pad, len);
796 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
797 } else {
798 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
9f6c9258 799 dma_unmap_single(&bp->pdev->dev,
e52fcb24 800 dma_unmap_addr(rx_buf, mapping),
a8c94b91 801 fp->rx_buf_size,
9f6c9258 802 DMA_FROM_DEVICE);
d3836f21 803 skb = build_skb(data, 0);
e52fcb24
ED
804 if (unlikely(!skb)) {
805 kfree(data);
806 fp->eth_q_stats.rx_skb_alloc_failed++;
807 goto next_rx;
808 }
9f6c9258 809 skb_reserve(skb, pad);
9f6c9258 810 } else {
51c1a580
MS
811 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
812 "ERROR packet dropped because of alloc failure\n");
9f6c9258
DK
813 fp->eth_q_stats.rx_skb_alloc_failed++;
814reuse_rx:
e52fcb24 815 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
9f6c9258
DK
816 goto next_rx;
817 }
036d2df9 818 }
9f6c9258 819
036d2df9
DK
820 skb_put(skb, len);
821 skb->protocol = eth_type_trans(skb, bp->dev);
9f6c9258 822
036d2df9
DK
		/* Set Toeplitz hash for a non-LRO skb */
824 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);
9f6c9258 825
036d2df9 826 skb_checksum_none_assert(skb);
f85582f8 827
d6cb3e41
ED
828 if (bp->dev->features & NETIF_F_RXCSUM)
829 bnx2x_csum_validate(skb, cqe, fp);
619c5cb6 830
9f6c9258 831
f233cafe 832 skb_record_rx_queue(skb, fp->rx_queue);
9f6c9258 833
619c5cb6
VZ
834 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
835 PARSING_FLAGS_VLAN)
9bcc0893 836 __vlan_hwaccel_put_tag(skb,
619c5cb6 837 le16_to_cpu(cqe_fp->vlan_tag));
9bcc0893 838 napi_gro_receive(&fp->napi, skb);
9f6c9258
DK
839
840
841next_rx:
e52fcb24 842 rx_buf->data = NULL;
9f6c9258
DK
843
844 bd_cons = NEXT_RX_IDX(bd_cons);
845 bd_prod = NEXT_RX_IDX(bd_prod);
846 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
847 rx_pkt++;
848next_cqe:
849 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
850 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
851
852 if (rx_pkt == budget)
853 break;
854 } /* while */
855
856 fp->rx_bd_cons = bd_cons;
857 fp->rx_bd_prod = bd_prod_fw;
858 fp->rx_comp_cons = sw_comp_cons;
859 fp->rx_comp_prod = sw_comp_prod;
860
861 /* Update producers */
862 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
863 fp->rx_sge_prod);
864
865 fp->rx_pkt += rx_pkt;
866 fp->rx_calls++;
867
868 return rx_pkt;
869}
870
871static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
872{
873 struct bnx2x_fastpath *fp = fp_cookie;
874 struct bnx2x *bp = fp->bp;
6383c0b3 875 u8 cos;
9f6c9258 876
51c1a580
MS
877 DP(NETIF_MSG_INTR,
878 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
523224a3
DK
879 fp->index, fp->fw_sb_id, fp->igu_sb_id);
880 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9f6c9258
DK
881
882#ifdef BNX2X_STOP_ON_ERROR
883 if (unlikely(bp->panic))
884 return IRQ_HANDLED;
885#endif
886
887 /* Handle Rx and Tx according to MSI-X vector */
888 prefetch(fp->rx_cons_sb);
6383c0b3
AE
889
890 for_each_cos_in_tx_queue(fp, cos)
891 prefetch(fp->txdata[cos].tx_cons_sb);
892
523224a3 893 prefetch(&fp->sb_running_index[SM_RX_ID]);
9f6c9258
DK
894 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
895
896 return IRQ_HANDLED;
897}
898
9f6c9258
DK
899/* HW Lock for shared dual port PHYs */
900void bnx2x_acquire_phy_lock(struct bnx2x *bp)
901{
902 mutex_lock(&bp->port.phy_mutex);
903
904 if (bp->port.need_hw_lock)
905 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
906}
907
908void bnx2x_release_phy_lock(struct bnx2x *bp)
909{
910 if (bp->port.need_hw_lock)
911 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
912
913 mutex_unlock(&bp->port.phy_mutex);
914}
915
0793f83f
DK
916/* calculates MF speed according to current linespeed and MF configuration */
917u16 bnx2x_get_mf_speed(struct bnx2x *bp)
918{
919 u16 line_speed = bp->link_vars.line_speed;
920 if (IS_MF(bp)) {
faa6fcbb
DK
921 u16 maxCfg = bnx2x_extract_max_cfg(bp,
922 bp->mf_config[BP_VN(bp)]);
923
924 /* Calculate the current MAX line speed limit for the MF
925 * devices
0793f83f 926 */
faa6fcbb
DK
927 if (IS_MF_SI(bp))
928 line_speed = (line_speed * maxCfg) / 100;
929 else { /* SD mode */
0793f83f
DK
930 u16 vn_max_rate = maxCfg * 100;
931
932 if (vn_max_rate < line_speed)
933 line_speed = vn_max_rate;
faa6fcbb 934 }
0793f83f
DK
935 }
936
937 return line_speed;
938}
939
2ae17f66
VZ
940/**
941 * bnx2x_fill_report_data - fill link report data to report
942 *
943 * @bp: driver handle
944 * @data: link state to update
945 *
 * It uses non-atomic bit operations because it is called under the mutex.
947 */
1191cb83
ED
948static void bnx2x_fill_report_data(struct bnx2x *bp,
949 struct bnx2x_link_report_data *data)
2ae17f66
VZ
950{
951 u16 line_speed = bnx2x_get_mf_speed(bp);
952
953 memset(data, 0, sizeof(*data));
954
	/* Fill the report data: effective line speed */
956 data->line_speed = line_speed;
957
958 /* Link is down */
959 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
960 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
961 &data->link_report_flags);
962
963 /* Full DUPLEX */
964 if (bp->link_vars.duplex == DUPLEX_FULL)
965 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
966
967 /* Rx Flow Control is ON */
968 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
969 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
970
971 /* Tx Flow Control is ON */
972 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
973 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
974}
975
976/**
977 * bnx2x_link_report - report link status to OS.
978 *
979 * @bp: driver handle
980 *
981 * Calls the __bnx2x_link_report() under the same locking scheme
982 * as a link/PHY state managing code to ensure a consistent link
983 * reporting.
984 */
985
9f6c9258
DK
986void bnx2x_link_report(struct bnx2x *bp)
987{
2ae17f66
VZ
988 bnx2x_acquire_phy_lock(bp);
989 __bnx2x_link_report(bp);
990 bnx2x_release_phy_lock(bp);
991}
9f6c9258 992
2ae17f66
VZ
993/**
994 * __bnx2x_link_report - report link status to OS.
995 *
996 * @bp: driver handle
997 *
 * Non-atomic implementation.
999 * Should be called under the phy_lock.
1000 */
1001void __bnx2x_link_report(struct bnx2x *bp)
1002{
1003 struct bnx2x_link_report_data cur_data;
9f6c9258 1004
2ae17f66
VZ
1005 /* reread mf_cfg */
1006 if (!CHIP_IS_E1(bp))
1007 bnx2x_read_mf_cfg(bp);
1008
1009 /* Read the current link report info */
1010 bnx2x_fill_report_data(bp, &cur_data);
1011
1012 /* Don't report link down or exactly the same link status twice */
1013 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1014 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1015 &bp->last_reported_link.link_report_flags) &&
1016 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1017 &cur_data.link_report_flags)))
1018 return;
1019
1020 bp->link_cnt++;
9f6c9258 1021
2ae17f66
VZ
	/* We are going to report new link parameters now -
1023 * remember the current data for the next time.
1024 */
1025 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
9f6c9258 1026
2ae17f66
VZ
1027 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1028 &cur_data.link_report_flags)) {
1029 netif_carrier_off(bp->dev);
1030 netdev_err(bp->dev, "NIC Link is Down\n");
1031 return;
1032 } else {
94f05b0f
JP
1033 const char *duplex;
1034 const char *flow;
1035
2ae17f66 1036 netif_carrier_on(bp->dev);
9f6c9258 1037
2ae17f66
VZ
1038 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1039 &cur_data.link_report_flags))
94f05b0f 1040 duplex = "full";
9f6c9258 1041 else
94f05b0f 1042 duplex = "half";
9f6c9258 1043
2ae17f66
VZ
1044 /* Handle the FC at the end so that only these flags would be
1045 * possibly set. This way we may easily check if there is no FC
1046 * enabled.
1047 */
1048 if (cur_data.link_report_flags) {
1049 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1050 &cur_data.link_report_flags)) {
2ae17f66
VZ
1051 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1052 &cur_data.link_report_flags))
94f05b0f
JP
1053 flow = "ON - receive & transmit";
1054 else
1055 flow = "ON - receive";
9f6c9258 1056 } else {
94f05b0f 1057 flow = "ON - transmit";
9f6c9258 1058 }
94f05b0f
JP
1059 } else {
1060 flow = "none";
9f6c9258 1061 }
94f05b0f
JP
1062 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1063 cur_data.line_speed, duplex, flow);
9f6c9258
DK
1064 }
1065}
1066
1191cb83
ED
1067static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1068{
1069 int i;
1070
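	/* Program the "next page" element at the end of each SGE ring page
	 * with the DMA address of the following page (the last page wraps
	 * back to the first), chaining the pages into a ring.
	 */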
1071 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1072 struct eth_rx_sge *sge;
1073
1074 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1075 sge->addr_hi =
1076 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1077 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1078
1079 sge->addr_lo =
1080 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1081 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1082 }
1083}
1084
1085static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1086 struct bnx2x_fastpath *fp, int last)
1087{
1088 int i;
1089
1090 for (i = 0; i < last; i++) {
1091 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1092 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1093 u8 *data = first_buf->data;
1094
1095 if (data == NULL) {
1096 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1097 continue;
1098 }
1099 if (tpa_info->tpa_state == BNX2X_TPA_START)
1100 dma_unmap_single(&bp->pdev->dev,
1101 dma_unmap_addr(first_buf, mapping),
1102 fp->rx_buf_size, DMA_FROM_DEVICE);
1103 kfree(data);
1104 first_buf->data = NULL;
1105 }
1106}
1107
9f6c9258
DK
1108void bnx2x_init_rx_rings(struct bnx2x *bp)
1109{
1110 int func = BP_FUNC(bp);
523224a3 1111 u16 ring_prod;
9f6c9258 1112 int i, j;
25141580 1113
b3b83c3f 1114 /* Allocate TPA resources */
ec6ba945 1115 for_each_rx_queue(bp, j) {
523224a3 1116 struct bnx2x_fastpath *fp = &bp->fp[j];
9f6c9258 1117
a8c94b91
VZ
1118 DP(NETIF_MSG_IFUP,
1119 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1120
523224a3 1121 if (!fp->disable_tpa) {
			/* Fill the per-aggregation pool */
dfacf138 1123 for (i = 0; i < MAX_AGG_QS(bp); i++) {
619c5cb6
VZ
1124 struct bnx2x_agg_info *tpa_info =
1125 &fp->tpa_info[i];
1126 struct sw_rx_bd *first_buf =
1127 &tpa_info->first_buf;
1128
e52fcb24
ED
1129 first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
1130 GFP_ATOMIC);
1131 if (!first_buf->data) {
51c1a580
MS
1132 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1133 j);
9f6c9258
DK
1134 bnx2x_free_tpa_pool(bp, fp, i);
1135 fp->disable_tpa = 1;
1136 break;
1137 }
619c5cb6
VZ
1138 dma_unmap_addr_set(first_buf, mapping, 0);
1139 tpa_info->tpa_state = BNX2X_TPA_STOP;
9f6c9258 1140 }
523224a3
DK
1141
1142 /* "next page" elements initialization */
1143 bnx2x_set_next_page_sgl(fp);
1144
1145 /* set SGEs bit mask */
1146 bnx2x_init_sge_ring_bit_mask(fp);
1147
1148 /* Allocate SGEs and initialize the ring elements */
1149 for (i = 0, ring_prod = 0;
1150 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1151
1152 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
51c1a580
MS
1153 BNX2X_ERR("was only able to allocate %d rx sges\n",
1154 i);
1155 BNX2X_ERR("disabling TPA for queue[%d]\n",
1156 j);
523224a3 1157 /* Cleanup already allocated elements */
619c5cb6
VZ
1158 bnx2x_free_rx_sge_range(bp, fp,
1159 ring_prod);
1160 bnx2x_free_tpa_pool(bp, fp,
dfacf138 1161 MAX_AGG_QS(bp));
523224a3
DK
1162 fp->disable_tpa = 1;
1163 ring_prod = 0;
1164 break;
1165 }
1166 ring_prod = NEXT_SGE_IDX(ring_prod);
1167 }
1168
1169 fp->rx_sge_prod = ring_prod;
9f6c9258
DK
1170 }
1171 }
1172
ec6ba945 1173 for_each_rx_queue(bp, j) {
9f6c9258
DK
1174 struct bnx2x_fastpath *fp = &bp->fp[j];
1175
1176 fp->rx_bd_cons = 0;
9f6c9258 1177
b3b83c3f
DK
1178 /* Activate BD ring */
1179 /* Warning!
1180 * this will generate an interrupt (to the TSTORM)
1181 * must only be done after chip is initialized
1182 */
1183 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1184 fp->rx_sge_prod);
9f6c9258 1185
9f6c9258
DK
1186 if (j != 0)
1187 continue;
1188
619c5cb6 1189 if (CHIP_IS_E1(bp)) {
f2e0899f
DK
1190 REG_WR(bp, BAR_USTRORM_INTMEM +
1191 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1192 U64_LO(fp->rx_comp_mapping));
1193 REG_WR(bp, BAR_USTRORM_INTMEM +
1194 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1195 U64_HI(fp->rx_comp_mapping));
1196 }
9f6c9258
DK
1197 }
1198}
f85582f8 1199
9f6c9258
DK
1200static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1201{
1202 int i;
6383c0b3 1203 u8 cos;
9f6c9258 1204
ec6ba945 1205 for_each_tx_queue(bp, i) {
9f6c9258 1206 struct bnx2x_fastpath *fp = &bp->fp[i];
6383c0b3
AE
1207 for_each_cos_in_tx_queue(fp, cos) {
1208 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
2df1a70a 1209 unsigned pkts_compl = 0, bytes_compl = 0;
9f6c9258 1210
6383c0b3
AE
1211 u16 sw_prod = txdata->tx_pkt_prod;
1212 u16 sw_cons = txdata->tx_pkt_cons;
9f6c9258 1213
6383c0b3 1214 while (sw_cons != sw_prod) {
2df1a70a
TH
1215 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1216 &pkts_compl, &bytes_compl);
6383c0b3
AE
1217 sw_cons++;
1218 }
2df1a70a
TH
1219 netdev_tx_reset_queue(
1220 netdev_get_tx_queue(bp->dev, txdata->txq_index));
9f6c9258
DK
1221 }
1222 }
1223}
1224
b3b83c3f
DK
1225static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1226{
1227 struct bnx2x *bp = fp->bp;
1228 int i;
1229
1230 /* ring wasn't allocated */
1231 if (fp->rx_buf_ring == NULL)
1232 return;
1233
1234 for (i = 0; i < NUM_RX_BD; i++) {
1235 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
e52fcb24 1236 u8 *data = rx_buf->data;
b3b83c3f 1237
e52fcb24 1238 if (data == NULL)
b3b83c3f 1239 continue;
b3b83c3f
DK
1240 dma_unmap_single(&bp->pdev->dev,
1241 dma_unmap_addr(rx_buf, mapping),
1242 fp->rx_buf_size, DMA_FROM_DEVICE);
1243
e52fcb24
ED
1244 rx_buf->data = NULL;
1245 kfree(data);
b3b83c3f
DK
1246 }
1247}
1248
9f6c9258
DK
1249static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1250{
b3b83c3f 1251 int j;
9f6c9258 1252
ec6ba945 1253 for_each_rx_queue(bp, j) {
9f6c9258
DK
1254 struct bnx2x_fastpath *fp = &bp->fp[j];
1255
b3b83c3f 1256 bnx2x_free_rx_bds(fp);
9f6c9258 1257
9f6c9258 1258 if (!fp->disable_tpa)
dfacf138 1259 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
9f6c9258
DK
1260 }
1261}
1262
1263void bnx2x_free_skbs(struct bnx2x *bp)
1264{
1265 bnx2x_free_tx_skbs(bp);
1266 bnx2x_free_rx_skbs(bp);
1267}
1268
e3835b99
DK
1269void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1270{
1271 /* load old values */
1272 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1273
1274 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1275 /* leave all but MAX value */
1276 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1277
1278 /* set new MAX value */
1279 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1280 & FUNC_MF_CFG_MAX_BW_MASK;
1281
1282 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1283 }
1284}
1285
ca92429f
DK
1286/**
1287 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1288 *
1289 * @bp: driver handle
1290 * @nvecs: number of vectors to be released
1291 */
1292static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
9f6c9258 1293{
ca92429f 1294 int i, offset = 0;
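	/* MSI-X vector layout: entry 0 is the slowpath interrupt, entry 1 is
	 * the CNIC vector when BCM_CNIC is set, and the remaining entries map
	 * one-to-one to the ETH fastpath queues.
	 */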
9f6c9258 1295
ca92429f
DK
1296 if (nvecs == offset)
1297 return;
1298 free_irq(bp->msix_table[offset].vector, bp->dev);
9f6c9258 1299 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
ca92429f
DK
1300 bp->msix_table[offset].vector);
1301 offset++;
9f6c9258 1302#ifdef BCM_CNIC
ca92429f
DK
1303 if (nvecs == offset)
1304 return;
9f6c9258
DK
1305 offset++;
1306#endif
ca92429f 1307
ec6ba945 1308 for_each_eth_queue(bp, i) {
ca92429f
DK
1309 if (nvecs == offset)
1310 return;
51c1a580
MS
1311 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1312 i, bp->msix_table[offset].vector);
9f6c9258 1313
ca92429f 1314 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
9f6c9258
DK
1315 }
1316}
1317
d6214d7a 1318void bnx2x_free_irq(struct bnx2x *bp)
9f6c9258 1319{
30a5de77
DK
1320 if (bp->flags & USING_MSIX_FLAG &&
1321 !(bp->flags & USING_SINGLE_MSIX_FLAG))
ca92429f 1322 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
6383c0b3 1323 CNIC_PRESENT + 1);
d6214d7a 1324 else
30a5de77 1325 free_irq(bp->dev->irq, bp->dev);
9f6c9258
DK
1326}
1327
30a5de77 1328int __devinit bnx2x_enable_msix(struct bnx2x *bp)
9f6c9258 1329{
d6214d7a 1330 int msix_vec = 0, i, rc, req_cnt;
9f6c9258 1331
d6214d7a 1332 bp->msix_table[msix_vec].entry = msix_vec;
51c1a580 1333 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
d6214d7a
DK
1334 bp->msix_table[0].entry);
1335 msix_vec++;
9f6c9258
DK
1336
1337#ifdef BCM_CNIC
d6214d7a 1338 bp->msix_table[msix_vec].entry = msix_vec;
51c1a580 1339 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
d6214d7a
DK
1340 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1341 msix_vec++;
9f6c9258 1342#endif
6383c0b3 1343 /* We need separate vectors for ETH queues only (not FCoE) */
ec6ba945 1344 for_each_eth_queue(bp, i) {
d6214d7a 1345 bp->msix_table[msix_vec].entry = msix_vec;
51c1a580
MS
1346 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1347 msix_vec, msix_vec, i);
d6214d7a 1348 msix_vec++;
9f6c9258
DK
1349 }
1350
6383c0b3 1351 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
d6214d7a
DK
1352
1353 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
9f6c9258
DK
1354
1355 /*
1356 * reconfigure number of tx/rx queues according to available
1357 * MSI-X vectors
1358 */
1359 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
d6214d7a
DK
		/* how many fewer vectors do we have? */
1361 int diff = req_cnt - rc;
9f6c9258 1362
51c1a580 1363 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
9f6c9258
DK
1364
1365 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1366
1367 if (rc) {
30a5de77
DK
1368 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1369 goto no_msix;
9f6c9258 1370 }
d6214d7a
DK
1371 /*
1372 * decrease number of queues by number of unallocated entries
1373 */
1374 bp->num_queues -= diff;
9f6c9258 1375
51c1a580 1376 BNX2X_DEV_INFO("New queue configuration set: %d\n",
30a5de77
DK
1377 bp->num_queues);
1378 } else if (rc > 0) {
1379 /* Get by with single vector */
1380 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1381 if (rc) {
1382 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1383 rc);
1384 goto no_msix;
1385 }
1386
1387 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1388 bp->flags |= USING_SINGLE_MSIX_FLAG;
1389
1390 } else if (rc < 0) {
51c1a580 1391 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
30a5de77 1392 goto no_msix;
9f6c9258
DK
1393 }
1394
1395 bp->flags |= USING_MSIX_FLAG;
1396
1397 return 0;
30a5de77
DK
1398
1399no_msix:
1400 /* fall to INTx if not enough memory */
1401 if (rc == -ENOMEM)
1402 bp->flags |= DISABLE_MSI_FLAG;
1403
1404 return rc;
9f6c9258
DK
1405}
1406
1407static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1408{
ca92429f 1409 int i, rc, offset = 0;
9f6c9258 1410
ca92429f
DK
1411 rc = request_irq(bp->msix_table[offset++].vector,
1412 bnx2x_msix_sp_int, 0,
9f6c9258
DK
1413 bp->dev->name, bp->dev);
1414 if (rc) {
1415 BNX2X_ERR("request sp irq failed\n");
1416 return -EBUSY;
1417 }
1418
1419#ifdef BCM_CNIC
1420 offset++;
1421#endif
ec6ba945 1422 for_each_eth_queue(bp, i) {
9f6c9258
DK
1423 struct bnx2x_fastpath *fp = &bp->fp[i];
1424 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1425 bp->dev->name, i);
1426
d6214d7a 1427 rc = request_irq(bp->msix_table[offset].vector,
9f6c9258
DK
1428 bnx2x_msix_fp_int, 0, fp->name, fp);
1429 if (rc) {
ca92429f
DK
1430 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1431 bp->msix_table[offset].vector, rc);
1432 bnx2x_free_msix_irqs(bp, offset);
9f6c9258
DK
1433 return -EBUSY;
1434 }
1435
d6214d7a 1436 offset++;
9f6c9258
DK
1437 }
1438
ec6ba945 1439 i = BNX2X_NUM_ETH_QUEUES(bp);
6383c0b3 1440 offset = 1 + CNIC_PRESENT;
51c1a580 1441 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
9f6c9258
DK
1442 bp->msix_table[0].vector,
1443 0, bp->msix_table[offset].vector,
1444 i - 1, bp->msix_table[offset + i - 1].vector);
1445
1446 return 0;
1447}
1448
d6214d7a 1449int bnx2x_enable_msi(struct bnx2x *bp)
9f6c9258
DK
1450{
1451 int rc;
1452
1453 rc = pci_enable_msi(bp->pdev);
1454 if (rc) {
51c1a580 1455 BNX2X_DEV_INFO("MSI is not attainable\n");
9f6c9258
DK
1456 return -1;
1457 }
1458 bp->flags |= USING_MSI_FLAG;
1459
1460 return 0;
1461}
1462
1463static int bnx2x_req_irq(struct bnx2x *bp)
1464{
1465 unsigned long flags;
30a5de77 1466 unsigned int irq;
9f6c9258 1467
30a5de77 1468 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
9f6c9258
DK
1469 flags = 0;
1470 else
1471 flags = IRQF_SHARED;
1472
30a5de77
DK
1473 if (bp->flags & USING_MSIX_FLAG)
1474 irq = bp->msix_table[0].vector;
1475 else
1476 irq = bp->pdev->irq;
1477
1478 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
9f6c9258
DK
1479}
1480
1191cb83 1481static int bnx2x_setup_irqs(struct bnx2x *bp)
619c5cb6
VZ
1482{
1483 int rc = 0;
30a5de77
DK
1484 if (bp->flags & USING_MSIX_FLAG &&
1485 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
619c5cb6
VZ
1486 rc = bnx2x_req_msix_irqs(bp);
1487 if (rc)
1488 return rc;
1489 } else {
1490 bnx2x_ack_int(bp);
1491 rc = bnx2x_req_irq(bp);
1492 if (rc) {
1493 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1494 return rc;
1495 }
1496 if (bp->flags & USING_MSI_FLAG) {
1497 bp->dev->irq = bp->pdev->irq;
30a5de77
DK
1498 netdev_info(bp->dev, "using MSI IRQ %d\n",
1499 bp->dev->irq);
1500 }
1501 if (bp->flags & USING_MSIX_FLAG) {
1502 bp->dev->irq = bp->msix_table[0].vector;
1503 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1504 bp->dev->irq);
619c5cb6
VZ
1505 }
1506 }
1507
1508 return 0;
1509}
1510
1191cb83 1511static void bnx2x_napi_enable(struct bnx2x *bp)
9f6c9258
DK
1512{
1513 int i;
1514
619c5cb6 1515 for_each_rx_queue(bp, i)
9f6c9258
DK
1516 napi_enable(&bnx2x_fp(bp, i, napi));
1517}
1518
1191cb83 1519static void bnx2x_napi_disable(struct bnx2x *bp)
9f6c9258
DK
1520{
1521 int i;
1522
619c5cb6 1523 for_each_rx_queue(bp, i)
9f6c9258
DK
1524 napi_disable(&bnx2x_fp(bp, i, napi));
1525}
1526
1527void bnx2x_netif_start(struct bnx2x *bp)
1528{
4b7ed897
DK
1529 if (netif_running(bp->dev)) {
1530 bnx2x_napi_enable(bp);
1531 bnx2x_int_enable(bp);
1532 if (bp->state == BNX2X_STATE_OPEN)
1533 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
1534 }
1535}
1536
1537void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1538{
1539 bnx2x_int_disable_sync(bp, disable_hw);
1540 bnx2x_napi_disable(bp);
9f6c9258 1541}
9f6c9258 1542
8307fa3e
VZ
1543u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1544{
8307fa3e 1545 struct bnx2x *bp = netdev_priv(dev);
cdb9d6ae 1546
faa28314 1547#ifdef BCM_CNIC
cdb9d6ae 1548 if (!NO_FCOE(bp)) {
8307fa3e
VZ
1549 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1550 u16 ether_type = ntohs(hdr->h_proto);
1551
1552 /* Skip VLAN tag if present */
1553 if (ether_type == ETH_P_8021Q) {
1554 struct vlan_ethhdr *vhdr =
1555 (struct vlan_ethhdr *)skb->data;
1556
1557 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1558 }
1559
1560 /* If ethertype is FCoE or FIP - use FCoE ring */
1561 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
6383c0b3 1562 return bnx2x_fcoe_tx(bp, txq_index);
8307fa3e
VZ
1563 }
1564#endif
cdb9d6ae 1565 /* select a non-FCoE queue */
6383c0b3 1566 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
8307fa3e
VZ
1567}
1568
96305234 1569
d6214d7a
DK
1570void bnx2x_set_num_queues(struct bnx2x *bp)
1571{
96305234
DK
1572 /* RSS queues */
1573 bp->num_queues = bnx2x_calc_num_queues(bp);
ec6ba945 1574
614c76df 1575#ifdef BCM_CNIC
a3348722
BW
1576 /* override in STORAGE SD modes */
1577 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
614c76df
DK
1578 bp->num_queues = 1;
1579#endif
ec6ba945 1580 /* Add special queues */
6383c0b3 1581 bp->num_queues += NON_ETH_CONTEXT_USE;
ec6ba945
VZ
1582}
1583
cdb9d6ae
VZ
1584/**
1585 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1586 *
1587 * @bp: Driver handle
1588 *
 * We currently support at most 16 Tx queues for each CoS thus we will
 * allocate a multiple of 16 for ETH L2 rings according to the value of the
 * bp->max_cos.
 *
 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
 * index after all ETH L2 indices.
 *
 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
 * 16..31,...) with indices that are not coupled with any real Tx queue.
1599 *
1600 * The proper configuration of skb->queue_mapping is handled by
1601 * bnx2x_select_queue() and __skb_tx_hash().
1602 *
1603 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1604 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1605 */
1191cb83 1606static int bnx2x_set_real_num_queues(struct bnx2x *bp)
ec6ba945 1607{
6383c0b3 1608 int rc, tx, rx;
ec6ba945 1609
6383c0b3
AE
1610 tx = MAX_TXQS_PER_COS * bp->max_cos;
1611 rx = BNX2X_NUM_ETH_QUEUES(bp);
ec6ba945 1612
6383c0b3
AE
1613/* account for fcoe queue */
1614#ifdef BCM_CNIC
1615 if (!NO_FCOE(bp)) {
1616 rx += FCOE_PRESENT;
1617 tx += FCOE_PRESENT;
1618 }
ec6ba945 1619#endif
6383c0b3
AE
1620
1621 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1622 if (rc) {
1623 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1624 return rc;
1625 }
1626 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1627 if (rc) {
1628 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1629 return rc;
1630 }
1631
51c1a580 1632 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
6383c0b3
AE
1633 tx, rx);
1634
ec6ba945
VZ
1635 return rc;
1636}
1637
1191cb83 1638static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
a8c94b91
VZ
1639{
1640 int i;
1641
1642 for_each_queue(bp, i) {
1643 struct bnx2x_fastpath *fp = &bp->fp[i];
e52fcb24 1644 u32 mtu;
a8c94b91
VZ
1645
1646 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1647 if (IS_FCOE_IDX(i))
1648 /*
			 * Although there are no IP frames expected to arrive on
1650 * this ring we still want to add an
1651 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1652 * overrun attack.
1653 */
e52fcb24 1654 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
a8c94b91 1655 else
e52fcb24
ED
1656 mtu = bp->dev->mtu;
1657 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1658 IP_HEADER_ALIGNMENT_PADDING +
1659 ETH_OVREHEAD +
1660 mtu +
1661 BNX2X_FW_RX_ALIGN_END;
		/* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
a8c94b91
VZ
1663 }
1664}
1665
1191cb83 1666static int bnx2x_init_rss_pf(struct bnx2x *bp)
619c5cb6
VZ
1667{
1668 int i;
1669 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
1670 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1671
	/* Prepare the initial contents of the indirection table if RSS is
	 * enabled
	 */
96305234
DK
1675 for (i = 0; i < sizeof(ind_table); i++)
1676 ind_table[i] =
1677 bp->fp->cl_id +
1678 ethtool_rxfh_indir_default(i, num_eth_queues);
619c5cb6
VZ
1679
1680 /*
1681 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
 * per-port, so if explicit configuration is needed, do it only
1683 * for a PMF.
1684 *
1685 * For 57712 and newer on the other hand it's a per-function
1686 * configuration.
1687 */
96305234
DK
1688 return bnx2x_config_rss_eth(bp, ind_table,
1689 bp->port.pmf || !CHIP_IS_E1x(bp));
619c5cb6
VZ
1690}
1691
96305234
DK
1692int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1693 u8 *ind_table, bool config_hash)
619c5cb6 1694{
3b603066 1695 struct bnx2x_config_rss_params params = {NULL};
619c5cb6
VZ
1696 int i;
1697
1698 /* Although RSS is meaningless when there is a single HW queue we
1699 * still need it enabled in order to have HW Rx hash generated.
1700 *
1701 * if (!is_eth_multi(bp))
1702 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1703 */
1704
96305234 1705 params.rss_obj = rss_obj;
619c5cb6
VZ
1706
1707 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1708
96305234 1709 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
619c5cb6 1710
96305234
DK
1711 /* RSS configuration */
1712 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1713 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1714 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1715 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
619c5cb6 1716
96305234
DK
1717 /* Hash bits */
1718 params.rss_result_mask = MULTI_MASK;
619c5cb6 1719
96305234 1720 memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
619c5cb6 1721
96305234
DK
1722 if (config_hash) {
1723 /* RSS keys */
1724 for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1725 params.rss_key[i] = random32();
619c5cb6 1726
96305234 1727 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
619c5cb6
VZ
1728 }
1729
1730 return bnx2x_config_rss(bp, &params);
1731}
1732
1191cb83 1733static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
619c5cb6 1734{
3b603066 1735 struct bnx2x_func_state_params func_params = {NULL};
619c5cb6
VZ
1736
1737 /* Prepare parameters for function state transitions */
1738 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1739
1740 func_params.f_obj = &bp->func_obj;
1741 func_params.cmd = BNX2X_F_CMD_HW_INIT;
1742
1743 func_params.params.hw_init.load_phase = load_code;
1744
1745 return bnx2x_func_state_change(bp, &func_params);
1746}
1747
/*
 * Cleans the objects that have internal lists without sending
 * ramrods. Should be run when interrupts are disabled.
 */
1752static void bnx2x_squeeze_objects(struct bnx2x *bp)
1753{
1754 int rc;
1755 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3b603066 1756 struct bnx2x_mcast_ramrod_params rparam = {NULL};
619c5cb6
VZ
1757 struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
1758
1759 /***************** Cleanup MACs' object first *************************/
1760
1761 /* Wait for completion of requested */
1762 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1763 /* Perform a dry cleanup */
1764 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1765
1766 /* Clean ETH primary MAC */
1767 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1768 rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
1769 &ramrod_flags);
1770 if (rc != 0)
1771 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1772
1773 /* Cleanup UC list */
1774 vlan_mac_flags = 0;
1775 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1776 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1777 &ramrod_flags);
1778 if (rc != 0)
1779 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1780
1781 /***************** Now clean mcast object *****************************/
1782 rparam.mcast_obj = &bp->mcast_obj;
1783 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1784
1785 /* Add a DEL command... */
1786 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1787 if (rc < 0)
51c1a580
MS
1788 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
1789 rc);
619c5cb6
VZ
1790
1791 /* ...and wait until all pending commands are cleared */
1792 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1793 while (rc != 0) {
1794 if (rc < 0) {
1795 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1796 rc);
1797 return;
1798 }
1799
1800 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1801 }
1802}
1803
1804#ifndef BNX2X_STOP_ON_ERROR
1805#define LOAD_ERROR_EXIT(bp, label) \
1806 do { \
1807 (bp)->state = BNX2X_STATE_ERROR; \
1808 goto label; \
1809 } while (0)
1810#else
1811#define LOAD_ERROR_EXIT(bp, label) \
1812 do { \
1813 (bp)->state = BNX2X_STATE_ERROR; \
1814 (bp)->panic = 1; \
1815 return -EBUSY; \
1816 } while (0)
1817#endif
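/* Note: in a normal build LOAD_ERROR_EXIT() records BNX2X_STATE_ERROR and
 * jumps to the requested unwind label so bnx2x_nic_load() can release what
 * it has acquired so far; with BNX2X_STOP_ON_ERROR it additionally latches
 * bp->panic and returns -EBUSY immediately so the failing state is
 * preserved for debugging.
 */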
1818
452427b0
YM
1819bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
1820{
1821 /* build FW version dword */
1822 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
1823 (BCM_5710_FW_MINOR_VERSION << 8) +
1824 (BCM_5710_FW_REVISION_VERSION << 16) +
1825 (BCM_5710_FW_ENGINEERING_VERSION << 24);
1826
1827 /* read loaded FW from chip */
1828 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
1829
1830 DP(NETIF_MSG_IFUP, "loaded fw %x, my fw %x\n", loaded_fw, my_fw);
1831
1832 if (loaded_fw != my_fw) {
1833 if (is_err)
1834 BNX2X_ERR("bnx2x with FW %x was already loaded, which mismatches my %x FW. aborting\n",
1835 loaded_fw, my_fw);
1836 return false;
1837 }
1838
1839 return true;
1840}
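/* The dword built above packs the FW version as
 * major | minor << 8 | revision << 16 | engineering << 24, matching the
 * layout read back from XSEM_REG_PRAM, so a plain equality check is enough
 * to detect a mismatch with the firmware another function already loaded.
 */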
1841
1191cb83
ED
1842/**
1843 * bnx2x_bz_fp - zero content of the fastpath structure.
1844 *
1845 * @bp: driver handle
1846 * @index: fastpath index to be zeroed
1847 *
1848 * Makes sure the contents of the bp->fp[index].napi are kept
1849 * intact.
1850 */
1851static void bnx2x_bz_fp(struct bnx2x *bp, int index)
1852{
1853 struct bnx2x_fastpath *fp = &bp->fp[index];
1854 struct napi_struct orig_napi = fp->napi;
1855 /* bzero bnx2x_fastpath contents */
1856 if (bp->stats_init)
1857 memset(fp, 0, sizeof(*fp));
1858 else {
1859 /* Keep Queue statistics */
1860 struct bnx2x_eth_q_stats *tmp_eth_q_stats;
1861 struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
1862
1863 tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
1864 GFP_KERNEL);
1865 if (tmp_eth_q_stats)
1866 memcpy(tmp_eth_q_stats, &fp->eth_q_stats,
1867 sizeof(struct bnx2x_eth_q_stats));
1868
1869 tmp_eth_q_stats_old =
1870 kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
1871 GFP_KERNEL);
1872 if (tmp_eth_q_stats_old)
1873 memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old,
1874 sizeof(struct bnx2x_eth_q_stats_old));
1875
1876 memset(fp, 0, sizeof(*fp));
1877
1878 if (tmp_eth_q_stats) {
1879 memcpy(&fp->eth_q_stats, tmp_eth_q_stats,
1880 sizeof(struct bnx2x_eth_q_stats));
1881 kfree(tmp_eth_q_stats);
1882 }
1883
1884 if (tmp_eth_q_stats_old) {
1885 memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old,
1886 sizeof(struct bnx2x_eth_q_stats_old));
1887 kfree(tmp_eth_q_stats_old);
1888 }
1889
1890 }
1891
1892 /* Restore the NAPI object as it has been already initialized */
1893 fp->napi = orig_napi;
1894
1895 fp->bp = bp;
1896 fp->index = index;
1897 if (IS_ETH_FP(fp))
1898 fp->max_cos = bp->max_cos;
1899 else
1900 /* Special queues support only one CoS */
1901 fp->max_cos = 1;
1902
1903 /*
1904 * set the tpa flag for each queue. The tpa flag determines the queue
1905 * minimal size so it must be set prior to queue memory allocation
1906 */
1907 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
1908 (bp->flags & GRO_ENABLE_FLAG &&
1909 bnx2x_mtu_allows_gro(bp->dev->mtu)));
1910 if (bp->flags & TPA_ENABLE_FLAG)
1911 fp->mode = TPA_MODE_LRO;
1912 else if (bp->flags & GRO_ENABLE_FLAG)
1913 fp->mode = TPA_MODE_GRO;
1914
1915#ifdef BCM_CNIC
1916 /* We don't want TPA on an FCoE L2 ring */
1917 if (IS_FCOE_FP(fp))
1918 fp->disable_tpa = 1;
1919#endif
1920}
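/* In short: bnx2x_bz_fp() zeroes the whole fastpath entry while saving and
 * restoring the napi struct (and, once statistics have been initialized,
 * the per-queue stats), then re-derives the index, max_cos and the TPA/GRO
 * mode from the current bp flags and MTU.
 */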
1921
1922
9f6c9258
DK
1923/* must be called with rtnl_lock */
1924int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1925{
619c5cb6 1926 int port = BP_PORT(bp);
9f6c9258
DK
1927 u32 load_code;
1928 int i, rc;
1929
1930#ifdef BNX2X_STOP_ON_ERROR
51c1a580
MS
1931 if (unlikely(bp->panic)) {
1932 BNX2X_ERR("Can't load NIC when there is panic\n");
9f6c9258 1933 return -EPERM;
51c1a580 1934 }
9f6c9258
DK
1935#endif
1936
1937 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1938
2ae17f66
VZ
1939 /* Set the initial link reported state to link down */
1940 bnx2x_acquire_phy_lock(bp);
1941 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1942 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1943 &bp->last_reported_link.link_report_flags);
1944 bnx2x_release_phy_lock(bp);
1945
523224a3
DK
1946 /* must be called before memory allocation and HW init */
1947 bnx2x_ilt_set_info(bp);
1948
6383c0b3
AE
1949 /*
1950 * Zero fastpath structures while preserving invariants such as the napi
1951 * struct (allocated only once), the fp index, max_cos and the bp pointer.
1952 * Also set fp->disable_tpa.
b3b83c3f 1953 */
51c1a580 1954 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
b3b83c3f
DK
1955 for_each_queue(bp, i)
1956 bnx2x_bz_fp(bp, i);
1957
6383c0b3 1958
a8c94b91
VZ
1959 /* Set the receive queues buffer size */
1960 bnx2x_set_rx_buf_size(bp);
1961
d6214d7a 1962 if (bnx2x_alloc_mem(bp))
9f6c9258 1963 return -ENOMEM;
d6214d7a 1964
b3b83c3f
DK
1965 /* As long as bnx2x_alloc_mem() may possibly update
1966 * bp->num_queues, bnx2x_set_real_num_queues() should always
1967 * come after it.
1968 */
ec6ba945 1969 rc = bnx2x_set_real_num_queues(bp);
d6214d7a 1970 if (rc) {
ec6ba945 1971 BNX2X_ERR("Unable to set real_num_queues\n");
619c5cb6 1972 LOAD_ERROR_EXIT(bp, load_error0);
9f6c9258
DK
1973 }
1974
6383c0b3
AE
1975 /* Configure multi-CoS mappings in the kernel.
1976 * This configuration may be overridden by a multi-class queue discipline
1977 * or by a dcbx negotiation result.
1978 */
1979 bnx2x_setup_tc(bp->dev, bp->max_cos);
1980
9f6c9258
DK
1981 bnx2x_napi_enable(bp);
1982
889b9af3
AE
1983 /* set pf load just before approaching the MCP */
1984 bnx2x_set_pf_load(bp);
1985
9f6c9258 1986 /* Send LOAD_REQUEST command to MCP
619c5cb6
VZ
1987 * Returns the type of LOAD command:
1988 * if it is the first port to be initialized
1989 * common blocks should be initialized, otherwise - not
1990 */
9f6c9258 1991 if (!BP_NOMCP(bp)) {
95c6c616
AE
1992 /* init fw_seq */
1993 bp->fw_seq =
1994 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
1995 DRV_MSG_SEQ_NUMBER_MASK);
1996 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
1997
1998 /* Get current FW pulse sequence */
1999 bp->fw_drv_pulse_wr_seq =
2000 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2001 DRV_PULSE_SEQ_MASK);
2002 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2003
a22f0788 2004 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
9f6c9258
DK
2005 if (!load_code) {
2006 BNX2X_ERR("MCP response failure, aborting\n");
2007 rc = -EBUSY;
619c5cb6 2008 LOAD_ERROR_EXIT(bp, load_error1);
9f6c9258
DK
2009 }
2010 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
51c1a580 2011 BNX2X_ERR("Driver load refused\n");
9f6c9258 2012 rc = -EBUSY; /* other port in diagnostic mode */
619c5cb6 2013 LOAD_ERROR_EXIT(bp, load_error1);
9f6c9258 2014 }
d1e2d966
AE
2015 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2016 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
d1e2d966 2017 /* abort nic load if version mismatch */
452427b0 2018 if (!bnx2x_test_firmware_version(bp, true)) {
d1e2d966
AE
2019 rc = -EBUSY;
2020 LOAD_ERROR_EXIT(bp, load_error2);
2021 }
2022 }
9f6c9258
DK
2023
2024 } else {
f2e0899f 2025 int path = BP_PATH(bp);
9f6c9258 2026
f2e0899f
DK
2027 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2028 path, load_count[path][0], load_count[path][1],
2029 load_count[path][2]);
2030 load_count[path][0]++;
2031 load_count[path][1 + port]++;
2032 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2033 path, load_count[path][0], load_count[path][1],
2034 load_count[path][2]);
2035 if (load_count[path][0] == 1)
9f6c9258 2036 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
f2e0899f 2037 else if (load_count[path][1 + port] == 1)
9f6c9258
DK
2038 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
2039 else
2040 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
2041 }
2042
2043 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
f2e0899f 2044 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
3deb8167 2045 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
9f6c9258 2046 bp->port.pmf = 1;
3deb8167
YR
2047 /*
2048 * We need the barrier to ensure the ordering between the
2049 * writing to bp->port.pmf here and reading it from the
2050 * bnx2x_periodic_task().
2051 */
2052 smp_mb();
3deb8167 2053 } else
9f6c9258 2054 bp->port.pmf = 0;
6383c0b3 2055
51c1a580 2056 DP(NETIF_MSG_IFUP, "pmf %d\n", bp->port.pmf);
9f6c9258 2057
619c5cb6
VZ
2058 /* Init Function state controlling object */
2059 bnx2x__init_func_obj(bp);
2060
9f6c9258
DK
2061 /* Initialize HW */
2062 rc = bnx2x_init_hw(bp, load_code);
2063 if (rc) {
2064 BNX2X_ERR("HW init failed, aborting\n");
a22f0788 2065 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6 2066 LOAD_ERROR_EXIT(bp, load_error2);
9f6c9258
DK
2067 }
2068
d6214d7a
DK
2069 /* Connect to IRQs */
2070 rc = bnx2x_setup_irqs(bp);
523224a3 2071 if (rc) {
51c1a580 2072 BNX2X_ERR("IRQs setup failed\n");
523224a3 2073 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6 2074 LOAD_ERROR_EXIT(bp, load_error2);
523224a3
DK
2075 }
2076
9f6c9258
DK
2077 /* Setup NIC internals and enable interrupts */
2078 bnx2x_nic_init(bp, load_code);
2079
619c5cb6
VZ
2080 /* Init per-function objects */
2081 bnx2x_init_bp_objs(bp);
2082
f2e0899f
DK
2083 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2084 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
619c5cb6
VZ
2085 (bp->common.shmem2_base)) {
2086 if (SHMEM2_HAS(bp, dcc_support))
2087 SHMEM2_WR(bp, dcc_support,
2088 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2089 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
a3348722
BW
2090 if (SHMEM2_HAS(bp, afex_driver_support))
2091 SHMEM2_WR(bp, afex_driver_support,
2092 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
619c5cb6
VZ
2093 }
2094
a3348722
BW
2095 /* Set AFEX default VLAN tag to an invalid value */
2096 bp->afex_def_vlan_tag = -1;
2097
619c5cb6
VZ
2098 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2099 rc = bnx2x_func_start(bp);
2100 if (rc) {
2101 BNX2X_ERR("Function start failed!\n");
c636322b 2102 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6
VZ
2103 LOAD_ERROR_EXIT(bp, load_error3);
2104 }
9f6c9258
DK
2105
2106 /* Send LOAD_DONE command to MCP */
2107 if (!BP_NOMCP(bp)) {
a22f0788 2108 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9f6c9258
DK
2109 if (!load_code) {
2110 BNX2X_ERR("MCP response failure, aborting\n");
2111 rc = -EBUSY;
619c5cb6 2112 LOAD_ERROR_EXIT(bp, load_error3);
9f6c9258
DK
2113 }
2114 }
2115
619c5cb6 2116 rc = bnx2x_setup_leading(bp);
9f6c9258
DK
2117 if (rc) {
2118 BNX2X_ERR("Setup leading failed!\n");
619c5cb6 2119 LOAD_ERROR_EXIT(bp, load_error3);
f2e0899f 2120 }
9f6c9258 2121
9f6c9258 2122#ifdef BCM_CNIC
523224a3 2123 /* Enable Timer scan */
619c5cb6 2124 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
9f6c9258 2125#endif
f85582f8 2126
523224a3 2127 for_each_nondefault_queue(bp, i) {
619c5cb6 2128 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
51c1a580
MS
2129 if (rc) {
2130 BNX2X_ERR("Queue setup failed\n");
619c5cb6 2131 LOAD_ERROR_EXIT(bp, load_error4);
51c1a580 2132 }
523224a3
DK
2133 }
2134
619c5cb6 2135 rc = bnx2x_init_rss_pf(bp);
51c1a580
MS
2136 if (rc) {
2137 BNX2X_ERR("PF RSS init failed\n");
619c5cb6 2138 LOAD_ERROR_EXIT(bp, load_error4);
51c1a580 2139 }
619c5cb6 2140
523224a3
DK
2141 /* Now that clients are configured we are ready to work */
2142 bp->state = BNX2X_STATE_OPEN;
2143
619c5cb6
VZ
2144 /* Configure a ucast MAC */
2145 rc = bnx2x_set_eth_mac(bp, true);
51c1a580
MS
2146 if (rc) {
2147 BNX2X_ERR("Setting Ethernet MAC failed\n");
619c5cb6 2148 LOAD_ERROR_EXIT(bp, load_error4);
51c1a580 2149 }
6e30dd4e 2150
e3835b99
DK
2151 if (bp->pending_max) {
2152 bnx2x_update_max_mf_config(bp, bp->pending_max);
2153 bp->pending_max = 0;
2154 }
2155
9f6c9258
DK
2156 if (bp->port.pmf)
2157 bnx2x_initial_phy_init(bp, load_mode);
2158
619c5cb6
VZ
2159 /* Start fast path */
2160
2161 /* Initialize Rx filter. */
2162 netif_addr_lock_bh(bp->dev);
6e30dd4e 2163 bnx2x_set_rx_mode(bp->dev);
619c5cb6 2164 netif_addr_unlock_bh(bp->dev);
6e30dd4e 2165
619c5cb6 2166 /* Start the Tx */
9f6c9258
DK
2167 switch (load_mode) {
2168 case LOAD_NORMAL:
523224a3
DK
2169 /* Tx queues should only be re-enabled */
2170 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
2171 break;
2172
2173 case LOAD_OPEN:
2174 netif_tx_start_all_queues(bp->dev);
523224a3 2175 smp_mb__after_clear_bit();
9f6c9258
DK
2176 break;
2177
2178 case LOAD_DIAG:
9f6c9258
DK
2179 bp->state = BNX2X_STATE_DIAG;
2180 break;
2181
2182 default:
2183 break;
2184 }
2185
00253a8c 2186 if (bp->port.pmf)
e695a2dd 2187 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
00253a8c 2188 else
9f6c9258
DK
2189 bnx2x__link_status_update(bp);
2190
2191 /* start the timer */
2192 mod_timer(&bp->timer, jiffies + bp->current_interval);
2193
2194#ifdef BCM_CNIC
b306f5ed
DK
2195 /* re-read iscsi info */
2196 bnx2x_get_iscsi_info(bp);
9f6c9258
DK
2197 bnx2x_setup_cnic_irq_info(bp);
2198 if (bp->state == BNX2X_STATE_OPEN)
2199 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2200#endif
9f6c9258 2201
9ce392d4
YM
2202 /* mark driver is loaded in shmem2 */
2203 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2204 u32 val;
2205 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2206 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2207 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2208 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2209 }
2210
619c5cb6
VZ
2211 /* Wait for all pending SP commands to complete */
2212 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2213 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2214 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2215 return -EBUSY;
2216 }
6891dd25 2217
619c5cb6 2218 bnx2x_dcbx_init(bp);
9f6c9258
DK
2219 return 0;
2220
619c5cb6 2221#ifndef BNX2X_STOP_ON_ERROR
9f6c9258 2222load_error4:
619c5cb6 2223#ifdef BCM_CNIC
9f6c9258 2224 /* Disable Timer scan */
619c5cb6 2225 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
9f6c9258
DK
2226#endif
2227load_error3:
2228 bnx2x_int_disable_sync(bp, 1);
d6214d7a 2229
619c5cb6
VZ
2230 /* Clean queueable objects */
2231 bnx2x_squeeze_objects(bp);
2232
9f6c9258
DK
2233 /* Free SKBs, SGEs, TPA pool and driver internals */
2234 bnx2x_free_skbs(bp);
ec6ba945 2235 for_each_rx_queue(bp, i)
9f6c9258 2236 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2237
9f6c9258 2238 /* Release IRQs */
d6214d7a
DK
2239 bnx2x_free_irq(bp);
2240load_error2:
2241 if (!BP_NOMCP(bp)) {
2242 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2243 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2244 }
2245
2246 bp->port.pmf = 0;
9f6c9258
DK
2247load_error1:
2248 bnx2x_napi_disable(bp);
889b9af3
AE
2249 /* clear pf_load status, as it was already set */
2250 bnx2x_clear_pf_load(bp);
d6214d7a 2251load_error0:
9f6c9258
DK
2252 bnx2x_free_mem(bp);
2253
2254 return rc;
619c5cb6 2255#endif /* ! BNX2X_STOP_ON_ERROR */
9f6c9258
DK
2256}
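/* Load order recap (derived from the flow above): zero the fastpaths,
 * allocate memory, negotiate LOAD_REQ with the MCP (or fall back to the
 * per-path load_count bookkeeping when there is no MCP), init HW and IRQs,
 * bring up the leading and non-default queues, program RSS and the primary
 * MAC, and only then open the Rx filters and Tx queues. The load_error*
 * labels unwind these steps in reverse.
 */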
2257
2258/* must be called with rtnl_lock */
2259int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
2260{
2261 int i;
c9ee9206
VZ
2262 bool global = false;
2263
9ce392d4
YM
2264 /* mark driver is unloaded in shmem2 */
2265 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2266 u32 val;
2267 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2268 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2269 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2270 }
2271
c9ee9206
VZ
2272 if ((bp->state == BNX2X_STATE_CLOSED) ||
2273 (bp->state == BNX2X_STATE_ERROR)) {
2274 /* We can get here if the driver has been unloaded
2275 * during parity error recovery and is either waiting for a
2276 * leader to complete or for other functions to unload and
2277 * then ifdown has been issued. In this case we want to
2278 * unload and let other functions complete a recovery
2279 * process.
2280 */
9f6c9258
DK
2281 bp->recovery_state = BNX2X_RECOVERY_DONE;
2282 bp->is_leader = 0;
c9ee9206
VZ
2283 bnx2x_release_leader_lock(bp);
2284 smp_mb();
2285
51c1a580
MS
2286 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2287 BNX2X_ERR("Can't unload in closed or error state\n");
9f6c9258
DK
2288 return -EINVAL;
2289 }
2290
87b7ba3d
VZ
2291 /*
2292 * It's important to set bp->state to a value different from
2293 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2294 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2295 */
2296 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2297 smp_mb();
2298
9505ee37
VZ
2299 /* Stop Tx */
2300 bnx2x_tx_disable(bp);
2301
9f6c9258
DK
2302#ifdef BCM_CNIC
2303 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2304#endif
9f6c9258 2305
9f6c9258 2306 bp->rx_mode = BNX2X_RX_MODE_NONE;
9f6c9258 2307
9f6c9258 2308 del_timer_sync(&bp->timer);
f85582f8 2309
619c5cb6
VZ
2310 /* Set ALWAYS_ALIVE bit in shmem */
2311 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2312
2313 bnx2x_drv_pulse(bp);
9f6c9258 2314
f85582f8 2315 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1355b704 2316 bnx2x_save_statistics(bp);
9f6c9258
DK
2317
2318 /* Cleanup the chip if needed */
2319 if (unload_mode != UNLOAD_RECOVERY)
2320 bnx2x_chip_cleanup(bp, unload_mode);
523224a3 2321 else {
c9ee9206
VZ
2322 /* Send the UNLOAD_REQUEST to the MCP */
2323 bnx2x_send_unload_req(bp, unload_mode);
2324
2325 /*
2326 * Prevent transactions to host from the functions on the
2327 * engine that doesn't reset global blocks in case of global
2328 * attention once gloabl blocks are reset and gates are opened
2329 * (the engine which leader will perform the recovery
2330 * last).
2331 */
2332 if (!CHIP_IS_E1x(bp))
2333 bnx2x_pf_disable(bp);
2334
2335 /* Disable HW interrupts, NAPI */
523224a3
DK
2336 bnx2x_netif_stop(bp, 1);
2337
2338 /* Release IRQs */
d6214d7a 2339 bnx2x_free_irq(bp);
c9ee9206
VZ
2340
2341 /* Report UNLOAD_DONE to MCP */
2342 bnx2x_send_unload_done(bp);
523224a3 2343 }
9f6c9258 2344
619c5cb6
VZ
2345 /*
2346 * At this stage no more interrupts will arrive so we may safely clean
2347 * the queueable objects here in case they failed to get cleaned so far.
2348 */
2349 bnx2x_squeeze_objects(bp);
2350
79616895
VZ
2351 /* There should be no more pending SP commands at this stage */
2352 bp->sp_state = 0;
2353
9f6c9258
DK
2354 bp->port.pmf = 0;
2355
2356 /* Free SKBs, SGEs, TPA pool and driver internals */
2357 bnx2x_free_skbs(bp);
ec6ba945 2358 for_each_rx_queue(bp, i)
9f6c9258 2359 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2360
9f6c9258
DK
2361 bnx2x_free_mem(bp);
2362
2363 bp->state = BNX2X_STATE_CLOSED;
2364
c9ee9206
VZ
2365 /* Check if there are pending parity attentions. If there are - set
2366 * RECOVERY_IN_PROGRESS.
2367 */
2368 if (bnx2x_chk_parity_attn(bp, &global, false)) {
2369 bnx2x_set_reset_in_progress(bp);
2370
2371 /* Set RESET_IS_GLOBAL if needed */
2372 if (global)
2373 bnx2x_set_reset_global(bp);
2374 }
2375
2376
9f6c9258
DK
2377 /* The last driver must disable a "close the gate" if there is no
2378 * parity attention or "process kill" pending.
2379 */
889b9af3 2380 if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
9f6c9258
DK
2381 bnx2x_disable_close_the_gate(bp);
2382
9f6c9258
DK
2383 return 0;
2384}
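/* Unload recap: the state is moved away from BNX2X_STATE_OPEN before Tx is
 * stopped (so bnx2x_tx_int() cannot re-wake the queues), the chip is then
 * cleaned up (or, for UNLOAD_RECOVERY, only the MCP handshake and the
 * interrupt teardown are performed), queueable objects are squeezed, memory
 * is freed, and the parity/recovery flags are re-evaluated last.
 */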
f85582f8 2385
9f6c9258
DK
2386int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2387{
2388 u16 pmcsr;
2389
adf5f6a1
DK
2390 /* If there is no power capability, silently succeed */
2391 if (!bp->pm_cap) {
51c1a580 2392 BNX2X_DEV_INFO("No power capability. Breaking.\n");
adf5f6a1
DK
2393 return 0;
2394 }
2395
9f6c9258
DK
2396 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2397
2398 switch (state) {
2399 case PCI_D0:
2400 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2401 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2402 PCI_PM_CTRL_PME_STATUS));
2403
2404 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2405 /* delay required during transition out of D3hot */
2406 msleep(20);
2407 break;
2408
2409 case PCI_D3hot:
2410 /* If there are other clients above, don't
2411 shut down the power */
2412 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2413 return 0;
2414 /* Don't shut down the power for emulation and FPGA */
2415 if (CHIP_REV_IS_SLOW(bp))
2416 return 0;
2417
2418 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2419 pmcsr |= 3;
2420
2421 if (bp->wol)
2422 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2423
2424 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2425 pmcsr);
2426
2427 /* No more memory access after this point until
2428 * device is brought back to D0.
2429 */
2430 break;
2431
2432 default:
51c1a580 2433 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
9f6c9258
DK
2434 return -EINVAL;
2435 }
2436 return 0;
2437}
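/* Note: the D3hot branch deliberately bails out while other users still hold
 * the device (enable_cnt > 1) or on emulation/FPGA boards, and the
 * "pmcsr |= 3" above is simply the PCI encoding of the D3hot power state,
 * optionally combined with PME enable when Wake-on-LAN is configured.
 */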
2438
9f6c9258
DK
2439/*
2440 * net_device service functions
2441 */
d6214d7a 2442int bnx2x_poll(struct napi_struct *napi, int budget)
9f6c9258
DK
2443{
2444 int work_done = 0;
6383c0b3 2445 u8 cos;
9f6c9258
DK
2446 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2447 napi);
2448 struct bnx2x *bp = fp->bp;
2449
2450 while (1) {
2451#ifdef BNX2X_STOP_ON_ERROR
2452 if (unlikely(bp->panic)) {
2453 napi_complete(napi);
2454 return 0;
2455 }
2456#endif
2457
6383c0b3
AE
2458 for_each_cos_in_tx_queue(fp, cos)
2459 if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
2460 bnx2x_tx_int(bp, &fp->txdata[cos]);
2461
9f6c9258
DK
2462
2463 if (bnx2x_has_rx_work(fp)) {
2464 work_done += bnx2x_rx_int(fp, budget - work_done);
2465
2466 /* must not complete if we consumed full budget */
2467 if (work_done >= budget)
2468 break;
2469 }
2470
2471 /* Fall out from the NAPI loop if needed */
2472 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
ec6ba945
VZ
2473#ifdef BCM_CNIC
2474 /* No need to update SB for FCoE L2 ring as long as
2475 * it's connected to the default SB and the SB
2476 * has been updated when NAPI was scheduled.
2477 */
2478 if (IS_FCOE_FP(fp)) {
2479 napi_complete(napi);
2480 break;
2481 }
2482#endif
2483
9f6c9258 2484 bnx2x_update_fpsb_idx(fp);
f85582f8
DK
2485 /* bnx2x_has_rx_work() reads the status block,
2486 * thus we need to ensure that status block indices
2487 * have been actually read (bnx2x_update_fpsb_idx)
2488 * prior to this check (bnx2x_has_rx_work) so that
2489 * we won't write the "newer" value of the status block
2490 * to IGU (if there was a DMA right after
2491 * bnx2x_has_rx_work and if there is no rmb, the memory
2492 * reading (bnx2x_update_fpsb_idx) may be postponed
2493 * to right before bnx2x_ack_sb). In this case there
2494 * will never be another interrupt until there is
2495 * another update of the status block, while there
2496 * is still unhandled work.
2497 */
9f6c9258
DK
2498 rmb();
2499
2500 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2501 napi_complete(napi);
2502 /* Re-enable interrupts */
51c1a580 2503 DP(NETIF_MSG_RX_STATUS,
523224a3
DK
2504 "Update index to %d\n", fp->fp_hc_idx);
2505 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2506 le16_to_cpu(fp->fp_hc_idx),
9f6c9258
DK
2507 IGU_INT_ENABLE, 1);
2508 break;
2509 }
2510 }
2511 }
2512
2513 return work_done;
2514}
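/* Poll loop summary: every CoS Tx ring of the fastpath is serviced first,
 * then Rx is processed up to the remaining budget; the IGU interrupt is only
 * re-armed after re-checking for work past the rmb() above, which is what
 * prevents the missed status-block-update race described in the comment.
 */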
2515
9f6c9258
DK
2516/* we split the first BD into headers and data BDs
2517 * to ease the pain of our fellow microcode engineers;
2518 * we use one mapping for both BDs
9f6c9258
DK
2519 */
2520static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
6383c0b3 2521 struct bnx2x_fp_txdata *txdata,
9f6c9258
DK
2522 struct sw_tx_bd *tx_buf,
2523 struct eth_tx_start_bd **tx_bd, u16 hlen,
2524 u16 bd_prod, int nbd)
2525{
2526 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2527 struct eth_tx_bd *d_tx_bd;
2528 dma_addr_t mapping;
2529 int old_len = le16_to_cpu(h_tx_bd->nbytes);
2530
2531 /* first fix first BD */
2532 h_tx_bd->nbd = cpu_to_le16(nbd);
2533 h_tx_bd->nbytes = cpu_to_le16(hlen);
2534
51c1a580
MS
2535 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
2536 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
9f6c9258
DK
2537
2538 /* now get a new data BD
2539 * (after the pbd) and fill it */
2540 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 2541 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258
DK
2542
2543 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2544 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2545
2546 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2547 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2548 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2549
2550 /* this marks the BD as one that has no individual mapping */
2551 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2552
2553 DP(NETIF_MSG_TX_QUEUED,
2554 "TSO split data size is %d (%x:%x)\n",
2555 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2556
2557 /* update tx_bd */
2558 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2559
2560 return bd_prod;
2561}
2562
2563static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2564{
2565 if (fix > 0)
2566 csum = (u16) ~csum_fold(csum_sub(csum,
2567 csum_partial(t_header - fix, fix, 0)));
2568
2569 else if (fix < 0)
2570 csum = (u16) ~csum_fold(csum_add(csum,
2571 csum_partial(t_header, -fix, 0)));
2572
2573 return swab16(csum);
2574}
2575
2576static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2577{
2578 u32 rc;
2579
2580 if (skb->ip_summed != CHECKSUM_PARTIAL)
2581 rc = XMIT_PLAIN;
2582
2583 else {
d0d9d8ef 2584 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
9f6c9258
DK
2585 rc = XMIT_CSUM_V6;
2586 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2587 rc |= XMIT_CSUM_TCP;
2588
2589 } else {
2590 rc = XMIT_CSUM_V4;
2591 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2592 rc |= XMIT_CSUM_TCP;
2593 }
2594 }
2595
5892b9e9
VZ
2596 if (skb_is_gso_v6(skb))
2597 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2598 else if (skb_is_gso(skb))
2599 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
9f6c9258
DK
2600
2601 return rc;
2602}
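/* bnx2x_xmit_type() therefore returns XMIT_PLAIN for skbs without a partial
 * checksum, XMIT_CSUM_V4/V6 (plus XMIT_CSUM_TCP for TCP) for checksum
 * offload, and adds XMIT_GSO_V4 or XMIT_GSO_V6 together with the implied
 * checksum bits for GSO skbs.
 */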
2603
2604#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2605/* check if packet requires linearization (packet is too fragmented).
2606 No need to check fragmentation if page size > 8K (there will be no
2607 violation to FW restrictions) */
2608static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2609 u32 xmit_type)
2610{
2611 int to_copy = 0;
2612 int hlen = 0;
2613 int first_bd_sz = 0;
2614
2615 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2616 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2617
2618 if (xmit_type & XMIT_GSO) {
2619 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2620 /* Check if LSO packet needs to be copied:
2621 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2622 int wnd_size = MAX_FETCH_BD - 3;
2623 /* Number of windows to check */
2624 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2625 int wnd_idx = 0;
2626 int frag_idx = 0;
2627 u32 wnd_sum = 0;
2628
2629 /* Headers length */
2630 hlen = (int)(skb_transport_header(skb) - skb->data) +
2631 tcp_hdrlen(skb);
2632
2633 /* Amount of data (w/o headers) on linear part of SKB*/
2634 first_bd_sz = skb_headlen(skb) - hlen;
2635
2636 wnd_sum = first_bd_sz;
2637
2638 /* Calculate the first sum - it's special */
2639 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2640 wnd_sum +=
9e903e08 2641 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
9f6c9258
DK
2642
2643 /* If there was data on linear skb data - check it */
2644 if (first_bd_sz > 0) {
2645 if (unlikely(wnd_sum < lso_mss)) {
2646 to_copy = 1;
2647 goto exit_lbl;
2648 }
2649
2650 wnd_sum -= first_bd_sz;
2651 }
2652
2653 /* Others are easier: run through the frag list and
2654 check all windows */
2655 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2656 wnd_sum +=
9e903e08 2657 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
9f6c9258
DK
2658
2659 if (unlikely(wnd_sum < lso_mss)) {
2660 to_copy = 1;
2661 break;
2662 }
2663 wnd_sum -=
9e903e08 2664 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
9f6c9258
DK
2665 }
2666 } else {
2667 /* a too-fragmented non-LSO packet should always
2668 be linearized */
2669 to_copy = 1;
2670 }
2671 }
2672
2673exit_lbl:
2674 if (unlikely(to_copy))
2675 DP(NETIF_MSG_TX_QUEUED,
51c1a580 2676 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
9f6c9258
DK
2677 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2678 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2679
2680 return to_copy;
2681}
2682#endif
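/* The check above slides a window of (MAX_FETCH_BD - 3) consecutive BDs over
 * the linear data and the frags, and requests linearization whenever a
 * window sums to less than gso_size bytes, i.e. whenever a single LSO
 * segment could end up spread over more BDs than the FW restriction the
 * comment refers to allows.
 */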
2683
2297a2da
VZ
2684static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2685 u32 xmit_type)
f2e0899f 2686{
2297a2da
VZ
2687 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2688 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2689 ETH_TX_PARSE_BD_E2_LSO_MSS;
f2e0899f
DK
2690 if ((xmit_type & XMIT_GSO_V6) &&
2691 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2297a2da 2692 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
f2e0899f
DK
2693}
2694
2695/**
e8920674 2696 * bnx2x_set_pbd_gso - update PBD in GSO case.
f2e0899f 2697 *
e8920674
DK
2698 * @skb: packet skb
2699 * @pbd: parse BD
2700 * @xmit_type: xmit flags
f2e0899f
DK
2701 */
2702static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2703 struct eth_tx_parse_bd_e1x *pbd,
2704 u32 xmit_type)
2705{
2706 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2707 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2708 pbd->tcp_flags = pbd_tcp_flags(skb);
2709
2710 if (xmit_type & XMIT_GSO_V4) {
2711 pbd->ip_id = swab16(ip_hdr(skb)->id);
2712 pbd->tcp_pseudo_csum =
2713 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2714 ip_hdr(skb)->daddr,
2715 0, IPPROTO_TCP, 0));
2716
2717 } else
2718 pbd->tcp_pseudo_csum =
2719 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2720 &ipv6_hdr(skb)->daddr,
2721 0, IPPROTO_TCP, 0));
2722
2723 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2724}
f85582f8 2725
f2e0899f 2726/**
e8920674 2727 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
f2e0899f 2728 *
e8920674
DK
2729 * @bp: driver handle
2730 * @skb: packet skb
2731 * @parsing_data: data to be updated
2732 * @xmit_type: xmit flags
f2e0899f 2733 *
e8920674 2734 * 57712 related
f2e0899f
DK
2735 */
2736static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2297a2da 2737 u32 *parsing_data, u32 xmit_type)
f2e0899f 2738{
e39aece7
VZ
2739 *parsing_data |=
2740 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2741 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2742 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
f2e0899f 2743
e39aece7
VZ
2744 if (xmit_type & XMIT_CSUM_TCP) {
2745 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2746 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2747 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
f2e0899f 2748
e39aece7
VZ
2749 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2750 } else
2751 /* We support checksum offload for TCP and UDP only.
2752 * No need to pass the UDP header length - it's a constant.
2753 */
2754 return skb_transport_header(skb) +
2755 sizeof(struct udphdr) - skb->data;
f2e0899f
DK
2756}
2757
93ef5c02
DK
2758static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2759 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2760{
93ef5c02
DK
2761 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2762
2763 if (xmit_type & XMIT_CSUM_V4)
2764 tx_start_bd->bd_flags.as_bitfield |=
2765 ETH_TX_BD_FLAGS_IP_CSUM;
2766 else
2767 tx_start_bd->bd_flags.as_bitfield |=
2768 ETH_TX_BD_FLAGS_IPV6;
2769
2770 if (!(xmit_type & XMIT_CSUM_TCP))
2771 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
93ef5c02
DK
2772}
2773
f2e0899f 2774/**
e8920674 2775 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
f2e0899f 2776 *
e8920674
DK
2777 * @bp: driver handle
2778 * @skb: packet skb
2779 * @pbd: parse BD to be updated
2780 * @xmit_type: xmit flags
f2e0899f
DK
2781 */
2782static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2783 struct eth_tx_parse_bd_e1x *pbd,
2784 u32 xmit_type)
2785{
e39aece7 2786 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
f2e0899f
DK
2787
2788 /* for now NS flag is not used in Linux */
2789 pbd->global_data =
2790 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2791 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2792
2793 pbd->ip_hlen_w = (skb_transport_header(skb) -
e39aece7 2794 skb_network_header(skb)) >> 1;
f2e0899f 2795
e39aece7
VZ
2796 hlen += pbd->ip_hlen_w;
2797
2798 /* We support checksum offload for TCP and UDP only */
2799 if (xmit_type & XMIT_CSUM_TCP)
2800 hlen += tcp_hdrlen(skb) / 2;
2801 else
2802 hlen += sizeof(struct udphdr) / 2;
f2e0899f
DK
2803
2804 pbd->total_hlen_w = cpu_to_le16(hlen);
2805 hlen = hlen*2;
2806
2807 if (xmit_type & XMIT_CSUM_TCP) {
2808 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2809
2810 } else {
2811 s8 fix = SKB_CS_OFF(skb); /* signed! */
2812
2813 DP(NETIF_MSG_TX_QUEUED,
2814 "hlen %d fix %d csum before fix %x\n",
2815 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2816
2817 /* HW bug: fixup the CSUM */
2818 pbd->tcp_pseudo_csum =
2819 bnx2x_csum_fix(skb_transport_header(skb),
2820 SKB_CS(skb), fix);
2821
2822 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2823 pbd->tcp_pseudo_csum);
2824 }
2825
2826 return hlen;
2827}
f85582f8 2828
9f6c9258
DK
2829/* called with netif_tx_lock
2830 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2831 * netif_wake_queue()
2832 */
2833netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2834{
2835 struct bnx2x *bp = netdev_priv(dev);
6383c0b3 2836
9f6c9258
DK
2837 struct bnx2x_fastpath *fp;
2838 struct netdev_queue *txq;
6383c0b3 2839 struct bnx2x_fp_txdata *txdata;
9f6c9258 2840 struct sw_tx_bd *tx_buf;
619c5cb6 2841 struct eth_tx_start_bd *tx_start_bd, *first_bd;
9f6c9258 2842 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
523224a3 2843 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
f2e0899f 2844 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2297a2da 2845 u32 pbd_e2_parsing_data = 0;
9f6c9258 2846 u16 pkt_prod, bd_prod;
6383c0b3 2847 int nbd, txq_index, fp_index, txdata_index;
9f6c9258
DK
2848 dma_addr_t mapping;
2849 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2850 int i;
2851 u8 hlen = 0;
2852 __le16 pkt_size = 0;
2853 struct ethhdr *eth;
2854 u8 mac_type = UNICAST_ADDRESS;
2855
2856#ifdef BNX2X_STOP_ON_ERROR
2857 if (unlikely(bp->panic))
2858 return NETDEV_TX_BUSY;
2859#endif
2860
6383c0b3
AE
2861 txq_index = skb_get_queue_mapping(skb);
2862 txq = netdev_get_tx_queue(dev, txq_index);
2863
2864 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2865
2866 /* decode the fastpath index and the cos index from the txq */
2867 fp_index = TXQ_TO_FP(txq_index);
2868 txdata_index = TXQ_TO_COS(txq_index);
2869
2870#ifdef BCM_CNIC
2871 /*
2872 * Override the above for the FCoE queue:
2873 * - FCoE fp entry is right after the ETH entries.
2874 * - FCoE L2 queue uses bp->txdata[0] only.
2875 */
2876 if (unlikely(!NO_FCOE(bp) && (txq_index ==
2877 bnx2x_fcoe_tx(bp, txq_index)))) {
2878 fp_index = FCOE_IDX;
2879 txdata_index = 0;
2880 }
2881#endif
2882
2883 /* enable this debug print to view the transmission queue being used
51c1a580 2884 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
6383c0b3 2885 txq_index, fp_index, txdata_index); */
9f6c9258 2886
6383c0b3 2887 /* locate the fastpath and the txdata */
9f6c9258 2888 fp = &bp->fp[fp_index];
6383c0b3
AE
2889 txdata = &fp->txdata[txdata_index];
2890
2891 /* enable this debug print to view the transmission details
51c1a580
MS
2892 DP(NETIF_MSG_TX_QUEUED,
2893 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
6383c0b3 2894 txdata->cid, fp_index, txdata_index, txdata, fp); */
9f6c9258 2895
6383c0b3
AE
2896 if (unlikely(bnx2x_tx_avail(bp, txdata) <
2897 (skb_shinfo(skb)->nr_frags + 3))) {
9f6c9258
DK
2898 fp->eth_q_stats.driver_xoff++;
2899 netif_tx_stop_queue(txq);
2900 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2901 return NETDEV_TX_BUSY;
2902 }
2903
51c1a580
MS
2904 DP(NETIF_MSG_TX_QUEUED,
2905 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x\n",
6383c0b3 2906 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9f6c9258
DK
2907 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2908
2909 eth = (struct ethhdr *)skb->data;
2910
2911 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2912 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2913 if (is_broadcast_ether_addr(eth->h_dest))
2914 mac_type = BROADCAST_ADDRESS;
2915 else
2916 mac_type = MULTICAST_ADDRESS;
2917 }
2918
2919#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2920 /* First, check if we need to linearize the skb (due to FW
2921 restrictions). No need to check fragmentation if page size > 8K
2922 (there will be no violation to FW restrictions) */
2923 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2924 /* Statistics of linearization */
2925 bp->lin_cnt++;
2926 if (skb_linearize(skb) != 0) {
51c1a580
MS
2927 DP(NETIF_MSG_TX_QUEUED,
2928 "SKB linearization failed - silently dropping this SKB\n");
9f6c9258
DK
2929 dev_kfree_skb_any(skb);
2930 return NETDEV_TX_OK;
2931 }
2932 }
2933#endif
619c5cb6
VZ
2934 /* Map skb linear data for DMA */
2935 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2936 skb_headlen(skb), DMA_TO_DEVICE);
2937 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
51c1a580
MS
2938 DP(NETIF_MSG_TX_QUEUED,
2939 "SKB mapping failed - silently dropping this SKB\n");
619c5cb6
VZ
2940 dev_kfree_skb_any(skb);
2941 return NETDEV_TX_OK;
2942 }
9f6c9258
DK
2943 /*
2944 Please read carefully. First we use one BD which we mark as start,
2945 then we have a parsing info BD (used for TSO or xsum),
2946 and only then we have the rest of the TSO BDs.
2947 (don't forget to mark the last one as last,
2948 and to unmap only AFTER you write to the BD ...)
2949 And above all, all PBD sizes are in words - NOT DWORDS!
2950 */
2951
619c5cb6
VZ
2952 /* get current pkt produced now - advance it just before sending packet
2953 * since mapping of pages may fail and cause packet to be dropped
2954 */
6383c0b3
AE
2955 pkt_prod = txdata->tx_pkt_prod;
2956 bd_prod = TX_BD(txdata->tx_bd_prod);
9f6c9258 2957
619c5cb6
VZ
2958 /* get a tx_buf and first BD
2959 * tx_start_bd may be changed during SPLIT,
2960 * but first_bd will always stay first
2961 */
6383c0b3
AE
2962 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
2963 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
619c5cb6 2964 first_bd = tx_start_bd;
9f6c9258
DK
2965
2966 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
f85582f8
DK
2967 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2968 mac_type);
2969
9f6c9258 2970 /* header nbd */
f85582f8 2971 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
9f6c9258
DK
2972
2973 /* remember the first BD of the packet */
6383c0b3 2974 tx_buf->first_bd = txdata->tx_bd_prod;
9f6c9258
DK
2975 tx_buf->skb = skb;
2976 tx_buf->flags = 0;
2977
2978 DP(NETIF_MSG_TX_QUEUED,
2979 "sending pkt %u @%p next_idx %u bd %u @%p\n",
6383c0b3 2980 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
9f6c9258 2981
eab6d18d 2982 if (vlan_tx_tag_present(skb)) {
523224a3
DK
2983 tx_start_bd->vlan_or_ethertype =
2984 cpu_to_le16(vlan_tx_tag_get(skb));
2985 tx_start_bd->bd_flags.as_bitfield |=
2986 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
9f6c9258 2987 } else
523224a3 2988 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
9f6c9258
DK
2989
2990 /* turn on parsing and get a BD */
2991 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9f6c9258 2992
93ef5c02
DK
2993 if (xmit_type & XMIT_CSUM)
2994 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
9f6c9258 2995
619c5cb6 2996 if (!CHIP_IS_E1x(bp)) {
6383c0b3 2997 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
f2e0899f
DK
2998 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2999 /* Set PBD in checksum offload case */
3000 if (xmit_type & XMIT_CSUM)
2297a2da
VZ
3001 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3002 &pbd_e2_parsing_data,
3003 xmit_type);
619c5cb6
VZ
3004 if (IS_MF_SI(bp)) {
3005 /*
3006 * fill in the MAC addresses in the PBD - for local
3007 * switching
3008 */
3009 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
3010 &pbd_e2->src_mac_addr_mid,
3011 &pbd_e2->src_mac_addr_lo,
3012 eth->h_source);
3013 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
3014 &pbd_e2->dst_mac_addr_mid,
3015 &pbd_e2->dst_mac_addr_lo,
3016 eth->h_dest);
3017 }
f2e0899f 3018 } else {
6383c0b3 3019 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
f2e0899f
DK
3020 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3021 /* Set PBD in checksum offload case */
3022 if (xmit_type & XMIT_CSUM)
3023 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
9f6c9258 3024
9f6c9258
DK
3025 }
3026
f85582f8 3027 /* Setup the data pointer of the first BD of the packet */
9f6c9258
DK
3028 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3029 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
619c5cb6 3030 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
9f6c9258
DK
3031 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3032 pkt_size = tx_start_bd->nbytes;
3033
51c1a580
MS
3034 DP(NETIF_MSG_TX_QUEUED,
3035 "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
9f6c9258
DK
3036 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3037 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
523224a3
DK
3038 tx_start_bd->bd_flags.as_bitfield,
3039 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
9f6c9258
DK
3040
3041 if (xmit_type & XMIT_GSO) {
3042
3043 DP(NETIF_MSG_TX_QUEUED,
3044 "TSO packet len %d hlen %d total len %d tso size %d\n",
3045 skb->len, hlen, skb_headlen(skb),
3046 skb_shinfo(skb)->gso_size);
3047
3048 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3049
3050 if (unlikely(skb_headlen(skb) > hlen))
6383c0b3
AE
3051 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3052 &tx_start_bd, hlen,
3053 bd_prod, ++nbd);
619c5cb6 3054 if (!CHIP_IS_E1x(bp))
2297a2da
VZ
3055 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3056 xmit_type);
f2e0899f
DK
3057 else
3058 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
9f6c9258 3059 }
2297a2da
VZ
3060
3061 /* Set the PBD's parsing_data field if not zero
3062 * (for the chips newer than 57711).
3063 */
3064 if (pbd_e2_parsing_data)
3065 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3066
9f6c9258
DK
3067 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3068
f85582f8 3069 /* Handle fragmented skb */
9f6c9258
DK
3070 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3071 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3072
9e903e08
ED
3073 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3074 skb_frag_size(frag), DMA_TO_DEVICE);
619c5cb6 3075 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2df1a70a 3076 unsigned int pkts_compl = 0, bytes_compl = 0;
619c5cb6 3077
51c1a580
MS
3078 DP(NETIF_MSG_TX_QUEUED,
3079 "Unable to map page - dropping packet...\n");
619c5cb6
VZ
3080
3081 /* we need unmap all buffers already mapped
3082 * for this SKB;
3083 * first_bd->nbd need to be properly updated
3084 * before call to bnx2x_free_tx_pkt
3085 */
3086 first_bd->nbd = cpu_to_le16(nbd);
6383c0b3 3087 bnx2x_free_tx_pkt(bp, txdata,
2df1a70a
TH
3088 TX_BD(txdata->tx_pkt_prod),
3089 &pkts_compl, &bytes_compl);
619c5cb6
VZ
3090 return NETDEV_TX_OK;
3091 }
3092
9f6c9258 3093 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3094 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 3095 if (total_pkt_bd == NULL)
6383c0b3 3096 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 3097
9f6c9258
DK
3098 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3099 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9e903e08
ED
3100 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3101 le16_add_cpu(&pkt_size, skb_frag_size(frag));
619c5cb6 3102 nbd++;
9f6c9258
DK
3103
3104 DP(NETIF_MSG_TX_QUEUED,
3105 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3106 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3107 le16_to_cpu(tx_data_bd->nbytes));
3108 }
3109
3110 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3111
619c5cb6
VZ
3112 /* update with actual num BDs */
3113 first_bd->nbd = cpu_to_le16(nbd);
3114
9f6c9258
DK
3115 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3116
3117 /* now send a tx doorbell, counting the next BD
3118 * if the packet contains or ends with it
3119 */
3120 if (TX_BD_POFF(bd_prod) < nbd)
3121 nbd++;
3122
619c5cb6
VZ
3123 /* total_pkt_bytes should be set on the first data BD if
3124 * it's not an LSO packet and there is more than one
3125 * data BD. In this case pkt_size is limited by an MTU value.
3126 * However we prefer to set it for an LSO packet (while we don't
3127 * have to) in order to save some CPU cycles in the non-LSO
3128 * case, which we care about much more.
3129 */
9f6c9258
DK
3130 if (total_pkt_bd != NULL)
3131 total_pkt_bd->total_pkt_bytes = pkt_size;
3132
523224a3 3133 if (pbd_e1x)
9f6c9258 3134 DP(NETIF_MSG_TX_QUEUED,
51c1a580 3135 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
523224a3
DK
3136 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3137 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3138 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3139 le16_to_cpu(pbd_e1x->total_hlen_w));
f2e0899f
DK
3140 if (pbd_e2)
3141 DP(NETIF_MSG_TX_QUEUED,
3142 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
3143 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
3144 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
3145 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
3146 pbd_e2->parsing_data);
9f6c9258
DK
3147 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3148
2df1a70a
TH
3149 netdev_tx_sent_queue(txq, skb->len);
3150
8373c57d
WB
3151 skb_tx_timestamp(skb);
3152
6383c0b3 3153 txdata->tx_pkt_prod++;
9f6c9258
DK
3154 /*
3155 * Make sure that the BD data is updated before updating the producer
3156 * since FW might read the BD right after the producer is updated.
3157 * This is only applicable for weak-ordered memory model archs such
3158 * as IA-64. The following barrier is also mandatory since FW
3159 * assumes packets must have BDs.
3160 */
3161 wmb();
3162
6383c0b3 3163 txdata->tx_db.data.prod += nbd;
9f6c9258 3164 barrier();
f85582f8 3165
6383c0b3 3166 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
9f6c9258
DK
3167
3168 mmiowb();
3169
6383c0b3 3170 txdata->tx_bd_prod += nbd;
9f6c9258 3171
bc14786a 3172 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 4)) {
9f6c9258
DK
3173 netif_tx_stop_queue(txq);
3174
3175 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3176 * ordering of set_bit() in netif_tx_stop_queue() and read of
3177 * fp->bd_tx_cons */
3178 smp_mb();
3179
3180 fp->eth_q_stats.driver_xoff++;
bc14786a 3181 if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4)
9f6c9258
DK
3182 netif_tx_wake_queue(txq);
3183 }
6383c0b3 3184 txdata->tx_pkt++;
9f6c9258
DK
3185
3186 return NETDEV_TX_OK;
3187}
f85582f8 3188
6383c0b3
AE
3189/**
3190 * bnx2x_setup_tc - routine to configure net_device for multi tc
3191 *
3192 * @netdev: net device to configure
3193 * @tc: number of traffic classes to enable
3194 *
3195 * callback connected to the ndo_setup_tc function pointer
3196 */
3197int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3198{
3199 int cos, prio, count, offset;
3200 struct bnx2x *bp = netdev_priv(dev);
3201
3202 /* setup tc must be called under rtnl lock */
3203 ASSERT_RTNL();
3204
3205 /* no traffic classes requested. aborting */
3206 if (!num_tc) {
3207 netdev_reset_tc(dev);
3208 return 0;
3209 }
3210
3211 /* requested to support too many traffic classes */
3212 if (num_tc > bp->max_cos) {
51c1a580
MS
3213 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3214 num_tc, bp->max_cos);
6383c0b3
AE
3215 return -EINVAL;
3216 }
3217
3218 /* declare amount of supported traffic classes */
3219 if (netdev_set_num_tc(dev, num_tc)) {
51c1a580 3220 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
6383c0b3
AE
3221 return -EINVAL;
3222 }
3223
3224 /* configure priority to traffic class mapping */
3225 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3226 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
51c1a580
MS
3227 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3228 "mapping priority %d to tc %d\n",
6383c0b3
AE
3229 prio, bp->prio_to_cos[prio]);
3230 }
3231
3232
3233 /* Use this configuration to differentiate tc0 from other COSes.
3234 This can be used for ETS or PFC, and saves the effort of setting
3235 up a multi-class queue discipline or negotiating DCBX with a switch:
3236 netdev_set_prio_tc_map(dev, 0, 0);
94f05b0f 3237 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
6383c0b3
AE
3238 for (prio = 1; prio < 16; prio++) {
3239 netdev_set_prio_tc_map(dev, prio, 1);
94f05b0f 3240 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
6383c0b3
AE
3241 } */
3242
3243 /* configure traffic class to transmission queue mapping */
3244 for (cos = 0; cos < bp->max_cos; cos++) {
3245 count = BNX2X_NUM_ETH_QUEUES(bp);
3246 offset = cos * MAX_TXQS_PER_COS;
3247 netdev_set_tc_queue(dev, cos, count, offset);
51c1a580
MS
3248 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3249 "mapping tc %d to offset %d count %d\n",
6383c0b3
AE
3250 cos, offset, count);
3251 }
3252
3253 return 0;
3254}
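/* Usage sketch: the driver itself primes the default mapping during load via
 * bnx2x_setup_tc(bp->dev, bp->max_cos) (see bnx2x_nic_load() above); an
 * mqprio-style qdisc or a DCBX result reaching ndo_setup_tc lands in the
 * same routine and simply passes a different num_tc.
 */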
3255
9f6c9258
DK
3256/* called with rtnl_lock */
3257int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3258{
3259 struct sockaddr *addr = p;
3260 struct bnx2x *bp = netdev_priv(dev);
619c5cb6 3261 int rc = 0;
9f6c9258 3262
51c1a580
MS
3263 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
3264 BNX2X_ERR("Requested MAC address is not valid\n");
614c76df 3265 return -EINVAL;
51c1a580 3266 }
614c76df
DK
3267
3268#ifdef BCM_CNIC
a3348722
BW
3269 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3270 !is_zero_ether_addr(addr->sa_data)) {
51c1a580 3271 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
9f6c9258 3272 return -EINVAL;
51c1a580 3273 }
614c76df 3274#endif
9f6c9258 3275
619c5cb6
VZ
3276 if (netif_running(dev)) {
3277 rc = bnx2x_set_eth_mac(bp, false);
3278 if (rc)
3279 return rc;
3280 }
3281
7ce5d222 3282 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
9f6c9258 3283 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
619c5cb6 3284
523224a3 3285 if (netif_running(dev))
619c5cb6 3286 rc = bnx2x_set_eth_mac(bp, true);
9f6c9258 3287
619c5cb6 3288 return rc;
9f6c9258
DK
3289}
3290
b3b83c3f
DK
3291static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3292{
3293 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3294 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
6383c0b3 3295 u8 cos;
b3b83c3f
DK
3296
3297 /* Common */
3298#ifdef BCM_CNIC
3299 if (IS_FCOE_IDX(fp_index)) {
3300 memset(sb, 0, sizeof(union host_hc_status_block));
3301 fp->status_blk_mapping = 0;
3302
3303 } else {
3304#endif
3305 /* status blocks */
619c5cb6 3306 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
3307 BNX2X_PCI_FREE(sb->e2_sb,
3308 bnx2x_fp(bp, fp_index,
3309 status_blk_mapping),
3310 sizeof(struct host_hc_status_block_e2));
3311 else
3312 BNX2X_PCI_FREE(sb->e1x_sb,
3313 bnx2x_fp(bp, fp_index,
3314 status_blk_mapping),
3315 sizeof(struct host_hc_status_block_e1x));
3316#ifdef BCM_CNIC
3317 }
3318#endif
3319 /* Rx */
3320 if (!skip_rx_queue(bp, fp_index)) {
3321 bnx2x_free_rx_bds(fp);
3322
3323 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3324 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3325 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3326 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3327 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3328
3329 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3330 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3331 sizeof(struct eth_fast_path_rx_cqe) *
3332 NUM_RCQ_BD);
3333
3334 /* SGE ring */
3335 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3336 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3337 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3338 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3339 }
3340
3341 /* Tx */
3342 if (!skip_tx_queue(bp, fp_index)) {
3343 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3
AE
3344 for_each_cos_in_tx_queue(fp, cos) {
3345 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3346
51c1a580 3347 DP(NETIF_MSG_IFDOWN,
94f05b0f 3348 "freeing tx memory of fp %d cos %d cid %d\n",
6383c0b3
AE
3349 fp_index, cos, txdata->cid);
3350
3351 BNX2X_FREE(txdata->tx_buf_ring);
3352 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3353 txdata->tx_desc_mapping,
3354 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3355 }
b3b83c3f
DK
3356 }
3357 /* end of fastpath */
3358}
3359
3360void bnx2x_free_fp_mem(struct bnx2x *bp)
3361{
3362 int i;
3363 for_each_queue(bp, i)
3364 bnx2x_free_fp_mem_at(bp, i);
3365}
3366
1191cb83 3367static void set_sb_shortcuts(struct bnx2x *bp, int index)
b3b83c3f
DK
3368{
3369 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
619c5cb6 3370 if (!CHIP_IS_E1x(bp)) {
b3b83c3f
DK
3371 bnx2x_fp(bp, index, sb_index_values) =
3372 (__le16 *)status_blk.e2_sb->sb.index_values;
3373 bnx2x_fp(bp, index, sb_running_index) =
3374 (__le16 *)status_blk.e2_sb->sb.running_index;
3375 } else {
3376 bnx2x_fp(bp, index, sb_index_values) =
3377 (__le16 *)status_blk.e1x_sb->sb.index_values;
3378 bnx2x_fp(bp, index, sb_running_index) =
3379 (__le16 *)status_blk.e1x_sb->sb.running_index;
3380 }
3381}
3382
1191cb83
ED
3383/* Returns the number of actually allocated BDs */
3384static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
3385 int rx_ring_size)
3386{
3387 struct bnx2x *bp = fp->bp;
3388 u16 ring_prod, cqe_ring_prod;
3389 int i, failure_cnt = 0;
3390
3391 fp->rx_comp_cons = 0;
3392 cqe_ring_prod = ring_prod = 0;
3393
3394 /* This routine is called only during fp init, so
3395 * fp->eth_q_stats.rx_skb_alloc_failed = 0
3396 */
3397 for (i = 0; i < rx_ring_size; i++) {
3398 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
3399 failure_cnt++;
3400 continue;
3401 }
3402 ring_prod = NEXT_RX_IDX(ring_prod);
3403 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
3404 WARN_ON(ring_prod <= (i - failure_cnt));
3405 }
3406
3407 if (failure_cnt)
3408 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
3409 i - failure_cnt, fp->index);
3410
3411 fp->rx_bd_prod = ring_prod;
3412 /* Limit the CQE producer by the CQE ring size */
3413 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
3414 cqe_ring_prod);
3415 fp->rx_pkt = fp->rx_calls = 0;
3416
3417 fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
3418
3419 return i - failure_cnt;
3420}
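/* Allocation failures here are not fatal: the producer indices only advance
 * for buffers that were actually allocated, the deficit is accounted in
 * rx_skb_alloc_failed, and the caller (bnx2x_alloc_fp_mem_at()) decides
 * whether the resulting ring is still big enough to keep the queue.
 */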
3421
3422static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
3423{
3424 int i;
3425
3426 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
3427 struct eth_rx_cqe_next_page *nextpg;
3428
3429 nextpg = (struct eth_rx_cqe_next_page *)
3430 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
3431 nextpg->addr_hi =
3432 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
3433 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3434 nextpg->addr_lo =
3435 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
3436 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3437 }
3438}
3439
b3b83c3f
DK
3440static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3441{
3442 union host_hc_status_block *sb;
3443 struct bnx2x_fastpath *fp = &bp->fp[index];
3444 int ring_size = 0;
6383c0b3 3445 u8 cos;
c2188952 3446 int rx_ring_size = 0;
b3b83c3f 3447
614c76df 3448#ifdef BCM_CNIC
a3348722
BW
3449 if (!bp->rx_ring_size &&
3450 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
614c76df
DK
3451 rx_ring_size = MIN_RX_SIZE_NONTPA;
3452 bp->rx_ring_size = rx_ring_size;
3453 } else
3454#endif
c2188952 3455 if (!bp->rx_ring_size) {
d760fc37
MY
3456 u32 cfg = SHMEM_RD(bp,
3457 dev_info.port_hw_config[BP_PORT(bp)].default_cfg);
b3b83c3f 3458
c2188952
VZ
3459 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3460
d760fc37
MY
3461 /* Decrease ring size for 1G functions */
3462 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
3463 PORT_HW_CFG_NET_SERDES_IF_SGMII)
3464 rx_ring_size /= 10;
3465
c2188952
VZ
3466 /* allocate at least number of buffers required by FW */
3467 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3468 MIN_RX_SIZE_TPA, rx_ring_size);
3469
3470 bp->rx_ring_size = rx_ring_size;
614c76df 3471 } else /* if rx_ring_size specified - use it */
c2188952 3472 rx_ring_size = bp->rx_ring_size;
b3b83c3f 3473
b3b83c3f
DK
3474 /* Common */
3475 sb = &bnx2x_fp(bp, index, status_blk);
3476#ifdef BCM_CNIC
3477 if (!IS_FCOE_IDX(index)) {
3478#endif
3479 /* status blocks */
619c5cb6 3480 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
3481 BNX2X_PCI_ALLOC(sb->e2_sb,
3482 &bnx2x_fp(bp, index, status_blk_mapping),
3483 sizeof(struct host_hc_status_block_e2));
3484 else
3485 BNX2X_PCI_ALLOC(sb->e1x_sb,
3486 &bnx2x_fp(bp, index, status_blk_mapping),
3487 sizeof(struct host_hc_status_block_e1x));
3488#ifdef BCM_CNIC
3489 }
3490#endif
8eef2af1
DK
3491
3492 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3493 * set shortcuts for it.
3494 */
3495 if (!IS_FCOE_IDX(index))
3496 set_sb_shortcuts(bp, index);
b3b83c3f
DK
3497
3498 /* Tx */
3499 if (!skip_tx_queue(bp, index)) {
3500 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3
AE
3501 for_each_cos_in_tx_queue(fp, cos) {
3502 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3503
51c1a580
MS
3504 DP(NETIF_MSG_IFUP,
3505 "allocating tx memory of fp %d cos %d\n",
6383c0b3
AE
3506 index, cos);
3507
3508 BNX2X_ALLOC(txdata->tx_buf_ring,
b3b83c3f 3509 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6383c0b3
AE
3510 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3511 &txdata->tx_desc_mapping,
b3b83c3f 3512 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6383c0b3 3513 }
b3b83c3f
DK
3514 }
3515
3516 /* Rx */
3517 if (!skip_rx_queue(bp, index)) {
3518 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3519 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3520 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3521 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3522 &bnx2x_fp(bp, index, rx_desc_mapping),
3523 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3524
3525 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3526 &bnx2x_fp(bp, index, rx_comp_mapping),
3527 sizeof(struct eth_fast_path_rx_cqe) *
3528 NUM_RCQ_BD);
3529
3530 /* SGE ring */
3531 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3532 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3533 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3534 &bnx2x_fp(bp, index, rx_sge_mapping),
3535 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3536 /* RX BD ring */
3537 bnx2x_set_next_page_rx_bd(fp);
3538
3539 /* CQ ring */
3540 bnx2x_set_next_page_rx_cq(fp);
3541
3542 /* BDs */
3543 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3544 if (ring_size < rx_ring_size)
3545 goto alloc_mem_err;
3546 }
3547
3548 return 0;
3549
3550/* handles low memory cases */
3551alloc_mem_err:
3552 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3553 index, ring_size);
3554 /* FW will drop all packets if the queue is not big enough;
3555 * in that case we disable the queue.
6383c0b3 3556 * Min size is different for OOO, TPA and non-TPA queues
b3b83c3f
DK
3557 */
3558 if (ring_size < (fp->disable_tpa ?
eb722d7a 3559 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
b3b83c3f
DK
3560 /* release memory allocated for this queue */
3561 bnx2x_free_fp_mem_at(bp, index);
3562 return -ENOMEM;
3563 }
3564 return 0;
3565}
3566
3567int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3568{
3569 int i;
3570
3571 /*
3572 * 1. Allocate FP for leading - fatal if error
3573 * 2. {CNIC} Allocate FCoE FP - fatal if error
6383c0b3
AE
3574 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3575 * 4. Allocate RSS - fix number of queues if error
b3b83c3f
DK
3576 */
3577
3578 /* leading */
3579 if (bnx2x_alloc_fp_mem_at(bp, 0))
3580 return -ENOMEM;
6383c0b3 3581
b3b83c3f 3582#ifdef BCM_CNIC
8eef2af1
DK
3583 if (!NO_FCOE(bp))
3584 /* FCoE */
3585 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
3586 /* we will fail load process instead of mark
3587 * NO_FCOE_FLAG
3588 */
3589 return -ENOMEM;
b3b83c3f 3590#endif
6383c0b3 3591
b3b83c3f
DK
3592 /* RSS */
3593 for_each_nondefault_eth_queue(bp, i)
3594 if (bnx2x_alloc_fp_mem_at(bp, i))
3595 break;
3596
3597 /* handle memory failures */
3598 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3599 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3600
3601 WARN_ON(delta < 0);
3602#ifdef BCM_CNIC
3603 /**
3604 * move non eth FPs next to last eth FP
3605 * must be done in that order
3606 * FCOE_IDX < FWD_IDX < OOO_IDX
3607 */
3608
6383c0b3 3609 /* move FCoE fp even if NO_FCOE_FLAG is on */
b3b83c3f
DK
3610 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
3611#endif
3612 bp->num_queues -= delta;
3613 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3614 bp->num_queues + delta, bp->num_queues);
3615 }
3616
3617 return 0;
3618}
d6214d7a 3619
523224a3
DK
3620void bnx2x_free_mem_bp(struct bnx2x *bp)
3621{
3622 kfree(bp->fp);
3623 kfree(bp->msix_table);
3624 kfree(bp->ilt);
3625}
3626
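/* Allocate the bp-level arrays that do not depend on the number of
 * active queues: the fastpath array (sized for the maximum RSS count
 * plus the non-ethernet contexts), the MSI-X entry table and the ILT.
 */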
3627int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3628{
3629 struct bnx2x_fastpath *fp;
3630 struct msix_entry *tbl;
3631 struct bnx2x_ilt *ilt;
6383c0b3
AE
3632 int msix_table_size = 0;
3633
3634 /*
3635 * The biggest MSI-X table we might need is the maximum number of fast
3636 * path IGU SBs plus the default SB (for PF).
3637 */
3638 msix_table_size = bp->igu_sb_cnt + 1;
523224a3 3639
6383c0b3 3640 /* fp array: RSS plus CNIC related L2 queues */
01e23742 3641 fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE,
6383c0b3 3642 sizeof(*fp), GFP_KERNEL);
523224a3
DK
3643 if (!fp)
3644 goto alloc_err;
3645 bp->fp = fp;
3646
3647 /* msix table */
01e23742 3648 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
523224a3
DK
3649 if (!tbl)
3650 goto alloc_err;
3651 bp->msix_table = tbl;
3652
3653 /* ilt */
3654 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3655 if (!ilt)
3656 goto alloc_err;
3657 bp->ilt = ilt;
3658
3659 return 0;
3660alloc_err:
3661 bnx2x_free_mem_bp(bp);
3662 return -ENOMEM;
3663
3664}
3665
a9fccec7 3666int bnx2x_reload_if_running(struct net_device *dev)
66371c44
MM
3667{
3668 struct bnx2x *bp = netdev_priv(dev);
3669
3670 if (unlikely(!netif_running(dev)))
3671 return 0;
3672
3673 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
3674 return bnx2x_nic_load(bp, LOAD_NORMAL);
3675}
3676
1ac9e428
YR
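/* Return the index of the currently active PHY: INT_PHY when only one
 * PHY is configured; otherwise EXT_PHY1/EXT_PHY2 based on the link
 * state (a SerDes link on a fibre-capable EXT_PHY2 selects EXT_PHY2)
 * or, when the link is down, on the configured PHY selection priority.
 */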
3677int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3678{
3679 u32 sel_phy_idx = 0;
3680 if (bp->link_params.num_phys <= 1)
3681 return INT_PHY;
3682
3683 if (bp->link_vars.link_up) {
3684 sel_phy_idx = EXT_PHY1;
3685 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3686 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3687 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3688 sel_phy_idx = EXT_PHY2;
3689 } else {
3690
3691 switch (bnx2x_phy_selection(&bp->link_params)) {
3692 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3693 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3694 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3695 sel_phy_idx = EXT_PHY1;
3696 break;
3697 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3698 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3699 sel_phy_idx = EXT_PHY2;
3700 break;
3701 }
3702 }
3703
3704 return sel_phy_idx;
3705}
3706
3707int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3708{
3709 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3710 /*
3711 * The selected active PHY is always the one after swapping (in case
3712 * PHY swapping is enabled), so when swapping is enabled we need to
3713 * reverse the configuration.
3714 */
3715
3716 if (bp->link_params.multi_phy_config &
3717 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3718 if (sel_phy_idx == EXT_PHY1)
3719 sel_phy_idx = EXT_PHY2;
3720 else if (sel_phy_idx == EXT_PHY2)
3721 sel_phy_idx = EXT_PHY1;
3722 }
3723 return LINK_CONFIG_IDX(sel_phy_idx);
3724}
3725
bf61ee14
VZ
3726#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
3727int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
3728{
3729 struct bnx2x *bp = netdev_priv(dev);
3730 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
3731
3732 switch (type) {
3733 case NETDEV_FCOE_WWNN:
3734 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
3735 cp->fcoe_wwn_node_name_lo);
3736 break;
3737 case NETDEV_FCOE_WWPN:
3738 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
3739 cp->fcoe_wwn_port_name_lo);
3740 break;
3741 default:
51c1a580 3742 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
bf61ee14
VZ
3743 return -EINVAL;
3744 }
3745
3746 return 0;
3747}
3748#endif
3749
9f6c9258
DK
3750/* called with rtnl_lock */
3751int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3752{
3753 struct bnx2x *bp = netdev_priv(dev);
9f6c9258
DK
3754
3755 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 3756 BNX2X_ERR("Can't change MTU during parity recovery\n");
9f6c9258
DK
3757 return -EAGAIN;
3758 }
3759
3760 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
51c1a580
MS
3761 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
3762 BNX2X_ERR("Can't support requested MTU size\n");
9f6c9258 3763 return -EINVAL;
51c1a580 3764 }
9f6c9258
DK
3765
3766 /* This does not race with packet allocation
3767 * because the actual alloc size is
3768 * only updated as part of load
3769 */
3770 dev->mtu = new_mtu;
3771
66371c44
MM
3772 return bnx2x_reload_if_running(dev);
3773}
3774
c8f44aff 3775netdev_features_t bnx2x_fix_features(struct net_device *dev,
621b4d66 3776 netdev_features_t features)
66371c44
MM
3777{
3778 struct bnx2x *bp = netdev_priv(dev);
3779
3780 /* TPA requires Rx CSUM offloading */
621b4d66 3781 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
66371c44 3782 features &= ~NETIF_F_LRO;
621b4d66
DK
3783 features &= ~NETIF_F_GRO;
3784 }
66371c44
MM
3785
3786 return features;
3787}
3788
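/* Translate the requested netdev features into driver flags (TPA for
 * LRO, GRO aggregation) and the BMAC loopback mode, and reload the
 * NIC if anything changed.  During parity recovery the reload is
 * deferred; bnx2x_nic_load() will run when recovery completes.
 */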
c8f44aff 3789int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
66371c44
MM
3790{
3791 struct bnx2x *bp = netdev_priv(dev);
3792 u32 flags = bp->flags;
538dd2e3 3793 bool bnx2x_reload = false;
66371c44
MM
3794
3795 if (features & NETIF_F_LRO)
3796 flags |= TPA_ENABLE_FLAG;
3797 else
3798 flags &= ~TPA_ENABLE_FLAG;
3799
621b4d66
DK
3800 if (features & NETIF_F_GRO)
3801 flags |= GRO_ENABLE_FLAG;
3802 else
3803 flags &= ~GRO_ENABLE_FLAG;
3804
538dd2e3
MB
3805 if (features & NETIF_F_LOOPBACK) {
3806 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3807 bp->link_params.loopback_mode = LOOPBACK_BMAC;
3808 bnx2x_reload = true;
3809 }
3810 } else {
3811 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
3812 bp->link_params.loopback_mode = LOOPBACK_NONE;
3813 bnx2x_reload = true;
3814 }
3815 }
3816
66371c44
MM
3817 if (flags ^ bp->flags) {
3818 bp->flags = flags;
538dd2e3
MB
3819 bnx2x_reload = true;
3820 }
66371c44 3821
538dd2e3 3822 if (bnx2x_reload) {
66371c44
MM
3823 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
3824 return bnx2x_reload_if_running(dev);
3825 /* else: bnx2x_nic_load() will be called at end of recovery */
9f6c9258
DK
3826 }
3827
66371c44 3828 return 0;
9f6c9258
DK
3829}
3830
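/* Tx timeout handler: record the event in the slowpath rtnl state and
 * schedule the sp_rtnl task, which performs the reset outside of the
 * timeout context.
 */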
3831void bnx2x_tx_timeout(struct net_device *dev)
3832{
3833 struct bnx2x *bp = netdev_priv(dev);
3834
3835#ifdef BNX2X_STOP_ON_ERROR
3836 if (!bp->panic)
3837 bnx2x_panic();
3838#endif
7be08a72
AE
3839
3840 smp_mb__before_clear_bit();
3841 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
3842 smp_mb__after_clear_bit();
3843
9f6c9258 3844 /* This allows the netif to be shutdown gracefully before resetting */
7be08a72 3845 schedule_delayed_work(&bp->sp_rtnl_task, 0);
9f6c9258
DK
3846}
3847
9f6c9258
DK
3848int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3849{
3850 struct net_device *dev = pci_get_drvdata(pdev);
3851 struct bnx2x *bp;
3852
3853 if (!dev) {
3854 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3855 return -ENODEV;
3856 }
3857 bp = netdev_priv(dev);
3858
3859 rtnl_lock();
3860
3861 pci_save_state(pdev);
3862
3863 if (!netif_running(dev)) {
3864 rtnl_unlock();
3865 return 0;
3866 }
3867
3868 netif_device_detach(dev);
3869
3870 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
3871
3872 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3873
3874 rtnl_unlock();
3875
3876 return 0;
3877}
3878
3879int bnx2x_resume(struct pci_dev *pdev)
3880{
3881 struct net_device *dev = pci_get_drvdata(pdev);
3882 struct bnx2x *bp;
3883 int rc;
3884
3885 if (!dev) {
3886 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3887 return -ENODEV;
3888 }
3889 bp = netdev_priv(dev);
3890
3891 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 3892 BNX2X_ERR("Handling parity error recovery. Try again later\n");
9f6c9258
DK
3893 return -EAGAIN;
3894 }
3895
3896 rtnl_lock();
3897
3898 pci_restore_state(pdev);
3899
3900 if (!netif_running(dev)) {
3901 rtnl_unlock();
3902 return 0;
3903 }
3904
3905 bnx2x_set_power_state(bp, PCI_D0);
3906 netif_device_attach(dev);
3907
3908 rc = bnx2x_nic_load(bp, LOAD_OPEN);
3909
3910 rtnl_unlock();
3911
3912 return rc;
3913}
619c5cb6
VZ
3914
3915
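/* Write the CDU validation values for this connection (CID) into the
 * ustorm and xstorm sections of the ethernet context.
 */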
3916void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
3917 u32 cid)
3918{
3919 /* ustorm cxt validation */
3920 cxt->ustorm_ag_context.cdu_usage =
3921 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3922 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
3923 /* xcontext validation */
3924 cxt->xstorm_ag_context.cdu_reserved =
3925 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3926 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
3927}
3928
1191cb83
ED
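/* Program the host-coalescing timeout (in ticks) for one status block
 * index by writing it into the CSTORM status block data in internal
 * memory.
 */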
3929static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
3930 u8 fw_sb_id, u8 sb_index,
3931 u8 ticks)
619c5cb6
VZ
3932{
3933
3934 u32 addr = BAR_CSTRORM_INTMEM +
3935 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
3936 REG_WR8(bp, addr, ticks);
51c1a580
MS
3937 DP(NETIF_MSG_IFUP,
3938 "port %x fw_sb_id %d sb_index %d ticks %d\n",
3939 port, fw_sb_id, sb_index, ticks);
619c5cb6
VZ
3940}
3941
1191cb83
ED
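/* Set or clear the HC_ENABLED flag for one status block index with a
 * read-modify-write of the flags word in CSTORM internal memory.
 */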
3942static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
3943 u16 fw_sb_id, u8 sb_index,
3944 u8 disable)
619c5cb6
VZ
3945{
3946 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
3947 u32 addr = BAR_CSTRORM_INTMEM +
3948 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
3949 u16 flags = REG_RD16(bp, addr);
3950 /* clear and set */
3951 flags &= ~HC_INDEX_DATA_HC_ENABLED;
3952 flags |= enable_flag;
3953 REG_WR16(bp, addr, flags);
51c1a580
MS
3954 DP(NETIF_MSG_IFUP,
3955 "port %x fw_sb_id %d sb_index %d disable %d\n",
3956 port, fw_sb_id, sb_index, disable);
619c5cb6
VZ
3957}
3958
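/* Update interrupt coalescing for a single status block index:
 * convert the requested interval from microseconds to BNX2X_BTR ticks
 * and disable coalescing altogether when the interval is zero (or
 * when explicitly requested).
 */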
3959void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
3960 u8 sb_index, u8 disable, u16 usec)
3961{
3962 int port = BP_PORT(bp);
3963 u8 ticks = usec / BNX2X_BTR;
3964
3965 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3966
3967 disable = disable ? 1 : (usec ? 0 : 1);
3968 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3969}