1 /* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2012 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
23 #include <linux/ip.h>
24 #include <net/ipv6.h>
25 #include <net/ip6_checksum.h>
26 #include <linux/prefetch.h>
27 #include "bnx2x_cmn.h"
28 #include "bnx2x_init.h"
29 #include "bnx2x_sp.h"
30 #include "bnx2x_sriov.h"
31
32 /**
33 * bnx2x_move_fp - move content of the fastpath structure.
34 *
35 * @bp: driver handle
36 * @from: source FP index
37 * @to: destination FP index
38 *
 39  * Makes sure the contents of bp->fp[to].napi are kept
 40  * intact. This is done by first copying the napi struct from
 41  * the target to the source, and then memcpy'ing the entire
 42  * source onto the target. Txdata pointers and related
 43  * content are updated as well.
44 */
45 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
46 {
47 struct bnx2x_fastpath *from_fp = &bp->fp[from];
48 struct bnx2x_fastpath *to_fp = &bp->fp[to];
49 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
50 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
51 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
52 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
53 int old_max_eth_txqs, new_max_eth_txqs;
54 int old_txdata_index = 0, new_txdata_index = 0;
55
 56 	/* Copy the NAPI object as it has already been initialized */
57 from_fp->napi = to_fp->napi;
58
59 /* Move bnx2x_fastpath contents */
60 memcpy(to_fp, from_fp, sizeof(*to_fp));
61 to_fp->index = to;
62
63 /* move sp_objs contents as well, as their indices match fp ones */
64 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
65
66 /* move fp_stats contents as well, as their indices match fp ones */
67 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
68
69 /* Update txdata pointers in fp and move txdata content accordingly:
70 * Each fp consumes 'max_cos' txdata structures, so the index should be
71 * decremented by max_cos x delta.
72 */
73
74 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
75 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
76 (bp)->max_cos;
77 if (from == FCOE_IDX(bp)) {
78 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
79 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
80 }
81
82 memcpy(&bp->bnx2x_txq[old_txdata_index],
83 &bp->bnx2x_txq[new_txdata_index],
84 sizeof(struct bnx2x_fp_txdata));
85 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
86 }
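
/* Worked example for the txdata index math above (values are hypothetical,
 * for illustration only): with BNX2X_NUM_ETH_QUEUES(bp) == 4, max_cos == 2
 * and an FCoE fastpath moved from index 6 to index 4:
 *	old_txdata_index = 4 * 2 + FCOE_TXQ_IDX_OFFSET
 *	new_txdata_index = (4 - 6 + 4) * 2 + FCOE_TXQ_IDX_OFFSET
 * i.e. the txdata slot moves back by max_cos * (from - to) entries, which
 * is the "decremented by max_cos x delta" rule stated in the function.
 */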
87
88 /**
89 * bnx2x_fill_fw_str - Fill buffer with FW version string.
90 *
91 * @bp: driver handle
92 * @buf: character buffer to fill with the fw name
93 * @buf_len: length of the above buffer
94 *
95 */
96 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
97 {
98 if (IS_PF(bp)) {
99 u8 phy_fw_ver[PHY_FW_VER_LEN];
100
101 phy_fw_ver[0] = '\0';
102 bnx2x_get_ext_phy_fw_version(&bp->link_params,
103 phy_fw_ver, PHY_FW_VER_LEN);
104 strlcpy(buf, bp->fw_ver, buf_len);
105 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
106 "bc %d.%d.%d%s%s",
107 (bp->common.bc_ver & 0xff0000) >> 16,
108 (bp->common.bc_ver & 0xff00) >> 8,
109 (bp->common.bc_ver & 0xff),
110 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
111 } else {
112 strlcpy(buf, bp->acquire_resp.pfdev_info.fw_ver, buf_len);
113 }
114 }
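
/* Sketch of the resulting string (version numbers are hypothetical): for a
 * PF the buffer holds bp->fw_ver followed by "bc <maj>.<min>.<rev>" taken
 * from the bootcode version word and, when an external PHY reports one,
 * "phy <ver>" - e.g. something like "7.8.2 bc 7.4.22 phy 1.34". For a VF
 * only the fw_ver string provided by the PF in the acquire response is
 * copied.
 */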
115
116 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
117
118 /* free skb in the packet ring at pos idx
119 * return idx of last bd freed
120 */
121 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
122 u16 idx, unsigned int *pkts_compl,
123 unsigned int *bytes_compl)
124 {
125 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
126 struct eth_tx_start_bd *tx_start_bd;
127 struct eth_tx_bd *tx_data_bd;
128 struct sk_buff *skb = tx_buf->skb;
129 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
130 int nbd;
131
 132 	/* prefetch skb end pointer to speed up dev_kfree_skb() */
133 prefetch(&skb->end);
134
135 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
136 txdata->txq_index, idx, tx_buf, skb);
137
138 /* unmap first bd */
139 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
140 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
141 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
142
143
144 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
145 #ifdef BNX2X_STOP_ON_ERROR
146 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
147 BNX2X_ERR("BAD nbd!\n");
148 bnx2x_panic();
149 }
150 #endif
151 new_cons = nbd + tx_buf->first_bd;
152
153 /* Get the next bd */
154 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
155
156 /* Skip a parse bd... */
157 --nbd;
158 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
159
160 /* ...and the TSO split header bd since they have no mapping */
161 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
162 --nbd;
163 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
164 }
165
166 /* now free frags */
167 while (nbd > 0) {
168
169 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
170 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
171 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
172 if (--nbd)
173 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
174 }
175
176 /* release skb */
177 WARN_ON(!skb);
178 if (likely(skb)) {
179 (*pkts_compl)++;
180 (*bytes_compl) += skb->len;
181 }
182
183 dev_kfree_skb_any(skb);
184 tx_buf->first_bd = 0;
185 tx_buf->skb = NULL;
186
187 return new_cons;
188 }
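
/* BD chain walked by bnx2x_free_tx_pkt() above - a sketch derived from the
 * unmap/skip logic in the function itself, not from HW documentation:
 *
 *	start BD			(unmapped via dma_unmap_single)
 *	parse BD			(no mapping - skipped)
 *	[TSO split header BD]		(only with BNX2X_TSO_SPLIT_BD - skipped)
 *	frag BD 0 .. frag BD n		(each unmapped via dma_unmap_page)
 *
 * new_cons then advances the ring consumer past all BDs of this packet.
 */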
189
190 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
191 {
192 struct netdev_queue *txq;
193 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
194 unsigned int pkts_compl = 0, bytes_compl = 0;
195
196 #ifdef BNX2X_STOP_ON_ERROR
197 if (unlikely(bp->panic))
198 return -1;
199 #endif
200
201 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
202 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
203 sw_cons = txdata->tx_pkt_cons;
204
205 while (sw_cons != hw_cons) {
206 u16 pkt_cons;
207
208 pkt_cons = TX_BD(sw_cons);
209
210 DP(NETIF_MSG_TX_DONE,
211 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
212 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
213
214 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
215 &pkts_compl, &bytes_compl);
216
217 sw_cons++;
218 }
219
220 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
221
222 txdata->tx_pkt_cons = sw_cons;
223 txdata->tx_bd_cons = bd_cons;
224
225 /* Need to make the tx_bd_cons update visible to start_xmit()
226 * before checking for netif_tx_queue_stopped(). Without the
227 * memory barrier, there is a small possibility that
228 * start_xmit() will miss it and cause the queue to be stopped
229 * forever.
230 * On the other hand we need an rmb() here to ensure the proper
231 * ordering of bit testing in the following
232 * netif_tx_queue_stopped(txq) call.
233 */
234 smp_mb();
235
236 if (unlikely(netif_tx_queue_stopped(txq))) {
237 /* Taking tx_lock() is needed to prevent reenabling the queue
 238 		 * while it's empty. This could happen if rx_action() gets
239 * suspended in bnx2x_tx_int() after the condition before
240 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
241 *
242 * stops the queue->sees fresh tx_bd_cons->releases the queue->
243 * sends some packets consuming the whole queue again->
244 * stops the queue
245 */
246
247 __netif_tx_lock(txq, smp_processor_id());
248
249 if ((netif_tx_queue_stopped(txq)) &&
250 (bp->state == BNX2X_STATE_OPEN) &&
251 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
252 netif_tx_wake_queue(txq);
253
254 __netif_tx_unlock(txq);
255 }
256 return 0;
257 }
258
259 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
260 u16 idx)
261 {
262 u16 last_max = fp->last_max_sge;
263
264 if (SUB_S16(idx, last_max) > 0)
265 fp->last_max_sge = idx;
266 }
267
268 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
269 u16 sge_len,
270 struct eth_end_agg_rx_cqe *cqe)
271 {
272 struct bnx2x *bp = fp->bp;
273 u16 last_max, last_elem, first_elem;
274 u16 delta = 0;
275 u16 i;
276
277 if (!sge_len)
278 return;
279
280 /* First mark all used pages */
281 for (i = 0; i < sge_len; i++)
282 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
283 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
284
285 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
286 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
287
288 /* Here we assume that the last SGE index is the biggest */
289 prefetch((void *)(fp->sge_mask));
290 bnx2x_update_last_max_sge(fp,
291 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
292
293 last_max = RX_SGE(fp->last_max_sge);
294 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
295 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
296
297 /* If ring is not full */
298 if (last_elem + 1 != first_elem)
299 last_elem++;
300
301 /* Now update the prod */
302 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
303 if (likely(fp->sge_mask[i]))
304 break;
305
306 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
307 delta += BIT_VEC64_ELEM_SZ;
308 }
309
310 if (delta > 0) {
311 fp->rx_sge_prod += delta;
312 /* clear page-end entries */
313 bnx2x_clear_sge_mask_next_elems(fp);
314 }
315
316 DP(NETIF_MSG_RX_STATUS,
317 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
318 fp->last_max_sge, fp->rx_sge_prod);
319 }
320
321 /* Set Toeplitz hash value in the skb using the value from the
322 * CQE (calculated by HW).
323 */
324 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
325 const struct eth_fast_path_rx_cqe *cqe,
326 bool *l4_rxhash)
327 {
328 /* Set Toeplitz hash from CQE */
329 if ((bp->dev->features & NETIF_F_RXHASH) &&
330 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
331 enum eth_rss_hash_type htype;
332
333 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
334 *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
335 (htype == TCP_IPV6_HASH_TYPE);
336 return le32_to_cpu(cqe->rss_hash_result);
337 }
338 *l4_rxhash = false;
339 return 0;
340 }
341
342 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
343 u16 cons, u16 prod,
344 struct eth_fast_path_rx_cqe *cqe)
345 {
346 struct bnx2x *bp = fp->bp;
347 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
348 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
349 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
350 dma_addr_t mapping;
351 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
352 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
353
354 /* print error if current state != stop */
355 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
356 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
357
358 /* Try to map an empty data buffer from the aggregation info */
359 mapping = dma_map_single(&bp->pdev->dev,
360 first_buf->data + NET_SKB_PAD,
361 fp->rx_buf_size, DMA_FROM_DEVICE);
362 /*
363 * ...if it fails - move the skb from the consumer to the producer
364 * and set the current aggregation state as ERROR to drop it
365 * when TPA_STOP arrives.
366 */
367
368 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
369 /* Move the BD from the consumer to the producer */
370 bnx2x_reuse_rx_data(fp, cons, prod);
371 tpa_info->tpa_state = BNX2X_TPA_ERROR;
372 return;
373 }
374
375 /* move empty data from pool to prod */
376 prod_rx_buf->data = first_buf->data;
377 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
378 /* point prod_bd to new data */
379 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
380 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
381
382 /* move partial skb from cons to pool (don't unmap yet) */
383 *first_buf = *cons_rx_buf;
384
385 /* mark bin state as START */
386 tpa_info->parsing_flags =
387 le16_to_cpu(cqe->pars_flags.flags);
388 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
389 tpa_info->tpa_state = BNX2X_TPA_START;
390 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
391 tpa_info->placement_offset = cqe->placement_offset;
392 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
393 if (fp->mode == TPA_MODE_GRO) {
394 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
395 tpa_info->full_page =
396 SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
397 tpa_info->gro_size = gro_size;
398 }
399
400 #ifdef BNX2X_STOP_ON_ERROR
401 fp->tpa_queue_used |= (1 << queue);
402 #ifdef _ASM_GENERIC_INT_L64_H
403 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
404 #else
405 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
406 #endif
407 fp->tpa_queue_used);
408 #endif
409 }
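
/* Illustration of the full_page calculation above (numbers are
 * hypothetical): if SGE_PAGE_SIZE * PAGES_PER_SGE were 4096 and gro_size
 * (the aggregated MSS) were 1448, then
 *	full_page = 4096 / 1448 * 1448 = 2896
 * i.e. the largest multiple of gro_size that fits into one SGE buffer, so
 * that the GRO path can later carve the buffer into MSS-sized frags.
 */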
410
411 /* Timestamp option length allowed for TPA aggregation:
412 *
413 * nop nop kind length echo val
414 */
415 #define TPA_TSTAMP_OPT_LEN 12
416 /**
417 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
418 *
419 * @bp: driver handle
420 * @parsing_flags: parsing flags from the START CQE
421 * @len_on_bd: total length of the first packet for the
422 * aggregation.
423 *
 424  * Approximate value of the MSS for this aggregation, calculated
 425  * from its first packet.
426 */
427 static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
428 u16 len_on_bd)
429 {
430 /*
 431 	 * A TPA aggregation won't have either IP options or TCP options
432 * other than timestamp or IPv6 extension headers.
433 */
434 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
435
436 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
437 PRS_FLAG_OVERETH_IPV6)
438 hdrs_len += sizeof(struct ipv6hdr);
439 else /* IPv4 */
440 hdrs_len += sizeof(struct iphdr);
441
442
 443 	/* Check if there was a TCP timestamp; if there was, it will
 444 	 * always be 12 bytes long: nop nop kind length echo val.
445 *
446 * Otherwise FW would close the aggregation.
447 */
448 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
449 hdrs_len += TPA_TSTAMP_OPT_LEN;
450
451 return len_on_bd - hdrs_len;
452 }
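
/* Worked example for the calculation above, assuming minimal header sizes
 * and no IPv6 extension headers: for an IPv4 aggregation with TCP
 * timestamps,
 *	hdrs_len = ETH_HLEN (14) + sizeof(struct tcphdr) (20) +
 *		   sizeof(struct iphdr) (20) + TPA_TSTAMP_OPT_LEN (12) = 66
 * so the approximated MSS is len_on_bd - 66. The 12 timestamp bytes break
 * down as nop(1) + nop(1) + kind(1) + length(1) + value(4) + echo(4).
 */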
453
454 static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
455 struct bnx2x_fastpath *fp, u16 index)
456 {
457 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
458 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
459 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
460 dma_addr_t mapping;
461
462 if (unlikely(page == NULL)) {
463 BNX2X_ERR("Can't alloc sge\n");
464 return -ENOMEM;
465 }
466
467 mapping = dma_map_page(&bp->pdev->dev, page, 0,
468 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
469 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
470 __free_pages(page, PAGES_PER_SGE_SHIFT);
471 BNX2X_ERR("Can't map sge\n");
472 return -ENOMEM;
473 }
474
475 sw_buf->page = page;
476 dma_unmap_addr_set(sw_buf, mapping, mapping);
477
478 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
479 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
480
481 return 0;
482 }
483
484 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
485 struct bnx2x_agg_info *tpa_info,
486 u16 pages,
487 struct sk_buff *skb,
488 struct eth_end_agg_rx_cqe *cqe,
489 u16 cqe_idx)
490 {
491 struct sw_rx_page *rx_pg, old_rx_pg;
492 u32 i, frag_len, frag_size;
493 int err, j, frag_id = 0;
494 u16 len_on_bd = tpa_info->len_on_bd;
495 u16 full_page = 0, gro_size = 0;
496
497 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
498
499 if (fp->mode == TPA_MODE_GRO) {
500 gro_size = tpa_info->gro_size;
501 full_page = tpa_info->full_page;
502 }
503
504 /* This is needed in order to enable forwarding support */
505 if (frag_size) {
506 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
507 tpa_info->parsing_flags, len_on_bd);
508
509 /* set for GRO */
510 if (fp->mode == TPA_MODE_GRO)
511 skb_shinfo(skb)->gso_type =
512 (GET_FLAG(tpa_info->parsing_flags,
513 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
514 PRS_FLAG_OVERETH_IPV6) ?
515 SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
516 }
517
518
519 #ifdef BNX2X_STOP_ON_ERROR
520 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
521 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
522 pages, cqe_idx);
523 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
524 bnx2x_panic();
525 return -EINVAL;
526 }
527 #endif
528
529 /* Run through the SGL and compose the fragmented skb */
530 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
531 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
532
533 /* FW gives the indices of the SGE as if the ring is an array
534 (meaning that "next" element will consume 2 indices) */
535 if (fp->mode == TPA_MODE_GRO)
536 frag_len = min_t(u32, frag_size, (u32)full_page);
537 else /* LRO */
538 frag_len = min_t(u32, frag_size,
539 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));
540
541 rx_pg = &fp->rx_page_ring[sge_idx];
542 old_rx_pg = *rx_pg;
543
544 /* If we fail to allocate a substitute page, we simply stop
545 where we are and drop the whole packet */
546 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
547 if (unlikely(err)) {
548 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
549 return err;
550 }
551
 552 		/* Unmap the page as we are going to pass it to the stack */
553 dma_unmap_page(&bp->pdev->dev,
554 dma_unmap_addr(&old_rx_pg, mapping),
555 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
556 /* Add one frag and update the appropriate fields in the skb */
557 if (fp->mode == TPA_MODE_LRO)
558 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
559 else { /* GRO */
560 int rem;
561 int offset = 0;
562 for (rem = frag_len; rem > 0; rem -= gro_size) {
563 int len = rem > gro_size ? gro_size : rem;
564 skb_fill_page_desc(skb, frag_id++,
565 old_rx_pg.page, offset, len);
566 if (offset)
567 get_page(old_rx_pg.page);
568 offset += len;
569 }
570 }
571
572 skb->data_len += frag_len;
573 skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
574 skb->len += frag_len;
575
576 frag_size -= frag_len;
577 }
578
579 return 0;
580 }
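
/* Note on the GRO branch above: one SGE page may be carved into several
 * gro_size-sized frags (e.g. frag_len 2896 with gro_size 1448 gives two
 * frags - hypothetical numbers). Every frag after the first references the
 * same page, hence the extra get_page(), so the page is freed only after
 * all frags have been consumed by the stack.
 */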
581
582 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
583 {
584 if (fp->rx_frag_size)
585 put_page(virt_to_head_page(data));
586 else
587 kfree(data);
588 }
589
590 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
591 {
592 if (fp->rx_frag_size)
593 return netdev_alloc_frag(fp->rx_frag_size);
594
595 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
596 }
597
598
599 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
600 struct bnx2x_agg_info *tpa_info,
601 u16 pages,
602 struct eth_end_agg_rx_cqe *cqe,
603 u16 cqe_idx)
604 {
605 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
606 u8 pad = tpa_info->placement_offset;
607 u16 len = tpa_info->len_on_bd;
608 struct sk_buff *skb = NULL;
609 u8 *new_data, *data = rx_buf->data;
610 u8 old_tpa_state = tpa_info->tpa_state;
611
612 tpa_info->tpa_state = BNX2X_TPA_STOP;
613
 614 	/* If there was an error during the handling of the TPA_START -
615 * drop this aggregation.
616 */
617 if (old_tpa_state == BNX2X_TPA_ERROR)
618 goto drop;
619
620 /* Try to allocate the new data */
621 new_data = bnx2x_frag_alloc(fp);
622 /* Unmap skb in the pool anyway, as we are going to change
623 pool entry status to BNX2X_TPA_STOP even if new skb allocation
624 fails. */
625 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
626 fp->rx_buf_size, DMA_FROM_DEVICE);
627 if (likely(new_data))
628 skb = build_skb(data, fp->rx_frag_size);
629
630 if (likely(skb)) {
631 #ifdef BNX2X_STOP_ON_ERROR
632 if (pad + len > fp->rx_buf_size) {
633 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
634 pad, len, fp->rx_buf_size);
635 bnx2x_panic();
636 return;
637 }
638 #endif
639
640 skb_reserve(skb, pad + NET_SKB_PAD);
641 skb_put(skb, len);
642 skb->rxhash = tpa_info->rxhash;
643 skb->l4_rxhash = tpa_info->l4_rxhash;
644
645 skb->protocol = eth_type_trans(skb, bp->dev);
646 skb->ip_summed = CHECKSUM_UNNECESSARY;
647
648 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
649 skb, cqe, cqe_idx)) {
650 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
651 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
652 napi_gro_receive(&fp->napi, skb);
653 } else {
654 DP(NETIF_MSG_RX_STATUS,
655 "Failed to allocate new pages - dropping packet!\n");
656 dev_kfree_skb_any(skb);
657 }
658
659
660 /* put new data in bin */
661 rx_buf->data = new_data;
662
663 return;
664 }
665 bnx2x_frag_free(fp, new_data);
666 drop:
667 /* drop the packet and keep the buffer in the bin */
668 DP(NETIF_MSG_RX_STATUS,
669 "Failed to allocate or map a new skb - dropping packet!\n");
670 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
671 }
672
673 static int bnx2x_alloc_rx_data(struct bnx2x *bp,
674 struct bnx2x_fastpath *fp, u16 index)
675 {
676 u8 *data;
677 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
678 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
679 dma_addr_t mapping;
680
681 data = bnx2x_frag_alloc(fp);
682 if (unlikely(data == NULL))
683 return -ENOMEM;
684
685 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
686 fp->rx_buf_size,
687 DMA_FROM_DEVICE);
688 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
689 bnx2x_frag_free(fp, data);
690 BNX2X_ERR("Can't map rx data\n");
691 return -ENOMEM;
692 }
693
694 rx_buf->data = data;
695 dma_unmap_addr_set(rx_buf, mapping, mapping);
696
697 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
698 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
699
700 return 0;
701 }
702
703 static
704 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
705 struct bnx2x_fastpath *fp,
706 struct bnx2x_eth_q_stats *qstats)
707 {
708 /* Do nothing if no L4 csum validation was done.
709 * We do not check whether IP csum was validated. For IPv4 we assume
710 * that if the card got as far as validating the L4 csum, it also
711 * validated the IP csum. IPv6 has no IP csum.
712 */
713 if (cqe->fast_path_cqe.status_flags &
714 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
715 return;
716
717 /* If L4 validation was done, check if an error was found. */
718
719 if (cqe->fast_path_cqe.type_error_flags &
720 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
721 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
722 qstats->hw_csum_err++;
723 else
724 skb->ip_summed = CHECKSUM_UNNECESSARY;
725 }
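
/* Decision table implemented above (a restatement of the code, not of any
 * HW spec):
 *
 *	L4 csum not validated by HW	-> leave skb->ip_summed untouched
 *	validated, IP/L4 csum error	-> count hw_csum_err, leave untouched
 *	validated, no error		-> CHECKSUM_UNNECESSARY
 */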
726
727 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
728 {
729 struct bnx2x *bp = fp->bp;
730 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
731 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
732 int rx_pkt = 0;
733
734 #ifdef BNX2X_STOP_ON_ERROR
735 if (unlikely(bp->panic))
736 return 0;
737 #endif
738
 739 	/* CQ "next element" is the same size as a regular element,
 740 	   that's why it's ok here */
741 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
742 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
743 hw_comp_cons++;
744
745 bd_cons = fp->rx_bd_cons;
746 bd_prod = fp->rx_bd_prod;
747 bd_prod_fw = bd_prod;
748 sw_comp_cons = fp->rx_comp_cons;
749 sw_comp_prod = fp->rx_comp_prod;
750
751 /* Memory barrier necessary as speculative reads of the rx
752 * buffer can be ahead of the index in the status block
753 */
754 rmb();
755
756 DP(NETIF_MSG_RX_STATUS,
757 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
758 fp->index, hw_comp_cons, sw_comp_cons);
759
760 while (sw_comp_cons != hw_comp_cons) {
761 struct sw_rx_bd *rx_buf = NULL;
762 struct sk_buff *skb;
763 union eth_rx_cqe *cqe;
764 struct eth_fast_path_rx_cqe *cqe_fp;
765 u8 cqe_fp_flags;
766 enum eth_rx_cqe_type cqe_fp_type;
767 u16 len, pad, queue;
768 u8 *data;
769 bool l4_rxhash;
770
771 #ifdef BNX2X_STOP_ON_ERROR
772 if (unlikely(bp->panic))
773 return 0;
774 #endif
775
776 comp_ring_cons = RCQ_BD(sw_comp_cons);
777 bd_prod = RX_BD(bd_prod);
778 bd_cons = RX_BD(bd_cons);
779
780 cqe = &fp->rx_comp_ring[comp_ring_cons];
781 cqe_fp = &cqe->fast_path_cqe;
782 cqe_fp_flags = cqe_fp->type_error_flags;
783 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
784
785 DP(NETIF_MSG_RX_STATUS,
786 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
787 CQE_TYPE(cqe_fp_flags),
788 cqe_fp_flags, cqe_fp->status_flags,
789 le32_to_cpu(cqe_fp->rss_hash_result),
790 le16_to_cpu(cqe_fp->vlan_tag),
791 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
792
793 /* is this a slowpath msg? */
794 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
795 bnx2x_sp_event(fp, cqe);
796 goto next_cqe;
797 }
798
799 rx_buf = &fp->rx_buf_ring[bd_cons];
800 data = rx_buf->data;
801
802 if (!CQE_TYPE_FAST(cqe_fp_type)) {
803 struct bnx2x_agg_info *tpa_info;
804 u16 frag_size, pages;
805 #ifdef BNX2X_STOP_ON_ERROR
806 /* sanity check */
807 if (fp->disable_tpa &&
808 (CQE_TYPE_START(cqe_fp_type) ||
809 CQE_TYPE_STOP(cqe_fp_type)))
810 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
811 CQE_TYPE(cqe_fp_type));
812 #endif
813
814 if (CQE_TYPE_START(cqe_fp_type)) {
815 u16 queue = cqe_fp->queue_index;
816 DP(NETIF_MSG_RX_STATUS,
817 "calling tpa_start on queue %d\n",
818 queue);
819
820 bnx2x_tpa_start(fp, queue,
821 bd_cons, bd_prod,
822 cqe_fp);
823
824 goto next_rx;
825
826 }
827 queue = cqe->end_agg_cqe.queue_index;
828 tpa_info = &fp->tpa_info[queue];
829 DP(NETIF_MSG_RX_STATUS,
830 "calling tpa_stop on queue %d\n",
831 queue);
832
833 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
834 tpa_info->len_on_bd;
835
836 if (fp->mode == TPA_MODE_GRO)
837 pages = (frag_size + tpa_info->full_page - 1) /
838 tpa_info->full_page;
839 else
840 pages = SGE_PAGE_ALIGN(frag_size) >>
841 SGE_PAGE_SHIFT;
842
843 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
844 &cqe->end_agg_cqe, comp_ring_cons);
845 #ifdef BNX2X_STOP_ON_ERROR
846 if (bp->panic)
847 return 0;
848 #endif
849
850 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
851 goto next_cqe;
852 }
853 /* non TPA */
854 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
855 pad = cqe_fp->placement_offset;
856 dma_sync_single_for_cpu(&bp->pdev->dev,
857 dma_unmap_addr(rx_buf, mapping),
858 pad + RX_COPY_THRESH,
859 DMA_FROM_DEVICE);
860 pad += NET_SKB_PAD;
861 prefetch(data + pad); /* speedup eth_type_trans() */
862 /* is this an error packet? */
863 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
864 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
865 "ERROR flags %x rx packet %u\n",
866 cqe_fp_flags, sw_comp_cons);
867 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
868 goto reuse_rx;
869 }
870
871 /* Since we don't have a jumbo ring
872 * copy small packets if mtu > 1500
873 */
874 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
875 (len <= RX_COPY_THRESH)) {
876 skb = netdev_alloc_skb_ip_align(bp->dev, len);
877 if (skb == NULL) {
878 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
879 "ERROR packet dropped because of alloc failure\n");
880 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
881 goto reuse_rx;
882 }
883 memcpy(skb->data, data + pad, len);
884 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
885 } else {
886 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
887 dma_unmap_single(&bp->pdev->dev,
888 dma_unmap_addr(rx_buf, mapping),
889 fp->rx_buf_size,
890 DMA_FROM_DEVICE);
891 skb = build_skb(data, fp->rx_frag_size);
892 if (unlikely(!skb)) {
893 bnx2x_frag_free(fp, data);
894 bnx2x_fp_qstats(bp, fp)->
895 rx_skb_alloc_failed++;
896 goto next_rx;
897 }
898 skb_reserve(skb, pad);
899 } else {
900 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
901 "ERROR packet dropped because of alloc failure\n");
902 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
903 reuse_rx:
904 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
905 goto next_rx;
906 }
907 }
908
909 skb_put(skb, len);
910 skb->protocol = eth_type_trans(skb, bp->dev);
911
 912 		/* Set Toeplitz hash for a non-LRO skb */
913 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
914 skb->l4_rxhash = l4_rxhash;
915
916 skb_checksum_none_assert(skb);
917
918 if (bp->dev->features & NETIF_F_RXCSUM)
919 bnx2x_csum_validate(skb, cqe, fp,
920 bnx2x_fp_qstats(bp, fp));
921
922 skb_record_rx_queue(skb, fp->rx_queue);
923
924 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
925 PARSING_FLAGS_VLAN)
926 __vlan_hwaccel_put_tag(skb,
927 le16_to_cpu(cqe_fp->vlan_tag));
928 napi_gro_receive(&fp->napi, skb);
929
930
931 next_rx:
932 rx_buf->data = NULL;
933
934 bd_cons = NEXT_RX_IDX(bd_cons);
935 bd_prod = NEXT_RX_IDX(bd_prod);
936 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
937 rx_pkt++;
938 next_cqe:
939 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
940 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
941
942 if (rx_pkt == budget)
943 break;
944 } /* while */
945
946 fp->rx_bd_cons = bd_cons;
947 fp->rx_bd_prod = bd_prod_fw;
948 fp->rx_comp_cons = sw_comp_cons;
949 fp->rx_comp_prod = sw_comp_prod;
950
951 /* Update producers */
952 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
953 fp->rx_sge_prod);
954
955 fp->rx_pkt += rx_pkt;
956 fp->rx_calls++;
957
958 return rx_pkt;
959 }
960
961 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
962 {
963 struct bnx2x_fastpath *fp = fp_cookie;
964 struct bnx2x *bp = fp->bp;
965 u8 cos;
966
967 DP(NETIF_MSG_INTR,
968 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
969 fp->index, fp->fw_sb_id, fp->igu_sb_id);
970 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
971
972 #ifdef BNX2X_STOP_ON_ERROR
973 if (unlikely(bp->panic))
974 return IRQ_HANDLED;
975 #endif
976
977 /* Handle Rx and Tx according to MSI-X vector */
978 prefetch(fp->rx_cons_sb);
979
980 for_each_cos_in_tx_queue(fp, cos)
981 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
982
983 prefetch(&fp->sb_running_index[SM_RX_ID]);
984 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
985
986 return IRQ_HANDLED;
987 }
988
989 /* HW Lock for shared dual port PHYs */
990 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
991 {
992 mutex_lock(&bp->port.phy_mutex);
993
994 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
995 }
996
997 void bnx2x_release_phy_lock(struct bnx2x *bp)
998 {
999 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1000
1001 mutex_unlock(&bp->port.phy_mutex);
1002 }
1003
1004 /* calculates MF speed according to current linespeed and MF configuration */
1005 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1006 {
1007 u16 line_speed = bp->link_vars.line_speed;
1008 if (IS_MF(bp)) {
1009 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1010 bp->mf_config[BP_VN(bp)]);
1011
1012 /* Calculate the current MAX line speed limit for the MF
1013 * devices
1014 */
1015 if (IS_MF_SI(bp))
1016 line_speed = (line_speed * maxCfg) / 100;
1017 else { /* SD mode */
1018 u16 vn_max_rate = maxCfg * 100;
1019
1020 if (vn_max_rate < line_speed)
1021 line_speed = vn_max_rate;
1022 }
1023 }
1024
1025 return line_speed;
1026 }
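
/* Example with hypothetical values: line_speed == 10000 (Mbps) and
 * maxCfg == 30. In SI mode the limit is a percentage of the line speed, so
 *	line_speed = 10000 * 30 / 100 = 3000
 * In SD mode maxCfg is taken in 100 Mbps units, so vn_max_rate = 3000 and
 * the reported speed is min(10000, 3000) = 3000 as well.
 */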
1027
1028 /**
1029 * bnx2x_fill_report_data - fill link report data to report
1030 *
1031 * @bp: driver handle
1032 * @data: link state to update
1033 *
 1034  * It uses non-atomic bit operations because it is called under the mutex.
1035 */
1036 static void bnx2x_fill_report_data(struct bnx2x *bp,
1037 struct bnx2x_link_report_data *data)
1038 {
1039 u16 line_speed = bnx2x_get_mf_speed(bp);
1040
1041 memset(data, 0, sizeof(*data));
1042
 1043 	/* Fill the report data: effective line speed */
1044 data->line_speed = line_speed;
1045
1046 /* Link is down */
1047 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1048 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1049 &data->link_report_flags);
1050
1051 /* Full DUPLEX */
1052 if (bp->link_vars.duplex == DUPLEX_FULL)
1053 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1054
1055 /* Rx Flow Control is ON */
1056 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1057 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1058
1059 /* Tx Flow Control is ON */
1060 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1061 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1062 }
1063
1064 /**
1065 * bnx2x_link_report - report link status to OS.
1066 *
1067 * @bp: driver handle
1068 *
 1069  * Calls __bnx2x_link_report() under the same locking scheme
 1070  * as the link/PHY state managing code to ensure consistent link
 1071  * reporting.
1072 */
1073
1074 void bnx2x_link_report(struct bnx2x *bp)
1075 {
1076 bnx2x_acquire_phy_lock(bp);
1077 __bnx2x_link_report(bp);
1078 bnx2x_release_phy_lock(bp);
1079 }
1080
1081 /**
1082 * __bnx2x_link_report - report link status to OS.
1083 *
1084 * @bp: driver handle
1085 *
 1086  * Non-atomic implementation.
1087 * Should be called under the phy_lock.
1088 */
1089 void __bnx2x_link_report(struct bnx2x *bp)
1090 {
1091 struct bnx2x_link_report_data cur_data;
1092
1093 /* reread mf_cfg */
1094 if (IS_PF(bp) && !CHIP_IS_E1(bp))
1095 bnx2x_read_mf_cfg(bp);
1096
1097 /* Read the current link report info */
1098 bnx2x_fill_report_data(bp, &cur_data);
1099
1100 /* Don't report link down or exactly the same link status twice */
1101 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1102 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1103 &bp->last_reported_link.link_report_flags) &&
1104 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1105 &cur_data.link_report_flags)))
1106 return;
1107
1108 bp->link_cnt++;
1109
 1110 	/* We are going to report new link parameters now -
 1111 	 * remember the current data for next time.
1112 */
1113 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1114
1115 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1116 &cur_data.link_report_flags)) {
1117 netif_carrier_off(bp->dev);
1118 netdev_err(bp->dev, "NIC Link is Down\n");
1119 return;
1120 } else {
1121 const char *duplex;
1122 const char *flow;
1123
1124 netif_carrier_on(bp->dev);
1125
1126 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1127 &cur_data.link_report_flags))
1128 duplex = "full";
1129 else
1130 duplex = "half";
1131
 1132 		/* Handle the FC at the end so that only the FC flags can
 1133 		 * still be set. This way we can easily check whether any
 1134 		 * FC is enabled at all.
1135 */
1136 if (cur_data.link_report_flags) {
1137 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1138 &cur_data.link_report_flags)) {
1139 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1140 &cur_data.link_report_flags))
1141 flow = "ON - receive & transmit";
1142 else
1143 flow = "ON - receive";
1144 } else {
1145 flow = "ON - transmit";
1146 }
1147 } else {
1148 flow = "none";
1149 }
1150 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1151 cur_data.line_speed, duplex, flow);
1152 }
1153 }
1154
1155 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1156 {
1157 int i;
1158
1159 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1160 struct eth_rx_sge *sge;
1161
1162 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1163 sge->addr_hi =
1164 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1165 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1166
1167 sge->addr_lo =
1168 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1169 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1170 }
1171 }
1172
1173 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1174 struct bnx2x_fastpath *fp, int last)
1175 {
1176 int i;
1177
1178 for (i = 0; i < last; i++) {
1179 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1180 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1181 u8 *data = first_buf->data;
1182
1183 if (data == NULL) {
1184 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1185 continue;
1186 }
1187 if (tpa_info->tpa_state == BNX2X_TPA_START)
1188 dma_unmap_single(&bp->pdev->dev,
1189 dma_unmap_addr(first_buf, mapping),
1190 fp->rx_buf_size, DMA_FROM_DEVICE);
1191 bnx2x_frag_free(fp, data);
1192 first_buf->data = NULL;
1193 }
1194 }
1195
1196 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1197 {
1198 int j;
1199
1200 for_each_rx_queue_cnic(bp, j) {
1201 struct bnx2x_fastpath *fp = &bp->fp[j];
1202
1203 fp->rx_bd_cons = 0;
1204
1205 /* Activate BD ring */
1206 /* Warning!
1207 * this will generate an interrupt (to the TSTORM)
1208 * must only be done after chip is initialized
1209 */
1210 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1211 fp->rx_sge_prod);
1212 }
1213 }
1214
1215 void bnx2x_init_rx_rings(struct bnx2x *bp)
1216 {
1217 int func = BP_FUNC(bp);
1218 u16 ring_prod;
1219 int i, j;
1220
1221 /* Allocate TPA resources */
1222 for_each_eth_queue(bp, j) {
1223 struct bnx2x_fastpath *fp = &bp->fp[j];
1224
1225 DP(NETIF_MSG_IFUP,
1226 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1227
1228 if (!fp->disable_tpa) {
 1229 			/* Fill the per-aggregation pool */
1230 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1231 struct bnx2x_agg_info *tpa_info =
1232 &fp->tpa_info[i];
1233 struct sw_rx_bd *first_buf =
1234 &tpa_info->first_buf;
1235
1236 first_buf->data = bnx2x_frag_alloc(fp);
1237 if (!first_buf->data) {
1238 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1239 j);
1240 bnx2x_free_tpa_pool(bp, fp, i);
1241 fp->disable_tpa = 1;
1242 break;
1243 }
1244 dma_unmap_addr_set(first_buf, mapping, 0);
1245 tpa_info->tpa_state = BNX2X_TPA_STOP;
1246 }
1247
1248 /* "next page" elements initialization */
1249 bnx2x_set_next_page_sgl(fp);
1250
1251 /* set SGEs bit mask */
1252 bnx2x_init_sge_ring_bit_mask(fp);
1253
1254 /* Allocate SGEs and initialize the ring elements */
1255 for (i = 0, ring_prod = 0;
1256 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1257
1258 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1259 BNX2X_ERR("was only able to allocate %d rx sges\n",
1260 i);
1261 BNX2X_ERR("disabling TPA for queue[%d]\n",
1262 j);
1263 /* Cleanup already allocated elements */
1264 bnx2x_free_rx_sge_range(bp, fp,
1265 ring_prod);
1266 bnx2x_free_tpa_pool(bp, fp,
1267 MAX_AGG_QS(bp));
1268 fp->disable_tpa = 1;
1269 ring_prod = 0;
1270 break;
1271 }
1272 ring_prod = NEXT_SGE_IDX(ring_prod);
1273 }
1274
1275 fp->rx_sge_prod = ring_prod;
1276 }
1277 }
1278
1279 for_each_eth_queue(bp, j) {
1280 struct bnx2x_fastpath *fp = &bp->fp[j];
1281
1282 fp->rx_bd_cons = 0;
1283
1284 /* Activate BD ring */
1285 /* Warning!
1286 * this will generate an interrupt (to the TSTORM)
1287 * must only be done after chip is initialized
1288 */
1289 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1290 fp->rx_sge_prod);
1291
1292 if (j != 0)
1293 continue;
1294
1295 if (CHIP_IS_E1(bp)) {
1296 REG_WR(bp, BAR_USTRORM_INTMEM +
1297 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1298 U64_LO(fp->rx_comp_mapping));
1299 REG_WR(bp, BAR_USTRORM_INTMEM +
1300 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1301 U64_HI(fp->rx_comp_mapping));
1302 }
1303 }
1304 }
1305
1306 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1307 {
1308 u8 cos;
1309 struct bnx2x *bp = fp->bp;
1310
1311 for_each_cos_in_tx_queue(fp, cos) {
1312 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1313 unsigned pkts_compl = 0, bytes_compl = 0;
1314
1315 u16 sw_prod = txdata->tx_pkt_prod;
1316 u16 sw_cons = txdata->tx_pkt_cons;
1317
1318 while (sw_cons != sw_prod) {
1319 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1320 &pkts_compl, &bytes_compl);
1321 sw_cons++;
1322 }
1323
1324 netdev_tx_reset_queue(
1325 netdev_get_tx_queue(bp->dev,
1326 txdata->txq_index));
1327 }
1328 }
1329
1330 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1331 {
1332 int i;
1333
1334 for_each_tx_queue_cnic(bp, i) {
1335 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1336 }
1337 }
1338
1339 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1340 {
1341 int i;
1342
1343 for_each_eth_queue(bp, i) {
1344 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1345 }
1346 }
1347
1348 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1349 {
1350 struct bnx2x *bp = fp->bp;
1351 int i;
1352
1353 /* ring wasn't allocated */
1354 if (fp->rx_buf_ring == NULL)
1355 return;
1356
1357 for (i = 0; i < NUM_RX_BD; i++) {
1358 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1359 u8 *data = rx_buf->data;
1360
1361 if (data == NULL)
1362 continue;
1363 dma_unmap_single(&bp->pdev->dev,
1364 dma_unmap_addr(rx_buf, mapping),
1365 fp->rx_buf_size, DMA_FROM_DEVICE);
1366
1367 rx_buf->data = NULL;
1368 bnx2x_frag_free(fp, data);
1369 }
1370 }
1371
1372 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1373 {
1374 int j;
1375
1376 for_each_rx_queue_cnic(bp, j) {
1377 bnx2x_free_rx_bds(&bp->fp[j]);
1378 }
1379 }
1380
1381 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1382 {
1383 int j;
1384
1385 for_each_eth_queue(bp, j) {
1386 struct bnx2x_fastpath *fp = &bp->fp[j];
1387
1388 bnx2x_free_rx_bds(fp);
1389
1390 if (!fp->disable_tpa)
1391 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1392 }
1393 }
1394
1395 void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1396 {
1397 bnx2x_free_tx_skbs_cnic(bp);
1398 bnx2x_free_rx_skbs_cnic(bp);
1399 }
1400
1401 void bnx2x_free_skbs(struct bnx2x *bp)
1402 {
1403 bnx2x_free_tx_skbs(bp);
1404 bnx2x_free_rx_skbs(bp);
1405 }
1406
1407 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1408 {
1409 /* load old values */
1410 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1411
1412 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1413 /* leave all but MAX value */
1414 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1415
1416 /* set new MAX value */
1417 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1418 & FUNC_MF_CFG_MAX_BW_MASK;
1419
1420 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1421 }
1422 }
1423
1424 /**
1425 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1426 *
1427 * @bp: driver handle
1428 * @nvecs: number of vectors to be released
1429 */
1430 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1431 {
1432 int i, offset = 0;
1433
1434 if (nvecs == offset)
1435 return;
1436
1437 /* VFs don't have a default SB */
1438 if (IS_PF(bp)) {
1439 free_irq(bp->msix_table[offset].vector, bp->dev);
1440 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1441 bp->msix_table[offset].vector);
1442 offset++;
1443 }
1444
1445 if (CNIC_SUPPORT(bp)) {
1446 if (nvecs == offset)
1447 return;
1448 offset++;
1449 }
1450
1451 for_each_eth_queue(bp, i) {
1452 if (nvecs == offset)
1453 return;
1454 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1455 i, bp->msix_table[offset].vector);
1456
1457 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1458 }
1459 }
1460
1461 void bnx2x_free_irq(struct bnx2x *bp)
1462 {
1463 if (bp->flags & USING_MSIX_FLAG &&
1464 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1465 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1466
1467 /* vfs don't have a default status block */
1468 if (IS_PF(bp))
1469 nvecs++;
1470
1471 bnx2x_free_msix_irqs(bp, nvecs);
1472 } else {
1473 free_irq(bp->dev->irq, bp->dev);
1474 }
1475 }
1476
1477 int bnx2x_enable_msix(struct bnx2x *bp)
1478 {
1479 int msix_vec = 0, i, rc;
1480
1481 /* VFs don't have a default status block */
1482 if (IS_PF(bp)) {
1483 bp->msix_table[msix_vec].entry = msix_vec;
1484 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1485 bp->msix_table[0].entry);
1486 msix_vec++;
1487 }
1488
1489 /* Cnic requires an msix vector for itself */
1490 if (CNIC_SUPPORT(bp)) {
1491 bp->msix_table[msix_vec].entry = msix_vec;
1492 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1493 msix_vec, bp->msix_table[msix_vec].entry);
1494 msix_vec++;
1495 }
1496
1497 /* We need separate vectors for ETH queues only (not FCoE) */
1498 for_each_eth_queue(bp, i) {
1499 bp->msix_table[msix_vec].entry = msix_vec;
1500 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1501 msix_vec, msix_vec, i);
1502 msix_vec++;
1503 }
1504
1505 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1506 msix_vec);
1507
1508 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
1509
1510 /*
1511 * reconfigure number of tx/rx queues according to available
1512 * MSI-X vectors
1513 */
1514 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
 1515 		/* how many fewer vectors will we have? */
1516 int diff = msix_vec - rc;
1517
1518 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1519
1520 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1521
1522 if (rc) {
1523 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1524 goto no_msix;
1525 }
1526 /*
1527 * decrease number of queues by number of unallocated entries
1528 */
1529 bp->num_ethernet_queues -= diff;
1530 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1531
1532 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1533 bp->num_queues);
1534 } else if (rc > 0) {
1535 /* Get by with single vector */
1536 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1537 if (rc) {
1538 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1539 rc);
1540 goto no_msix;
1541 }
1542
1543 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1544 bp->flags |= USING_SINGLE_MSIX_FLAG;
1545
1546 BNX2X_DEV_INFO("set number of queues to 1\n");
1547 bp->num_ethernet_queues = 1;
1548 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1549 } else if (rc < 0) {
1550 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1551 goto no_msix;
1552 }
1553
1554 bp->flags |= USING_MSIX_FLAG;
1555
1556 return 0;
1557
1558 no_msix:
 1559 	/* fall back to INTx if there is not enough memory */
1560 if (rc == -ENOMEM)
1561 bp->flags |= DISABLE_MSI_FLAG;
1562
1563 return rc;
1564 }
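
/* Summary of the fallback ladder above (describes this code only): first
 * request one vector per slot (slowpath for a PF, optional CNIC, one per
 * ETH queue); if the PCI core can only grant fewer, retry with the granted
 * count and shrink num_ethernet_queues accordingly; failing that, try a
 * single shared vector (USING_SINGLE_MSIX_FLAG); otherwise return an error
 * so the caller can fall back to MSI/INTx. DISABLE_MSI_FLAG is set only on
 * -ENOMEM.
 */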
1565
1566 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1567 {
1568 int i, rc, offset = 0;
1569
1570 /* no default status block for vf */
1571 if (IS_PF(bp)) {
1572 rc = request_irq(bp->msix_table[offset++].vector,
1573 bnx2x_msix_sp_int, 0,
1574 bp->dev->name, bp->dev);
1575 if (rc) {
1576 BNX2X_ERR("request sp irq failed\n");
1577 return -EBUSY;
1578 }
1579 }
1580
1581 if (CNIC_SUPPORT(bp))
1582 offset++;
1583
1584 for_each_eth_queue(bp, i) {
1585 struct bnx2x_fastpath *fp = &bp->fp[i];
1586 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1587 bp->dev->name, i);
1588
1589 rc = request_irq(bp->msix_table[offset].vector,
1590 bnx2x_msix_fp_int, 0, fp->name, fp);
1591 if (rc) {
1592 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1593 bp->msix_table[offset].vector, rc);
1594 bnx2x_free_msix_irqs(bp, offset);
1595 return -EBUSY;
1596 }
1597
1598 offset++;
1599 }
1600
1601 i = BNX2X_NUM_ETH_QUEUES(bp);
1602 if (IS_PF(bp)) {
1603 offset = 1 + CNIC_SUPPORT(bp);
1604 netdev_info(bp->dev,
1605 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1606 bp->msix_table[0].vector,
1607 0, bp->msix_table[offset].vector,
1608 i - 1, bp->msix_table[offset + i - 1].vector);
1609 } else {
1610 offset = CNIC_SUPPORT(bp);
1611 netdev_info(bp->dev,
1612 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1613 0, bp->msix_table[offset].vector,
1614 i - 1, bp->msix_table[offset + i - 1].vector);
1615 }
1616 return 0;
1617 }
1618
1619 int bnx2x_enable_msi(struct bnx2x *bp)
1620 {
1621 int rc;
1622
1623 rc = pci_enable_msi(bp->pdev);
1624 if (rc) {
1625 BNX2X_DEV_INFO("MSI is not attainable\n");
1626 return -1;
1627 }
1628 bp->flags |= USING_MSI_FLAG;
1629
1630 return 0;
1631 }
1632
1633 static int bnx2x_req_irq(struct bnx2x *bp)
1634 {
1635 unsigned long flags;
1636 unsigned int irq;
1637
1638 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1639 flags = 0;
1640 else
1641 flags = IRQF_SHARED;
1642
1643 if (bp->flags & USING_MSIX_FLAG)
1644 irq = bp->msix_table[0].vector;
1645 else
1646 irq = bp->pdev->irq;
1647
1648 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1649 }
1650
1651 static int bnx2x_setup_irqs(struct bnx2x *bp)
1652 {
1653 int rc = 0;
1654 if (bp->flags & USING_MSIX_FLAG &&
1655 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1656 rc = bnx2x_req_msix_irqs(bp);
1657 if (rc)
1658 return rc;
1659 } else {
1660 bnx2x_ack_int(bp);
1661 rc = bnx2x_req_irq(bp);
1662 if (rc) {
1663 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1664 return rc;
1665 }
1666 if (bp->flags & USING_MSI_FLAG) {
1667 bp->dev->irq = bp->pdev->irq;
1668 netdev_info(bp->dev, "using MSI IRQ %d\n",
1669 bp->dev->irq);
1670 }
1671 if (bp->flags & USING_MSIX_FLAG) {
1672 bp->dev->irq = bp->msix_table[0].vector;
1673 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1674 bp->dev->irq);
1675 }
1676 }
1677
1678 return 0;
1679 }
1680
1681 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1682 {
1683 int i;
1684
1685 for_each_rx_queue_cnic(bp, i)
1686 napi_enable(&bnx2x_fp(bp, i, napi));
1687 }
1688
1689 static void bnx2x_napi_enable(struct bnx2x *bp)
1690 {
1691 int i;
1692
1693 for_each_eth_queue(bp, i)
1694 napi_enable(&bnx2x_fp(bp, i, napi));
1695 }
1696
1697 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1698 {
1699 int i;
1700
1701 for_each_rx_queue_cnic(bp, i)
1702 napi_disable(&bnx2x_fp(bp, i, napi));
1703 }
1704
1705 static void bnx2x_napi_disable(struct bnx2x *bp)
1706 {
1707 int i;
1708
1709 for_each_eth_queue(bp, i)
1710 napi_disable(&bnx2x_fp(bp, i, napi));
1711 }
1712
1713 void bnx2x_netif_start(struct bnx2x *bp)
1714 {
1715 if (netif_running(bp->dev)) {
1716 bnx2x_napi_enable(bp);
1717 if (CNIC_LOADED(bp))
1718 bnx2x_napi_enable_cnic(bp);
1719 bnx2x_int_enable(bp);
1720 if (bp->state == BNX2X_STATE_OPEN)
1721 netif_tx_wake_all_queues(bp->dev);
1722 }
1723 }
1724
1725 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1726 {
1727 bnx2x_int_disable_sync(bp, disable_hw);
1728 bnx2x_napi_disable(bp);
1729 if (CNIC_LOADED(bp))
1730 bnx2x_napi_disable_cnic(bp);
1731 }
1732
1733 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1734 {
1735 struct bnx2x *bp = netdev_priv(dev);
1736
1737 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1738 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1739 u16 ether_type = ntohs(hdr->h_proto);
1740
1741 /* Skip VLAN tag if present */
1742 if (ether_type == ETH_P_8021Q) {
1743 struct vlan_ethhdr *vhdr =
1744 (struct vlan_ethhdr *)skb->data;
1745
1746 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1747 }
1748
1749 /* If ethertype is FCoE or FIP - use FCoE ring */
1750 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1751 return bnx2x_fcoe_tx(bp, txq_index);
1752 }
1753
1754 /* select a non-FCoE queue */
1755 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
1756 }
1757
1758
1759 void bnx2x_set_num_queues(struct bnx2x *bp)
1760 {
1761 /* RSS queues */
1762 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1763
1764 /* override in STORAGE SD modes */
1765 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1766 bp->num_ethernet_queues = 1;
1767
1768 /* Add special queues */
1769 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1770 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1771
1772 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1773 }
1774
1775 /**
1776 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1777 *
1778 * @bp: Driver handle
1779 *
 1780  * We currently support at most 16 Tx queues for each CoS, thus we will
 1781  * allocate a multiple of 16 for ETH L2 rings, according to the value of
 1782  * bp->max_cos.
1783 *
1784 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1785 * index after all ETH L2 indices.
1786 *
 1787  * If the actual number of Tx queues (for each CoS) is less than 16 then there
 1788  * will be holes at the end of each group of 16 ETH L2 indices (0..15,
 1789  * 16..31, ...), i.e. indices that are not coupled with any real Tx queue.
1790 *
1791 * The proper configuration of skb->queue_mapping is handled by
1792 * bnx2x_select_queue() and __skb_tx_hash().
1793 *
1794 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1795 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1796 */
1797 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1798 {
1799 int rc, tx, rx;
1800
1801 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1802 rx = BNX2X_NUM_ETH_QUEUES(bp);
1803
1804 /* account for fcoe queue */
1805 if (include_cnic && !NO_FCOE(bp)) {
1806 rx++;
1807 tx++;
1808 }
1809
1810 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1811 if (rc) {
1812 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1813 return rc;
1814 }
1815 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1816 if (rc) {
1817 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1818 return rc;
1819 }
1820
1821 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1822 tx, rx);
1823
1824 return rc;
1825 }
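
/* Illustration with hypothetical values: 8 ETH queues, bp->max_cos == 3
 * and an FCoE L2 queue included give
 *	tx = 8 * 3 + 1 = 25	real Tx queues
 *	rx = 8 + 1     =  9	real Rx queues
 * exactly as computed above.
 */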
1826
1827 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1828 {
1829 int i;
1830
1831 for_each_queue(bp, i) {
1832 struct bnx2x_fastpath *fp = &bp->fp[i];
1833 u32 mtu;
1834
1835 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1836 if (IS_FCOE_IDX(i))
1837 /*
1838 * Although there are no IP frames expected to arrive to
1839 * this ring we still want to add an
1840 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1841 * overrun attack.
1842 */
1843 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
1844 else
1845 mtu = bp->dev->mtu;
1846 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1847 IP_HEADER_ALIGNMENT_PADDING +
1848 ETH_OVREHEAD +
1849 mtu +
1850 BNX2X_FW_RX_ALIGN_END;
 1851 		/* Note: rx_buf_size doesn't take NET_SKB_PAD into account */
1852 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1853 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1854 else
1855 fp->rx_frag_size = 0;
1856 }
1857 }
1858
1859 static int bnx2x_init_rss_pf(struct bnx2x *bp)
1860 {
1861 int i;
1862 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1863
 1864 	/* Prepare the initial contents of the indirection table if RSS is
1865 * enabled
1866 */
1867 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1868 bp->rss_conf_obj.ind_table[i] =
1869 bp->fp->cl_id +
1870 ethtool_rxfh_indir_default(i, num_eth_queues);
1871
1872 /*
1873 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
 1874 	 * per-port, so if explicit configuration is needed, do it only
1875 * for a PMF.
1876 *
1877 * For 57712 and newer on the other hand it's a per-function
1878 * configuration.
1879 */
1880 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
1881 }
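
/* ethtool_rxfh_indir_default(i, n) spreads entries round-robin (i % n), so
 * with e.g. 4 ETH queues (a hypothetical count) the table above becomes
 * cl_id + 0, cl_id + 1, cl_id + 2, cl_id + 3, cl_id + 0, ... across all
 * sizeof(bp->rss_conf_obj.ind_table) entries.
 */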
1882
1883 int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1884 bool config_hash)
1885 {
1886 struct bnx2x_config_rss_params params = {NULL};
1887
1888 /* Although RSS is meaningless when there is a single HW queue we
1889 * still need it enabled in order to have HW Rx hash generated.
1890 *
1891 * if (!is_eth_multi(bp))
1892 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1893 */
1894
1895 params.rss_obj = rss_obj;
1896
1897 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1898
1899 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1900
1901 /* RSS configuration */
1902 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1903 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1904 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1905 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1906 if (rss_obj->udp_rss_v4)
1907 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
1908 if (rss_obj->udp_rss_v6)
1909 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
1910
1911 /* Hash bits */
1912 params.rss_result_mask = MULTI_MASK;
1913
1914 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
1915
1916 if (config_hash) {
1917 /* RSS keys */
1918 prandom_bytes(params.rss_key, sizeof(params.rss_key));
1919 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
1920 }
1921
1922 return bnx2x_config_rss(bp, &params);
1923 }
1924
1925 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1926 {
1927 struct bnx2x_func_state_params func_params = {NULL};
1928
1929 /* Prepare parameters for function state transitions */
1930 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1931
1932 func_params.f_obj = &bp->func_obj;
1933 func_params.cmd = BNX2X_F_CMD_HW_INIT;
1934
1935 func_params.params.hw_init.load_phase = load_code;
1936
1937 return bnx2x_func_state_change(bp, &func_params);
1938 }
1939
1940 /*
 1941  * Cleans the objects that have internal lists without sending
 1942  * ramrods. Should be run when interrupts are disabled.
1943 */
1944 static void bnx2x_squeeze_objects(struct bnx2x *bp)
1945 {
1946 int rc;
1947 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
1948 struct bnx2x_mcast_ramrod_params rparam = {NULL};
1949 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
1950
1951 /***************** Cleanup MACs' object first *************************/
1952
 1953 	/* Wait for completion of the requested commands */
1954 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1955 /* Perform a dry cleanup */
1956 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1957
1958 /* Clean ETH primary MAC */
1959 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1960 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
1961 &ramrod_flags);
1962 if (rc != 0)
1963 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1964
1965 /* Cleanup UC list */
1966 vlan_mac_flags = 0;
1967 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1968 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1969 &ramrod_flags);
1970 if (rc != 0)
1971 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1972
1973 /***************** Now clean mcast object *****************************/
1974 rparam.mcast_obj = &bp->mcast_obj;
1975 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1976
1977 /* Add a DEL command... */
1978 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1979 if (rc < 0)
1980 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
1981 rc);
1982
1983 /* ...and wait until all pending commands are cleared */
1984 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1985 while (rc != 0) {
1986 if (rc < 0) {
1987 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1988 rc);
1989 return;
1990 }
1991
1992 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1993 }
1994 }
1995
1996 #ifndef BNX2X_STOP_ON_ERROR
1997 #define LOAD_ERROR_EXIT(bp, label) \
1998 do { \
1999 (bp)->state = BNX2X_STATE_ERROR; \
2000 goto label; \
2001 } while (0)
2002
2003 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2004 do { \
2005 bp->cnic_loaded = false; \
2006 goto label; \
2007 } while (0)
2008 #else /*BNX2X_STOP_ON_ERROR*/
2009 #define LOAD_ERROR_EXIT(bp, label) \
2010 do { \
2011 (bp)->state = BNX2X_STATE_ERROR; \
2012 (bp)->panic = 1; \
2013 return -EBUSY; \
2014 } while (0)
2015 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2016 do { \
2017 bp->cnic_loaded = false; \
2018 (bp)->panic = 1; \
2019 return -EBUSY; \
2020 } while (0)
2021 #endif /*BNX2X_STOP_ON_ERROR*/
2022
2023 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2024 {
2025 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2026 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2027 return;
2028 }
2029
2030 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2031 {
2032 int num_groups, vf_headroom = 0;
2033 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2034
2035 /* number of queues for statistics is number of eth queues + FCoE */
2036 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2037
2038 /* Total number of FW statistics requests =
2039 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2040 * and fcoe l2 queue) stats + num of queues (which includes another 1
2041 * for fcoe l2 queue if applicable)
2042 */
2043 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2044
2045 /* vf stats appear in the request list, but their data is allocated by
2046 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2047 * it is used to determine where to place the vf stats queries in the
2048 * request struct
2049 */
2050 if (IS_SRIOV(bp))
2051 vf_headroom = bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF;
2052
2053 /* Request is built from stats_query_header and an array of
2054 * stats_query_cmd_group each of which contains
2055 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2056 * configured in the stats_query_header.
2057 */
2058 num_groups =
2059 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2060 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2061 1 : 0));
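	/* Editor's note (equivalent form, illustrative only): the expression
	 * above is a plain ceiling division over the per-group rule capacity:
	 *
	 *	num_groups = DIV_ROUND_UP(bp->fw_stats_num + vf_headroom,
	 *				  STATS_QUERY_CMD_COUNT);
	 */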
2062
2063 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2064 bp->fw_stats_num, vf_headroom, num_groups);
2065 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2066 num_groups * sizeof(struct stats_query_cmd_group);
2067
2068 /* Data for statistics requests + stats_counter
2069 * stats_counter holds per-STORM counters that are incremented
2070 * when STORM has finished with the current request.
2071 * memory for FCoE offloaded statistics is counted anyway,
2072 * even if it will not be sent.
2073 * VF stats are not accounted for here as the data of VF stats is stored
2074 * in memory allocated by the VF, not here.
2075 */
2076 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2077 sizeof(struct per_pf_stats) +
2078 sizeof(struct fcoe_statistics_params) +
2079 sizeof(struct per_queue_stats) * num_queue_stats +
2080 sizeof(struct stats_counter);
2081
2082 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
2083 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2084
2085 /* Set shortcuts */
2086 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2087 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2088 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2089 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2090 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2091 bp->fw_stats_req_sz;
2092
2093 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x",
2094 U64_HI(bp->fw_stats_req_mapping),
2095 U64_LO(bp->fw_stats_req_mapping));
2096 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x",
2097 U64_HI(bp->fw_stats_data_mapping),
2098 U64_LO(bp->fw_stats_data_mapping));
2099 return 0;
2100
2101 alloc_mem_err:
2102 bnx2x_free_fw_stats_mem(bp);
2103 BNX2X_ERR("Can't allocate FW stats memory\n");
2104 return -ENOMEM;
2105 }
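/* Editor's illustrative sketch (not driver code): the single DMA allocation
 * made in bnx2x_alloc_fw_stats_mem() is laid out as
 *
 *	[ fw_stats_req  (fw_stats_req_sz bytes) |
 *	  fw_stats_data (fw_stats_data_sz bytes) ]
 *
 * and the "shortcut" pointers/mappings set above simply address the two
 * halves of that one buffer.
 */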
2106
2107 /* send load request to mcp and analyze response */
2108 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2109 {
2110 /* init fw_seq */
2111 bp->fw_seq =
2112 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2113 DRV_MSG_SEQ_NUMBER_MASK);
2114 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2115
2116 /* Get current FW pulse sequence */
2117 bp->fw_drv_pulse_wr_seq =
2118 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2119 DRV_PULSE_SEQ_MASK);
2120 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2121
2122 /* load request */
2123 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
2124 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
2125
2126 /* if mcp fails to respond we must abort */
2127 if (!(*load_code)) {
2128 BNX2X_ERR("MCP response failure, aborting\n");
2129 return -EBUSY;
2130 }
2131
2132 /* If mcp refused (e.g. other port is in diagnostic mode) we
2133 * must abort
2134 */
2135 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2136 BNX2X_ERR("MCP refused load request, aborting\n");
2137 return -EBUSY;
2138 }
2139 return 0;
2140 }
2141
2142 /* check whether another PF has already loaded FW to chip. In
2143 * virtualized environments a pf from another VM may have already
2144 * initialized the device including loading FW
2145 */
2146 int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
2147 {
2148 /* is another pf loaded on this engine? */
2149 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2150 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2151 /* build my FW version dword */
2152 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2153 (BCM_5710_FW_MINOR_VERSION << 8) +
2154 (BCM_5710_FW_REVISION_VERSION << 16) +
2155 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2156
2157 /* read loaded FW from chip */
2158 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2159
2160 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2161 loaded_fw, my_fw);
2162
2163 /* abort nic load if version mismatch */
2164 if (my_fw != loaded_fw) {
2165 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. aborting\n",
2166 loaded_fw, my_fw);
2167 return -EBUSY;
2168 }
2169 }
2170 return 0;
2171 }
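/* Editor's illustrative sketch (not driver code): the version dword compared
 * above is a byte-per-field packing, i.e. for firmware a.b.c.d
 *
 *	my_fw = a | (b << 8) | (c << 16) | (d << 24);
 *
 * and it must match the value read back through XSEM_REG_PRAM for the
 * already-loaded firmware, otherwise the load is aborted.
 */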
2172
2173 /* returns the "mcp load_code" according to global load_count array */
2174 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2175 {
2176 int path = BP_PATH(bp);
2177
2178 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2179 path, load_count[path][0], load_count[path][1],
2180 load_count[path][2]);
2181 load_count[path][0]++;
2182 load_count[path][1 + port]++;
2183 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2184 path, load_count[path][0], load_count[path][1],
2185 load_count[path][2]);
2186 if (load_count[path][0] == 1)
2187 return FW_MSG_CODE_DRV_LOAD_COMMON;
2188 else if (load_count[path][1 + port] == 1)
2189 return FW_MSG_CODE_DRV_LOAD_PORT;
2190 else
2191 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2192 }
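/* Editor's note (illustrative summary): load_count[path] holds
 * { total loads on path, loads on port 0, loads on port 1 }, so without an
 * MCP the first function on the path takes the COMMON role, the first on
 * each port the PORT role, and every later function is a plain FUNCTION load.
 */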
2193
2194 /* mark PMF if applicable */
2195 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2196 {
2197 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2198 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2199 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2200 bp->port.pmf = 1;
2201 /* We need the barrier to ensure the ordering between the
2202 * writing to bp->port.pmf here and reading it from the
2203 * bnx2x_periodic_task().
2204 */
2205 smp_mb();
2206 } else {
2207 bp->port.pmf = 0;
2208 }
2209
2210 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2211 }
2212
2213 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2214 {
2215 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2216 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2217 (bp->common.shmem2_base)) {
2218 if (SHMEM2_HAS(bp, dcc_support))
2219 SHMEM2_WR(bp, dcc_support,
2220 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2221 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2222 if (SHMEM2_HAS(bp, afex_driver_support))
2223 SHMEM2_WR(bp, afex_driver_support,
2224 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2225 }
2226
2227 /* Set AFEX default VLAN tag to an invalid value */
2228 bp->afex_def_vlan_tag = -1;
2229 }
2230
2231 /**
2232 * bnx2x_bz_fp - zero content of the fastpath structure.
2233 *
2234 * @bp: driver handle
2235 * @index: fastpath index to be zeroed
2236 *
2237 * Makes sure the contents of the bp->fp[index].napi is kept
2238 * intact.
2239 */
2240 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2241 {
2242 struct bnx2x_fastpath *fp = &bp->fp[index];
2243 struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index];
2244
2245 int cos;
2246 struct napi_struct orig_napi = fp->napi;
2247 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2248 /* bzero bnx2x_fastpath contents */
2249 if (bp->stats_init) {
2250 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
2251 memset(fp, 0, sizeof(*fp));
2252 } else {
2253 /* Keep Queue statistics */
2254 struct bnx2x_eth_q_stats *tmp_eth_q_stats;
2255 struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
2256
2257 tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
2258 GFP_KERNEL);
2259 if (tmp_eth_q_stats)
2260 memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats,
2261 sizeof(struct bnx2x_eth_q_stats));
2262
2263 tmp_eth_q_stats_old =
2264 kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
2265 GFP_KERNEL);
2266 if (tmp_eth_q_stats_old)
2267 memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old,
2268 sizeof(struct bnx2x_eth_q_stats_old));
2269
2270 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
2271 memset(fp, 0, sizeof(*fp));
2272
2273 if (tmp_eth_q_stats) {
2274 memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats,
2275 sizeof(struct bnx2x_eth_q_stats));
2276 kfree(tmp_eth_q_stats);
2277 }
2278
2279 if (tmp_eth_q_stats_old) {
2280 memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old,
2281 sizeof(struct bnx2x_eth_q_stats_old));
2282 kfree(tmp_eth_q_stats_old);
2283 }
2284
2285 }
2286
2287 /* Restore the NAPI object as it has been already initialized */
2288 fp->napi = orig_napi;
2289 fp->tpa_info = orig_tpa_info;
2290 fp->bp = bp;
2291 fp->index = index;
2292 if (IS_ETH_FP(fp))
2293 fp->max_cos = bp->max_cos;
2294 else
2295 /* Special queues support only one CoS */
2296 fp->max_cos = 1;
2297
2298 /* Init txdata pointers */
2299 if (IS_FCOE_FP(fp))
2300 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2301 if (IS_ETH_FP(fp))
2302 for_each_cos_in_tx_queue(fp, cos)
2303 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2304 BNX2X_NUM_ETH_QUEUES(bp) + index];
2305
2306 /*
2307 * set the tpa flag for each queue. The tpa flag determines the queue
2308 * minimal size so it must be set prior to queue memory allocation
2309 */
2310 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2311 (bp->flags & GRO_ENABLE_FLAG &&
2312 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2313 if (bp->flags & TPA_ENABLE_FLAG)
2314 fp->mode = TPA_MODE_LRO;
2315 else if (bp->flags & GRO_ENABLE_FLAG)
2316 fp->mode = TPA_MODE_GRO;
2317
2318 /* We don't want TPA on an FCoE L2 ring */
2319 if (IS_FCOE_FP(fp))
2320 fp->disable_tpa = 1;
2321 }
2322
2323 int bnx2x_load_cnic(struct bnx2x *bp)
2324 {
2325 int i, rc, port = BP_PORT(bp);
2326
2327 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2328
2329 mutex_init(&bp->cnic_mutex);
2330
2331 if (IS_PF(bp)) {
2332 rc = bnx2x_alloc_mem_cnic(bp);
2333 if (rc) {
2334 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2335 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2336 }
2337 }
2338
2339 rc = bnx2x_alloc_fp_mem_cnic(bp);
2340 if (rc) {
2341 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2342 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2343 }
2344
2345 /* Update the number of queues with the cnic queues */
2346 rc = bnx2x_set_real_num_queues(bp, 1);
2347 if (rc) {
2348 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2349 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2350 }
2351
2352 /* Add all CNIC NAPI objects */
2353 bnx2x_add_all_napi_cnic(bp);
2354 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2355 bnx2x_napi_enable_cnic(bp);
2356
2357 rc = bnx2x_init_hw_func_cnic(bp);
2358 if (rc)
2359 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2360
2361 bnx2x_nic_init_cnic(bp);
2362
2363 if (IS_PF(bp)) {
2364 /* Enable Timer scan */
2365 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2366
2367 /* setup cnic queues */
2368 for_each_cnic_queue(bp, i) {
2369 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2370 if (rc) {
2371 BNX2X_ERR("Queue setup failed\n");
2372 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2373 }
2374 }
2375 }
2376
2377 /* Initialize Rx filter. */
2378 netif_addr_lock_bh(bp->dev);
2379 bnx2x_set_rx_mode(bp->dev);
2380 netif_addr_unlock_bh(bp->dev);
2381
2382 /* re-read iscsi info */
2383 bnx2x_get_iscsi_info(bp);
2384 bnx2x_setup_cnic_irq_info(bp);
2385 bnx2x_setup_cnic_info(bp);
2386 bp->cnic_loaded = true;
2387 if (bp->state == BNX2X_STATE_OPEN)
2388 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2389
2390
2391 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2392
2393 return 0;
2394
2395 #ifndef BNX2X_STOP_ON_ERROR
2396 load_error_cnic2:
2397 /* Disable Timer scan */
2398 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2399
2400 load_error_cnic1:
2401 bnx2x_napi_disable_cnic(bp);
2402 /* Update the number of queues without the cnic queues */
2403 rc = bnx2x_set_real_num_queues(bp, 0);
2404 if (rc)
2405 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2406 load_error_cnic0:
2407 BNX2X_ERR("CNIC-related load failed\n");
2408 bnx2x_free_fp_mem_cnic(bp);
2409 bnx2x_free_mem_cnic(bp);
2410 return rc;
2411 #endif /* ! BNX2X_STOP_ON_ERROR */
2412 }
2413
2414
2415 /* must be called with rtnl_lock */
2416 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2417 {
2418 int port = BP_PORT(bp);
2419 int i, rc = 0, load_code = 0;
2420
2421 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2422 DP(NETIF_MSG_IFUP,
2423 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2424
2425 #ifdef BNX2X_STOP_ON_ERROR
2426 if (unlikely(bp->panic)) {
2427 BNX2X_ERR("Can't load NIC when there is panic\n");
2428 return -EPERM;
2429 }
2430 #endif
2431
2432 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2433
2434 /* Set the initial link reported state to link down */
2435 bnx2x_acquire_phy_lock(bp);
2436 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2437 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2438 &bp->last_reported_link.link_report_flags);
2439 bnx2x_release_phy_lock(bp);
2440
2441 if (IS_PF(bp))
2442 /* must be called before memory allocation and HW init */
2443 bnx2x_ilt_set_info(bp);
2444
2445 /*
2446 * Zero fastpath structures preserving invariants like napi, which are
2447 * allocated only once, fp index, max_cos, bp pointer.
2448 * Also set fp->disable_tpa and txdata_ptr.
2449 */
2450 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2451 for_each_queue(bp, i)
2452 bnx2x_bz_fp(bp, i);
2453 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2454 bp->num_cnic_queues) *
2455 sizeof(struct bnx2x_fp_txdata));
2456
2457 bp->fcoe_init = false;
2458
2459 /* Set the receive queues buffer size */
2460 bnx2x_set_rx_buf_size(bp);
2461
2462 if (IS_PF(bp)) {
2463 rc = bnx2x_alloc_mem(bp);
2464 if (rc) {
2465 BNX2X_ERR("Unable to allocate bp memory\n");
2466 return rc;
2467 }
2468 }
2469
2470 /* Allocate memory for FW statistics */
2471 if (bnx2x_alloc_fw_stats_mem(bp))
2472 LOAD_ERROR_EXIT(bp, load_error0);
2473
2474 /* need to be done after alloc mem, since it's self adjusting to amount
2475 * of memory available for RSS queues
2476 */
2477 rc = bnx2x_alloc_fp_mem(bp);
2478 if (rc) {
2479 BNX2X_ERR("Unable to allocate memory for fps\n");
2480 LOAD_ERROR_EXIT(bp, load_error0);
2481 }
2482
2483 /* request pf to initialize status blocks */
2484 if (IS_VF(bp)) {
2485 rc = bnx2x_vfpf_init(bp);
2486 if (rc)
2487 LOAD_ERROR_EXIT(bp, load_error0);
2488 }
2489
2490 /* Since bnx2x_alloc_mem() may possibly update
2491 * bp->num_queues, bnx2x_set_real_num_queues() should always
2492 * come after it. At this stage cnic queues are not counted.
2493 */
2494 rc = bnx2x_set_real_num_queues(bp, 0);
2495 if (rc) {
2496 BNX2X_ERR("Unable to set real_num_queues\n");
2497 LOAD_ERROR_EXIT(bp, load_error0);
2498 }
2499
2500 /* configure multi cos mappings in kernel.
2501 * this configuration may be overridden by a multi class queue discipline
2502 * or by a dcbx negotiation result.
2503 */
2504 bnx2x_setup_tc(bp->dev, bp->max_cos);
2505
2506 /* Add all NAPI objects */
2507 bnx2x_add_all_napi(bp);
2508 DP(NETIF_MSG_IFUP, "napi added\n");
2509 bnx2x_napi_enable(bp);
2510
2511 if (IS_PF(bp)) {
2512 /* set pf load just before approaching the MCP */
2513 bnx2x_set_pf_load(bp);
2514
2515 /* if mcp exists send load request and analyze response */
2516 if (!BP_NOMCP(bp)) {
2517 /* attempt to load pf */
2518 rc = bnx2x_nic_load_request(bp, &load_code);
2519 if (rc)
2520 LOAD_ERROR_EXIT(bp, load_error1);
2521
2522 /* what did mcp say? */
2523 rc = bnx2x_nic_load_analyze_req(bp, load_code);
2524 if (rc) {
2525 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2526 LOAD_ERROR_EXIT(bp, load_error2);
2527 }
2528 } else {
2529 load_code = bnx2x_nic_load_no_mcp(bp, port);
2530 }
2531
2532 /* mark pmf if applicable */
2533 bnx2x_nic_load_pmf(bp, load_code);
2534
2535 /* Init Function state controlling object */
2536 bnx2x__init_func_obj(bp);
2537
2538 /* Initialize HW */
2539 rc = bnx2x_init_hw(bp, load_code);
2540 if (rc) {
2541 BNX2X_ERR("HW init failed, aborting\n");
2542 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2543 LOAD_ERROR_EXIT(bp, load_error2);
2544 }
2545 }
2546
2547 /* Connect to IRQs */
2548 rc = bnx2x_setup_irqs(bp);
2549 if (rc) {
2550 BNX2X_ERR("setup irqs failed\n");
2551 if (IS_PF(bp))
2552 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2553 LOAD_ERROR_EXIT(bp, load_error2);
2554 }
2555
2556 /* Setup NIC internals and enable interrupts */
2557 bnx2x_nic_init(bp, load_code);
2558
2559 /* Init per-function objects */
2560 if (IS_PF(bp)) {
2561 bnx2x_init_bp_objs(bp);
2562 bnx2x_iov_nic_init(bp);
2563
2564 /* Set AFEX default VLAN tag to an invalid value */
2565 bp->afex_def_vlan_tag = -1;
2566 bnx2x_nic_load_afex_dcc(bp, load_code);
2567 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2568 rc = bnx2x_func_start(bp);
2569 if (rc) {
2570 BNX2X_ERR("Function start failed!\n");
2571 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2572
2573 LOAD_ERROR_EXIT(bp, load_error3);
2574 }
2575
2576 /* Send LOAD_DONE command to MCP */
2577 if (!BP_NOMCP(bp)) {
2578 load_code = bnx2x_fw_command(bp,
2579 DRV_MSG_CODE_LOAD_DONE, 0);
2580 if (!load_code) {
2581 BNX2X_ERR("MCP response failure, aborting\n");
2582 rc = -EBUSY;
2583 LOAD_ERROR_EXIT(bp, load_error3);
2584 }
2585 }
2586
2587 /* setup the leading queue */
2588 rc = bnx2x_setup_leading(bp);
2589 if (rc) {
2590 BNX2X_ERR("Setup leading failed!\n");
2591 LOAD_ERROR_EXIT(bp, load_error3);
2592 }
2593
2594 /* set up the rest of the queues */
2595 for_each_nondefault_eth_queue(bp, i) {
2596 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2597 if (rc) {
2598 BNX2X_ERR("Queue setup failed\n");
2599 LOAD_ERROR_EXIT(bp, load_error3);
2600 }
2601 }
2602
2603 /* setup rss */
2604 rc = bnx2x_init_rss_pf(bp);
2605 if (rc) {
2606 BNX2X_ERR("PF RSS init failed\n");
2607 LOAD_ERROR_EXIT(bp, load_error3);
2608 }
2609
2610 } else { /* vf */
2611 for_each_eth_queue(bp, i) {
2612 rc = bnx2x_vfpf_setup_q(bp, i);
2613 if (rc) {
2614 BNX2X_ERR("Queue setup failed\n");
2615 LOAD_ERROR_EXIT(bp, load_error3);
2616 }
2617 }
2618 }
2619
2620 /* Now when Clients are configured we are ready to work */
2621 bp->state = BNX2X_STATE_OPEN;
2622
2623 /* Configure a ucast MAC */
2624 if (IS_PF(bp))
2625 rc = bnx2x_set_eth_mac(bp, true);
2626 else /* vf */
2627 rc = bnx2x_vfpf_set_mac(bp);
2628 if (rc) {
2629 BNX2X_ERR("Setting Ethernet MAC failed\n");
2630 LOAD_ERROR_EXIT(bp, load_error3);
2631 }
2632
2633 if (IS_PF(bp) && bp->pending_max) {
2634 bnx2x_update_max_mf_config(bp, bp->pending_max);
2635 bp->pending_max = 0;
2636 }
2637
2638 if (bp->port.pmf) {
2639 rc = bnx2x_initial_phy_init(bp, load_mode);
2640 if (rc)
2641 LOAD_ERROR_EXIT(bp, load_error3);
2642 }
2643 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2644
2645 /* Start fast path */
2646
2647 /* Initialize Rx filter. */
2648 netif_addr_lock_bh(bp->dev);
2649 bnx2x_set_rx_mode(bp->dev);
2650 netif_addr_unlock_bh(bp->dev);
2651
2652 /* Start the Tx */
2653 switch (load_mode) {
2654 case LOAD_NORMAL:
2655 /* Tx queue should be only reenabled */
2656 netif_tx_wake_all_queues(bp->dev);
2657 break;
2658
2659 case LOAD_OPEN:
2660 netif_tx_start_all_queues(bp->dev);
2661 smp_mb__after_clear_bit();
2662 break;
2663
2664 case LOAD_DIAG:
2665 case LOAD_LOOPBACK_EXT:
2666 bp->state = BNX2X_STATE_DIAG;
2667 break;
2668
2669 default:
2670 break;
2671 }
2672
2673 if (bp->port.pmf)
2674 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2675 else
2676 bnx2x__link_status_update(bp);
2677
2678 /* start the timer */
2679 mod_timer(&bp->timer, jiffies + bp->current_interval);
2680
2681 if (CNIC_ENABLED(bp))
2682 bnx2x_load_cnic(bp);
2683
2684 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2685 /* mark driver is loaded in shmem2 */
2686 u32 val;
2687 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2688 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2689 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2690 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2691 }
2692
2693 /* Wait for all pending SP commands to complete */
2694 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2695 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2696 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2697 return -EBUSY;
2698 }
2699
2700 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2701 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2702 bnx2x_dcbx_init(bp, false);
2703
2704 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2705
2706 return 0;
2707
2708 #ifndef BNX2X_STOP_ON_ERROR
2709 load_error3:
2710 if (IS_PF(bp)) {
2711 bnx2x_int_disable_sync(bp, 1);
2712
2713 /* Clean queueable objects */
2714 bnx2x_squeeze_objects(bp);
2715 }
2716
2717 /* Free SKBs, SGEs, TPA pool and driver internals */
2718 bnx2x_free_skbs(bp);
2719 for_each_rx_queue(bp, i)
2720 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2721
2722 /* Release IRQs */
2723 bnx2x_free_irq(bp);
2724 load_error2:
2725 if (IS_PF(bp) && !BP_NOMCP(bp)) {
2726 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2727 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2728 }
2729
2730 bp->port.pmf = 0;
2731 load_error1:
2732 bnx2x_napi_disable(bp);
2733
2734 /* clear pf_load status, as it was already set */
2735 if (IS_PF(bp))
2736 bnx2x_clear_pf_load(bp);
2737 load_error0:
2738 bnx2x_free_fp_mem(bp);
2739 bnx2x_free_fw_stats_mem(bp);
2740 bnx2x_free_mem(bp);
2741
2742 return rc;
2743 #endif /* ! BNX2X_STOP_ON_ERROR */
2744 }
2745
2746 static int bnx2x_drain_tx_queues(struct bnx2x *bp)
2747 {
2748 u8 rc = 0, cos, i;
2749
2750 /* Wait until tx fastpath tasks complete */
2751 for_each_tx_queue(bp, i) {
2752 struct bnx2x_fastpath *fp = &bp->fp[i];
2753
2754 for_each_cos_in_tx_queue(fp, cos)
2755 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2756 if (rc)
2757 return rc;
2758 }
2759 return 0;
2760 }
2761
2762 /* must be called with rtnl_lock */
2763 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2764 {
2765 int i;
2766 bool global = false;
2767
2768 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2769
2770 /* mark driver is unloaded in shmem2 */
2771 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2772 u32 val;
2773 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2774 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2775 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2776 }
2777
2778 if (IS_PF(bp) &&
2779 (bp->state == BNX2X_STATE_CLOSED ||
2780 bp->state == BNX2X_STATE_ERROR)) {
2781 /* We can get here if the driver has been unloaded
2782 * during parity error recovery and is either waiting for a
2783 * leader to complete or for other functions to unload and
2784 * then ifdown has been issued. In this case we want to
2785 * unload and let other functions complete a recovery
2786 * process.
2787 */
2788 bp->recovery_state = BNX2X_RECOVERY_DONE;
2789 bp->is_leader = 0;
2790 bnx2x_release_leader_lock(bp);
2791 smp_mb();
2792
2793 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2794 BNX2X_ERR("Can't unload in closed or error state\n");
2795 return -EINVAL;
2796 }
2797
2798 /*
2799 * It's important to set bp->state to a value different from
2800 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2801 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2802 */
2803 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2804 smp_mb();
2805
2806 if (CNIC_LOADED(bp))
2807 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2808
2809 /* Stop Tx */
2810 bnx2x_tx_disable(bp);
2811 netdev_reset_tc(bp->dev);
2812
2813 bp->rx_mode = BNX2X_RX_MODE_NONE;
2814
2815 del_timer_sync(&bp->timer);
2816
2817 if (IS_PF(bp)) {
2818 /* Set ALWAYS_ALIVE bit in shmem */
2819 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2820 bnx2x_drv_pulse(bp);
2821 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2822 bnx2x_save_statistics(bp);
2823 }
2824
2825 /* wait till consumers catch up with producers in all queues */
2826 bnx2x_drain_tx_queues(bp);
2827
2828 /* if VF, indicate to PF that this function is going down (PF will delete
2829 * sp elements and clear initializations)
2830 */
2831 if (IS_VF(bp))
2832 bnx2x_vfpf_close_vf(bp);
2833 else if (unload_mode != UNLOAD_RECOVERY)
2834 /* if this is a normal/close unload need to clean up chip*/
2835 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
2836 else {
2837 /* Send the UNLOAD_REQUEST to the MCP */
2838 bnx2x_send_unload_req(bp, unload_mode);
2839
2840 /*
2841 * Prevent transactions to host from the functions on the
2842 * engine that doesn't reset global blocks in case of global
2843 * attention once global blocks are reset and gates are opened
2844 * (the engine whose leader will perform the recovery
2845 * last).
2846 */
2847 if (!CHIP_IS_E1x(bp))
2848 bnx2x_pf_disable(bp);
2849
2850 /* Disable HW interrupts, NAPI */
2851 bnx2x_netif_stop(bp, 1);
2852 /* Delete all NAPI objects */
2853 bnx2x_del_all_napi(bp);
2854 if (CNIC_LOADED(bp))
2855 bnx2x_del_all_napi_cnic(bp);
2856 /* Release IRQs */
2857 bnx2x_free_irq(bp);
2858
2859 /* Report UNLOAD_DONE to MCP */
2860 bnx2x_send_unload_done(bp, false);
2861 }
2862
2863 /*
2864 * At this stage no more interrupts will arrive so we may safely clean
2865 * the queueable objects here in case they failed to get cleaned so far.
2866 */
2867 if (IS_PF(bp))
2868 bnx2x_squeeze_objects(bp);
2869
2870 /* There should be no more pending SP commands at this stage */
2871 bp->sp_state = 0;
2872
2873 bp->port.pmf = 0;
2874
2875 /* Free SKBs, SGEs, TPA pool and driver internals */
2876 bnx2x_free_skbs(bp);
2877 if (CNIC_LOADED(bp))
2878 bnx2x_free_skbs_cnic(bp);
2879 for_each_rx_queue(bp, i)
2880 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2881
2882 bnx2x_free_fp_mem(bp);
2883 if (CNIC_LOADED(bp))
2884 bnx2x_free_fp_mem_cnic(bp);
2885
2886 if (IS_PF(bp)) {
2887 bnx2x_free_mem(bp);
2888 if (CNIC_LOADED(bp))
2889 bnx2x_free_mem_cnic(bp);
2890 }
2891 bp->state = BNX2X_STATE_CLOSED;
2892 bp->cnic_loaded = false;
2893
2894 /* Check if there are pending parity attentions. If there are - set
2895 * RECOVERY_IN_PROGRESS.
2896 */
2897 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
2898 bnx2x_set_reset_in_progress(bp);
2899
2900 /* Set RESET_IS_GLOBAL if needed */
2901 if (global)
2902 bnx2x_set_reset_global(bp);
2903 }
2904
2905
2906 /* The last driver must disable a "close the gate" if there is no
2907 * parity attention or "process kill" pending.
2908 */
2909 if (IS_PF(bp) &&
2910 !bnx2x_clear_pf_load(bp) &&
2911 bnx2x_reset_is_done(bp, BP_PATH(bp)))
2912 bnx2x_disable_close_the_gate(bp);
2913
2914 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
2915
2916 return 0;
2917 }
2918
2919 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2920 {
2921 u16 pmcsr;
2922
2923 /* If there is no power capability, silently succeed */
2924 if (!bp->pm_cap) {
2925 BNX2X_DEV_INFO("No power capability. Breaking.\n");
2926 return 0;
2927 }
2928
2929 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2930
2931 switch (state) {
2932 case PCI_D0:
2933 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2934 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2935 PCI_PM_CTRL_PME_STATUS));
2936
2937 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2938 /* delay required during transition out of D3hot */
2939 msleep(20);
2940 break;
2941
2942 case PCI_D3hot:
2943 /* If there are other clients above, don't
2944 shut down the power */
2945 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2946 return 0;
2947 /* Don't shut down the power for emulation and FPGA */
2948 if (CHIP_REV_IS_SLOW(bp))
2949 return 0;
2950
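		/* PCI_PM_CTRL PowerState field (bits 1:0): 0 = D0, 3 = D3hot;
		 * clearing the field and OR-ing in 3 below requests D3hot.
		 */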
2951 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2952 pmcsr |= 3;
2953
2954 if (bp->wol)
2955 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2956
2957 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2958 pmcsr);
2959
2960 /* No more memory access after this point until
2961 * device is brought back to D0.
2962 */
2963 break;
2964
2965 default:
2966 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
2967 return -EINVAL;
2968 }
2969 return 0;
2970 }
2971
2972 /*
2973 * net_device service functions
2974 */
2975 int bnx2x_poll(struct napi_struct *napi, int budget)
2976 {
2977 int work_done = 0;
2978 u8 cos;
2979 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2980 napi);
2981 struct bnx2x *bp = fp->bp;
2982
2983 while (1) {
2984 #ifdef BNX2X_STOP_ON_ERROR
2985 if (unlikely(bp->panic)) {
2986 napi_complete(napi);
2987 return 0;
2988 }
2989 #endif
2990
2991 for_each_cos_in_tx_queue(fp, cos)
2992 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
2993 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
2994
2995
2996 if (bnx2x_has_rx_work(fp)) {
2997 work_done += bnx2x_rx_int(fp, budget - work_done);
2998
2999 /* must not complete if we consumed full budget */
3000 if (work_done >= budget)
3001 break;
3002 }
3003
3004 /* Fall out from the NAPI loop if needed */
3005 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3006
3007 /* No need to update SB for FCoE L2 ring as long as
3008 * it's connected to the default SB and the SB
3009 * has been updated when NAPI was scheduled.
3010 */
3011 if (IS_FCOE_FP(fp)) {
3012 napi_complete(napi);
3013 break;
3014 }
3015 bnx2x_update_fpsb_idx(fp);
3016 /* bnx2x_has_rx_work() reads the status block,
3017 * thus we need to ensure that status block indices
3018 * have been actually read (bnx2x_update_fpsb_idx)
3019 * prior to this check (bnx2x_has_rx_work) so that
3020 * we won't write the "newer" value of the status block
3021 * to IGU (if there was a DMA right after
3022 * bnx2x_has_rx_work and if there is no rmb, the memory
3023 * reading (bnx2x_update_fpsb_idx) may be postponed
3024 * to right before bnx2x_ack_sb). In this case there
3025 * will never be another interrupt until there is
3026 * another update of the status block, while there
3027 * is still unhandled work.
3028 */
3029 rmb();
3030
3031 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3032 napi_complete(napi);
3033 /* Re-enable interrupts */
3034 DP(NETIF_MSG_RX_STATUS,
3035 "Update index to %d\n", fp->fp_hc_idx);
3036 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3037 le16_to_cpu(fp->fp_hc_idx),
3038 IGU_INT_ENABLE, 1);
3039 break;
3040 }
3041 }
3042 }
3043
3044 return work_done;
3045 }
3046
3047 /* we split the first BD into headers and data BDs
3048 * to ease the pain of our fellow microcode engineers
3049 * we use one mapping for both BDs
3050 */
3051 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
3052 struct bnx2x_fp_txdata *txdata,
3053 struct sw_tx_bd *tx_buf,
3054 struct eth_tx_start_bd **tx_bd, u16 hlen,
3055 u16 bd_prod, int nbd)
3056 {
3057 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3058 struct eth_tx_bd *d_tx_bd;
3059 dma_addr_t mapping;
3060 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3061
3062 /* first fix first BD */
3063 h_tx_bd->nbd = cpu_to_le16(nbd);
3064 h_tx_bd->nbytes = cpu_to_le16(hlen);
3065
3066 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
3067 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
3068
3069 /* now get a new data BD
3070 * (after the pbd) and fill it */
3071 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3072 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3073
3074 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3075 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3076
3077 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3078 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3079 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3080
3081 /* this marks the BD as one that has no individual mapping */
3082 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3083
3084 DP(NETIF_MSG_TX_QUEUED,
3085 "TSO split data size is %d (%x:%x)\n",
3086 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3087
3088 /* update tx_bd */
3089 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3090
3091 return bd_prod;
3092 }
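/* Editor's illustrative sketch (not driver code): after the split the start
 * BD describes only the headers and the new data BD reuses the same DMA
 * mapping at an offset:
 *
 *	header BD: addr = mapping,        nbytes = hlen
 *	data BD:   addr = mapping + hlen, nbytes = old_len - hlen
 *
 * so no additional mapping is created for the data part.
 */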
3093
3094 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3095 {
3096 if (fix > 0)
3097 csum = (u16) ~csum_fold(csum_sub(csum,
3098 csum_partial(t_header - fix, fix, 0)));
3099
3100 else if (fix < 0)
3101 csum = (u16) ~csum_fold(csum_add(csum,
3102 csum_partial(t_header, -fix, 0)));
3103
3104 return swab16(csum);
3105 }
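/* Editor's note (conceptual sketch, not driver code): the stack's partial
 * checksum may start 'fix' bytes away from the transport header, so the
 * value is corrected by folding the checksum of that gap out of (or into)
 * the sum, roughly (for fix > 0):
 *
 *	csum_at_thdr = csum_sub(csum, csum_partial(t_header - fix, fix, 0));
 *
 * and the folded result is byte-swapped for the HW.
 */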
3106
3107 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3108 {
3109 u32 rc;
3110
3111 if (skb->ip_summed != CHECKSUM_PARTIAL)
3112 rc = XMIT_PLAIN;
3113
3114 else {
3115 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
3116 rc = XMIT_CSUM_V6;
3117 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3118 rc |= XMIT_CSUM_TCP;
3119
3120 } else {
3121 rc = XMIT_CSUM_V4;
3122 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3123 rc |= XMIT_CSUM_TCP;
3124 }
3125 }
3126
3127 if (skb_is_gso_v6(skb))
3128 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
3129 else if (skb_is_gso(skb))
3130 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
3131
3132 return rc;
3133 }
3134
3135 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3136 /* check if packet requires linearization (packet is too fragmented)
3137 no need to check fragmentation if page size > 8K (there will be no
3138 violation of FW restrictions) */
3139 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3140 u32 xmit_type)
3141 {
3142 int to_copy = 0;
3143 int hlen = 0;
3144 int first_bd_sz = 0;
3145
3146 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3147 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3148
3149 if (xmit_type & XMIT_GSO) {
3150 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3151 /* Check if LSO packet needs to be copied:
3152 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3153 int wnd_size = MAX_FETCH_BD - 3;
3154 /* Number of windows to check */
3155 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3156 int wnd_idx = 0;
3157 int frag_idx = 0;
3158 u32 wnd_sum = 0;
3159
3160 /* Headers length */
3161 hlen = (int)(skb_transport_header(skb) - skb->data) +
3162 tcp_hdrlen(skb);
3163
3164 /* Amount of data (w/o headers) on linear part of SKB */
3165 first_bd_sz = skb_headlen(skb) - hlen;
3166
3167 wnd_sum = first_bd_sz;
3168
3169 /* Calculate the first sum - it's special */
3170 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3171 wnd_sum +=
3172 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3173
3174 /* If there was data on linear skb data - check it */
3175 if (first_bd_sz > 0) {
3176 if (unlikely(wnd_sum < lso_mss)) {
3177 to_copy = 1;
3178 goto exit_lbl;
3179 }
3180
3181 wnd_sum -= first_bd_sz;
3182 }
3183
3184 /* Others are easier: run through the frag list and
3185 check all windows */
3186 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3187 wnd_sum +=
3188 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3189
3190 if (unlikely(wnd_sum < lso_mss)) {
3191 to_copy = 1;
3192 break;
3193 }
3194 wnd_sum -=
3195 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3196 }
3197 } else {
3198 /* in the non-LSO case a too-fragmented packet should
3199 always be linearized */
3200 to_copy = 1;
3201 }
3202 }
3203
3204 exit_lbl:
3205 if (unlikely(to_copy))
3206 DP(NETIF_MSG_TX_QUEUED,
3207 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
3208 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3209 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3210
3211 return to_copy;
3212 }
3213 #endif
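/* Editor's worked example (numbers are illustrative): if MAX_FETCH_BD were
 * 13, the window size would be 10 BDs.  For an LSO skb with gso_size (MSS)
 * of 1400, every window of 10 consecutive BDs (frags, plus the linear part
 * for the first window) must carry at least 1400 bytes; otherwise a window
 * could fail to produce a full segment and the skb is linearized
 * (to_copy = 1).
 */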
3214
3215 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3216 u32 xmit_type)
3217 {
3218 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3219 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3220 ETH_TX_PARSE_BD_E2_LSO_MSS;
3221 if ((xmit_type & XMIT_GSO_V6) &&
3222 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
3223 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3224 }
3225
3226 /**
3227 * bnx2x_set_pbd_gso - update PBD in GSO case.
3228 *
3229 * @skb: packet skb
3230 * @pbd: parse BD
3231 * @xmit_type: xmit flags
3232 */
3233 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
3234 struct eth_tx_parse_bd_e1x *pbd,
3235 u32 xmit_type)
3236 {
3237 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3238 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
3239 pbd->tcp_flags = pbd_tcp_flags(skb);
3240
3241 if (xmit_type & XMIT_GSO_V4) {
3242 pbd->ip_id = swab16(ip_hdr(skb)->id);
3243 pbd->tcp_pseudo_csum =
3244 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3245 ip_hdr(skb)->daddr,
3246 0, IPPROTO_TCP, 0));
3247
3248 } else
3249 pbd->tcp_pseudo_csum =
3250 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3251 &ipv6_hdr(skb)->daddr,
3252 0, IPPROTO_TCP, 0));
3253
3254 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
3255 }
3256
3257 /**
3258 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3259 *
3260 * @bp: driver handle
3261 * @skb: packet skb
3262 * @parsing_data: data to be updated
3263 * @xmit_type: xmit flags
3264 *
3265 * 57712 related
3266 */
3267 static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3268 u32 *parsing_data, u32 xmit_type)
3269 {
3270 *parsing_data |=
3271 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3272 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
3273 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
3274
3275 if (xmit_type & XMIT_CSUM_TCP) {
3276 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3277 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3278 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3279
3280 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3281 } else
3282 /* We support checksum offload for TCP and UDP only.
3283 * No need to pass the UDP header length - it's a constant.
3284 */
3285 return skb_transport_header(skb) +
3286 sizeof(struct udphdr) - skb->data;
3287 }
3288
3289 static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3290 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
3291 {
3292 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3293
3294 if (xmit_type & XMIT_CSUM_V4)
3295 tx_start_bd->bd_flags.as_bitfield |=
3296 ETH_TX_BD_FLAGS_IP_CSUM;
3297 else
3298 tx_start_bd->bd_flags.as_bitfield |=
3299 ETH_TX_BD_FLAGS_IPV6;
3300
3301 if (!(xmit_type & XMIT_CSUM_TCP))
3302 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3303 }
3304
3305 /**
3306 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3307 *
3308 * @bp: driver handle
3309 * @skb: packet skb
3310 * @pbd: parse BD to be updated
3311 * @xmit_type: xmit flags
3312 */
3313 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3314 struct eth_tx_parse_bd_e1x *pbd,
3315 u32 xmit_type)
3316 {
3317 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3318
3319 /* for now NS flag is not used in Linux */
3320 pbd->global_data =
3321 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3322 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3323
3324 pbd->ip_hlen_w = (skb_transport_header(skb) -
3325 skb_network_header(skb)) >> 1;
3326
3327 hlen += pbd->ip_hlen_w;
3328
3329 /* We support checksum offload for TCP and UDP only */
3330 if (xmit_type & XMIT_CSUM_TCP)
3331 hlen += tcp_hdrlen(skb) / 2;
3332 else
3333 hlen += sizeof(struct udphdr) / 2;
3334
3335 pbd->total_hlen_w = cpu_to_le16(hlen);
3336 hlen = hlen*2;
3337
3338 if (xmit_type & XMIT_CSUM_TCP) {
3339 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
3340
3341 } else {
3342 s8 fix = SKB_CS_OFF(skb); /* signed! */
3343
3344 DP(NETIF_MSG_TX_QUEUED,
3345 "hlen %d fix %d csum before fix %x\n",
3346 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3347
3348 /* HW bug: fixup the CSUM */
3349 pbd->tcp_pseudo_csum =
3350 bnx2x_csum_fix(skb_transport_header(skb),
3351 SKB_CS(skb), fix);
3352
3353 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3354 pbd->tcp_pseudo_csum);
3355 }
3356
3357 return hlen;
3358 }
3359
3360 /* called with netif_tx_lock
3361 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3362 * netif_wake_queue()
3363 */
3364 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3365 {
3366 struct bnx2x *bp = netdev_priv(dev);
3367
3368 struct netdev_queue *txq;
3369 struct bnx2x_fp_txdata *txdata;
3370 struct sw_tx_bd *tx_buf;
3371 struct eth_tx_start_bd *tx_start_bd, *first_bd;
3372 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3373 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3374 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3375 u32 pbd_e2_parsing_data = 0;
3376 u16 pkt_prod, bd_prod;
3377 int nbd, txq_index;
3378 dma_addr_t mapping;
3379 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3380 int i;
3381 u8 hlen = 0;
3382 __le16 pkt_size = 0;
3383 struct ethhdr *eth;
3384 u8 mac_type = UNICAST_ADDRESS;
3385
3386 #ifdef BNX2X_STOP_ON_ERROR
3387 if (unlikely(bp->panic))
3388 return NETDEV_TX_BUSY;
3389 #endif
3390
3391 txq_index = skb_get_queue_mapping(skb);
3392 txq = netdev_get_tx_queue(dev, txq_index);
3393
3394 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3395
3396 txdata = &bp->bnx2x_txq[txq_index];
3397
3398 /* enable this debug print to view the transmission queue being used
3399 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3400 txq_index, fp_index, txdata_index); */
3401
3402 /* enable this debug print to view the transmission details
3403 DP(NETIF_MSG_TX_QUEUED,
3404 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3405 txdata->cid, fp_index, txdata_index, txdata, fp); */
3406
3407 if (unlikely(bnx2x_tx_avail(bp, txdata) <
3408 skb_shinfo(skb)->nr_frags +
3409 BDS_PER_TX_PKT +
3410 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3411 /* Handle special storage cases separately */
3412 if (txdata->tx_ring_size == 0) {
3413 struct bnx2x_eth_q_stats *q_stats =
3414 bnx2x_fp_qstats(bp, txdata->parent_fp);
3415 q_stats->driver_filtered_tx_pkt++;
3416 dev_kfree_skb(skb);
3417 return NETDEV_TX_OK;
3418 }
3419 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3420 netif_tx_stop_queue(txq);
3421 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3422
3423 return NETDEV_TX_BUSY;
3424 }
3425
3426 DP(NETIF_MSG_TX_QUEUED,
3427 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x\n",
3428 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3429 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
3430
3431 eth = (struct ethhdr *)skb->data;
3432
3433 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3434 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3435 if (is_broadcast_ether_addr(eth->h_dest))
3436 mac_type = BROADCAST_ADDRESS;
3437 else
3438 mac_type = MULTICAST_ADDRESS;
3439 }
3440
3441 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3442 /* First, check if we need to linearize the skb (due to FW
3443 restrictions). No need to check fragmentation if page size > 8K
3444 (there will be no violation of FW restrictions) */
3445 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3446 /* Statistics of linearization */
3447 bp->lin_cnt++;
3448 if (skb_linearize(skb) != 0) {
3449 DP(NETIF_MSG_TX_QUEUED,
3450 "SKB linearization failed - silently dropping this SKB\n");
3451 dev_kfree_skb_any(skb);
3452 return NETDEV_TX_OK;
3453 }
3454 }
3455 #endif
3456 /* Map skb linear data for DMA */
3457 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3458 skb_headlen(skb), DMA_TO_DEVICE);
3459 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3460 DP(NETIF_MSG_TX_QUEUED,
3461 "SKB mapping failed - silently dropping this SKB\n");
3462 dev_kfree_skb_any(skb);
3463 return NETDEV_TX_OK;
3464 }
3465 /*
3466 Please read carefully. First we use one BD which we mark as start,
3467 then we have a parsing info BD (used for TSO or xsum),
3468 and only then we have the rest of the TSO BDs.
3469 (don't forget to mark the last one as last,
3470 and to unmap only AFTER you write to the BD ...)
3471 And above all, all PBD sizes are in words - NOT DWORDS!
3472 */
3473
3474 /* get current pkt produced now - advance it just before sending packet
3475 * since mapping of pages may fail and cause packet to be dropped
3476 */
3477 pkt_prod = txdata->tx_pkt_prod;
3478 bd_prod = TX_BD(txdata->tx_bd_prod);
3479
3480 /* get a tx_buf and first BD
3481 * tx_start_bd may be changed during SPLIT,
3482 * but first_bd will always stay first
3483 */
3484 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3485 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3486 first_bd = tx_start_bd;
3487
3488 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3489 SET_FLAG(tx_start_bd->general_data,
3490 ETH_TX_START_BD_PARSE_NBDS,
3491 0);
3492
3493 /* header nbd */
3494 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
3495
3496 /* remember the first BD of the packet */
3497 tx_buf->first_bd = txdata->tx_bd_prod;
3498 tx_buf->skb = skb;
3499 tx_buf->flags = 0;
3500
3501 DP(NETIF_MSG_TX_QUEUED,
3502 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3503 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3504
3505 if (vlan_tx_tag_present(skb)) {
3506 tx_start_bd->vlan_or_ethertype =
3507 cpu_to_le16(vlan_tx_tag_get(skb));
3508 tx_start_bd->bd_flags.as_bitfield |=
3509 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3510 } else {
3511 /* when transmitting in a vf, start bd must hold the ethertype
3512 * for fw to enforce it
3513 */
3514 if (IS_VF(bp)) {
3515 tx_start_bd->vlan_or_ethertype =
3516 cpu_to_le16(ntohs(eth->h_proto));
3517 } else {
3518 /* used by FW for packet accounting */
3519 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3520 }
3521 }
3522
3523 /* turn on parsing and get a BD */
3524 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3525
3526 if (xmit_type & XMIT_CSUM)
3527 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3528
3529 if (!CHIP_IS_E1x(bp)) {
3530 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3531 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3532 /* Set PBD in checksum offload case */
3533 if (xmit_type & XMIT_CSUM)
3534 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3535 &pbd_e2_parsing_data,
3536 xmit_type);
3537
3538 if (IS_MF_SI(bp) || IS_VF(bp)) {
3539 /* fill in the MAC addresses in the PBD - for local
3540 * switching
3541 */
3542 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
3543 &pbd_e2->src_mac_addr_mid,
3544 &pbd_e2->src_mac_addr_lo,
3545 eth->h_source);
3546 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
3547 &pbd_e2->dst_mac_addr_mid,
3548 &pbd_e2->dst_mac_addr_lo,
3549 eth->h_dest);
3550 }
3551
3552 SET_FLAG(pbd_e2_parsing_data,
3553 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3554 } else {
3555 u16 global_data = 0;
3556 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3557 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3558 /* Set PBD in checksum offload case */
3559 if (xmit_type & XMIT_CSUM)
3560 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3561
3562 SET_FLAG(global_data,
3563 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3564 pbd_e1x->global_data |= cpu_to_le16(global_data);
3565 }
3566
3567 /* Setup the data pointer of the first BD of the packet */
3568 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3569 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3570 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3571 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3572 pkt_size = tx_start_bd->nbytes;
3573
3574 DP(NETIF_MSG_TX_QUEUED,
3575 "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
3576 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3577 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
3578 tx_start_bd->bd_flags.as_bitfield,
3579 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3580
3581 if (xmit_type & XMIT_GSO) {
3582
3583 DP(NETIF_MSG_TX_QUEUED,
3584 "TSO packet len %d hlen %d total len %d tso size %d\n",
3585 skb->len, hlen, skb_headlen(skb),
3586 skb_shinfo(skb)->gso_size);
3587
3588 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3589
3590 if (unlikely(skb_headlen(skb) > hlen))
3591 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3592 &tx_start_bd, hlen,
3593 bd_prod, ++nbd);
3594 if (!CHIP_IS_E1x(bp))
3595 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3596 xmit_type);
3597 else
3598 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
3599 }
3600
3601 /* Set the PBD's parsing_data field if not zero
3602 * (for the chips newer than 57711).
3603 */
3604 if (pbd_e2_parsing_data)
3605 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3606
3607 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3608
3609 /* Handle fragmented skb */
3610 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3611 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3612
3613 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3614 skb_frag_size(frag), DMA_TO_DEVICE);
3615 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3616 unsigned int pkts_compl = 0, bytes_compl = 0;
3617
3618 DP(NETIF_MSG_TX_QUEUED,
3619 "Unable to map page - dropping packet...\n");
3620
3621 /* we need to unmap all buffers already mapped
3622 * for this SKB;
3623 * first_bd->nbd need to be properly updated
3624 * before call to bnx2x_free_tx_pkt
3625 */
3626 first_bd->nbd = cpu_to_le16(nbd);
3627 bnx2x_free_tx_pkt(bp, txdata,
3628 TX_BD(txdata->tx_pkt_prod),
3629 &pkts_compl, &bytes_compl);
3630 return NETDEV_TX_OK;
3631 }
3632
3633 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3634 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3635 if (total_pkt_bd == NULL)
3636 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3637
3638 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3639 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3640 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3641 le16_add_cpu(&pkt_size, skb_frag_size(frag));
3642 nbd++;
3643
3644 DP(NETIF_MSG_TX_QUEUED,
3645 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3646 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3647 le16_to_cpu(tx_data_bd->nbytes));
3648 }
3649
3650 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3651
3652 /* update with actual num BDs */
3653 first_bd->nbd = cpu_to_le16(nbd);
3654
3655 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3656
3657 /* now send a tx doorbell, counting the next BD
3658 * if the packet contains or ends with it
3659 */
3660 if (TX_BD_POFF(bd_prod) < nbd)
3661 nbd++;
3662
3663 /* total_pkt_bytes should be set on the first data BD if
3664 * it's not an LSO packet and there is more than one
3665 * data BD. In this case pkt_size is limited by an MTU value.
3666 * However we prefer to set it for an LSO packet (while we don't
3667 * have to) in order to save some CPU cycles in a non-LSO
3668 * case, where we care much more about them.
3669 */
3670 if (total_pkt_bd != NULL)
3671 total_pkt_bd->total_pkt_bytes = pkt_size;
3672
3673 if (pbd_e1x)
3674 DP(NETIF_MSG_TX_QUEUED,
3675 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
3676 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3677 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3678 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3679 le16_to_cpu(pbd_e1x->total_hlen_w));
3680 if (pbd_e2)
3681 DP(NETIF_MSG_TX_QUEUED,
3682 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
3683 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
3684 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
3685 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
3686 pbd_e2->parsing_data);
3687 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3688
3689 netdev_tx_sent_queue(txq, skb->len);
3690
3691 skb_tx_timestamp(skb);
3692
3693 txdata->tx_pkt_prod++;
3694 /*
3695 * Make sure that the BD data is updated before updating the producer
3696 * since FW might read the BD right after the producer is updated.
3697 * This is only applicable for weak-ordered memory model archs such
3698 * as IA-64. The following barrier is also mandatory since FW
3699 * assumes packets must have BDs.
3700 */
3701 wmb();
3702
3703 txdata->tx_db.data.prod += nbd;
3704 barrier();
3705
3706 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
3707
3708 mmiowb();
3709
3710 txdata->tx_bd_prod += nbd;
3711
3712 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
3713 netif_tx_stop_queue(txq);
3714
3715 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3716 * ordering of set_bit() in netif_tx_stop_queue() and read of
3717 * fp->bd_tx_cons */
3718 smp_mb();
3719
3720 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3721 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
3722 netif_tx_wake_queue(txq);
3723 }
3724 txdata->tx_pkt++;
3725
3726 return NETDEV_TX_OK;
3727 }
3728
3729 /**
3730 * bnx2x_setup_tc - routine to configure net_device for multi tc
3731 *
3732 * @netdev: net device to configure
3733 * @tc: number of traffic classes to enable
3734 *
3735 * callback connected to the ndo_setup_tc function pointer
3736 */
3737 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3738 {
3739 int cos, prio, count, offset;
3740 struct bnx2x *bp = netdev_priv(dev);
3741
3742 /* setup tc must be called under rtnl lock */
3743 ASSERT_RTNL();
3744
3745 /* no traffic classes requested. aborting */
3746 if (!num_tc) {
3747 netdev_reset_tc(dev);
3748 return 0;
3749 }
3750
3751 /* requested to support too many traffic classes */
3752 if (num_tc > bp->max_cos) {
3753 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3754 num_tc, bp->max_cos);
3755 return -EINVAL;
3756 }
3757
3758 /* declare amount of supported traffic classes */
3759 if (netdev_set_num_tc(dev, num_tc)) {
3760 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
3761 return -EINVAL;
3762 }
3763
3764 /* configure priority to traffic class mapping */
3765 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3766 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
3767 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3768 "mapping priority %d to tc %d\n",
3769 prio, bp->prio_to_cos[prio]);
3770 }
3771
3772
3773 /* Use this configuration to differentiate tc0 from other COSes
3774 This can be used for ets or pfc, and saves the effort of setting
3775 up a multi class queue disc or negotiating DCBX with a switch
3776 netdev_set_prio_tc_map(dev, 0, 0);
3777 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
3778 for (prio = 1; prio < 16; prio++) {
3779 netdev_set_prio_tc_map(dev, prio, 1);
3780 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
3781 } */
3782
3783 /* configure traffic class to transmission queue mapping */
3784 for (cos = 0; cos < bp->max_cos; cos++) {
3785 count = BNX2X_NUM_ETH_QUEUES(bp);
3786 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
3787 netdev_set_tc_queue(dev, cos, count, offset);
3788 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3789 "mapping tc %d to offset %d count %d\n",
3790 cos, offset, count);
3791 }
3792
3793 return 0;
3794 }
3795
3796 /* New mac for VF. Consider these cases:
3797 * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
3798 * supply at acquire.
3799 * 2. VF has already been acquired but has not yet initialized - store in local
3800 * bulletin board. mac will be posted on VF bulletin board after VF init. VF
3801 * will configure this mac when it is ready.
3802 * 3. VF has already initialized but has not yet setup a queue - post the new
3803 * mac on VF's bulletin board right now. VF will configure this mac when it
3804 * is ready.
3805 * 4. VF has already set a queue - delete any macs already configured for this
3806 * queue and manually config the new mac.
3807  * In any event, once this function has been called, refuse any attempt by
3808  * the VF to configure any mac for itself except this mac. In case of a race
3809  * where the VF fails to see the new post on its bulletin board before sending
3810  * a mac configuration request, the PF will simply fail the request and the VF
3811  * can try again after consulting its bulletin board.
3812 */
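/* Note: this is the .ndo_set_vf_mac callback; it is typically reached from
 * userspace via something like "ip link set <pf-dev> vf <n> mac <addr>"
 * (the exact invocation depends on the tooling and is outside this driver).
 */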
3813 int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
3814 {
3815 struct bnx2x *bp = netdev_priv(dev);
3816 int rc, q_logical_state, vfidx = queue;
3817 struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
3818 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
3819
3820 /* if SRIOV is disabled there is nothing to do (and somewhere, someone
3821 * has erred).
3822 */
3823 if (!IS_SRIOV(bp)) {
3824 BNX2X_ERR("bnx2x_set_vf_mac called though sriov is disabled\n");
3825 return -EINVAL;
3826 }
3827
3828 if (!is_valid_ether_addr(mac)) {
3829 BNX2X_ERR("mac address invalid\n");
3830 return -EINVAL;
3831 }
3832
3833 	/* update PF's copy of the VF's bulletin. The PF will no longer accept
3834 	 * mac configuration requests from the VF unless they match this mac
3835 */
3836 bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
3837 memcpy(bulletin->mac, mac, ETH_ALEN);
3838
3839 /* Post update on VF's bulletin board */
3840 rc = bnx2x_post_vf_bulletin(bp, vfidx);
3841 if (rc) {
3842 BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
3843 return rc;
3844 }
3845
3846 /* is vf initialized and queue set up? */
3847 q_logical_state =
3848 bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
3849 if (vf->state == VF_ENABLED &&
3850 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
3851 /* configure the mac in device on this vf's queue */
3852 unsigned long flags = 0;
3853 struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
3854
3855 /* must lock vfpf channel to protect against vf flows */
3856 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
3857
3858 /* remove existing eth macs */
3859 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
3860 		if (rc) {
3861 			BNX2X_ERR("failed to delete eth macs\n");
			/* don't leave the vf-pf channel locked on the error path */
			bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
3862 			return -EINVAL;
3863 		}
3864
3865 /* remove existing uc list macs */
3866 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
3867 		if (rc) {
3868 			BNX2X_ERR("failed to delete uc_list macs\n");
			/* don't leave the vf-pf channel locked on the error path */
			bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
3869 			return -EINVAL;
3870 		}
3871
3872 /* configure the new mac to device */
3873 __set_bit(RAMROD_COMP_WAIT, &flags);
3874 bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
3875 BNX2X_ETH_MAC, &flags);
3876
3877 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
3878 }
3879
3880 return rc;
3881 }
3882
3883 /* called with rtnl_lock */
3884 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3885 {
3886 struct sockaddr *addr = p;
3887 struct bnx2x *bp = netdev_priv(dev);
3888 int rc = 0;
3889
3890 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
3891 BNX2X_ERR("Requested MAC address is not valid\n");
3892 return -EINVAL;
3893 }
3894
3895 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3896 !is_zero_ether_addr(addr->sa_data)) {
3897 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
3898 return -EINVAL;
3899 }
3900
3901 if (netif_running(dev)) {
3902 rc = bnx2x_set_eth_mac(bp, false);
3903 if (rc)
3904 return rc;
3905 }
3906
3907 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
3908 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3909
3910 if (netif_running(dev))
3911 rc = bnx2x_set_eth_mac(bp, true);
3912
3913 return rc;
3914 }
3915
3916 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3917 {
3918 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3919 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
3920 u8 cos;
3921
3922 /* Common */
3923
3924 if (IS_FCOE_IDX(fp_index)) {
3925 memset(sb, 0, sizeof(union host_hc_status_block));
3926 fp->status_blk_mapping = 0;
3927 } else {
3928 /* status blocks */
3929 if (!CHIP_IS_E1x(bp))
3930 BNX2X_PCI_FREE(sb->e2_sb,
3931 bnx2x_fp(bp, fp_index,
3932 status_blk_mapping),
3933 sizeof(struct host_hc_status_block_e2));
3934 else
3935 BNX2X_PCI_FREE(sb->e1x_sb,
3936 bnx2x_fp(bp, fp_index,
3937 status_blk_mapping),
3938 sizeof(struct host_hc_status_block_e1x));
3939 }
3940
3941 /* Rx */
3942 if (!skip_rx_queue(bp, fp_index)) {
3943 bnx2x_free_rx_bds(fp);
3944
3945 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3946 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3947 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3948 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3949 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3950
3951 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3952 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3953 sizeof(struct eth_fast_path_rx_cqe) *
3954 NUM_RCQ_BD);
3955
3956 /* SGE ring */
3957 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3958 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3959 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3960 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3961 }
3962
3963 /* Tx */
3964 if (!skip_tx_queue(bp, fp_index)) {
3965 /* fastpath tx rings: tx_buf tx_desc */
3966 for_each_cos_in_tx_queue(fp, cos) {
3967 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
3968
3969 DP(NETIF_MSG_IFDOWN,
3970 "freeing tx memory of fp %d cos %d cid %d\n",
3971 fp_index, cos, txdata->cid);
3972
3973 BNX2X_FREE(txdata->tx_buf_ring);
3974 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3975 txdata->tx_desc_mapping,
3976 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3977 }
3978 }
3979 /* end of fastpath */
3980 }
3981
3982 void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
3983 {
3984 int i;
3985 for_each_cnic_queue(bp, i)
3986 bnx2x_free_fp_mem_at(bp, i);
3987 }
3988
3989 void bnx2x_free_fp_mem(struct bnx2x *bp)
3990 {
3991 int i;
3992 for_each_eth_queue(bp, i)
3993 bnx2x_free_fp_mem_at(bp, i);
3994 }
3995
3996 static void set_sb_shortcuts(struct bnx2x *bp, int index)
3997 {
3998 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
3999 if (!CHIP_IS_E1x(bp)) {
4000 bnx2x_fp(bp, index, sb_index_values) =
4001 (__le16 *)status_blk.e2_sb->sb.index_values;
4002 bnx2x_fp(bp, index, sb_running_index) =
4003 (__le16 *)status_blk.e2_sb->sb.running_index;
4004 } else {
4005 bnx2x_fp(bp, index, sb_index_values) =
4006 (__le16 *)status_blk.e1x_sb->sb.index_values;
4007 bnx2x_fp(bp, index, sb_running_index) =
4008 (__le16 *)status_blk.e1x_sb->sb.running_index;
4009 }
4010 }
4011
4012 /* Returns the number of actually allocated BDs */
4013 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4014 int rx_ring_size)
4015 {
4016 struct bnx2x *bp = fp->bp;
4017 u16 ring_prod, cqe_ring_prod;
4018 int i, failure_cnt = 0;
4019
4020 fp->rx_comp_cons = 0;
4021 cqe_ring_prod = ring_prod = 0;
4022
4023 	/* This routine is called only during fw init so
4024 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4025 */
4026 for (i = 0; i < rx_ring_size; i++) {
4027 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
4028 failure_cnt++;
4029 continue;
4030 }
4031 ring_prod = NEXT_RX_IDX(ring_prod);
4032 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4033 WARN_ON(ring_prod <= (i - failure_cnt));
4034 }
4035
4036 if (failure_cnt)
4037 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4038 i - failure_cnt, fp->index);
4039
4040 fp->rx_bd_prod = ring_prod;
4041 /* Limit the CQE producer by the CQE ring size */
4042 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4043 cqe_ring_prod);
4044 fp->rx_pkt = fp->rx_calls = 0;
4045
4046 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4047
4048 return i - failure_cnt;
4049 }
4050
4051 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4052 {
4053 int i;
4054
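/* The last CQE of each RCQ page is reused as a "next page" pointer:
 * point it at the following page, wrapping the last page back to the
 * first one via (i % NUM_RCQ_RINGS).
 */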
4055 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4056 struct eth_rx_cqe_next_page *nextpg;
4057
4058 nextpg = (struct eth_rx_cqe_next_page *)
4059 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4060 nextpg->addr_hi =
4061 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4062 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4063 nextpg->addr_lo =
4064 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4065 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4066 }
4067 }
4068
4069 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4070 {
4071 union host_hc_status_block *sb;
4072 struct bnx2x_fastpath *fp = &bp->fp[index];
4073 int ring_size = 0;
4074 u8 cos;
4075 int rx_ring_size = 0;
4076
4077 if (!bp->rx_ring_size &&
4078 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
4079 rx_ring_size = MIN_RX_SIZE_NONTPA;
4080 bp->rx_ring_size = rx_ring_size;
4081 } else if (!bp->rx_ring_size) {
4082 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4083
4084 if (CHIP_IS_E3(bp)) {
4085 u32 cfg = SHMEM_RD(bp,
4086 dev_info.port_hw_config[BP_PORT(bp)].
4087 default_cfg);
4088
4089 /* Decrease ring size for 1G functions */
4090 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4091 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4092 rx_ring_size /= 10;
4093 }
4094
4095 		/* allocate at least the number of buffers required by FW */
4096 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4097 MIN_RX_SIZE_TPA, rx_ring_size);
4098
4099 bp->rx_ring_size = rx_ring_size;
4100 } else /* if rx_ring_size specified - use it */
4101 rx_ring_size = bp->rx_ring_size;
4102
4103 /* Common */
4104 sb = &bnx2x_fp(bp, index, status_blk);
4105
4106 if (!IS_FCOE_IDX(index)) {
4107 /* status blocks */
4108 if (!CHIP_IS_E1x(bp))
4109 BNX2X_PCI_ALLOC(sb->e2_sb,
4110 &bnx2x_fp(bp, index, status_blk_mapping),
4111 sizeof(struct host_hc_status_block_e2));
4112 else
4113 BNX2X_PCI_ALLOC(sb->e1x_sb,
4114 &bnx2x_fp(bp, index, status_blk_mapping),
4115 sizeof(struct host_hc_status_block_e1x));
4116 }
4117
4118 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4119 * set shortcuts for it.
4120 */
4121 if (!IS_FCOE_IDX(index))
4122 set_sb_shortcuts(bp, index);
4123
4124 /* Tx */
4125 if (!skip_tx_queue(bp, index)) {
4126 /* fastpath tx rings: tx_buf tx_desc */
4127 for_each_cos_in_tx_queue(fp, cos) {
4128 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4129
4130 DP(NETIF_MSG_IFUP,
4131 "allocating tx memory of fp %d cos %d\n",
4132 index, cos);
4133
4134 BNX2X_ALLOC(txdata->tx_buf_ring,
4135 sizeof(struct sw_tx_bd) * NUM_TX_BD);
4136 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
4137 &txdata->tx_desc_mapping,
4138 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4139 }
4140 }
4141
4142 /* Rx */
4143 if (!skip_rx_queue(bp, index)) {
4144 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4145 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
4146 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4147 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
4148 &bnx2x_fp(bp, index, rx_desc_mapping),
4149 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4150
4151 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
4152 &bnx2x_fp(bp, index, rx_comp_mapping),
4153 sizeof(struct eth_fast_path_rx_cqe) *
4154 NUM_RCQ_BD);
4155
4156 /* SGE ring */
4157 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
4158 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4159 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
4160 &bnx2x_fp(bp, index, rx_sge_mapping),
4161 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4162 /* RX BD ring */
4163 bnx2x_set_next_page_rx_bd(fp);
4164
4165 /* CQ ring */
4166 bnx2x_set_next_page_rx_cq(fp);
4167
4168 /* BDs */
4169 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4170 if (ring_size < rx_ring_size)
4171 goto alloc_mem_err;
4172 }
4173
4174 return 0;
4175
4176 /* handles low memory cases */
4177 alloc_mem_err:
4178 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4179 index, ring_size);
4180 	/* FW will drop all packets if the queue is not big enough.
4181 	 * In that case we disable the queue.
4182 	 * The minimum size differs for OOO, TPA and non-TPA queues.
4183 */
4184 if (ring_size < (fp->disable_tpa ?
4185 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4186 /* release memory allocated for this queue */
4187 bnx2x_free_fp_mem_at(bp, index);
4188 return -ENOMEM;
4189 }
4190 return 0;
4191 }
4192
4193 int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4194 {
4195 if (!NO_FCOE(bp))
4196 /* FCoE */
4197 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4198 		/* we will fail the load process instead of marking
4199 		 * NO_FCOE_FLAG
4200 */
4201 return -ENOMEM;
4202
4203 return 0;
4204 }
4205
4206 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4207 {
4208 int i;
4209
4210 /* 1. Allocate FP for leading - fatal if error
4211 * 2. Allocate RSS - fix number of queues if error
4212 */
4213
4214 /* leading */
4215 if (bnx2x_alloc_fp_mem_at(bp, 0))
4216 return -ENOMEM;
4217
4218 /* RSS */
4219 for_each_nondefault_eth_queue(bp, i)
4220 if (bnx2x_alloc_fp_mem_at(bp, i))
4221 break;
4222
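/* Illustrative example: if 8 ETH queues were requested but allocation failed
 * at queue 5, delta is 3, the CNIC (FCoE) fastpath is moved down by 3 slots
 * and the queue counts are shrunk to match.
 */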
4223 /* handle memory failures */
4224 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4225 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4226
4227 WARN_ON(delta < 0);
4228 if (CNIC_SUPPORT(bp))
4229 /* move non eth FPs next to last eth FP
4230 * must be done in that order
4231 * FCOE_IDX < FWD_IDX < OOO_IDX
4232 */
4233
4234 			/* move the FCoE fp even if NO_FCOE_FLAG is on */
4235 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4236 bp->num_ethernet_queues -= delta;
4237 bp->num_queues = bp->num_ethernet_queues +
4238 bp->num_cnic_queues;
4239 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4240 bp->num_queues + delta, bp->num_queues);
4241 }
4242
4243 return 0;
4244 }
4245
4246 void bnx2x_free_mem_bp(struct bnx2x *bp)
4247 {
4248 kfree(bp->fp->tpa_info);
4249 kfree(bp->fp);
4250 kfree(bp->sp_objs);
4251 kfree(bp->fp_stats);
4252 kfree(bp->bnx2x_txq);
4253 kfree(bp->msix_table);
4254 kfree(bp->ilt);
4255 }
4256
4257 int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4258 {
4259 struct bnx2x_fastpath *fp;
4260 struct msix_entry *tbl;
4261 struct bnx2x_ilt *ilt;
4262 int msix_table_size = 0;
4263 int fp_array_size, txq_array_size;
4264 int i;
4265
4266 /*
4267 	 * The biggest MSI-X table we might need is the maximum number of fast
4268 	 * path IGU SBs plus the default SB (for the PF).
4269 */
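/* E.g. a PF whose igu_sb_cnt is 16 would size the table for 17 entries:
 * 16 fastpath vectors plus one default/slowpath vector.
 */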
4270 msix_table_size = bp->igu_sb_cnt;
4271 if (IS_PF(bp))
4272 msix_table_size++;
4273 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4274
4275 /* fp array: RSS plus CNIC related L2 queues */
4276 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4277 	BNX2X_DEV_INFO("fp_array_size %d\n", fp_array_size);
4278
4279 fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
4280 if (!fp)
4281 goto alloc_err;
4282 for (i = 0; i < fp_array_size; i++) {
4283 fp[i].tpa_info =
4284 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4285 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4286 if (!(fp[i].tpa_info))
4287 goto alloc_err;
4288 }
4289
4290 bp->fp = fp;
4291
4292 /* allocate sp objs */
4293 bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs),
4294 GFP_KERNEL);
4295 if (!bp->sp_objs)
4296 goto alloc_err;
4297
4298 /* allocate fp_stats */
4299 bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats),
4300 GFP_KERNEL);
4301 if (!bp->fp_stats)
4302 goto alloc_err;
4303
4304 /* Allocate memory for the transmission queues array */
4305 txq_array_size =
4306 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4307 	BNX2X_DEV_INFO("txq_array_size %d\n", txq_array_size);
4308
4309 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4310 GFP_KERNEL);
4311 if (!bp->bnx2x_txq)
4312 goto alloc_err;
4313
4314 /* msix table */
4315 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4316 if (!tbl)
4317 goto alloc_err;
4318 bp->msix_table = tbl;
4319
4320 /* ilt */
4321 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4322 if (!ilt)
4323 goto alloc_err;
4324 bp->ilt = ilt;
4325
4326 return 0;
4327 alloc_err:
4328 bnx2x_free_mem_bp(bp);
4329 return -ENOMEM;
4330 }
4332
4333 int bnx2x_reload_if_running(struct net_device *dev)
4334 {
4335 struct bnx2x *bp = netdev_priv(dev);
4336
4337 if (unlikely(!netif_running(dev)))
4338 return 0;
4339
4340 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4341 return bnx2x_nic_load(bp, LOAD_NORMAL);
4342 }
4343
4344 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4345 {
4346 u32 sel_phy_idx = 0;
4347 if (bp->link_params.num_phys <= 1)
4348 return INT_PHY;
4349
4350 if (bp->link_vars.link_up) {
4351 sel_phy_idx = EXT_PHY1;
4352 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4353 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4354 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4355 sel_phy_idx = EXT_PHY2;
4356 } else {
4357
4358 switch (bnx2x_phy_selection(&bp->link_params)) {
4359 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4360 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4361 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4362 sel_phy_idx = EXT_PHY1;
4363 break;
4364 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4365 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4366 sel_phy_idx = EXT_PHY2;
4367 break;
4368 }
4369 }
4370
4371 return sel_phy_idx;
4372 }
4373
4374 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4375 {
4376 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4377 /*
4378 	 * The selected active PHY is always after swapping (in case PHY
4379 * swapping is enabled). So when swapping is enabled, we need to reverse
4380 * the configuration
4381 */
4382
4383 if (bp->link_params.multi_phy_config &
4384 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4385 if (sel_phy_idx == EXT_PHY1)
4386 sel_phy_idx = EXT_PHY2;
4387 else if (sel_phy_idx == EXT_PHY2)
4388 sel_phy_idx = EXT_PHY1;
4389 }
4390 return LINK_CONFIG_IDX(sel_phy_idx);
4391 }
4392
4393 #ifdef NETDEV_FCOE_WWNN
4394 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4395 {
4396 struct bnx2x *bp = netdev_priv(dev);
4397 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4398
4399 switch (type) {
4400 case NETDEV_FCOE_WWNN:
4401 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4402 cp->fcoe_wwn_node_name_lo);
4403 break;
4404 case NETDEV_FCOE_WWPN:
4405 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4406 cp->fcoe_wwn_port_name_lo);
4407 break;
4408 default:
4409 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4410 return -EINVAL;
4411 }
4412
4413 return 0;
4414 }
4415 #endif
4416
4417 /* called with rtnl_lock */
4418 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4419 {
4420 struct bnx2x *bp = netdev_priv(dev);
4421
4422 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4423 		BNX2X_ERR("Can't change the MTU during parity recovery\n");
4424 return -EAGAIN;
4425 }
4426
4427 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
4428 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4429 BNX2X_ERR("Can't support requested MTU size\n");
4430 return -EINVAL;
4431 }
4432
4433 /* This does not race with packet allocation
4434 * because the actual alloc size is
4435 * only updated as part of load
4436 */
4437 dev->mtu = new_mtu;
4438
4439 return bnx2x_reload_if_running(dev);
4440 }
4441
4442 netdev_features_t bnx2x_fix_features(struct net_device *dev,
4443 netdev_features_t features)
4444 {
4445 struct bnx2x *bp = netdev_priv(dev);
4446
4447 /* TPA requires Rx CSUM offloading */
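/* Aggregation (LRO/GRO via TPA) relies on hardware-validated checksums,
 * so both are cleared when RX checksum offload is off or TPA is disabled.
 */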
4448 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
4449 features &= ~NETIF_F_LRO;
4450 features &= ~NETIF_F_GRO;
4451 }
4452
4453 return features;
4454 }
4455
4456 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4457 {
4458 struct bnx2x *bp = netdev_priv(dev);
4459 u32 flags = bp->flags;
4460 bool bnx2x_reload = false;
4461
4462 if (features & NETIF_F_LRO)
4463 flags |= TPA_ENABLE_FLAG;
4464 else
4465 flags &= ~TPA_ENABLE_FLAG;
4466
4467 if (features & NETIF_F_GRO)
4468 flags |= GRO_ENABLE_FLAG;
4469 else
4470 flags &= ~GRO_ENABLE_FLAG;
4471
4472 if (features & NETIF_F_LOOPBACK) {
4473 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4474 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4475 bnx2x_reload = true;
4476 }
4477 } else {
4478 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4479 bp->link_params.loopback_mode = LOOPBACK_NONE;
4480 bnx2x_reload = true;
4481 }
4482 }
4483
4484 if (flags ^ bp->flags) {
4485 bp->flags = flags;
4486 bnx2x_reload = true;
4487 }
4488
4489 if (bnx2x_reload) {
4490 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4491 return bnx2x_reload_if_running(dev);
4492 /* else: bnx2x_nic_load() will be called at end of recovery */
4493 }
4494
4495 return 0;
4496 }
4497
4498 void bnx2x_tx_timeout(struct net_device *dev)
4499 {
4500 struct bnx2x *bp = netdev_priv(dev);
4501
4502 #ifdef BNX2X_STOP_ON_ERROR
4503 if (!bp->panic)
4504 bnx2x_panic();
4505 #endif
4506
4507 smp_mb__before_clear_bit();
4508 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
4509 smp_mb__after_clear_bit();
4510
4511 /* This allows the netif to be shutdown gracefully before resetting */
4512 schedule_delayed_work(&bp->sp_rtnl_task, 0);
4513 }
4514
4515 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4516 {
4517 struct net_device *dev = pci_get_drvdata(pdev);
4518 struct bnx2x *bp;
4519
4520 if (!dev) {
4521 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4522 return -ENODEV;
4523 }
4524 bp = netdev_priv(dev);
4525
4526 rtnl_lock();
4527
4528 pci_save_state(pdev);
4529
4530 if (!netif_running(dev)) {
4531 rtnl_unlock();
4532 return 0;
4533 }
4534
4535 netif_device_detach(dev);
4536
4537 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
4538
4539 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4540
4541 rtnl_unlock();
4542
4543 return 0;
4544 }
4545
4546 int bnx2x_resume(struct pci_dev *pdev)
4547 {
4548 struct net_device *dev = pci_get_drvdata(pdev);
4549 struct bnx2x *bp;
4550 int rc;
4551
4552 if (!dev) {
4553 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4554 return -ENODEV;
4555 }
4556 bp = netdev_priv(dev);
4557
4558 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4559 BNX2X_ERR("Handling parity error recovery. Try again later\n");
4560 return -EAGAIN;
4561 }
4562
4563 rtnl_lock();
4564
4565 pci_restore_state(pdev);
4566
4567 if (!netif_running(dev)) {
4568 rtnl_unlock();
4569 return 0;
4570 }
4571
4572 bnx2x_set_power_state(bp, PCI_D0);
4573 netif_device_attach(dev);
4574
4575 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4576
4577 rtnl_unlock();
4578
4579 return rc;
4580 }
4581
4582
4583 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4584 u32 cid)
4585 {
4586 /* ustorm cxt validation */
4587 cxt->ustorm_ag_context.cdu_usage =
4588 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4589 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4590 /* xcontext validation */
4591 cxt->xstorm_ag_context.cdu_reserved =
4592 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4593 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4594 }
4595
4596 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4597 u8 fw_sb_id, u8 sb_index,
4598 u8 ticks)
4599 {
4601 u32 addr = BAR_CSTRORM_INTMEM +
4602 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4603 REG_WR8(bp, addr, ticks);
4604 DP(NETIF_MSG_IFUP,
4605 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4606 port, fw_sb_id, sb_index, ticks);
4607 }
4608
4609 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4610 u16 fw_sb_id, u8 sb_index,
4611 u8 disable)
4612 {
4613 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4614 u32 addr = BAR_CSTRORM_INTMEM +
4615 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
4616 u16 flags = REG_RD16(bp, addr);
4617 /* clear and set */
4618 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4619 flags |= enable_flag;
4620 REG_WR16(bp, addr, flags);
4621 DP(NETIF_MSG_IFUP,
4622 "port %x fw_sb_id %d sb_index %d disable %d\n",
4623 port, fw_sb_id, sb_index, disable);
4624 }
4625
4626 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4627 u8 sb_index, u8 disable, u16 usec)
4628 {
4629 int port = BP_PORT(bp);
4630 u8 ticks = usec / BNX2X_BTR;
4631
4632 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4633
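/* Either an explicit disable request or a zero timeout disables this
 * status block index; otherwise coalescing stays enabled.
 */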
4634 disable = disable ? 1 : (usec ? 0 : 1);
4635 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4636 }