/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"
/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi is kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
				(bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}
/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:        driver handle
 * @buf:       character buffer to fill with the fw name
 * @buf_len:   length of the above buffer
 *
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, PHY_FW_VER_LEN);
		strlcpy(buf, bp->fw_ver, buf_len);
		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
			 "bc %d.%d.%d%s%s",
			 (bp->common.bc_ver & 0xff0000) >> 16,
			 (bp->common.bc_ver & 0xff00) >> 8,
			 (bp->common.bc_ver & 0xff),
			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}
/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp:	driver handle
 * @delta:	number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along the array could cause memory to be overridden
	 */
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}
int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
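/* A transmitted packet occupies a chain of buffer descriptors: a start
 * BD, an optional parse BD and TSO split-header BD (which carry no DMA
 * mapping), and one data BD per fragment. The helper below walks that
 * chain, unmaps every mapped BD and releases the skb.
 */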
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	/* unmap first bd */
	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
	}

	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
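/* TX completion handler: advances the SW consumer towards the HW
 * consumer taken from the status block, frees the completed packets
 * and re-wakes the netdev queue once enough BDs are available again.
 */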
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent reenabling the queue
		 * while it's empty. This could have happened if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}
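/* The next two helpers maintain the SGE producer used by TPA.
 * last_max_sge tracks the highest SGE index reported by the FW;
 * SUB_S16() gives a wrap-around-safe signed 16-bit comparison.
 */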
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}
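/* Reclaim SGE ring entries consumed by the FW for an aggregation:
 * used pages are cleared in the 64-bit sge_mask vector and the
 * producer is advanced over fully cleared mask elements only, so a
 * partially used element stops the sweep.
 */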
static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
/* Set Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    bool *l4_rxhash)
{
	/* Set Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
			     (htype == TCP_IPV6_HASH_TYPE);
		return le32_to_cpu(cqe->rss_hash_result);
	}
	*l4_rxhash = false;
	return 0;
}
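/* Open a TPA aggregation "bin": the buffer at the consumer becomes the
 * head of the aggregation and is parked in tpa_info->first_buf, while
 * a spare buffer from the pool is mapped and published at the producer
 * so the RX BD ring stays full.
 */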
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
		tpa_info->full_page =
			SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
 *
 * @bp:			driver handle
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 */
static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
			     u16 len_on_bd)
{
	/*
	 * TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6)
		hdrs_len += sizeof(struct ipv6hdr);
	else /* IPv4 */
		hdrs_len += sizeof(struct iphdr);

	/* Check if there was a TCP timestamp, if there is one it will
	 * always be 12 bytes length: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	return len_on_bd - hdrs_len;
}
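/* Worked example for the MSS estimate above: for an IPv4 aggregation
 * with TCP timestamps, hdrs_len = ETH_HLEN (14) + sizeof(struct tcphdr)
 * (20) + sizeof(struct iphdr) (20) + TPA_TSTAMP_OPT_LEN (12) = 66, so
 * the reported MSS is len_on_bd - 66.
 */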
static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
			      struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL)) {
		BNX2X_ERR("Can't alloc sge\n");
		return -ENOMEM;
	}

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
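/* Attach the SGE pages of a finished aggregation to the head skb as
 * paged fragments. LRO adds one fragment per SGE; GRO slices each page
 * into gro_size-sized fragments so the stack can re-segment the
 * aggregated packet.
 */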
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size) {
		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
					tpa_info->parsing_flags, len_on_bd);

		/* set for GRO */
		if (fp->mode == TPA_MODE_GRO)
			skb_shinfo(skb)->gso_type =
			    (GET_FLAG(tpa_info->parsing_flags,
				      PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
						PRS_FLAG_OVERETH_IPV6) ?
				SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size,
					 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page, offset, len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
	if (fp->rx_frag_size)
		put_page(virt_to_head_page(data));
	else
		kfree(data);
}
static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
{
	if (fp->rx_frag_size)
		return netdev_alloc_frag(fp->rx_frag_size);

	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
}
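/* Close a TPA aggregation bin: replace the head buffer, build an skb
 * around the old data, append the SGE pages and hand the result to
 * GRO. On any allocation failure the packet is dropped but the buffer
 * is kept in the bin.
 */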
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = bnx2x_frag_alloc(fp);
	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, fp->rx_frag_size);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb->rxhash = tpa_info->rxhash;
		skb->l4_rxhash = tpa_info->l4_rxhash;

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
			napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	bnx2x_frag_free(fp, new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}
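/* Allocate a fresh RX data buffer (page frag or kmalloc, see
 * bnx2x_frag_alloc()), map it for DMA and publish its address in the
 * RX BD at @index.
 */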
static int bnx2x_alloc_rx_data(struct bnx2x *bp,
			       struct bnx2x_fastpath *fp, u16 index)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = bnx2x_frag_alloc(fp);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		bnx2x_frag_free(fp, data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
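/* Propagate the HW checksum validation result into skb->ip_summed and
 * count HW-detected checksum errors in the per-queue statistics.
 */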
static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
			 struct bnx2x_fastpath *fp,
			 struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */
	if (cqe->fast_path_cqe.status_flags &
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
		return;

	/* If L4 validation was done, check if an error was found. */

	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}
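/* Main NAPI RX routine: processes up to @budget completions from the
 * RCQ, dispatching slowpath events, TPA start/stop CQEs and regular
 * fastpath packets.
 */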
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		struct eth_fast_path_rx_cqe *cqe_fp;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;
		bool l4_rxhash;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x err %x status %x queue %x vlan %x len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->disable_tpa &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;
			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR flags %x rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = netdev_alloc_skb_ip_align(bp->dev, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, fp->rx_frag_size);
				if (unlikely(!skb)) {
					bnx2x_frag_free(fp, data);
					bnx2x_fp_qstats(bp, fp)->
							rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a non-LRO skb */
		skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
		skb->l4_rxhash = l4_rxhash;

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM)
			bnx2x_csum_validate(skb, cqe, fp,
					    bnx2x_fp_qstats(bp, fp));

		skb_record_rx_queue(skb, fp->rx_queue);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb,
					       le16_to_cpu(cqe_fp->vlan_tag));
		napi_gro_receive(&fp->napi, skb);

next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
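/* Per-queue MSI-X interrupt handler: ack the status block with
 * interrupts disabled and defer all work to NAPI.
 */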
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);

	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}
1014 void bnx2x_acquire_phy_lock(struct bnx2x
*bp
)
1016 mutex_lock(&bp
->port
.phy_mutex
);
1018 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_MDIO
);
1021 void bnx2x_release_phy_lock(struct bnx2x
*bp
)
1023 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_MDIO
);
1025 mutex_unlock(&bp
->port
.phy_mutex
);
/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}
/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static void bnx2x_fill_report_data(struct bnx2x *bp,
				   struct bnx2x_link_report_data *data)
{
	u16 line_speed = bnx2x_get_mf_speed(bp);

	memset(data, 0, sizeof(*data));

	/* Fill the report data: effective line speed */
	data->line_speed = line_speed;

	/* Link is down */
	if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
		__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			  &data->link_report_flags);

	/* Full DUPLEX */
	if (bp->link_vars.duplex == DUPLEX_FULL)
		__set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);

	/* Rx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
		__set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);

	/* Tx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
		__set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
}
/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as a link/PHY state managing code to ensure a consistent link
 * reporting.
 */
void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}
/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	if (IS_PF(bp) && !CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	bp->link_cnt++;

	/* We are going to report a new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		const char *duplex;
		const char *flow;

		netif_carrier_on(bp->dev);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			duplex = "full";
		else
			duplex = "half";

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
				     &cur_data.link_report_flags))
					flow = "ON - receive & transmit";
				else
					flow = "ON - receive";
			} else {
				flow = "ON - transmit";
			}
		} else {
			flow = "none";
		}

		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
			    cur_data.line_speed, duplex, flow);
	}
}
static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));

		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}
static void bnx2x_free_tpa_pool(struct bnx2x *bp,
				struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
		u8 *data = first_buf->data;

		if (data == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}
		if (tpa_info->tpa_state == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(first_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);
		bnx2x_frag_free(fp, data);
		first_buf->data = NULL;
	}
}
void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
	}
}
void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod;
	int i, j;

	/* Allocate TPA resources */
	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (!fp->disable_tpa) {
			/* Fill the per-aggregation pool */
			for (i = 0; i < MAX_AGG_QS(bp); i++) {
				struct bnx2x_agg_info *tpa_info =
					&fp->tpa_info[i];
				struct sw_rx_bd *first_buf =
					&tpa_info->first_buf;

				first_buf->data = bnx2x_frag_alloc(fp);
				if (!first_buf->data) {
					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
						  j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set(first_buf, mapping, 0);
				tpa_info->tpa_state = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
					BNX2X_ERR("was only able to allocate %d rx sges\n",
						  i);
					BNX2X_ERR("disabling TPA for queue[%d]\n",
						  j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp, fp,
								ring_prod);
					bnx2x_free_tpa_pool(bp, fp,
							    MAX_AGG_QS(bp));
					fp->disable_tpa = 1;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;

		if (CHIP_IS_E1(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
		}
	}
}
static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
{
	u8 cos;
	struct bnx2x *bp = fp->bp;

	for_each_cos_in_tx_queue(fp, cos) {
		struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
		unsigned pkts_compl = 0, bytes_compl = 0;

		u16 sw_prod = txdata->tx_pkt_prod;
		u16 sw_cons = txdata->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
					  &pkts_compl, &bytes_compl);
			sw_cons++;
		}

		netdev_tx_reset_queue(
			netdev_get_tx_queue(bp->dev,
					    txdata->txq_index));
	}
}
static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue_cnic(bp, i) {
		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
	}
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
	}
}
static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	int i;

	/* ring wasn't allocated */
	if (fp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < NUM_RX_BD; i++) {
		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
		u8 *data = rx_buf->data;

		if (data == NULL)
			continue;
		dma_unmap_single(&bp->pdev->dev,
				 dma_unmap_addr(rx_buf, mapping),
				 fp->rx_buf_size, DMA_FROM_DEVICE);

		rx_buf->data = NULL;
		bnx2x_frag_free(fp, data);
	}
}
static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		bnx2x_free_rx_bds(&bp->fp[j]);
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int j;

	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		bnx2x_free_rx_bds(fp);

		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
	}
}

void bnx2x_free_skbs_cnic(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs_cnic(bp);
	bnx2x_free_rx_skbs_cnic(bp);
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
{
	/* load old values */
	u32 mf_cfg = bp->mf_config[BP_VN(bp)];

	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
		/* leave all but MAX value */
		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;

		/* set new MAX value */
		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
				& FUNC_MF_CFG_MAX_BW_MASK;

		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
	}
}
/**
 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 *
 * @bp:		driver handle
 * @nvecs:	number of vectors to be released
 */
static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
{
	int i, offset = 0;

	if (nvecs == offset)
		return;

	/* VFs don't have a default SB */
	if (IS_PF(bp)) {
		free_irq(bp->msix_table[offset].vector, bp->dev);
		DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
		   bp->msix_table[offset].vector);
		offset++;
	}

	if (CNIC_SUPPORT(bp)) {
		if (nvecs == offset)
			return;
		offset++;
	}

	for_each_eth_queue(bp, i) {
		if (nvecs == offset)
			return;
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
		   i, bp->msix_table[offset].vector);

		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
	}
}
void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG &&
	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
		int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);

		/* vfs don't have a default status block */
		if (IS_PF(bp))
			nvecs++;

		bnx2x_free_msix_irqs(bp, nvecs);
	} else {
		free_irq(bp->dev->irq, bp->dev);
	}
}
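/* Populate the MSI-X table (a slowpath vector for PFs, an optional
 * CNIC vector and one vector per ETH queue) and negotiate the vector
 * count with the PCI core, falling back to fewer queues or to a single
 * vector when not all vectors can be granted.
 */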
int bnx2x_enable_msix(struct bnx2x *bp)
{
	int msix_vec = 0, i, rc;

	/* VFs don't have a default status block */
	if (IS_PF(bp)) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
			       bp->msix_table[0].entry);
		msix_vec++;
	}

	/* Cnic requires an msix vector for itself */
	if (CNIC_SUPPORT(bp)) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
			       msix_vec, bp->msix_table[msix_vec].entry);
		msix_vec++;
	}

	/* We need separate vectors for ETH queues only (not FCoE) */
	for_each_eth_queue(bp, i) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
			       msix_vec, msix_vec, i);
		msix_vec++;
	}

	DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
	   msix_vec);

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);

	/*
	 * reconfigure number of tx/rx queues according to available
	 * MSI-X vectors
	 */
	if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
		/* how many fewer vectors will we have? */
		int diff = msix_vec - rc;

		BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);

		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);

		if (rc) {
			BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
			goto no_msix;
		}
		/*
		 * decrease number of queues by number of unallocated entries
		 */
		bp->num_ethernet_queues -= diff;
		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;

		BNX2X_DEV_INFO("New queue configuration set: %d\n",
			       bp->num_queues);
	} else if (rc > 0) {
		/* Get by with single vector */
		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
		if (rc) {
			BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
				       rc);
			goto no_msix;
		}

		BNX2X_DEV_INFO("Using single MSI-X vector\n");
		bp->flags |= USING_SINGLE_MSIX_FLAG;

		BNX2X_DEV_INFO("set number of queues to 1\n");
		bp->num_ethernet_queues = 1;
		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
	} else if (rc < 0) {
		BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
		goto no_msix;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;

no_msix:
	/* fall to INTx if not enough memory */
	if (rc == -ENOMEM)
		bp->flags |= DISABLE_MSI_FLAG;

	return rc;
}
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 0;

	/* no default status block for vf */
	if (IS_PF(bp)) {
		rc = request_irq(bp->msix_table[offset++].vector,
				 bnx2x_msix_sp_int, 0,
				 bp->dev->name, bp->dev);
		if (rc) {
			BNX2X_ERR("request sp irq failed\n");
			return -EBUSY;
		}
	}

	if (CNIC_SUPPORT(bp))
		offset++;

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
				  bp->msix_table[offset].vector, rc);
			bnx2x_free_msix_irqs(bp, offset);
			return -EBUSY;
		}

		offset++;
	}

	i = BNX2X_NUM_ETH_QUEUES(bp);
	if (IS_PF(bp)) {
		offset = 1 + CNIC_SUPPORT(bp);
		netdev_info(bp->dev,
			    "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
			    bp->msix_table[0].vector,
			    0, bp->msix_table[offset].vector,
			    i - 1, bp->msix_table[offset + i - 1].vector);
	} else {
		offset = CNIC_SUPPORT(bp);
		netdev_info(bp->dev,
			    "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
			    0, bp->msix_table[offset].vector,
			    i - 1, bp->msix_table[offset + i - 1].vector);
	}

	return 0;
}
int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		BNX2X_DEV_INFO("MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}
static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	unsigned int irq;

	if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
		flags = 0;
	else
		flags = IRQF_SHARED;

	if (bp->flags & USING_MSIX_FLAG)
		irq = bp->msix_table[0].vector;
	else
		irq = bp->pdev->irq;

	return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
}
static int bnx2x_setup_irqs(struct bnx2x *bp)
{
	int rc = 0;
	if (bp->flags & USING_MSIX_FLAG &&
	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc)
			return rc;
	} else {
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			return rc;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI IRQ %d\n",
				    bp->dev->irq);
		}
		if (bp->flags & USING_MSIX_FLAG) {
			bp->dev->irq = bp->msix_table[0].vector;
			netdev_info(bp->dev, "using MSIX IRQ %d\n",
				    bp->dev->irq);
		}
	}

	return 0;
}
*bp
)
1708 for_each_rx_queue_cnic(bp
, i
)
1709 napi_enable(&bnx2x_fp(bp
, i
, napi
));
1712 static void bnx2x_napi_enable(struct bnx2x
*bp
)
1716 for_each_eth_queue(bp
, i
)
1717 napi_enable(&bnx2x_fp(bp
, i
, napi
));
1720 static void bnx2x_napi_disable_cnic(struct bnx2x
*bp
)
1724 for_each_rx_queue_cnic(bp
, i
)
1725 napi_disable(&bnx2x_fp(bp
, i
, napi
));
1728 static void bnx2x_napi_disable(struct bnx2x
*bp
)
1732 for_each_eth_queue(bp
, i
)
1733 napi_disable(&bnx2x_fp(bp
, i
, napi
));
void bnx2x_netif_start(struct bnx2x *bp)
{
	if (netif_running(bp->dev)) {
		bnx2x_napi_enable(bp);
		if (CNIC_LOADED(bp))
			bnx2x_napi_enable_cnic(bp);
		bnx2x_int_enable(bp);
		if (bp->state == BNX2X_STATE_OPEN)
			netif_tx_wake_all_queues(bp->dev);
	}
}

void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	if (CNIC_LOADED(bp))
		bnx2x_napi_disable_cnic(bp);
}
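/* ndo_select_queue() implementation: steer FCoE/FIP frames to the
 * dedicated FCoE TX ring and hash all other traffic over the ETH
 * queues.
 */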
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
		struct ethhdr *hdr = (struct ethhdr *)skb->data;
		u16 ether_type = ntohs(hdr->h_proto);

		/* Skip VLAN tag if present */
		if (ether_type == ETH_P_8021Q) {
			struct vlan_ethhdr *vhdr =
				(struct vlan_ethhdr *)skb->data;

			ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
		}

		/* If ethertype is FCoE or FIP - use FCoE ring */
		if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
			return bnx2x_fcoe_tx(bp, txq_index);
	}

	/* select a non-FCoE queue */
	return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
}
void bnx2x_set_num_queues(struct bnx2x *bp)
{
	/* RSS queues */
	bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);

	/* override in STORAGE SD modes */
	if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
		bp->num_ethernet_queues = 1;

	/* Add special queues */
	bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
	bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;

	BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
}
/**
 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
 *
 * @bp:		Driver handle
 *
 * We currently support at most 16 Tx queues for each CoS thus we will
 * allocate a multiple of 16 for ETH L2 rings according to the value of the
 * bp->max_cos.
 *
 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
 * index after all ETH L2 indices.
 *
 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
 * 16..31,...) with indices that are not coupled with any real Tx queue.
 *
 * The proper configuration of skb->queue_mapping is handled by
 * bnx2x_select_queue() and __skb_tx_hash().
 *
 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
 */
static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
{
	int rc, tx, rx;

	tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
	rx = BNX2X_NUM_ETH_QUEUES(bp);

	/* account for fcoe queue */
	if (include_cnic && !NO_FCOE(bp)) {
		rx++;
		tx++;
	}

	rc = netif_set_real_num_tx_queues(bp->dev, tx);
	if (rc) {
		BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
		return rc;
	}
	rc = netif_set_real_num_rx_queues(bp->dev, rx);
	if (rc) {
		BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
		return rc;
	}

	DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
			  tx, rx);

	return rc;
}
static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u32 mtu;

		/* Always use a mini-jumbo MTU for the FCoE L2 ring */
		if (IS_FCOE_IDX(i))
			/*
			 * Although there are no IP frames expected to arrive to
			 * this ring we still want to add an
			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
			 * overrun on very rare cases
			 */
			mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
		else
			mtu = bp->dev->mtu;
		fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
				  IP_HEADER_ALIGNMENT_PADDING +
				  ETH_OVREHEAD +
				  mtu +
				  BNX2X_FW_RX_ALIGN_END;
		/* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
		if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
			fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
		else
			fp->rx_frag_size = 0;
	}
}
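/* Build the default RSS indirection table, spreading flows over the
 * ETH queues, and push the RSS configuration to the device.
 */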
static int bnx2x_init_rss_pf(struct bnx2x *bp)
{
	int i;
	u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);

	/* Prepare the initial contents of the indirection table if RSS is
	 * enabled
	 */
	for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
		bp->rss_conf_obj.ind_table[i] =
			bp->fp->cl_id +
			ethtool_rxfh_indir_default(i, num_eth_queues);

	/*
	 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
	 * per-port, so if explicit configuration is needed, do it only
	 * for a PMF.
	 *
	 * For 57712 and newer on the other hand it's a per-function
	 * configuration.
	 */
	return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
}
int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
			bool config_hash)
{
	struct bnx2x_config_rss_params params = {NULL};

	/* Although RSS is meaningless when there is a single HW queue we
	 * still need it enabled in order to have HW Rx hash generated.
	 *
	 * if (!is_eth_multi(bp))
	 *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
	 */

	params.rss_obj = rss_obj;

	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);

	/* RSS configuration */
	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
	if (rss_obj->udp_rss_v4)
		__set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
	if (rss_obj->udp_rss_v6)
		__set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);

	/* Hash bits */
	params.rss_result_mask = MULTI_MASK;

	memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));

	if (config_hash) {
		/* RSS keys */
		prandom_bytes(params.rss_key, sizeof(params.rss_key));
		__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
	}

	return bnx2x_config_rss(bp, &params);
}
static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	struct bnx2x_func_state_params func_params = {NULL};

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_HW_INIT;

	func_params.params.hw_init.load_phase = load_code;

	return bnx2x_func_state_change(bp, &func_params);
}
/*
 * Cleans the objects that have internal lists without sending
 * ramrods. Should be run when interrupts are disabled.
 */
static void bnx2x_squeeze_objects(struct bnx2x *bp)
{
	int rc;
	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;

	/***************** Cleanup MACs' object first *************************/

	/* Wait for completion of requested */
	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	/* Perform a dry cleanup */
	__set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);

	/* Clean ETH primary MAC */
	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
	rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
				 &ramrod_flags);
	if (rc != 0)
		BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);

	/* Cleanup UC list */
	vlan_mac_flags = 0;
	__set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
				 &ramrod_flags);
	if (rc != 0)
		BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);

	/***************** Now clean mcast object *****************************/
	rparam.mcast_obj = &bp->mcast_obj;
	__set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);

	/* Add a DEL command... */
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
	if (rc < 0)
		BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
			  rc);

	/* ...and wait until all pending commands are cleared */
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
	while (rc != 0) {
		if (rc < 0) {
			BNX2X_ERR("Failed to clean multi-cast object: %d\n",
				  rc);
			return;
		}

		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
	}
}
#ifndef BNX2X_STOP_ON_ERROR
#define LOAD_ERROR_EXIT(bp, label) \
	do { \
		(bp)->state = BNX2X_STATE_ERROR; \
		goto label; \
	} while (0)

#define LOAD_ERROR_EXIT_CNIC(bp, label) \
	do { \
		bp->cnic_loaded = false; \
		goto label; \
	} while (0)
#else /*BNX2X_STOP_ON_ERROR*/
#define LOAD_ERROR_EXIT(bp, label) \
	do { \
		(bp)->state = BNX2X_STATE_ERROR; \
		(bp)->panic = 1; \
		return -EBUSY; \
	} while (0)
#define LOAD_ERROR_EXIT_CNIC(bp, label) \
	do { \
		bp->cnic_loaded = false; \
		(bp)->panic = 1; \
		return -EBUSY; \
	} while (0)
#endif /*BNX2X_STOP_ON_ERROR*/
static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
{
	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
		       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
}
static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
{
	int num_groups, vf_headroom = 0;
	int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;

	/* number of queues for statistics is number of eth queues + FCoE */
	u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;

	/* Total number of FW statistics requests =
	 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
	 * and fcoe l2 queue) stats + num of queues (which includes another 1
	 * for fcoe l2 queue if applicable)
	 */
	bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;

	/* vf stats appear in the request list, but their data is allocated by
	 * the VFs themselves. We don't include them in the bp->fw_stats_num as
	 * it is used to determine where to place the vf stats queries in the
	 * request struct
	 */
	if (IS_SRIOV(bp))
		vf_headroom = bnx2x_vf_headroom(bp);

	/* Request is built from stats_query_header and an array of
	 * stats_query_cmd_group each of which contains
	 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
	 * configured in the stats_query_header.
	 */
	num_groups =
		(((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
		 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
		 1 : 0));

	DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
	   bp->fw_stats_num, vf_headroom, num_groups);
	bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
		num_groups * sizeof(struct stats_query_cmd_group);

	/* Data for statistics requests + stats_counter
	 * stats_counter holds per-STORM counters that are incremented
	 * when STORM has finished with the current request.
	 * memory for FCoE offloaded statistics are counted anyway,
	 * even if they will not be sent.
	 * VF stats are not accounted for here as the data of VF stats is stored
	 * in memory allocated by the VF, not here.
	 */
	bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
		sizeof(struct per_pf_stats) +
		sizeof(struct fcoe_statistics_params) +
		sizeof(struct per_queue_stats) * num_queue_stats +
		sizeof(struct stats_counter);

	BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
			bp->fw_stats_data_sz + bp->fw_stats_req_sz);

	/* Set shortcuts */
	bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
	bp->fw_stats_req_mapping = bp->fw_stats_mapping;
	bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
		((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
	bp->fw_stats_data_mapping = bp->fw_stats_mapping +
		bp->fw_stats_req_sz;

	DP(BNX2X_MSG_SP, "statistics request base address set to %x %x",
	   U64_HI(bp->fw_stats_req_mapping),
	   U64_LO(bp->fw_stats_req_mapping));
	DP(BNX2X_MSG_SP, "statistics data base address set to %x %x",
	   U64_HI(bp->fw_stats_data_mapping),
	   U64_LO(bp->fw_stats_data_mapping));
	return 0;

alloc_mem_err:
	bnx2x_free_fw_stats_mem(bp);
	BNX2X_ERR("Can't allocate FW stats memory\n");
	return -ENOMEM;
}
/* send load request to mcp and analyze response */
static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
{
	/* init fw_seq */
	bp->fw_seq =
		(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
		 DRV_MSG_SEQ_NUMBER_MASK);
	BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);

	/* Get current FW pulse sequence */
	bp->fw_drv_pulse_wr_seq =
		(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
		 DRV_PULSE_SEQ_MASK);
	BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);

	/* load request */
	(*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
					DRV_MSG_CODE_LOAD_REQ_WITH_LFA);

	/* if mcp fails to respond we must abort */
	if (!(*load_code)) {
		BNX2X_ERR("MCP response failure, aborting\n");
		return -EBUSY;
	}

	/* If mcp refused (e.g. other port is in diagnostic mode) we
	 * must abort
	 */
	if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
		BNX2X_ERR("MCP refused load request, aborting\n");
		return -EBUSY;
	}
	return 0;
}
/* check whether another PF has already loaded FW to chip. In
 * virtualized environments a pf from another VM may have already
 * initialized the device including loading FW
 */
int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
{
	/* is another pf loaded on this engine? */
	if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
	    load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
		/* build my FW version dword */
		u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
			(BCM_5710_FW_MINOR_VERSION << 8) +
			(BCM_5710_FW_REVISION_VERSION << 16) +
			(BCM_5710_FW_ENGINEERING_VERSION << 24);

		/* read loaded FW from chip */
		u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);

		DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
		   loaded_fw, my_fw);

		/* abort nic load if version mismatch */
		if (my_fw != loaded_fw) {
			BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. aborting\n",
				  loaded_fw, my_fw);
			return -EBUSY;
		}
	}
	return 0;
}
/* returns the "mcp load_code" according to global load_count array */
static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
{
	int path = BP_PATH(bp);

	DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
	   path, load_count[path][0], load_count[path][1],
	   load_count[path][2]);
	load_count[path][0]++;
	load_count[path][1 + port]++;
	DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
	   path, load_count[path][0], load_count[path][1],
	   load_count[path][2]);
	if (load_count[path][0] == 1)
		return FW_MSG_CODE_DRV_LOAD_COMMON;
	else if (load_count[path][1 + port] == 1)
		return FW_MSG_CODE_DRV_LOAD_PORT;
	else
		return FW_MSG_CODE_DRV_LOAD_FUNCTION;
}
/* mark PMF if applicable */
static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
{
	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
		bp->port.pmf = 1;
		/* We need the barrier to ensure the ordering between the
		 * writing to bp->port.pmf here and reading it from the
		 * bnx2x_periodic_task().
		 */
		smp_mb();
	} else {
		bp->port.pmf = 0;
	}

	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
}
static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
{
	if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	     (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
	    (bp->common.shmem2_base)) {
		if (SHMEM2_HAS(bp, dcc_support))
			SHMEM2_WR(bp, dcc_support,
				  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
				   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
		if (SHMEM2_HAS(bp, afex_driver_support))
			SHMEM2_WR(bp, afex_driver_support,
				  SHMEM_AFEX_SUPPORTED_VERSION_ONE);
	}

	/* Set AFEX default VLAN tag to an invalid value */
	bp->afex_def_vlan_tag = -1;
}
/**
 * bnx2x_bz_fp - zero content of the fastpath structure.
 *
 * @bp:		driver handle
 * @index:	fastpath index to be zeroed
 *
 * Makes sure the contents of the bp->fp[index].napi is kept
 * intact.
 */
static void bnx2x_bz_fp(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index];

	int cos;
	struct napi_struct orig_napi = fp->napi;
	struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
	/* bzero bnx2x_fastpath contents */
	if (bp->stats_init) {
		memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
		memset(fp, 0, sizeof(*fp));
	} else {
		/* Keep Queue statistics */
		struct bnx2x_eth_q_stats *tmp_eth_q_stats;
		struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;

		tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
					  GFP_KERNEL);
		if (tmp_eth_q_stats)
			memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats,
			       sizeof(struct bnx2x_eth_q_stats));

		tmp_eth_q_stats_old =
			kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
				GFP_KERNEL);
		if (tmp_eth_q_stats_old)
			memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old,
			       sizeof(struct bnx2x_eth_q_stats_old));

		memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
		memset(fp, 0, sizeof(*fp));

		if (tmp_eth_q_stats) {
			memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats,
			       sizeof(struct bnx2x_eth_q_stats));
			kfree(tmp_eth_q_stats);
		}

		if (tmp_eth_q_stats_old) {
			memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old,
			       sizeof(struct bnx2x_eth_q_stats_old));
			kfree(tmp_eth_q_stats_old);
		}
	}

	/* Restore the NAPI object as it has been already initialized */
	fp->napi = orig_napi;
	fp->tpa_info = orig_tpa_info;
	fp->bp = bp;
	fp->index = index;
	if (IS_ETH_FP(fp))
		fp->max_cos = bp->max_cos;
	else
		/* Special queues support only one CoS */
		fp->max_cos = 1;

	/* Init txdata pointers */
	if (IS_FCOE_FP(fp))
		fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
	if (IS_ETH_FP(fp))
		for_each_cos_in_tx_queue(fp, cos)
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
				BNX2X_NUM_ETH_QUEUES(bp) + index];

	/* set the tpa flag for each queue. The tpa flag determines the queue
	 * minimal size so it must be set prior to queue memory allocation
	 */
	fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
			    (bp->flags & GRO_ENABLE_FLAG &&
			     bnx2x_mtu_allows_gro(bp->dev->mtu)));
	if (bp->flags & TPA_ENABLE_FLAG)
		fp->mode = TPA_MODE_LRO;
	else if (bp->flags & GRO_ENABLE_FLAG)
		fp->mode = TPA_MODE_GRO;

	/* We don't want TPA on an FCoE L2 ring */
	if (IS_FCOE_FP(fp))
		fp->disable_tpa = 1;
}
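/* Why the save/zero/restore dance above: the napi struct is registered
 * only once and the tpa_info array is allocated once at probe time
 * (bnx2x_alloc_mem_bp()), while queue statistics must survive events
 * like an MTU change that reload the driver. So everything persistent
 * is stashed in locals (or a temporary kzalloc'd copy), the fastpath
 * struct is memset to zero, and the survivors are copied back in.
 */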
int bnx2x_load_cnic(struct bnx2x *bp)
{
	int i, rc, port = BP_PORT(bp);

	DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");

	mutex_init(&bp->cnic_mutex);

	if (IS_PF(bp)) {
		rc = bnx2x_alloc_mem_cnic(bp);
		if (rc) {
			BNX2X_ERR("Unable to allocate bp memory for cnic\n");
			LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
		}
	}

	rc = bnx2x_alloc_fp_mem_cnic(bp);
	if (rc) {
		BNX2X_ERR("Unable to allocate memory for cnic fps\n");
		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
	}

	/* Update the number of queues with the cnic queues */
	rc = bnx2x_set_real_num_queues(bp, 1);
	if (rc) {
		BNX2X_ERR("Unable to set real_num_queues including cnic\n");
		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
	}

	/* Add all CNIC NAPI objects */
	bnx2x_add_all_napi_cnic(bp);
	DP(NETIF_MSG_IFUP, "cnic napi added\n");
	bnx2x_napi_enable_cnic(bp);

	rc = bnx2x_init_hw_func_cnic(bp);
	if (rc)
		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);

	bnx2x_nic_init_cnic(bp);

	if (IS_PF(bp)) {
		/* Enable Timer scan */
		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);

		/* setup cnic queues */
		for_each_cnic_queue(bp, i) {
			rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
			if (rc) {
				BNX2X_ERR("Queue setup failed\n");
				LOAD_ERROR_EXIT(bp, load_error_cnic2);
			}
		}
	}

	/* Initialize Rx filter. */
	netif_addr_lock_bh(bp->dev);
	bnx2x_set_rx_mode(bp->dev);
	netif_addr_unlock_bh(bp->dev);

	/* re-read iscsi info */
	bnx2x_get_iscsi_info(bp);
	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_setup_cnic_info(bp);
	bp->cnic_loaded = true;
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);

	DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");

	return 0;

#ifndef BNX2X_STOP_ON_ERROR
load_error_cnic2:
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);

load_error_cnic1:
	bnx2x_napi_disable_cnic(bp);
	/* Update the number of queues without the cnic queues */
	rc = bnx2x_set_real_num_queues(bp, 0);
	if (rc)
		BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
load_error_cnic0:
	BNX2X_ERR("CNIC-related load failed\n");
	bnx2x_free_fp_mem_cnic(bp);
	bnx2x_free_mem_cnic(bp);
	return rc;
#endif /* ! BNX2X_STOP_ON_ERROR */
}
/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	int port = BP_PORT(bp);
	int i, rc = 0, load_code = 0;

	DP(NETIF_MSG_IFUP, "Starting NIC load\n");
	DP(NETIF_MSG_IFUP,
	   "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic)) {
		BNX2X_ERR("Can't load NIC when there is panic\n");
		return -EPERM;
	}
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	/* Set the initial link reported state to link down */
	bnx2x_acquire_phy_lock(bp);
	memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
	__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		  &bp->last_reported_link.link_report_flags);
	bnx2x_release_phy_lock(bp);

	if (IS_PF(bp))
		/* must be called before memory allocation and HW init */
		bnx2x_ilt_set_info(bp);

	/*
	 * Zero fastpath structures preserving invariants like napi, which are
	 * allocated only once, fp index, max_cos, bp pointer.
	 * Also set fp->disable_tpa and txdata_ptr.
	 */
	DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
	for_each_queue(bp, i)
		bnx2x_bz_fp(bp, i);
	memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
				  bp->num_cnic_queues) *
				  sizeof(struct bnx2x_fp_txdata));

	bp->fcoe_init = false;

	/* Set the receive queues buffer size */
	bnx2x_set_rx_buf_size(bp);

	if (IS_PF(bp)) {
		rc = bnx2x_alloc_mem(bp);
		if (rc) {
			BNX2X_ERR("Unable to allocate bp memory\n");
			return rc;
		}
	}

	/* Allocated memory for FW statistics */
	if (bnx2x_alloc_fw_stats_mem(bp))
		LOAD_ERROR_EXIT(bp, load_error0);

	/* need to be done after alloc mem, since it's self adjusting to amount
	 * of memory available for RSS queues
	 */
	rc = bnx2x_alloc_fp_mem(bp);
	if (rc) {
		BNX2X_ERR("Unable to allocate memory for fps\n");
		LOAD_ERROR_EXIT(bp, load_error0);
	}

	/* request pf to initialize status blocks */
	if (IS_VF(bp)) {
		rc = bnx2x_vfpf_init(bp);
		if (rc)
			LOAD_ERROR_EXIT(bp, load_error0);
	}

	/* As long as bnx2x_alloc_mem() may possibly update
	 * bp->num_queues, bnx2x_set_real_num_queues() should always
	 * come after it. At this stage cnic queues are not counted.
	 */
	rc = bnx2x_set_real_num_queues(bp, 0);
	if (rc) {
		BNX2X_ERR("Unable to set real_num_queues\n");
		LOAD_ERROR_EXIT(bp, load_error0);
	}

	/* configure multi cos mappings in kernel.
	 * this configuration may be overridden by a multi class queue
	 * discipline or by a dcbx negotiation result.
	 */
	bnx2x_setup_tc(bp->dev, bp->max_cos);

	/* Add all NAPI objects */
	bnx2x_add_all_napi(bp);
	DP(NETIF_MSG_IFUP, "napi added\n");
	bnx2x_napi_enable(bp);

	if (IS_PF(bp)) {
		/* set pf load just before approaching the MCP */
		bnx2x_set_pf_load(bp);

		/* if mcp exists send load request and analyze response */
		if (!BP_NOMCP(bp)) {
			/* attempt to load pf */
			rc = bnx2x_nic_load_request(bp, &load_code);
			if (rc)
				LOAD_ERROR_EXIT(bp, load_error1);

			/* what did mcp say? */
			rc = bnx2x_nic_load_analyze_req(bp, load_code);
			if (rc) {
				bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
				LOAD_ERROR_EXIT(bp, load_error2);
			}
		} else {
			load_code = bnx2x_nic_load_no_mcp(bp, port);
		}

		/* mark pmf if applicable */
		bnx2x_nic_load_pmf(bp, load_code);

		/* Init Function state controlling object */
		bnx2x__init_func_obj(bp);

		/* Initialize HW */
		rc = bnx2x_init_hw(bp, load_code);
		if (rc) {
			BNX2X_ERR("HW init failed, aborting\n");
			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
			LOAD_ERROR_EXIT(bp, load_error2);
		}
	}

	/* Connect to IRQs */
	rc = bnx2x_setup_irqs(bp);
	if (rc) {
		BNX2X_ERR("setup irqs failed\n");
		if (IS_PF(bp))
			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		LOAD_ERROR_EXIT(bp, load_error2);
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Init per-function objects */
	if (IS_PF(bp)) {
		bnx2x_init_bp_objs(bp);
		bnx2x_iov_nic_init(bp);

		/* Set AFEX default VLAN tag to an invalid value */
		bp->afex_def_vlan_tag = -1;
		bnx2x_nic_load_afex_dcc(bp, load_code);
		bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
		rc = bnx2x_func_start(bp);
		if (rc) {
			BNX2X_ERR("Function start failed!\n");
			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);

			LOAD_ERROR_EXIT(bp, load_error3);
		}

		/* Send LOAD_DONE command to MCP */
		if (!BP_NOMCP(bp)) {
			load_code = bnx2x_fw_command(bp,
						     DRV_MSG_CODE_LOAD_DONE, 0);
			if (!load_code) {
				BNX2X_ERR("MCP response failure, aborting\n");
				rc = -EBUSY;
				LOAD_ERROR_EXIT(bp, load_error3);
			}
		}

		/* setup the leading queue */
		rc = bnx2x_setup_leading(bp);
		if (rc) {
			BNX2X_ERR("Setup leading failed!\n");
			LOAD_ERROR_EXIT(bp, load_error3);
		}

		/* set up the rest of the queues */
		for_each_nondefault_eth_queue(bp, i) {
			rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
			if (rc) {
				BNX2X_ERR("Queue setup failed\n");
				LOAD_ERROR_EXIT(bp, load_error3);
			}
		}

		/* setup rss */
		rc = bnx2x_init_rss_pf(bp);
		if (rc) {
			BNX2X_ERR("PF RSS init failed\n");
			LOAD_ERROR_EXIT(bp, load_error3);
		}

	} else { /* vf */
		for_each_eth_queue(bp, i) {
			rc = bnx2x_vfpf_setup_q(bp, i);
			if (rc) {
				BNX2X_ERR("Queue setup failed\n");
				LOAD_ERROR_EXIT(bp, load_error3);
			}
		}
	}

	/* Now when Clients are configured we are ready to work */
	bp->state = BNX2X_STATE_OPEN;

	/* Configure a ucast MAC */
	if (IS_PF(bp))
		rc = bnx2x_set_eth_mac(bp, true);
	else /* vf */
		rc = bnx2x_vfpf_set_mac(bp);
	if (rc) {
		BNX2X_ERR("Setting Ethernet MAC failed\n");
		LOAD_ERROR_EXIT(bp, load_error3);
	}

	if (IS_PF(bp) && bp->pending_max) {
		bnx2x_update_max_mf_config(bp, bp->pending_max);
		bp->pending_max = 0;
	}

	if (bp->port.pmf) {
		rc = bnx2x_initial_phy_init(bp, load_mode);
		if (rc)
			LOAD_ERROR_EXIT(bp, load_error3);
	}
	bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;

	/* Start fast path */

	/* Initialize Rx filter. */
	netif_addr_lock_bh(bp->dev);
	bnx2x_set_rx_mode(bp->dev);
	netif_addr_unlock_bh(bp->dev);

	/* Start the Tx */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only reenabled */
		netif_tx_wake_all_queues(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		smp_mb__after_clear_bit();
		break;

	case LOAD_DIAG:
	case LOAD_LOOPBACK_EXT:
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (bp->port.pmf)
		bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
	else
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	if (CNIC_ENABLED(bp))
		bnx2x_load_cnic(bp);

	if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
		/* mark driver is loaded in shmem2 */
		u32 val;
		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
			  val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
			  DRV_FLAGS_CAPABILITIES_LOADED_L2);
	}

	/* Wait for all pending SP commands to complete */
	if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
		BNX2X_ERR("Timeout waiting for SP elements to complete\n");
		bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
		return -EBUSY;
	}

	/* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
	if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
		bnx2x_dcbx_init(bp, false);

	DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");

	return 0;

#ifndef BNX2X_STOP_ON_ERROR
load_error3:
	if (IS_PF(bp)) {
		bnx2x_int_disable_sync(bp, 1);

		/* Clean queueable objects */
		bnx2x_squeeze_objects(bp);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error2:
	if (IS_PF(bp) && !BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
	}

	bp->port.pmf = 0;
load_error1:
	bnx2x_napi_disable(bp);

	/* clear pf_load status, as it was already set */
	if (IS_PF(bp))
		bnx2x_clear_pf_load(bp);
load_error0:
	bnx2x_free_fp_mem(bp);
	bnx2x_free_fw_stats_mem(bp);
	bnx2x_free_mem(bp);

	return rc;
#endif /* ! BNX2X_STOP_ON_ERROR */
}
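/* The load_error labels above unwind in reverse order of acquisition:
 * load_error3 undoes queue/function setup (interrupts, queueable
 * objects, skbs, SGEs, IRQs), load_error2 tells the MCP this driver
 * is gone, load_error1 kills NAPI and clears the pf-load mark, and
 * load_error0 frees the bulk memory. Jumping to a given label
 * therefore releases exactly what was acquired before the failure.
 */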
static int bnx2x_drain_tx_queues(struct bnx2x *bp)
{
	u8 rc;
	int i, cos;

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		for_each_cos_in_tx_queue(fp, cos)
			rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
		if (rc)
			return rc;
	}
	return 0;
}
/* must be called with rtnl_lock */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
{
	int i;
	bool global = false;

	DP(NETIF_MSG_IFUP, "Starting NIC unload\n");

	/* mark driver is unloaded in shmem2 */
	if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
		u32 val;
		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
			  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
	}

	if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
	    (bp->state == BNX2X_STATE_CLOSED ||
	     bp->state == BNX2X_STATE_ERROR)) {
		/* We can get here if the driver has been unloaded
		 * during parity error recovery and is either waiting for a
		 * leader to complete or for other functions to unload and
		 * then ifdown has been issued. In this case we want to
		 * unload and let other functions to complete a recovery
		 * process.
		 */
		bp->recovery_state = BNX2X_RECOVERY_DONE;
		bp->is_leader = 0;
		bnx2x_release_leader_lock(bp);
		smp_mb();

		DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
		BNX2X_ERR("Can't unload in closed or error state\n");
		return -EINVAL;
	}

	/*
	 * It's important to set the bp->state to the value different from
	 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
	 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
	 */
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
	smp_mb();

	if (CNIC_LOADED(bp))
		bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);

	/* Stop Tx */
	bnx2x_tx_disable(bp);
	netdev_reset_tc(bp->dev);

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	del_timer_sync(&bp->timer);

	if (IS_PF(bp)) {
		/* Set ALWAYS_ALIVE bit in shmem */
		bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
		bnx2x_drv_pulse(bp);
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_save_statistics(bp);
	}

	/* wait till consumers catch up with producers in all queues */
	bnx2x_drain_tx_queues(bp);

	/* if VF indicate to PF this function is going down (PF will delete sp
	 * elements and clear initializations
	 */
	if (IS_VF(bp))
		bnx2x_vfpf_close_vf(bp);
	else if (unload_mode != UNLOAD_RECOVERY)
		/* if this is a normal/close unload need to clean up chip*/
		bnx2x_chip_cleanup(bp, unload_mode, keep_link);
	else {
		/* Send the UNLOAD_REQUEST to the MCP */
		bnx2x_send_unload_req(bp, unload_mode);

		/*
		 * Prevent transactions to host from the functions on the
		 * engine that doesn't reset global blocks in case of global
		 * attention once global blocks are reset and gates are opened
		 * (the engine which leader will perform the recovery
		 * last)
		 */
		if (!CHIP_IS_E1x(bp))
			bnx2x_pf_disable(bp);

		/* Disable HW interrupts, NAPI */
		bnx2x_netif_stop(bp, 1);
		/* Delete all NAPI objects */
		bnx2x_del_all_napi(bp);
		if (CNIC_LOADED(bp))
			bnx2x_del_all_napi_cnic(bp);
		/* Release IRQs */
		bnx2x_free_irq(bp);

		/* Report UNLOAD_DONE to MCP */
		bnx2x_send_unload_done(bp, false);
	}

	/*
	 * At this stage no more interrupts will arrive so we may safely clean
	 * the queueable objects here in case they failed to get cleaned so far.
	 */
	if (IS_PF(bp))
		bnx2x_squeeze_objects(bp);

	/* There should be no more pending SP commands at this stage */
	bp->sp_state = 0;

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	if (CNIC_LOADED(bp))
		bnx2x_free_skbs_cnic(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bnx2x_free_fp_mem(bp);
	if (CNIC_LOADED(bp))
		bnx2x_free_fp_mem_cnic(bp);

	if (IS_PF(bp)) {
		bnx2x_free_mem(bp);
		if (CNIC_LOADED(bp))
			bnx2x_free_mem_cnic(bp);
	}
	bp->state = BNX2X_STATE_CLOSED;
	bp->cnic_loaded = false;

	/* Check if there are pending parity attentions. If there are - set
	 * RECOVERY_IN_PROGRESS.
	 */
	if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
		bnx2x_set_reset_in_progress(bp);

		/* Set RESET_IS_GLOBAL if needed */
		if (global)
			bnx2x_set_reset_global(bp);
	}

	/* The last driver must disable a "close the gate" if there is no
	 * parity attention or "process kill" pending.
	 */
	if (IS_PF(bp) &&
	    !bnx2x_clear_pf_load(bp) &&
	    bnx2x_reset_is_done(bp, BP_PATH(bp)))
		bnx2x_disable_close_the_gate(bp);

	DP(NETIF_MSG_IFUP, "Ending NIC unload\n");

	return 0;
}
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	/* If there is no power capability, silently succeed */
	if (!bp->pm_cap) {
		BNX2X_DEV_INFO("No power capability. Breaking.\n");
		return 0;
	}

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		/* If there are other clients above don't
		   shut down the power */
		if (atomic_read(&bp->pdev->enable_cnt) != 1)
			return 0;
		/* Don't shut down the power for emulation and FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			return 0;

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
		return -EINVAL;
	}
	return 0;
}
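/* A note on the PMCSR manipulation above: the low two bits of
 * PCI_PM_CTRL select the power state (0 = D0, 3 = D3hot), which is
 * why entering D3hot ORs in 3 after masking PCI_PM_CTRL_STATE_MASK,
 * while returning to D0 clears those bits and writes
 * PCI_PM_CTRL_PME_STATUS (write-one-to-clear) to ack any pending PME.
 */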
/*
 * net_device service functions
 */
int bnx2x_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	u8 cos;
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;

	while (1) {
#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic)) {
			napi_complete(napi);
			return 0;
		}
#endif

		for_each_cos_in_tx_queue(fp, cos)
			if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
				bnx2x_tx_int(bp, fp->txdata_ptr[cos]);

		if (bnx2x_has_rx_work(fp)) {
			work_done += bnx2x_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget)
				break;
		}

		/* Fall out from the NAPI loop if needed */
		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {

			/* No need to update SB for FCoE L2 ring as long as
			 * it's connected to the default SB and the SB
			 * has been updated when NAPI was scheduled.
			 */
			if (IS_FCOE_FP(fp)) {
				napi_complete(napi);
				break;
			}
			bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block,
			 * thus we need to ensure that status block indices
			 * have been actually read (bnx2x_update_fpsb_idx)
			 * prior to this check (bnx2x_has_rx_work) so that
			 * we won't write the "newer" value of the status block
			 * to IGU (if there was a DMA right after
			 * bnx2x_has_rx_work and if there is no rmb, the memory
			 * reading (bnx2x_update_fpsb_idx) may be postponed
			 * to right before bnx2x_ack_sb). In this case there
			 * will never be another interrupt until there is
			 * another update of the status block, while there
			 * is still unhandled work.
			 */
			rmb();

			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
				napi_complete(napi);
				/* Re-enable interrupts */
				DP(NETIF_MSG_RX_STATUS,
				   "Update index to %d\n", fp->fp_hc_idx);
				bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_hc_idx),
					     IGU_INT_ENABLE, 1);
				break;
			}
		}
	}

	return work_done;
}
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fp_txdata *txdata,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
	   h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
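/* Worked example (illustrative numbers, not taken from the driver):
 * for a TSO skb whose linear part holds 1514 bytes with 66 bytes of
 * headers, hlen = 66, so the start BD is trimmed down to 66 bytes and
 * the new data BD points at mapping + 66 with 1514 - 66 = 1448 bytes.
 * Both BDs share a single DMA mapping; BNX2X_TSO_SPLIT_BD tells the
 * completion path to unmap only once.
 */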
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
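/* The arithmetic above in one line: the stack computed the checksum
 * starting at SKB_CS offset, but the FW wants it computed from the
 * transport header. csum_partial() over the 'fix' bytes between the
 * two start points is subtracted (fix > 0) or added (fix < 0) via
 * csum_sub()/csum_add(), and csum_fold() collapses the 32-bit folded
 * sum back to 16 bits. The final swab16() byte-swaps the result, as
 * the parse BD stores checksums byte-swapped elsewhere in this file
 * (compare the swab16(tcp_hdr(skb)->check) in bnx2x_set_pbd_csum()).
 */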
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_is_gso_v6(skb))
		rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
	else if (skb_is_gso(skb))
		rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;

	return rc;
}
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
					skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. num_frags %d  hlen %d  first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
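/* The sliding-window check above enforces a FW rule: any window of
 * (MAX_FETCH_BD - 3) consecutive buffer descriptors must carry at
 * least one full MSS of payload. With, say, MAX_FETCH_BD = 13 and an
 * MSS of 1460 (numbers illustrative), every 10 consecutive frags must
 * sum to at least 1460 bytes; a packet made of many tiny frags fails
 * the check and gets skb_linearize()'d in bnx2x_start_xmit() before
 * DMA mapping.
 */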
static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
					u32 xmit_type)
{
	*parsing_data |= (skb_shinfo(skb)->gso_size <<
			  ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
			  ETH_TX_PARSE_BD_E2_LSO_MSS;
	if ((xmit_type & XMIT_GSO_V6) &&
	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
}
/**
 * bnx2x_set_pbd_gso - update PBD in GSO case.
 *
 * @skb:	packet skb
 * @pbd:	parse BD
 * @xmit_type:	xmit flags
 */
static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
				     struct eth_tx_parse_bd_e1x *pbd,
				     u32 xmit_type)
{
	pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
	pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
	pbd->tcp_flags = pbd_tcp_flags(skb);

	if (xmit_type & XMIT_GSO_V4) {
		pbd->ip_id = swab16(ip_hdr(skb)->id);
		pbd->tcp_pseudo_csum =
			swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						  ip_hdr(skb)->daddr,
						  0, IPPROTO_TCP, 0));

	} else
		pbd->tcp_pseudo_csum =
			swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						&ipv6_hdr(skb)->daddr,
						0, IPPROTO_TCP, 0));

	pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
}
/**
 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
 *
 * @bp:			driver handle
 * @skb:		packet skb
 * @parsing_data:	data to be updated
 * @xmit_type:		xmit flags
 */
static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
				       u32 *parsing_data, u32 xmit_type)
{
	*parsing_data |=
		((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
		ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
		ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;

	if (xmit_type & XMIT_CSUM_TCP) {
		*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;

		return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
	} else
		/* We support checksum offload for TCP and UDP only.
		 * No need to pass the UDP header length - it's a constant.
		 */
		return skb_transport_header(skb) +
			sizeof(struct udphdr) - skb->data;
}
static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
	struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
{
	tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

	if (xmit_type & XMIT_CSUM_V4)
		tx_start_bd->bd_flags.as_bitfield |=
					ETH_TX_BD_FLAGS_IP_CSUM;
	else
		tx_start_bd->bd_flags.as_bitfield |=
					ETH_TX_BD_FLAGS_IPV6;

	if (!(xmit_type & XMIT_CSUM_TCP))
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
}
/**
 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
 *
 * @bp:		driver handle
 * @skb:	packet skb
 * @pbd:	parse BD to be updated
 * @xmit_type:	xmit flags
 */
static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
				    struct eth_tx_parse_bd_e1x *pbd,
				    u32 xmit_type)
{
	u8 hlen = (skb_network_header(skb) - skb->data) >> 1;

	/* for now NS flag is not used in Linux */
	pbd->global_data =
		(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
			 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));

	pbd->ip_hlen_w = (skb_transport_header(skb) -
			skb_network_header(skb)) >> 1;

	hlen += pbd->ip_hlen_w;

	/* We support checksum offload for TCP and UDP only */
	if (xmit_type & XMIT_CSUM_TCP)
		hlen += tcp_hdrlen(skb) / 2;
	else
		hlen += sizeof(struct udphdr) / 2;

	pbd->total_hlen_w = cpu_to_le16(hlen);
	hlen = hlen*2;

	if (xmit_type & XMIT_CSUM_TCP) {
		pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

	} else {
		s8 fix = SKB_CS_OFF(skb); /* signed! */

		DP(NETIF_MSG_TX_QUEUED,
		   "hlen %d  fix %d  csum before fix %x\n",
		   le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));

		/* HW bug: fixup the CSUM */
		pbd->tcp_pseudo_csum =
			bnx2x_csum_fix(skb_transport_header(skb),
				       SKB_CS(skb), fix);

		DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
		   pbd->tcp_pseudo_csum);
	}

	return hlen;
}
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	struct netdev_queue *txq;
	struct bnx2x_fp_txdata *txdata;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd, *first_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
	struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
	u32 pbd_e2_parsing_data = 0;
	u16 pkt_prod, bd_prod;
	int nbd, txq_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;
	struct ethhdr *eth;
	u8 mac_type = UNICAST_ADDRESS;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	txq_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, txq_index);

	BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));

	txdata = &bp->bnx2x_txq[txq_index];

	/* enable this debug print to view the transmission queue being used
	DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
	   txq_index, fp_index, txdata_index); */

	/* enable this debug print to view the transmission details
	DP(NETIF_MSG_TX_QUEUED,
	   "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
	   txdata->cid, fp_index, txdata_index, txdata, fp); */

	if (unlikely(bnx2x_tx_avail(bp, txdata) <
		     skb_shinfo(skb)->nr_frags +
		     BDS_PER_TX_PKT +
		     NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
		/* Handle special storage cases separately */
		if (txdata->tx_ring_size == 0) {
			struct bnx2x_eth_q_stats *q_stats =
				bnx2x_fp_qstats(bp, txdata->parent_fp);
			q_stats->driver_filtered_tx_pkt++;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED,
	   "queue[%d]: SKB: summed %x  protocol %x  protocol(%x,%x) gso type %x  xmit_type %x\n",
	   txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

	eth = (struct ethhdr *)skb->data;

	/* set flag according to packet type (UNICAST_ADDRESS is default)*/
	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
		if (is_broadcast_ether_addr(eth->h_dest))
			mac_type = BROADCAST_ADDRESS;
		else
			mac_type = MULTICAST_ADDRESS;
	}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED,
			   "SKB linearization failed - silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif
	/* Map skb linear data for DMA */
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		DP(NETIF_MSG_TX_QUEUED,
		   "SKB mapping failed - silently dropping this SKB\n");
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pbd sizes are in words - NOT DWORDS!
	*/

	/* get current pkt produced now - advance it just before sending packet
	 * since mapping of pages may fail and cause packet to be dropped
	 */
	pkt_prod = txdata->tx_pkt_prod;
	bd_prod = TX_BD(txdata->tx_bd_prod);

	/* get a tx_buf and first BD
	 * tx_start_bd may be changed during SPLIT,
	 * but first_bd will always stay first
	 */
	tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
	first_bd = tx_start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	SET_FLAG(tx_start_bd->general_data,
		 ETH_TX_START_BD_PARSE_NBDS,
		 0);

	/* header nbd */
	SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);

	/* remember the first BD of the packet */
	tx_buf->first_bd = txdata->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
	   pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);

	if (vlan_tx_tag_present(skb)) {
		tx_start_bd->vlan_or_ethertype =
		    cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |=
		    (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
	} else {
		/* when transmitting in a vf, start bd must hold the ethertype
		 * for fw to enforce it
		 */
#ifndef BNX2X_STOP_ON_ERROR
		if (IS_VF(bp))
#endif
			tx_start_bd->vlan_or_ethertype =
				cpu_to_le16(ntohs(eth->h_proto));
#ifndef BNX2X_STOP_ON_ERROR
		else
			/* used by FW for packet accounting */
			tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
#endif
	}

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	if (xmit_type & XMIT_CSUM)
		bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);

	if (!CHIP_IS_E1x(bp)) {
		pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
		memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
		/* Set PBD in checksum offload case */
		if (xmit_type & XMIT_CSUM)
			hlen = bnx2x_set_pbd_csum_e2(bp, skb,
						     &pbd_e2_parsing_data,
						     xmit_type);

		if (IS_MF_SI(bp) || IS_VF(bp)) {
			/* fill in the MAC addresses in the PBD - for local
			 * switching
			 */
			bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
					      &pbd_e2->src_mac_addr_mid,
					      &pbd_e2->src_mac_addr_lo,
					      eth->h_source);
			bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
					      &pbd_e2->dst_mac_addr_mid,
					      &pbd_e2->dst_mac_addr_lo,
					      eth->h_dest);
		}

		SET_FLAG(pbd_e2_parsing_data,
			 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
	} else {
		u16 global_data = 0;
		pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
		memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
		/* Set PBD in checksum offload case */
		if (xmit_type & XMIT_CSUM)
			hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);

		SET_FLAG(global_data,
			 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
		pbd_e1x->global_data |= cpu_to_le16(global_data);
	}

	/* Setup the data pointer of the first BD of the packet */
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED,
	   "first bd @%p  addr (%x:%x)  nbd %d  nbytes %d  flags %x  vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_start_bd->vlan_or_ethertype));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
						 &tx_start_bd, hlen,
						 bd_prod, ++nbd);
		if (!CHIP_IS_E1x(bp))
			bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
					     xmit_type);
		else
			bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
	}

	/* Set the PBD's parsing_data field if not zero
	 * (for the chips newer than 57711).
	 */
	if (pbd_e2_parsing_data)
		pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);

	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	/* Handle fragmented skb */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
			unsigned int pkts_compl = 0, bytes_compl = 0;

			DP(NETIF_MSG_TX_QUEUED,
			   "Unable to map page - dropping packet...\n");

			/* we need to unmap all buffers already mapped
			 * for this SKB;
			 * first_bd->nbd need to be properly updated
			 * before call to bnx2x_free_tx_pkt
			 */
			first_bd->nbd = cpu_to_le16(nbd);
			bnx2x_free_tx_pkt(bp, txdata,
					  TX_BD(txdata->tx_pkt_prod),
					  &pkts_compl, &bytes_compl);
			return NETDEV_TX_OK;
		}

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
		le16_add_cpu(&pkt_size, skb_frag_size(frag));
		nbd++;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	/* update with actual num BDs */
	first_bd->nbd = cpu_to_le16(nbd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	/* total_pkt_bytes should be set on the first data BD if
	 * it's not an LSO packet and there is more than one
	 * data BD. In this case pkt_size is limited by an MTU value.
	 * However we prefer to set it for an LSO packet (while we don't
	 * have to) in order to save some CPU cycles in the non-LSO
	 * case, which we care much more about.
	 */
	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd_e1x)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
		   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
		   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
		   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
		   le16_to_cpu(pbd_e1x->total_hlen_w));
	if (pbd_e2)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
		   pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
		   pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
		   pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
		   pbd_e2->parsing_data);
	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);

	netdev_tx_sent_queue(txq, skb->len);

	skb_tx_timestamp(skb);

	txdata->tx_pkt_prod++;
	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assumes packets must have BDs.
	 */
	wmb();

	txdata->tx_db.data.prod += nbd;
	barrier();

	DOORBELL(bp, txdata->cid, txdata->tx_db.raw);

	mmiowb();

	txdata->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
		netif_tx_stop_queue(txq);

		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->bd_tx_cons
		 */
		smp_mb();

		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
		if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
			netif_tx_wake_queue(txq);
	}
	txdata->tx_pkt++;

	return NETDEV_TX_OK;
}
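/* BD accounting in bnx2x_start_xmit(), summarized: nbd starts at 2
 * (start BD + parse BD), gains one per mapped frag, one more if the
 * TSO split added a data BD, and one more if the chain wraps past a
 * next-page BD (the TX_BD_POFF() check). first_bd->nbd must hold the
 * final count before the doorbell rings so the FW fetches the whole
 * chain in one go.
 */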
/**
 * bnx2x_setup_tc - routine to configure net_device for multi tc
 *
 * @netdev: net device to configure
 * @tc: number of traffic classes to enable
 *
 * callback connected to the ndo_setup_tc function pointer
 */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
{
	int cos, prio, count, offset;
	struct bnx2x *bp = netdev_priv(dev);

	/* setup tc must be called under rtnl lock */
	ASSERT_RTNL();

	/* no traffic classes requested. aborting */
	if (!num_tc) {
		netdev_reset_tc(dev);
		return 0;
	}

	/* requested to support too many traffic classes */
	if (num_tc > bp->max_cos) {
		BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
			  num_tc, bp->max_cos);
		return -EINVAL;
	}

	/* declare amount of supported traffic classes */
	if (netdev_set_num_tc(dev, num_tc)) {
		BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
		return -EINVAL;
	}

	/* configure priority to traffic class mapping */
	for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
		netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
		   "mapping priority %d to tc %d\n",
		   prio, bp->prio_to_cos[prio]);
	}

	/* Use this configuration to differentiate tc0 from other COSes
	   This can be used for ets or pfc, and save the effort of setting
	   up a multi class queue disc or negotiating DCBX with a switch
	netdev_set_prio_tc_map(dev, 0, 0);
	DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
	for (prio = 1; prio < 16; prio++) {
		netdev_set_prio_tc_map(dev, prio, 1);
		DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
	} */

	/* configure traffic class to transmission queue mapping */
	for (cos = 0; cos < bp->max_cos; cos++) {
		count = BNX2X_NUM_ETH_QUEUES(bp);
		offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
		netdev_set_tc_queue(dev, cos, count, offset);
		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
		   "mapping tc %d to offset %d count %d\n",
		   cos, offset, count);
	}

	return 0;
}
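/* Example of the resulting mapping (illustrative numbers): with
 * max_cos = 3 and 4 ethernet queues, tc0 covers txq 0-3 (offset 0),
 * tc1 covers txq 4-7 (offset 4) and tc2 covers txq 8-11 (offset 8),
 * i.e. each CoS owns a full copy of the ethernet queue range.
 */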
/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
		BNX2X_ERR("Requested MAC address is not valid\n");
		return -EINVAL;
	}

	if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
	    !is_zero_ether_addr(addr->sa_data)) {
		BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
		return -EINVAL;
	}

	if (netif_running(dev)) {
		rc = bnx2x_set_eth_mac(bp, false);
		if (rc)
			return rc;
	}

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (netif_running(dev))
		rc = bnx2x_set_eth_mac(bp, true);

	return rc;
}
static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
{
	union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
	struct bnx2x_fastpath *fp = &bp->fp[fp_index];
	u8 cos;

	/* Common */

	if (IS_FCOE_IDX(fp_index)) {
		memset(sb, 0, sizeof(union host_hc_status_block));
		fp->status_blk_mapping = 0;
	} else {
		/* status blocks */
		if (!CHIP_IS_E1x(bp))
			BNX2X_PCI_FREE(sb->e2_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_FREE(sb->e1x_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e1x));
	}

	/* Rx */
	if (!skip_rx_queue(bp, fp_index)) {
		bnx2x_free_rx_bds(fp);

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
			       bnx2x_fp(bp, fp_index, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
			       bnx2x_fp(bp, fp_index, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
			       bnx2x_fp(bp, fp_index, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}

	/* Tx */
	if (!skip_tx_queue(bp, fp_index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			DP(NETIF_MSG_IFDOWN,
			   "freeing tx memory of fp %d cos %d cid %d\n",
			   fp_index, cos, txdata->cid);

			BNX2X_FREE(txdata->tx_buf_ring);
			BNX2X_PCI_FREE(txdata->tx_desc_ring,
				       txdata->tx_desc_mapping,
				       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
		}
	}
	/* end of fastpath */
}
void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
{
	int i;
	for_each_cnic_queue(bp, i)
		bnx2x_free_fp_mem_at(bp, i);
}

void bnx2x_free_fp_mem(struct bnx2x *bp)
{
	int i;
	for_each_eth_queue(bp, i)
		bnx2x_free_fp_mem_at(bp, i);
}
static void set_sb_shortcuts(struct bnx2x *bp, int index)
{
	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
	if (!CHIP_IS_E1x(bp)) {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e2_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e2_sb->sb.running_index;
	} else {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e1x_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e1x_sb->sb.running_index;
	}
}
/* Returns the number of actually allocated BDs */
static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
			      int rx_ring_size)
{
	struct bnx2x *bp = fp->bp;
	u16 ring_prod, cqe_ring_prod;
	int i, failure_cnt = 0;

	fp->rx_comp_cons = 0;
	cqe_ring_prod = ring_prod = 0;

	/* This routine is called only during fp init so
	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
	 */
	for (i = 0; i < rx_ring_size; i++) {
		if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
			failure_cnt++;
			continue;
		}
		ring_prod = NEXT_RX_IDX(ring_prod);
		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
		WARN_ON(ring_prod <= (i - failure_cnt));
	}

	if (failure_cnt)
		BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
			  i - failure_cnt, fp->index);

	fp->rx_bd_prod = ring_prod;
	/* Limit the CQE producer by the CQE ring size */
	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
				 cqe_ring_prod);
	fp->rx_pkt = fp->rx_calls = 0;

	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;

	return i - failure_cnt;
}
static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
		struct eth_rx_cqe_next_page *nextpg;

		nextpg = (struct eth_rx_cqe_next_page *)
			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
		nextpg->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		nextpg->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
	}
}
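/* The loop above chains the RCQ pages into a ring: the last CQE slot
 * of each page is reused as a "next page" element holding the DMA
 * address of the following page, and the (i % NUM_RCQ_RINGS) term
 * makes the last page point back to page 0, so HW can walk the
 * completion queue circularly without driver intervention.
 */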
static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
{
	union host_hc_status_block *sb;
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int ring_size = 0;
	u8 cos;
	int rx_ring_size = 0;

	if (!bp->rx_ring_size &&
	    (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
		rx_ring_size = MIN_RX_SIZE_NONTPA;
		bp->rx_ring_size = rx_ring_size;
	} else if (!bp->rx_ring_size) {
		rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);

		if (CHIP_IS_E3(bp)) {
			u32 cfg = SHMEM_RD(bp,
					   dev_info.port_hw_config[BP_PORT(bp)].
					   default_cfg);

			/* Decrease ring size for 1G functions */
			if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
			    PORT_HW_CFG_NET_SERDES_IF_SGMII)
				rx_ring_size /= 10;
		}

		/* allocate at least number of buffers required by FW */
		rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
				     MIN_RX_SIZE_TPA, rx_ring_size);

		bp->rx_ring_size = rx_ring_size;
	} else /* if rx_ring_size specified - use it */
		rx_ring_size = bp->rx_ring_size;

	/* Common */
	sb = &bnx2x_fp(bp, index, status_blk);

	if (!IS_FCOE_IDX(index)) {
		/* status blocks */
		if (!CHIP_IS_E1x(bp))
			BNX2X_PCI_ALLOC(sb->e2_sb,
				&bnx2x_fp(bp, index, status_blk_mapping),
				sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_ALLOC(sb->e1x_sb,
				&bnx2x_fp(bp, index, status_blk_mapping),
				sizeof(struct host_hc_status_block_e1x));
	}

	/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
	 * set shortcuts for it.
	 */
	if (!IS_FCOE_IDX(index))
		set_sb_shortcuts(bp, index);

	/* Tx */
	if (!skip_tx_queue(bp, index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			DP(NETIF_MSG_IFUP,
			   "allocating tx memory of fp %d cos %d\n",
			   index, cos);

			BNX2X_ALLOC(txdata->tx_buf_ring,
				    sizeof(struct sw_tx_bd) * NUM_TX_BD);
			BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
					&txdata->tx_desc_mapping,
					sizeof(union eth_tx_bd_types) * NUM_TX_BD);
		}
	}

	/* Rx */
	if (!skip_rx_queue(bp, index)) {
		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
				&bnx2x_fp(bp, index, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
				&bnx2x_fp(bp, index, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
				&bnx2x_fp(bp, index, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
		/* RX BD ring */
		bnx2x_set_next_page_rx_bd(fp);

		/* CQ ring */
		bnx2x_set_next_page_rx_cq(fp);

		/* BDs */
		ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
		if (ring_size < rx_ring_size)
			goto alloc_mem_err;
	}

	return 0;

/* handles low memory cases */
alloc_mem_err:
	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
		  index, ring_size);
	/* FW will drop all packets if queue is not big enough,
	 * In these cases we disable the queue
	 * Min size is different for OOO, TPA and non-TPA queues
	 */
	if (ring_size < (fp->disable_tpa ?
			 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
		/* release memory allocated for this queue */
		bnx2x_free_fp_mem_at(bp, index);
		return -ENOMEM;
	}
	return 0;
}
int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
{
	if (!NO_FCOE(bp))
		/* FCoE */
		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
			/* we will fail load process instead of mark
			 * NO_FCOE_FLAG
			 */
			return -ENOMEM;

	return 0;
}

int bnx2x_alloc_fp_mem(struct bnx2x *bp)
{
	int i;

	/* 1. Allocate FP for leading - fatal if error
	 * 2. Allocate RSS - fix number of queues if error
	 */

	/* leading */
	if (bnx2x_alloc_fp_mem_at(bp, 0))
		return -ENOMEM;

	/* RSS */
	for_each_nondefault_eth_queue(bp, i)
		if (bnx2x_alloc_fp_mem_at(bp, i))
			break;

	/* handle memory failures */
	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;

		WARN_ON(delta < 0);
		bnx2x_shrink_eth_fp(bp, delta);
		if (CNIC_SUPPORT(bp))
			/* move non eth FPs next to last eth FP
			 * must be done in that order
			 * FCOE_IDX < FWD_IDX < OOO_IDX
			 */

			/* move FCoE fp even NO_FCOE_FLAG is on */
			bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
		bp->num_ethernet_queues -= delta;
		bp->num_queues = bp->num_ethernet_queues +
				 bp->num_cnic_queues;
		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
			  bp->num_queues + delta, bp->num_queues);
	}

	return 0;
}
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
	kfree(bp->fp->tpa_info);
	kfree(bp->fp);
	kfree(bp->sp_objs);
	kfree(bp->fp_stats);
	kfree(bp->bnx2x_txq);
	kfree(bp->msix_table);
	kfree(bp->ilt);
}

int bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp;
	struct msix_entry *tbl;
	struct bnx2x_ilt *ilt;
	int msix_table_size = 0;
	int fp_array_size, txq_array_size;
	int i;

	/*
	 * The biggest MSI-X table we might need is as a maximum number of fast
	 * path IGU SBs plus default SB (for PF).
	 */
	msix_table_size = bp->igu_sb_cnt;
	if (IS_PF(bp))
		msix_table_size++;
	BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);

	/* fp array: RSS plus CNIC related L2 queues */
	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
	BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);

	fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
	if (!fp)
		goto alloc_err;
	for (i = 0; i < fp_array_size; i++) {
		fp[i].tpa_info =
			kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
				sizeof(struct bnx2x_agg_info), GFP_KERNEL);
		if (!(fp[i].tpa_info))
			goto alloc_err;
	}

	bp->fp = fp;

	/* allocate sp objs */
	bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs),
			      GFP_KERNEL);
	if (!bp->sp_objs)
		goto alloc_err;

	/* allocate fp_stats */
	bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats),
			       GFP_KERNEL);
	if (!bp->fp_stats)
		goto alloc_err;

	/* Allocate memory for the transmission queues array */
	txq_array_size =
		BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
	BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);

	bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
				GFP_KERNEL);
	if (!bp->bnx2x_txq)
		goto alloc_err;

	/* msix table */
	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		goto alloc_err;
	bp->msix_table = tbl;

	/* ilt */
	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
	if (!ilt)
		goto alloc_err;
	bp->ilt = ilt;

	return 0;
alloc_err:
	bnx2x_free_mem_bp(bp);
	return -ENOMEM;
}
int bnx2x_reload_if_running(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (unlikely(!netif_running(dev)))
		return 0;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}
int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;
	if (bp->link_params.num_phys <= 1)
		return INT_PHY;

	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {

		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}

	return sel_phy_idx;
}

int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
	/*
	 * The selected active PHY is always after swapping (in case PHY
	 * swapping is enabled). So when swapping is enabled, we need to reverse
	 * the configuration
	 */

	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}
#ifdef NETDEV_FCOE_WWNN
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
				cp->fcoe_wwn_node_name_lo);
		break;
	case NETDEV_FCOE_WWPN:
		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
				cp->fcoe_wwn_port_name_lo);
		break;
	default:
		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
		return -EINVAL;
	}

	return 0;
}
#endif
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Can't perform change MTU during parity recovery\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
		BNX2X_ERR("Can't support requested MTU size\n");
		return -EINVAL;
	}

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	return bnx2x_reload_if_running(dev);
}
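/**
 * bnx2x_fix_features - drop features the device cannot support.
 *
 * @dev:	net device
 * @features:	requested feature set
 *
 * LRO and GRO (TPA) depend on Rx checksum offloading and are masked out
 * when it is absent or TPA is disabled.
 */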
netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* TPA requires Rx CSUM offloading */
	if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
		features &= ~NETIF_F_LRO;
		features &= ~NETIF_F_GRO;
	}

	return features;
}
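/**
 * bnx2x_set_features - apply a new feature set.
 *
 * @dev:	net device
 * @features:	feature set to apply
 *
 * Translates the LRO/GRO bits into the TPA flags and NETIF_F_LOOPBACK
 * into the BMAC loopback mode; any resulting change triggers a reload
 * once recovery is done.
 */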
int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 flags = bp->flags;
	bool bnx2x_reload = false;

	if (features & NETIF_F_LRO)
		flags |= TPA_ENABLE_FLAG;
	else
		flags &= ~TPA_ENABLE_FLAG;

	if (features & NETIF_F_GRO)
		flags |= GRO_ENABLE_FLAG;
	else
		flags &= ~GRO_ENABLE_FLAG;

	if (features & NETIF_F_LOOPBACK) {
		if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
			bp->link_params.loopback_mode = LOOPBACK_BMAC;
			bnx2x_reload = true;
		}
	} else {
		if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
			bp->link_params.loopback_mode = LOOPBACK_NONE;
			bnx2x_reload = true;
		}
	}

	if (flags ^ bp->flags) {
		bp->flags = flags;
		bnx2x_reload = true;
	}

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE)
			return bnx2x_reload_if_running(dev);
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}
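/**
 * bnx2x_tx_timeout - netdev watchdog callback.
 *
 * @dev:	net device
 *
 * Defers the actual reset to the slowpath rtnl task so the netif can be
 * shut down gracefully first.
 */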
void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	smp_mb__before_clear_bit();
	set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
	smp_mb__after_clear_bit();

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}
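/**
 * bnx2x_suspend - PCI suspend callback.
 *
 * @pdev:	PCI device
 * @state:	target power state
 *
 * Saves PCI state, detaches and unloads a running interface, then moves
 * the device to the requested power state.
 */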
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
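/**
 * bnx2x_resume - PCI resume callback.
 *
 * @pdev:	PCI device
 *
 * Refuses to resume while parity error recovery is in progress;
 * otherwise restores PCI state and reloads a running interface.
 */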
int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
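/**
 * bnx2x_set_ctx_validation - set CDU validation data in an ETH context.
 *
 * @bp:		driver handle
 * @cxt:	context to update
 * @cid:	connection ID the context belongs to
 */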
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid)
{
	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
				       CDU_REGION_NUMBER_UCM_AG,
				       ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
				       CDU_REGION_NUMBER_XCM_AG,
				       ETH_CONNECTION_TYPE);
}
static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
				    u8 fw_sb_id, u8 sb_index,
				    u8 ticks)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);

	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}
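/* Set or clear the HC_ENABLED flag of a status block index in cstorm
 * internal memory, leaving the other flag bits intact.
 */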
static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
				    u16 fw_sb_id, u8 sb_index,
				    u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
	u16 flags = REG_RD16(bp, addr);

	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR16(bp, addr, flags);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}
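/**
 * bnx2x_update_coalesce_sb_index - update coalescing for one SB index.
 *
 * @bp:		driver handle
 * @fw_sb_id:	firmware status block ID
 * @sb_index:	index within the status block
 * @disable:	nonzero to disable coalescing for this index
 * @usec:	coalescing interval in microseconds
 *
 * The interval is converted to controller ticks of BNX2X_BTR microseconds
 * each (e.g., assuming BNX2X_BTR is 4, usec = 100 yields ticks = 25);
 * a zero interval disables coalescing as well.
 */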
void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}