/* bnx2x_cmn.c: QLogic Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 * Copyright (c) 2014 QLogic Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/crash_dump.h>
#include <net/ip6_checksum.h>
#include <net/busy_poll.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
static int bnx2x_poll(struct napi_struct *napi, int budget);
static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_rx_queue_cnic(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
	}
}
static void bnx2x_add_all_napi(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_eth_queue(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
	}
}
static int bnx2x_calc_num_queues(struct bnx2x *bp)
{
	int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();

	/* Reduce memory usage in kdump environment by using only one queue */
	if (is_kdump_kernel())
		nq = 1;

	nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
	return nq;
}
/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the content of the bp->fp[to].napi is kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;
	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));

	/* Retain the tpa_info of the original `to' version as we don't want
	 * 2 FPs to contain the same tpa_info pointer.
	 */
	to_fp->tpa_info = old_tpa_info;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */
	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
				(bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}
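
/* For illustration only (numbers are hypothetical): with 8 ETH queues and
 * max_cos = 3, old_max_eth_txqs is 24; moving the FCoE fastpath one slot
 * down (from == FCOE_IDX(bp), to == from - 1) gives new_max_eth_txqs = 21,
 * so its txdata entry is copied from index 24 + FCOE_TXQ_IDX_OFFSET to
 * 21 + FCOE_TXQ_IDX_OFFSET.
 */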
/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:		driver handle
 * @buf:	character buffer to fill with the fw name
 * @buf_len:	length of the above buffer
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, PHY_FW_VER_LEN);
		strlcpy(buf, bp->fw_ver, buf_len);
		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
			 "bc %d.%d.%d%s%s",
			 (bp->common.bc_ver & 0xff0000) >> 16,
			 (bp->common.bc_ver & 0xff00) >> 8,
			 (bp->common.bc_ver & 0xff),
			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}
/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp:		driver handle
 * @delta:	number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along the array could cause memory to be overridden
	 */
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}
int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;
	u16 split_bd_len = 0;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
		/* Skip second parse bd... */
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		split_bd_len = BD_UNMAP_LEN(tx_data_bd);
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* unmap first bd */
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
			 DMA_TO_DEVICE);

	/* now free frags */
	while (nbd > 0) {
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
		dev_kfree_skb_any(skb);
	}

	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}
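
/* Note: the smp_mb() above is assumed to pair with a matching barrier on
 * the bnx2x_start_xmit() queue-stop path; that pairing is what makes the
 * stopped/tx_avail re-check under the tx lock safe.
 */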
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}
static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
/* Get Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    enum pkt_hash_types *rxhash_type)
{
	/* Get Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
				(htype == TCP_IPV6_HASH_TYPE)) ?
			       PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;

		return le32_to_cpu(cqe->rss_hash_result);
	}
	*rxhash_type = PKT_HASH_TYPE_NONE;
	return 0;
}
428 static void bnx2x_tpa_start(struct bnx2x_fastpath
*fp
, u16 queue
,
430 struct eth_fast_path_rx_cqe
*cqe
)
432 struct bnx2x
*bp
= fp
->bp
;
433 struct sw_rx_bd
*cons_rx_buf
= &fp
->rx_buf_ring
[cons
];
434 struct sw_rx_bd
*prod_rx_buf
= &fp
->rx_buf_ring
[prod
];
435 struct eth_rx_bd
*prod_bd
= &fp
->rx_desc_ring
[prod
];
437 struct bnx2x_agg_info
*tpa_info
= &fp
->tpa_info
[queue
];
438 struct sw_rx_bd
*first_buf
= &tpa_info
->first_buf
;
440 /* print error if current state != stop */
441 if (tpa_info
->tpa_state
!= BNX2X_TPA_STOP
)
442 BNX2X_ERR("start of bin not in stop [%d]\n", queue
);
444 /* Try to map an empty data buffer from the aggregation info */
445 mapping
= dma_map_single(&bp
->pdev
->dev
,
446 first_buf
->data
+ NET_SKB_PAD
,
447 fp
->rx_buf_size
, DMA_FROM_DEVICE
);
449 * ...if it fails - move the skb from the consumer to the producer
450 * and set the current aggregation state as ERROR to drop it
451 * when TPA_STOP arrives.
454 if (unlikely(dma_mapping_error(&bp
->pdev
->dev
, mapping
))) {
455 /* Move the BD from the consumer to the producer */
456 bnx2x_reuse_rx_data(fp
, cons
, prod
);
457 tpa_info
->tpa_state
= BNX2X_TPA_ERROR
;
461 /* move empty data from pool to prod */
462 prod_rx_buf
->data
= first_buf
->data
;
463 dma_unmap_addr_set(prod_rx_buf
, mapping
, mapping
);
464 /* point prod_bd to new data */
465 prod_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
466 prod_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
468 /* move partial skb from cons to pool (don't unmap yet) */
469 *first_buf
= *cons_rx_buf
;
471 /* mark bin state as START */
472 tpa_info
->parsing_flags
=
473 le16_to_cpu(cqe
->pars_flags
.flags
);
474 tpa_info
->vlan_tag
= le16_to_cpu(cqe
->vlan_tag
);
475 tpa_info
->tpa_state
= BNX2X_TPA_START
;
476 tpa_info
->len_on_bd
= le16_to_cpu(cqe
->len_on_bd
);
477 tpa_info
->placement_offset
= cqe
->placement_offset
;
478 tpa_info
->rxhash
= bnx2x_get_rxhash(bp
, cqe
, &tpa_info
->rxhash_type
);
479 if (fp
->mode
== TPA_MODE_GRO
) {
480 u16 gro_size
= le16_to_cpu(cqe
->pkt_len_or_gro_seg_len
);
481 tpa_info
->full_page
= SGE_PAGES
/ gro_size
* gro_size
;
482 tpa_info
->gro_size
= gro_size
;
485 #ifdef BNX2X_STOP_ON_ERROR
486 fp
->tpa_queue_used
|= (1 << queue
);
487 DP(NETIF_MSG_RX_STATUS
, "fp->tpa_queue_used = 0x%llx\n",
/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
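
/* Worked example (illustration only): for an IPv4/TCP aggregation that
 * carries timestamps, bnx2x_set_gro_params() below ends up with
 * hdrs_len = ETH_HLEN (14) + sizeof(struct iphdr) (20) +
 * sizeof(struct tcphdr) (20) + TPA_TSTAMP_OPT_LEN (12) = 66,
 * so gso_size = len_on_bd - 66.
 */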
498 * bnx2x_set_gro_params - compute GRO values
501 * @parsing_flags: parsing flags from the START CQE
502 * @len_on_bd: total length of the first packet for the
504 * @pkt_len: length of all segments
506 * Approximate value of the MSS for this aggregation calculated using
507 * the first packet of it.
508 * Compute number of aggregated segments, and gso_type.
510 static void bnx2x_set_gro_params(struct sk_buff
*skb
, u16 parsing_flags
,
511 u16 len_on_bd
, unsigned int pkt_len
,
512 u16 num_of_coalesced_segs
)
514 /* TPA aggregation won't have either IP options or TCP options
515 * other than timestamp or IPv6 extension headers.
517 u16 hdrs_len
= ETH_HLEN
+ sizeof(struct tcphdr
);
519 if (GET_FLAG(parsing_flags
, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL
) ==
520 PRS_FLAG_OVERETH_IPV6
) {
521 hdrs_len
+= sizeof(struct ipv6hdr
);
522 skb_shinfo(skb
)->gso_type
= SKB_GSO_TCPV6
;
524 hdrs_len
+= sizeof(struct iphdr
);
525 skb_shinfo(skb
)->gso_type
= SKB_GSO_TCPV4
;
	/* Check if there was a TCP timestamp; if there was, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise the FW would close the aggregation.
	 */
533 if (parsing_flags
& PARSING_FLAGS_TIME_STAMP_EXIST_FLAG
)
534 hdrs_len
+= TPA_TSTAMP_OPT_LEN
;
536 skb_shinfo(skb
)->gso_size
= len_on_bd
- hdrs_len
;
538 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
539 * to skb_shinfo(skb)->gso_segs
541 NAPI_GRO_CB(skb
)->count
= num_of_coalesced_segs
;
544 static int bnx2x_alloc_rx_sge(struct bnx2x
*bp
, struct bnx2x_fastpath
*fp
,
545 u16 index
, gfp_t gfp_mask
)
547 struct sw_rx_page
*sw_buf
= &fp
->rx_page_ring
[index
];
548 struct eth_rx_sge
*sge
= &fp
->rx_sge_ring
[index
];
549 struct bnx2x_alloc_pool
*pool
= &fp
->page_pool
;
553 pool
->page
= alloc_pages(gfp_mask
, PAGES_PER_SGE_SHIFT
);
554 if (unlikely(!pool
->page
))
560 mapping
= dma_map_page(&bp
->pdev
->dev
, pool
->page
,
561 pool
->offset
, SGE_PAGE_SIZE
, DMA_FROM_DEVICE
);
562 if (unlikely(dma_mapping_error(&bp
->pdev
->dev
, mapping
))) {
563 BNX2X_ERR("Can't map sge\n");
567 sw_buf
->page
= pool
->page
;
568 sw_buf
->offset
= pool
->offset
;
570 dma_unmap_addr_set(sw_buf
, mapping
, mapping
);
572 sge
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
573 sge
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
575 pool
->offset
+= SGE_PAGE_SIZE
;
576 if (PAGE_SIZE
- pool
->offset
>= SGE_PAGE_SIZE
)
577 get_page(pool
->page
);
583 static int bnx2x_fill_frag_skb(struct bnx2x
*bp
, struct bnx2x_fastpath
*fp
,
584 struct bnx2x_agg_info
*tpa_info
,
587 struct eth_end_agg_rx_cqe
*cqe
,
590 struct sw_rx_page
*rx_pg
, old_rx_pg
;
591 u32 i
, frag_len
, frag_size
;
592 int err
, j
, frag_id
= 0;
593 u16 len_on_bd
= tpa_info
->len_on_bd
;
594 u16 full_page
= 0, gro_size
= 0;
596 frag_size
= le16_to_cpu(cqe
->pkt_len
) - len_on_bd
;
598 if (fp
->mode
== TPA_MODE_GRO
) {
599 gro_size
= tpa_info
->gro_size
;
600 full_page
= tpa_info
->full_page
;
603 /* This is needed in order to enable forwarding support */
605 bnx2x_set_gro_params(skb
, tpa_info
->parsing_flags
, len_on_bd
,
606 le16_to_cpu(cqe
->pkt_len
),
607 le16_to_cpu(cqe
->num_of_coalesced_segs
));
609 #ifdef BNX2X_STOP_ON_ERROR
610 if (pages
> min_t(u32
, 8, MAX_SKB_FRAGS
) * SGE_PAGES
) {
611 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
613 BNX2X_ERR("cqe->pkt_len = %d\n", cqe
->pkt_len
);
619 /* Run through the SGL and compose the fragmented skb */
620 for (i
= 0, j
= 0; i
< pages
; i
+= PAGES_PER_SGE
, j
++) {
621 u16 sge_idx
= RX_SGE(le16_to_cpu(cqe
->sgl_or_raw_data
.sgl
[j
]));
623 /* FW gives the indices of the SGE as if the ring is an array
624 (meaning that "next" element will consume 2 indices) */
625 if (fp
->mode
== TPA_MODE_GRO
)
626 frag_len
= min_t(u32
, frag_size
, (u32
)full_page
);
628 frag_len
= min_t(u32
, frag_size
, (u32
)SGE_PAGES
);
630 rx_pg
= &fp
->rx_page_ring
[sge_idx
];
633 /* If we fail to allocate a substitute page, we simply stop
634 where we are and drop the whole packet */
635 err
= bnx2x_alloc_rx_sge(bp
, fp
, sge_idx
, GFP_ATOMIC
);
637 bnx2x_fp_qstats(bp
, fp
)->rx_skb_alloc_failed
++;
641 dma_unmap_page(&bp
->pdev
->dev
,
642 dma_unmap_addr(&old_rx_pg
, mapping
),
643 SGE_PAGE_SIZE
, DMA_FROM_DEVICE
);
644 /* Add one frag and update the appropriate fields in the skb */
645 if (fp
->mode
== TPA_MODE_LRO
)
646 skb_fill_page_desc(skb
, j
, old_rx_pg
.page
,
647 old_rx_pg
.offset
, frag_len
);
651 for (rem
= frag_len
; rem
> 0; rem
-= gro_size
) {
652 int len
= rem
> gro_size
? gro_size
: rem
;
653 skb_fill_page_desc(skb
, frag_id
++,
655 old_rx_pg
.offset
+ offset
,
658 get_page(old_rx_pg
.page
);
663 skb
->data_len
+= frag_len
;
664 skb
->truesize
+= SGE_PAGES
;
665 skb
->len
+= frag_len
;
667 frag_size
-= frag_len
;
673 static void bnx2x_frag_free(const struct bnx2x_fastpath
*fp
, void *data
)
675 if (fp
->rx_frag_size
)
681 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath
*fp
, gfp_t gfp_mask
)
683 if (fp
->rx_frag_size
) {
684 /* GFP_KERNEL allocations are used only during initialization */
685 if (unlikely(gfpflags_allow_blocking(gfp_mask
)))
686 return (void *)__get_free_page(gfp_mask
);
688 return netdev_alloc_frag(fp
->rx_frag_size
);
691 return kmalloc(fp
->rx_buf_size
+ NET_SKB_PAD
, gfp_mask
);
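
/* Note: a zero fp->rx_frag_size selects the kmalloc() path above;
 * bnx2x_set_rx_buf_size() clears rx_frag_size when the buffer plus
 * NET_SKB_PAD would not fit in a single page.
 */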
695 static void bnx2x_gro_ip_csum(struct bnx2x
*bp
, struct sk_buff
*skb
)
697 const struct iphdr
*iph
= ip_hdr(skb
);
700 skb_set_transport_header(skb
, sizeof(struct iphdr
));
703 th
->check
= ~tcp_v4_check(skb
->len
- skb_transport_offset(skb
),
704 iph
->saddr
, iph
->daddr
, 0);
707 static void bnx2x_gro_ipv6_csum(struct bnx2x
*bp
, struct sk_buff
*skb
)
709 struct ipv6hdr
*iph
= ipv6_hdr(skb
);
712 skb_set_transport_header(skb
, sizeof(struct ipv6hdr
));
715 th
->check
= ~tcp_v6_check(skb
->len
- skb_transport_offset(skb
),
716 &iph
->saddr
, &iph
->daddr
, 0);
719 static void bnx2x_gro_csum(struct bnx2x
*bp
, struct sk_buff
*skb
,
720 void (*gro_func
)(struct bnx2x
*, struct sk_buff
*))
722 skb_reset_network_header(skb
);
724 tcp_gro_complete(skb
);
728 static void bnx2x_gro_receive(struct bnx2x
*bp
, struct bnx2x_fastpath
*fp
,
732 if (skb_shinfo(skb
)->gso_size
) {
733 switch (be16_to_cpu(skb
->protocol
)) {
735 bnx2x_gro_csum(bp
, skb
, bnx2x_gro_ip_csum
);
738 bnx2x_gro_csum(bp
, skb
, bnx2x_gro_ipv6_csum
);
741 WARN_ONCE(1, "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
742 be16_to_cpu(skb
->protocol
));
746 skb_record_rx_queue(skb
, fp
->rx_queue
);
747 napi_gro_receive(&fp
->napi
, skb
);
750 static void bnx2x_tpa_stop(struct bnx2x
*bp
, struct bnx2x_fastpath
*fp
,
751 struct bnx2x_agg_info
*tpa_info
,
753 struct eth_end_agg_rx_cqe
*cqe
,
756 struct sw_rx_bd
*rx_buf
= &tpa_info
->first_buf
;
757 u8 pad
= tpa_info
->placement_offset
;
758 u16 len
= tpa_info
->len_on_bd
;
759 struct sk_buff
*skb
= NULL
;
760 u8
*new_data
, *data
= rx_buf
->data
;
761 u8 old_tpa_state
= tpa_info
->tpa_state
;
763 tpa_info
->tpa_state
= BNX2X_TPA_STOP
;
	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
768 if (old_tpa_state
== BNX2X_TPA_ERROR
)
771 /* Try to allocate the new data */
772 new_data
= bnx2x_frag_alloc(fp
, GFP_ATOMIC
);
773 /* Unmap skb in the pool anyway, as we are going to change
774 pool entry status to BNX2X_TPA_STOP even if new skb allocation
776 dma_unmap_single(&bp
->pdev
->dev
, dma_unmap_addr(rx_buf
, mapping
),
777 fp
->rx_buf_size
, DMA_FROM_DEVICE
);
778 if (likely(new_data
))
779 skb
= build_skb(data
, fp
->rx_frag_size
);
782 #ifdef BNX2X_STOP_ON_ERROR
783 if (pad
+ len
> fp
->rx_buf_size
) {
784 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
785 pad
, len
, fp
->rx_buf_size
);
791 skb_reserve(skb
, pad
+ NET_SKB_PAD
);
793 skb_set_hash(skb
, tpa_info
->rxhash
, tpa_info
->rxhash_type
);
795 skb
->protocol
= eth_type_trans(skb
, bp
->dev
);
796 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
798 if (!bnx2x_fill_frag_skb(bp
, fp
, tpa_info
, pages
,
799 skb
, cqe
, cqe_idx
)) {
800 if (tpa_info
->parsing_flags
& PARSING_FLAGS_VLAN
)
801 __vlan_hwaccel_put_tag(skb
, htons(ETH_P_8021Q
), tpa_info
->vlan_tag
);
802 bnx2x_gro_receive(bp
, fp
, skb
);
804 DP(NETIF_MSG_RX_STATUS
,
805 "Failed to allocate new pages - dropping packet!\n");
806 dev_kfree_skb_any(skb
);
809 /* put new data in bin */
810 rx_buf
->data
= new_data
;
815 bnx2x_frag_free(fp
, new_data
);
817 /* drop the packet and keep the buffer in the bin */
818 DP(NETIF_MSG_RX_STATUS
,
819 "Failed to allocate or map a new skb - dropping packet!\n");
820 bnx2x_fp_stats(bp
, fp
)->eth_q_stats
.rx_skb_alloc_failed
++;
823 static int bnx2x_alloc_rx_data(struct bnx2x
*bp
, struct bnx2x_fastpath
*fp
,
824 u16 index
, gfp_t gfp_mask
)
827 struct sw_rx_bd
*rx_buf
= &fp
->rx_buf_ring
[index
];
828 struct eth_rx_bd
*rx_bd
= &fp
->rx_desc_ring
[index
];
831 data
= bnx2x_frag_alloc(fp
, gfp_mask
);
832 if (unlikely(data
== NULL
))
835 mapping
= dma_map_single(&bp
->pdev
->dev
, data
+ NET_SKB_PAD
,
838 if (unlikely(dma_mapping_error(&bp
->pdev
->dev
, mapping
))) {
839 bnx2x_frag_free(fp
, data
);
840 BNX2X_ERR("Can't map rx data\n");
845 dma_unmap_addr_set(rx_buf
, mapping
, mapping
);
847 rx_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
848 rx_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
854 void bnx2x_csum_validate(struct sk_buff
*skb
, union eth_rx_cqe
*cqe
,
855 struct bnx2x_fastpath
*fp
,
856 struct bnx2x_eth_q_stats
*qstats
)
858 /* Do nothing if no L4 csum validation was done.
859 * We do not check whether IP csum was validated. For IPv4 we assume
860 * that if the card got as far as validating the L4 csum, it also
861 * validated the IP csum. IPv6 has no IP csum.
863 if (cqe
->fast_path_cqe
.status_flags
&
864 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG
)
867 /* If L4 validation was done, check if an error was found. */
869 if (cqe
->fast_path_cqe
.type_error_flags
&
870 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG
|
871 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG
))
872 qstats
->hw_csum_err
++;
874 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
877 static int bnx2x_rx_int(struct bnx2x_fastpath
*fp
, int budget
)
879 struct bnx2x
*bp
= fp
->bp
;
880 u16 bd_cons
, bd_prod
, bd_prod_fw
, comp_ring_cons
;
881 u16 sw_comp_cons
, sw_comp_prod
;
883 union eth_rx_cqe
*cqe
;
884 struct eth_fast_path_rx_cqe
*cqe_fp
;
886 #ifdef BNX2X_STOP_ON_ERROR
887 if (unlikely(bp
->panic
))
893 bd_cons
= fp
->rx_bd_cons
;
894 bd_prod
= fp
->rx_bd_prod
;
895 bd_prod_fw
= bd_prod
;
896 sw_comp_cons
= fp
->rx_comp_cons
;
897 sw_comp_prod
= fp
->rx_comp_prod
;
899 comp_ring_cons
= RCQ_BD(sw_comp_cons
);
900 cqe
= &fp
->rx_comp_ring
[comp_ring_cons
];
901 cqe_fp
= &cqe
->fast_path_cqe
;
903 DP(NETIF_MSG_RX_STATUS
,
904 "queue[%d]: sw_comp_cons %u\n", fp
->index
, sw_comp_cons
);
906 while (BNX2X_IS_CQE_COMPLETED(cqe_fp
)) {
907 struct sw_rx_bd
*rx_buf
= NULL
;
910 enum eth_rx_cqe_type cqe_fp_type
;
914 enum pkt_hash_types rxhash_type
;
916 #ifdef BNX2X_STOP_ON_ERROR
917 if (unlikely(bp
->panic
))
921 bd_prod
= RX_BD(bd_prod
);
922 bd_cons
= RX_BD(bd_cons
);
		/* A rmb() is required to ensure that the CQE is not read
		 * before it is written by the adapter DMA. PCI ordering
		 * rules will make sure the other fields are written before
		 * the marker at the end of struct eth_fast_path_rx_cqe
		 * but without rmb() a weakly ordered processor can process
		 * stale data. Without the barrier the TPA state machine
		 * might enter an inconsistent state and the kernel stack
		 * might be provided with an incorrect packet description -
		 * these lead to various kernel crashes.
		 */
936 cqe_fp_flags
= cqe_fp
->type_error_flags
;
937 cqe_fp_type
= cqe_fp_flags
& ETH_FAST_PATH_RX_CQE_TYPE
;
939 DP(NETIF_MSG_RX_STATUS
,
940 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
941 CQE_TYPE(cqe_fp_flags
),
942 cqe_fp_flags
, cqe_fp
->status_flags
,
943 le32_to_cpu(cqe_fp
->rss_hash_result
),
944 le16_to_cpu(cqe_fp
->vlan_tag
),
945 le16_to_cpu(cqe_fp
->pkt_len_or_gro_seg_len
));
947 /* is this a slowpath msg? */
948 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type
))) {
949 bnx2x_sp_event(fp
, cqe
);
953 rx_buf
= &fp
->rx_buf_ring
[bd_cons
];
956 if (!CQE_TYPE_FAST(cqe_fp_type
)) {
957 struct bnx2x_agg_info
*tpa_info
;
958 u16 frag_size
, pages
;
959 #ifdef BNX2X_STOP_ON_ERROR
961 if (fp
->mode
== TPA_MODE_DISABLED
&&
962 (CQE_TYPE_START(cqe_fp_type
) ||
963 CQE_TYPE_STOP(cqe_fp_type
)))
964 BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
965 CQE_TYPE(cqe_fp_type
));
968 if (CQE_TYPE_START(cqe_fp_type
)) {
969 u16 queue
= cqe_fp
->queue_index
;
970 DP(NETIF_MSG_RX_STATUS
,
971 "calling tpa_start on queue %d\n",
974 bnx2x_tpa_start(fp
, queue
,
980 queue
= cqe
->end_agg_cqe
.queue_index
;
981 tpa_info
= &fp
->tpa_info
[queue
];
982 DP(NETIF_MSG_RX_STATUS
,
983 "calling tpa_stop on queue %d\n",
986 frag_size
= le16_to_cpu(cqe
->end_agg_cqe
.pkt_len
) -
989 if (fp
->mode
== TPA_MODE_GRO
)
990 pages
= (frag_size
+ tpa_info
->full_page
- 1) /
993 pages
= SGE_PAGE_ALIGN(frag_size
) >>
996 bnx2x_tpa_stop(bp
, fp
, tpa_info
, pages
,
997 &cqe
->end_agg_cqe
, comp_ring_cons
);
998 #ifdef BNX2X_STOP_ON_ERROR
1003 bnx2x_update_sge_prod(fp
, pages
, &cqe
->end_agg_cqe
);
1007 len
= le16_to_cpu(cqe_fp
->pkt_len_or_gro_seg_len
);
1008 pad
= cqe_fp
->placement_offset
;
1009 dma_sync_single_for_cpu(&bp
->pdev
->dev
,
1010 dma_unmap_addr(rx_buf
, mapping
),
1011 pad
+ RX_COPY_THRESH
,
1014 prefetch(data
+ pad
); /* speedup eth_type_trans() */
1015 /* is this an error packet? */
1016 if (unlikely(cqe_fp_flags
& ETH_RX_ERROR_FALGS
)) {
1017 DP(NETIF_MSG_RX_ERR
| NETIF_MSG_RX_STATUS
,
1018 "ERROR flags %x rx packet %u\n",
1019 cqe_fp_flags
, sw_comp_cons
);
1020 bnx2x_fp_qstats(bp
, fp
)->rx_err_discard_pkt
++;
1024 /* Since we don't have a jumbo ring
1025 * copy small packets if mtu > 1500
1027 if ((bp
->dev
->mtu
> ETH_MAX_PACKET_SIZE
) &&
1028 (len
<= RX_COPY_THRESH
)) {
1029 skb
= napi_alloc_skb(&fp
->napi
, len
);
1031 DP(NETIF_MSG_RX_ERR
| NETIF_MSG_RX_STATUS
,
1032 "ERROR packet dropped because of alloc failure\n");
1033 bnx2x_fp_qstats(bp
, fp
)->rx_skb_alloc_failed
++;
1036 memcpy(skb
->data
, data
+ pad
, len
);
1037 bnx2x_reuse_rx_data(fp
, bd_cons
, bd_prod
);
1039 if (likely(bnx2x_alloc_rx_data(bp
, fp
, bd_prod
,
1040 GFP_ATOMIC
) == 0)) {
1041 dma_unmap_single(&bp
->pdev
->dev
,
1042 dma_unmap_addr(rx_buf
, mapping
),
1045 skb
= build_skb(data
, fp
->rx_frag_size
);
1046 if (unlikely(!skb
)) {
1047 bnx2x_frag_free(fp
, data
);
1048 bnx2x_fp_qstats(bp
, fp
)->
1049 rx_skb_alloc_failed
++;
1052 skb_reserve(skb
, pad
);
1054 DP(NETIF_MSG_RX_ERR
| NETIF_MSG_RX_STATUS
,
1055 "ERROR packet dropped because of alloc failure\n");
1056 bnx2x_fp_qstats(bp
, fp
)->rx_skb_alloc_failed
++;
1058 bnx2x_reuse_rx_data(fp
, bd_cons
, bd_prod
);
1064 skb
->protocol
= eth_type_trans(skb
, bp
->dev
);
		/* Set Toeplitz hash for a non-LRO skb */
1067 rxhash
= bnx2x_get_rxhash(bp
, cqe_fp
, &rxhash_type
);
1068 skb_set_hash(skb
, rxhash
, rxhash_type
);
1070 skb_checksum_none_assert(skb
);
1072 if (bp
->dev
->features
& NETIF_F_RXCSUM
)
1073 bnx2x_csum_validate(skb
, cqe
, fp
,
1074 bnx2x_fp_qstats(bp
, fp
));
1076 skb_record_rx_queue(skb
, fp
->rx_queue
);
1078 /* Check if this packet was timestamped */
1079 if (unlikely(cqe
->fast_path_cqe
.type_error_flags
&
1080 (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT
)))
1081 bnx2x_set_rx_ts(bp
, skb
);
1083 if (le16_to_cpu(cqe_fp
->pars_flags
.flags
) &
1085 __vlan_hwaccel_put_tag(skb
, htons(ETH_P_8021Q
),
1086 le16_to_cpu(cqe_fp
->vlan_tag
));
1088 napi_gro_receive(&fp
->napi
, skb
);
1090 rx_buf
->data
= NULL
;
1092 bd_cons
= NEXT_RX_IDX(bd_cons
);
1093 bd_prod
= NEXT_RX_IDX(bd_prod
);
1094 bd_prod_fw
= NEXT_RX_IDX(bd_prod_fw
);
1097 sw_comp_prod
= NEXT_RCQ_IDX(sw_comp_prod
);
1098 sw_comp_cons
= NEXT_RCQ_IDX(sw_comp_cons
);
1100 /* mark CQE as free */
1101 BNX2X_SEED_CQE(cqe_fp
);
1103 if (rx_pkt
== budget
)
1106 comp_ring_cons
= RCQ_BD(sw_comp_cons
);
1107 cqe
= &fp
->rx_comp_ring
[comp_ring_cons
];
1108 cqe_fp
= &cqe
->fast_path_cqe
;
1111 fp
->rx_bd_cons
= bd_cons
;
1112 fp
->rx_bd_prod
= bd_prod_fw
;
1113 fp
->rx_comp_cons
= sw_comp_cons
;
1114 fp
->rx_comp_prod
= sw_comp_prod
;
1116 /* Update producers */
1117 bnx2x_update_rx_prod(bp
, fp
, bd_prod_fw
, sw_comp_prod
,
1123 static irqreturn_t
bnx2x_msix_fp_int(int irq
, void *fp_cookie
)
1125 struct bnx2x_fastpath
*fp
= fp_cookie
;
1126 struct bnx2x
*bp
= fp
->bp
;
1130 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1131 fp
->index
, fp
->fw_sb_id
, fp
->igu_sb_id
);
1133 bnx2x_ack_sb(bp
, fp
->igu_sb_id
, USTORM_ID
, 0, IGU_INT_DISABLE
, 0);
1135 #ifdef BNX2X_STOP_ON_ERROR
1136 if (unlikely(bp
->panic
))
1140 /* Handle Rx and Tx according to MSI-X vector */
1141 for_each_cos_in_tx_queue(fp
, cos
)
1142 prefetch(fp
->txdata_ptr
[cos
]->tx_cons_sb
);
1144 prefetch(&fp
->sb_running_index
[SM_RX_ID
]);
1145 napi_schedule_irqoff(&bnx2x_fp(bp
, fp
->index
, napi
));
1150 /* HW Lock for shared dual port PHYs */
1151 void bnx2x_acquire_phy_lock(struct bnx2x
*bp
)
1153 mutex_lock(&bp
->port
.phy_mutex
);
1155 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_MDIO
);
1158 void bnx2x_release_phy_lock(struct bnx2x
*bp
)
1160 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_MDIO
);
1162 mutex_unlock(&bp
->port
.phy_mutex
);
/* calculates MF speed according to current line speed and MF configuration */
1166 u16
bnx2x_get_mf_speed(struct bnx2x
*bp
)
1168 u16 line_speed
= bp
->link_vars
.line_speed
;
1170 u16 maxCfg
= bnx2x_extract_max_cfg(bp
,
1171 bp
->mf_config
[BP_VN(bp
)]);
1173 /* Calculate the current MAX line speed limit for the MF
1176 if (IS_MF_PERCENT_BW(bp
))
1177 line_speed
= (line_speed
* maxCfg
) / 100;
1178 else { /* SD mode */
1179 u16 vn_max_rate
= maxCfg
* 100;
1181 if (vn_max_rate
< line_speed
)
1182 line_speed
= vn_max_rate
;
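
/* Illustration only: in percent-BW MF mode a maxCfg of 40 on a 10000 Mbps
 * link yields a reported speed of 4000 Mbps; in SD mode maxCfg is in units
 * of 100 Mbps, so maxCfg = 25 caps the reported speed at 2500 Mbps.
 */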
1190 * bnx2x_fill_report_data - fill link report data to report
1192 * @bp: driver handle
1193 * @data: link state to update
 * It uses non-atomic bit operations because it is called under the mutex.
1197 static void bnx2x_fill_report_data(struct bnx2x
*bp
,
1198 struct bnx2x_link_report_data
*data
)
1200 memset(data
, 0, sizeof(*data
));
1203 /* Fill the report data: effective line speed */
1204 data
->line_speed
= bnx2x_get_mf_speed(bp
);
1207 if (!bp
->link_vars
.link_up
|| (bp
->flags
& MF_FUNC_DIS
))
1208 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN
,
1209 &data
->link_report_flags
);
1211 if (!BNX2X_NUM_ETH_QUEUES(bp
))
1212 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN
,
1213 &data
->link_report_flags
);
1216 if (bp
->link_vars
.duplex
== DUPLEX_FULL
)
1217 __set_bit(BNX2X_LINK_REPORT_FD
,
1218 &data
->link_report_flags
);
1220 /* Rx Flow Control is ON */
1221 if (bp
->link_vars
.flow_ctrl
& BNX2X_FLOW_CTRL_RX
)
1222 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON
,
1223 &data
->link_report_flags
);
1225 /* Tx Flow Control is ON */
1226 if (bp
->link_vars
.flow_ctrl
& BNX2X_FLOW_CTRL_TX
)
1227 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON
,
1228 &data
->link_report_flags
);
1230 *data
= bp
->vf_link_vars
;
1235 * bnx2x_link_report - report link status to OS.
1237 * @bp: driver handle
1239 * Calls the __bnx2x_link_report() under the same locking scheme
1240 * as a link/PHY state managing code to ensure a consistent link
1244 void bnx2x_link_report(struct bnx2x
*bp
)
1246 bnx2x_acquire_phy_lock(bp
);
1247 __bnx2x_link_report(bp
);
1248 bnx2x_release_phy_lock(bp
);
1252 * __bnx2x_link_report - report link status to OS.
1254 * @bp: driver handle
 * Non-atomic implementation.
1257 * Should be called under the phy_lock.
1259 void __bnx2x_link_report(struct bnx2x
*bp
)
1261 struct bnx2x_link_report_data cur_data
;
1264 if (IS_PF(bp
) && !CHIP_IS_E1(bp
))
1265 bnx2x_read_mf_cfg(bp
);
1267 /* Read the current link report info */
1268 bnx2x_fill_report_data(bp
, &cur_data
);
1270 /* Don't report link down or exactly the same link status twice */
1271 if (!memcmp(&cur_data
, &bp
->last_reported_link
, sizeof(cur_data
)) ||
1272 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN
,
1273 &bp
->last_reported_link
.link_report_flags
) &&
1274 test_bit(BNX2X_LINK_REPORT_LINK_DOWN
,
1275 &cur_data
.link_report_flags
)))
1280 /* We are going to report a new link parameters now -
1281 * remember the current data for the next time.
1283 memcpy(&bp
->last_reported_link
, &cur_data
, sizeof(cur_data
));
1285 /* propagate status to VFs */
1287 bnx2x_iov_link_update(bp
);
1289 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN
,
1290 &cur_data
.link_report_flags
)) {
1291 netif_carrier_off(bp
->dev
);
1292 netdev_err(bp
->dev
, "NIC Link is Down\n");
1298 netif_carrier_on(bp
->dev
);
1300 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD
,
1301 &cur_data
.link_report_flags
))
1306 /* Handle the FC at the end so that only these flags would be
1307 * possibly set. This way we may easily check if there is no FC
1310 if (cur_data
.link_report_flags
) {
1311 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON
,
1312 &cur_data
.link_report_flags
)) {
1313 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON
,
1314 &cur_data
.link_report_flags
))
1315 flow
= "ON - receive & transmit";
1317 flow
= "ON - receive";
1319 flow
= "ON - transmit";
1324 netdev_info(bp
->dev
, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1325 cur_data
.line_speed
, duplex
, flow
);
1329 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath
*fp
)
1333 for (i
= 1; i
<= NUM_RX_SGE_PAGES
; i
++) {
1334 struct eth_rx_sge
*sge
;
1336 sge
= &fp
->rx_sge_ring
[RX_SGE_CNT
* i
- 2];
1338 cpu_to_le32(U64_HI(fp
->rx_sge_mapping
+
1339 BCM_PAGE_SIZE
*(i
% NUM_RX_SGE_PAGES
)));
1342 cpu_to_le32(U64_LO(fp
->rx_sge_mapping
+
1343 BCM_PAGE_SIZE
*(i
% NUM_RX_SGE_PAGES
)));
1347 static void bnx2x_free_tpa_pool(struct bnx2x
*bp
,
1348 struct bnx2x_fastpath
*fp
, int last
)
1352 for (i
= 0; i
< last
; i
++) {
1353 struct bnx2x_agg_info
*tpa_info
= &fp
->tpa_info
[i
];
1354 struct sw_rx_bd
*first_buf
= &tpa_info
->first_buf
;
1355 u8
*data
= first_buf
->data
;
1358 DP(NETIF_MSG_IFDOWN
, "tpa bin %d empty on free\n", i
);
1361 if (tpa_info
->tpa_state
== BNX2X_TPA_START
)
1362 dma_unmap_single(&bp
->pdev
->dev
,
1363 dma_unmap_addr(first_buf
, mapping
),
1364 fp
->rx_buf_size
, DMA_FROM_DEVICE
);
1365 bnx2x_frag_free(fp
, data
);
1366 first_buf
->data
= NULL
;
1370 void bnx2x_init_rx_rings_cnic(struct bnx2x
*bp
)
1374 for_each_rx_queue_cnic(bp
, j
) {
1375 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
1379 /* Activate BD ring */
1381 * this will generate an interrupt (to the TSTORM)
1382 * must only be done after chip is initialized
1384 bnx2x_update_rx_prod(bp
, fp
, fp
->rx_bd_prod
, fp
->rx_comp_prod
,
1389 void bnx2x_init_rx_rings(struct bnx2x
*bp
)
1391 int func
= BP_FUNC(bp
);
1395 /* Allocate TPA resources */
1396 for_each_eth_queue(bp
, j
) {
1397 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
1400 "mtu %d rx_buf_size %d\n", bp
->dev
->mtu
, fp
->rx_buf_size
);
1402 if (fp
->mode
!= TPA_MODE_DISABLED
) {
1403 /* Fill the per-aggregation pool */
1404 for (i
= 0; i
< MAX_AGG_QS(bp
); i
++) {
1405 struct bnx2x_agg_info
*tpa_info
=
1407 struct sw_rx_bd
*first_buf
=
1408 &tpa_info
->first_buf
;
1411 bnx2x_frag_alloc(fp
, GFP_KERNEL
);
1412 if (!first_buf
->data
) {
1413 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1415 bnx2x_free_tpa_pool(bp
, fp
, i
);
1416 fp
->mode
= TPA_MODE_DISABLED
;
1419 dma_unmap_addr_set(first_buf
, mapping
, 0);
1420 tpa_info
->tpa_state
= BNX2X_TPA_STOP
;
1423 /* "next page" elements initialization */
1424 bnx2x_set_next_page_sgl(fp
);
1426 /* set SGEs bit mask */
1427 bnx2x_init_sge_ring_bit_mask(fp
);
1429 /* Allocate SGEs and initialize the ring elements */
1430 for (i
= 0, ring_prod
= 0;
1431 i
< MAX_RX_SGE_CNT
*NUM_RX_SGE_PAGES
; i
++) {
1433 if (bnx2x_alloc_rx_sge(bp
, fp
, ring_prod
,
1435 BNX2X_ERR("was only able to allocate %d rx sges\n",
1437 BNX2X_ERR("disabling TPA for queue[%d]\n",
1439 /* Cleanup already allocated elements */
1440 bnx2x_free_rx_sge_range(bp
, fp
,
1442 bnx2x_free_tpa_pool(bp
, fp
,
1444 fp
->mode
= TPA_MODE_DISABLED
;
1448 ring_prod
= NEXT_SGE_IDX(ring_prod
);
1451 fp
->rx_sge_prod
= ring_prod
;
1455 for_each_eth_queue(bp
, j
) {
1456 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
1460 /* Activate BD ring */
1462 * this will generate an interrupt (to the TSTORM)
1463 * must only be done after chip is initialized
1465 bnx2x_update_rx_prod(bp
, fp
, fp
->rx_bd_prod
, fp
->rx_comp_prod
,
1471 if (CHIP_IS_E1(bp
)) {
1472 REG_WR(bp
, BAR_USTRORM_INTMEM
+
1473 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func
),
1474 U64_LO(fp
->rx_comp_mapping
));
1475 REG_WR(bp
, BAR_USTRORM_INTMEM
+
1476 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func
) + 4,
1477 U64_HI(fp
->rx_comp_mapping
));
1482 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath
*fp
)
1485 struct bnx2x
*bp
= fp
->bp
;
1487 for_each_cos_in_tx_queue(fp
, cos
) {
1488 struct bnx2x_fp_txdata
*txdata
= fp
->txdata_ptr
[cos
];
1489 unsigned pkts_compl
= 0, bytes_compl
= 0;
1491 u16 sw_prod
= txdata
->tx_pkt_prod
;
1492 u16 sw_cons
= txdata
->tx_pkt_cons
;
1494 while (sw_cons
!= sw_prod
) {
1495 bnx2x_free_tx_pkt(bp
, txdata
, TX_BD(sw_cons
),
1496 &pkts_compl
, &bytes_compl
);
1500 netdev_tx_reset_queue(
1501 netdev_get_tx_queue(bp
->dev
,
1502 txdata
->txq_index
));
1506 static void bnx2x_free_tx_skbs_cnic(struct bnx2x
*bp
)
1510 for_each_tx_queue_cnic(bp
, i
) {
1511 bnx2x_free_tx_skbs_queue(&bp
->fp
[i
]);
1515 static void bnx2x_free_tx_skbs(struct bnx2x
*bp
)
1519 for_each_eth_queue(bp
, i
) {
1520 bnx2x_free_tx_skbs_queue(&bp
->fp
[i
]);
1524 static void bnx2x_free_rx_bds(struct bnx2x_fastpath
*fp
)
1526 struct bnx2x
*bp
= fp
->bp
;
1529 /* ring wasn't allocated */
1530 if (fp
->rx_buf_ring
== NULL
)
1533 for (i
= 0; i
< NUM_RX_BD
; i
++) {
1534 struct sw_rx_bd
*rx_buf
= &fp
->rx_buf_ring
[i
];
1535 u8
*data
= rx_buf
->data
;
1539 dma_unmap_single(&bp
->pdev
->dev
,
1540 dma_unmap_addr(rx_buf
, mapping
),
1541 fp
->rx_buf_size
, DMA_FROM_DEVICE
);
1543 rx_buf
->data
= NULL
;
1544 bnx2x_frag_free(fp
, data
);
1548 static void bnx2x_free_rx_skbs_cnic(struct bnx2x
*bp
)
1552 for_each_rx_queue_cnic(bp
, j
) {
1553 bnx2x_free_rx_bds(&bp
->fp
[j
]);
1557 static void bnx2x_free_rx_skbs(struct bnx2x
*bp
)
1561 for_each_eth_queue(bp
, j
) {
1562 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
1564 bnx2x_free_rx_bds(fp
);
1566 if (fp
->mode
!= TPA_MODE_DISABLED
)
1567 bnx2x_free_tpa_pool(bp
, fp
, MAX_AGG_QS(bp
));
1571 static void bnx2x_free_skbs_cnic(struct bnx2x
*bp
)
1573 bnx2x_free_tx_skbs_cnic(bp
);
1574 bnx2x_free_rx_skbs_cnic(bp
);
1577 void bnx2x_free_skbs(struct bnx2x
*bp
)
1579 bnx2x_free_tx_skbs(bp
);
1580 bnx2x_free_rx_skbs(bp
);
1583 void bnx2x_update_max_mf_config(struct bnx2x
*bp
, u32 value
)
1585 /* load old values */
1586 u32 mf_cfg
= bp
->mf_config
[BP_VN(bp
)];
1588 if (value
!= bnx2x_extract_max_cfg(bp
, mf_cfg
)) {
1589 /* leave all but MAX value */
1590 mf_cfg
&= ~FUNC_MF_CFG_MAX_BW_MASK
;
1592 /* set new MAX value */
1593 mf_cfg
|= (value
<< FUNC_MF_CFG_MAX_BW_SHIFT
)
1594 & FUNC_MF_CFG_MAX_BW_MASK
;
1596 bnx2x_fw_command(bp
, DRV_MSG_CODE_SET_MF_BW
, mf_cfg
);
1601 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1603 * @bp: driver handle
1604 * @nvecs: number of vectors to be released
1606 static void bnx2x_free_msix_irqs(struct bnx2x
*bp
, int nvecs
)
1610 if (nvecs
== offset
)
1613 /* VFs don't have a default SB */
1615 free_irq(bp
->msix_table
[offset
].vector
, bp
->dev
);
1616 DP(NETIF_MSG_IFDOWN
, "released sp irq (%d)\n",
1617 bp
->msix_table
[offset
].vector
);
1621 if (CNIC_SUPPORT(bp
)) {
1622 if (nvecs
== offset
)
1627 for_each_eth_queue(bp
, i
) {
1628 if (nvecs
== offset
)
1630 DP(NETIF_MSG_IFDOWN
, "about to release fp #%d->%d irq\n",
1631 i
, bp
->msix_table
[offset
].vector
);
1633 free_irq(bp
->msix_table
[offset
++].vector
, &bp
->fp
[i
]);
1637 void bnx2x_free_irq(struct bnx2x
*bp
)
1639 if (bp
->flags
& USING_MSIX_FLAG
&&
1640 !(bp
->flags
& USING_SINGLE_MSIX_FLAG
)) {
1641 int nvecs
= BNX2X_NUM_ETH_QUEUES(bp
) + CNIC_SUPPORT(bp
);
1643 /* vfs don't have a default status block */
1647 bnx2x_free_msix_irqs(bp
, nvecs
);
1649 free_irq(bp
->dev
->irq
, bp
->dev
);
1653 int bnx2x_enable_msix(struct bnx2x
*bp
)
1655 int msix_vec
= 0, i
, rc
;
1657 /* VFs don't have a default status block */
1659 bp
->msix_table
[msix_vec
].entry
= msix_vec
;
1660 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1661 bp
->msix_table
[0].entry
);
1665 /* Cnic requires an msix vector for itself */
1666 if (CNIC_SUPPORT(bp
)) {
1667 bp
->msix_table
[msix_vec
].entry
= msix_vec
;
1668 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1669 msix_vec
, bp
->msix_table
[msix_vec
].entry
);
1673 /* We need separate vectors for ETH queues only (not FCoE) */
1674 for_each_eth_queue(bp
, i
) {
1675 bp
->msix_table
[msix_vec
].entry
= msix_vec
;
1676 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1677 msix_vec
, msix_vec
, i
);
1681 DP(BNX2X_MSG_SP
, "about to request enable msix with %d vectors\n",
1684 rc
= pci_enable_msix_range(bp
->pdev
, &bp
->msix_table
[0],
1685 BNX2X_MIN_MSIX_VEC_CNT(bp
), msix_vec
);
1687 * reconfigure number of tx/rx queues according to available
1690 if (rc
== -ENOSPC
) {
1691 /* Get by with single vector */
1692 rc
= pci_enable_msix_range(bp
->pdev
, &bp
->msix_table
[0], 1, 1);
1694 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1699 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1700 bp
->flags
|= USING_SINGLE_MSIX_FLAG
;
1702 BNX2X_DEV_INFO("set number of queues to 1\n");
1703 bp
->num_ethernet_queues
= 1;
1704 bp
->num_queues
= bp
->num_ethernet_queues
+ bp
->num_cnic_queues
;
1705 } else if (rc
< 0) {
1706 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc
);
1708 } else if (rc
< msix_vec
) {
		/* how many fewer vectors will we have? */
1710 int diff
= msix_vec
- rc
;
1712 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc
);
1715 * decrease number of queues by number of unallocated entries
1717 bp
->num_ethernet_queues
-= diff
;
1718 bp
->num_queues
= bp
->num_ethernet_queues
+ bp
->num_cnic_queues
;
1720 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1724 bp
->flags
|= USING_MSIX_FLAG
;
1729 /* fall to INTx if not enough memory */
1731 bp
->flags
|= DISABLE_MSI_FLAG
;
1736 static int bnx2x_req_msix_irqs(struct bnx2x
*bp
)
1738 int i
, rc
, offset
= 0;
1740 /* no default status block for vf */
1742 rc
= request_irq(bp
->msix_table
[offset
++].vector
,
1743 bnx2x_msix_sp_int
, 0,
1744 bp
->dev
->name
, bp
->dev
);
1746 BNX2X_ERR("request sp irq failed\n");
1751 if (CNIC_SUPPORT(bp
))
1754 for_each_eth_queue(bp
, i
) {
1755 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
1756 snprintf(fp
->name
, sizeof(fp
->name
), "%s-fp-%d",
1759 rc
= request_irq(bp
->msix_table
[offset
].vector
,
1760 bnx2x_msix_fp_int
, 0, fp
->name
, fp
);
1762 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i
,
1763 bp
->msix_table
[offset
].vector
, rc
);
1764 bnx2x_free_msix_irqs(bp
, offset
);
1771 i
= BNX2X_NUM_ETH_QUEUES(bp
);
1773 offset
= 1 + CNIC_SUPPORT(bp
);
1774 netdev_info(bp
->dev
,
1775 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1776 bp
->msix_table
[0].vector
,
1777 0, bp
->msix_table
[offset
].vector
,
1778 i
- 1, bp
->msix_table
[offset
+ i
- 1].vector
);
1780 offset
= CNIC_SUPPORT(bp
);
1781 netdev_info(bp
->dev
,
1782 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1783 0, bp
->msix_table
[offset
].vector
,
1784 i
- 1, bp
->msix_table
[offset
+ i
- 1].vector
);
1789 int bnx2x_enable_msi(struct bnx2x
*bp
)
1793 rc
= pci_enable_msi(bp
->pdev
);
1795 BNX2X_DEV_INFO("MSI is not attainable\n");
1798 bp
->flags
|= USING_MSI_FLAG
;
1803 static int bnx2x_req_irq(struct bnx2x
*bp
)
1805 unsigned long flags
;
1808 if (bp
->flags
& (USING_MSI_FLAG
| USING_MSIX_FLAG
))
1811 flags
= IRQF_SHARED
;
1813 if (bp
->flags
& USING_MSIX_FLAG
)
1814 irq
= bp
->msix_table
[0].vector
;
1816 irq
= bp
->pdev
->irq
;
1818 return request_irq(irq
, bnx2x_interrupt
, flags
, bp
->dev
->name
, bp
->dev
);
1821 static int bnx2x_setup_irqs(struct bnx2x
*bp
)
1824 if (bp
->flags
& USING_MSIX_FLAG
&&
1825 !(bp
->flags
& USING_SINGLE_MSIX_FLAG
)) {
1826 rc
= bnx2x_req_msix_irqs(bp
);
1830 rc
= bnx2x_req_irq(bp
);
1832 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc
);
1835 if (bp
->flags
& USING_MSI_FLAG
) {
1836 bp
->dev
->irq
= bp
->pdev
->irq
;
1837 netdev_info(bp
->dev
, "using MSI IRQ %d\n",
1840 if (bp
->flags
& USING_MSIX_FLAG
) {
1841 bp
->dev
->irq
= bp
->msix_table
[0].vector
;
1842 netdev_info(bp
->dev
, "using MSIX IRQ %d\n",
1850 static void bnx2x_napi_enable_cnic(struct bnx2x
*bp
)
1854 for_each_rx_queue_cnic(bp
, i
) {
1855 napi_enable(&bnx2x_fp(bp
, i
, napi
));
1859 static void bnx2x_napi_enable(struct bnx2x
*bp
)
1863 for_each_eth_queue(bp
, i
) {
1864 napi_enable(&bnx2x_fp(bp
, i
, napi
));
1868 static void bnx2x_napi_disable_cnic(struct bnx2x
*bp
)
1872 for_each_rx_queue_cnic(bp
, i
) {
1873 napi_disable(&bnx2x_fp(bp
, i
, napi
));
1877 static void bnx2x_napi_disable(struct bnx2x
*bp
)
1881 for_each_eth_queue(bp
, i
) {
1882 napi_disable(&bnx2x_fp(bp
, i
, napi
));
1886 void bnx2x_netif_start(struct bnx2x
*bp
)
1888 if (netif_running(bp
->dev
)) {
1889 bnx2x_napi_enable(bp
);
1890 if (CNIC_LOADED(bp
))
1891 bnx2x_napi_enable_cnic(bp
);
1892 bnx2x_int_enable(bp
);
1893 if (bp
->state
== BNX2X_STATE_OPEN
)
1894 netif_tx_wake_all_queues(bp
->dev
);
1898 void bnx2x_netif_stop(struct bnx2x
*bp
, int disable_hw
)
1900 bnx2x_int_disable_sync(bp
, disable_hw
);
1901 bnx2x_napi_disable(bp
);
1902 if (CNIC_LOADED(bp
))
1903 bnx2x_napi_disable_cnic(bp
);
1906 u16
bnx2x_select_queue(struct net_device
*dev
, struct sk_buff
*skb
,
1907 void *accel_priv
, select_queue_fallback_t fallback
)
1909 struct bnx2x
*bp
= netdev_priv(dev
);
1911 if (CNIC_LOADED(bp
) && !NO_FCOE(bp
)) {
1912 struct ethhdr
*hdr
= (struct ethhdr
*)skb
->data
;
1913 u16 ether_type
= ntohs(hdr
->h_proto
);
1915 /* Skip VLAN tag if present */
1916 if (ether_type
== ETH_P_8021Q
) {
1917 struct vlan_ethhdr
*vhdr
=
1918 (struct vlan_ethhdr
*)skb
->data
;
1920 ether_type
= ntohs(vhdr
->h_vlan_encapsulated_proto
);
1923 /* If ethertype is FCoE or FIP - use FCoE ring */
1924 if ((ether_type
== ETH_P_FCOE
) || (ether_type
== ETH_P_FIP
))
1925 return bnx2x_fcoe_tx(bp
, txq_index
);
1928 /* select a non-FCoE queue */
1929 return fallback(dev
, skb
) % (BNX2X_NUM_ETH_QUEUES(bp
) * bp
->max_cos
);
1932 void bnx2x_set_num_queues(struct bnx2x
*bp
)
1935 bp
->num_ethernet_queues
= bnx2x_calc_num_queues(bp
);
1937 /* override in STORAGE SD modes */
1938 if (IS_MF_STORAGE_ONLY(bp
))
1939 bp
->num_ethernet_queues
= 1;
1941 /* Add special queues */
1942 bp
->num_cnic_queues
= CNIC_SUPPORT(bp
); /* For FCOE */
1943 bp
->num_queues
= bp
->num_ethernet_queues
+ bp
->num_cnic_queues
;
1945 BNX2X_DEV_INFO("set number of queues to %d\n", bp
->num_queues
);
1949 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1951 * @bp: Driver handle
 * We currently support at most 16 Tx queues for each CoS, thus we will
1954 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1957 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1958 * index after all ETH L2 indices.
 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1962 * 16..31,...) with indices that are not coupled with any real Tx queue.
1964 * The proper configuration of skb->queue_mapping is handled by
1965 * bnx2x_select_queue() and __skb_tx_hash().
1967 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1968 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1970 static int bnx2x_set_real_num_queues(struct bnx2x
*bp
, int include_cnic
)
1974 tx
= BNX2X_NUM_ETH_QUEUES(bp
) * bp
->max_cos
;
1975 rx
= BNX2X_NUM_ETH_QUEUES(bp
);
1977 /* account for fcoe queue */
1978 if (include_cnic
&& !NO_FCOE(bp
)) {
1983 rc
= netif_set_real_num_tx_queues(bp
->dev
, tx
);
1985 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc
);
1988 rc
= netif_set_real_num_rx_queues(bp
->dev
, rx
);
1990 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc
);
1994 DP(NETIF_MSG_IFUP
, "Setting real num queues to (tx, rx) (%d, %d)\n",
2000 static void bnx2x_set_rx_buf_size(struct bnx2x
*bp
)
2004 for_each_queue(bp
, i
) {
2005 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
2008 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
2011 * Although there are no IP frames expected to arrive to
2012 * this ring we still want to add an
2013 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2016 mtu
= BNX2X_FCOE_MINI_JUMBO_MTU
;
2019 fp
->rx_buf_size
= BNX2X_FW_RX_ALIGN_START
+
2020 IP_HEADER_ALIGNMENT_PADDING
+
2023 BNX2X_FW_RX_ALIGN_END
;
2024 fp
->rx_buf_size
= SKB_DATA_ALIGN(fp
->rx_buf_size
);
2025 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
2026 if (fp
->rx_buf_size
+ NET_SKB_PAD
<= PAGE_SIZE
)
2027 fp
->rx_frag_size
= fp
->rx_buf_size
+ NET_SKB_PAD
;
2029 fp
->rx_frag_size
= 0;
2033 static int bnx2x_init_rss(struct bnx2x
*bp
)
2036 u8 num_eth_queues
= BNX2X_NUM_ETH_QUEUES(bp
);
2038 /* Prepare the initial contents for the indirection table if RSS is
2041 for (i
= 0; i
< sizeof(bp
->rss_conf_obj
.ind_table
); i
++)
2042 bp
->rss_conf_obj
.ind_table
[i
] =
2044 ethtool_rxfh_indir_default(i
, num_eth_queues
);
2047 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2048 * per-port, so if explicit configuration is needed , do it only
2051 * For 57712 and newer on the other hand it's a per-function
2054 return bnx2x_config_rss_eth(bp
, bp
->port
.pmf
|| !CHIP_IS_E1x(bp
));
2057 int bnx2x_rss(struct bnx2x
*bp
, struct bnx2x_rss_config_obj
*rss_obj
,
2058 bool config_hash
, bool enable
)
2060 struct bnx2x_config_rss_params params
= {NULL
};
2062 /* Although RSS is meaningless when there is a single HW queue we
2063 * still need it enabled in order to have HW Rx hash generated.
2065 * if (!is_eth_multi(bp))
2066 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
2069 params
.rss_obj
= rss_obj
;
2071 __set_bit(RAMROD_COMP_WAIT
, ¶ms
.ramrod_flags
);
2074 __set_bit(BNX2X_RSS_MODE_REGULAR
, ¶ms
.rss_flags
);
2076 /* RSS configuration */
2077 __set_bit(BNX2X_RSS_IPV4
, ¶ms
.rss_flags
);
2078 __set_bit(BNX2X_RSS_IPV4_TCP
, ¶ms
.rss_flags
);
2079 __set_bit(BNX2X_RSS_IPV6
, ¶ms
.rss_flags
);
2080 __set_bit(BNX2X_RSS_IPV6_TCP
, ¶ms
.rss_flags
);
2081 if (rss_obj
->udp_rss_v4
)
2082 __set_bit(BNX2X_RSS_IPV4_UDP
, ¶ms
.rss_flags
);
2083 if (rss_obj
->udp_rss_v6
)
2084 __set_bit(BNX2X_RSS_IPV6_UDP
, ¶ms
.rss_flags
);
2086 if (!CHIP_IS_E1x(bp
)) {
2087 /* valid only for TUNN_MODE_VXLAN tunnel mode */
2088 __set_bit(BNX2X_RSS_IPV4_VXLAN
, ¶ms
.rss_flags
);
2089 __set_bit(BNX2X_RSS_IPV6_VXLAN
, ¶ms
.rss_flags
);
2091 /* valid only for TUNN_MODE_GRE tunnel mode */
2092 __set_bit(BNX2X_RSS_TUNN_INNER_HDRS
, ¶ms
.rss_flags
);
2095 __set_bit(BNX2X_RSS_MODE_DISABLED
, ¶ms
.rss_flags
);
2099 params
.rss_result_mask
= MULTI_MASK
;
2101 memcpy(params
.ind_table
, rss_obj
->ind_table
, sizeof(params
.ind_table
));
2105 netdev_rss_key_fill(params
.rss_key
, T_ETH_RSS_KEY
* 4);
2106 __set_bit(BNX2X_RSS_SET_SRCH
, ¶ms
.rss_flags
);
2110 return bnx2x_config_rss(bp
, ¶ms
);
2112 return bnx2x_vfpf_config_rss(bp
, ¶ms
);
2115 static int bnx2x_init_hw(struct bnx2x
*bp
, u32 load_code
)
2117 struct bnx2x_func_state_params func_params
= {NULL
};
2119 /* Prepare parameters for function state transitions */
2120 __set_bit(RAMROD_COMP_WAIT
, &func_params
.ramrod_flags
);
2122 func_params
.f_obj
= &bp
->func_obj
;
2123 func_params
.cmd
= BNX2X_F_CMD_HW_INIT
;
2125 func_params
.params
.hw_init
.load_phase
= load_code
;
2127 return bnx2x_func_state_change(bp
, &func_params
);
2131 * Cleans the object that have internal lists without sending
2132 * ramrods. Should be run when interrupts are disabled.
2134 void bnx2x_squeeze_objects(struct bnx2x
*bp
)
2137 unsigned long ramrod_flags
= 0, vlan_mac_flags
= 0;
2138 struct bnx2x_mcast_ramrod_params rparam
= {NULL
};
2139 struct bnx2x_vlan_mac_obj
*mac_obj
= &bp
->sp_objs
->mac_obj
;
2141 /***************** Cleanup MACs' object first *************************/
2143 /* Wait for completion of requested */
2144 __set_bit(RAMROD_COMP_WAIT
, &ramrod_flags
);
2145 /* Perform a dry cleanup */
2146 __set_bit(RAMROD_DRV_CLR_ONLY
, &ramrod_flags
);
2148 /* Clean ETH primary MAC */
2149 __set_bit(BNX2X_ETH_MAC
, &vlan_mac_flags
);
2150 rc
= mac_obj
->delete_all(bp
, &bp
->sp_objs
->mac_obj
, &vlan_mac_flags
,
2153 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc
);
2155 /* Cleanup UC list */
2157 __set_bit(BNX2X_UC_LIST_MAC
, &vlan_mac_flags
);
2158 rc
= mac_obj
->delete_all(bp
, mac_obj
, &vlan_mac_flags
,
2161 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc
);
2163 /***************** Now clean mcast object *****************************/
2164 rparam
.mcast_obj
= &bp
->mcast_obj
;
2165 __set_bit(RAMROD_DRV_CLR_ONLY
, &rparam
.ramrod_flags
);
2167 /* Add a DEL command... - Since we're doing a driver cleanup only,
2168 * we take a lock surrounding both the initial send and the CONTs,
2169 * as we don't want a true completion to disrupt us in the middle.
2171 netif_addr_lock_bh(bp
->dev
);
2172 rc
= bnx2x_config_mcast(bp
, &rparam
, BNX2X_MCAST_CMD_DEL
);
2174 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2177 /* ...and wait until all pending commands are cleared */
2178 rc
= bnx2x_config_mcast(bp
, &rparam
, BNX2X_MCAST_CMD_CONT
);
2181 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2183 netif_addr_unlock_bh(bp
->dev
);
2187 rc
= bnx2x_config_mcast(bp
, &rparam
, BNX2X_MCAST_CMD_CONT
);
2189 netif_addr_unlock_bh(bp
->dev
);
2192 #ifndef BNX2X_STOP_ON_ERROR
2193 #define LOAD_ERROR_EXIT(bp, label) \
2195 (bp)->state = BNX2X_STATE_ERROR; \
2199 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2201 bp->cnic_loaded = false; \
2204 #else /*BNX2X_STOP_ON_ERROR*/
2205 #define LOAD_ERROR_EXIT(bp, label) \
2207 (bp)->state = BNX2X_STATE_ERROR; \
2211 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2213 bp->cnic_loaded = false; \
2217 #endif /*BNX2X_STOP_ON_ERROR*/
2219 static void bnx2x_free_fw_stats_mem(struct bnx2x
*bp
)
2221 BNX2X_PCI_FREE(bp
->fw_stats
, bp
->fw_stats_mapping
,
2222 bp
->fw_stats_data_sz
+ bp
->fw_stats_req_sz
);
static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
{
	int num_groups, vf_headroom = 0;
	int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;

	/* number of queues for statistics is number of eth queues + FCoE */
	u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;

	/* Total number of FW statistics requests =
	 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
	 * and fcoe l2 queue) stats + num of queues (which includes another 1
	 * for fcoe l2 queue if applicable)
	 */
	bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;

	/* vf stats appear in the request list, but their data is allocated by
	 * the VFs themselves. We don't include them in the bp->fw_stats_num as
	 * it is used to determine where to place the vf stats queries in the
	 * request struct
	 */
	if (IS_SRIOV(bp))
		vf_headroom = bnx2x_vf_headroom(bp);

	/* Request is built from stats_query_header and an array of
	 * stats_query_cmd_group each of which contains
	 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
	 * configured in the stats_query_header.
	 */
	num_groups =
		(((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
		 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
		  1 : 0));

	DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
	   bp->fw_stats_num, vf_headroom, num_groups);

	bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
		num_groups * sizeof(struct stats_query_cmd_group);

	/* Data for statistics requests + stats_counter
	 * stats_counter holds per-STORM counters that are incremented
	 * when STORM has finished with the current request.
	 * memory for FCoE offloaded statistics are counted anyway,
	 * even if they will not be sent.
	 * VF stats are not accounted for here as the data of VF stats is stored
	 * in memory allocated by the VF, not here.
	 */
	bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
		sizeof(struct per_pf_stats) +
		sizeof(struct fcoe_statistics_params) +
		sizeof(struct per_queue_stats) * num_queue_stats +
		sizeof(struct stats_counter);

	bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
				       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
	if (!bp->fw_stats)
		goto alloc_mem_err;

	/* Set shortcuts */
	bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
	bp->fw_stats_req_mapping = bp->fw_stats_mapping;
	bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
		((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
	bp->fw_stats_data_mapping = bp->fw_stats_mapping +
		bp->fw_stats_req_sz;

	DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
	   U64_HI(bp->fw_stats_req_mapping),
	   U64_LO(bp->fw_stats_req_mapping));
	DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
	   U64_HI(bp->fw_stats_data_mapping),
	   U64_LO(bp->fw_stats_data_mapping));
	return 0;

alloc_mem_err:
	bnx2x_free_fw_stats_mem(bp);
	BNX2X_ERR("Can't allocate FW stats memory\n");
	return -ENOMEM;
}
/* send load request to mcp and analyze response */
static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
{
	u32 param;

	/* init fw_seq */
	bp->fw_seq =
		(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
		 DRV_MSG_SEQ_NUMBER_MASK);
	BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);

	/* Get current FW pulse sequence */
	bp->fw_drv_pulse_wr_seq =
		(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
		 DRV_PULSE_SEQ_MASK);
	BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);

	param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;

	if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
		param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;

	/* load request */
	(*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);

	/* if mcp fails to respond we must abort */
	if (!(*load_code)) {
		BNX2X_ERR("MCP response failure, aborting\n");
		return -EBUSY;
	}

	/* If mcp refused (e.g. other port is in diagnostic mode) we
	 * must abort
	 */
	if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
		BNX2X_ERR("MCP refused load request, aborting\n");
		return -EBUSY;
	}

	return 0;
}
/* check whether another PF has already loaded FW to chip. In
 * virtualized environments a pf from another VM may have already
 * initialized the device including loading FW
 */
int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
{
	/* is another pf loaded on this engine? */
	if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
	    load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
		/* build my FW version dword */
		u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
			(BCM_5710_FW_MINOR_VERSION << 8) +
			(BCM_5710_FW_REVISION_VERSION << 16) +
			(BCM_5710_FW_ENGINEERING_VERSION << 24);

		/* read loaded FW from chip */
		u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);

		DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
		   loaded_fw, my_fw);

		/* abort nic load if version mismatch */
		if (my_fw != loaded_fw) {
			if (print_err)
				BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
					  loaded_fw, my_fw);
			else
				BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
					       loaded_fw, my_fw);
			return -EBUSY;
		}
	}
	return 0;
}
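/* Worked example (hypothetical numbers, not taken from the FW headers): a
 * firmware version of 7.13.1.0 would be packed lowest-byte-first as
 *	my_fw = 7 + (13 << 8) + (1 << 16) + (0 << 24) = 0x00010d07
 * so a mismatch against the XSEM_REG_PRAM readout is a plain u32 compare.
 */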
/* returns the "mcp load_code" according to global load_count array */
static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
{
	int path = BP_PATH(bp);

	DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
	   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
	   bnx2x_load_count[path][2]);
	bnx2x_load_count[path][0]++;
	bnx2x_load_count[path][1 + port]++;
	DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
	   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
	   bnx2x_load_count[path][2]);
	if (bnx2x_load_count[path][0] == 1)
		return FW_MSG_CODE_DRV_LOAD_COMMON;
	else if (bnx2x_load_count[path][1 + port] == 1)
		return FW_MSG_CODE_DRV_LOAD_PORT;
	else
		return FW_MSG_CODE_DRV_LOAD_FUNCTION;
}
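/* Sketch of the no-MCP bookkeeping above: bnx2x_load_count[path] holds
 * { total-on-path, on-port-0, on-port-1 }. The first function to load on the
 * path therefore sees COMMON (full chip init), the first on its port sees
 * PORT, and every later function only gets FUNCTION-level init. E.g. loading
 * two functions on port 0 and then one on port 1 yields COMMON, FUNCTION,
 * PORT in that order (example ordering, not from the source).
 */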
/* mark PMF if applicable */
static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
{
	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
		bp->port.pmf = 1;
		/* We need the barrier to ensure the ordering between the
		 * writing to bp->port.pmf here and reading it from the
		 * bnx2x_periodic_task().
		 */
		smp_mb();
	} else {
		bp->port.pmf = 0;
	}

	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
}
static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
{
	if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	     (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
	    (bp->common.shmem2_base)) {
		if (SHMEM2_HAS(bp, dcc_support))
			SHMEM2_WR(bp, dcc_support,
				  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
				   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
		if (SHMEM2_HAS(bp, afex_driver_support))
			SHMEM2_WR(bp, afex_driver_support,
				  SHMEM_AFEX_SUPPORTED_VERSION_ONE);
	}

	/* Set AFEX default VLAN tag to an invalid value */
	bp->afex_def_vlan_tag = -1;
}
/**
 * bnx2x_bz_fp - zero content of the fastpath structure.
 *
 * @bp:		driver handle
 * @index:	fastpath index to be zeroed
 *
 * Makes sure the contents of the bp->fp[index].napi is kept
 * intact.
 */
static void bnx2x_bz_fp(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int cos;
	struct napi_struct orig_napi = fp->napi;
	struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;

	/* bzero bnx2x_fastpath contents */
	if (fp->tpa_info)
		memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
		       sizeof(struct bnx2x_agg_info));
	memset(fp, 0, sizeof(*fp));

	/* Restore the NAPI object as it has been already initialized */
	fp->napi = orig_napi;
	fp->tpa_info = orig_tpa_info;
	fp->bp = bp;
	fp->index = index;
	if (IS_ETH_FP(fp))
		fp->max_cos = bp->max_cos;
	else
		/* Special queues support only one CoS */
		fp->max_cos = 1;

	/* Init txdata pointers */
	if (IS_FCOE_FP(fp))
		fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
	if (IS_ETH_FP(fp))
		for_each_cos_in_tx_queue(fp, cos)
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
				BNX2X_NUM_ETH_QUEUES(bp) + index];

	/* set the tpa flag for each queue. The tpa flag determines the queue
	 * minimal size so it must be set prior to queue memory allocation
	 */
	if (bp->dev->features & NETIF_F_LRO)
		fp->mode = TPA_MODE_LRO;
	else if (bp->dev->features & NETIF_F_GRO &&
		 bnx2x_mtu_allows_gro(bp->dev->mtu))
		fp->mode = TPA_MODE_GRO;
	else
		fp->mode = TPA_MODE_DISABLED;

	/* We don't want TPA if it's disabled in bp
	 * or if this is an FCoE L2 ring.
	 */
	if (bp->disable_tpa || IS_FCOE_FP(fp))
		fp->mode = TPA_MODE_DISABLED;
}
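/* The mode selection above is effectively a priority chain: LRO wins if the
 * netdev advertises NETIF_F_LRO, otherwise GRO is used when the MTU still
 * allows full aggregation, and everything else falls back to
 * TPA_MODE_DISABLED. A queue created for an FCoE L2 ring (or a device with
 * disable_tpa set) therefore never aggregates even when LRO is offered.
 */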
void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
{
	u32 cur;

	if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
		return;

	cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
	DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
	   cur, state);

	SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
}
2512 int bnx2x_load_cnic(struct bnx2x
*bp
)
2514 int i
, rc
, port
= BP_PORT(bp
);
2516 DP(NETIF_MSG_IFUP
, "Starting CNIC-related load\n");
2518 mutex_init(&bp
->cnic_mutex
);
2521 rc
= bnx2x_alloc_mem_cnic(bp
);
2523 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2524 LOAD_ERROR_EXIT_CNIC(bp
, load_error_cnic0
);
2528 rc
= bnx2x_alloc_fp_mem_cnic(bp
);
2530 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2531 LOAD_ERROR_EXIT_CNIC(bp
, load_error_cnic0
);
2534 /* Update the number of queues with the cnic queues */
2535 rc
= bnx2x_set_real_num_queues(bp
, 1);
2537 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2538 LOAD_ERROR_EXIT_CNIC(bp
, load_error_cnic0
);
2541 /* Add all CNIC NAPI objects */
2542 bnx2x_add_all_napi_cnic(bp
);
2543 DP(NETIF_MSG_IFUP
, "cnic napi added\n");
2544 bnx2x_napi_enable_cnic(bp
);
2546 rc
= bnx2x_init_hw_func_cnic(bp
);
2548 LOAD_ERROR_EXIT_CNIC(bp
, load_error_cnic1
);
2550 bnx2x_nic_init_cnic(bp
);
2553 /* Enable Timer scan */
2554 REG_WR(bp
, TM_REG_EN_LINEAR0_TIMER
+ port
*4, 1);
2556 /* setup cnic queues */
2557 for_each_cnic_queue(bp
, i
) {
2558 rc
= bnx2x_setup_queue(bp
, &bp
->fp
[i
], 0);
2560 BNX2X_ERR("Queue setup failed\n");
2561 LOAD_ERROR_EXIT(bp
, load_error_cnic2
);
2566 /* Initialize Rx filter. */
2567 bnx2x_set_rx_mode_inner(bp
);
2569 /* re-read iscsi info */
2570 bnx2x_get_iscsi_info(bp
);
2571 bnx2x_setup_cnic_irq_info(bp
);
2572 bnx2x_setup_cnic_info(bp
);
2573 bp
->cnic_loaded
= true;
2574 if (bp
->state
== BNX2X_STATE_OPEN
)
2575 bnx2x_cnic_notify(bp
, CNIC_CTL_START_CMD
);
2577 DP(NETIF_MSG_IFUP
, "Ending successfully CNIC-related load\n");
2581 #ifndef BNX2X_STOP_ON_ERROR
2583 /* Disable Timer scan */
2584 REG_WR(bp
, TM_REG_EN_LINEAR0_TIMER
+ port
*4, 0);
2587 bnx2x_napi_disable_cnic(bp
);
2588 /* Update the number of queues without the cnic queues */
2589 if (bnx2x_set_real_num_queues(bp
, 0))
2590 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2592 BNX2X_ERR("CNIC-related load failed\n");
2593 bnx2x_free_fp_mem_cnic(bp
);
2594 bnx2x_free_mem_cnic(bp
);
2596 #endif /* ! BNX2X_STOP_ON_ERROR */
2599 /* must be called with rtnl_lock */
2600 int bnx2x_nic_load(struct bnx2x
*bp
, int load_mode
)
2602 int port
= BP_PORT(bp
);
2603 int i
, rc
= 0, load_code
= 0;
2605 DP(NETIF_MSG_IFUP
, "Starting NIC load\n");
2607 "CNIC is %s\n", CNIC_ENABLED(bp
) ? "enabled" : "disabled");
2609 #ifdef BNX2X_STOP_ON_ERROR
2610 if (unlikely(bp
->panic
)) {
2611 BNX2X_ERR("Can't load NIC when there is panic\n");
2616 bp
->state
= BNX2X_STATE_OPENING_WAIT4_LOAD
;
2618 /* zero the structure w/o any lock, before SP handler is initialized */
2619 memset(&bp
->last_reported_link
, 0, sizeof(bp
->last_reported_link
));
2620 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN
,
2621 &bp
->last_reported_link
.link_report_flags
);
2624 /* must be called before memory allocation and HW init */
2625 bnx2x_ilt_set_info(bp
);
2628 * Zero fastpath structures preserving invariants like napi, which are
2629 * allocated only once, fp index, max_cos, bp pointer.
2630 * Also set fp->mode and txdata_ptr.
2632 DP(NETIF_MSG_IFUP
, "num queues: %d", bp
->num_queues
);
2633 for_each_queue(bp
, i
)
2635 memset(bp
->bnx2x_txq
, 0, (BNX2X_MAX_RSS_COUNT(bp
) * BNX2X_MULTI_TX_COS
+
2636 bp
->num_cnic_queues
) *
2637 sizeof(struct bnx2x_fp_txdata
));
2639 bp
->fcoe_init
= false;
2641 /* Set the receive queues buffer size */
2642 bnx2x_set_rx_buf_size(bp
);
2645 rc
= bnx2x_alloc_mem(bp
);
2647 BNX2X_ERR("Unable to allocate bp memory\n");
2652 /* need to be done after alloc mem, since it's self adjusting to amount
2653 * of memory available for RSS queues
2655 rc
= bnx2x_alloc_fp_mem(bp
);
2657 BNX2X_ERR("Unable to allocate memory for fps\n");
2658 LOAD_ERROR_EXIT(bp
, load_error0
);
2661 /* Allocated memory for FW statistics */
2662 if (bnx2x_alloc_fw_stats_mem(bp
))
2663 LOAD_ERROR_EXIT(bp
, load_error0
);
2665 /* request pf to initialize status blocks */
2667 rc
= bnx2x_vfpf_init(bp
);
2669 LOAD_ERROR_EXIT(bp
, load_error0
);
2672 /* As long as bnx2x_alloc_mem() may possibly update
2673 * bp->num_queues, bnx2x_set_real_num_queues() should always
2674 * come after it. At this stage cnic queues are not counted.
2676 rc
= bnx2x_set_real_num_queues(bp
, 0);
2678 BNX2X_ERR("Unable to set real_num_queues\n");
2679 LOAD_ERROR_EXIT(bp
, load_error0
);
2682 /* configure multi cos mappings in kernel.
2683 * this configuration may be overridden by a multi class queue
2684 * discipline or by a dcbx negotiation result.
2686 bnx2x_setup_tc(bp
->dev
, bp
->max_cos
);
2688 /* Add all NAPI objects */
2689 bnx2x_add_all_napi(bp
);
2690 DP(NETIF_MSG_IFUP
, "napi added\n");
2691 bnx2x_napi_enable(bp
);
2694 /* set pf load just before approaching the MCP */
2695 bnx2x_set_pf_load(bp
);
2697 /* if mcp exists send load request and analyze response */
2698 if (!BP_NOMCP(bp
)) {
2699 /* attempt to load pf */
2700 rc
= bnx2x_nic_load_request(bp
, &load_code
);
2702 LOAD_ERROR_EXIT(bp
, load_error1
);
2704 /* what did mcp say? */
2705 rc
= bnx2x_compare_fw_ver(bp
, load_code
, true);
2707 bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_DONE
, 0);
2708 LOAD_ERROR_EXIT(bp
, load_error2
);
2711 load_code
= bnx2x_nic_load_no_mcp(bp
, port
);
2714 /* mark pmf if applicable */
2715 bnx2x_nic_load_pmf(bp
, load_code
);
2717 /* Init Function state controlling object */
2718 bnx2x__init_func_obj(bp
);
2721 rc
= bnx2x_init_hw(bp
, load_code
);
2723 BNX2X_ERR("HW init failed, aborting\n");
2724 bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_DONE
, 0);
2725 LOAD_ERROR_EXIT(bp
, load_error2
);
2729 bnx2x_pre_irq_nic_init(bp
);
2731 /* Connect to IRQs */
2732 rc
= bnx2x_setup_irqs(bp
);
2734 BNX2X_ERR("setup irqs failed\n");
2736 bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_DONE
, 0);
2737 LOAD_ERROR_EXIT(bp
, load_error2
);
2740 /* Init per-function objects */
2742 /* Setup NIC internals and enable interrupts */
2743 bnx2x_post_irq_nic_init(bp
, load_code
);
2745 bnx2x_init_bp_objs(bp
);
2746 bnx2x_iov_nic_init(bp
);
2748 /* Set AFEX default VLAN tag to an invalid value */
2749 bp
->afex_def_vlan_tag
= -1;
2750 bnx2x_nic_load_afex_dcc(bp
, load_code
);
2751 bp
->state
= BNX2X_STATE_OPENING_WAIT4_PORT
;
2752 rc
= bnx2x_func_start(bp
);
2754 BNX2X_ERR("Function start failed!\n");
2755 bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_DONE
, 0);
2757 LOAD_ERROR_EXIT(bp
, load_error3
);
2760 /* Send LOAD_DONE command to MCP */
2761 if (!BP_NOMCP(bp
)) {
2762 load_code
= bnx2x_fw_command(bp
,
2763 DRV_MSG_CODE_LOAD_DONE
, 0);
2765 BNX2X_ERR("MCP response failure, aborting\n");
2767 LOAD_ERROR_EXIT(bp
, load_error3
);
2771 /* initialize FW coalescing state machines in RAM */
2772 bnx2x_update_coalesce(bp
);
2775 /* setup the leading queue */
2776 rc
= bnx2x_setup_leading(bp
);
2778 BNX2X_ERR("Setup leading failed!\n");
2779 LOAD_ERROR_EXIT(bp
, load_error3
);
2782 /* set up the rest of the queues */
2783 for_each_nondefault_eth_queue(bp
, i
) {
2785 rc
= bnx2x_setup_queue(bp
, &bp
->fp
[i
], false);
2787 rc
= bnx2x_vfpf_setup_q(bp
, &bp
->fp
[i
], false);
2789 BNX2X_ERR("Queue %d setup failed\n", i
);
2790 LOAD_ERROR_EXIT(bp
, load_error3
);
2795 rc
= bnx2x_init_rss(bp
);
2797 BNX2X_ERR("PF RSS init failed\n");
2798 LOAD_ERROR_EXIT(bp
, load_error3
);
2801 /* Now when Clients are configured we are ready to work */
2802 bp
->state
= BNX2X_STATE_OPEN
;
2804 /* Configure a ucast MAC */
2806 rc
= bnx2x_set_eth_mac(bp
, true);
2808 rc
= bnx2x_vfpf_config_mac(bp
, bp
->dev
->dev_addr
, bp
->fp
->index
,
2811 BNX2X_ERR("Setting Ethernet MAC failed\n");
2812 LOAD_ERROR_EXIT(bp
, load_error3
);
2815 if (IS_PF(bp
) && bp
->pending_max
) {
2816 bnx2x_update_max_mf_config(bp
, bp
->pending_max
);
2817 bp
->pending_max
= 0;
2821 rc
= bnx2x_initial_phy_init(bp
, load_mode
);
2823 LOAD_ERROR_EXIT(bp
, load_error3
);
2825 bp
->link_params
.feature_config_flags
&= ~FEATURE_CONFIG_BOOT_FROM_SAN
;
2827 /* Start fast path */
2829 /* Re-configure vlan filters */
2830 rc
= bnx2x_vlan_reconfigure_vid(bp
);
2832 LOAD_ERROR_EXIT(bp
, load_error3
);
2834 /* Initialize Rx filter. */
2835 bnx2x_set_rx_mode_inner(bp
);
2837 if (bp
->flags
& PTP_SUPPORTED
) {
2839 bnx2x_configure_ptp_filters(bp
);
2842 switch (load_mode
) {
2844 /* Tx queue should be only re-enabled */
2845 netif_tx_wake_all_queues(bp
->dev
);
2849 netif_tx_start_all_queues(bp
->dev
);
2850 smp_mb__after_atomic();
2854 case LOAD_LOOPBACK_EXT
:
2855 bp
->state
= BNX2X_STATE_DIAG
;
2863 bnx2x_update_drv_flags(bp
, 1 << DRV_FLAGS_PORT_MASK
, 0);
2865 bnx2x__link_status_update(bp
);
2867 /* start the timer */
2868 mod_timer(&bp
->timer
, jiffies
+ bp
->current_interval
);
2870 if (CNIC_ENABLED(bp
))
2871 bnx2x_load_cnic(bp
);
2874 bnx2x_schedule_sp_rtnl(bp
, BNX2X_SP_RTNL_GET_DRV_VERSION
, 0);
2876 if (IS_PF(bp
) && SHMEM2_HAS(bp
, drv_capabilities_flag
)) {
2877 /* mark driver is loaded in shmem2 */
2879 val
= SHMEM2_RD(bp
, drv_capabilities_flag
[BP_FW_MB_IDX(bp
)]);
2880 val
&= ~DRV_FLAGS_MTU_MASK
;
2881 val
|= (bp
->dev
->mtu
<< DRV_FLAGS_MTU_SHIFT
);
2882 SHMEM2_WR(bp
, drv_capabilities_flag
[BP_FW_MB_IDX(bp
)],
2883 val
| DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED
|
2884 DRV_FLAGS_CAPABILITIES_LOADED_L2
);
2887 /* Wait for all pending SP commands to complete */
2888 if (IS_PF(bp
) && !bnx2x_wait_sp_comp(bp
, ~0x0UL
)) {
2889 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2890 bnx2x_nic_unload(bp
, UNLOAD_CLOSE
, false);
2894 /* Update driver data for On-Chip MFW dump. */
2896 bnx2x_update_mfw_dump(bp
);
2898 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2899 if (bp
->port
.pmf
&& (bp
->state
!= BNX2X_STATE_DIAG
))
2900 bnx2x_dcbx_init(bp
, false);
2902 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp
))
2903 bnx2x_set_os_driver_state(bp
, OS_DRIVER_STATE_ACTIVE
);
2905 DP(NETIF_MSG_IFUP
, "Ending successfully NIC load\n");
2909 #ifndef BNX2X_STOP_ON_ERROR
2912 bnx2x_int_disable_sync(bp
, 1);
2914 /* Clean queueable objects */
2915 bnx2x_squeeze_objects(bp
);
2918 /* Free SKBs, SGEs, TPA pool and driver internals */
2919 bnx2x_free_skbs(bp
);
2920 for_each_rx_queue(bp
, i
)
2921 bnx2x_free_rx_sge_range(bp
, bp
->fp
+ i
, NUM_RX_SGE
);
2926 if (IS_PF(bp
) && !BP_NOMCP(bp
)) {
2927 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP
, 0);
2928 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_DONE
, 0);
2933 bnx2x_napi_disable(bp
);
2934 bnx2x_del_all_napi(bp
);
2936 /* clear pf_load status, as it was already set */
2938 bnx2x_clear_pf_load(bp
);
2940 bnx2x_free_fw_stats_mem(bp
);
2941 bnx2x_free_fp_mem(bp
);
2945 #endif /* ! BNX2X_STOP_ON_ERROR */
int bnx2x_drain_tx_queues(struct bnx2x *bp)
{
	u8 rc = 0, cos, i;

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		for_each_cos_in_tx_queue(fp, cos)
			rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
		if (rc)
			return rc;
	}
	return 0;
}
2964 /* must be called with rtnl_lock */
2965 int bnx2x_nic_unload(struct bnx2x
*bp
, int unload_mode
, bool keep_link
)
2968 bool global
= false;
2970 DP(NETIF_MSG_IFUP
, "Starting NIC unload\n");
2972 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp
))
2973 bnx2x_set_os_driver_state(bp
, OS_DRIVER_STATE_DISABLED
);
2975 /* mark driver is unloaded in shmem2 */
2976 if (IS_PF(bp
) && SHMEM2_HAS(bp
, drv_capabilities_flag
)) {
2978 val
= SHMEM2_RD(bp
, drv_capabilities_flag
[BP_FW_MB_IDX(bp
)]);
2979 SHMEM2_WR(bp
, drv_capabilities_flag
[BP_FW_MB_IDX(bp
)],
2980 val
& ~DRV_FLAGS_CAPABILITIES_LOADED_L2
);
2983 if (IS_PF(bp
) && bp
->recovery_state
!= BNX2X_RECOVERY_DONE
&&
2984 (bp
->state
== BNX2X_STATE_CLOSED
||
2985 bp
->state
== BNX2X_STATE_ERROR
)) {
2986 /* We can get here if the driver has been unloaded
2987 * during parity error recovery and is either waiting for a
2988 * leader to complete or for other functions to unload and
2989 * then ifdown has been issued. In this case we want to
2990 * unload and let other functions to complete a recovery
2993 bp
->recovery_state
= BNX2X_RECOVERY_DONE
;
2995 bnx2x_release_leader_lock(bp
);
2998 DP(NETIF_MSG_IFDOWN
, "Releasing a leadership...\n");
2999 BNX2X_ERR("Can't unload in closed or error state\n");
3003 /* Nothing to do during unload if previous bnx2x_nic_load()
3004 * have not completed successfully - all resources are released.
3006 * we can get here only after unsuccessful ndo_* callback, during which
3007 * dev->IFF_UP flag is still on.
3009 if (bp
->state
== BNX2X_STATE_CLOSED
|| bp
->state
== BNX2X_STATE_ERROR
)
3012 /* It's important to set the bp->state to the value different from
3013 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
3014 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
3016 bp
->state
= BNX2X_STATE_CLOSING_WAIT4_HALT
;
3019 /* indicate to VFs that the PF is going down */
3020 bnx2x_iov_channel_down(bp
);
3022 if (CNIC_LOADED(bp
))
3023 bnx2x_cnic_notify(bp
, CNIC_CTL_STOP_CMD
);
3026 bnx2x_tx_disable(bp
);
3027 netdev_reset_tc(bp
->dev
);
3029 bp
->rx_mode
= BNX2X_RX_MODE_NONE
;
3031 del_timer_sync(&bp
->timer
);
3034 /* Set ALWAYS_ALIVE bit in shmem */
3035 bp
->fw_drv_pulse_wr_seq
|= DRV_PULSE_ALWAYS_ALIVE
;
3036 bnx2x_drv_pulse(bp
);
3037 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
3038 bnx2x_save_statistics(bp
);
3041 /* wait till consumers catch up with producers in all queues.
3042 * If we're recovering, FW can't write to host so no reason
3043 * to wait for the queues to complete all Tx.
3045 if (unload_mode
!= UNLOAD_RECOVERY
)
3046 bnx2x_drain_tx_queues(bp
);
3048 /* if VF indicate to PF this function is going down (PF will delete sp
3049 * elements and clear initializations
3052 bnx2x_vfpf_close_vf(bp
);
3053 else if (unload_mode
!= UNLOAD_RECOVERY
)
3054 /* if this is a normal/close unload need to clean up chip*/
3055 bnx2x_chip_cleanup(bp
, unload_mode
, keep_link
);
3057 /* Send the UNLOAD_REQUEST to the MCP */
3058 bnx2x_send_unload_req(bp
, unload_mode
);
3060 /* Prevent transactions to host from the functions on the
3061 * engine that doesn't reset global blocks in case of global
3062 * attention once global blocks are reset and gates are opened
3063 * (the engine which leader will perform the recovery
3066 if (!CHIP_IS_E1x(bp
))
3067 bnx2x_pf_disable(bp
);
3069 /* Disable HW interrupts, NAPI */
3070 bnx2x_netif_stop(bp
, 1);
3071 /* Delete all NAPI objects */
3072 bnx2x_del_all_napi(bp
);
3073 if (CNIC_LOADED(bp
))
3074 bnx2x_del_all_napi_cnic(bp
);
3078 /* Report UNLOAD_DONE to MCP */
3079 bnx2x_send_unload_done(bp
, false);
3083 * At this stage no more interrupts will arrive so we may safely clean
3084 * the queueable objects here in case they failed to get cleaned so far.
3087 bnx2x_squeeze_objects(bp
);
3089 /* There should be no more pending SP commands at this stage */
3094 /* clear pending work in rtnl task */
3095 bp
->sp_rtnl_state
= 0;
3098 /* Free SKBs, SGEs, TPA pool and driver internals */
3099 bnx2x_free_skbs(bp
);
3100 if (CNIC_LOADED(bp
))
3101 bnx2x_free_skbs_cnic(bp
);
3102 for_each_rx_queue(bp
, i
)
3103 bnx2x_free_rx_sge_range(bp
, bp
->fp
+ i
, NUM_RX_SGE
);
3105 bnx2x_free_fp_mem(bp
);
3106 if (CNIC_LOADED(bp
))
3107 bnx2x_free_fp_mem_cnic(bp
);
3110 if (CNIC_LOADED(bp
))
3111 bnx2x_free_mem_cnic(bp
);
3115 bp
->state
= BNX2X_STATE_CLOSED
;
3116 bp
->cnic_loaded
= false;
3118 /* Clear driver version indication in shmem */
3120 bnx2x_update_mng_version(bp
);
3122 /* Check if there are pending parity attentions. If there are - set
3123 * RECOVERY_IN_PROGRESS.
3125 if (IS_PF(bp
) && bnx2x_chk_parity_attn(bp
, &global
, false)) {
3126 bnx2x_set_reset_in_progress(bp
);
3128 /* Set RESET_IS_GLOBAL if needed */
3130 bnx2x_set_reset_global(bp
);
3133 /* The last driver must disable a "close the gate" if there is no
3134 * parity attention or "process kill" pending.
3137 !bnx2x_clear_pf_load(bp
) &&
3138 bnx2x_reset_is_done(bp
, BP_PATH(bp
)))
3139 bnx2x_disable_close_the_gate(bp
);
3141 DP(NETIF_MSG_IFUP
, "Ending NIC unload\n");
3146 int bnx2x_set_power_state(struct bnx2x
*bp
, pci_power_t state
)
3150 /* If there is no power capability, silently succeed */
3151 if (!bp
->pdev
->pm_cap
) {
3152 BNX2X_DEV_INFO("No power capability. Breaking.\n");
3156 pci_read_config_word(bp
->pdev
, bp
->pdev
->pm_cap
+ PCI_PM_CTRL
, &pmcsr
);
3160 pci_write_config_word(bp
->pdev
, bp
->pdev
->pm_cap
+ PCI_PM_CTRL
,
3161 ((pmcsr
& ~PCI_PM_CTRL_STATE_MASK
) |
3162 PCI_PM_CTRL_PME_STATUS
));
3164 if (pmcsr
& PCI_PM_CTRL_STATE_MASK
)
3165 /* delay required during transition out of D3hot */
3170 /* If there are other clients above don't
3171 shut down the power */
3172 if (atomic_read(&bp
->pdev
->enable_cnt
) != 1)
3174 /* Don't shut down the power for emulation and FPGA */
3175 if (CHIP_REV_IS_SLOW(bp
))
3178 pmcsr
&= ~PCI_PM_CTRL_STATE_MASK
;
3182 pmcsr
|= PCI_PM_CTRL_PME_ENABLE
;
3184 pci_write_config_word(bp
->pdev
, bp
->pdev
->pm_cap
+ PCI_PM_CTRL
,
3187 /* No more memory access after this point until
3188 * device is brought back to D0.
3193 dev_err(&bp
->pdev
->dev
, "Can't support state = %d\n", state
);
3200 * net_device service functions
3202 static int bnx2x_poll(struct napi_struct
*napi
, int budget
)
3204 struct bnx2x_fastpath
*fp
= container_of(napi
, struct bnx2x_fastpath
,
3206 struct bnx2x
*bp
= fp
->bp
;
3210 #ifdef BNX2X_STOP_ON_ERROR
3211 if (unlikely(bp
->panic
)) {
3212 napi_complete(napi
);
3216 for_each_cos_in_tx_queue(fp
, cos
)
3217 if (bnx2x_tx_queue_has_work(fp
->txdata_ptr
[cos
]))
3218 bnx2x_tx_int(bp
, fp
->txdata_ptr
[cos
]);
3220 rx_work_done
= (bnx2x_has_rx_work(fp
)) ? bnx2x_rx_int(fp
, budget
) : 0;
3222 if (rx_work_done
< budget
) {
3223 /* No need to update SB for FCoE L2 ring as long as
3224 * it's connected to the default SB and the SB
3225 * has been updated when NAPI was scheduled.
3227 if (IS_FCOE_FP(fp
)) {
3228 napi_complete_done(napi
, rx_work_done
);
3230 bnx2x_update_fpsb_idx(fp
);
3231 /* bnx2x_has_rx_work() reads the status block,
3232 * thus we need to ensure that status block indices
3233 * have been actually read (bnx2x_update_fpsb_idx)
3234 * prior to this check (bnx2x_has_rx_work) so that
3235 * we won't write the "newer" value of the status block
3236 * to IGU (if there was a DMA right after
3237 * bnx2x_has_rx_work and if there is no rmb, the memory
3238 * reading (bnx2x_update_fpsb_idx) may be postponed
3239 * to right before bnx2x_ack_sb). In this case there
3240 * will never be another interrupt until there is
3241 * another update of the status block, while there
3242 * is still unhandled work.
3246 if (!(bnx2x_has_rx_work(fp
) || bnx2x_has_tx_work(fp
))) {
3247 if (napi_complete_done(napi
, rx_work_done
)) {
3248 /* Re-enable interrupts */
3249 DP(NETIF_MSG_RX_STATUS
,
3250 "Update index to %d\n", fp
->fp_hc_idx
);
3251 bnx2x_ack_sb(bp
, fp
->igu_sb_id
, USTORM_ID
,
3252 le16_to_cpu(fp
->fp_hc_idx
),
3256 rx_work_done
= budget
;
3261 return rx_work_done
;
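/* Ordering sketch for the poll path above (reader side only):
 *
 *	bnx2x_update_fpsb_idx(fp);	read SB indices from host memory
 *	rmb();				don't let the re-check float up
 *	bnx2x_has_rx_work(fp) || ...	decide whether to re-arm the IGU
 *
 * Without the barrier the index read could be deferred past the work check;
 * a status block update DMA'd in between would then be acknowledged with a
 * stale index - no further interrupt, work left pending.
 */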
3264 /* we split the first BD into headers and data BDs
3265 * to ease the pain of our fellow microcode engineers
3266 * we use one mapping for both BDs
3268 static u16
bnx2x_tx_split(struct bnx2x
*bp
,
3269 struct bnx2x_fp_txdata
*txdata
,
3270 struct sw_tx_bd
*tx_buf
,
3271 struct eth_tx_start_bd
**tx_bd
, u16 hlen
,
3274 struct eth_tx_start_bd
*h_tx_bd
= *tx_bd
;
3275 struct eth_tx_bd
*d_tx_bd
;
3277 int old_len
= le16_to_cpu(h_tx_bd
->nbytes
);
3279 /* first fix first BD */
3280 h_tx_bd
->nbytes
= cpu_to_le16(hlen
);
3282 DP(NETIF_MSG_TX_QUEUED
, "TSO split header size is %d (%x:%x)\n",
3283 h_tx_bd
->nbytes
, h_tx_bd
->addr_hi
, h_tx_bd
->addr_lo
);
3285 /* now get a new data BD
3286 * (after the pbd) and fill it */
3287 bd_prod
= TX_BD(NEXT_TX_IDX(bd_prod
));
3288 d_tx_bd
= &txdata
->tx_desc_ring
[bd_prod
].reg_bd
;
3290 mapping
= HILO_U64(le32_to_cpu(h_tx_bd
->addr_hi
),
3291 le32_to_cpu(h_tx_bd
->addr_lo
)) + hlen
;
3293 d_tx_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
3294 d_tx_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
3295 d_tx_bd
->nbytes
= cpu_to_le16(old_len
- hlen
);
3297 /* this marks the BD as one that has no individual mapping */
3298 tx_buf
->flags
|= BNX2X_TSO_SPLIT_BD
;
3300 DP(NETIF_MSG_TX_QUEUED
,
3301 "TSO split data size is %d (%x:%x)\n",
3302 d_tx_bd
->nbytes
, d_tx_bd
->addr_hi
, d_tx_bd
->addr_lo
);
3305 *tx_bd
= (struct eth_tx_start_bd
*)d_tx_bd
;
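/* Split arithmetic in bnx2x_tx_split(), shown with hypothetical numbers: for
 * a linear part of old_len = 1400 bytes and hlen = 66 bytes of headers, the
 * start BD is trimmed to 66 bytes at `mapping`, and the new data BD covers
 * 1400 - 66 = 1334 bytes at `mapping + 66`. Only one DMA mapping is used;
 * BNX2X_TSO_SPLIT_BD tells the completion path not to unmap the second BD.
 */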
#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	__sum16 tsum = (__force __sum16) csum;

	if (fix > 0)
		tsum = ~csum_fold(csum_sub((__force __wsum) csum,
				  csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		tsum = ~csum_fold(csum_add((__force __wsum) csum,
				  csum_partial(t_header, -fix, 0)));

	return bswab16(tsum);
}
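/* What bnx2x_csum_fix() compensates for, as a sketch: the checksum it is
 * handed was computed starting `fix` bytes away from the transport header,
 * so the partial sum of the skipped (fix > 0) or extra (fix < 0) region is
 * folded out of - or back into - the 16-bit checksum before it is
 * byte-swapped for the parsing BD. E.g. with fix = 4, csum_partial() of the
 * 4 bytes preceding the transport header is subtracted (hypothetical offset,
 * for illustration only).
 */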
3327 static u32
bnx2x_xmit_type(struct bnx2x
*bp
, struct sk_buff
*skb
)
3333 if (skb
->ip_summed
!= CHECKSUM_PARTIAL
)
3336 protocol
= vlan_get_protocol(skb
);
3337 if (protocol
== htons(ETH_P_IPV6
)) {
3339 prot
= ipv6_hdr(skb
)->nexthdr
;
3342 prot
= ip_hdr(skb
)->protocol
;
3345 if (!CHIP_IS_E1x(bp
) && skb
->encapsulation
) {
3346 if (inner_ip_hdr(skb
)->version
== 6) {
3347 rc
|= XMIT_CSUM_ENC_V6
;
3348 if (inner_ipv6_hdr(skb
)->nexthdr
== IPPROTO_TCP
)
3349 rc
|= XMIT_CSUM_TCP
;
3351 rc
|= XMIT_CSUM_ENC_V4
;
3352 if (inner_ip_hdr(skb
)->protocol
== IPPROTO_TCP
)
3353 rc
|= XMIT_CSUM_TCP
;
3356 if (prot
== IPPROTO_TCP
)
3357 rc
|= XMIT_CSUM_TCP
;
3359 if (skb_is_gso(skb
)) {
3360 if (skb_is_gso_v6(skb
)) {
3361 rc
|= (XMIT_GSO_V6
| XMIT_CSUM_TCP
);
3362 if (rc
& XMIT_CSUM_ENC
)
3363 rc
|= XMIT_GSO_ENC_V6
;
3365 rc
|= (XMIT_GSO_V4
| XMIT_CSUM_TCP
);
3366 if (rc
& XMIT_CSUM_ENC
)
3367 rc
|= XMIT_GSO_ENC_V4
;
3374 /* VXLAN: 4 = 1 (for linear data BD) + 3 (2 for PBD and last BD) */
3375 #define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS 4
3377 /* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3378 #define BNX2X_NUM_TSO_WIN_SUB_BDS 3
3380 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3381 /* check if packet requires linearization (packet is too fragmented)
3382 no need to check fragmentation if page size > 8K (there will be no
3383 violation to FW restrictions) */
3384 static int bnx2x_pkt_req_lin(struct bnx2x
*bp
, struct sk_buff
*skb
,
3387 int first_bd_sz
= 0, num_tso_win_sub
= BNX2X_NUM_TSO_WIN_SUB_BDS
;
3388 int to_copy
= 0, hlen
= 0;
3390 if (xmit_type
& XMIT_GSO_ENC
)
3391 num_tso_win_sub
= BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS
;
3393 if (skb_shinfo(skb
)->nr_frags
>= (MAX_FETCH_BD
- num_tso_win_sub
)) {
3394 if (xmit_type
& XMIT_GSO
) {
3395 unsigned short lso_mss
= skb_shinfo(skb
)->gso_size
;
3396 int wnd_size
= MAX_FETCH_BD
- num_tso_win_sub
;
3397 /* Number of windows to check */
3398 int num_wnds
= skb_shinfo(skb
)->nr_frags
- wnd_size
;
3403 /* Headers length */
3404 if (xmit_type
& XMIT_GSO_ENC
)
3405 hlen
= (int)(skb_inner_transport_header(skb
) -
3407 inner_tcp_hdrlen(skb
);
3409 hlen
= (int)(skb_transport_header(skb
) -
3410 skb
->data
) + tcp_hdrlen(skb
);
3412 /* Amount of data (w/o headers) on linear part of SKB*/
3413 first_bd_sz
= skb_headlen(skb
) - hlen
;
3415 wnd_sum
= first_bd_sz
;
3417 /* Calculate the first sum - it's special */
3418 for (frag_idx
= 0; frag_idx
< wnd_size
- 1; frag_idx
++)
3420 skb_frag_size(&skb_shinfo(skb
)->frags
[frag_idx
]);
3422 /* If there was data on linear skb data - check it */
3423 if (first_bd_sz
> 0) {
3424 if (unlikely(wnd_sum
< lso_mss
)) {
3429 wnd_sum
-= first_bd_sz
;
3432 /* Others are easier: run through the frag list and
3433 check all windows */
3434 for (wnd_idx
= 0; wnd_idx
<= num_wnds
; wnd_idx
++) {
3436 skb_frag_size(&skb_shinfo(skb
)->frags
[wnd_idx
+ wnd_size
- 1]);
3438 if (unlikely(wnd_sum
< lso_mss
)) {
3443 skb_frag_size(&skb_shinfo(skb
)->frags
[wnd_idx
]);
3446 /* in non-LSO too fragmented packet should always
3453 if (unlikely(to_copy
))
3454 DP(NETIF_MSG_TX_QUEUED
,
3455 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
3456 (xmit_type
& XMIT_GSO
) ? "LSO" : "non-LSO",
3457 skb_shinfo(skb
)->nr_frags
, hlen
, first_bd_sz
);
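/* The check above slides a window of (MAX_FETCH_BD - num_tso_win_sub) BDs
 * across the frag list: for every window the firmware must be able to build
 * at least one full MSS, so each window's byte sum is compared against
 * gso_size. A hypothetical packet with a 100-byte linear part, many 200-byte
 * frags and an MSS of 9000 would fail the very first window and be handed
 * to skb_linearize() instead (numbers are illustrative only).
 */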
3464 * bnx2x_set_pbd_gso - update PBD in GSO case.
3468 * @xmit_type: xmit flags
3470 static void bnx2x_set_pbd_gso(struct sk_buff
*skb
,
3471 struct eth_tx_parse_bd_e1x
*pbd
,
3474 pbd
->lso_mss
= cpu_to_le16(skb_shinfo(skb
)->gso_size
);
3475 pbd
->tcp_send_seq
= bswab32(tcp_hdr(skb
)->seq
);
3476 pbd
->tcp_flags
= pbd_tcp_flags(tcp_hdr(skb
));
3478 if (xmit_type
& XMIT_GSO_V4
) {
3479 pbd
->ip_id
= bswab16(ip_hdr(skb
)->id
);
3480 pbd
->tcp_pseudo_csum
=
3481 bswab16(~csum_tcpudp_magic(ip_hdr(skb
)->saddr
,
3483 0, IPPROTO_TCP
, 0));
3485 pbd
->tcp_pseudo_csum
=
3486 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb
)->saddr
,
3487 &ipv6_hdr(skb
)->daddr
,
3488 0, IPPROTO_TCP
, 0));
3492 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN
);
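/* Note on the pseudo checksum above: it is the standard TCP pseudo-header
 * sum (saddr, daddr, protocol) computed with a zero length, because the
 * firmware patches the real segment length per generated frame. A sketch of
 * the equivalent computation for the IPv4 case:
 *	~csum_tcpudp_magic(saddr, daddr, 0, IPPROTO_TCP, 0)
 * which is what is fed into the parse BD for every segment.
 */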
3496 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3498 * @bp: driver handle
3500 * @parsing_data: data to be updated
3501 * @xmit_type: xmit flags
3503 * 57712/578xx related, when skb has encapsulation
3505 static u8
bnx2x_set_pbd_csum_enc(struct bnx2x
*bp
, struct sk_buff
*skb
,
3506 u32
*parsing_data
, u32 xmit_type
)
3509 ((((u8
*)skb_inner_transport_header(skb
) - skb
->data
) >> 1) <<
3510 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT
) &
3511 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W
;
3513 if (xmit_type
& XMIT_CSUM_TCP
) {
3514 *parsing_data
|= ((inner_tcp_hdrlen(skb
) / 4) <<
3515 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT
) &
3516 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW
;
3518 return skb_inner_transport_header(skb
) +
3519 inner_tcp_hdrlen(skb
) - skb
->data
;
3522 /* We support checksum offload for TCP and UDP only.
3523 * No need to pass the UDP header length - it's a constant.
3525 return skb_inner_transport_header(skb
) +
3526 sizeof(struct udphdr
) - skb
->data
;
3530 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3532 * @bp: driver handle
3534 * @parsing_data: data to be updated
3535 * @xmit_type: xmit flags
3537 * 57712/578xx related
3539 static u8
bnx2x_set_pbd_csum_e2(struct bnx2x
*bp
, struct sk_buff
*skb
,
3540 u32
*parsing_data
, u32 xmit_type
)
3543 ((((u8
*)skb_transport_header(skb
) - skb
->data
) >> 1) <<
3544 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT
) &
3545 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W
;
3547 if (xmit_type
& XMIT_CSUM_TCP
) {
3548 *parsing_data
|= ((tcp_hdrlen(skb
) / 4) <<
3549 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT
) &
3550 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW
;
3552 return skb_transport_header(skb
) + tcp_hdrlen(skb
) - skb
->data
;
3554 /* We support checksum offload for TCP and UDP only.
3555 * No need to pass the UDP header length - it's a constant.
3557 return skb_transport_header(skb
) + sizeof(struct udphdr
) - skb
->data
;
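/* Encoding sketch for the parsing data built above (hypothetical header
 * sizes): a transport header that starts 54 bytes into the frame is stored
 * as 54 >> 1 = 27 words in the L4_HDR_START_OFFSET_W field, and a 32-byte
 * TCP header (with options) as 32 / 4 = 8 dwords in TCP_HDR_LENGTH_DW. The
 * returned value is the total header length in bytes, used later for TSO
 * splitting.
 */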
/* set FW indication according to inner or outer protocols if tunneled */
static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
			       struct eth_tx_start_bd *tx_start_bd,
			       u32 xmit_type)
{
	tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

	if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;

	if (!(xmit_type & XMIT_CSUM_TCP))
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
}
3575 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3577 * @bp: driver handle
3579 * @pbd: parse BD to be updated
3580 * @xmit_type: xmit flags
3582 static u8
bnx2x_set_pbd_csum(struct bnx2x
*bp
, struct sk_buff
*skb
,
3583 struct eth_tx_parse_bd_e1x
*pbd
,
3586 u8 hlen
= (skb_network_header(skb
) - skb
->data
) >> 1;
3588 /* for now NS flag is not used in Linux */
3591 ((skb
->protocol
== cpu_to_be16(ETH_P_8021Q
)) <<
3592 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT
));
3594 pbd
->ip_hlen_w
= (skb_transport_header(skb
) -
3595 skb_network_header(skb
)) >> 1;
3597 hlen
+= pbd
->ip_hlen_w
;
3599 /* We support checksum offload for TCP and UDP only */
3600 if (xmit_type
& XMIT_CSUM_TCP
)
3601 hlen
+= tcp_hdrlen(skb
) / 2;
3603 hlen
+= sizeof(struct udphdr
) / 2;
3605 pbd
->total_hlen_w
= cpu_to_le16(hlen
);
3608 if (xmit_type
& XMIT_CSUM_TCP
) {
3609 pbd
->tcp_pseudo_csum
= bswab16(tcp_hdr(skb
)->check
);
3612 s8 fix
= SKB_CS_OFF(skb
); /* signed! */
3614 DP(NETIF_MSG_TX_QUEUED
,
3615 "hlen %d fix %d csum before fix %x\n",
3616 le16_to_cpu(pbd
->total_hlen_w
), fix
, SKB_CS(skb
));
3618 /* HW bug: fixup the CSUM */
3619 pbd
->tcp_pseudo_csum
=
3620 bnx2x_csum_fix(skb_transport_header(skb
),
3623 DP(NETIF_MSG_TX_QUEUED
, "csum after fix %x\n",
3624 pbd
->tcp_pseudo_csum
);
3630 static void bnx2x_update_pbds_gso_enc(struct sk_buff
*skb
,
3631 struct eth_tx_parse_bd_e2
*pbd_e2
,
3632 struct eth_tx_parse_2nd_bd
*pbd2
,
3637 u8 outerip_off
, outerip_len
= 0;
3639 /* from outer IP to transport */
3640 hlen_w
= (skb_inner_transport_header(skb
) -
3641 skb_network_header(skb
)) >> 1;
3644 hlen_w
+= inner_tcp_hdrlen(skb
) >> 1;
3646 pbd2
->fw_ip_hdr_to_payload_w
= hlen_w
;
3648 /* outer IP header info */
3649 if (xmit_type
& XMIT_CSUM_V4
) {
3650 struct iphdr
*iph
= ip_hdr(skb
);
3651 u32 csum
= (__force u32
)(~iph
->check
) -
3652 (__force u32
)iph
->tot_len
-
3653 (__force u32
)iph
->frag_off
;
3655 outerip_len
= iph
->ihl
<< 1;
3657 pbd2
->fw_ip_csum_wo_len_flags_frag
=
3658 bswab16(csum_fold((__force __wsum
)csum
));
3660 pbd2
->fw_ip_hdr_to_payload_w
=
3661 hlen_w
- ((sizeof(struct ipv6hdr
)) >> 1);
3662 pbd_e2
->data
.tunnel_data
.flags
|=
3663 ETH_TUNNEL_DATA_IPV6_OUTER
;
3666 pbd2
->tcp_send_seq
= bswab32(inner_tcp_hdr(skb
)->seq
);
3668 pbd2
->tcp_flags
= pbd_tcp_flags(inner_tcp_hdr(skb
));
3670 /* inner IP header info */
3671 if (xmit_type
& XMIT_CSUM_ENC_V4
) {
3672 pbd2
->hw_ip_id
= bswab16(inner_ip_hdr(skb
)->id
);
3674 pbd_e2
->data
.tunnel_data
.pseudo_csum
=
3675 bswab16(~csum_tcpudp_magic(
3676 inner_ip_hdr(skb
)->saddr
,
3677 inner_ip_hdr(skb
)->daddr
,
3678 0, IPPROTO_TCP
, 0));
3680 pbd_e2
->data
.tunnel_data
.pseudo_csum
=
3681 bswab16(~csum_ipv6_magic(
3682 &inner_ipv6_hdr(skb
)->saddr
,
3683 &inner_ipv6_hdr(skb
)->daddr
,
3684 0, IPPROTO_TCP
, 0));
3687 outerip_off
= (skb_network_header(skb
) - skb
->data
) >> 1;
3692 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT
) |
3693 ((skb
->protocol
== cpu_to_be16(ETH_P_8021Q
)) <<
3694 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT
);
3696 if (ip_hdr(skb
)->protocol
== IPPROTO_UDP
) {
3697 SET_FLAG(*global_data
, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST
, 1);
3698 pbd2
->tunnel_udp_hdr_start_w
= skb_transport_offset(skb
) >> 1;
static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
					 u32 xmit_type)
{
	struct ipv6hdr *ipv6;

	if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
		return;

	if (xmit_type & XMIT_GSO_ENC_V6)
		ipv6 = inner_ipv6_hdr(skb);
	else /* XMIT_GSO_V6 */
		ipv6 = ipv6_hdr(skb);

	if (ipv6->nexthdr == NEXTHDR_IPV6)
		*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
}
3719 /* called with netif_tx_lock
3720 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3721 * netif_wake_queue()
3723 netdev_tx_t
bnx2x_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
3725 struct bnx2x
*bp
= netdev_priv(dev
);
3727 struct netdev_queue
*txq
;
3728 struct bnx2x_fp_txdata
*txdata
;
3729 struct sw_tx_bd
*tx_buf
;
3730 struct eth_tx_start_bd
*tx_start_bd
, *first_bd
;
3731 struct eth_tx_bd
*tx_data_bd
, *total_pkt_bd
= NULL
;
3732 struct eth_tx_parse_bd_e1x
*pbd_e1x
= NULL
;
3733 struct eth_tx_parse_bd_e2
*pbd_e2
= NULL
;
3734 struct eth_tx_parse_2nd_bd
*pbd2
= NULL
;
3735 u32 pbd_e2_parsing_data
= 0;
3736 u16 pkt_prod
, bd_prod
;
3739 u32 xmit_type
= bnx2x_xmit_type(bp
, skb
);
3742 __le16 pkt_size
= 0;
3744 u8 mac_type
= UNICAST_ADDRESS
;
3746 #ifdef BNX2X_STOP_ON_ERROR
3747 if (unlikely(bp
->panic
))
3748 return NETDEV_TX_BUSY
;
3751 txq_index
= skb_get_queue_mapping(skb
);
3752 txq
= netdev_get_tx_queue(dev
, txq_index
);
3754 BUG_ON(txq_index
>= MAX_ETH_TXQ_IDX(bp
) + (CNIC_LOADED(bp
) ? 1 : 0));
3756 txdata
= &bp
->bnx2x_txq
[txq_index
];
3758 /* enable this debug print to view the transmission queue being used
3759 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3760 txq_index, fp_index, txdata_index); */
3762 /* enable this debug print to view the transmission details
3763 DP(NETIF_MSG_TX_QUEUED,
3764 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3765 txdata->cid, fp_index, txdata_index, txdata, fp); */
3767 if (unlikely(bnx2x_tx_avail(bp
, txdata
) <
3768 skb_shinfo(skb
)->nr_frags
+
3770 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT
))) {
3771 /* Handle special storage cases separately */
3772 if (txdata
->tx_ring_size
== 0) {
3773 struct bnx2x_eth_q_stats
*q_stats
=
3774 bnx2x_fp_qstats(bp
, txdata
->parent_fp
);
3775 q_stats
->driver_filtered_tx_pkt
++;
3777 return NETDEV_TX_OK
;
3779 bnx2x_fp_qstats(bp
, txdata
->parent_fp
)->driver_xoff
++;
3780 netif_tx_stop_queue(txq
);
3781 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3783 return NETDEV_TX_BUSY
;
3786 DP(NETIF_MSG_TX_QUEUED
,
3787 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
3788 txq_index
, skb
->ip_summed
, skb
->protocol
, ipv6_hdr(skb
)->nexthdr
,
3789 ip_hdr(skb
)->protocol
, skb_shinfo(skb
)->gso_type
, xmit_type
,
3792 eth
= (struct ethhdr
*)skb
->data
;
3794 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3795 if (unlikely(is_multicast_ether_addr(eth
->h_dest
))) {
3796 if (is_broadcast_ether_addr(eth
->h_dest
))
3797 mac_type
= BROADCAST_ADDRESS
;
3799 mac_type
= MULTICAST_ADDRESS
;
3802 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3803 /* First, check if we need to linearize the skb (due to FW
3804 restrictions). No need to check fragmentation if page size > 8K
3805 (there will be no violation to FW restrictions) */
3806 if (bnx2x_pkt_req_lin(bp
, skb
, xmit_type
)) {
3807 /* Statistics of linearization */
3809 if (skb_linearize(skb
) != 0) {
3810 DP(NETIF_MSG_TX_QUEUED
,
3811 "SKB linearization failed - silently dropping this SKB\n");
3812 dev_kfree_skb_any(skb
);
3813 return NETDEV_TX_OK
;
3817 /* Map skb linear data for DMA */
3818 mapping
= dma_map_single(&bp
->pdev
->dev
, skb
->data
,
3819 skb_headlen(skb
), DMA_TO_DEVICE
);
3820 if (unlikely(dma_mapping_error(&bp
->pdev
->dev
, mapping
))) {
3821 DP(NETIF_MSG_TX_QUEUED
,
3822 "SKB mapping failed - silently dropping this SKB\n");
3823 dev_kfree_skb_any(skb
);
3824 return NETDEV_TX_OK
;
3827 Please read carefully. First we use one BD which we mark as start,
3828 then we have a parsing info BD (used for TSO or xsum),
3829 and only then we have the rest of the TSO BDs.
3830 (don't forget to mark the last one as last,
3831 and to unmap only AFTER you write to the BD ...)
3832 And above all, all pdb sizes are in words - NOT DWORDS!
3835 /* get current pkt produced now - advance it just before sending packet
3836 * since mapping of pages may fail and cause packet to be dropped
3838 pkt_prod
= txdata
->tx_pkt_prod
;
3839 bd_prod
= TX_BD(txdata
->tx_bd_prod
);
3841 /* get a tx_buf and first BD
3842 * tx_start_bd may be changed during SPLIT,
3843 * but first_bd will always stay first
3845 tx_buf
= &txdata
->tx_buf_ring
[TX_BD(pkt_prod
)];
3846 tx_start_bd
= &txdata
->tx_desc_ring
[bd_prod
].start_bd
;
3847 first_bd
= tx_start_bd
;
3849 tx_start_bd
->bd_flags
.as_bitfield
= ETH_TX_BD_FLAGS_START_BD
;
3851 if (unlikely(skb_shinfo(skb
)->tx_flags
& SKBTX_HW_TSTAMP
)) {
3852 if (!(bp
->flags
& TX_TIMESTAMPING_EN
)) {
3853 BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3854 } else if (bp
->ptp_tx_skb
) {
3855 BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
3857 skb_shinfo(skb
)->tx_flags
|= SKBTX_IN_PROGRESS
;
3858 /* schedule check for Tx timestamp */
3859 bp
->ptp_tx_skb
= skb_get(skb
);
3860 bp
->ptp_tx_start
= jiffies
;
3861 schedule_work(&bp
->ptp_task
);
3865 /* header nbd: indirectly zero other flags! */
3866 tx_start_bd
->general_data
= 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT
;
3868 /* remember the first BD of the packet */
3869 tx_buf
->first_bd
= txdata
->tx_bd_prod
;
3873 DP(NETIF_MSG_TX_QUEUED
,
3874 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3875 pkt_prod
, tx_buf
, txdata
->tx_pkt_prod
, bd_prod
, tx_start_bd
);
3877 if (skb_vlan_tag_present(skb
)) {
3878 tx_start_bd
->vlan_or_ethertype
=
3879 cpu_to_le16(skb_vlan_tag_get(skb
));
3880 tx_start_bd
->bd_flags
.as_bitfield
|=
3881 (X_ETH_OUTBAND_VLAN
<< ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT
);
3883 /* when transmitting in a vf, start bd must hold the ethertype
3884 * for fw to enforce it
3887 #ifndef BNX2X_STOP_ON_ERROR
3890 /* Still need to consider inband vlan for enforced */
3891 if (__vlan_get_tag(skb
, &vlan_tci
)) {
3892 tx_start_bd
->vlan_or_ethertype
=
3893 cpu_to_le16(ntohs(eth
->h_proto
));
3895 tx_start_bd
->bd_flags
.as_bitfield
|=
3896 (X_ETH_INBAND_VLAN
<<
3897 ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT
);
3898 tx_start_bd
->vlan_or_ethertype
=
3899 cpu_to_le16(vlan_tci
);
3901 #ifndef BNX2X_STOP_ON_ERROR
3903 /* used by FW for packet accounting */
3904 tx_start_bd
->vlan_or_ethertype
= cpu_to_le16(pkt_prod
);
3909 nbd
= 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3911 /* turn on parsing and get a BD */
3912 bd_prod
= TX_BD(NEXT_TX_IDX(bd_prod
));
3914 if (xmit_type
& XMIT_CSUM
)
3915 bnx2x_set_sbd_csum(bp
, skb
, tx_start_bd
, xmit_type
);
3917 if (!CHIP_IS_E1x(bp
)) {
3918 pbd_e2
= &txdata
->tx_desc_ring
[bd_prod
].parse_bd_e2
;
3919 memset(pbd_e2
, 0, sizeof(struct eth_tx_parse_bd_e2
));
3921 if (xmit_type
& XMIT_CSUM_ENC
) {
3922 u16 global_data
= 0;
3924 /* Set PBD in enc checksum offload case */
3925 hlen
= bnx2x_set_pbd_csum_enc(bp
, skb
,
3926 &pbd_e2_parsing_data
,
3929 /* turn on 2nd parsing and get a BD */
3930 bd_prod
= TX_BD(NEXT_TX_IDX(bd_prod
));
3932 pbd2
= &txdata
->tx_desc_ring
[bd_prod
].parse_2nd_bd
;
3934 memset(pbd2
, 0, sizeof(*pbd2
));
3936 pbd_e2
->data
.tunnel_data
.ip_hdr_start_inner_w
=
3937 (skb_inner_network_header(skb
) -
3940 if (xmit_type
& XMIT_GSO_ENC
)
3941 bnx2x_update_pbds_gso_enc(skb
, pbd_e2
, pbd2
,
3945 pbd2
->global_data
= cpu_to_le16(global_data
);
3947 /* add addition parse BD indication to start BD */
3948 SET_FLAG(tx_start_bd
->general_data
,
3949 ETH_TX_START_BD_PARSE_NBDS
, 1);
3950 /* set encapsulation flag in start BD */
3951 SET_FLAG(tx_start_bd
->general_data
,
3952 ETH_TX_START_BD_TUNNEL_EXIST
, 1);
3954 tx_buf
->flags
|= BNX2X_HAS_SECOND_PBD
;
3957 } else if (xmit_type
& XMIT_CSUM
) {
3958 /* Set PBD in checksum offload case w/o encapsulation */
3959 hlen
= bnx2x_set_pbd_csum_e2(bp
, skb
,
3960 &pbd_e2_parsing_data
,
3964 bnx2x_set_ipv6_ext_e2(skb
, &pbd_e2_parsing_data
, xmit_type
);
3965 /* Add the macs to the parsing BD if this is a vf or if
3966 * Tx Switching is enabled.
3969 /* override GRE parameters in BD */
3970 bnx2x_set_fw_mac_addr(&pbd_e2
->data
.mac_addr
.src_hi
,
3971 &pbd_e2
->data
.mac_addr
.src_mid
,
3972 &pbd_e2
->data
.mac_addr
.src_lo
,
3975 bnx2x_set_fw_mac_addr(&pbd_e2
->data
.mac_addr
.dst_hi
,
3976 &pbd_e2
->data
.mac_addr
.dst_mid
,
3977 &pbd_e2
->data
.mac_addr
.dst_lo
,
3980 if (bp
->flags
& TX_SWITCHING
)
3981 bnx2x_set_fw_mac_addr(
3982 &pbd_e2
->data
.mac_addr
.dst_hi
,
3983 &pbd_e2
->data
.mac_addr
.dst_mid
,
3984 &pbd_e2
->data
.mac_addr
.dst_lo
,
3986 #ifdef BNX2X_STOP_ON_ERROR
3987 /* Enforce security is always set in Stop on Error -
3988 * source mac should be present in the parsing BD
3990 bnx2x_set_fw_mac_addr(&pbd_e2
->data
.mac_addr
.src_hi
,
3991 &pbd_e2
->data
.mac_addr
.src_mid
,
3992 &pbd_e2
->data
.mac_addr
.src_lo
,
3997 SET_FLAG(pbd_e2_parsing_data
,
3998 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE
, mac_type
);
4000 u16 global_data
= 0;
4001 pbd_e1x
= &txdata
->tx_desc_ring
[bd_prod
].parse_bd_e1x
;
4002 memset(pbd_e1x
, 0, sizeof(struct eth_tx_parse_bd_e1x
));
4003 /* Set PBD in checksum offload case */
4004 if (xmit_type
& XMIT_CSUM
)
4005 hlen
= bnx2x_set_pbd_csum(bp
, skb
, pbd_e1x
, xmit_type
);
4007 SET_FLAG(global_data
,
4008 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE
, mac_type
);
4009 pbd_e1x
->global_data
|= cpu_to_le16(global_data
);
4012 /* Setup the data pointer of the first BD of the packet */
4013 tx_start_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
4014 tx_start_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
4015 tx_start_bd
->nbytes
= cpu_to_le16(skb_headlen(skb
));
4016 pkt_size
= tx_start_bd
->nbytes
;
4018 DP(NETIF_MSG_TX_QUEUED
,
4019 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
4020 tx_start_bd
, tx_start_bd
->addr_hi
, tx_start_bd
->addr_lo
,
4021 le16_to_cpu(tx_start_bd
->nbytes
),
4022 tx_start_bd
->bd_flags
.as_bitfield
,
4023 le16_to_cpu(tx_start_bd
->vlan_or_ethertype
));
4025 if (xmit_type
& XMIT_GSO
) {
4027 DP(NETIF_MSG_TX_QUEUED
,
4028 "TSO packet len %d hlen %d total len %d tso size %d\n",
4029 skb
->len
, hlen
, skb_headlen(skb
),
4030 skb_shinfo(skb
)->gso_size
);
4032 tx_start_bd
->bd_flags
.as_bitfield
|= ETH_TX_BD_FLAGS_SW_LSO
;
4034 if (unlikely(skb_headlen(skb
) > hlen
)) {
4036 bd_prod
= bnx2x_tx_split(bp
, txdata
, tx_buf
,
4040 if (!CHIP_IS_E1x(bp
))
4041 pbd_e2_parsing_data
|=
4042 (skb_shinfo(skb
)->gso_size
<<
4043 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT
) &
4044 ETH_TX_PARSE_BD_E2_LSO_MSS
;
4046 bnx2x_set_pbd_gso(skb
, pbd_e1x
, xmit_type
);
4049 /* Set the PBD's parsing_data field if not zero
4050 * (for the chips newer than 57711).
4052 if (pbd_e2_parsing_data
)
4053 pbd_e2
->parsing_data
= cpu_to_le32(pbd_e2_parsing_data
);
4055 tx_data_bd
= (struct eth_tx_bd
*)tx_start_bd
;
4057 /* Handle fragmented skb */
4058 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
4059 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
4061 mapping
= skb_frag_dma_map(&bp
->pdev
->dev
, frag
, 0,
4062 skb_frag_size(frag
), DMA_TO_DEVICE
);
4063 if (unlikely(dma_mapping_error(&bp
->pdev
->dev
, mapping
))) {
4064 unsigned int pkts_compl
= 0, bytes_compl
= 0;
4066 DP(NETIF_MSG_TX_QUEUED
,
4067 "Unable to map page - dropping packet...\n");
4069 /* we need unmap all buffers already mapped
4071 * first_bd->nbd need to be properly updated
4072 * before call to bnx2x_free_tx_pkt
4074 first_bd
->nbd
= cpu_to_le16(nbd
);
4075 bnx2x_free_tx_pkt(bp
, txdata
,
4076 TX_BD(txdata
->tx_pkt_prod
),
4077 &pkts_compl
, &bytes_compl
);
4078 return NETDEV_TX_OK
;
4081 bd_prod
= TX_BD(NEXT_TX_IDX(bd_prod
));
4082 tx_data_bd
= &txdata
->tx_desc_ring
[bd_prod
].reg_bd
;
4083 if (total_pkt_bd
== NULL
)
4084 total_pkt_bd
= &txdata
->tx_desc_ring
[bd_prod
].reg_bd
;
4086 tx_data_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
4087 tx_data_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
4088 tx_data_bd
->nbytes
= cpu_to_le16(skb_frag_size(frag
));
4089 le16_add_cpu(&pkt_size
, skb_frag_size(frag
));
4092 DP(NETIF_MSG_TX_QUEUED
,
4093 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
4094 i
, tx_data_bd
, tx_data_bd
->addr_hi
, tx_data_bd
->addr_lo
,
4095 le16_to_cpu(tx_data_bd
->nbytes
));
4098 DP(NETIF_MSG_TX_QUEUED
, "last bd @%p\n", tx_data_bd
);
4100 /* update with actual num BDs */
4101 first_bd
->nbd
= cpu_to_le16(nbd
);
4103 bd_prod
= TX_BD(NEXT_TX_IDX(bd_prod
));
4105 /* now send a tx doorbell, counting the next BD
4106 * if the packet contains or ends with it
4108 if (TX_BD_POFF(bd_prod
) < nbd
)
4111 /* total_pkt_bytes should be set on the first data BD if
4112 * it's not an LSO packet and there is more than one
4113 * data BD. In this case pkt_size is limited by an MTU value.
4114 * However we prefer to set it for an LSO packet (while we don't
4115 * have to) in order to save some CPU cycles in a none-LSO
4116 * case, when we much more care about them.
4118 if (total_pkt_bd
!= NULL
)
4119 total_pkt_bd
->total_pkt_bytes
= pkt_size
;
4122 DP(NETIF_MSG_TX_QUEUED
,
4123 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
4124 pbd_e1x
, pbd_e1x
->global_data
, pbd_e1x
->ip_hlen_w
,
4125 pbd_e1x
->ip_id
, pbd_e1x
->lso_mss
, pbd_e1x
->tcp_flags
,
4126 pbd_e1x
->tcp_pseudo_csum
, pbd_e1x
->tcp_send_seq
,
4127 le16_to_cpu(pbd_e1x
->total_hlen_w
));
4129 DP(NETIF_MSG_TX_QUEUED
,
4130 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
4132 pbd_e2
->data
.mac_addr
.dst_hi
,
4133 pbd_e2
->data
.mac_addr
.dst_mid
,
4134 pbd_e2
->data
.mac_addr
.dst_lo
,
4135 pbd_e2
->data
.mac_addr
.src_hi
,
4136 pbd_e2
->data
.mac_addr
.src_mid
,
4137 pbd_e2
->data
.mac_addr
.src_lo
,
4138 pbd_e2
->parsing_data
);
4139 DP(NETIF_MSG_TX_QUEUED
, "doorbell: nbd %d bd %u\n", nbd
, bd_prod
);
4141 netdev_tx_sent_queue(txq
, skb
->len
);
4143 skb_tx_timestamp(skb
);
4145 txdata
->tx_pkt_prod
++;
4147 * Make sure that the BD data is updated before updating the producer
4148 * since FW might read the BD right after the producer is updated.
4149 * This is only applicable for weak-ordered memory model archs such
4150 * as IA-64. The following barrier is also mandatory since FW will
4151 * assumes packets must have BDs.
4155 txdata
->tx_db
.data
.prod
+= nbd
;
4158 DOORBELL(bp
, txdata
->cid
, txdata
->tx_db
.raw
);
4162 txdata
->tx_bd_prod
+= nbd
;
4164 if (unlikely(bnx2x_tx_avail(bp
, txdata
) < MAX_DESC_PER_TX_PKT
)) {
4165 netif_tx_stop_queue(txq
);
4167 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4168 * ordering of set_bit() in netif_tx_stop_queue() and read of
4172 bnx2x_fp_qstats(bp
, txdata
->parent_fp
)->driver_xoff
++;
4173 if (bnx2x_tx_avail(bp
, txdata
) >= MAX_DESC_PER_TX_PKT
)
4174 netif_tx_wake_queue(txq
);
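/* Stop/wake sketch for the block above: the queue is stopped first, then
 * availability is re-read and the queue possibly woken again. This pairs
 * with the opposite order in bnx2x_tx_int() (free BDs, then test the stopped
 * bit), so whichever side runs last sees the other side's update and the
 * queue cannot stay stopped forever. The driver_xoff counter only records
 * that the stop path was entered.
 */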
4178 return NETDEV_TX_OK
;
4181 void bnx2x_get_c2s_mapping(struct bnx2x
*bp
, u8
*c2s_map
, u8
*c2s_default
)
4183 int mfw_vn
= BP_FW_MB_IDX(bp
);
4186 /* If the shmem shouldn't affect configuration, reflect */
4187 if (!IS_MF_BD(bp
)) {
4190 for (i
= 0; i
< BNX2X_MAX_PRIORITY
; i
++)
4197 tmp
= SHMEM2_RD(bp
, c2s_pcp_map_lower
[mfw_vn
]);
4198 tmp
= (__force u32
)be32_to_cpu((__force __be32
)tmp
);
4199 c2s_map
[0] = tmp
& 0xff;
4200 c2s_map
[1] = (tmp
>> 8) & 0xff;
4201 c2s_map
[2] = (tmp
>> 16) & 0xff;
4202 c2s_map
[3] = (tmp
>> 24) & 0xff;
4204 tmp
= SHMEM2_RD(bp
, c2s_pcp_map_upper
[mfw_vn
]);
4205 tmp
= (__force u32
)be32_to_cpu((__force __be32
)tmp
);
4206 c2s_map
[4] = tmp
& 0xff;
4207 c2s_map
[5] = (tmp
>> 8) & 0xff;
4208 c2s_map
[6] = (tmp
>> 16) & 0xff;
4209 c2s_map
[7] = (tmp
>> 24) & 0xff;
4211 tmp
= SHMEM2_RD(bp
, c2s_pcp_map_default
[mfw_vn
]);
4212 tmp
= (__force u32
)be32_to_cpu((__force __be32
)tmp
);
4213 *c2s_default
= (tmp
>> (8 * mfw_vn
)) & 0xff;
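/* Unpacking sketch (illustrative value): if c2s_pcp_map_lower for this
 * function read back as 0x03020100 after the endian fixup, the per-priority
 * map would become
 *	c2s_map[0..3] = 0x00, 0x01, 0x02, 0x03
 * i.e. one byte per customer priority, lowest byte first; the upper word
 * fills entries 4..7 the same way and c2s_pcp_map_default supplies the
 * fallback byte selected by the function index.
 */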
4217 * bnx2x_setup_tc - routine to configure net_device for multi tc
4219 * @netdev: net device to configure
4220 * @tc: number of traffic classes to enable
4222 * callback connected to the ndo_setup_tc function pointer
4224 int bnx2x_setup_tc(struct net_device
*dev
, u8 num_tc
)
4226 struct bnx2x
*bp
= netdev_priv(dev
);
4227 u8 c2s_map
[BNX2X_MAX_PRIORITY
], c2s_def
;
4228 int cos
, prio
, count
, offset
;
4230 /* setup tc must be called under rtnl lock */
4233 /* no traffic classes requested. Aborting */
4235 netdev_reset_tc(dev
);
4239 /* requested to support too many traffic classes */
4240 if (num_tc
> bp
->max_cos
) {
4241 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4242 num_tc
, bp
->max_cos
);
4246 /* declare amount of supported traffic classes */
4247 if (netdev_set_num_tc(dev
, num_tc
)) {
4248 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc
);
4252 bnx2x_get_c2s_mapping(bp
, c2s_map
, &c2s_def
);
4254 /* configure priority to traffic class mapping */
4255 for (prio
= 0; prio
< BNX2X_MAX_PRIORITY
; prio
++) {
4256 int outer_prio
= c2s_map
[prio
];
4258 netdev_set_prio_tc_map(dev
, prio
, bp
->prio_to_cos
[outer_prio
]);
4259 DP(BNX2X_MSG_SP
| NETIF_MSG_IFUP
,
4260 "mapping priority %d to tc %d\n",
4261 outer_prio
, bp
->prio_to_cos
[outer_prio
]);
4264 /* Use this configuration to differentiate tc0 from other COSes
4265 This can be used for ets or pfc, and save the effort of setting
4266 up a multio class queue disc or negotiating DCBX with a switch
4267 netdev_set_prio_tc_map(dev, 0, 0);
4268 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4269 for (prio = 1; prio < 16; prio++) {
4270 netdev_set_prio_tc_map(dev, prio, 1);
4271 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
	/* configure traffic class to transmission queue mapping */
	for (cos = 0; cos < bp->max_cos; cos++) {
		count = BNX2X_NUM_ETH_QUEUES(bp);
		offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
		netdev_set_tc_queue(dev, cos, count, offset);
		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
		   "mapping tc %d to offset %d count %d\n",
		   cos, offset, count);
	}

	return 0;
}
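
/* ndo_setup_tc entry point: only mqprio is supported; the request is marked
 * as hardware offloaded and handed to bnx2x_setup_tc().
 */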
int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
		     struct tc_to_netdev *tc)
{
	if (tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	return bnx2x_setup_tc(dev, tc->mqprio->num_tc);
}
/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (!is_valid_ether_addr(addr->sa_data)) {
		BNX2X_ERR("Requested MAC address is not valid\n");
		return -EINVAL;
	}

	if (IS_MF_STORAGE_ONLY(bp)) {
		BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
		return -EINVAL;
	}

	if (netif_running(dev)) {
		rc = bnx2x_set_eth_mac(bp, false);
		if (rc)
			return rc;
	}

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (netif_running(dev))
		rc = bnx2x_set_eth_mac(bp, true);

	if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
		SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);

	return rc;
}
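
/* Free the per-queue fastpath memory: the status block (only cleared for the
 * FCoE queue, which uses the default SB) and, unless the queue is skipped,
 * the Rx rings and the per-CoS Tx rings.
 */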
static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
{
	union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
	struct bnx2x_fastpath *fp = &bp->fp[fp_index];
	u8 cos;

	/* Common */
	if (IS_FCOE_IDX(fp_index)) {
		memset(sb, 0, sizeof(union host_hc_status_block));
		fp->status_blk_mapping = 0;
	} else {
		/* status blocks */
		if (!CHIP_IS_E1x(bp))
			BNX2X_PCI_FREE(sb->e2_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_FREE(sb->e1x_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e1x));
	}

	/* Rx */
	if (!skip_rx_queue(bp, fp_index)) {
		bnx2x_free_rx_bds(fp);

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
			       bnx2x_fp(bp, fp_index, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
			       bnx2x_fp(bp, fp_index, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
			       bnx2x_fp(bp, fp_index, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}

	/* Tx */
	if (!skip_tx_queue(bp, fp_index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			DP(NETIF_MSG_IFDOWN,
			   "freeing tx memory of fp %d cos %d cid %d\n",
			   fp_index, cos, txdata->cid);

			BNX2X_FREE(txdata->tx_buf_ring);
			BNX2X_PCI_FREE(txdata->tx_desc_ring,
				       txdata->tx_desc_mapping,
				       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
		}
	}
	/* end of fastpath */
}
static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
{
	int i;

	for_each_cnic_queue(bp, i)
		bnx2x_free_fp_mem_at(bp, i);
}

void bnx2x_free_fp_mem(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i)
		bnx2x_free_fp_mem_at(bp, i);
}
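
/* Cache pointers to the index_values and running_index arrays inside the E2
 * or E1x status block, so fastpath code can reach them directly instead of
 * testing the chip type on every access.
 */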
static void set_sb_shortcuts(struct bnx2x *bp, int index)
{
	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);

	if (!CHIP_IS_E1x(bp)) {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e2_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e2_sb->sb.running_index;
	} else {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e1x_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e1x_sb->sb.running_index;
	}
}
/* Returns the number of actually allocated BDs */
static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
			      int rx_ring_size)
{
	struct bnx2x *bp = fp->bp;
	u16 ring_prod, cqe_ring_prod;
	int i, failure_cnt = 0;

	fp->rx_comp_cons = 0;
	cqe_ring_prod = ring_prod = 0;

	/* This routine is called only during fp init so
	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
	 */
	for (i = 0; i < rx_ring_size; i++) {
		if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
			failure_cnt++;
			continue;
		}
		ring_prod = NEXT_RX_IDX(ring_prod);
		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
		WARN_ON(ring_prod <= (i - failure_cnt));
	}

	if (failure_cnt)
		BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
			  i - failure_cnt, fp->index);

	fp->rx_bd_prod = ring_prod;
	/* Limit the CQE producer by the CQE ring size */
	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
				 cqe_ring_prod);

	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;

	return i - failure_cnt;
}
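
/* The RCQ is a chain of NUM_RCQ_RINGS pages; the last CQE of every page is
 * turned into a "next page" element pointing at the following page, so the
 * completion ring wraps through all of its pages.
 */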
static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
		struct eth_rx_cqe_next_page *nextpg;

		nextpg = (struct eth_rx_cqe_next_page *)
			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
		nextpg->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		nextpg->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
	}
}
static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
{
	union host_hc_status_block *sb;
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int ring_size = 0;
	u8 cos;
	int rx_ring_size = 0;

	if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
		rx_ring_size = MIN_RX_SIZE_NONTPA;
		bp->rx_ring_size = rx_ring_size;
	} else if (!bp->rx_ring_size) {
		rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);

		if (CHIP_IS_E3(bp)) {
			u32 cfg = SHMEM_RD(bp,
					   dev_info.port_hw_config[BP_PORT(bp)].
					   default_cfg);

			/* Decrease ring size for 1G functions */
			if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
			    PORT_HW_CFG_NET_SERDES_IF_SGMII)
				rx_ring_size /= 10;
		}

		/* allocate at least number of buffers required by FW */
		rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
				     MIN_RX_SIZE_TPA, rx_ring_size);

		bp->rx_ring_size = rx_ring_size;
	} else /* if rx_ring_size specified - use it */
		rx_ring_size = bp->rx_ring_size;

	DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);

	/* Common */
	sb = &bnx2x_fp(bp, index, status_blk);

	if (!IS_FCOE_IDX(index)) {
		/* status blocks */
		if (!CHIP_IS_E1x(bp)) {
			sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
						    sizeof(struct host_hc_status_block_e2));
			if (!sb->e2_sb)
				goto alloc_mem_err;
		} else {
			sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
						     sizeof(struct host_hc_status_block_e1x));
			if (!sb->e1x_sb)
				goto alloc_mem_err;
		}
	}

	/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
	 * set shortcuts for it.
	 */
	if (!IS_FCOE_IDX(index))
		set_sb_shortcuts(bp, index);

	/* Tx */
	if (!skip_tx_queue(bp, index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			DP(NETIF_MSG_IFUP,
			   "allocating tx memory of fp %d cos %d\n",
			   index, cos);

			txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
						      sizeof(struct sw_tx_bd),
						      GFP_KERNEL);
			if (!txdata->tx_buf_ring)
				goto alloc_mem_err;
			txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
							       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
			if (!txdata->tx_desc_ring)
				goto alloc_mem_err;
		}
	}

	/* Rx */
	if (!skip_rx_queue(bp, index)) {
		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		bnx2x_fp(bp, index, rx_buf_ring) =
			kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
		if (!bnx2x_fp(bp, index, rx_buf_ring))
			goto alloc_mem_err;
		bnx2x_fp(bp, index, rx_desc_ring) =
			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
					sizeof(struct eth_rx_bd) * NUM_RX_BD);
		if (!bnx2x_fp(bp, index, rx_desc_ring))
			goto alloc_mem_err;

		/* Seed all CQEs by 1s */
		bnx2x_fp(bp, index, rx_comp_ring) =
			BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
					 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
		if (!bnx2x_fp(bp, index, rx_comp_ring))
			goto alloc_mem_err;

		/* SGE ring */
		bnx2x_fp(bp, index, rx_page_ring) =
			kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
				GFP_KERNEL);
		if (!bnx2x_fp(bp, index, rx_page_ring))
			goto alloc_mem_err;
		bnx2x_fp(bp, index, rx_sge_ring) =
			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
					BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
		if (!bnx2x_fp(bp, index, rx_sge_ring))
			goto alloc_mem_err;
		/* RX BD ring */
		bnx2x_set_next_page_rx_bd(fp);

		/* CQ ring */
		bnx2x_set_next_page_rx_cq(fp);

		/* BDs */
		ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
		if (ring_size < rx_ring_size)
			goto alloc_mem_err;
	}

	return 0;

/* handles low memory cases */
alloc_mem_err:
	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
		  index, ring_size);
	/* FW will drop all packets if the queue is not big enough;
	 * in these cases we disable the queue.
	 * Min size is different for OOO, TPA and non-TPA queues.
	 */
	if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
				MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
		/* release memory allocated for this queue */
		bnx2x_free_fp_mem_at(bp, index);
		return -ENOMEM;
	}
	return 0;
}
static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
{
	if (!NO_FCOE(bp))
		/* FCoE */
		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
			/* we will fail the load process instead of marking
			 * the failure
			 */
			return -ENOMEM;

	return 0;
}

static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
{
	int i;

	/* 1. Allocate FP for leading - fatal if error
	 * 2. Allocate RSS - fix number of queues if error
	 */

	/* leading */
	if (bnx2x_alloc_fp_mem_at(bp, 0))
		return -ENOMEM;

	/* RSS */
	for_each_nondefault_eth_queue(bp, i)
		if (bnx2x_alloc_fp_mem_at(bp, i))
			break;

	/* handle memory failures */
	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;

		bnx2x_shrink_eth_fp(bp, delta);
		if (CNIC_SUPPORT(bp))
			/* move non eth FPs next to last eth FP
			 * must be done in that order
			 * FCOE_IDX < FWD_IDX < OOO_IDX
			 */

			/* move FCoE fp even NO_FCOE_FLAG is on */
			bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
		bp->num_ethernet_queues -= delta;
		bp->num_queues = bp->num_ethernet_queues +
				 bp->num_cnic_queues;
		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
			  bp->num_queues + delta, bp->num_queues);
	}

	return 0;
}
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < bp->fp_array_size; i++)
		kfree(bp->fp[i].tpa_info);
	kfree(bp->fp);
	kfree(bp->sp_objs);
	kfree(bp->fp_stats);
	kfree(bp->bnx2x_txq);
	kfree(bp->msix_table);
	kfree(bp->ilt);
}
int bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp;
	struct msix_entry *tbl;
	struct bnx2x_ilt *ilt;
	int msix_table_size = 0;
	int fp_array_size, txq_array_size;
	int i;

	/*
	 * The biggest MSI-X table we might need is the maximum number of fast
	 * path IGU SBs plus the default SB (for PF only).
	 */
	msix_table_size = bp->igu_sb_cnt;
	if (IS_PF(bp))
		msix_table_size++;
	BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);

	/* fp array: RSS plus CNIC related L2 queues */
	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
	bp->fp_array_size = fp_array_size;
	BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);

	fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
	if (!fp)
		goto alloc_err;
	for (i = 0; i < bp->fp_array_size; i++) {
		fp[i].tpa_info =
			kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
				sizeof(struct bnx2x_agg_info), GFP_KERNEL);
		if (!(fp[i].tpa_info))
			goto alloc_err;
	}

	bp->fp = fp;

	/* allocate sp objs */
	bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
			      GFP_KERNEL);
	if (!bp->sp_objs)
		goto alloc_err;

	/* allocate fp_stats */
	bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
			       GFP_KERNEL);
	if (!bp->fp_stats)
		goto alloc_err;

	/* Allocate memory for the transmission queues array */
	txq_array_size =
		BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
	BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);

	bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
				GFP_KERNEL);
	if (!bp->bnx2x_txq)
		goto alloc_err;

	/* msix table */
	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		goto alloc_err;
	bp->msix_table = tbl;

	/* ilt */
	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
	if (!ilt)
		goto alloc_err;
	bp->ilt = ilt;

	return 0;
alloc_err:
	bnx2x_free_mem_bp(bp);
	return -ENOMEM;
}
int bnx2x_reload_if_running(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (unlikely(!netif_running(dev)))
		return 0;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}
int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;

	if (bp->link_params.num_phys <= 1)
		return INT_PHY;

	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {
		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}

	return sel_phy_idx;
}
int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);

	/*
	 * The selected activated PHY is always after swapping (in case PHY
	 * swapping is enabled). So when swapping is enabled, we need to
	 * reverse the configuration.
	 */
	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}
#ifdef NETDEV_FCOE_WWNN
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
				cp->fcoe_wwn_node_name_lo);
		break;
	case NETDEV_FCOE_WWPN:
		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
				cp->fcoe_wwn_port_name_lo);
		break;
	default:
		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
		return -EINVAL;
	}

	return 0;
}
#endif
/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (pci_num_vf(bp->pdev)) {
		DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
		return -EPERM;
	}

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Can't perform change MTU during parity recovery\n");
		return -EAGAIN;
	}

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
		SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);

	return bnx2x_reload_if_running(dev);
}
netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (pci_num_vf(bp->pdev)) {
		netdev_features_t changed = dev->features ^ features;

		/* Revert the requested changes in features if they
		 * would require internal reload of PF in bnx2x_set_features().
		 */
		if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
			features &= ~NETIF_F_RXCSUM;
			features |= dev->features & NETIF_F_RXCSUM;
		}

		if (changed & NETIF_F_LOOPBACK) {
			features &= ~NETIF_F_LOOPBACK;
			features |= dev->features & NETIF_F_LOOPBACK;
		}
	}

	/* TPA requires Rx CSUM offloading */
	if (!(features & NETIF_F_RXCSUM)) {
		features &= ~NETIF_F_LRO;
		features &= ~NETIF_F_GRO;
	}

	return features;
}
int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);
	netdev_features_t changes = features ^ dev->features;
	bool bnx2x_reload = false;
	int rc;

	/* VFs or non SRIOV PFs should be able to change loopback feature */
	if (!pci_num_vf(bp->pdev)) {
		if (features & NETIF_F_LOOPBACK) {
			if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
				bp->link_params.loopback_mode = LOOPBACK_BMAC;
				bnx2x_reload = true;
			}
		} else {
			if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
				bp->link_params.loopback_mode = LOOPBACK_NONE;
				bnx2x_reload = true;
			}
		}
	}

	/* if GRO is changed while LRO is enabled, don't force a reload */
	if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
		changes &= ~NETIF_F_GRO;

	/* if GRO is changed while HW TPA is off, don't force a reload */
	if ((changes & NETIF_F_GRO) && bp->disable_tpa)
		changes &= ~NETIF_F_GRO;

	if (changes)
		bnx2x_reload = true;

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
			dev->features = features;
			rc = bnx2x_reload_if_running(dev);
			if (rc)
				return rc;
		}
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}
void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	/* This allows the netif to be shutdown gracefully before resetting */
	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
}
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid)
{
	if (!cxt) {
		BNX2X_ERR("bad context pointer %p\n", cxt);
		return;
	}

	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
				       CDU_REGION_NUMBER_UCM_AG,
				       ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
				       CDU_REGION_NUMBER_XCM_AG,
				       ETH_CONNECTION_TYPE);
}
static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
				    u8 fw_sb_id, u8 sb_index,
				    u8 ticks)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}
static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
				    u16 fw_sb_id, u8 sb_index,
				    u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
	u8 flags = REG_RD8(bp, addr);

	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR8(bp, addr, flags);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}
void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
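
	/* Disable coalescing when explicitly requested or when the requested
	 * interval is zero.
	 */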
	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}
void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
			    u32 verbose)
{
	smp_mb__before_atomic();
	set_bit(flag, &bp->sp_rtnl_state);
	smp_mb__after_atomic();
	DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
	   flag);
	schedule_delayed_work(&bp->sp_rtnl_task, 0);