/*
 * Copyright (C) 2005 - 2011 Emulex
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Costa Mesa, CA 92626
 */
#include <linux/prefetch.h>
#include <linux/module.h>
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter) {
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
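
/* Free the DMA-coherent ring memory backing a queue */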
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
	struct be_dma_mem *mem = &q->dma_mem;

	dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
	memset(mem->va, 0, mem->size);
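
/* Enable/disable host interrupts by flipping the HOSTINTR bit in the
 * MEMBAR interrupt-control register in PCI config space.
 */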
static void be_intr_set(struct be_adapter *adapter, bool enable)
	if (adapter->eeh_error)

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
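
/* Doorbell helpers: each notify below writes the ring id and the count of
 * posted/popped entries (plus arm/clear flags for EQs and CQs) to the
 * corresponding doorbell register in the adapter's doorbell BAR.
 */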
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	iowrite32(val, adapter->db + DB_RQ_OFFSET);

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)

		val |= 1 << DB_EQ_REARM_SHIFT;
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)

		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
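
/* ndo_set_mac_address handler: validate the requested MAC, program it as a
 * pmac on the interface and, on success, copy it into netdev->dev_addr.
 */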
static int be_mac_addr_set(struct net_device *netdev, void *p)
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];
	bool active_mac = true;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* For BE VF, MAC address is already activated by PF.
	 * Hence only operation left is updating netdev->dev_addr.
	 * Update it if user is passing the same MAC which was used
	 * during configuring VF MAC from PF(Hypervisor).
	 */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, current_mac,
				false, adapter->if_handle, 0);
		if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))

	if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))

	/* For Lancer check if any MAC is active.
	 * If active, get its mac id.
	 */
	if (lancer_chip(adapter) && !be_physfn(adapter))
		be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 &adapter->pmac_id[0], 0);

		be_cmd_pmac_del(adapter, adapter->if_handle,

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;

		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;

		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
static void populate_be_v0_stats(struct be_adapter *adapter)
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_mismatch_drops =
					port_stats->rx_address_mismatch_drops +
					port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
static void populate_be_v1_stats(struct be_adapter *adapter)
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
static void populate_lancer_stats(struct be_adapter *adapter)
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
static void accumulate_16bit_val(u32 *acc, u16 val)
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	ACCESS_ONCE(*acc) = newacc;
void be_parse_stats(struct be_adapter *adapter)
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
			/* for BE3 and Skyhawk */
			populate_be_v1_stats(adapter);

		/* as erx_v1 is longer than v0, ok to use v1 for v0 access */
		for_all_rx_queues(adapter, rxo, i) {
			/* below erx HW counter can actually wrap around after
			 * 65535. Driver accumulates a 32-bit value
			 */
			accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
					     (u16)erx->rx_drops_no_fragments \
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
void be_link_status_update(struct be_adapter *adapter, u8 link_status)
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
		netif_carrier_off(netdev);
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	u64_stats_update_end(&stats->sync);
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	if (lancer_chip(adapter) || !(cnt & 1)) {
		/* add a dummy to make it an even num */
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
	return vlan_tx_tag_present(skb) || adapter->pvid;
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;

			dma_unmap_single(dev, dma, wrb->frag_len,
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
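
/* Map the skb head and frags and fill one WRB per mapped piece; on a DMA
 * mapping error the already-filled WRBs are unmapped and the queue head is
 * rewound to map_head.
 */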
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;

	hdr = queue_head_node(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
				skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		copied += skb_frag_size(frag);

		wrb = queue_head_node(txq);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	txq->head = map_head;
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		copied -= wrb->frag_len;
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
	skb = skb_share_check(skb, GFP_ATOMIC);

	if (vlan_tx_tag_present(skb)) {
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		__vlan_put_tag(skb, vlan_tag);
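
/* ndo_start_xmit: build WRBs for the skb, stop the subqueue when it is about
 * to fill up, then ring the TX doorbell and update TX stats.
 */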
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* HW has a bug which considers padding bytes as legal
	 * and modifies the IPv4 hdr's 'tot_len' field
	 */
	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);

		dev_kfree_skb_any(skb);
static int be_change_mtu(struct net_device *netdev, int new_mtu)
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
	u16 vids[BE_NUM_VLANS_SUPPORTED];

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)

	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
		/* Set to VLAN promisc mode as setting VLAN filter failed */
			dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
			dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
			goto set_vlan_promisc;

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!lancer_chip(adapter) && !be_physfn(adapter)) {

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);
		adapter->vlans_added++;
		adapter->vlan_tag[vid] = 0;

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!lancer_chip(adapter) && !be_physfn(adapter)) {

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter);
		adapter->vlans_added--;
		adapter->vlan_tag[vid] = 1;
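
/* ndo_set_rx_mode: program promiscuous/multicast filtering and re-sync the
 * unicast MAC list with the hardware filters.
 */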
static void be_set_rx_mode(struct net_device *netdev)
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter);

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);

		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					&adapter->pmac_id[adapter->uc_macs], 0);

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	bool active_mac = false;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,

		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
				vf_cfg->pmac_id, vf + 1);
		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
				&vf_cfg->pmac_id, vf + 1);

		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))

	if (vf >= adapter->num_vfs)

	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!sriov_enabled(adapter))

	if (vf >= adapter->num_vfs || vlan > 4095)

		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;
			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);

		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
			adapter->vf_cfg[vf].if_handle);

		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
static int be_set_vf_tx_rate(struct net_device *netdev,
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!sriov_enabled(adapter))

	if (vf >= adapter->num_vfs)

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");

	if (lancer_chip(adapter))
		status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
		status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

		dev_err(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
		adapter->vf_cfg[vf].tx_rate = rate;
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
		if (dev->is_virtfn && pci_physfn(dev) == pdev) {
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
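
/* Adaptive interrupt coalescing: recompute the EQ delay from the observed RX
 * packet rate and program it only when it has changed.
 */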
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {

	if (eqo->idx >= adapter->num_rx_qs)

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;

	/* Update once a second */
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);

	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_bytes += rxcp->pkt_size;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
static inline bool csum_passed(struct be_rx_compl_info *rxcp)
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;

	atomic_dec(&rxq->used);
	return rx_page_info;
/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			struct be_rx_compl_info *rxcp)
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 hdr_len, curr_frag_len, remaining;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->tail += curr_frag_len;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
			put_page(page_info->page);

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;

	BUG_ON(j > MAX_SKB_FRAGS);
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;

	skb = napi_get_frags(napi);
		be_rx_compl_discard(rxo, rxcp);

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			put_page(page_info->page);
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)

	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
		be_parse_rx_compl_v0(compl, rxcp);

		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
	u32 order = get_order(size);

	return alloc_pages(gfp, order);
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
			page_info->page_offset = 0;
			page_info->page_offset = page_offset + rx_frag_size;
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			page_info->last_page_user = true;
		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
		prev_page_info->last_page_user = true;

		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)

	be_dws_le_to_cpu(txcp, sizeof(*txcp));
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
	struct be_eq_entry *eqe;

		eqe = queue_tail_node(&eqo->q);
		queue_tail_inc(&eqo->q);

/* Leaves the EQ in disarmed state */
static void be_eq_clean(struct be_eq_obj *eqo)
	int num = events_get(eqo);

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
static void be_rx_cq_clean(struct be_rx_obj *rxo)
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
		rxcp = be_rx_compl_get(rxo);
			if (lancer_chip(adapter))
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
			be_cq_notify(adapter, rx_cq->id, true, 0);
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, true, 1);
			if (rxcp->num_rcvd == 0)

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
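
/* Reap all pending TX completions and free any posted skbs that will never
 * get a completion.
 */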
static void be_tx_compl_clean(struct be_adapter *adapter)
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			while ((txcp = be_tx_compl_get(&txo->cq))) {
					AMAP_GET_BITS(struct amap_eth_tx_compl,
				num_wrbs += be_tx_compl_process(adapter, txo,
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
			if (atomic_read(&txq->used) == 0)
		if (pending_txqs == 0 || ++timeo > 200)

	for_all_tx_queues(adapter, txo, i) {
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
static void be_evt_queues_destroy(struct be_adapter *adapter)
	struct be_eq_obj *eqo;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
		be_queue_free(adapter, &eqo->q);

static int be_evt_queues_create(struct be_adapter *adapter)
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true;

		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
static void be_mcc_queues_destroy(struct be_adapter *adapter)
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))

	be_queue_free(adapter, q);
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
	be_queue_free(adapter, cq);
static void be_tx_queues_destroy(struct be_adapter *adapter)
	struct be_queue_info *q;
	struct be_tx_obj *txo;

	for_all_tx_queues(adapter, txo, i) {
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

static int be_num_txqs_want(struct be_adapter *adapter)
	if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
		be_is_mc(adapter) ||
		(!lancer_chip(adapter) && !be_physfn(adapter)) ||
		return adapter->max_tx_queues;
static int be_tx_cqs_create(struct be_adapter *adapter)
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);

	for_all_tx_queues(adapter, txo, i) {
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);

static int be_tx_qs_create(struct be_adapter *adapter)
	struct be_tx_obj *txo;

	for_all_tx_queues(adapter, txo, i) {
		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));

		status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
static void be_rx_cqs_destroy(struct be_adapter *adapter)
	struct be_queue_info *q;
	struct be_rx_obj *rxo;

	for_all_rx_queues(adapter, rxo, i) {
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

static int be_rx_cqs_create(struct be_adapter *adapter)
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		netif_set_real_num_rx_queues(adapter->netdev,
			adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));

		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
static irqreturn_t be_intx(int irq, void *dev)
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
			eqo->spurious_intr = 0;
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)

static irqreturn_t be_msix(int irq, void *dev)
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
	napi_schedule(&eqo->napi);
static inline bool do_gro(struct be_rx_compl_info *rxcp)
	return (rxcp->tcpf && !rxcp->err) ? true : false;
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);

			be_rx_compl_process_gro(rxo, napi, rxcp);
			be_rx_compl_process(rxo, rxcp);

		be_rx_stats_update(rxo, rxcp);

		be_cq_notify(adapter, rx_cq->id, true, work_done);

		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
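
/* Reap up to 'budget' TX completions for this txq; returns true when the
 * completion queue was fully drained within the budget.
 */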
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,

		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);

	return (work_done < budget); /* Done */
int be_poll(struct napi_struct *napi, int budget)
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);
	}

	/* On certain platforms BE hardware can indicate spurious UEs.
	 * Allow the h/w to stop working completely in case of a real UE.
	 * Hence not setting hw_error for UE detection.
	 */
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->hw_error = true;
		dev_err(&adapter->pdev->dev,
			"Error detected in the card\n");
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"ERR: sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error2 0x%x\n", sliport_err2);
	}

	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
					"UE: %s bit set\n",
					ue_status_low_desc[i]);
		}
	}

	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
					"UE: %s bit set\n",
					ue_status_hi_desc[i]);
		}
	}
}
static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

static uint be_num_rss_want(struct be_adapter *adapter)
{
	u32 num = 0;

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    (lancer_chip(adapter) ||
	     (!sriov_want(adapter) && be_physfn(adapter)))) {
		num = adapter->max_rss_queues;
		num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
	}
	return num;
}
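/* MSI-X sizing: the desired vector count is the number of RSS queues wanted
 * (capped by the number of online CPUs), plus RoCE vectors when the adapter
 * supports RoCE, clamped to MAX_MSIX_VECTORS and floored at one vector for
 * the default RX queue.
 */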
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS		1
	int i, status, num_vec, num_roce_vec = 0;
	struct device *dev = &adapter->pdev->dev;

	/* If RSS queues are not used, need a vec for the default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				    num_vec) == 0)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");
	return;
done:
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
	return;
}
static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i)
		napi_disable(&eqo->napi);

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
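/* RX queue bring-up: the default (non-RSS) RXQ is created first as the FW
 * expects, the RSS queues follow, and for multi-RXQ functions the 128-entry
 * RSS indirection table is filled round-robin with the RSS queue ids before
 * the rings are posted with receive buffers.
 */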
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -ENOMEM;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
						 adapter->netdev->dev_addr,
						 &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		else
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;
	bool active = false;

	for_all_vfs(adapter, vf_cfg, vf) {
		be_cmd_get_mac_from_list(adapter, mac, &active,
					 &vf_cfg->pmac_id, 0);

		status = be_cmd_mac_addr_query(adapter, mac, false,
					       vf_cfg->if_handle, 0);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
	pci_disable_sriov(adapter->pdev);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
static int be_clear(struct be_adapter *adapter)
{
	int i = 1;

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_msix_disable(adapter);
	return 0;
}
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	int status = 0;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter))
			be_cmd_get_profile_config(adapter, &cap_flags, vf + 1);

		/* If a FW profile exists, then cap_flags are updated */
		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
			   BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			break;
	}
	return status;
}

static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}
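/* SR-IOV setup: reuse VFs that are already enabled (e.g. after a function
 * reset) or enable num_vfs via PCI, create or query the per-VF interface and
 * MAC, lift the default BE3 TX-rate cap, and record each VF's link speed and
 * default VLAN from the switch configuration.
 */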
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u16 def_vlan, lnk_speed;
	int status, old_vfs, vf;
	struct device *dev = &adapter->pdev->dev;

	old_vfs = be_find_vfs(adapter, ENABLED);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > adapter->dev_num_vfs)
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 adapter->dev_num_vfs, num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			return 0;
		}
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	if (old_vfs) {
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth.
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_set_qos(adapter, 1000, vf + 1);

		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
					       vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;

		be_cmd_enable_vf(adapter, vf + 1);
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->promiscuous = false;
	if (be_physfn(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;
	else
		adapter->cmd_privileges = MIN_PRIVILEGES;
}
static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
			   bool *active_mac, u32 *pmac_id)
{
	int status;

	if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
		if (!lancer_chip(adapter) && !be_physfn(adapter))
			*active_mac = true;
		else
			*active_mac = false;
		return 0;
	}

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, mac,
						  active_mac, pmac_id, 0);
		if (*active_mac)
			status = be_cmd_mac_addr_query(adapter, mac, false,
						       if_handle, *pmac_id);
	} else if (be_physfn(adapter)) {
		/* For BE3, for PF get permanent MAC */
		status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
		*active_mac = false;
	} else {
		/* For BE3, for VF get soft MAC assigned by PF */
		status = be_cmd_mac_addr_query(adapter, mac, false,
					       if_handle, 0);
		*active_mac = true;
	}
	return status;
}
static void be_get_resources(struct be_adapter *adapter)
{
	u16 dev_num_vfs;
	int pos, status;
	bool profile_present = false;

	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter);
		if (!status)
			profile_present = true;
	}

	if (profile_present) {
		/* Sanity fixes for Lancer */
		adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
					      BE_UC_PMAC_COUNT);
		adapter->max_vlans = min_t(u16, adapter->max_vlans,
					   BE_NUM_VLANS_SUPPORTED);
		adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
					       BE_MAX_MC);
		adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
					       MAX_TX_QS);
		adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
						BE3_MAX_RSS_QS);
		adapter->max_event_queues = min_t(u16,
						  adapter->max_event_queues,
						  BE3_MAX_RSS_QS);

		if (adapter->max_rss_queues &&
		    adapter->max_rss_queues == adapter->max_rx_queues)
			adapter->max_rss_queues -= 1;

		if (adapter->max_event_queues < adapter->max_rss_queues)
			adapter->max_rss_queues = adapter->max_event_queues;

	} else {
		if (be_physfn(adapter))
			adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
		else
			adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

		if (adapter->function_mode & FLEX10_MODE)
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED / 8;
		else
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

		adapter->max_mcast_mac = BE_MAX_MC;
		adapter->max_tx_queues = MAX_TX_QS;
		adapter->max_rss_queues = (adapter->be3_native) ?
					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
		adapter->max_event_queues = BE3_MAX_RSS_QS;

		adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST |
					BE_IF_FLAGS_PASS_L3L4_ERRORS |
					BE_IF_FLAGS_MCAST_PROMISCUOUS |
					BE_IF_FLAGS_VLAN_PROMISCUOUS |
					BE_IF_FLAGS_PROMISCUOUS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
			adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
	}

	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
				     &dev_num_vfs);
		if (BE3_chip(adapter))
			dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
		adapter->dev_num_vfs = dev_num_vfs;
	}
}
/* Routine to query per function resource limits */
static int be_get_config(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps);
	if (status)
		return status;

	be_get_resources(adapter);

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
				   sizeof(u32), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	return 0;
}
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];
	bool active_mac;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	be_msix_enable(adapter);

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
	/* In UMC mode FW does not return the right privileges.
	 * Override with correct privilege equivalent to PF.
	 */
	if (be_is_mc(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;

	en_flags = en_flags & adapter->if_cap_flags;

	status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
				  &adapter->if_handle, 0);
	if (status != 0)
		goto err;

	memset(mac, 0, ETH_ALEN);
	active_mac = false;
	status = be_get_mac_addr(adapter, mac, adapter->if_handle,
				 &active_mac, &adapter->pmac_id[0]);
	if (status != 0)
		goto err;

	if (!active_mac) {
		status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
					 &adapter->pmac_id[0], 0);
		if (status != 0)
			goto err;
	}

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter) && num_vfs) {
		if (adapter->dev_num_vfs)
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
	return 0;
err:
	be_clear(adapter);
	return status;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}

	return;
}
#endif

#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
char flash_cookie[2][16] =	{"*** SE FLAS", "H DIRECTORY *** "};
static bool be_flash_redboot(struct be_adapter *adapter,
			     const u8 *p, u32 img_start, int image_size,
			     int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
				      (image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
			"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

static bool phy_flashing_required(struct be_adapter *adapter)
{
	return (adapter->phy.phy_type == TN_8022 &&
		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
}
static bool is_comp_in_ufi(struct be_adapter *adapter,
			   struct flash_section_info *fsec, int type)
{
	int i = 0, img_type = 0;
	struct flash_section_info_g2 *fsec_g2 = NULL;

	if (BE2_chip(adapter))
		fsec_g2 = (struct flash_section_info_g2 *)fsec;

	for (i = 0; i < MAX_FLASH_COMP; i++) {
		if (fsec_g2)
			img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
		else
			img_type = le32_to_cpu(fsec->fsec_entry[i].type);

		if (img_type == type)
			return true;
	}
	return false;
}

struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
					 int header_size,
					 const struct firmware *fw)
{
	struct flash_section_info *fsec = NULL;
	const u8 *p = fw->data;

	p += header_size;
	while (p < (fw->data + fw->size)) {
		fsec = (struct flash_section_info *)p;
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
			return fsec;
		p += 32;
	}
	return NULL;
}
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	u32 total_bytes = 0, flash_op, num_bytes = 0;
	int status = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32 * 1024, total_bytes);

		total_bytes -= num_bytes;

		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, num_bytes);
		if (status) {
			if (status == ILLEGAL_IOCTL_REQ &&
			    optype == OPTYPE_PHY_FW)
				break;
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed.\n");
			return status;
		}
	}
	return 0;
}
/* For BE2 and BE3 */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd,
			int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info */
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
				pflashcomp[i].offset, pflashcomp[i].size,
				filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	int img_offset, img_size, img_optype, redboot;
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	const u8 *p = fw->data;
	struct flash_section_info *fsec = NULL;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}

	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);

		switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
		case IMAGE_FIRMWARE_iSCSI:
			img_optype = OPTYPE_ISCSI_ACTIVE;
			break;
		case IMAGE_BOOT_CODE:
			img_optype = OPTYPE_REDBOOT;
			break;
		case IMAGE_OPTION_ROM_ISCSI:
			img_optype = OPTYPE_BIOS;
			break;
		case IMAGE_OPTION_ROM_PXE:
			img_optype = OPTYPE_PXE_BIOS;
			break;
		case IMAGE_OPTION_ROM_FCoE:
			img_optype = OPTYPE_FCOE_BIOS;
			break;
		case IMAGE_FIRMWARE_BACKUP_iSCSI:
			img_optype = OPTYPE_ISCSI_BACKUP;
			break;
		case IMAGE_NCSI:
			img_optype = OPTYPE_NCSI_FW;
			break;
		default:
			continue;
		}

		if (img_optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
						   img_offset, img_size,
						   filehdr_size +
						   img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + img_offset + img_hdrs_size;
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				fsec->fsec_entry[i].type);
			return status;
		}
	}
	return 0;
}
static int lancer_wait_idle(struct be_adapter *adapter)
{
#define SLIPORT_IDLE_TIMEOUT 30
	u32 reg_val;
	int status = 0, i;

	for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
		reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
		if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
			break;

		ssleep(1);
	}

	if (i == SLIPORT_IDLE_TIMEOUT)
		status = -1;

	return status;
}

static int lancer_fw_reset(struct be_adapter *adapter)
{
	int status = 0;

	status = lancer_wait_idle(adapter);
	if (status)
		return status;

	iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
		  PHYSDEV_CONTROL_OFFSET);

	return status;
}
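/* Lancer firmware download: the image is streamed to the "/prg" object in
 * 32KB chunks via write_object commands and then committed with a final
 * write_object call; depending on the returned change_status the function
 * issues a firmware reset or tells the user a system reboot is required.
 */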
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_fw_reset(adapter);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
static int be_get_ufi_type(struct be_adapter *adapter,
			   struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		goto be_get_ufi_exit;

	if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
		return UFI_TYPE4;
	else if (BE3_chip(adapter) && fhdr->build[0] == '3')
		return UFI_TYPE3;
	else if (BE2_chip(adapter) && fhdr->build[0] == '2')
		return UFI_TYPE2;

be_get_ufi_exit:
	dev_err(&adapter->pdev->dev,
		"UFI and Interface are not compatible for flashing\n");
	return -1;
}
static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr);

	fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			if (ufi_type == UFI_TYPE4)
				status = be_flash_skyhawk(adapter, fw,
							  &flash_cmd, num_imgs);
			else if (ufi_type == UFI_TYPE3)
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
		}
	}

	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}

static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || !be_physfn(adapter))
		return 0;
	else
		return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
				SLI_INTF_IF_TYPE_SHIFT;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (adapter->csr == NULL)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev,
					   rx_filter->size,
					   &rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);
	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (lancer_chip(adapter))
		cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else
		/* BE3 and Skyhawk */
		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);

	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

bool be_is_wol_supported(struct be_adapter *adapter)
{
	return ((adapter->wol_cap & BE_WOL_CAP) &&
		!be_is_wol_excluded(adapter)) ? true : false;
}
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	if (lancer_chip(adapter))
		return 0;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get WOL capabilities,
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
static int lancer_recover_func(struct be_adapter *adapter)
{
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	adapter->hw_error = false;
	adapter->fw_timeout = false;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery succeeded\n");
	return 0;
err:
	if (adapter->eeh_error)
		dev_err(&adapter->pdev->dev,
			"Adapter SLIPORT recovery failed\n");

	return status;
}
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		if (adapter->eeh_error)
			goto out;

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);

		if (!status)
			netif_device_attach(adapter->netdev);
	}

out:
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
}
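/* Periodic worker (every second): while the interface is down it only reaps
 * pending MCC completions; otherwise it refreshes HW stats, samples the die
 * temperature every be_get_temp_freq iterations, replenishes RX rings that
 * ran dry and adapts the EQ delay for every event queue.
 */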
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static bool be_reset_required(struct be_adapter *adapter)
{
	return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
}
static char *mc_name(struct be_adapter *adapter)
{
	if (adapter->function_mode & FLEX10_MODE)
		return "FLEX10";
	else if (adapter->function_mode & VNIC_MODE)
		return "vNIC";
	else if (adapter->function_mode & UMC_ENABLED)
		return "UMC";
	else
		return "";
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_error = true;

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while a flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for the first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	be_clear_all_error(adapter);

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);