/*
 * Copyright (C) 2005 - 2010 ServerEngines
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 * 209 N. Fair Oaks Ave
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, uint, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
	/* per-bit name strings elided in this listing */
};

/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
	/* per-bit name strings elided in this listing */
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
				    mem->va, mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
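
/*
 * The four *_notify() helpers above share one pattern, as the DB_* masks
 * and shifts suggest: build a 32-bit doorbell word (queue id in the low
 * bits, counts/flags at the documented shifts) and write it to the
 * doorbell BAR at adapter->db. A rough sketch - re-arming an EQ after
 * popping two events would be:
 *
 *	u32 val = (qid & DB_EQ_RING_ID_MASK) |
 *		  (1 << DB_EQ_REARM_SHIFT)  |
 *		  (2 << DB_EQ_NUM_POPPED_SHIFT);
 *	iowrite32(val, adapter->db + DB_EQ_OFFSET);
 */
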
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_erx_stats *erx_stats = &hw_stats->erx;

	dev_stats->rx_packets = drvr_stats(adapter)->be_rx_pkts;
	dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts;
	dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes;
	dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes;
	dev_stats->multicast = drvr_stats(adapter)->be_rx_mcast_pkt;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long +
		port_stats->rx_dropped_too_small +
		port_stats->rx_dropped_too_short +
		port_stats->rx_dropped_header_too_small +
		port_stats->rx_dropped_tcp_length +
		port_stats->rx_dropped_runt +
		port_stats->rx_tcp_checksum_errs +
		port_stats->rx_ip_checksum_errs +
		port_stats->rx_udp_checksum_errs;

	/* no space in linux buffers: best possible approximation */
	dev_stats->rx_dropped =
		erx_stats->rx_drops_no_fragments[adapter->rx_obj.q.id];

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long;

	/* receive ring buffer overflow */
	dev_stats->rx_over_errors = 0;

	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
		port_stats->rx_input_fifo_overflow +
		rxf_stats->rx_drops_no_pbuf;
	/* receiver missed packets */
	dev_stats->rx_missed_errors = 0;

	/* packet transmit problems */
	dev_stats->tx_errors = 0;

	/* no space available in linux */
	dev_stats->tx_dropped = 0;

	dev_stats->collisions = 0;

	/* detailed tx_errors */
	dev_stats->tx_aborted_errors = 0;
	dev_stats->tx_carrier_errors = 0;
	dev_stats->tx_fifo_errors = 0;
	dev_stats->tx_heartbeat_errors = 0;
	dev_stats->tx_window_errors = 0;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_start_queue(netdev);
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_stop_queue(netdev);
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter)
{
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->be_prev_rx_frags = stats->be_rx_frags;
	eqd = stats->be_rx_fps / 110000;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;

	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}
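
/*
 * A rough sketch of the adaptive coalescing above: the target event-queue
 * delay scales with the observed frags/sec (e.g. ~1,100,000 frags/sec
 * gives eqd = 1100000 / 110000 = 10), is clamped to [min_eqd, max_eqd],
 * and is pushed to the hardware via be_cmd_modify_eqd() only when it
 * actually changes. The divisor 110000 is taken as-is from the code.
 */
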
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* bits/sec -> Mbits/sec */

	return rate;
}
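
/*
 * Worked example for be_calc_rate(): 250,000,000 bytes seen over 2*HZ
 * ticks -> 250e6 / 2 = 125e6 bytes/sec -> <<3 = 1e9 bits/sec ->
 * / 1e6 = 1000, i.e. the helper reports the rate in Mbits/sec.
 */
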
static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);

	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (cnt & 1) {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	} else
		*dummy = false;
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}
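
/*
 * Example of the accounting above: an skb with linear data plus two page
 * frags needs 1 + 2 data WRBs and 1 header WRB = 4 (even, no dummy).
 * With a single frag the count would be 3, so a dummy WRB is added and
 * *dummy is set to keep the ring entry count even.
 */
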
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
		bool vlan, u32 wrb_cnt, u32 len)
{
	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
			hdr, vlan_tx_tag_get(skb));
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			pci_unmap_single(pdev, dma, wrb->frag_len,
					PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pdev, dma, wrb->frag_len,
					PCI_DMA_TODEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	u16 map_head;
	struct pci_dev *pdev = adapter->pdev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = pci_map_single(pdev, skb->data, len,
					 PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = pci_map_page(pdev, frag->page,
				       frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
		     wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(pdev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
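
/*
 * Ring layout produced above, as the code suggests: [hdr wrb][data wrb]...
 * [optional dummy wrb]. The header slot is reserved first but filled last,
 * once 'copied' (the total payload length) is known. On a DMA mapping
 * failure the head is rewound to map_head and every fragment that was
 * already mapped is unmapped again.
 */
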
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}
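
/*
 * In short: at most BE_NUM_VLANS_SUPPORTED (64) tags fit in the hardware
 * table. Once vlans_added exceeds max_vlans the else-branch above asks
 * the firmware for vlan-promiscuous mode instead of programming a table;
 * the trailing argument lists are reconstructed on that assumption.
 */
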
static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;

	be_eq_notify(adapter, rx_eq->q.id, false, false, 0);
	be_eq_notify(adapter, tx_eq->q.id, false, false, 0);
	adapter->vlan_grp = grp;
	be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
		&adapter->mc_cmd_mem);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id);

	status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

static void be_rx_rate_update(struct be_adapter *adapter)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->be_rx_jiffies)) {
		stats->be_rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->be_rx_jiffies) < 2 * HZ)
		return;

	stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes
					  - stats->be_rx_bytes_prev,
					 now - stats->be_rx_jiffies);
	stats->be_rx_jiffies = now;
	stats->be_rx_bytes_prev = stats->be_rx_bytes;
}

static void be_rx_stats_update(struct be_adapter *adapter,
		u32 pktsize, u16 numfrags, u8 pkt_type)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);

	stats->be_rx_compl++;
	stats->be_rx_frags += numfrags;
	stats->be_rx_bytes += pktsize;
	stats->be_rx_pkts++;

	if (pkt_type == BE_MULTICAST_PACKET)
		stats->be_rx_mcast_pkt++;
}

static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
{
	u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;

	l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
	ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
	ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
	if (ip_version) {
		tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
		udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
	}
	ipv6_chk = (ip_version && (tcpf || udpf));

	return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
}
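
/*
 * Note the inverted sense above: do_pkt_csum() returns true when the
 * checksum offload result can NOT be trusted (the caller then leaves the
 * skb marked CHECKSUM_NONE), and false when both the L3 and L4 checksums
 * passed and rx_csum is enabled (caller sets CHECKSUM_UNNECESSARY).
 */
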
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &adapter->rx_obj.q;

	rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, PCI_DMA_FROMDEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throwaway the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter,
			struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
			u16 num_rcvd)
{
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, j;
	u32 pktsize, hdr_len, curr_frag_len, size;
	u8 *start;
	u8 pkt_type;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

	page_info = get_rx_page_info(adapter, rxq_idx);

	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(pktsize, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (pktsize <= rx_frag_size) {
		BUG_ON(num_rcvd != 1);
		goto done;
	}

	/* More frags present for this completion */
	size = pktsize;
	for (i = 1, j = 0; i < num_rcvd; i++) {
		size -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		page_info = get_rx_page_info(adapter, rxq_idx);

		curr_frag_len = min(size, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);

done:
	be_rx_stats_update(adapter, pktsize, num_rcvd, pkt_type);
}
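
/*
 * Receive-copy strategy above, roughly: at most BE_HDR_LEN header bytes
 * are memcpy'd into the skb linear area so the stack can parse headers
 * cheaply; anything beyond that stays in the posted receive pages and is
 * attached as page frags, with frags that share a physical page coalesced
 * into a single frag slot.
 */
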
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	struct sk_buff *skb;
	u32 vlanf, vid;
	u16 num_rcvd;
	u8 vtm;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
	/* Is it a flush compl that has no data */
	if (unlikely(num_rcvd == 0))
		return;

	skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, skb, rxcp, num_rcvd);

	if (do_pkt_csum(rxcp, adapter->rx_csum))
		skb_checksum_none_assert(skb);
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, adapter->netdev);

	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->function_mode & 0x400) && !vtm)
		vlanf = 0;

	if (unlikely(vlanf)) {
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
	} else {
		netif_receive_skb(skb);
	}
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_eth_rx_compl *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_eq_obj *eq_obj = &adapter->rx_eq;
	u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
	u16 i, rxq_idx = 0, vid, j;
	u8 vtm;
	u8 pkt_type;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
	/* Is it a flush compl that has no data */
	if (unlikely(num_rcvd == 0))
		return;

	pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
	pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->function_mode & 0x400) && !vtm)
		vlanf = 0;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxcp);
		return;
	}

	remaining = pkt_size;
	for (i = 0, j = -1; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = pkt_size;
	skb->data_len = pkt_size;
	skb->truesize += pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (likely(!vlanf)) {
		napi_gro_frags(&eq_obj->napi);
	} else {
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);

		if (!adapter->vlan_grp || adapter->vlans_added == 0)
			return;

		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
	}

	be_rx_stats_update(adapter, pkt_size, num_rcvd, pkt_type);
}

static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
{
	struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq);

	if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

	queue_tail_inc(&adapter->rx_obj.cq);
	return rxcp;
}

/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
	rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}

static inline struct page *be_alloc_pages(u32 size)
{
	gfp_t alloc_flags = GFP_ATOMIC;
	u32 order = get_order(size);
	if (order > 0)
		alloc_flags |= __GFP_COMP;
	return alloc_pages(alloc_flags, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_adapter *adapter)
{
	struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size);
			if (unlikely(!pagep)) {
				drvr_stats(adapter)->be_ethrx_post_fail++;
				break;
			}
			page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
						adapter->big_page_size,
						PCI_DMA_FROMDEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		adapter->rx_post_starved = true;
	}
}
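
/*
 * Buffer-posting scheme above, in brief: one compound page of
 * big_page_size is carved into rx_frag_size chunks, and each chunk
 * becomes one rx descriptor (fragpa_hi/lo point into the page). Only the
 * page_info that hands out a page's final chunk gets last_page_user set,
 * so exactly one owner unmaps the page in get_rx_page_info().
 */
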
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
					skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	atomic_sub(num_wrbs, &txq->used);

	kfree_skb(sent_skb);
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	be_eq_notify(adapter, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
		be_rx_compl_discard(adapter, rxcp);
		be_rx_compl_reset(rxcp);
		be_cq_notify(adapter, rx_cq->id, true, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			cmpl = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
		be_tx_compl_process(adapter, end_idx);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;
	adapter->base_eq_id = adapter->tx_eq.q.id;

	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->rx_obj.q;
	if (q->created) {
		be_cmd_q_destroy(adapter, q, QTYPE_RXQ);

		/* After the rxq is invalidated, wait for a grace time
		 * of 1ms for all dma to end and the flush compl to arrive
		 */
		mdelay(1);
		be_rx_q_clean(adapter);
	}
	be_queue_free(adapter, q);

	q = &adapter->rx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->rx_eq);

	q = &adapter->rx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	int rc;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	adapter->rx_eq.max_eqd = BE_MAX_EQD;
	adapter->rx_eq.min_eqd = 0;
	adapter->rx_eq.cur_eqd = 0;
	adapter->rx_eq.enable_aic = true;

	/* Alloc Rx Event queue */
	eq = &adapter->rx_eq.q;
	rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				sizeof(struct be_eq_entry));
	if (rc)
		return rc;

	/* Ask BE to create Rx Event queue */
	rc = be_cmd_eq_create(adapter, eq, adapter->rx_eq.cur_eqd);
	if (rc)
		goto rx_eq_free;

	/* Alloc RX eth compl queue */
	cq = &adapter->rx_obj.cq;
	rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
			sizeof(struct be_eth_rx_compl));
	if (rc)
		goto rx_eq_destroy;

	/* Ask BE to create Rx eth compl queue */
	rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
	if (rc)
		goto rx_cq_free;

	/* Alloc RX eth queue */
	q = &adapter->rx_obj.q;
	rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d));
	if (rc)
		goto rx_cq_destroy;

	/* Ask BE to create Rx eth queue */
	rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
		BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false);
	if (rc)
		goto rx_q_free;

	return 0;
rx_q_free:
	be_queue_free(adapter, q);
rx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
rx_cq_free:
	be_queue_free(adapter, cq);
rx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
rx_eq_free:
	be_queue_free(adapter, eq);
	return rc;
}

/* There are 8 evt ids per func. Returns the evt id's bit number */
static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
{
	return eq_id - adapter->base_eq_id;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	int isr;

	isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
		(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
	if (!isr)
		return IRQ_NONE;

	event_handle(adapter, &adapter->tx_eq);
	event_handle(adapter, &adapter->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_adapter *adapter,
		struct be_eth_rx_compl *rxcp)
{
	int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
	int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);

	if (err)
		drvr_stats(adapter)->be_rxcp_err++;

	return (tcp_frame && !err) ? true : false;
}

int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(rx_eq, struct be_adapter, rx_eq);
	struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp;
	int work_done;

	adapter->stats.drvr_stats.be_rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(adapter);
		if (!rxcp)
			break;

		if (do_gro(adapter, rxcp))
			be_rx_compl_process_gro(adapter, rxcp);
		else
			be_rx_compl_process(adapter, rxcp);

		be_rx_compl_reset(rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(adapter);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}
/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	int tx_compl = 0, mcc_compl, status = 0;
	u16 end_idx;

	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
				wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		tx_compl++;
	}

	mcc_compl = be_process_mcc(adapter, &status);

	napi_complete(napi);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	if (tx_compl) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		drvr_stats(adapter)->be_tx_events++;
		drvr_stats(adapter)->be_tx_compl += tx_compl;
	}

	return 1;
}

void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
	ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

	if (ue_status_lo || ue_status_hi) {
		adapter->ue_detected = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}
}

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);

	if (!adapter->stats_ioctl_sent)
		be_cmd_get_stats(adapter, &adapter->stats.cmd);

	/* Set EQ delay */
	be_rx_eqd_update(adapter);

	be_tx_rate_update(adapter);
	be_rx_rate_update(adapter);

	if (adapter->rx_post_starved) {
		adapter->rx_post_starved = false;
		be_post_rx_frags(adapter);
	}
	if (!adapter->ue_detected)
		be_detect_dump_ue(adapter);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (adapter->msix_enabled) {
		pci_disable_msix(adapter->pdev);
		adapter->msix_enabled = false;
	}
}

static void be_msix_enable(struct be_adapter *adapter)
{
	int i, status;

	for (i = 0; i < BE_NUM_MSIX_VECTORS; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
		BE_NUM_MSIX_VECTORS);
	if (status == 0)
		adapter->msix_enabled = true;
}

static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status;

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
{
	return adapter->msix_entries[
			be_evt_bit_get(adapter, eq_id)].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj->q.id);
	return request_irq(vec, handler, 0, eq_obj->desc, adapter);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj)
{
	int vec = be_msix_vec_get(adapter, eq_obj->q.id);
	free_irq(vec, adapter);
}

static int be_msix_register(struct be_adapter *adapter)
{
	int status;

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx");
	if (status)
		goto err;

	status = be_request_irq(adapter, &adapter->rx_eq, be_msix_rx, "rx");
	if (status)
		goto free_tx_irq;

	return 0;

free_tx_irq:
	be_free_irq(adapter, &adapter->tx_eq);
err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	pci_disable_msix(adapter->pdev);
	adapter->msix_enabled = false;
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (adapter->msix_enabled) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!adapter->msix_enabled) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq);
	be_free_irq(adapter, &adapter->rx_eq);
done:
	adapter->isr_registered = false;
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec;

	cancel_delayed_work_sync(&adapter->work);

	be_async_mcc_disable(adapter);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	adapter->link_up = false;

	be_intr_set(adapter, false);

	if (adapter->msix_enabled) {
		vec = be_msix_vec_get(adapter, tx_eq->q.id);
		synchronize_irq(vec);
		vec = be_msix_vec_get(adapter, rx_eq->q.id);
		synchronize_irq(vec);
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	napi_disable(&rx_eq->napi);
	napi_disable(&tx_eq->napi);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	bool link_up;
	int status;
	u8 mac_speed;
	u16 link_speed;

	/* First time posting */
	be_post_rx_frags(adapter);

	napi_enable(&rx_eq->napi);
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Rx compl queue may be in unarmed state; rearm it */
	be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
					cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf = 0;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id);
	}
}

static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;

	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		while (vf < num_vfs) {
			cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
					| BE_IF_FLAGS_BROADCAST;
			status = be_cmd_if_create(adapter, cap_flags, en_flags,
					mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
			if (status) {
				dev_err(&adapter->pdev->dev,
				"Interface Create failed for VF %d\n", vf);
				goto if_destroy;
			}
			adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
			vf++;
		}
	} else if (!be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	if (be_physfn(adapter)) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto mcc_q_destroy;
	}

	adapter->link_speed = -1;

	return 0;

mcc_q_destroy:
	if (be_physfn(adapter))
		be_vf_eth_addr_rem(adapter);
	be_mcc_queues_destroy(adapter);
rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	for (vf = 0; vf < num_vfs; vf++)
		if (adapter->vf_cfg[vf].vf_if_handle)
			be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle);
	be_cmd_if_destroy(adapter, adapter->if_handle);
do_none:
	return status;
}

static int be_clear(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
char flash_cookie[2][16] = {"*** SE FLAS",
			    "H DIRECTORY *** "};

static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/*update redboot only if crc does not match*/
	if (!memcmp(flashed_crc, p, 4))
		return false;

	return true;
}

static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	struct flash_comp *pflashcomp;
	int num_comp;

	struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = 9;
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = 8;
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size,
			filehdr_size)))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
			yield();
		}
	}
	return 0;
}
static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;

	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}
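
/* ethtool flash entry point: fetch the UFI via request_firmware(),
 * check that the image generation matches the adapter, and stream it
 * to flash through one reusable DMA buffer (command header + 32KB of
 * payload).
 */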
int be_load_fw(struct be_adapter *adapter, u8 *func)
{
	char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
	const struct firmware *fw;
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	int status, i = 0, num_imgs = 0;
	const u8 *p;

	strcpy(fw_file, func);

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;
	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
					&flash_cmd.dma);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

fw_exit:
	release_firmware(fw);
	return status;
}
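
/* Stack entry points; the ndo_set_vf_* / ndo_get_vf_config hooks let
 * the PF administer its SR-IOV virtual functions.
 */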
static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= be_vlan_register,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};
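
/* One-time netdev init: advertise offload features, default Rx/Tx flow
 * control on, wire up the ops/ethtool tables and register one NAPI
 * context each for the Rx and combined Tx/MCC event queues. Carrier is
 * left off and the Tx queue stopped until link state is learned.
 */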
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
		NETIF_F_GRO | NETIF_F_TSO6;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;

	netdev->flags |= IFF_MULTICAST;

	adapter->rx_csum = true;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
		BE_NAPI_WEIGHT);
	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
}
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg && be_physfn(adapter))
		iounmap(adapter->pcicfg);
}
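
/* BAR usage differs by function and ASIC generation: only the PF maps
 * CSR space (BAR 2), the doorbell/pcicfg BAR numbers depend on the
 * generation, and a VF reaches its pcicfg shadow at a fixed offset
 * within the doorbell BAR.
 */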
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else {
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
	}

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);

	mem = &adapter->mc_cmd_mem;
	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}
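
/* The bootstrap mailbox must be 16-byte aligned: 16 spare bytes are
 * allocated and PTR_ALIGN() selects the aligned window inside the
 * buffer, applied identically to the virtual and bus addresses.
 */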
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
				mbox_mem_alloc->size, &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
			&mc_cmd_mem->dma);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	spin_lock_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
		mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_stats_obj *stats = &adapter->stats;
	struct be_dma_mem *cmd = &stats->cmd;

	if (cmd->va)
		pci_free_consistent(adapter->pdev, cmd->size,
			cmd->va, cmd->dma);
}
static int be_stats_init(struct be_adapter *adapter)
{
	struct be_stats_obj *stats = &adapter->stats;
	struct be_dma_mem *cmd = &stats->cmd;

	cmd->size = sizeof(struct be_cmd_req_get_stats);
	cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}
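
/* PCI remove: tear everything down in strict reverse order of
 * be_probe().
 */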
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter,
			&adapter->port_num, &adapter->function_mode);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	if (be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	return 0;
}
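
/* Bring-up order matters: PCI resources and MSI-x first, then the
 * mailbox path (be_ctrl_init), then POST/fw_init so the firmware is
 * ready before any MCC command (stats init, config query, be_setup)
 * is fired.
 */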
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}

	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	be_netdev_init(netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);

	be_msix_enable(adapter);

	status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	if (be_physfn(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	be_msix_disable(adapter);
	be_sriov_disable(adapter);
	free_netdev(adapter->netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
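
/* Suspend: optionally arm wake-on-LAN, snapshot the negotiated flow
 * control settings so they can be restored on resume, then release
 * all hardware resources via be_clear() before powering down.
 */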
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);
	return 0;
}
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	be_cmd_reset_function(adapter);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	pci_disable_device(pdev);
}
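
/* PCI EEH recovery: error_detected quiesces the NIC and decides
 * whether a slot reset may help; slot_reset re-enables the device and
 * re-runs POST; resume rebuilds the rings and reopens the interface.
 */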
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
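
/* Validate module parameters up front; out-of-range values are
 * clamped to safe defaults instead of failing the module load.
 */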
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
		rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	if (num_vfs > 32) {
		printk(KERN_WARNING DRV_NAME
			" : Module param num_vfs must not be greater than 32."
			" Using 32\n");
		num_vfs = 32;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);