/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter) {
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

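/* Helpers to allocate/free the DMA-coherent ring memory behind a
 * be_queue_info; used for all EQ/CQ/RQ/TXQ/MCCQ rings in this driver.
 */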
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

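/* Enable/disable host interrupt delivery by flipping the host-interrupt bit
 * of the MEMBAR interrupt-control register through PCI config space.
 */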
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

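/* Doorbell helpers: each encodes a ring id plus a count (buffers posted, or
 * events/completions processed) into one 32-bit doorbell write. The wmb()
 * in the post paths ensures descriptor writes are in memory before the
 * doorbell rings.
 */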
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

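/* ndo_set_mac_address handler: the new MAC is added as a pmac before the old
 * pmac id is deleted, so the interface keeps a valid unicast filter
 * throughout the change.
 */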
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

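/* The populate_*_stats() routines below copy the HW stats layout matching
 * the chip generation (v0 for BE2, v1 for BE3, pport stats for Lancer) into
 * the generation-independent adapter->drv_stats.
 */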
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

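/* Fold a 16-bit HW counter reading into a 32-bit accumulator: the low word
 * tracks the latest HW value and 64K is added each time the counter wraps.
 */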
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}

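/* ndo_get_stats64 handler: totals the per-queue RX/TX packet and byte counts
 * (re-reading under the u64_stats sync to get consistent values) and maps
 * the driver's HW drop/error counters onto the standard rtnl error fields.
 */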
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u32 link_status)
{
	struct net_device *netdev = adapter->netdev;

	/* when link status changes, link speed must be re-queried from card */
	adapter->link_speed = -1;
	if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
		netif_carrier_on(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
	} else {
		netif_carrier_off(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
	}
}

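/* Account a posted transmit in the per-queue SW stats: request, WRB, byte
 * and (gso-aware) packet counts, plus a stop count when the queue stalled.
 */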
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
							bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

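/* Fill the header WRB that leads every transmit request: it carries the
 * offload flags (LSO/MSS, TCP/UDP csum, VLAN insertion) along with the total
 * WRB count and payload length.
 */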
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
						LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
							tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
							udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

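/* DMA-map the skb head and each frag and fill one data WRB per mapping after
 * the header WRB (plus a dummy WRB when an even WRB count is required).
 * Returns bytes mapped, or 0 after unwinding all mappings on a DMA error.
 */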
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

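/* ndo_start_xmit handler: builds WRBs for the skb, stops the subqueue before
 * ringing the doorbell if another max-sized request may not fit, then rings
 * the TX doorbell and updates stats.
 */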
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

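/* ndo_set_rx_mode handler: programs the RX filter for promiscuous mode,
 * all-multicast (also used when the mc list exceeds BE_MAX_MC) or the exact
 * multicast list.
 */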
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter,
				adapter->vf_cfg[vf].vf_if_handle,
				adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

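/* Adaptive interrupt coalescing: once a second, recompute the RX event-queue
 * delay (EQD) from the observed packets/sec and program it into HW when it
 * changes, clamped to the [min_eqd, max_eqd] range.
 */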
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(&eq_obj->napi);
}

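/* Decode a v1 (BE3 native) RX completion into the HW-independent
 * be_rx_compl_info; be_parse_rx_compl_v0() below does the same for the older
 * v0 completion layout.
 */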
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

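/* Reclaim the header, data and any dummy WRBs of one completed transmit:
 * unmap the DMA buffers, free the sent skb and return the number of WRBs
 * processed so the caller can credit txq->used.
 */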
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj,
			bool rearm)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	if (!num)
		rearm = true;

	be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

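/* Drain pending RX completions and free any posted receive buffers that were
 * never filled, leaving the RX queue empty with head/tail reset.
 */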
3abcdeda 1499static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
6b7c5b94
SP
1500{
1501 struct be_rx_page_info *page_info;
3abcdeda
SP
1502 struct be_queue_info *rxq = &rxo->q;
1503 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1504 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
1505 u16 tail;
1506
1507 /* First cleanup pending rx completions */
3abcdeda
SP
1508 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1509 be_rx_compl_discard(adapter, rxo, rxcp);
64642811 1510 be_cq_notify(adapter, rx_cq->id, false, 1);
6b7c5b94
SP
1511 }
1512
1513 /* Then free posted rx buffer that were not used */
1514 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
cdab23b7 1515 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
3abcdeda 1516 page_info = get_rx_page_info(adapter, rxo, tail);
6b7c5b94
SP
1517 put_page(page_info->page);
1518 memset(page_info, 0, sizeof(*page_info));
1519 }
1520 BUG_ON(atomic_read(&rxq->used));
482c9e79 1521 rxq->tail = rxq->head = 0;
6b7c5b94
SP
1522}
1523
3c8def97
SP
1524static void be_tx_compl_clean(struct be_adapter *adapter,
1525 struct be_tx_obj *txo)
6b7c5b94 1526{
3c8def97
SP
1527 struct be_queue_info *tx_cq = &txo->cq;
1528 struct be_queue_info *txq = &txo->q;
a8e9179a 1529 struct be_eth_tx_compl *txcp;
4d586b82 1530 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
3c8def97 1531 struct sk_buff **sent_skbs = txo->sent_skb_list;
b03388d6
SP
1532 struct sk_buff *sent_skb;
1533 bool dummy_wrb;
a8e9179a
SP
1534
1535 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1536 do {
1537 while ((txcp = be_tx_compl_get(tx_cq))) {
1538 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1539 wrb_index, txcp);
3c8def97 1540 num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
a8e9179a
SP
1541 cmpl++;
1542 }
1543 if (cmpl) {
1544 be_cq_notify(adapter, tx_cq->id, false, cmpl);
4d586b82 1545 atomic_sub(num_wrbs, &txq->used);
a8e9179a 1546 cmpl = 0;
4d586b82 1547 num_wrbs = 0;
a8e9179a
SP
1548 }
1549
1550 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1551 break;
1552
1553 mdelay(1);
1554 } while (true);
1555
1556 if (atomic_read(&txq->used))
1557 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1558 atomic_read(&txq->used));
b03388d6
SP
1559
1560 /* free posted tx for which compls will never arrive */
1561 while (atomic_read(&txq->used)) {
1562 sent_skb = sent_skbs[txq->tail];
1563 end_idx = txq->tail;
1564 index_adv(&end_idx,
fe6d2a38
SP
1565 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1566 txq->len);
3c8def97 1567 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
4d586b82 1568 atomic_sub(num_wrbs, &txq->used);
b03388d6 1569 }
6b7c5b94
SP
1570}
1571
5fb379ee
SP
1572static void be_mcc_queues_destroy(struct be_adapter *adapter)
1573{
1574 struct be_queue_info *q;
5fb379ee 1575
8788fdc2 1576 q = &adapter->mcc_obj.q;
5fb379ee 1577 if (q->created)
8788fdc2 1578 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
1579 be_queue_free(adapter, q);
1580
8788fdc2 1581 q = &adapter->mcc_obj.cq;
5fb379ee 1582 if (q->created)
8788fdc2 1583 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
1584 be_queue_free(adapter, q);
1585}
1586
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

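/* Multiple TX queues are used only on a BE3-native PF that is neither in
 * SR-IOV nor in multi-channel mode; every other configuration gets a
 * single TX queue.
 */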
static int be_num_txqs_want(struct be_adapter *adapter)
{
	if ((num_vfs && adapter->sriov_enabled) ||
	    be_is_mc(adapter) ||
	    lancer_chip(adapter) || !be_physfn(adapter) ||
	    adapter->generation == BE_GEN2)
		return 1;
	else
		return MAX_TX_QS;
}

/* One TX event queue is shared by all TX compl qs */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
					     adapter->num_tx_qs);
		rtnl_unlock();
	}

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;

	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
			   sizeof(struct be_eq_entry)))
		return -1;

	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto err;
	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
				   sizeof(struct be_eth_tx_compl)))
			goto err;

		if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
			goto err;

		q = &txo->q;
		if (be_queue_alloc(adapter, q, TX_Q_LEN,
				   sizeof(struct be_eth_wrb)))
			goto err;
	}
	return 0;

err:
	be_tx_queues_destroy(adapter);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		be_queue_free(adapter, &rxo->q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		q = &rxo->rx_eq.q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		be_queue_free(adapter, q);
	}
}

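/* RSS queues (in addition to the default queue) are used only when the
 * function reports RSS capability and is a PF that is neither in SR-IOV
 * nor in multi-channel mode.
 */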
static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !adapter->sriov_enabled && be_physfn(adapter) &&
	    !be_is_mc(adapter)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			 "No support for multiple RX queues\n");
		return 1;
	}
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				 msix_enabled(adapter) ?
				 adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			 "Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q - will be created in be_open() */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}

static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
	if (!eqe->evt)
		return false;
	else
		return true;
}

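/* Legacy INTx handler. Lancer has no CEV ISR, so pending events are
 * detected by peeking at the EQs directly; on BE2/BE3 the interrupt
 * status register is read and each set bit maps to an EQ via its eq_idx.
 */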
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq, false);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq, true);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			       (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq, false);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq, true);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq, true);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq, false);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err) ? true : false;
}

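/* NAPI poll handler for an RX queue: process up to @budget completions,
 * refill the queue when it runs low, and re-arm the CQ only once all
 * pending work has been consumed.
 */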
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rx_stats(rxo)->rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(adapter, rxo, rxcp);
		else
			be_rx_compl_process(adapter, rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	be_cq_notify(adapter, rx_cq->id, false, work_done);

	/* Refill the queue */
	if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		/* Arm CQ */
		be_cq_notify(adapter, rx_cq->id, true, 0);
	}
	return work_done;
}

/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_tx_obj *txo;
	struct be_eth_tx_compl *txcp;
	int tx_compl, mcc_compl, status = 0;
	u8 i;
	u16 num_wrbs;

	for_all_tx_queues(adapter, txo, i) {
		tx_compl = 0;
		num_wrbs = 0;
		while ((txcp = be_tx_compl_get(&txo->cq))) {
			num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
			tx_compl++;
		}
		if (tx_compl) {
			be_cq_notify(adapter, txo->cq.id, true, tx_compl);

			atomic_sub(num_wrbs, &txo->q.used);

			/* As Tx wrbs have been freed up, wake up netdev queue
			 * if it was stopped due to lack of tx wrbs. */
			if (__netif_subqueue_stopped(adapter->netdev, i) &&
			    atomic_read(&txo->q.used) < txo->q.len / 2) {
				netif_wake_subqueue(adapter->netdev, i);
			}

			u64_stats_update_begin(&tx_stats(txo)->sync_compl);
			tx_stats(txo)->tx_compl += tx_compl;
			u64_stats_update_end(&tx_stats(txo)->sync_compl);
		}
	}

	mcc_compl = be_process_mcc(adapter, &status);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	napi_complete(napi);

	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
	adapter->drv_stats.tx_events++;
	return 1;
}

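/* Check the adapter for an unrecoverable error (UE). On Lancer this is
 * reported through the SLIPORT status register; on BE2/BE3 the masked
 * UE status CSRs are decoded and each stuck block is logged by name.
 */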
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
	    sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev,
			"Unrecoverable error in the card\n");
	}

	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
					"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
					"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

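/* Try to enable one MSI-X vector per desired RX queue plus one for TX/MCC.
 * If pci_enable_msix() returns a smaller available count, retry with that
 * count; otherwise leave num_msix_vec at 0 and fall back to INTx.
 */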
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				    num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}

static int be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
					      PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
				     pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
				 "Device supports %d VFs and not %d\n",
				 nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;

		if (adapter->sriov_enabled) {
			adapter->vf_cfg = kcalloc(num_vfs,
						  sizeof(struct be_vf_cfg),
						  GFP_KERNEL);
			if (!adapter->vf_cfg)
				return -ENOMEM;
		}
	}
#endif
	return 0;
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		kfree(adapter->vf_cfg);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
			  struct be_eq_obj *eq_obj,
			  void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
					qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		 "MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}

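/* Destroy the RX queues and flush out any state they still hold: give
 * in-flight DMA ~1ms of grace for the flush completion to arrive, purge
 * the queue, then clear residual events on the RX event queues.
 */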
static void be_rx_queues_clear(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created)
			be_eq_clean(adapter, &rxo->rx_eq);
	}
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, false, 0);
	}

	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	for_all_tx_queues(adapter, txo, i)
		be_tx_compl_clean(adapter, txo);

	be_rx_queues_clear(adapter);
	return 0;
}

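/* Create the RX queues on the device. Queue 0 is the default (non-RSS)
 * queue; when multiple queues exist, the 128-entry RSS indirection table
 * is filled round-robin with the RSS queue ids.
 */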
static int be_rx_queues_setup(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
				       adapter->if_handle,
				       (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);

		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	return 0;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	int status, i;

	status = be_rx_queues_setup(adapter);
	if (status)
		goto err;

	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

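/* Enable or disable wake-on-LAN. Enabling programs the current MAC as the
 * magic-packet pattern and allows wake from D3hot/D3cold; disabling clears
 * the pattern (zero MAC) and revokes the wake capability.
 */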
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id,
					vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static void be_vf_clear(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}

	for (vf = 0; vf < num_vfs; vf++)
		be_cmd_if_destroy(adapter, adapter->vf_cfg[vf].vf_if_handle,
				  vf + 1);
}

static int be_clear(struct be_adapter *adapter)
{
	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

static void be_vf_setup_init(struct be_adapter *adapter)
{
	int vf;

	for (vf = 0; vf < num_vfs; vf++) {
		adapter->vf_cfg[vf].vf_if_handle = -1;
		adapter->vf_cfg[vf].vf_pmac_id = -1;
	}
}

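/* Per-VF provisioning done by the PF: create an interface for each VF,
 * program the VF MAC addresses, and record each VF's link speed for
 * later tx-rate configuration.
 */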
static int be_vf_setup(struct be_adapter *adapter)
{
	u32 cap_flags, en_flags, vf;
	u16 lnk_speed;
	int status;

	be_vf_setup_init(adapter);

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					  &adapter->vf_cfg[vf].vf_if_handle,
					  NULL, vf+1);
		if (status)
			goto err;
	}

	status = be_vf_eth_addr_config(adapter);
	if (status)
		goto err;

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
						  vf + 1);
		if (status)
			goto err;
		adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
	}
	return 0;
err:
	return status;
}

static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->promiscuous = false;
	adapter->eq_next_idx = 0;
}

static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac)
{
	u32 pmac_id;
	int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id);
	if (status != 0)
		goto do_none;
	status = be_cmd_mac_addr_query(adapter, mac,
				       MAC_ADDRESS_TYPE_NETWORK,
				       false, adapter->if_handle, pmac_id);
	if (status != 0)
		goto do_none;
	status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
				 &adapter->pmac_id, 0);
do_none:
	return status;
}

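/* Bring the function to an operational state: create TX/RX/MCC queues,
 * create the interface with the permanent MAC, sort out the (possibly
 * PF-assigned) MAC on VFs, apply VLAN and flow-control settings, and
 * finally provision the VFs when SR-IOV is active.
 */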
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags;
	u32 tx_fc, rx_fc;
	int status, i;
	u8 mac[ETH_ALEN];
	struct be_tx_obj *txo;

	be_setup_init(adapter);

	be_cmd_req_native_mode(adapter);

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto err;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto err;

	memset(mac, 0, ETH_ALEN);
	status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
				       true /*permanent */, 0, 0);
	if (status)
		return status;
	memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
	memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
			BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
		cap_flags |= BE_IF_FLAGS_RSS;
		en_flags |= BE_IF_FLAGS_RSS;
	}
	status = be_cmd_if_create(adapter, cap_flags, en_flags,
				  netdev->dev_addr, &adapter->if_handle,
				  &adapter->pmac_id, 0);
	if (status != 0)
		goto err;

	for_all_tx_queues(adapter, txo, i) {
		status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
		if (status)
			goto err;
	}

	/* The VF's permanent mac queried from card is incorrect.
	 * For BEx: Query the mac configured by the PF using if_handle
	 * For Lancer: Get and use mac_list to obtain mac address.
	 */
	if (!be_physfn(adapter)) {
		if (lancer_chip(adapter))
			status = be_configure_mac_from_list(adapter, mac);
		else
			status = be_cmd_mac_addr_query(adapter, mac,
					MAC_ADDRESS_TYPE_NETWORK, false,
					adapter->if_handle, 0);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	status = be_vid_config(adapter, false, 0);
	if (status)
		goto err;

	be_set_rx_mode(adapter->netdev);

	status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
	/* For Lancer: It is legal for this cmd to fail on VF */
	if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
		goto err;

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
		status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
						 adapter->rx_fc);
		/* For Lancer: It is legal for this cmd to fail on VF */
		if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
			goto err;
	}

	pcie_set_readrq(adapter->pdev, 4096);

	if (be_physfn(adapter) && adapter->sriov_enabled) {
		status = be_vf_setup(adapter);
		if (status)
			goto err;
	}

	return 0;
err:
	be_clear(adapter);
	return status;
}

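/* Decide whether the redboot section needs reflashing by comparing the
 * CRC stored in the new image against the CRC currently in flash; it is
 * rewritten only when they differ.
 */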
#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			     const u8 *p, u32 img_start, int image_size,
			     int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
				      (image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
			"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

static bool phy_flashing_required(struct be_adapter *adapter)
{
	int status = 0;
	struct be_phy_info phy_info;

	status = be_cmd_get_phy_info(adapter, &phy_info);
	if (status)
		return false;
	if ((phy_info.phy_type == TN_8022) &&
	    (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
		return true;
	}
	return false;
}

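/* Walk the per-generation flash layout table and write each applicable
 * image section to flash in 32KB chunks; the final chunk of a section is
 * sent with a FLASH (commit) opcode instead of SAVE.
 */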
static int be_flash_data(struct be_adapter *adapter,
			 const struct firmware *fw,
			 struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[10] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3},
		{ FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
		    (!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			if (!total_bytes) {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				if ((status == ILLEGAL_IOCTL_REQ) &&
				    (pflashcomp[i].optype ==
					IMG_TYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

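/* Lancer firmware download: stream the image to the adapter with
 * WRITE_OBJECT commands in 32KB chunks, then issue a zero-length write
 * at the final offset to commit the flashed image.
 */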
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
				0, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}

static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
	    (get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
						       num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
		   (get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			       BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		       BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
}

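/* Map the PCI BARs this function needs. Lancer exposes everything through
 * BAR 0; on BE2/BE3 the CSR space (BAR 2) is mapped only for the PF, and
 * the doorbell BAR is 4 for PFs (and all of BE2) but 0 for BE3 VFs.
 */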
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
				       pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				       pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		db_reg = 4;
	} else {
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
			       pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

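/* Set up what the control path needs before any mailbox command is sent:
 * map the BARs, allocate the 16-byte-aligned mailbox and the rx_filter
 * DMA buffers, and initialize the mbox/MCC locks.
 */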
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					   &rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_config(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	return 0;
}

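/* Derive the adapter generation from the PCI device id. For the Lancer
 * ids (OC_DEVICE_ID3/OC_DEVICE_ID4), the SLI_INTF register is also
 * validated and the SLI family recorded.
 */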
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
		    if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

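/* Poll the SLIPORT status register for the RDY bit, one second per
 * attempt, for up to 30 attempts.
 */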
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(1000);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check if adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
					   SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

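/* If the Lancer function is in an error state, attempt an in-place
 * recovery: reset the port to the ready state, tear the function down
 * with be_clear(), and bring it back up via be_setup()/be_open().
 */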
static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"Adapter in error state. "
			"Trying to recover.\n");

		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		be_clear(adapter);

		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
			"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter error recovery failed\n");
}

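/* Periodic (1s) housekeeping: run Lancer error recovery and UE detection,
 * reap MCC completions while the interface is down, kick off stats
 * requests, update RX EQ delays and replenish starved RX queues.
 */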
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

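/* PCI probe. The flow is roughly: enable the PCI device, allocate the
 * net_device, pick a DMA mask (64-bit with 32-bit fallback), enable
 * SR-IOV, bring up mailbox/MCC state and sync with firmware, then
 * create queues via be_setup(), register the netdev and kick off the
 * worker. The error labels below unwind in exact reverse order.
 */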
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_sriov_enable(adapter);
	if (status)
		goto free_netdev;

	status = be_ctrl_init(adapter);
	if (status)
		goto disable_sriov;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
disable_sriov:
	be_sriov_disable(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

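/* PM suspend: stop the worker, arm wake-on-LAN if configured, close
 * and detach the interface, free queues and put the device into the
 * requested low-power state.
 */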
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

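/* PM resume: mirror image of be_suspend() - re-enable the device and
 * MSI-X, re-sync with firmware, rebuild queues, reopen the interface
 * if it was running and restart the worker.
 */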
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

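/* EEH error_detected callback: quiesce the driver (detach, close, free
 * queues) and tell the EEH core whether a slot reset is worth trying
 * or the device should be disconnected.
 */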
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

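/* EEH slot_reset callback: re-enable and restore the device, then use
 * the POST handshake to check that the card and firmware came back.
 */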
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

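/* EEH resume callback: the slot reset succeeded, so rebuild queues,
 * reopen the interface and re-attach the netdev.
 */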
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

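/* Module init: rx_frag_size may only be 2048, 4096 or 8192 bytes;
 * anything else is overridden to the 2048 default before the PCI
 * driver is registered.
 */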
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);