/* drivers/net/ethernet/emulex/benet/be_main.c */
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

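/* Illustrative usage (an editor's example, not part of the driver): both
 * parameters above are S_IRUGO (read-only via sysfs), so they can only be
 * set at module load time, e.g.:
 *
 *   modprobe be2net num_vfs=4 rx_frag_size=4096
 *
 * The values shown are placeholders; the driver validates them elsewhere.
 */
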
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter) {
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	memset(mem->va, 0, mem->size);
	return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_error)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];
	bool active_mac = true;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* For BE VF, MAC address is already activated by PF.
	 * Hence only operation left is updating netdev->devaddr.
	 * Update it if user is passing the same MAC which was used
	 * during configuring VF MAC from PF(Hypervisor).
	 */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, current_mac,
					       false, adapter->if_handle, 0);
		if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
			goto done;
		else
			goto err;
	}

	if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
		goto done;

	/* For Lancer check if any MAC is active.
	 * If active, get its mac id.
	 */
	if (lancer_chip(adapter) && !be_physfn(adapter))
		be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
					 &pmac_id, 0);

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle,
				 &adapter->pmac_id[0], 0);

	if (status)
		goto err;

	if (active_mac)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				pmac_id, 0);
done:
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

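/* Illustrative usage (an editor's example, not part of the driver): this
 * function is wired up as the .ndo_set_mac_address hook (in be_netdev_ops,
 * outside this excerpt), so it is reached from userspace via e.g.:
 *
 *   ip link set dev eth0 address 00:11:22:33:44:55
 *
 * Interface name and MAC are placeholders.
 */
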
/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_mismatch_drops =
					port_stats->rx_address_mismatch_drops +
					port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

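/* Worked example (an editor's illustration, not part of the driver): suppose
 * *acc == 0x0001FFFE (hi word 0x0001, lo word 0xFFFE) and the HW counter now
 * reads val == 0x0003.  Since 0x0003 < 0xFFFE the 16-bit HW counter must
 * have wrapped, so newacc = 0x00010000 + 0x0003 + 65536 = 0x00020003.  The
 * accumulated value advances by 5 (0xFFFE -> 0x0003 across the wrap), which
 * matches the number of events the HW actually counted.
 */
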
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else
			/* for BE3 and Skyhawk */
			populate_be_v1_stats(adapter);

		/* as erx_v1 is longer than v0, ok to use v1 for v0 access */
		for_all_rx_queues(adapter, rxo, i) {
			/* below erx HW counter can actually wrap around after
			 * 65535. Driver accumulates a 32-bit value
			 */
			accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
					     (u16)erx->rx_drops_no_fragments \
					     [rxo->q.id]);
		}
	}
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

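/* Worked example (an editor's illustration, not part of the driver): an skb
 * with a linear head and two page frags needs 1 + 2 data WRBs plus the
 * header WRB, so cnt = 4; being even, no dummy is added.  A head plus one
 * frag gives cnt = 3, so on BE2/BE3 (but not Lancer) a dummy WRB is
 * appended to round the count up to an even 4.
 */
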
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
					struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

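/* Worked example (an editor's illustration, not part of the driver): for
 * vlan_tag 0xA064 the priority field is (0xA064 & VLAN_PRIO_MASK) >> 13 = 5
 * and the VID is 0x064.  If bit 5 is clear in adapter->vlan_prio_bmap, the
 * priority bits are replaced with adapter->recommended_prio (assumed here
 * to be stored already shifted into the PCP position) and the VID is kept.
 */
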
static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb)) {
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		__vlan_put_tag(skb, vlan_tag);
		skb->vlan_tci = 0;
	}

	return skb;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* HW has a bug which considers padding bytes as legal
	 * and modifies the IPv4 hdr's 'tot_len' field
	 */
	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
			is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

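/* Illustrative usage (an editor's example, not part of the driver): this is
 * reached via the .ndo_change_mtu hook, e.g. from userspace:
 *
 *   ip link set dev eth0 mtu 9000
 *
 * The interface name and MTU are placeholders; out-of-range values get the
 * dev_info() message above and -EINVAL.
 */
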
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						  &pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
					       adapter->vf_cfg[vf].if_handle);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	if (lancer_chip(adapter))
		status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
	else
		status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

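/* Illustrative usage (an editor's example, not part of the driver): the VF
 * hooks above are reached through the ndo_set_vf_* callbacks, e.g.:
 *
 *   ip link set eth0 vf 0 mac 02:00:00:00:00:01
 *   ip link set eth0 vf 0 vlan 100
 *   ip link set eth0 vf 0 rate 1000
 *
 * Interface name and values are placeholders.  Note that the rate is handed
 * to the FW in units of 10 Mbps (rate / 10).
 */
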
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		if (dev->is_virtfn && pci_physfn(dev) == pdev) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}

static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}

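/* Worked example (an editor's illustration, not part of the driver): at a
 * measured 440,000 pkts/s the adaptive path computes
 * eqd = (440000 / 110000) << 3 = 32, which is then clamped to
 * [eqo->min_eqd, eqo->max_eqd].  Any rate below 220,000 pkts/s produces an
 * eqd of 0 or 8, both under the floor of 10, so the delay is forced to 0
 * and light loads see no added interrupt latency.
 */
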
static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}

static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

1548/*
1549 * Allocate a page, split it to fragments of size rx_frag_size and post as
1550 * receive buffers to BE
1551 */
1829b086 1552static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
6b7c5b94 1553{
3abcdeda 1554 struct be_adapter *adapter = rxo->adapter;
26d92f92 1555 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1556 struct be_queue_info *rxq = &rxo->q;
6b7c5b94
SP
1557 struct page *pagep = NULL;
1558 struct be_eth_rx_d *rxd;
1559 u64 page_dmaaddr = 0, frag_dmaaddr;
1560 u32 posted, page_offset = 0;
1561
3abcdeda 1562 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1563 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1564 if (!pagep) {
1829b086 1565 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1566 if (unlikely(!pagep)) {
ac124ff9 1567 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1568 break;
1569 }
2b7bcebf
IV
1570 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1571 0, adapter->big_page_size,
1572 DMA_FROM_DEVICE);
6b7c5b94
SP
1573 page_info->page_offset = 0;
1574 } else {
1575 get_page(pagep);
1576 page_info->page_offset = page_offset + rx_frag_size;
1577 }
1578 page_offset = page_info->page_offset;
1579 page_info->page = pagep;
fac6da5b 1580 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
6b7c5b94
SP
1581 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1582
1583 rxd = queue_head_node(rxq);
1584 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1585 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1586
1587 /* Any space left in the current big page for another frag? */
1588 if ((page_offset + rx_frag_size + rx_frag_size) >
1589 adapter->big_page_size) {
1590 pagep = NULL;
1591 page_info->last_page_user = true;
1592 }
26d92f92
SP
1593
1594 prev_page_info = page_info;
1595 queue_head_inc(rxq);
10ef9ab4 1596 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1597 }
1598 if (pagep)
26d92f92 1599 prev_page_info->last_page_user = true;
6b7c5b94
SP
1600
1601 if (posted) {
6b7c5b94 1602 atomic_add(posted, &rxq->used);
8788fdc2 1603 be_rxq_notify(adapter, rxq->id, posted);
ea1dae11
SP
1604 } else if (atomic_read(&rxq->used) == 0) {
1605 /* Let be_worker replenish when memory is available */
3abcdeda 1606 rxo->rx_post_starved = true;
6b7c5b94 1607 }
6b7c5b94
SP
1608}
1609
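/* Returns the next valid TX completion, byte-swapped for the CPU, or NULL
 * when the CQ is empty. The valid bit is cleared so the entry can be
 * recognized as stale when the CQ wraps around.
 */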
5fb379ee 1610static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1611{
6b7c5b94
SP
1612 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1613
1614 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1615 return NULL;
1616
f3eb62d2 1617 rmb();
6b7c5b94
SP
1618 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1619
1620 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1621
1622 queue_tail_inc(tx_cq);
1623 return txcp;
1624}
1625
3c8def97
SP
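/* Unmaps the hdr and data wrbs of the skb whose wrbs end at last_index,
 * frees the skb and returns the number of wrbs consumed (including the
 * hdr wrb).
 */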
1626static u16 be_tx_compl_process(struct be_adapter *adapter,
1627 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1628{
3c8def97 1629 struct be_queue_info *txq = &txo->q;
a73b796e 1630 struct be_eth_wrb *wrb;
3c8def97 1631 struct sk_buff **sent_skbs = txo->sent_skb_list;
6b7c5b94 1632 struct sk_buff *sent_skb;
ec43b1a6
SP
1633 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1634 bool unmap_skb_hdr = true;
6b7c5b94 1635
ec43b1a6 1636 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1637 BUG_ON(!sent_skb);
ec43b1a6
SP
1638 sent_skbs[txq->tail] = NULL;
1639
1640 /* skip header wrb */
a73b796e 1641 queue_tail_inc(txq);
6b7c5b94 1642
ec43b1a6 1643 do {
6b7c5b94 1644 cur_index = txq->tail;
a73b796e 1645 wrb = queue_tail_node(txq);
2b7bcebf
IV
1646 unmap_tx_frag(&adapter->pdev->dev, wrb,
1647 (unmap_skb_hdr && skb_headlen(sent_skb)));
ec43b1a6
SP
1648 unmap_skb_hdr = false;
1649
6b7c5b94
SP
1650 num_wrbs++;
1651 queue_tail_inc(txq);
ec43b1a6 1652 } while (cur_index != last_index);
6b7c5b94 1653
6b7c5b94 1654 kfree_skb(sent_skb);
4d586b82 1655 return num_wrbs;
6b7c5b94
SP
1656}
1657
10ef9ab4
SP
1658/* Return the number of events in the event queue */
1659static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 1660{
10ef9ab4
SP
1661 struct be_eq_entry *eqe;
1662 int num = 0;
859b1e4e 1663
10ef9ab4
SP
1664 do {
1665 eqe = queue_tail_node(&eqo->q);
1666 if (eqe->evt == 0)
1667 break;
859b1e4e 1668
10ef9ab4
SP
1669 rmb();
1670 eqe->evt = 0;
1671 num++;
1672 queue_tail_inc(&eqo->q);
1673 } while (true);
1674
1675 return num;
859b1e4e
SP
1676}
1677
10ef9ab4
SP
1678/* Leaves the EQ in a disarmed state */
1679static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 1680{
10ef9ab4 1681 int num = events_get(eqo);
859b1e4e 1682
10ef9ab4 1683 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
1684}
1685
10ef9ab4 1686static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
1687{
1688 struct be_rx_page_info *page_info;
3abcdeda
SP
1689 struct be_queue_info *rxq = &rxo->q;
1690 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1691 struct be_rx_compl_info *rxcp;
d23e946c
SP
1692 struct be_adapter *adapter = rxo->adapter;
1693 int flush_wait = 0;
6b7c5b94
SP
1694 u16 tail;
1695
d23e946c
SP
1696 /* Consume pending rx completions.
1697 * Wait for the flush completion (identified by zero num_rcvd)
1698 * to arrive. Notify CQ even when there are no more CQ entries
1699 * for HW to flush partially coalesced CQ entries.
1700 * In Lancer, there is no need to wait for flush compl.
1701 */
1702 for (;;) {
1703 rxcp = be_rx_compl_get(rxo);
1704 if (rxcp == NULL) {
1705 if (lancer_chip(adapter))
1706 break;
1707
1708 if (flush_wait++ > 10 || be_hw_error(adapter)) {
1709 dev_warn(&adapter->pdev->dev,
1710 "did not receive flush compl\n");
1711 break;
1712 }
1713 be_cq_notify(adapter, rx_cq->id, true, 0);
1714 mdelay(1);
1715 } else {
1716 be_rx_compl_discard(rxo, rxcp);
1717 be_cq_notify(adapter, rx_cq->id, true, 1);
1718 if (rxcp->num_rcvd == 0)
1719 break;
1720 }
6b7c5b94
SP
1721 }
1722
d23e946c
SP
1723 /* After cleanup, leave the CQ in unarmed state */
1724 be_cq_notify(adapter, rx_cq->id, false, 0);
1725
1726 /* Then free posted rx buffers that were not used */
6b7c5b94 1727 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
cdab23b7 1728 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
10ef9ab4 1729 page_info = get_rx_page_info(rxo, tail);
6b7c5b94
SP
1730 put_page(page_info->page);
1731 memset(page_info, 0, sizeof(*page_info));
1732 }
1733 BUG_ON(atomic_read(&rxq->used));
482c9e79 1734 rxq->tail = rxq->head = 0;
6b7c5b94
SP
1735}
1736
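/* Used in the close path: waits up to 200ms for pending TX completions
 * and reaps them; any skbs still posted after that, whose completions
 * will never arrive, are unmapped and freed directly.
 */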
0ae57bb3 1737static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 1738{
0ae57bb3
SP
1739 struct be_tx_obj *txo;
1740 struct be_queue_info *txq;
a8e9179a 1741 struct be_eth_tx_compl *txcp;
4d586b82 1742 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
b03388d6
SP
1743 struct sk_buff *sent_skb;
1744 bool dummy_wrb;
0ae57bb3 1745 int i, pending_txqs;
a8e9179a
SP
1746
1747 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1748 do {
0ae57bb3
SP
1749 pending_txqs = adapter->num_tx_qs;
1750
1751 for_all_tx_queues(adapter, txo, i) {
1752 txq = &txo->q;
1753 while ((txcp = be_tx_compl_get(&txo->cq))) {
1754 end_idx =
1755 AMAP_GET_BITS(struct amap_eth_tx_compl,
1756 wrb_index, txcp);
1757 num_wrbs += be_tx_compl_process(adapter, txo,
1758 end_idx);
1759 cmpl++;
1760 }
1761 if (cmpl) {
1762 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1763 atomic_sub(num_wrbs, &txq->used);
1764 cmpl = 0;
1765 num_wrbs = 0;
1766 }
1767 if (atomic_read(&txq->used) == 0)
1768 pending_txqs--;
a8e9179a
SP
1769 }
1770
0ae57bb3 1771 if (pending_txqs == 0 || ++timeo > 200)
a8e9179a
SP
1772 break;
1773
1774 mdelay(1);
1775 } while (true);
1776
0ae57bb3
SP
1777 for_all_tx_queues(adapter, txo, i) {
1778 txq = &txo->q;
1779 if (atomic_read(&txq->used))
1780 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1781 atomic_read(&txq->used));
1782
1783 /* free posted tx for which compls will never arrive */
1784 while (atomic_read(&txq->used)) {
1785 sent_skb = txo->sent_skb_list[txq->tail];
1786 end_idx = txq->tail;
1787 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1788 &dummy_wrb);
1789 index_adv(&end_idx, num_wrbs - 1, txq->len);
1790 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1791 atomic_sub(num_wrbs, &txq->used);
1792 }
b03388d6 1793 }
6b7c5b94
SP
1794}
1795
10ef9ab4
SP
1796static void be_evt_queues_destroy(struct be_adapter *adapter)
1797{
1798 struct be_eq_obj *eqo;
1799 int i;
1800
1801 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
1802 if (eqo->q.created) {
1803 be_eq_clean(eqo);
10ef9ab4 1804 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
19d59aa7 1805 }
10ef9ab4
SP
1806 be_queue_free(adapter, &eqo->q);
1807 }
1808}
1809
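/* Creates one EQ, with adaptive interrupt coalescing enabled, per IRQ
 * vector; the TX, RX and MCC completion queues created later are striped
 * across these EQs.
 */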
1810static int be_evt_queues_create(struct be_adapter *adapter)
1811{
1812 struct be_queue_info *eq;
1813 struct be_eq_obj *eqo;
1814 int i, rc;
1815
1816 adapter->num_evt_qs = num_irqs(adapter);
1817
1818 for_all_evt_queues(adapter, eqo, i) {
1819 eqo->adapter = adapter;
1820 eqo->tx_budget = BE_TX_BUDGET;
1821 eqo->idx = i;
1822 eqo->max_eqd = BE_MAX_EQD;
1823 eqo->enable_aic = true;
1824
1825 eq = &eqo->q;
1826 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1827 sizeof(struct be_eq_entry));
1828 if (rc)
1829 return rc;
1830
1831 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1832 if (rc)
1833 return rc;
1834 }
1cfafab9 1835 return 0;
10ef9ab4
SP
1836}
1837
5fb379ee
SP
1838static void be_mcc_queues_destroy(struct be_adapter *adapter)
1839{
1840 struct be_queue_info *q;
5fb379ee 1841
8788fdc2 1842 q = &adapter->mcc_obj.q;
5fb379ee 1843 if (q->created)
8788fdc2 1844 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
1845 be_queue_free(adapter, q);
1846
8788fdc2 1847 q = &adapter->mcc_obj.cq;
5fb379ee 1848 if (q->created)
8788fdc2 1849 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
1850 be_queue_free(adapter, q);
1851}
1852
1853/* Must be called only after TX qs are created as MCC shares TX EQ */
1854static int be_mcc_queues_create(struct be_adapter *adapter)
1855{
1856 struct be_queue_info *q, *cq;
5fb379ee 1857
8788fdc2 1858 cq = &adapter->mcc_obj.cq;
5fb379ee 1859 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
efd2e40a 1860 sizeof(struct be_mcc_compl)))
5fb379ee
SP
1861 goto err;
1862
10ef9ab4
SP
1863 /* Use the default EQ for MCC completions */
1864 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
1865 goto mcc_cq_free;
1866
8788fdc2 1867 q = &adapter->mcc_obj.q;
5fb379ee
SP
1868 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1869 goto mcc_cq_destroy;
1870
8788fdc2 1871 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
1872 goto mcc_q_free;
1873
1874 return 0;
1875
1876mcc_q_free:
1877 be_queue_free(adapter, q);
1878mcc_cq_destroy:
8788fdc2 1879 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
1880mcc_cq_free:
1881 be_queue_free(adapter, cq);
1882err:
1883 return -1;
1884}
1885
6b7c5b94
SP
1886static void be_tx_queues_destroy(struct be_adapter *adapter)
1887{
1888 struct be_queue_info *q;
3c8def97
SP
1889 struct be_tx_obj *txo;
1890 u8 i;
6b7c5b94 1891
3c8def97
SP
1892 for_all_tx_queues(adapter, txo, i) {
1893 q = &txo->q;
1894 if (q->created)
1895 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1896 be_queue_free(adapter, q);
6b7c5b94 1897
3c8def97
SP
1898 q = &txo->cq;
1899 if (q->created)
1900 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1901 be_queue_free(adapter, q);
1902 }
6b7c5b94
SP
1903}
1904
dafc0fe3
SP
1905static int be_num_txqs_want(struct be_adapter *adapter)
1906{
abb93951
PR
1907 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1908 be_is_mc(adapter) ||
1909 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
ca34fe38 1910 BE2_chip(adapter))
dafc0fe3
SP
1911 return 1;
1912 else
abb93951 1913 return adapter->max_tx_queues;
dafc0fe3
SP
1914}
1915
10ef9ab4 1916static int be_tx_cqs_create(struct be_adapter *adapter)
6b7c5b94 1917{
10ef9ab4
SP
1918 struct be_queue_info *cq, *eq;
1919 int status;
3c8def97
SP
1920 struct be_tx_obj *txo;
1921 u8 i;
6b7c5b94 1922
dafc0fe3 1923 adapter->num_tx_qs = be_num_txqs_want(adapter);
3bb62f4f
PR
1924 if (adapter->num_tx_qs != MAX_TX_QS) {
1925 rtnl_lock();
dafc0fe3
SP
1926 netif_set_real_num_tx_queues(adapter->netdev,
1927 adapter->num_tx_qs);
3bb62f4f
PR
1928 rtnl_unlock();
1929 }
dafc0fe3 1930
10ef9ab4
SP
1931 for_all_tx_queues(adapter, txo, i) {
1932 cq = &txo->cq;
1933 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1934 sizeof(struct be_eth_tx_compl));
1935 if (status)
1936 return status;
3c8def97 1937
10ef9ab4
SP
1938 /* If num_evt_qs is less than num_tx_qs, then more than
1939 * one txq share an eq
1940 */
1941 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1942 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1943 if (status)
1944 return status;
1945 }
1946 return 0;
1947}
6b7c5b94 1948
10ef9ab4
SP
1949static int be_tx_qs_create(struct be_adapter *adapter)
1950{
1951 struct be_tx_obj *txo;
1952 int i, status;
fe6d2a38 1953
3c8def97 1954 for_all_tx_queues(adapter, txo, i) {
10ef9ab4
SP
1955 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1956 sizeof(struct be_eth_wrb));
1957 if (status)
1958 return status;
6b7c5b94 1959
10ef9ab4
SP
1960 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1961 if (status)
1962 return status;
3c8def97 1963 }
6b7c5b94 1964
d379142b
SP
1965 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1966 adapter->num_tx_qs);
10ef9ab4 1967 return 0;
6b7c5b94
SP
1968}
1969
10ef9ab4 1970static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
1971{
1972 struct be_queue_info *q;
3abcdeda
SP
1973 struct be_rx_obj *rxo;
1974 int i;
1975
1976 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
1977 q = &rxo->cq;
1978 if (q->created)
1979 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1980 be_queue_free(adapter, q);
ac6a0c4a
SP
1981 }
1982}
1983
10ef9ab4 1984static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 1985{
10ef9ab4 1986 struct be_queue_info *eq, *cq;
3abcdeda
SP
1987 struct be_rx_obj *rxo;
1988 int rc, i;
6b7c5b94 1989
10ef9ab4
SP
1990 /* We'll create as many RSS rings as there are irqs.
1991 * But when there's only one irq there's no use creating RSS rings
1992 */
1993 adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1994 num_irqs(adapter) + 1 : 1;
7f640062
SP
1995 if (adapter->num_rx_qs != MAX_RX_QS) {
1996 rtnl_lock();
1997 netif_set_real_num_rx_queues(adapter->netdev,
1998 adapter->num_rx_qs);
1999 rtnl_unlock();
2000 }
ac6a0c4a 2001
6b7c5b94 2002 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
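	/* For example, with the default rx_frag_size of 2048 and a 4K
	 * PAGE_SIZE, get_order(2048) is 0, so big_page_size is 4096 and
	 * each page posted below is carved into two RX fragments.
	 */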
3abcdeda
SP
2003 for_all_rx_queues(adapter, rxo, i) {
2004 rxo->adapter = adapter;
3abcdeda
SP
2005 cq = &rxo->cq;
2006 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2007 sizeof(struct be_eth_rx_compl));
2008 if (rc)
10ef9ab4 2009 return rc;
3abcdeda 2010
10ef9ab4
SP
2011 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2012 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2013 if (rc)
10ef9ab4 2014 return rc;
3abcdeda 2015 }
6b7c5b94 2016
d379142b
SP
2017 dev_info(&adapter->pdev->dev,
2018 "created %d RSS queue(s) and 1 default RX queue\n",
2019 adapter->num_rx_qs - 1);
10ef9ab4 2020 return 0;
b628bde2
SP
2021}
2022
6b7c5b94
SP
2023static irqreturn_t be_intx(int irq, void *dev)
2024{
e49cc34f
SP
2025 struct be_eq_obj *eqo = dev;
2026 struct be_adapter *adapter = eqo->adapter;
2027 int num_evts = 0;
6b7c5b94 2028
d0b9cec3
SP
2029 /* IRQ is not expected when NAPI is scheduled as the EQ
2030 * will not be armed.
2031 * But, this can happen on Lancer INTx where it takes
2032 * a while to de-assert INTx or in BE2 where occasionally
2033 * an interrupt may be raised even when EQ is unarmed.
2034 * If NAPI is already scheduled, then counting & notifying
2035 * events will orphan them.
e49cc34f 2036 */
d0b9cec3 2037 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2038 num_evts = events_get(eqo);
d0b9cec3
SP
2039 __napi_schedule(&eqo->napi);
2040 if (num_evts)
2041 eqo->spurious_intr = 0;
2042 }
2043 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
e49cc34f 2044
d0b9cec3
SP
2045 /* Return IRQ_HANDLED only for the first spurious intr
2046 * after a valid intr to stop the kernel from branding
2047 * this irq as a bad one!
e49cc34f 2048 */
d0b9cec3
SP
2049 if (num_evts || eqo->spurious_intr++ == 0)
2050 return IRQ_HANDLED;
2051 else
2052 return IRQ_NONE;
6b7c5b94
SP
2053}
2054
10ef9ab4 2055static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2056{
10ef9ab4 2057 struct be_eq_obj *eqo = dev;
6b7c5b94 2058
0b545a62
SP
2059 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2060 napi_schedule(&eqo->napi);
6b7c5b94
SP
2061 return IRQ_HANDLED;
2062}
2063
2e588f84 2064static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2065{
2e588f84 2066 return (rxcp->tcpf && !rxcp->err) ? true : false;
6b7c5b94
SP
2067}
2068
10ef9ab4
SP
2069static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2070 int budget)
6b7c5b94 2071{
3abcdeda
SP
2072 struct be_adapter *adapter = rxo->adapter;
2073 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2074 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
2075 u32 work_done;
2076
2077 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2078 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2079 if (!rxcp)
2080 break;
2081
12004ae9
SP
2082 /* Is it a flush compl that has no data? */
2083 if (unlikely(rxcp->num_rcvd == 0))
2084 goto loop_continue;
2085
2086 /* Discard compls with partial DMA (Lancer B0) */
2087 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2088 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2089 goto loop_continue;
2090 }
2091
2092 /* On BE drop pkts that arrive due to imperfect filtering in
2093 * promiscuous mode on some SKUs
2094 */
2095 if (unlikely(rxcp->port != adapter->port_num &&
2096 !lancer_chip(adapter))) {
10ef9ab4 2097 be_rx_compl_discard(rxo, rxcp);
12004ae9 2098 goto loop_continue;
64642811 2099 }
009dd872 2100
12004ae9 2101 if (do_gro(rxcp))
10ef9ab4 2102 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2103 else
10ef9ab4 2104 be_rx_compl_process(rxo, rxcp);
12004ae9 2105loop_continue:
2e588f84 2106 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2107 }
2108
10ef9ab4
SP
2109 if (work_done) {
2110 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2111
10ef9ab4
SP
2112 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2113 be_post_rx_frags(rxo, GFP_ATOMIC);
6b7c5b94 2114 }
10ef9ab4 2115
6b7c5b94
SP
2116 return work_done;
2117}
2118
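/* Reaps up to 'budget' TX completions on txo, returns the freed wrbs to
 * the queue and re-wakes the netdev subqueue if it was stopped for lack
 * of wrbs. Returns true when TX work was fully drained within the budget.
 */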
10ef9ab4
SP
2119static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2120 int budget, int idx)
6b7c5b94 2121{
6b7c5b94 2122 struct be_eth_tx_compl *txcp;
10ef9ab4 2123 int num_wrbs = 0, work_done;
3c8def97 2124
10ef9ab4
SP
2125 for (work_done = 0; work_done < budget; work_done++) {
2126 txcp = be_tx_compl_get(&txo->cq);
2127 if (!txcp)
2128 break;
2129 num_wrbs += be_tx_compl_process(adapter, txo,
3c8def97
SP
2130 AMAP_GET_BITS(struct amap_eth_tx_compl,
2131 wrb_index, txcp));
10ef9ab4 2132 }
6b7c5b94 2133
10ef9ab4
SP
2134 if (work_done) {
2135 be_cq_notify(adapter, txo->cq.id, true, work_done);
2136 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2137
10ef9ab4
SP
2138 /* As Tx wrbs have been freed up, wake up netdev queue
2139 * if it was stopped due to lack of tx wrbs. */
2140 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2141 atomic_read(&txo->q.used) < txo->q.len / 2) {
2142 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2143 }
10ef9ab4
SP
2144
2145 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2146 tx_stats(txo)->tx_compl += work_done;
2147 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2148 }
10ef9ab4
SP
2149 return (work_done < budget); /* Done */
2150}
6b7c5b94 2151
10ef9ab4
SP
2152int be_poll(struct napi_struct *napi, int budget)
2153{
2154 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2155 struct be_adapter *adapter = eqo->adapter;
0b545a62 2156 int max_work = 0, work, i, num_evts;
10ef9ab4 2157 bool tx_done;
f31e50a8 2158
0b545a62
SP
2159 num_evts = events_get(eqo);
2160
10ef9ab4
SP
2161 /* Process all TXQs serviced by this EQ */
2162 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2163 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2164 eqo->tx_budget, i);
2165 if (!tx_done)
2166 max_work = budget;
f31e50a8
SP
2167 }
2168
10ef9ab4
SP
2169 /* This loop will iterate twice for EQ0 in which
2170 * completions of the last RXQ (the default one) are also processed.
2171 * For other EQs the loop iterates only once.
2172 */
2173 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2174 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2175 max_work = max(work, max_work);
2176 }
6b7c5b94 2177
10ef9ab4
SP
2178 if (is_mcc_eqo(eqo))
2179 be_process_mcc(adapter);
93c86700 2180
10ef9ab4
SP
2181 if (max_work < budget) {
2182 napi_complete(napi);
0b545a62 2183 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2184 } else {
2185 /* As we'll continue in polling mode, count and clear events */
0b545a62 2186 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2187 }
10ef9ab4 2188 return max_work;
6b7c5b94
SP
2189}
2190
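/* Polls for unrecoverable errors: on Lancer the SLIPORT status/error
 * registers are read; on BE2/BE3 the UE status words are read from PCI
 * config space and masked with the FW-provided masks. Any bits still set
 * are decoded via the ue_status_low_desc/ue_status_hi_desc tables and
 * logged.
 */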
f67ef7ba 2191void be_detect_error(struct be_adapter *adapter)
7c185276 2192{
e1cfb67a
PR
2193 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2194 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276
AK
2195 u32 i;
2196
d23e946c 2197 if (be_hw_error(adapter))
72f02485
SP
2198 return;
2199
e1cfb67a
PR
2200 if (lancer_chip(adapter)) {
2201 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2202 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2203 sliport_err1 = ioread32(adapter->db +
2204 SLIPORT_ERROR1_OFFSET);
2205 sliport_err2 = ioread32(adapter->db +
2206 SLIPORT_ERROR2_OFFSET);
2207 }
2208 } else {
2209 pci_read_config_dword(adapter->pdev,
2210 PCICFG_UE_STATUS_LOW, &ue_lo);
2211 pci_read_config_dword(adapter->pdev,
2212 PCICFG_UE_STATUS_HIGH, &ue_hi);
2213 pci_read_config_dword(adapter->pdev,
2214 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2215 pci_read_config_dword(adapter->pdev,
2216 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2217
f67ef7ba
PR
2218 ue_lo = (ue_lo & ~ue_lo_mask);
2219 ue_hi = (ue_hi & ~ue_hi_mask);
e1cfb67a 2220 }
7c185276 2221
1451ae6e
AK
2222 /* On certain platforms BE hardware can indicate spurious UEs.
2223 * Allow the h/w to stop working completely in case of a real UE.
2224 * Hence hw_error is not set on UE detection.
2225 */
2226 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
f67ef7ba 2227 adapter->hw_error = true;
434b3648 2228 dev_err(&adapter->pdev->dev,
f67ef7ba
PR
2229 "Error detected in the card\n");
2230 }
2231
2232 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2233 dev_err(&adapter->pdev->dev,
2234 "ERR: sliport status 0x%x\n", sliport_status);
2235 dev_err(&adapter->pdev->dev,
2236 "ERR: sliport error1 0x%x\n", sliport_err1);
2237 dev_err(&adapter->pdev->dev,
2238 "ERR: sliport error2 0x%x\n", sliport_err2);
d053de91
AK
2239 }
2240
e1cfb67a
PR
2241 if (ue_lo) {
2242 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2243 if (ue_lo & 1)
7c185276
AK
2244 dev_err(&adapter->pdev->dev,
2245 "UE: %s bit set\n", ue_status_low_desc[i]);
2246 }
2247 }
f67ef7ba 2248
e1cfb67a
PR
2249 if (ue_hi) {
2250 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2251 if (ue_hi & 1)
7c185276
AK
2252 dev_err(&adapter->pdev->dev,
2253 "UE: %s bit set\n", ue_status_hi_desc[i]);
2254 }
2255 }
2256
2257}
2258
8d56ff11
SP
2259static void be_msix_disable(struct be_adapter *adapter)
2260{
ac6a0c4a 2261 if (msix_enabled(adapter)) {
8d56ff11 2262 pci_disable_msix(adapter->pdev);
ac6a0c4a 2263 adapter->num_msix_vec = 0;
3abcdeda
SP
2264 }
2265}
2266
10ef9ab4
SP
2267static uint be_num_rss_want(struct be_adapter *adapter)
2268{
30e80b55 2269 u32 num = 0;
abb93951 2270
10ef9ab4 2271 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
abb93951
PR
2272 (lancer_chip(adapter) ||
2273 (!sriov_want(adapter) && be_physfn(adapter)))) {
2274 num = adapter->max_rss_queues;
30e80b55
YM
2275 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2276 }
2277 return num;
10ef9ab4
SP
2278}
2279
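/* Requests one MSI-X vector per desired RSS ring, plus RoCE vectors when
 * supported. With the pci_enable_msix() semantics used here, a positive
 * return value is the number of vectors actually available, so the
 * request is retried with that smaller count.
 */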
6b7c5b94
SP
2280static void be_msix_enable(struct be_adapter *adapter)
2281{
10ef9ab4 2282#define BE_MIN_MSIX_VECTORS 1
045508a8 2283 int i, status, num_vec, num_roce_vec = 0;
d379142b 2284 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2285
10ef9ab4
SP
2286 /* If RSS queues are not used, we still need a vec for the default RX Q */
2287 num_vec = min(be_num_rss_want(adapter), num_online_cpus());
045508a8
PP
2288 if (be_roce_supported(adapter)) {
2289 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2290 (num_online_cpus() + 1));
2291 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2292 num_vec += num_roce_vec;
2293 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2294 }
10ef9ab4 2295 num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
3abcdeda 2296
ac6a0c4a 2297 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2298 adapter->msix_entries[i].entry = i;
2299
ac6a0c4a 2300 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
3abcdeda
SP
2301 if (status == 0) {
2302 goto done;
2303 } else if (status >= BE_MIN_MSIX_VECTORS) {
ac6a0c4a 2304 num_vec = status;
3abcdeda 2305 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
ac6a0c4a 2306 num_vec) == 0)
3abcdeda 2307 goto done;
3abcdeda 2308 }
d379142b
SP
2309
2310 dev_warn(dev, "MSIx enable failed\n");
3abcdeda
SP
2311 return;
2312done:
045508a8
PP
2313 if (be_roce_supported(adapter)) {
2314 if (num_vec > num_roce_vec) {
2315 adapter->num_msix_vec = num_vec - num_roce_vec;
2316 adapter->num_msix_roce_vec =
2317 num_vec - adapter->num_msix_vec;
2318 } else {
2319 adapter->num_msix_vec = num_vec;
2320 adapter->num_msix_roce_vec = 0;
2321 }
2322 } else
2323 adapter->num_msix_vec = num_vec;
d379142b 2324 dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
ac6a0c4a 2325 return;
6b7c5b94
SP
2326}
2327
fe6d2a38 2328static inline int be_msix_vec_get(struct be_adapter *adapter,
10ef9ab4 2329 struct be_eq_obj *eqo)
b628bde2 2330{
10ef9ab4 2331 return adapter->msix_entries[eqo->idx].vector;
b628bde2 2332}
6b7c5b94 2333
b628bde2
SP
2334static int be_msix_register(struct be_adapter *adapter)
2335{
10ef9ab4
SP
2336 struct net_device *netdev = adapter->netdev;
2337 struct be_eq_obj *eqo;
2338 int status, i, vec;
6b7c5b94 2339
10ef9ab4
SP
2340 for_all_evt_queues(adapter, eqo, i) {
2341 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2342 vec = be_msix_vec_get(adapter, eqo);
2343 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
2344 if (status)
2345 goto err_msix;
2346 }
b628bde2 2347
6b7c5b94 2348 return 0;
3abcdeda 2349err_msix:
10ef9ab4
SP
2350 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2351 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2352 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2353 status);
ac6a0c4a 2354 be_msix_disable(adapter);
6b7c5b94
SP
2355 return status;
2356}
2357
2358static int be_irq_register(struct be_adapter *adapter)
2359{
2360 struct net_device *netdev = adapter->netdev;
2361 int status;
2362
ac6a0c4a 2363 if (msix_enabled(adapter)) {
6b7c5b94
SP
2364 status = be_msix_register(adapter);
2365 if (status == 0)
2366 goto done;
ba343c77
SB
2367 /* INTx is not supported for VF */
2368 if (!be_physfn(adapter))
2369 return status;
6b7c5b94
SP
2370 }
2371
e49cc34f 2372 /* INTx: only the first EQ is used */
6b7c5b94
SP
2373 netdev->irq = adapter->pdev->irq;
2374 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 2375 &adapter->eq_obj[0]);
6b7c5b94
SP
2376 if (status) {
2377 dev_err(&adapter->pdev->dev,
2378 "INTx request IRQ failed - err %d\n", status);
2379 return status;
2380 }
2381done:
2382 adapter->isr_registered = true;
2383 return 0;
2384}
2385
2386static void be_irq_unregister(struct be_adapter *adapter)
2387{
2388 struct net_device *netdev = adapter->netdev;
10ef9ab4 2389 struct be_eq_obj *eqo;
3abcdeda 2390 int i;
6b7c5b94
SP
2391
2392 if (!adapter->isr_registered)
2393 return;
2394
2395 /* INTx */
ac6a0c4a 2396 if (!msix_enabled(adapter)) {
e49cc34f 2397 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
2398 goto done;
2399 }
2400
2401 /* MSIx */
10ef9ab4
SP
2402 for_all_evt_queues(adapter, eqo, i)
2403 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2404
6b7c5b94
SP
2405done:
2406 adapter->isr_registered = false;
6b7c5b94
SP
2407}
2408
10ef9ab4 2409static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2410{
2411 struct be_queue_info *q;
2412 struct be_rx_obj *rxo;
2413 int i;
2414
2415 for_all_rx_queues(adapter, rxo, i) {
2416 q = &rxo->q;
2417 if (q->created) {
2418 be_cmd_rxq_destroy(adapter, q);
2419 /* After the rxq is invalidated, wait for a grace time
2420 * of 1ms for all DMA to end and the flush compl to
2421 * arrive
2422 */
2423 mdelay(1);
10ef9ab4 2424 be_rx_cq_clean(rxo);
482c9e79 2425 }
10ef9ab4 2426 be_queue_free(adapter, q);
482c9e79
SP
2427 }
2428}
2429
889cd4b2
SP
2430static int be_close(struct net_device *netdev)
2431{
2432 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
2433 struct be_eq_obj *eqo;
2434 int i;
889cd4b2 2435
045508a8
PP
2436 be_roce_dev_close(adapter);
2437
fe6d2a38
SP
2438 if (!lancer_chip(adapter))
2439 be_intr_set(adapter, false);
889cd4b2 2440
a323d9bf 2441 for_all_evt_queues(adapter, eqo, i)
10ef9ab4 2442 napi_disable(&eqo->napi);
a323d9bf
SP
2443
2444 be_async_mcc_disable(adapter);
2445
2446 /* Wait for all pending tx completions to arrive so that
2447 * all tx skbs are freed.
2448 */
2449 be_tx_compl_clean(adapter);
2450
2451 be_rx_qs_destroy(adapter);
2452
2453 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
2454 if (msix_enabled(adapter))
2455 synchronize_irq(be_msix_vec_get(adapter, eqo));
2456 else
2457 synchronize_irq(netdev->irq);
2458 be_eq_clean(eqo);
63fcb27f
PR
2459 }
2460
889cd4b2
SP
2461 be_irq_unregister(adapter);
2462
482c9e79
SP
2463 return 0;
2464}
2465
10ef9ab4 2466static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79
SP
2467{
2468 struct be_rx_obj *rxo;
e9008ee9
PR
2469 int rc, i, j;
2470 u8 rsstable[128];
482c9e79
SP
2471
2472 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
2473 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2474 sizeof(struct be_eth_rx_d));
2475 if (rc)
2476 return rc;
2477 }
2478
2479 /* The FW would like the default RXQ to be created first */
2480 rxo = default_rxo(adapter);
2481 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2482 adapter->if_handle, false, &rxo->rss_id);
2483 if (rc)
2484 return rc;
2485
2486 for_all_rss_queues(adapter, rxo, i) {
482c9e79 2487 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
2488 rx_frag_size, adapter->if_handle,
2489 true, &rxo->rss_id);
482c9e79
SP
2490 if (rc)
2491 return rc;
2492 }
2493
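	/* Build the 128-entry RSS indirection table by striping the RSS
	 * ring ids round-robin; e.g., hypothetically with 4 RSS rings:
	 *   rsstable[] = { id0, id1, id2, id3, id0, id1, ... }
	 * repeated 32 times, spreading hash buckets evenly across rings.
	 */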
2494 if (be_multi_rxq(adapter)) {
e9008ee9
PR
2495 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2496 for_all_rss_queues(adapter, rxo, i) {
2497 if ((j + i) >= 128)
2498 break;
2499 rsstable[j + i] = rxo->rss_id;
2500 }
2501 }
2502 rc = be_cmd_rss_config(adapter, rsstable, 128);
482c9e79
SP
2503 if (rc)
2504 return rc;
2505 }
2506
2507 /* First time posting */
10ef9ab4 2508 for_all_rx_queues(adapter, rxo, i)
482c9e79 2509 be_post_rx_frags(rxo, GFP_KERNEL);
889cd4b2
SP
2510 return 0;
2511}
2512
6b7c5b94
SP
2513static int be_open(struct net_device *netdev)
2514{
2515 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 2516 struct be_eq_obj *eqo;
3abcdeda 2517 struct be_rx_obj *rxo;
10ef9ab4 2518 struct be_tx_obj *txo;
b236916a 2519 u8 link_status;
3abcdeda 2520 int status, i;
5fb379ee 2521
10ef9ab4 2522 status = be_rx_qs_create(adapter);
482c9e79
SP
2523 if (status)
2524 goto err;
2525
5fb379ee
SP
2526 be_irq_register(adapter);
2527
fe6d2a38
SP
2528 if (!lancer_chip(adapter))
2529 be_intr_set(adapter, true);
5fb379ee 2530
10ef9ab4 2531 for_all_rx_queues(adapter, rxo, i)
3abcdeda 2532 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 2533
10ef9ab4
SP
2534 for_all_tx_queues(adapter, txo, i)
2535 be_cq_notify(adapter, txo->cq.id, true, 0);
2536
7a1e9b20
SP
2537 be_async_mcc_enable(adapter);
2538
10ef9ab4
SP
2539 for_all_evt_queues(adapter, eqo, i) {
2540 napi_enable(&eqo->napi);
2541 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2542 }
2543
323ff71e 2544 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
2545 if (!status)
2546 be_link_status_update(adapter, link_status);
2547
045508a8 2548 be_roce_dev_open(adapter);
889cd4b2
SP
2549 return 0;
2550err:
2551 be_close(adapter->netdev);
2552 return -EIO;
5fb379ee
SP
2553}
2554
71d8d1b5
AK
2555static int be_setup_wol(struct be_adapter *adapter, bool enable)
2556{
2557 struct be_dma_mem cmd;
2558 int status = 0;
2559 u8 mac[ETH_ALEN];
2560
2561 memset(mac, 0, ETH_ALEN);
2562
2563 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2b7bcebf
IV
2564 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2565 GFP_KERNEL);
71d8d1b5
AK
2566 if (cmd.va == NULL)
2567 return -1;
2568 memset(cmd.va, 0, cmd.size);
2569
2570 if (enable) {
2571 status = pci_write_config_dword(adapter->pdev,
2572 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2573 if (status) {
2574 dev_err(&adapter->pdev->dev,
2381a55c 2575 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
2576 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2577 cmd.dma);
71d8d1b5
AK
2578 return status;
2579 }
2580 status = be_cmd_enable_magic_wol(adapter,
2581 adapter->netdev->dev_addr, &cmd);
2582 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2583 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2584 } else {
2585 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2586 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2587 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2588 }
2589
2b7bcebf 2590 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2591 return status;
2592}
2593
6d87f5c3
AK
2594/*
2595 * Generate a seed MAC address from the PF MAC Address using jhash.
2596 * MAC addresses for VFs are assigned incrementally, starting from the seed.
2597 * These addresses are programmed in the ASIC by the PF and the VF driver
2598 * queries for the MAC address during its probe.
2599 */
2600static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2601{
f9449ab7 2602 u32 vf;
3abcdeda 2603 int status = 0;
6d87f5c3 2604 u8 mac[ETH_ALEN];
11ac75ed 2605 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2606
2607 be_vf_eth_addr_generate(adapter, mac);
2608
11ac75ed 2609 for_all_vfs(adapter, vf_cfg, vf) {
590c391d
PR
2610 if (lancer_chip(adapter)) {
2611 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2612 } else {
2613 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
2614 vf_cfg->if_handle,
2615 &vf_cfg->pmac_id, vf + 1);
590c391d
PR
2616 }
2617
6d87f5c3
AK
2618 if (status)
2619 dev_err(&adapter->pdev->dev,
590c391d 2620 "Mac address assignment failed for VF %d\n", vf);
6d87f5c3 2621 else
11ac75ed 2622 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
2623
2624 mac[5] += 1;
2625 }
2626 return status;
2627}
2628
f9449ab7 2629static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 2630{
11ac75ed 2631 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2632 u32 vf;
2633
39f1d94d
SP
2634 if (be_find_vfs(adapter, ASSIGNED)) {
2635 dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
2636 goto done;
2637 }
2638
11ac75ed 2639 for_all_vfs(adapter, vf_cfg, vf) {
590c391d
PR
2640 if (lancer_chip(adapter))
2641 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2642 else
11ac75ed
SP
2643 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2644 vf_cfg->pmac_id, vf + 1);
f9449ab7 2645
11ac75ed
SP
2646 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2647 }
39f1d94d
SP
2648 pci_disable_sriov(adapter->pdev);
2649done:
2650 kfree(adapter->vf_cfg);
2651 adapter->num_vfs = 0;
6d87f5c3
AK
2652}
2653
a54769f5
SP
2654static int be_clear(struct be_adapter *adapter)
2655{
fbc13f01
AK
2656 int i = 1;
2657
191eb756
SP
2658 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2659 cancel_delayed_work_sync(&adapter->work);
2660 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2661 }
2662
11ac75ed 2663 if (sriov_enabled(adapter))
f9449ab7
SP
2664 be_vf_clear(adapter);
2665
fbc13f01
AK
2666 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2667 be_cmd_pmac_del(adapter, adapter->if_handle,
2668 adapter->pmac_id[i], 0);
2669
f9449ab7 2670 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5
SP
2671
2672 be_mcc_queues_destroy(adapter);
10ef9ab4 2673 be_rx_cqs_destroy(adapter);
a54769f5 2674 be_tx_queues_destroy(adapter);
10ef9ab4 2675 be_evt_queues_destroy(adapter);
a54769f5 2676
abb93951
PR
2677 kfree(adapter->pmac_id);
2678 adapter->pmac_id = NULL;
2679
10ef9ab4 2680 be_msix_disable(adapter);
a54769f5
SP
2681 return 0;
2682}
2683
abb93951
PR
2684static void be_get_vf_if_cap_flags(struct be_adapter *adapter,
2685 u32 *cap_flags, u8 domain)
2686{
2687 bool profile_present = false;
2688 int status;
2689
2690 if (lancer_chip(adapter)) {
2691 status = be_cmd_get_profile_config(adapter, cap_flags, domain);
2692 if (!status)
2693 profile_present = true;
2694 }
2695
2696 if (!profile_present)
2697 *cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2698 BE_IF_FLAGS_MULTICAST;
2699}
2700
39f1d94d 2701static int be_vf_setup_init(struct be_adapter *adapter)
30128031 2702{
11ac75ed 2703 struct be_vf_cfg *vf_cfg;
30128031
SP
2704 int vf;
2705
39f1d94d
SP
2706 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2707 GFP_KERNEL);
2708 if (!adapter->vf_cfg)
2709 return -ENOMEM;
2710
11ac75ed
SP
2711 for_all_vfs(adapter, vf_cfg, vf) {
2712 vf_cfg->if_handle = -1;
2713 vf_cfg->pmac_id = -1;
30128031 2714 }
39f1d94d 2715 return 0;
30128031
SP
2716}
2717
f9449ab7
SP
2718static int be_vf_setup(struct be_adapter *adapter)
2719{
11ac75ed 2720 struct be_vf_cfg *vf_cfg;
39f1d94d 2721 struct device *dev = &adapter->pdev->dev;
f9449ab7 2722 u32 cap_flags, en_flags, vf;
f1f3ee1b 2723 u16 def_vlan, lnk_speed;
39f1d94d
SP
2724 int status, enabled_vfs;
2725
2726 enabled_vfs = be_find_vfs(adapter, ENABLED);
2727 if (enabled_vfs) {
2728 dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
2729 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2730 return 0;
2731 }
f9449ab7 2732
39f1d94d
SP
2733 if (num_vfs > adapter->dev_num_vfs) {
2734 dev_warn(dev, "Device supports %d VFs and not %d\n",
2735 adapter->dev_num_vfs, num_vfs);
2736 num_vfs = adapter->dev_num_vfs;
2737 }
2738
2739 status = pci_enable_sriov(adapter->pdev, num_vfs);
2740 if (!status) {
2741 adapter->num_vfs = num_vfs;
2742 } else {
2743 /* Platform doesn't support SRIOV though device supports it */
2744 dev_warn(dev, "SRIOV enable failed\n");
2745 return 0;
2746 }
2747
2748 status = be_vf_setup_init(adapter);
2749 if (status)
2750 goto err;
30128031 2751
11ac75ed 2752 for_all_vfs(adapter, vf_cfg, vf) {
abb93951
PR
2753 be_get_vf_if_cap_flags(adapter, &cap_flags, vf + 1);
2754
2755 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2756 BE_IF_FLAGS_BROADCAST |
2757 BE_IF_FLAGS_MULTICAST);
2758
1578e777
PR
2759 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2760 &vf_cfg->if_handle, vf + 1);
f9449ab7
SP
2761 if (status)
2762 goto err;
f9449ab7
SP
2763 }
2764
39f1d94d
SP
2765 if (!enabled_vfs) {
2766 status = be_vf_eth_addr_config(adapter);
2767 if (status)
2768 goto err;
2769 }
f9449ab7 2770
11ac75ed 2771 for_all_vfs(adapter, vf_cfg, vf) {
8a046d3b
VV
2772 lnk_speed = 1000;
2773 status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
f9449ab7
SP
2774 if (status)
2775 goto err;
11ac75ed 2776 vf_cfg->tx_rate = lnk_speed * 10;
f1f3ee1b
AK
2777
2778 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2779 vf + 1, vf_cfg->if_handle);
2780 if (status)
2781 goto err;
2782 vf_cfg->def_vid = def_vlan;
dcf7ebba
PR
2783
2784 be_cmd_enable_vf(adapter, vf + 1);
f9449ab7
SP
2785 }
2786 return 0;
2787err:
2788 return status;
2789}
2790
30128031
SP
2791static void be_setup_init(struct be_adapter *adapter)
2792{
2793 adapter->vlan_prio_bmap = 0xff;
42f11cf2 2794 adapter->phy.link_speed = -1;
30128031
SP
2795 adapter->if_handle = -1;
2796 adapter->be3_native = false;
2797 adapter->promiscuous = false;
f25b119c
PR
2798 if (be_physfn(adapter))
2799 adapter->cmd_privileges = MAX_PRIVILEGES;
2800 else
2801 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
2802}
2803
1578e777
PR
2804static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2805 bool *active_mac, u32 *pmac_id)
590c391d 2806{
1578e777 2807 int status = 0;
e5e1ee89 2808
1578e777
PR
2809 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2810 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2811 if (!lancer_chip(adapter) && !be_physfn(adapter))
2812 *active_mac = true;
2813 else
2814 *active_mac = false;
e5e1ee89 2815
1578e777
PR
2816 return status;
2817 }
e5e1ee89 2818
1578e777
PR
2819 if (lancer_chip(adapter)) {
2820 status = be_cmd_get_mac_from_list(adapter, mac,
2821 active_mac, pmac_id, 0);
2822 if (*active_mac) {
5ee4979b
SP
2823 status = be_cmd_mac_addr_query(adapter, mac, false,
2824 if_handle, *pmac_id);
1578e777
PR
2825 }
2826 } else if (be_physfn(adapter)) {
2827 /* For BE3, for PF get permanent MAC */
5ee4979b 2828 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
1578e777 2829 *active_mac = false;
e5e1ee89 2830 } else {
1578e777 2831 /* For BE3, for VF get soft MAC assigned by PF*/
5ee4979b 2832 status = be_cmd_mac_addr_query(adapter, mac, false,
1578e777
PR
2833 if_handle, 0);
2834 *active_mac = true;
e5e1ee89 2835 }
590c391d
PR
2836 return status;
2837}
2838
abb93951
PR
2839static void be_get_resources(struct be_adapter *adapter)
2840{
2841 int status;
2842 bool profile_present = false;
2843
2844 if (lancer_chip(adapter)) {
2845 status = be_cmd_get_func_config(adapter);
2846
2847 if (!status)
2848 profile_present = true;
2849 }
2850
2851 if (profile_present) {
2852 /* Sanity fixes for Lancer */
2853 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
2854 BE_UC_PMAC_COUNT);
2855 adapter->max_vlans = min_t(u16, adapter->max_vlans,
2856 BE_NUM_VLANS_SUPPORTED);
2857 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
2858 BE_MAX_MC);
2859 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
2860 MAX_TX_QS);
2861 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
2862 BE3_MAX_RSS_QS);
2863 adapter->max_event_queues = min_t(u16,
2864 adapter->max_event_queues,
2865 BE3_MAX_RSS_QS);
2866
2867 if (adapter->max_rss_queues &&
2868 adapter->max_rss_queues == adapter->max_rx_queues)
2869 adapter->max_rss_queues -= 1;
2870
2871 if (adapter->max_event_queues < adapter->max_rss_queues)
2872 adapter->max_rss_queues = adapter->max_event_queues;
2873
2874 } else {
2875 if (be_physfn(adapter))
2876 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
2877 else
2878 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
2879
2880 if (adapter->function_mode & FLEX10_MODE)
2881 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
2882 else
2883 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2884
2885 adapter->max_mcast_mac = BE_MAX_MC;
2886 adapter->max_tx_queues = MAX_TX_QS;
2887 adapter->max_rss_queues = (adapter->be3_native) ?
2888 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2889 adapter->max_event_queues = BE3_MAX_RSS_QS;
2890
2891 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
2892 BE_IF_FLAGS_BROADCAST |
2893 BE_IF_FLAGS_MULTICAST |
2894 BE_IF_FLAGS_PASS_L3L4_ERRORS |
2895 BE_IF_FLAGS_MCAST_PROMISCUOUS |
2896 BE_IF_FLAGS_VLAN_PROMISCUOUS |
2897 BE_IF_FLAGS_PROMISCUOUS;
2898
2899 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
2900 adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
2901 }
2902}
2903
39f1d94d
SP
2904/* Routine to query per function resource limits */
2905static int be_get_config(struct be_adapter *adapter)
2906{
abb93951 2907 int pos, status;
39f1d94d
SP
2908 u16 dev_num_vfs;
2909
abb93951
PR
2910 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2911 &adapter->function_mode,
2912 &adapter->function_caps);
2913 if (status)
2914 goto err;
2915
2916 be_get_resources(adapter);
2917
2918 /* primary mac needs 1 pmac entry */
2919 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
2920 sizeof(u32), GFP_KERNEL);
2921 if (!adapter->pmac_id) {
2922 status = -ENOMEM;
2923 goto err;
2924 }
2925
39f1d94d
SP
2926 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2927 if (pos) {
2928 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2929 &dev_num_vfs);
7c5a5242
VV
2930 if (!lancer_chip(adapter))
2931 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
39f1d94d
SP
2932 adapter->dev_num_vfs = dev_num_vfs;
2933 }
abb93951
PR
2934err:
2935 return status;
39f1d94d
SP
2936}
2937
5fb379ee
SP
2938static int be_setup(struct be_adapter *adapter)
2939{
39f1d94d 2940 struct device *dev = &adapter->pdev->dev;
abb93951 2941 u32 en_flags;
a54769f5 2942 u32 tx_fc, rx_fc;
10ef9ab4 2943 int status;
ba343c77 2944 u8 mac[ETH_ALEN];
1578e777 2945 bool active_mac;
ba343c77 2946
30128031 2947 be_setup_init(adapter);
6b7c5b94 2948
abb93951
PR
2949 if (!lancer_chip(adapter))
2950 be_cmd_req_native_mode(adapter);
39f1d94d 2951
abb93951
PR
2952 status = be_get_config(adapter);
2953 if (status)
2954 goto err;
73d540f2 2955
10ef9ab4
SP
2956 be_msix_enable(adapter);
2957
2958 status = be_evt_queues_create(adapter);
2959 if (status)
a54769f5 2960 goto err;
6b7c5b94 2961
10ef9ab4
SP
2962 status = be_tx_cqs_create(adapter);
2963 if (status)
2964 goto err;
2965
2966 status = be_rx_cqs_create(adapter);
2967 if (status)
a54769f5 2968 goto err;
6b7c5b94 2969
f9449ab7 2970 status = be_mcc_queues_create(adapter);
10ef9ab4 2971 if (status)
a54769f5 2972 goto err;
6b7c5b94 2973
f25b119c
PR
2974 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
2975 /* In UMC mode the FW does not return the right privileges.
2976 * Override with a privilege level equivalent to the PF's.
2977 */
2978 if (be_is_mc(adapter))
2979 adapter->cmd_privileges = MAX_PRIVILEGES;
2980
f9449ab7
SP
2981 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2982 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
5d5adb93 2983
abb93951 2984 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
f9449ab7 2985 en_flags |= BE_IF_FLAGS_RSS;
1578e777 2986
abb93951 2987 en_flags = en_flags & adapter->if_cap_flags;
0b13fb45 2988
abb93951 2989 status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
1578e777 2990 &adapter->if_handle, 0);
5fb379ee 2991 if (status != 0)
a54769f5 2992 goto err;
6b7c5b94 2993
1578e777
PR
2994 memset(mac, 0, ETH_ALEN);
2995 active_mac = false;
2996 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
2997 &active_mac, &adapter->pmac_id[0]);
2998 if (status != 0)
2999 goto err;
3000
3001 if (!active_mac) {
3002 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3003 &adapter->pmac_id[0], 0);
3004 if (status != 0)
3005 goto err;
3006 }
3007
3008 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3009 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3010 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
f9449ab7 3011 }
0dffc83e 3012
10ef9ab4
SP
3013 status = be_tx_qs_create(adapter);
3014 if (status)
3015 goto err;
3016
04b71175 3017 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
5a56eb10 3018
1d1e9a46 3019 if (adapter->vlans_added)
10329df8 3020 be_vid_config(adapter);
7ab8b0b4 3021
a54769f5 3022 be_set_rx_mode(adapter->netdev);
5fb379ee 3023
ddc3f5cb 3024 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
590c391d 3025
ddc3f5cb
AK
3026 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3027 be_cmd_set_flow_control(adapter, adapter->tx_fc,
a54769f5 3028 adapter->rx_fc);
2dc1deb6 3029
39f1d94d
SP
3030 if (be_physfn(adapter) && num_vfs) {
3031 if (adapter->dev_num_vfs)
3032 be_vf_setup(adapter);
3033 else
3034 dev_warn(dev, "device doesn't support SRIOV\n");
f9449ab7
SP
3035 }
3036
f25b119c
PR
3037 status = be_cmd_get_phy_info(adapter);
3038 if (!status && be_pause_supported(adapter))
42f11cf2
AK
3039 adapter->phy.fc_autoneg = 1;
3040
191eb756
SP
3041 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3042 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
f9449ab7 3043 return 0;
a54769f5
SP
3044err:
3045 be_clear(adapter);
3046 return status;
3047}
6b7c5b94 3048
66268739
IV
3049#ifdef CONFIG_NET_POLL_CONTROLLER
3050static void be_netpoll(struct net_device *netdev)
3051{
3052 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3053 struct be_eq_obj *eqo;
66268739
IV
3054 int i;
3055
e49cc34f
SP
3056 for_all_evt_queues(adapter, eqo, i) {
3057 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3058 napi_schedule(&eqo->napi);
3059 }
10ef9ab4
SP
3060
3061 return;
66268739
IV
3062}
3063#endif
3064
84517482 3065#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
c165541e
PR
3066char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3067
fa9a6fed 3068static bool be_flash_redboot(struct be_adapter *adapter,
3f0d4560
AK
3069 const u8 *p, u32 img_start, int image_size,
3070 int hdr_size)
fa9a6fed
SB
3071{
3072 u32 crc_offset;
3073 u8 flashed_crc[4];
3074 int status;
3f0d4560
AK
3075
3076 crc_offset = hdr_size + img_start + image_size - 4;
3077
fa9a6fed 3078 p += crc_offset;
3f0d4560
AK
3079
3080 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 3081 (image_size - 4));
fa9a6fed
SB
3082 if (status) {
3083 dev_err(&adapter->pdev->dev,
3084 "could not get crc from flash, not flashing redboot\n");
3085 return false;
3086 }
3087
3088 /* Update redboot only if the CRC does not match */
3089 if (!memcmp(flashed_crc, p, 4))
3090 return false;
3091 else
3092 return true;
fa9a6fed
SB
3093}
3094
306f1348
SP
3095static bool phy_flashing_required(struct be_adapter *adapter)
3096{
42f11cf2
AK
3097 return (adapter->phy.phy_type == TN_8022 &&
3098 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
3099}
3100
c165541e
PR
3101static bool is_comp_in_ufi(struct be_adapter *adapter,
3102 struct flash_section_info *fsec, int type)
3103{
3104 int i = 0, img_type = 0;
3105 struct flash_section_info_g2 *fsec_g2 = NULL;
3106
ca34fe38 3107 if (BE2_chip(adapter))
c165541e
PR
3108 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3109
3110 for (i = 0; i < MAX_FLASH_COMP; i++) {
3111 if (fsec_g2)
3112 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3113 else
3114 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3115
3116 if (img_type == type)
3117 return true;
3118 }
3119 return false;
3120
3121}
3122
3123struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3124 int header_size,
3125 const struct firmware *fw)
3126{
3127 struct flash_section_info *fsec = NULL;
3128 const u8 *p = fw->data;
3129
3130 p += header_size;
3131 while (p < (fw->data + fw->size)) {
3132 fsec = (struct flash_section_info *)p;
3133 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3134 return fsec;
3135 p += 32;
3136 }
3137 return NULL;
3138}
3139
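/* Writes one image to the flash in 32KB chunks. Intermediate chunks use
 * the SAVE flash opcode and the final chunk uses the FLASH (commit)
 * opcode, with the PHY variants of both when a PHY image is flashed.
 */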
773a2d7c
PR
3140static int be_flash(struct be_adapter *adapter, const u8 *img,
3141 struct be_dma_mem *flash_cmd, int optype, int img_size)
3142{
3143 u32 total_bytes = 0, flash_op, num_bytes = 0;
3144 int status = 0;
3145 struct be_cmd_write_flashrom *req = flash_cmd->va;
3146
3147 total_bytes = img_size;
3148 while (total_bytes) {
3149 num_bytes = min_t(u32, 32*1024, total_bytes);
3150
3151 total_bytes -= num_bytes;
3152
3153 if (!total_bytes) {
3154 if (optype == OPTYPE_PHY_FW)
3155 flash_op = FLASHROM_OPER_PHY_FLASH;
3156 else
3157 flash_op = FLASHROM_OPER_FLASH;
3158 } else {
3159 if (optype == OPTYPE_PHY_FW)
3160 flash_op = FLASHROM_OPER_PHY_SAVE;
3161 else
3162 flash_op = FLASHROM_OPER_SAVE;
3163 }
3164
be716446 3165 memcpy(req->data_buf, img, num_bytes);
773a2d7c
PR
3166 img += num_bytes;
3167 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3168 flash_op, num_bytes);
3169 if (status) {
3170 if (status == ILLEGAL_IOCTL_REQ &&
3171 optype == OPTYPE_PHY_FW)
3172 break;
3173 dev_err(&adapter->pdev->dev,
3174 "cmd to write to flash rom failed.\n");
3175 return status;
3176 }
3177 }
3178 return 0;
3179}
3180
ca34fe38
SP
3181/* For BE2 and BE3 */
3182static int be_flash_BEx(struct be_adapter *adapter,
c165541e
PR
3183 const struct firmware *fw,
3184 struct be_dma_mem *flash_cmd,
3185 int num_of_images)
3f0d4560 3186
84517482 3187{
3f0d4560 3188 int status = 0, i, filehdr_size = 0;
c165541e 3189 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
84517482 3190 const u8 *p = fw->data;
215faf9c 3191 const struct flash_comp *pflashcomp;
773a2d7c 3192 int num_comp, redboot;
c165541e
PR
3193 struct flash_section_info *fsec = NULL;
3194
3195 struct flash_comp gen3_flash_types[] = {
3196 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3197 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3198 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3199 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3200 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3201 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3202 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3203 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3204 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3205 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3206 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3207 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3208 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3209 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3210 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3211 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3212 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3213 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3214 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3215 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3f0d4560 3216 };
c165541e
PR
3217
3218 struct flash_comp gen2_flash_types[] = {
3219 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3220 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3221 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3222 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3223 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3224 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3225 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3226 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3227 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3228 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3229 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3230 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3231 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3232 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3233 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3234 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3f0d4560
AK
3235 };
3236
ca34fe38 3237 if (BE3_chip(adapter)) {
3f0d4560
AK
3238 pflashcomp = gen3_flash_types;
3239 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 3240 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
3241 } else {
3242 pflashcomp = gen2_flash_types;
3243 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 3244 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 3245 }
ca34fe38 3246
c165541e
PR
3247 /* Get flash section info */
3248 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3249 if (!fsec) {
3250 dev_err(&adapter->pdev->dev,
3251 "Invalid Cookie. UFI corrupted ?\n");
3252 return -1;
3253 }
9fe96934 3254 for (i = 0; i < num_comp; i++) {
c165541e 3255 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
9fe96934 3256 continue;
c165541e
PR
3257
3258 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3259 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3260 continue;
3261
773a2d7c
PR
3262 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3263 !phy_flashing_required(adapter))
306f1348 3264 continue;
c165541e 3265
773a2d7c
PR
3266 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3267 redboot = be_flash_redboot(adapter, fw->data,
3268 pflashcomp[i].offset, pflashcomp[i].size,
3269 filehdr_size + img_hdrs_size);
3270 if (!redboot)
3271 continue;
3272 }
c165541e 3273
3f0d4560 3274 p = fw->data;
c165541e 3275 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
306f1348
SP
3276 if (p + pflashcomp[i].size > fw->data + fw->size)
3277 return -1;
773a2d7c
PR
3278
3279 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3280 pflashcomp[i].size);
3281 if (status) {
3282 dev_err(&adapter->pdev->dev,
3283 "Flashing section type %d failed.\n",
3284 pflashcomp[i].img_type);
3285 return status;
84517482 3286 }
84517482 3287 }
84517482
AK
3288 return 0;
3289}
3290
773a2d7c
PR
3291static int be_flash_skyhawk(struct be_adapter *adapter,
3292 const struct firmware *fw,
3293 struct be_dma_mem *flash_cmd, int num_of_images)
3f0d4560 3294{
773a2d7c
PR
3295 int status = 0, i, filehdr_size = 0;
3296 int img_offset, img_size, img_optype, redboot;
3297 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3298 const u8 *p = fw->data;
3299 struct flash_section_info *fsec = NULL;
3300
3301 filehdr_size = sizeof(struct flash_file_hdr_g3);
3302 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3303 if (!fsec) {
3304 dev_err(&adapter->pdev->dev,
3305 "Invalid Cookie. UFI corrupted ?\n");
3306 return -1;
3307 }
3308
3309 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3310 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3311 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3312
3313 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3314 case IMAGE_FIRMWARE_iSCSI:
3315 img_optype = OPTYPE_ISCSI_ACTIVE;
3316 break;
3317 case IMAGE_BOOT_CODE:
3318 img_optype = OPTYPE_REDBOOT;
3319 break;
3320 case IMAGE_OPTION_ROM_ISCSI:
3321 img_optype = OPTYPE_BIOS;
3322 break;
3323 case IMAGE_OPTION_ROM_PXE:
3324 img_optype = OPTYPE_PXE_BIOS;
3325 break;
3326 case IMAGE_OPTION_ROM_FCoE:
3327 img_optype = OPTYPE_FCOE_BIOS;
3328 break;
3329 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3330 img_optype = OPTYPE_ISCSI_BACKUP;
3331 break;
3332 case IMAGE_NCSI:
3333 img_optype = OPTYPE_NCSI_FW;
3334 break;
3335 default:
3336 continue;
3337 }
3338
3339 if (img_optype == OPTYPE_REDBOOT) {
3340 redboot = be_flash_redboot(adapter, fw->data,
3341 img_offset, img_size,
3342 filehdr_size + img_hdrs_size);
3343 if (!redboot)
3344 continue;
3345 }
3346
3347 p = fw->data;
3348 p += filehdr_size + img_offset + img_hdrs_size;
3349 if (p + img_size > fw->data + fw->size)
3350 return -1;
3351
3352 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3353 if (status) {
3354 dev_err(&adapter->pdev->dev,
3355 "Flashing section type %d failed.\n",
3356 fsec->fsec_entry[i].type);
3357 return status;
3358 }
3359 }
3360 return 0;
3f0d4560
AK
3361}
3362
static int lancer_wait_idle(struct be_adapter *adapter)
{
#define SLIPORT_IDLE_TIMEOUT 30
	u32 reg_val;
	int status = 0, i;

	for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
		reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
		if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
			break;

		ssleep(1);
	}

	if (i == SLIPORT_IDLE_TIMEOUT)
		status = -1;

	return status;
}

static int lancer_fw_reset(struct be_adapter *adapter)
{
	int status = 0;

	status = lancer_wait_idle(adapter);
	if (status)
		return status;

	iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
		  PHYSDEV_CONTROL_OFFSET);

	return status;
}

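/* Lancer firmware is downloaded with repeated write-object commands to the
 * "/prg" object: the image is copied into the command buffer in 32KB
 * chunks, and a final zero-length write commits the object. The firmware
 * then reports whether a port reset or a full reboot is needed to make the
 * new image active.
 */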
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW image not properly aligned. "
			"Length must be 4-byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_fw_reset(adapter);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}

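/* The first character of the build string in the UFI file header encodes
 * the ASIC generation the image targets ('2'/'3'/'4'); it must match the
 * generation of the chip being flashed.
 */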
#define UFI_TYPE2		2
#define UFI_TYPE3		3
#define UFI_TYPE4		4
static int be_get_ufi_type(struct be_adapter *adapter,
			   struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		goto be_get_ufi_exit;

	if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
		return UFI_TYPE4;
	else if (BE3_chip(adapter) && fhdr->build[0] == '3')
		return UFI_TYPE3;
	else if (BE2_chip(adapter) && fhdr->build[0] == '2')
		return UFI_TYPE2;

be_get_ufi_exit:
	dev_err(&adapter->pdev->dev,
		"UFI and Interface are not compatible for flashing\n");
	return -1;
}

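/* BE2/BE3/Skyhawk download path: pick the flashing routine that matches the
 * UFI type, iterating over the per-image headers for g3-style files.
 */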
static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr);

	fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			if (ufi_type == UFI_TYPE4)
				status = be_flash_skyhawk(adapter, fw,
							  &flash_cmd, num_imgs);
			else if (ufi_type == UFI_TYPE3)
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
		}
	}

	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

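/* Entry point for user-initiated flashing (e.g. the ethtool flash-device
 * path); the interface must be up so that firmware commands can be issued.
 */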
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

e5686ad8 3601static const struct net_device_ops be_netdev_ops = {
6b7c5b94
SP
3602 .ndo_open = be_open,
3603 .ndo_stop = be_close,
3604 .ndo_start_xmit = be_xmit,
a54769f5 3605 .ndo_set_rx_mode = be_set_rx_mode,
6b7c5b94
SP
3606 .ndo_set_mac_address = be_mac_addr_set,
3607 .ndo_change_mtu = be_change_mtu,
ab1594e9 3608 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 3609 .ndo_validate_addr = eth_validate_addr,
6b7c5b94
SP
3610 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3611 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 3612 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 3613 .ndo_set_vf_vlan = be_set_vf_vlan,
e1d18735 3614 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
66268739
IV
3615 .ndo_get_vf_config = be_get_vf_config,
3616#ifdef CONFIG_NET_POLL_CONTROLLER
3617 .ndo_poll_controller = be_netpoll,
3618#endif
6b7c5b94
SP
3619};
3620
3621static void be_netdev_init(struct net_device *netdev)
3622{
3623 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3624 struct be_eq_obj *eqo;
3abcdeda 3625 int i;
6b7c5b94 3626
6332c8d3 3627 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
8b8ddc68
MM
3628 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3629 NETIF_F_HW_VLAN_TX;
3630 if (be_multi_rxq(adapter))
3631 netdev->hw_features |= NETIF_F_RXHASH;
6332c8d3
MM
3632
3633 netdev->features |= netdev->hw_features |
8b8ddc68 3634 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
4b972914 3635
eb8a50d9 3636 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 3637 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 3638
fbc13f01
AK
3639 netdev->priv_flags |= IFF_UNICAST_FLT;
3640
6b7c5b94
SP
3641 netdev->flags |= IFF_MULTICAST;
3642
b7e5887e 3643 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
c190e3c8 3644
10ef9ab4 3645 netdev->netdev_ops = &be_netdev_ops;
6b7c5b94
SP
3646
3647 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3648
10ef9ab4
SP
3649 for_all_evt_queues(adapter, eqo, i)
3650 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
6b7c5b94
SP
3651}
3652
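/* Only the doorbell BAR is mapped by the driver: BAR 0 on Lancer and on
 * virtual functions, BAR 4 on BE physical functions (see db_bar() below).
 */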
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}

static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || !be_physfn(adapter))
		return 0;
	else
		return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
				SLI_INTF_IF_TYPE_SHIFT;

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

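/* The mailbox must be 16-byte aligned for the hardware, so a buffer one
 * mailbox plus 16 bytes long is allocated, and both the CPU and the DMA
 * addresses are rounded up to the next 16-byte boundary.
 */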
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					   &rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (lancer_chip(adapter))
		cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else
		/* BE3 and Skyhawk */
		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);

	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

bool be_is_wol_supported(struct be_adapter *adapter)
{
	return (adapter->wol_cap & BE_WOL_CAP) &&
		!be_is_wol_excluded(adapter);
}

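/* Query the extended FAT capabilities and report the UART trace level of
 * module 0; Lancer does not support this query, so 0 is returned there.
 */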
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	if (lancer_chip(adapter))
		return 0;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}

39f1d94d 3878static int be_get_initial_config(struct be_adapter *adapter)
6b7c5b94 3879{
6b7c5b94 3880 int status;
941a77d5 3881 u32 level;
6b7c5b94 3882
9e1453c5
AK
3883 status = be_cmd_get_cntl_attributes(adapter);
3884 if (status)
3885 return status;
3886
4762f6ce
AK
3887 status = be_cmd_get_acpi_wol_cap(adapter);
3888 if (status) {
3889 /* in case of a failure to get wol capabillities
3890 * check the exclusion list to determine WOL capability */
3891 if (!be_is_wol_excluded(adapter))
3892 adapter->wol_cap |= BE_WOL_CAP;
3893 }
3894
3895 if (be_is_wol_supported(adapter))
3896 adapter->wol = true;
3897
7aeb2156
PR
3898 /* Must be a power of 2 or else MODULO will BUG_ON */
3899 adapter->be_get_temp_freq = 64;
3900
941a77d5
SK
3901 level = be_get_fw_log_level(adapter);
3902 adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3903
2243e2e9 3904 return 0;
6b7c5b94
SP
3905}
3906
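/* Recover a Lancer function after a firmware error: wait for the SLIPORT
 * to report ready, tear the function down, then rebuild it, reopening the
 * interface if it was running before the error.
 */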
f67ef7ba 3907static int lancer_recover_func(struct be_adapter *adapter)
d8110f62
PR
3908{
3909 int status;
d8110f62 3910
f67ef7ba
PR
3911 status = lancer_test_and_set_rdy_state(adapter);
3912 if (status)
3913 goto err;
d8110f62 3914
f67ef7ba
PR
3915 if (netif_running(adapter->netdev))
3916 be_close(adapter->netdev);
d8110f62 3917
f67ef7ba
PR
3918 be_clear(adapter);
3919
3920 adapter->hw_error = false;
3921 adapter->fw_timeout = false;
3922
3923 status = be_setup(adapter);
3924 if (status)
3925 goto err;
d8110f62 3926
f67ef7ba
PR
3927 if (netif_running(adapter->netdev)) {
3928 status = be_open(adapter->netdev);
d8110f62
PR
3929 if (status)
3930 goto err;
f67ef7ba 3931 }
d8110f62 3932
f67ef7ba
PR
3933 dev_err(&adapter->pdev->dev,
3934 "Adapter SLIPORT recovery succeeded\n");
3935 return 0;
3936err:
67297ad8
PR
3937 if (adapter->eeh_error)
3938 dev_err(&adapter->pdev->dev,
3939 "Adapter SLIPORT recovery failed\n");
d8110f62 3940
f67ef7ba
PR
3941 return status;
3942}
3943
3944static void be_func_recovery_task(struct work_struct *work)
3945{
3946 struct be_adapter *adapter =
3947 container_of(work, struct be_adapter, func_recovery_work.work);
3948 int status;
d8110f62 3949
f67ef7ba 3950 be_detect_error(adapter);
d8110f62 3951
f67ef7ba 3952 if (adapter->hw_error && lancer_chip(adapter)) {
d8110f62 3953
f67ef7ba
PR
3954 if (adapter->eeh_error)
3955 goto out;
d8110f62 3956
f67ef7ba
PR
3957 rtnl_lock();
3958 netif_device_detach(adapter->netdev);
3959 rtnl_unlock();
d8110f62 3960
f67ef7ba 3961 status = lancer_recover_func(adapter);
d8110f62 3962
f67ef7ba
PR
3963 if (!status)
3964 netif_device_attach(adapter->netdev);
d8110f62 3965 }
f67ef7ba
PR
3966
3967out:
3968 schedule_delayed_work(&adapter->func_recovery_work,
3969 msecs_to_jiffies(1000));
d8110f62
PR
3970}
3971
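/* Per-second housekeeping: reap MCC completions while interrupts are still
 * off, refresh stats and die temperature, replenish starved RX rings and
 * adapt the EQ delay, then re-arm itself.
 */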
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static bool be_reset_required(struct be_adapter *adapter)
{
	return be_find_vfs(adapter, ENABLED) <= 0;
}

static char *mc_name(struct be_adapter *adapter)
{
	if (adapter->function_mode & FLEX10_MODE)
		return "FLEX10";
	else if (adapter->function_mode & VNIC_MODE)
		return "vNIC";
	else if (adapter->function_mode & UMC_ENABLED)
		return "UMC";
	else
		return "";
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

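/* One-time PCI probe: bring up the device and its DMA mappings, sync with
 * firmware, reset the function if no VFs are already enabled, and only
 * then create the queues and register the netdev.
 */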
1dd06ae8 4037static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
6b7c5b94
SP
4038{
4039 int status = 0;
4040 struct be_adapter *adapter;
4041 struct net_device *netdev;
b4e32a71 4042 char port_name;
6b7c5b94
SP
4043
4044 status = pci_enable_device(pdev);
4045 if (status)
4046 goto do_none;
4047
4048 status = pci_request_regions(pdev, DRV_NAME);
4049 if (status)
4050 goto disable_dev;
4051 pci_set_master(pdev);
4052
7f640062 4053 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
6b7c5b94
SP
4054 if (netdev == NULL) {
4055 status = -ENOMEM;
4056 goto rel_reg;
4057 }
4058 adapter = netdev_priv(netdev);
4059 adapter->pdev = pdev;
4060 pci_set_drvdata(pdev, adapter);
4061 adapter->netdev = netdev;
2243e2e9 4062 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 4063
2b7bcebf 4064 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
6b7c5b94
SP
4065 if (!status) {
4066 netdev->features |= NETIF_F_HIGHDMA;
4067 } else {
2b7bcebf 4068 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
6b7c5b94
SP
4069 if (status) {
4070 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4071 goto free_netdev;
4072 }
4073 }
4074
d6b6d987
SP
4075 status = pci_enable_pcie_error_reporting(pdev);
4076 if (status)
4077 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4078
6b7c5b94
SP
4079 status = be_ctrl_init(adapter);
4080 if (status)
39f1d94d 4081 goto free_netdev;
6b7c5b94 4082
2243e2e9 4083 /* sync up with fw's ready state */
ba343c77 4084 if (be_physfn(adapter)) {
bf99e50d 4085 status = be_fw_wait_ready(adapter);
ba343c77
SB
4086 if (status)
4087 goto ctrl_clean;
ba343c77 4088 }
6b7c5b94 4089
2243e2e9
SP
4090 /* tell fw we're ready to fire cmds */
4091 status = be_cmd_fw_init(adapter);
6b7c5b94 4092 if (status)
2243e2e9
SP
4093 goto ctrl_clean;
4094
39f1d94d
SP
4095 if (be_reset_required(adapter)) {
4096 status = be_cmd_reset_function(adapter);
4097 if (status)
4098 goto ctrl_clean;
4099 }
556ae191 4100
10ef9ab4
SP
4101 /* The INTR bit may be set in the card when probed by a kdump kernel
4102 * after a crash.
4103 */
4104 if (!lancer_chip(adapter))
4105 be_intr_set(adapter, false);
4106
2243e2e9
SP
4107 status = be_stats_init(adapter);
4108 if (status)
4109 goto ctrl_clean;
4110
39f1d94d 4111 status = be_get_initial_config(adapter);
6b7c5b94
SP
4112 if (status)
4113 goto stats_clean;
6b7c5b94
SP
4114
4115 INIT_DELAYED_WORK(&adapter->work, be_worker);
f67ef7ba 4116 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
a54769f5 4117 adapter->rx_fc = adapter->tx_fc = true;
6b7c5b94 4118
5fb379ee
SP
4119 status = be_setup(adapter);
4120 if (status)
55f5c3c5 4121 goto stats_clean;
2243e2e9 4122
3abcdeda 4123 be_netdev_init(netdev);
6b7c5b94
SP
4124 status = register_netdev(netdev);
4125 if (status != 0)
5fb379ee 4126 goto unsetup;
6b7c5b94 4127
045508a8
PP
4128 be_roce_dev_add(adapter);
4129
f67ef7ba
PR
4130 schedule_delayed_work(&adapter->func_recovery_work,
4131 msecs_to_jiffies(1000));
b4e32a71
PR
4132
4133 be_cmd_query_port_name(adapter, &port_name);
4134
d379142b
SP
4135 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4136 func_name(adapter), mc_name(adapter), port_name);
34b1ef04 4137
6b7c5b94
SP
4138 return 0;
4139
5fb379ee
SP
4140unsetup:
4141 be_clear(adapter);
6b7c5b94
SP
4142stats_clean:
4143 be_stats_cleanup(adapter);
4144ctrl_clean:
4145 be_ctrl_cleanup(adapter);
f9449ab7 4146free_netdev:
fe6d2a38 4147 free_netdev(netdev);
8d56ff11 4148 pci_set_drvdata(pdev, NULL);
6b7c5b94
SP
4149rel_reg:
4150 pci_release_regions(pdev);
4151disable_dev:
4152 pci_disable_device(pdev);
4153do_none:
c4ca2374 4154 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
6b7c5b94
SP
4155 return status;
4156}
4157
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

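/* PCI EEH (AER) recovery: on an error, detach the netdev and tear the
 * function down; on slot reset, re-enable the device and wait for firmware
 * to become ready; on resume, rebuild the function and reattach the netdev.
 */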
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_error = true;

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	rtnl_lock();
	netif_device_detach(netdev);
	rtnl_unlock();

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	be_clear_all_error(adapter);

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);