net: add skb frag size accessors
drivers/net/ethernet/emulex/benet/be_main.c
1 /*
2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@emulex.com
12 *
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
16 */
17
18 #include <linux/prefetch.h>
19 #include "be.h"
20 #include "be_cmds.h"
21 #include <asm/div64.h>
22
23 MODULE_VERSION(DRV_VER);
24 MODULE_DEVICE_TABLE(pci, be_dev_ids);
25 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
26 MODULE_AUTHOR("ServerEngines Corporation");
27 MODULE_LICENSE("GPL");
28
29 static ushort rx_frag_size = 2048;
30 static unsigned int num_vfs;
31 module_param(rx_frag_size, ushort, S_IRUGO);
32 module_param(num_vfs, uint, S_IRUGO);
33 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
34 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
35
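/*
 * Example invocation (values shown are illustrative only; the module
 * name be2net and the accepted rx_frag_size values are assumptions
 * not visible in this file):
 *	modprobe be2net rx_frag_size=4096 num_vfs=2
 * Both parameters are S_IRUGO, i.e. readable but not writable through
 * /sys/module/.../parameters once the driver is loaded.
 */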
36 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
37 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
38 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
39 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
40 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
41 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
42 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
43 { 0 }
44 };
45 MODULE_DEVICE_TABLE(pci, be_dev_ids);
46 /* UE Status Low CSR */
47 static const char * const ue_status_low_desc[] = {
48 "CEV",
49 "CTX",
50 "DBUF",
51 "ERX",
52 "Host",
53 "MPU",
54 "NDMA",
55 "PTC ",
56 "RDMA ",
57 "RXF ",
58 "RXIPS ",
59 "RXULP0 ",
60 "RXULP1 ",
61 "RXULP2 ",
62 "TIM ",
63 "TPOST ",
64 "TPRE ",
65 "TXIPS ",
66 "TXULP0 ",
67 "TXULP1 ",
68 "UC ",
69 "WDMA ",
70 "TXULP2 ",
71 "HOST1 ",
72 "P0_OB_LINK ",
73 "P1_OB_LINK ",
74 "HOST_GPIO ",
75 "MBOX ",
76 "AXGMAC0",
77 "AXGMAC1",
78 "JTAG",
79 "MPU_INTPEND"
80 };
81 /* UE Status High CSR */
82 static const char * const ue_status_hi_desc[] = {
83 "LPCMEMHOST",
84 "MGMT_MAC",
85 "PCS0ONLINE",
86 "MPU_IRAM",
87 "PCS1ONLINE",
88 "PCTL0",
89 "PCTL1",
90 "PMEM",
91 "RR",
92 "TXPB",
93 "RXPP",
94 "XAUI",
95 "TXP",
96 "ARM",
97 "IPC",
98 "HOST2",
99 "HOST3",
100 "HOST4",
101 "HOST5",
102 "HOST6",
103 "HOST7",
104 "HOST8",
105 "HOST9",
106 "NETC",
107 "Unknown",
108 "Unknown",
109 "Unknown",
110 "Unknown",
111 "Unknown",
112 "Unknown",
113 "Unknown",
114 "Unknown"
115 };
116
117 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
118 {
119 struct be_dma_mem *mem = &q->dma_mem;
120 if (mem->va)
121 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
122 mem->dma);
123 }
124
125 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
126 u16 len, u16 entry_size)
127 {
128 struct be_dma_mem *mem = &q->dma_mem;
129
130 memset(q, 0, sizeof(*q));
131 q->len = len;
132 q->entry_size = entry_size;
133 mem->size = len * entry_size;
134 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
135 GFP_KERNEL);
136 if (!mem->va)
137 return -1;
138 memset(mem->va, 0, mem->size);
139 return 0;
140 }
141
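/*
 * be_intr_set() toggles the host interrupt enable bit in the
 * MEMBAR_CTRL_INT_CTRL register in PCI config space.  The register is
 * read back first so the write is skipped when the requested state is
 * already in effect, and the routine does nothing once an EEH error
 * has been flagged.
 */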
142 static void be_intr_set(struct be_adapter *adapter, bool enable)
143 {
144 u32 reg, enabled;
145
146 if (adapter->eeh_err)
147 return;
148
149 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
150 &reg);
151 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
152
153 if (!enabled && enable)
154 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
155 else if (enabled && !enable)
156 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
157 else
158 return;
159
160 pci_write_config_dword(adapter->pdev,
161 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
162 }
163
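/*
 * Doorbell helpers: a queue is kicked by writing a 32-bit value to its
 * doorbell offset in the BAR mapped at adapter->db.  The low bits
 * carry the ring id and the remaining fields carry the number of
 * entries posted (RQ/TXQ) or popped (EQ/CQ) plus re-arm/clear-int
 * flags for the event and completion queues.  The wmb() in the RQ/TXQ
 * post paths makes sure descriptor writes are visible to the device
 * before the doorbell write.
 */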
164 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
165 {
166 u32 val = 0;
167 val |= qid & DB_RQ_RING_ID_MASK;
168 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
169
170 wmb();
171 iowrite32(val, adapter->db + DB_RQ_OFFSET);
172 }
173
174 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
175 {
176 u32 val = 0;
177 val |= qid & DB_TXULP_RING_ID_MASK;
178 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
179
180 wmb();
181 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
182 }
183
184 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
185 bool arm, bool clear_int, u16 num_popped)
186 {
187 u32 val = 0;
188 val |= qid & DB_EQ_RING_ID_MASK;
189 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
190 DB_EQ_RING_ID_EXT_MASK_SHIFT);
191
192 if (adapter->eeh_err)
193 return;
194
195 if (arm)
196 val |= 1 << DB_EQ_REARM_SHIFT;
197 if (clear_int)
198 val |= 1 << DB_EQ_CLR_SHIFT;
199 val |= 1 << DB_EQ_EVNT_SHIFT;
200 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
201 iowrite32(val, adapter->db + DB_EQ_OFFSET);
202 }
203
204 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
205 {
206 u32 val = 0;
207 val |= qid & DB_CQ_RING_ID_MASK;
208 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
209 DB_CQ_RING_ID_EXT_MASK_SHIFT);
210
211 if (adapter->eeh_err)
212 return;
213
214 if (arm)
215 val |= 1 << DB_CQ_REARM_SHIFT;
216 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
217 iowrite32(val, adapter->db + DB_CQ_OFFSET);
218 }
219
220 static int be_mac_addr_set(struct net_device *netdev, void *p)
221 {
222 struct be_adapter *adapter = netdev_priv(netdev);
223 struct sockaddr *addr = p;
224 int status = 0;
225
226 if (!is_valid_ether_addr(addr->sa_data))
227 return -EADDRNOTAVAIL;
228
229 /* MAC addr configuration will be done in hardware for VFs
230 * by their corresponding PFs. Just copy to netdev addr here
231 */
232 if (!be_physfn(adapter))
233 goto netdev_addr;
234
235 status = be_cmd_pmac_del(adapter, adapter->if_handle,
236 adapter->pmac_id, 0);
237 if (status)
238 return status;
239
240 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
241 adapter->if_handle, &adapter->pmac_id, 0);
242 netdev_addr:
243 if (!status)
244 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
245
246 return status;
247 }
248
249 static void populate_be2_stats(struct be_adapter *adapter)
250 {
251 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
252 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
253 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
254 struct be_port_rxf_stats_v0 *port_stats =
255 &rxf_stats->port[adapter->port_num];
256 struct be_drv_stats *drvs = &adapter->drv_stats;
257
258 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
259 drvs->rx_pause_frames = port_stats->rx_pause_frames;
260 drvs->rx_crc_errors = port_stats->rx_crc_errors;
261 drvs->rx_control_frames = port_stats->rx_control_frames;
262 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
263 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
264 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
265 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
266 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
267 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
268 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
269 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
270 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
271 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
272 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
273 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
274 drvs->rx_dropped_header_too_small =
275 port_stats->rx_dropped_header_too_small;
276 drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
277 drvs->rx_alignment_symbol_errors =
278 port_stats->rx_alignment_symbol_errors;
279
280 drvs->tx_pauseframes = port_stats->tx_pauseframes;
281 drvs->tx_controlframes = port_stats->tx_controlframes;
282
283 if (adapter->port_num)
284 drvs->jabber_events = rxf_stats->port1_jabber_events;
285 else
286 drvs->jabber_events = rxf_stats->port0_jabber_events;
287 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
288 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
289 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
290 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
291 drvs->forwarded_packets = rxf_stats->forwarded_packets;
292 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
293 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
294 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
295 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
296 }
297
298 static void populate_be3_stats(struct be_adapter *adapter)
299 {
300 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
301 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
302 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
303 struct be_port_rxf_stats_v1 *port_stats =
304 &rxf_stats->port[adapter->port_num];
305 struct be_drv_stats *drvs = &adapter->drv_stats;
306
307 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
308 drvs->rx_pause_frames = port_stats->rx_pause_frames;
309 drvs->rx_crc_errors = port_stats->rx_crc_errors;
310 drvs->rx_control_frames = port_stats->rx_control_frames;
311 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
312 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
313 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
314 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
315 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
316 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
317 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
318 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
319 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
320 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
321 drvs->rx_dropped_header_too_small =
322 port_stats->rx_dropped_header_too_small;
323 drvs->rx_input_fifo_overflow_drop =
324 port_stats->rx_input_fifo_overflow_drop;
325 drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
326 drvs->rx_alignment_symbol_errors =
327 port_stats->rx_alignment_symbol_errors;
328 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
329 drvs->tx_pauseframes = port_stats->tx_pauseframes;
330 drvs->tx_controlframes = port_stats->tx_controlframes;
331 drvs->jabber_events = port_stats->jabber_events;
332 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
333 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
334 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
335 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
336 drvs->forwarded_packets = rxf_stats->forwarded_packets;
337 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
338 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
339 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
340 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
341 }
342
343 static void populate_lancer_stats(struct be_adapter *adapter)
344 {
345
346 struct be_drv_stats *drvs = &adapter->drv_stats;
347 struct lancer_pport_stats *pport_stats =
348 pport_stats_from_cmd(adapter);
349
350 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
351 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
352 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
353 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
354 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
355 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
356 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
357 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
358 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
359 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
360 drvs->rx_dropped_tcp_length =
361 pport_stats->rx_dropped_invalid_tcp_length;
362 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
363 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
364 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
365 drvs->rx_dropped_header_too_small =
366 pport_stats->rx_dropped_header_too_small;
367 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
368 drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
369 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
370 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
371 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
372 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
373 drvs->jabber_events = pport_stats->rx_jabbers;
374 drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
375 drvs->forwarded_packets = pport_stats->num_forwards_lo;
376 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
377 drvs->rx_drops_too_many_frags =
378 pport_stats->rx_drops_too_many_frags_lo;
379 }
380
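/*
 * Some hardware counters are only 16 bits wide and wrap at 65535.
 * accumulate_16bit_val() folds such a counter into a 32-bit software
 * accumulator: the low 16 bits always mirror the hardware value, and a
 * wrap is detected when the new reading is smaller than the previous
 * low half.  For example, with *acc = 0x0001fff0 and val = 5 the
 * result is 0x00010000 + 5 + 65536 = 0x00020005.
 */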
381 static void accumulate_16bit_val(u32 *acc, u16 val)
382 {
383 #define lo(x) (x & 0xFFFF)
384 #define hi(x) (x & 0xFFFF0000)
385 bool wrapped = val < lo(*acc);
386 u32 newacc = hi(*acc) + val;
387
388 if (wrapped)
389 newacc += 65536;
390 ACCESS_ONCE(*acc) = newacc;
391 }
392
393 void be_parse_stats(struct be_adapter *adapter)
394 {
395 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
396 struct be_rx_obj *rxo;
397 int i;
398
399 if (adapter->generation == BE_GEN3) {
400 if (lancer_chip(adapter))
401 populate_lancer_stats(adapter);
402 else
403 populate_be3_stats(adapter);
404 } else {
405 populate_be2_stats(adapter);
406 }
407
408 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
409 for_all_rx_queues(adapter, rxo, i) {
410 /* below erx HW counter can actually wrap around after
411 * 65535. Driver accumulates a 32-bit value
412 */
413 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
414 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
415 }
416 }
417
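/*
 * ndo_get_stats64 handler.  Per-queue packet/byte counters are read
 * inside the u64_stats_fetch_begin_bh()/retry loop so 64-bit values
 * are sampled consistently on 32-bit hosts, then summed into the
 * rtnl_link_stats64 totals along with the driver-maintained error
 * counters.
 */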
418 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
419 struct rtnl_link_stats64 *stats)
420 {
421 struct be_adapter *adapter = netdev_priv(netdev);
422 struct be_drv_stats *drvs = &adapter->drv_stats;
423 struct be_rx_obj *rxo;
424 struct be_tx_obj *txo;
425 u64 pkts, bytes;
426 unsigned int start;
427 int i;
428
429 for_all_rx_queues(adapter, rxo, i) {
430 const struct be_rx_stats *rx_stats = rx_stats(rxo);
431 do {
432 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
433 pkts = rx_stats(rxo)->rx_pkts;
434 bytes = rx_stats(rxo)->rx_bytes;
435 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
436 stats->rx_packets += pkts;
437 stats->rx_bytes += bytes;
438 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
439 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
440 rx_stats(rxo)->rx_drops_no_frags;
441 }
442
443 for_all_tx_queues(adapter, txo, i) {
444 const struct be_tx_stats *tx_stats = tx_stats(txo);
445 do {
446 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
447 pkts = tx_stats(txo)->tx_pkts;
448 bytes = tx_stats(txo)->tx_bytes;
449 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
450 stats->tx_packets += pkts;
451 stats->tx_bytes += bytes;
452 }
453
454 /* bad pkts received */
455 stats->rx_errors = drvs->rx_crc_errors +
456 drvs->rx_alignment_symbol_errors +
457 drvs->rx_in_range_errors +
458 drvs->rx_out_range_errors +
459 drvs->rx_frame_too_long +
460 drvs->rx_dropped_too_small +
461 drvs->rx_dropped_too_short +
462 drvs->rx_dropped_header_too_small +
463 drvs->rx_dropped_tcp_length +
464 drvs->rx_dropped_runt;
465
466 /* detailed rx errors */
467 stats->rx_length_errors = drvs->rx_in_range_errors +
468 drvs->rx_out_range_errors +
469 drvs->rx_frame_too_long;
470
471 stats->rx_crc_errors = drvs->rx_crc_errors;
472
473 /* frame alignment errors */
474 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
475
476 /* receiver fifo overrun */
477 /* drops_no_pbuf is not per i/f, it's per BE card */
478 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
479 drvs->rx_input_fifo_overflow_drop +
480 drvs->rx_drops_no_pbuf;
481 return stats;
482 }
483
484 void be_link_status_update(struct be_adapter *adapter, u32 link_status)
485 {
486 struct net_device *netdev = adapter->netdev;
487
488 /* when link status changes, link speed must be re-queried from card */
489 adapter->link_speed = -1;
490 if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
491 netif_carrier_on(netdev);
492 dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
493 } else {
494 netif_carrier_off(netdev);
495 dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
496 }
497 }
498
499 static void be_tx_stats_update(struct be_tx_obj *txo,
500 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
501 {
502 struct be_tx_stats *stats = tx_stats(txo);
503
504 u64_stats_update_begin(&stats->sync);
505 stats->tx_reqs++;
506 stats->tx_wrbs += wrb_cnt;
507 stats->tx_bytes += copied;
508 stats->tx_pkts += (gso_segs ? gso_segs : 1);
509 if (stopped)
510 stats->tx_stops++;
511 u64_stats_update_end(&stats->sync);
512 }
513
514 /* Determine number of WRB entries needed to xmit data in an skb */
515 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
516 bool *dummy)
517 {
518 int cnt = (skb->len > skb->data_len);
519
520 cnt += skb_shinfo(skb)->nr_frags;
521
522 /* to account for hdr wrb */
523 cnt++;
524 if (lancer_chip(adapter) || !(cnt & 1)) {
525 *dummy = false;
526 } else {
527 /* add a dummy to make it an even num */
528 cnt++;
529 *dummy = true;
530 }
531 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
532 return cnt;
533 }
534
535 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
536 {
537 wrb->frag_pa_hi = upper_32_bits(addr);
538 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
539 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
540 }
541
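/*
 * wrb_fill_hdr() builds the per-packet header WRB: it requests CRC
 * insertion, programs LSO with the gso_size (using the LSO6 variant
 * for IPv6 GSO on non-Lancer chips), falls back to TCP/UDP checksum
 * offload for CHECKSUM_PARTIAL skbs, and inserts the VLAN tag,
 * substituting the recommended priority when the OS-supplied priority
 * is not in the adapter's priority bitmap.  num_wrb and len describe
 * the WRBs and payload bytes that follow.
 */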
542 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
543 struct sk_buff *skb, u32 wrb_cnt, u32 len)
544 {
545 u8 vlan_prio = 0;
546 u16 vlan_tag = 0;
547
548 memset(hdr, 0, sizeof(*hdr));
549
550 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
551
552 if (skb_is_gso(skb)) {
553 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
554 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
555 hdr, skb_shinfo(skb)->gso_size);
556 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
557 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
558 if (lancer_chip(adapter) && adapter->sli_family ==
559 LANCER_A0_SLI_FAMILY) {
560 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
561 if (is_tcp_pkt(skb))
562 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
563 tcpcs, hdr, 1);
564 else if (is_udp_pkt(skb))
565 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
566 udpcs, hdr, 1);
567 }
568 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
569 if (is_tcp_pkt(skb))
570 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
571 else if (is_udp_pkt(skb))
572 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
573 }
574
575 if (vlan_tx_tag_present(skb)) {
576 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
577 vlan_tag = vlan_tx_tag_get(skb);
578 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
579 /* If vlan priority provided by OS is NOT in available bmap */
580 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
581 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
582 adapter->recommended_prio;
583 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
584 }
585
586 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
587 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
588 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
589 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
590 }
591
592 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
593 bool unmap_single)
594 {
595 dma_addr_t dma;
596
597 be_dws_le_to_cpu(wrb, sizeof(*wrb));
598
599 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
600 if (wrb->frag_len) {
601 if (unmap_single)
602 dma_unmap_single(dev, dma, wrb->frag_len,
603 DMA_TO_DEVICE);
604 else
605 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
606 }
607 }
608
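/*
 * make_tx_wrbs() DMA-maps an skb and writes one WRB per mapping into
 * the TX ring: the linear head (if any) is mapped with
 * dma_map_single(), each page fragment with skb_frag_dma_map(), and an
 * optional zero-length dummy WRB pads the count to an even number.
 * The header WRB reserved at the original head position is filled in
 * last.  On a mapping error the ring head is rewound, every WRB mapped
 * so far is unmapped, and 0 is returned.
 */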
609 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
610 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
611 {
612 dma_addr_t busaddr;
613 int i, copied = 0;
614 struct device *dev = &adapter->pdev->dev;
615 struct sk_buff *first_skb = skb;
616 struct be_eth_wrb *wrb;
617 struct be_eth_hdr_wrb *hdr;
618 bool map_single = false;
619 u16 map_head;
620
621 hdr = queue_head_node(txq);
622 queue_head_inc(txq);
623 map_head = txq->head;
624
625 if (skb->len > skb->data_len) {
626 int len = skb_headlen(skb);
627 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
628 if (dma_mapping_error(dev, busaddr))
629 goto dma_err;
630 map_single = true;
631 wrb = queue_head_node(txq);
632 wrb_fill(wrb, busaddr, len);
633 be_dws_cpu_to_le(wrb, sizeof(*wrb));
634 queue_head_inc(txq);
635 copied += len;
636 }
637
638 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
639 const struct skb_frag_struct *frag =
640 &skb_shinfo(skb)->frags[i];
641 busaddr = skb_frag_dma_map(dev, frag, 0,
642 skb_frag_size(frag), DMA_TO_DEVICE);
643 if (dma_mapping_error(dev, busaddr))
644 goto dma_err;
645 wrb = queue_head_node(txq);
646 wrb_fill(wrb, busaddr, skb_frag_size(frag));
647 be_dws_cpu_to_le(wrb, sizeof(*wrb));
648 queue_head_inc(txq);
649 copied += skb_frag_size(frag);
650 }
651
652 if (dummy_wrb) {
653 wrb = queue_head_node(txq);
654 wrb_fill(wrb, 0, 0);
655 be_dws_cpu_to_le(wrb, sizeof(*wrb));
656 queue_head_inc(txq);
657 }
658
659 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
660 be_dws_cpu_to_le(hdr, sizeof(*hdr));
661
662 return copied;
663 dma_err:
664 txq->head = map_head;
665 while (copied) {
666 wrb = queue_head_node(txq);
667 unmap_tx_frag(dev, wrb, map_single);
668 map_single = false;
669 copied -= wrb->frag_len;
670 queue_head_inc(txq);
671 }
672 return 0;
673 }
674
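/*
 * be_xmit() is the ndo_start_xmit handler.  It computes the WRB count
 * for the skb, builds the WRBs, records the skb in sent_skb_list at
 * the ring position of its header WRB, and stops the subqueue before
 * ringing the doorbell if the ring cannot accept another maximally
 * fragmented packet, so only the completion of this transmit can wake
 * the queue.  If WRB setup fails the ring head is restored and the
 * skb is dropped.
 */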
675 static netdev_tx_t be_xmit(struct sk_buff *skb,
676 struct net_device *netdev)
677 {
678 struct be_adapter *adapter = netdev_priv(netdev);
679 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
680 struct be_queue_info *txq = &txo->q;
681 u32 wrb_cnt = 0, copied = 0;
682 u32 start = txq->head;
683 bool dummy_wrb, stopped = false;
684
685 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
686
687 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
688 if (copied) {
689 /* record the sent skb in the sent_skb table */
690 BUG_ON(txo->sent_skb_list[start]);
691 txo->sent_skb_list[start] = skb;
692
693 /* Ensure txq has space for the next skb; Else stop the queue
694 * *BEFORE* ringing the tx doorbell, so that we serialize the
695 * tx compls of the current transmit which'll wake up the queue
696 */
697 atomic_add(wrb_cnt, &txq->used);
698 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
699 txq->len) {
700 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
701 stopped = true;
702 }
703
704 be_txq_notify(adapter, txq->id, wrb_cnt);
705
706 be_tx_stats_update(txo, wrb_cnt, copied,
707 skb_shinfo(skb)->gso_segs, stopped);
708 } else {
709 txq->head = start;
710 dev_kfree_skb_any(skb);
711 }
712 return NETDEV_TX_OK;
713 }
714
715 static int be_change_mtu(struct net_device *netdev, int new_mtu)
716 {
717 struct be_adapter *adapter = netdev_priv(netdev);
718 if (new_mtu < BE_MIN_MTU ||
719 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
720 (ETH_HLEN + ETH_FCS_LEN))) {
721 dev_info(&adapter->pdev->dev,
722 "MTU must be between %d and %d bytes\n",
723 BE_MIN_MTU,
724 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
725 return -EINVAL;
726 }
727 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
728 netdev->mtu, new_mtu);
729 netdev->mtu = new_mtu;
730 return 0;
731 }
732
733 /*
734 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
735 * If the user configures more, place BE in vlan promiscuous mode.
736 */
737 static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
738 {
739 u16 vtag[BE_NUM_VLANS_SUPPORTED];
740 u16 ntags = 0, i;
741 int status = 0;
742 u32 if_handle;
743
744 if (vf) {
745 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
746 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
747 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
748 }
749
750 /* No need to further configure vids if in promiscuous mode */
751 if (adapter->promiscuous)
752 return 0;
753
754 if (adapter->vlans_added <= adapter->max_vlans) {
755 /* Construct VLAN Table to give to HW */
756 for (i = 0; i < VLAN_N_VID; i++) {
757 if (adapter->vlan_tag[i]) {
758 vtag[ntags] = cpu_to_le16(i);
759 ntags++;
760 }
761 }
762 status = be_cmd_vlan_config(adapter, adapter->if_handle,
763 vtag, ntags, 1, 0);
764 } else {
765 status = be_cmd_vlan_config(adapter, adapter->if_handle,
766 NULL, 0, 1, 1);
767 }
768
769 return status;
770 }
771
772 static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
773 {
774 struct be_adapter *adapter = netdev_priv(netdev);
775
776 adapter->vlans_added++;
777 if (!be_physfn(adapter))
778 return;
779
780 adapter->vlan_tag[vid] = 1;
781 if (adapter->vlans_added <= (adapter->max_vlans + 1))
782 be_vid_config(adapter, false, 0);
783 }
784
785 static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
786 {
787 struct be_adapter *adapter = netdev_priv(netdev);
788
789 adapter->vlans_added--;
790
791 if (!be_physfn(adapter))
792 return;
793
794 adapter->vlan_tag[vid] = 0;
795 if (adapter->vlans_added <= adapter->max_vlans)
796 be_vid_config(adapter, false, 0);
797 }
798
799 static void be_set_multicast_list(struct net_device *netdev)
800 {
801 struct be_adapter *adapter = netdev_priv(netdev);
802
803 if (netdev->flags & IFF_PROMISC) {
804 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
805 adapter->promiscuous = true;
806 goto done;
807 }
808
809 /* BE was previously in promiscuous mode; disable it */
810 if (adapter->promiscuous) {
811 adapter->promiscuous = false;
812 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
813
814 if (adapter->vlans_added)
815 be_vid_config(adapter, false, 0);
816 }
817
818 /* Enable multicast promisc if num configured exceeds what we support */
819 if (netdev->flags & IFF_ALLMULTI ||
820 netdev_mc_count(netdev) > BE_MAX_MC) {
821 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
822 goto done;
823 }
824
825 be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
826 done:
827 return;
828 }
829
830 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
831 {
832 struct be_adapter *adapter = netdev_priv(netdev);
833 int status;
834
835 if (!adapter->sriov_enabled)
836 return -EPERM;
837
838 if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
839 return -EINVAL;
840
841 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
842 status = be_cmd_pmac_del(adapter,
843 adapter->vf_cfg[vf].vf_if_handle,
844 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
845
846 status = be_cmd_pmac_add(adapter, mac,
847 adapter->vf_cfg[vf].vf_if_handle,
848 &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
849
850 if (status)
851 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
852 mac, vf);
853 else
854 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
855
856 return status;
857 }
858
859 static int be_get_vf_config(struct net_device *netdev, int vf,
860 struct ifla_vf_info *vi)
861 {
862 struct be_adapter *adapter = netdev_priv(netdev);
863
864 if (!adapter->sriov_enabled)
865 return -EPERM;
866
867 if (vf >= num_vfs)
868 return -EINVAL;
869
870 vi->vf = vf;
871 vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
872 vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
873 vi->qos = 0;
874 memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
875
876 return 0;
877 }
878
879 static int be_set_vf_vlan(struct net_device *netdev,
880 int vf, u16 vlan, u8 qos)
881 {
882 struct be_adapter *adapter = netdev_priv(netdev);
883 int status = 0;
884
885 if (!adapter->sriov_enabled)
886 return -EPERM;
887
888 if ((vf >= num_vfs) || (vlan > 4095))
889 return -EINVAL;
890
891 if (vlan) {
892 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
893 adapter->vlans_added++;
894 } else {
895 adapter->vf_cfg[vf].vf_vlan_tag = 0;
896 adapter->vlans_added--;
897 }
898
899 status = be_vid_config(adapter, true, vf);
900
901 if (status)
902 dev_info(&adapter->pdev->dev,
903 "VLAN %d config on VF %d failed\n", vlan, vf);
904 return status;
905 }
906
907 static int be_set_vf_tx_rate(struct net_device *netdev,
908 int vf, int rate)
909 {
910 struct be_adapter *adapter = netdev_priv(netdev);
911 int status = 0;
912
913 if (!adapter->sriov_enabled)
914 return -EPERM;
915
916 if ((vf >= num_vfs) || (rate < 0))
917 return -EINVAL;
918
919 if (rate > 10000)
920 rate = 10000;
921
922 adapter->vf_cfg[vf].vf_tx_rate = rate;
923 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
924
925 if (status)
926 dev_info(&adapter->pdev->dev,
927 "tx rate %d on VF %d failed\n", rate, vf);
928 return status;
929 }
930
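/*
 * be_rx_eqd_update() implements adaptive interrupt coalescing for an
 * RX event queue.  At most once a second it derives the packet rate
 * from the u64 stats, maps it to an event-queue delay (rx_pps / 110000,
 * scaled by 8), clamps the result to [min_eqd, max_eqd], forces it to
 * 0 at very low rates, and issues a modify-EQD command only when the
 * value actually changes.
 */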
931 static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
932 {
933 struct be_eq_obj *rx_eq = &rxo->rx_eq;
934 struct be_rx_stats *stats = rx_stats(rxo);
935 ulong now = jiffies;
936 ulong delta = now - stats->rx_jiffies;
937 u64 pkts;
938 unsigned int start, eqd;
939
940 if (!rx_eq->enable_aic)
941 return;
942
943 /* Wrapped around */
944 if (time_before(now, stats->rx_jiffies)) {
945 stats->rx_jiffies = now;
946 return;
947 }
948
949 /* Update once a second */
950 if (delta < HZ)
951 return;
952
953 do {
954 start = u64_stats_fetch_begin_bh(&stats->sync);
955 pkts = stats->rx_pkts;
956 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
957
958 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
959 stats->rx_pkts_prev = pkts;
960 stats->rx_jiffies = now;
961 eqd = stats->rx_pps / 110000;
962 eqd = eqd << 3;
963 if (eqd > rx_eq->max_eqd)
964 eqd = rx_eq->max_eqd;
965 if (eqd < rx_eq->min_eqd)
966 eqd = rx_eq->min_eqd;
967 if (eqd < 10)
968 eqd = 0;
969 if (eqd != rx_eq->cur_eqd) {
970 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
971 rx_eq->cur_eqd = eqd;
972 }
973 }
974
975 static void be_rx_stats_update(struct be_rx_obj *rxo,
976 struct be_rx_compl_info *rxcp)
977 {
978 struct be_rx_stats *stats = rx_stats(rxo);
979
980 u64_stats_update_begin(&stats->sync);
981 stats->rx_compl++;
982 stats->rx_bytes += rxcp->pkt_size;
983 stats->rx_pkts++;
984 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
985 stats->rx_mcast_pkts++;
986 if (rxcp->err)
987 stats->rx_compl_err++;
988 u64_stats_update_end(&stats->sync);
989 }
990
991 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
992 {
993 /* L4 checksum is not reliable for non TCP/UDP packets.
994 * Also ignore ipcksm for ipv6 pkts */
995 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
996 (rxcp->ip_csum || rxcp->ipv6);
997 }
998
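/*
 * RX buffers are rx_frag_size slices of a larger DMA-mapped page
 * (adapter->big_page_size).  get_rx_page_info() looks up the page_info
 * entry for a fragment index, unmaps the backing page only when this
 * fragment is the last user of that page, and decrements the count of
 * posted buffers.
 */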
999 static struct be_rx_page_info *
1000 get_rx_page_info(struct be_adapter *adapter,
1001 struct be_rx_obj *rxo,
1002 u16 frag_idx)
1003 {
1004 struct be_rx_page_info *rx_page_info;
1005 struct be_queue_info *rxq = &rxo->q;
1006
1007 rx_page_info = &rxo->page_info_tbl[frag_idx];
1008 BUG_ON(!rx_page_info->page);
1009
1010 if (rx_page_info->last_page_user) {
1011 dma_unmap_page(&adapter->pdev->dev,
1012 dma_unmap_addr(rx_page_info, bus),
1013 adapter->big_page_size, DMA_FROM_DEVICE);
1014 rx_page_info->last_page_user = false;
1015 }
1016
1017 atomic_dec(&rxq->used);
1018 return rx_page_info;
1019 }
1020
1021 /* Throw away the data in the Rx completion */
1022 static void be_rx_compl_discard(struct be_adapter *adapter,
1023 struct be_rx_obj *rxo,
1024 struct be_rx_compl_info *rxcp)
1025 {
1026 struct be_queue_info *rxq = &rxo->q;
1027 struct be_rx_page_info *page_info;
1028 u16 i, num_rcvd = rxcp->num_rcvd;
1029
1030 for (i = 0; i < num_rcvd; i++) {
1031 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1032 put_page(page_info->page);
1033 memset(page_info, 0, sizeof(*page_info));
1034 index_inc(&rxcp->rxq_idx, rxq->len);
1035 }
1036 }
1037
1038 /*
1039 * skb_fill_rx_data forms a complete skb for an ether frame
1040 * indicated by rxcp.
1041 */
1042 static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
1043 struct sk_buff *skb, struct be_rx_compl_info *rxcp)
1044 {
1045 struct be_queue_info *rxq = &rxo->q;
1046 struct be_rx_page_info *page_info;
1047 u16 i, j;
1048 u16 hdr_len, curr_frag_len, remaining;
1049 u8 *start;
1050
1051 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1052 start = page_address(page_info->page) + page_info->page_offset;
1053 prefetch(start);
1054
1055 /* Copy data in the first descriptor of this completion */
1056 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1057
1058 /* Copy the header portion into skb_data */
1059 hdr_len = min(BE_HDR_LEN, curr_frag_len);
1060 memcpy(skb->data, start, hdr_len);
1061 skb->len = curr_frag_len;
1062 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1063 /* Complete packet has now been moved to data */
1064 put_page(page_info->page);
1065 skb->data_len = 0;
1066 skb->tail += curr_frag_len;
1067 } else {
1068 skb_shinfo(skb)->nr_frags = 1;
1069 skb_frag_set_page(skb, 0, page_info->page);
1070 skb_shinfo(skb)->frags[0].page_offset =
1071 page_info->page_offset + hdr_len;
1072 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1073 skb->data_len = curr_frag_len - hdr_len;
1074 skb->truesize += rx_frag_size;
1075 skb->tail += hdr_len;
1076 }
1077 page_info->page = NULL;
1078
1079 if (rxcp->pkt_size <= rx_frag_size) {
1080 BUG_ON(rxcp->num_rcvd != 1);
1081 return;
1082 }
1083
1084 /* More frags present for this completion */
1085 index_inc(&rxcp->rxq_idx, rxq->len);
1086 remaining = rxcp->pkt_size - curr_frag_len;
1087 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1088 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1089 curr_frag_len = min(remaining, rx_frag_size);
1090
1091 /* Coalesce all frags from the same physical page in one slot */
1092 if (page_info->page_offset == 0) {
1093 /* Fresh page */
1094 j++;
1095 skb_frag_set_page(skb, j, page_info->page);
1096 skb_shinfo(skb)->frags[j].page_offset =
1097 page_info->page_offset;
1098 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1099 skb_shinfo(skb)->nr_frags++;
1100 } else {
1101 put_page(page_info->page);
1102 }
1103
1104 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1105 skb->len += curr_frag_len;
1106 skb->data_len += curr_frag_len;
1107 skb->truesize += rx_frag_size;
1108 remaining -= curr_frag_len;
1109 index_inc(&rxcp->rxq_idx, rxq->len);
1110 page_info->page = NULL;
1111 }
1112 BUG_ON(j > MAX_SKB_FRAGS);
1113 }
1114
1115 /* Process the RX completion indicated by rxcp when GRO is disabled */
1116 static void be_rx_compl_process(struct be_adapter *adapter,
1117 struct be_rx_obj *rxo,
1118 struct be_rx_compl_info *rxcp)
1119 {
1120 struct net_device *netdev = adapter->netdev;
1121 struct sk_buff *skb;
1122
1123 skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
1124 if (unlikely(!skb)) {
1125 rx_stats(rxo)->rx_drops_no_skbs++;
1126 be_rx_compl_discard(adapter, rxo, rxcp);
1127 return;
1128 }
1129
1130 skb_fill_rx_data(adapter, rxo, skb, rxcp);
1131
1132 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1133 skb->ip_summed = CHECKSUM_UNNECESSARY;
1134 else
1135 skb_checksum_none_assert(skb);
1136
1137 skb->protocol = eth_type_trans(skb, netdev);
1138 if (adapter->netdev->features & NETIF_F_RXHASH)
1139 skb->rxhash = rxcp->rss_hash;
1140
1141
1142 if (rxcp->vlanf)
1143 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1144
1145 netif_receive_skb(skb);
1146 }
1147
1148 /* Process the RX completion indicated by rxcp when GRO is enabled */
1149 static void be_rx_compl_process_gro(struct be_adapter *adapter,
1150 struct be_rx_obj *rxo,
1151 struct be_rx_compl_info *rxcp)
1152 {
1153 struct be_rx_page_info *page_info;
1154 struct sk_buff *skb = NULL;
1155 struct be_queue_info *rxq = &rxo->q;
1156 struct be_eq_obj *eq_obj = &rxo->rx_eq;
1157 u16 remaining, curr_frag_len;
1158 u16 i, j;
1159
1160 skb = napi_get_frags(&eq_obj->napi);
1161 if (!skb) {
1162 be_rx_compl_discard(adapter, rxo, rxcp);
1163 return;
1164 }
1165
1166 remaining = rxcp->pkt_size;
1167 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1168 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1169
1170 curr_frag_len = min(remaining, rx_frag_size);
1171
1172 /* Coalesce all frags from the same physical page in one slot */
1173 if (i == 0 || page_info->page_offset == 0) {
1174 /* First frag or Fresh page */
1175 j++;
1176 skb_frag_set_page(skb, j, page_info->page);
1177 skb_shinfo(skb)->frags[j].page_offset =
1178 page_info->page_offset;
1179 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1180 } else {
1181 put_page(page_info->page);
1182 }
1183 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1184 skb->truesize += rx_frag_size;
1185 remaining -= curr_frag_len;
1186 index_inc(&rxcp->rxq_idx, rxq->len);
1187 memset(page_info, 0, sizeof(*page_info));
1188 }
1189 BUG_ON(j > MAX_SKB_FRAGS);
1190
1191 skb_shinfo(skb)->nr_frags = j + 1;
1192 skb->len = rxcp->pkt_size;
1193 skb->data_len = rxcp->pkt_size;
1194 skb->ip_summed = CHECKSUM_UNNECESSARY;
1195 if (adapter->netdev->features & NETIF_F_RXHASH)
1196 skb->rxhash = rxcp->rss_hash;
1197
1198 if (rxcp->vlanf)
1199 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1200
1201 napi_gro_frags(&eq_obj->napi);
1202 }
1203
1204 static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1205 struct be_eth_rx_compl *compl,
1206 struct be_rx_compl_info *rxcp)
1207 {
1208 rxcp->pkt_size =
1209 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1210 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1211 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1212 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1213 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1214 rxcp->ip_csum =
1215 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1216 rxcp->l4_csum =
1217 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1218 rxcp->ipv6 =
1219 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1220 rxcp->rxq_idx =
1221 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1222 rxcp->num_rcvd =
1223 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1224 rxcp->pkt_type =
1225 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1226 rxcp->rss_hash =
1227 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1228 if (rxcp->vlanf) {
1229 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1230 compl);
1231 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1232 compl);
1233 }
1234 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1235 }
1236
1237 static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1238 struct be_eth_rx_compl *compl,
1239 struct be_rx_compl_info *rxcp)
1240 {
1241 rxcp->pkt_size =
1242 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1243 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1244 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1245 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1246 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1247 rxcp->ip_csum =
1248 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1249 rxcp->l4_csum =
1250 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1251 rxcp->ipv6 =
1252 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1253 rxcp->rxq_idx =
1254 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1255 rxcp->num_rcvd =
1256 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1257 rxcp->pkt_type =
1258 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1259 rxcp->rss_hash =
1260 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1261 if (rxcp->vlanf) {
1262 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1263 compl);
1264 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1265 compl);
1266 }
1267 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1268 }
1269
1270 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1271 {
1272 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1273 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1274 struct be_adapter *adapter = rxo->adapter;
1275
1276 /* For checking the valid bit it is Ok to use either definition as the
1277 * valid bit is at the same position in both v0 and v1 Rx compl */
1278 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1279 return NULL;
1280
1281 rmb();
1282 be_dws_le_to_cpu(compl, sizeof(*compl));
1283
1284 if (adapter->be3_native)
1285 be_parse_rx_compl_v1(adapter, compl, rxcp);
1286 else
1287 be_parse_rx_compl_v0(adapter, compl, rxcp);
1288
1289 if (rxcp->vlanf) {
1290 /* vlanf could be wrongly set in some cards.
1291 * ignore if vtm is not set */
1292 if ((adapter->function_mode & 0x400) && !rxcp->vtm)
1293 rxcp->vlanf = 0;
1294
1295 if (!lancer_chip(adapter))
1296 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1297
1298 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1299 !adapter->vlan_tag[rxcp->vlan_tag])
1300 rxcp->vlanf = 0;
1301 }
1302
1303 /* As the compl has been parsed, reset it; we won't touch it again */
1304 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1305
1306 queue_tail_inc(&rxo->cq);
1307 return rxcp;
1308 }
1309
1310 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1311 {
1312 u32 order = get_order(size);
1313
1314 if (order > 0)
1315 gfp |= __GFP_COMP;
1316 return alloc_pages(gfp, order);
1317 }
1318
1319 /*
1320 * Allocate a page, split it to fragments of size rx_frag_size and post as
1321 * receive buffers to BE
1322 */
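/*
 * A freshly allocated (compound) page is carved into rx_frag_size
 * chunks; each chunk gets its own page_info entry and a reference on
 * the page.  The entry covering the final usable chunk is marked
 * last_page_user so the page is unmapped only once.  If nothing could
 * be posted and the ring is empty, rx_post_starved is set so be_worker
 * can replenish later.
 */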
1323 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1324 {
1325 struct be_adapter *adapter = rxo->adapter;
1326 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1327 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1328 struct be_queue_info *rxq = &rxo->q;
1329 struct page *pagep = NULL;
1330 struct be_eth_rx_d *rxd;
1331 u64 page_dmaaddr = 0, frag_dmaaddr;
1332 u32 posted, page_offset = 0;
1333
1334 page_info = &rxo->page_info_tbl[rxq->head];
1335 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1336 if (!pagep) {
1337 pagep = be_alloc_pages(adapter->big_page_size, gfp);
1338 if (unlikely(!pagep)) {
1339 rx_stats(rxo)->rx_post_fail++;
1340 break;
1341 }
1342 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1343 0, adapter->big_page_size,
1344 DMA_FROM_DEVICE);
1345 page_info->page_offset = 0;
1346 } else {
1347 get_page(pagep);
1348 page_info->page_offset = page_offset + rx_frag_size;
1349 }
1350 page_offset = page_info->page_offset;
1351 page_info->page = pagep;
1352 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1353 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1354
1355 rxd = queue_head_node(rxq);
1356 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1357 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1358
1359 /* Any space left in the current big page for another frag? */
1360 if ((page_offset + rx_frag_size + rx_frag_size) >
1361 adapter->big_page_size) {
1362 pagep = NULL;
1363 page_info->last_page_user = true;
1364 }
1365
1366 prev_page_info = page_info;
1367 queue_head_inc(rxq);
1368 page_info = &page_info_tbl[rxq->head];
1369 }
1370 if (pagep)
1371 prev_page_info->last_page_user = true;
1372
1373 if (posted) {
1374 atomic_add(posted, &rxq->used);
1375 be_rxq_notify(adapter, rxq->id, posted);
1376 } else if (atomic_read(&rxq->used) == 0) {
1377 /* Let be_worker replenish when memory is available */
1378 rxo->rx_post_starved = true;
1379 }
1380 }
1381
1382 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1383 {
1384 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1385
1386 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1387 return NULL;
1388
1389 rmb();
1390 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1391
1392 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1393
1394 queue_tail_inc(tx_cq);
1395 return txcp;
1396 }
1397
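/*
 * be_tx_compl_process() reclaims one transmitted skb.  Starting at the
 * ring tail it skips the header WRB, unmaps each fragment WRB up to
 * and including last_index (the linear head with dma_unmap_single, the
 * rest with dma_unmap_page), frees the skb and returns the number of
 * WRBs consumed so the caller can credit txq->used.
 */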
1398 static u16 be_tx_compl_process(struct be_adapter *adapter,
1399 struct be_tx_obj *txo, u16 last_index)
1400 {
1401 struct be_queue_info *txq = &txo->q;
1402 struct be_eth_wrb *wrb;
1403 struct sk_buff **sent_skbs = txo->sent_skb_list;
1404 struct sk_buff *sent_skb;
1405 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1406 bool unmap_skb_hdr = true;
1407
1408 sent_skb = sent_skbs[txq->tail];
1409 BUG_ON(!sent_skb);
1410 sent_skbs[txq->tail] = NULL;
1411
1412 /* skip header wrb */
1413 queue_tail_inc(txq);
1414
1415 do {
1416 cur_index = txq->tail;
1417 wrb = queue_tail_node(txq);
1418 unmap_tx_frag(&adapter->pdev->dev, wrb,
1419 (unmap_skb_hdr && skb_headlen(sent_skb)));
1420 unmap_skb_hdr = false;
1421
1422 num_wrbs++;
1423 queue_tail_inc(txq);
1424 } while (cur_index != last_index);
1425
1426 kfree_skb(sent_skb);
1427 return num_wrbs;
1428 }
1429
1430 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1431 {
1432 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1433
1434 if (!eqe->evt)
1435 return NULL;
1436
1437 rmb();
1438 eqe->evt = le32_to_cpu(eqe->evt);
1439 queue_tail_inc(&eq_obj->q);
1440 return eqe;
1441 }
1442
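/*
 * event_handle() drains every pending entry from an event queue,
 * zeroing the entries as it goes, then writes the EQ doorbell with the
 * number of events popped.  A spurious interrupt (no events) forces a
 * re-arm; otherwise NAPI is scheduled to process the work signalled by
 * the events.
 */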
1443 static int event_handle(struct be_adapter *adapter,
1444 struct be_eq_obj *eq_obj,
1445 bool rearm)
1446 {
1447 struct be_eq_entry *eqe;
1448 u16 num = 0;
1449
1450 while ((eqe = event_get(eq_obj)) != NULL) {
1451 eqe->evt = 0;
1452 num++;
1453 }
1454
1455 /* Deal with any spurious interrupts that come
1456 * without events
1457 */
1458 if (!num)
1459 rearm = true;
1460
1461 be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
1462 if (num)
1463 napi_schedule(&eq_obj->napi);
1464
1465 return num;
1466 }
1467
1468 /* Just read and notify events without processing them.
1469 * Used at the time of destroying event queues */
1470 static void be_eq_clean(struct be_adapter *adapter,
1471 struct be_eq_obj *eq_obj)
1472 {
1473 struct be_eq_entry *eqe;
1474 u16 num = 0;
1475
1476 while ((eqe = event_get(eq_obj)) != NULL) {
1477 eqe->evt = 0;
1478 num++;
1479 }
1480
1481 if (num)
1482 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1483 }
1484
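/*
 * be_rx_q_clean() is used while tearing down an RX queue: it consumes
 * and discards any completions still pending on the CQ, then walks
 * back from the head to release every posted-but-unused buffer, and
 * finally resets the ring head and tail.
 */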
1485 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1486 {
1487 struct be_rx_page_info *page_info;
1488 struct be_queue_info *rxq = &rxo->q;
1489 struct be_queue_info *rx_cq = &rxo->cq;
1490 struct be_rx_compl_info *rxcp;
1491 u16 tail;
1492
1493 /* First cleanup pending rx completions */
1494 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1495 be_rx_compl_discard(adapter, rxo, rxcp);
1496 be_cq_notify(adapter, rx_cq->id, false, 1);
1497 }
1498
1499 /* Then free posted rx buffers that were not used */
1500 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1501 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1502 page_info = get_rx_page_info(adapter, rxo, tail);
1503 put_page(page_info->page);
1504 memset(page_info, 0, sizeof(*page_info));
1505 }
1506 BUG_ON(atomic_read(&rxq->used));
1507 rxq->tail = rxq->head = 0;
1508 }
1509
1510 static void be_tx_compl_clean(struct be_adapter *adapter,
1511 struct be_tx_obj *txo)
1512 {
1513 struct be_queue_info *tx_cq = &txo->cq;
1514 struct be_queue_info *txq = &txo->q;
1515 struct be_eth_tx_compl *txcp;
1516 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1517 struct sk_buff **sent_skbs = txo->sent_skb_list;
1518 struct sk_buff *sent_skb;
1519 bool dummy_wrb;
1520
1521 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1522 do {
1523 while ((txcp = be_tx_compl_get(tx_cq))) {
1524 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1525 wrb_index, txcp);
1526 num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
1527 cmpl++;
1528 }
1529 if (cmpl) {
1530 be_cq_notify(adapter, tx_cq->id, false, cmpl);
1531 atomic_sub(num_wrbs, &txq->used);
1532 cmpl = 0;
1533 num_wrbs = 0;
1534 }
1535
1536 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1537 break;
1538
1539 mdelay(1);
1540 } while (true);
1541
1542 if (atomic_read(&txq->used))
1543 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1544 atomic_read(&txq->used));
1545
1546 /* free posted tx for which compls will never arrive */
1547 while (atomic_read(&txq->used)) {
1548 sent_skb = sent_skbs[txq->tail];
1549 end_idx = txq->tail;
1550 index_adv(&end_idx,
1551 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1552 txq->len);
1553 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1554 atomic_sub(num_wrbs, &txq->used);
1555 }
1556 }
1557
1558 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1559 {
1560 struct be_queue_info *q;
1561
1562 q = &adapter->mcc_obj.q;
1563 if (q->created)
1564 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1565 be_queue_free(adapter, q);
1566
1567 q = &adapter->mcc_obj.cq;
1568 if (q->created)
1569 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1570 be_queue_free(adapter, q);
1571 }
1572
1573 /* Must be called only after TX qs are created as MCC shares TX EQ */
1574 static int be_mcc_queues_create(struct be_adapter *adapter)
1575 {
1576 struct be_queue_info *q, *cq;
1577
1578 /* Alloc MCC compl queue */
1579 cq = &adapter->mcc_obj.cq;
1580 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1581 sizeof(struct be_mcc_compl)))
1582 goto err;
1583
1584 /* Ask BE to create MCC compl queue; share TX's eq */
1585 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1586 goto mcc_cq_free;
1587
1588 /* Alloc MCC queue */
1589 q = &adapter->mcc_obj.q;
1590 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1591 goto mcc_cq_destroy;
1592
1593 /* Ask BE to create MCC queue */
1594 if (be_cmd_mccq_create(adapter, q, cq))
1595 goto mcc_q_free;
1596
1597 return 0;
1598
1599 mcc_q_free:
1600 be_queue_free(adapter, q);
1601 mcc_cq_destroy:
1602 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1603 mcc_cq_free:
1604 be_queue_free(adapter, cq);
1605 err:
1606 return -1;
1607 }
1608
1609 static void be_tx_queues_destroy(struct be_adapter *adapter)
1610 {
1611 struct be_queue_info *q;
1612 struct be_tx_obj *txo;
1613 u8 i;
1614
1615 for_all_tx_queues(adapter, txo, i) {
1616 q = &txo->q;
1617 if (q->created)
1618 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1619 be_queue_free(adapter, q);
1620
1621 q = &txo->cq;
1622 if (q->created)
1623 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1624 be_queue_free(adapter, q);
1625 }
1626
1627 /* Clear any residual events */
1628 be_eq_clean(adapter, &adapter->tx_eq);
1629
1630 q = &adapter->tx_eq.q;
1631 if (q->created)
1632 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1633 be_queue_free(adapter, q);
1634 }
1635
1636 /* One TX event queue is shared by all TX compl qs */
1637 static int be_tx_queues_create(struct be_adapter *adapter)
1638 {
1639 struct be_queue_info *eq, *q, *cq;
1640 struct be_tx_obj *txo;
1641 u8 i;
1642
1643 adapter->tx_eq.max_eqd = 0;
1644 adapter->tx_eq.min_eqd = 0;
1645 adapter->tx_eq.cur_eqd = 96;
1646 adapter->tx_eq.enable_aic = false;
1647
1648 eq = &adapter->tx_eq.q;
1649 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1650 sizeof(struct be_eq_entry)))
1651 return -1;
1652
1653 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1654 goto err;
1655 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1656
1657 for_all_tx_queues(adapter, txo, i) {
1658 cq = &txo->cq;
1659 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1660 sizeof(struct be_eth_tx_compl)))
1661 goto err;
1662
1663 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1664 goto err;
1665
1666 q = &txo->q;
1667 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1668 sizeof(struct be_eth_wrb)))
1669 goto err;
1670
1671 if (be_cmd_txq_create(adapter, q, cq))
1672 goto err;
1673 }
1674 return 0;
1675
1676 err:
1677 be_tx_queues_destroy(adapter);
1678 return -1;
1679 }
1680
1681 static void be_rx_queues_destroy(struct be_adapter *adapter)
1682 {
1683 struct be_queue_info *q;
1684 struct be_rx_obj *rxo;
1685 int i;
1686
1687 for_all_rx_queues(adapter, rxo, i) {
1688 be_queue_free(adapter, &rxo->q);
1689
1690 q = &rxo->cq;
1691 if (q->created)
1692 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1693 be_queue_free(adapter, q);
1694
1695 q = &rxo->rx_eq.q;
1696 if (q->created)
1697 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1698 be_queue_free(adapter, q);
1699 }
1700 }
1701
1702 static u32 be_num_rxqs_want(struct be_adapter *adapter)
1703 {
1704 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1705 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1706 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1707 } else {
1708 dev_warn(&adapter->pdev->dev,
1709 "No support for multiple RX queues\n");
1710 return 1;
1711 }
1712 }
1713
1714 static int be_rx_queues_create(struct be_adapter *adapter)
1715 {
1716 struct be_queue_info *eq, *q, *cq;
1717 struct be_rx_obj *rxo;
1718 int rc, i;
1719
1720 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1721 msix_enabled(adapter) ?
1722 adapter->num_msix_vec - 1 : 1);
1723 if (adapter->num_rx_qs != MAX_RX_QS)
1724 dev_warn(&adapter->pdev->dev,
1725 "Can create only %d RX queues", adapter->num_rx_qs);
1726
1727 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1728 for_all_rx_queues(adapter, rxo, i) {
1729 rxo->adapter = adapter;
1730 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1731 rxo->rx_eq.enable_aic = true;
1732
1733 /* EQ */
1734 eq = &rxo->rx_eq.q;
1735 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1736 sizeof(struct be_eq_entry));
1737 if (rc)
1738 goto err;
1739
1740 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1741 if (rc)
1742 goto err;
1743
1744 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1745
1746 /* CQ */
1747 cq = &rxo->cq;
1748 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1749 sizeof(struct be_eth_rx_compl));
1750 if (rc)
1751 goto err;
1752
1753 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1754 if (rc)
1755 goto err;
1756
1757 /* Rx Q - will be created in be_open() */
1758 q = &rxo->q;
1759 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1760 sizeof(struct be_eth_rx_d));
1761 if (rc)
1762 goto err;
1763
1764 }
1765
1766 return 0;
1767 err:
1768 be_rx_queues_destroy(adapter);
1769 return -1;
1770 }
1771
1772 static bool event_peek(struct be_eq_obj *eq_obj)
1773 {
1774 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1775 if (!eqe->evt)
1776 return false;
1777 else
1778 return true;
1779 }
1780
1781 static irqreturn_t be_intx(int irq, void *dev)
1782 {
1783 struct be_adapter *adapter = dev;
1784 struct be_rx_obj *rxo;
1785 int isr, i, tx = 0 , rx = 0;
1786
1787 if (lancer_chip(adapter)) {
1788 if (event_peek(&adapter->tx_eq))
1789 tx = event_handle(adapter, &adapter->tx_eq, false);
1790 for_all_rx_queues(adapter, rxo, i) {
1791 if (event_peek(&rxo->rx_eq))
1792 rx |= event_handle(adapter, &rxo->rx_eq, true);
1793 }
1794
1795 if (!(tx || rx))
1796 return IRQ_NONE;
1797
1798 } else {
1799 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1800 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1801 if (!isr)
1802 return IRQ_NONE;
1803
1804 if ((1 << adapter->tx_eq.eq_idx & isr))
1805 event_handle(adapter, &adapter->tx_eq, false);
1806
1807 for_all_rx_queues(adapter, rxo, i) {
1808 if ((1 << rxo->rx_eq.eq_idx & isr))
1809 event_handle(adapter, &rxo->rx_eq, true);
1810 }
1811 }
1812
1813 return IRQ_HANDLED;
1814 }
1815
1816 static irqreturn_t be_msix_rx(int irq, void *dev)
1817 {
1818 struct be_rx_obj *rxo = dev;
1819 struct be_adapter *adapter = rxo->adapter;
1820
1821 event_handle(adapter, &rxo->rx_eq, true);
1822
1823 return IRQ_HANDLED;
1824 }
1825
1826 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1827 {
1828 struct be_adapter *adapter = dev;
1829
1830 event_handle(adapter, &adapter->tx_eq, false);
1831
1832 return IRQ_HANDLED;
1833 }
1834
1835 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1836 {
1837 return (rxcp->tcpf && !rxcp->err) ? true : false;
1838 }
1839
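/*
 * be_poll_rx() is the NAPI handler for an RX queue.  It consumes up to
 * @budget completions, dropping flush completions that carry no data,
 * completions with a partial DMA (Lancer B0) and, on BE, packets that
 * arrived on the wrong port because of imperfect promiscuous
 * filtering; the rest go through the GRO or the regular receive path.
 * The ring is refilled when it drops below RX_FRAGS_REFILL_WM, and the
 * CQ is re-armed only when the budget was not exhausted.
 */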
1840 static int be_poll_rx(struct napi_struct *napi, int budget)
1841 {
1842 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1843 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1844 struct be_adapter *adapter = rxo->adapter;
1845 struct be_queue_info *rx_cq = &rxo->cq;
1846 struct be_rx_compl_info *rxcp;
1847 u32 work_done;
1848
1849 rx_stats(rxo)->rx_polls++;
1850 for (work_done = 0; work_done < budget; work_done++) {
1851 rxcp = be_rx_compl_get(rxo);
1852 if (!rxcp)
1853 break;
1854
1855 /* Is it a flush compl that has no data */
1856 if (unlikely(rxcp->num_rcvd == 0))
1857 goto loop_continue;
1858
1859 /* Discard compl with partial DMA Lancer B0 */
1860 if (unlikely(!rxcp->pkt_size)) {
1861 be_rx_compl_discard(adapter, rxo, rxcp);
1862 goto loop_continue;
1863 }
1864
1865 /* On BE, drop pkts that arrive due to imperfect filtering in
1866 * promiscuous mode on some SKUs.
1867 */
1868 if (unlikely(rxcp->port != adapter->port_num &&
1869 !lancer_chip(adapter))) {
1870 be_rx_compl_discard(adapter, rxo, rxcp);
1871 goto loop_continue;
1872 }
1873
1874 if (do_gro(rxcp))
1875 be_rx_compl_process_gro(adapter, rxo, rxcp);
1876 else
1877 be_rx_compl_process(adapter, rxo, rxcp);
1878 loop_continue:
1879 be_rx_stats_update(rxo, rxcp);
1880 }
1881
1882 /* Refill the queue */
1883 if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1884 be_post_rx_frags(rxo, GFP_ATOMIC);
1885
1886 /* All consumed */
1887 if (work_done < budget) {
1888 napi_complete(napi);
1889 be_cq_notify(adapter, rx_cq->id, true, work_done);
1890 } else {
1891 /* More to be consumed; continue with interrupts disabled */
1892 be_cq_notify(adapter, rx_cq->id, false, work_done);
1893 }
1894 return work_done;
1895 }
1896
1897 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1898 * For TX/MCC we don't honour the budget; consume everything.
1899 */
1900 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1901 {
1902 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1903 struct be_adapter *adapter =
1904 container_of(tx_eq, struct be_adapter, tx_eq);
1905 struct be_tx_obj *txo;
1906 struct be_eth_tx_compl *txcp;
1907 int tx_compl, mcc_compl, status = 0;
1908 u8 i;
1909 u16 num_wrbs;
1910
1911 for_all_tx_queues(adapter, txo, i) {
1912 tx_compl = 0;
1913 num_wrbs = 0;
1914 while ((txcp = be_tx_compl_get(&txo->cq))) {
1915 num_wrbs += be_tx_compl_process(adapter, txo,
1916 AMAP_GET_BITS(struct amap_eth_tx_compl,
1917 wrb_index, txcp));
1918 tx_compl++;
1919 }
1920 if (tx_compl) {
1921 be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1922
1923 atomic_sub(num_wrbs, &txo->q.used);
1924
1925 /* As Tx wrbs have been freed up, wake up netdev queue
1926 * if it was stopped due to lack of tx wrbs. */
1927 if (__netif_subqueue_stopped(adapter->netdev, i) &&
1928 atomic_read(&txo->q.used) < txo->q.len / 2) {
1929 netif_wake_subqueue(adapter->netdev, i);
1930 }
1931
1932 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
1933 tx_stats(txo)->tx_compl += tx_compl;
1934 u64_stats_update_end(&tx_stats(txo)->sync_compl);
1935 }
1936 }
1937
1938 mcc_compl = be_process_mcc(adapter, &status);
1939
1940 if (mcc_compl) {
1941 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1942 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1943 }
1944
1945 napi_complete(napi);
1946
1947 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
1948 adapter->drv_stats.tx_events++;
1949 return 1;
1950 }
1951
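/* Read the unrecoverable-error (UE) status registers from PCI config
 * space, drop the bits that are masked off, and log a description of
 * every error bit still set. Any unmasked UE marks the adapter as
 * failed (ue_detected/eeh_err).
 */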
1952 void be_detect_dump_ue(struct be_adapter *adapter)
1953 {
1954 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1955 u32 i;
1956
1957 pci_read_config_dword(adapter->pdev,
1958 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1959 pci_read_config_dword(adapter->pdev,
1960 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1961 pci_read_config_dword(adapter->pdev,
1962 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1963 pci_read_config_dword(adapter->pdev,
1964 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1965
1966 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1967 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1968
1969 if (ue_status_lo || ue_status_hi) {
1970 adapter->ue_detected = true;
1971 adapter->eeh_err = true;
1972 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1973 }
1974
1975 if (ue_status_lo) {
1976 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1977 if (ue_status_lo & 1)
1978 dev_err(&adapter->pdev->dev,
1979 "UE: %s bit set\n", ue_status_low_desc[i]);
1980 }
1981 }
1982 if (ue_status_hi) {
1983 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1984 if (ue_status_hi & 1)
1985 dev_err(&adapter->pdev->dev,
1986 "UE: %s bit set\n", ue_status_hi_desc[i]);
1987 }
1988 }
1989
1990 }
1991
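/* Periodic (1 second) housekeeping: look for unrecoverable errors, reap
 * MCC completions while the interface is down, fire the next async
 * stats request, adapt the RX event-queue delay and re-post RX buffers
 * for any ring that ran dry.
 */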
1992 static void be_worker(struct work_struct *work)
1993 {
1994 struct be_adapter *adapter =
1995 container_of(work, struct be_adapter, work.work);
1996 struct be_rx_obj *rxo;
1997 int i;
1998
1999 if (!adapter->ue_detected && !lancer_chip(adapter))
2000 be_detect_dump_ue(adapter);
2001
2002 /* when interrupts are not yet enabled, just reap any pending
2003 * mcc completions */
2004 if (!netif_running(adapter->netdev)) {
2005 int mcc_compl, status = 0;
2006
2007 mcc_compl = be_process_mcc(adapter, &status);
2008
2009 if (mcc_compl) {
2010 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2011 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
2012 }
2013
2014 goto reschedule;
2015 }
2016
2017 if (!adapter->stats_cmd_sent) {
2018 if (lancer_chip(adapter))
2019 lancer_cmd_get_pport_stats(adapter,
2020 &adapter->stats_cmd);
2021 else
2022 be_cmd_get_stats(adapter, &adapter->stats_cmd);
2023 }
2024
2025 for_all_rx_queues(adapter, rxo, i) {
2026 be_rx_eqd_update(adapter, rxo);
2027
2028 if (rxo->rx_post_starved) {
2029 rxo->rx_post_starved = false;
2030 be_post_rx_frags(rxo, GFP_KERNEL);
2031 }
2032 }
2033
2034 reschedule:
2035 adapter->work_counter++;
2036 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2037 }
2038
2039 static void be_msix_disable(struct be_adapter *adapter)
2040 {
2041 if (msix_enabled(adapter)) {
2042 pci_disable_msix(adapter->pdev);
2043 adapter->num_msix_vec = 0;
2044 }
2045 }
2046
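/* Request one MSI-X vector per desired RX queue plus one shared TX/MCC
 * vector. If the full request cannot be granted, pci_enable_msix()
 * returns the number of vectors actually available; retry with that
 * smaller count as long as it still covers the minimum of two vectors.
 */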
2047 static void be_msix_enable(struct be_adapter *adapter)
2048 {
2049 #define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
2050 int i, status, num_vec;
2051
2052 num_vec = be_num_rxqs_want(adapter) + 1;
2053
2054 for (i = 0; i < num_vec; i++)
2055 adapter->msix_entries[i].entry = i;
2056
2057 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2058 if (status == 0) {
2059 goto done;
2060 } else if (status >= BE_MIN_MSIX_VECTORS) {
2061 num_vec = status;
2062 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2063 num_vec) == 0)
2064 goto done;
2065 }
2066 return;
2067 done:
2068 adapter->num_msix_vec = num_vec;
2069 return;
2070 }
2071
2072 static void be_sriov_enable(struct be_adapter *adapter)
2073 {
2074 be_check_sriov_fn_type(adapter);
2075 #ifdef CONFIG_PCI_IOV
2076 if (be_physfn(adapter) && num_vfs) {
2077 int status, pos;
2078 u16 nvfs;
2079
2080 pos = pci_find_ext_capability(adapter->pdev,
2081 PCI_EXT_CAP_ID_SRIOV);
2082 pci_read_config_word(adapter->pdev,
2083 pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2084
2085 if (num_vfs > nvfs) {
2086 dev_info(&adapter->pdev->dev,
2087 "Device supports %d VFs and not %d\n",
2088 nvfs, num_vfs);
2089 num_vfs = nvfs;
2090 }
2091
2092 status = pci_enable_sriov(adapter->pdev, num_vfs);
2093 adapter->sriov_enabled = status ? false : true;
2094 }
2095 #endif
2096 }
2097
2098 static void be_sriov_disable(struct be_adapter *adapter)
2099 {
2100 #ifdef CONFIG_PCI_IOV
2101 if (adapter->sriov_enabled) {
2102 pci_disable_sriov(adapter->pdev);
2103 adapter->sriov_enabled = false;
2104 }
2105 #endif
2106 }
2107
2108 static inline int be_msix_vec_get(struct be_adapter *adapter,
2109 struct be_eq_obj *eq_obj)
2110 {
2111 return adapter->msix_entries[eq_obj->eq_idx].vector;
2112 }
2113
2114 static int be_request_irq(struct be_adapter *adapter,
2115 struct be_eq_obj *eq_obj,
2116 void *handler, char *desc, void *context)
2117 {
2118 struct net_device *netdev = adapter->netdev;
2119 int vec;
2120
2121 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
2122 vec = be_msix_vec_get(adapter, eq_obj);
2123 return request_irq(vec, handler, 0, eq_obj->desc, context);
2124 }
2125
2126 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2127 void *context)
2128 {
2129 int vec = be_msix_vec_get(adapter, eq_obj);
2130 free_irq(vec, context);
2131 }
2132
2133 static int be_msix_register(struct be_adapter *adapter)
2134 {
2135 struct be_rx_obj *rxo;
2136 int status, i;
2137 char qname[10];
2138
2139 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2140 adapter);
2141 if (status)
2142 goto err;
2143
2144 for_all_rx_queues(adapter, rxo, i) {
2145 sprintf(qname, "rxq%d", i);
2146 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2147 qname, rxo);
2148 if (status)
2149 goto err_msix;
2150 }
2151
2152 return 0;
2153
2154 err_msix:
2155 be_free_irq(adapter, &adapter->tx_eq, adapter);
2156
2157 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2158 be_free_irq(adapter, &rxo->rx_eq, rxo);
2159
2160 err:
2161 dev_warn(&adapter->pdev->dev,
2162 "MSIX Request IRQ failed - err %d\n", status);
2163 be_msix_disable(adapter);
2164 return status;
2165 }
2166
2167 static int be_irq_register(struct be_adapter *adapter)
2168 {
2169 struct net_device *netdev = adapter->netdev;
2170 int status;
2171
2172 if (msix_enabled(adapter)) {
2173 status = be_msix_register(adapter);
2174 if (status == 0)
2175 goto done;
2176 /* INTx is not supported for VF */
2177 if (!be_physfn(adapter))
2178 return status;
2179 }
2180
2181 /* INTx */
2182 netdev->irq = adapter->pdev->irq;
2183 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2184 adapter);
2185 if (status) {
2186 dev_err(&adapter->pdev->dev,
2187 "INTx request IRQ failed - err %d\n", status);
2188 return status;
2189 }
2190 done:
2191 adapter->isr_registered = true;
2192 return 0;
2193 }
2194
2195 static void be_irq_unregister(struct be_adapter *adapter)
2196 {
2197 struct net_device *netdev = adapter->netdev;
2198 struct be_rx_obj *rxo;
2199 int i;
2200
2201 if (!adapter->isr_registered)
2202 return;
2203
2204 /* INTx */
2205 if (!msix_enabled(adapter)) {
2206 free_irq(netdev->irq, adapter);
2207 goto done;
2208 }
2209
2210 /* MSIx */
2211 be_free_irq(adapter, &adapter->tx_eq, adapter);
2212
2213 for_all_rx_queues(adapter, rxo, i)
2214 be_free_irq(adapter, &rxo->rx_eq, rxo);
2215
2216 done:
2217 adapter->isr_registered = false;
2218 }
2219
2220 static void be_rx_queues_clear(struct be_adapter *adapter)
2221 {
2222 struct be_queue_info *q;
2223 struct be_rx_obj *rxo;
2224 int i;
2225
2226 for_all_rx_queues(adapter, rxo, i) {
2227 q = &rxo->q;
2228 if (q->created) {
2229 be_cmd_rxq_destroy(adapter, q);
2230 /* After the rxq is invalidated, wait for a grace time
2231 * of 1ms for all dma to end and the flush compl to
2232 * arrive
2233 */
2234 mdelay(1);
2235 be_rx_q_clean(adapter, rxo);
2236 }
2237
2238 /* Clear any residual events */
2239 q = &rxo->rx_eq.q;
2240 if (q->created)
2241 be_eq_clean(adapter, &rxo->rx_eq);
2242 }
2243 }
2244
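/* Quiesce the interface: stop async MCC processing, mask interrupts,
 * disable NAPI, wait for in-flight IRQs to finish, free the IRQs, drain
 * pending TX completions and finally destroy and clean the RX queues.
 */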
2245 static int be_close(struct net_device *netdev)
2246 {
2247 struct be_adapter *adapter = netdev_priv(netdev);
2248 struct be_rx_obj *rxo;
2249 struct be_tx_obj *txo;
2250 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2251 int vec, i;
2252
2253 be_async_mcc_disable(adapter);
2254
2255 if (!lancer_chip(adapter))
2256 be_intr_set(adapter, false);
2257
2258 for_all_rx_queues(adapter, rxo, i)
2259 napi_disable(&rxo->rx_eq.napi);
2260
2261 napi_disable(&tx_eq->napi);
2262
2263 if (lancer_chip(adapter)) {
2264 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2265 for_all_rx_queues(adapter, rxo, i)
2266 be_cq_notify(adapter, rxo->cq.id, false, 0);
2267 for_all_tx_queues(adapter, txo, i)
2268 be_cq_notify(adapter, txo->cq.id, false, 0);
2269 }
2270
2271 if (msix_enabled(adapter)) {
2272 vec = be_msix_vec_get(adapter, tx_eq);
2273 synchronize_irq(vec);
2274
2275 for_all_rx_queues(adapter, rxo, i) {
2276 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2277 synchronize_irq(vec);
2278 }
2279 } else {
2280 synchronize_irq(netdev->irq);
2281 }
2282 be_irq_unregister(adapter);
2283
2284 /* Wait for all pending tx completions to arrive so that
2285 * all tx skbs are freed.
2286 */
2287 for_all_tx_queues(adapter, txo, i)
2288 be_tx_compl_clean(adapter, txo);
2289
2290 be_rx_queues_clear(adapter);
2291 return 0;
2292 }
2293
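/* Create the RX rings in hardware (queue 0 is the default queue, the
 * rest are RSS queues), program the RSS indirection table when more
 * than one RX queue exists, then post the initial receive buffers and
 * enable NAPI on each queue.
 */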
2294 static int be_rx_queues_setup(struct be_adapter *adapter)
2295 {
2296 struct be_rx_obj *rxo;
2297 int rc, i;
2298 u8 rsstable[MAX_RSS_QS];
2299
2300 for_all_rx_queues(adapter, rxo, i) {
2301 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2302 rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2303 adapter->if_handle,
2304 (i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
2305 if (rc)
2306 return rc;
2307 }
2308
2309 if (be_multi_rxq(adapter)) {
2310 for_all_rss_queues(adapter, rxo, i)
2311 rsstable[i] = rxo->rss_id;
2312
2313 rc = be_cmd_rss_config(adapter, rsstable,
2314 adapter->num_rx_qs - 1);
2315 if (rc)
2316 return rc;
2317 }
2318
2319 /* First time posting */
2320 for_all_rx_queues(adapter, rxo, i) {
2321 be_post_rx_frags(rxo, GFP_KERNEL);
2322 napi_enable(&rxo->rx_eq.napi);
2323 }
2324 return 0;
2325 }
2326
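/* Bring the interface up: create and fill the RX rings, enable NAPI and
 * interrupts, arm every event and completion queue, start async MCC
 * processing and, on the PF, program VLAN filtering and flow control.
 * Any failure rolls back through be_close().
 */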
2327 static int be_open(struct net_device *netdev)
2328 {
2329 struct be_adapter *adapter = netdev_priv(netdev);
2330 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2331 struct be_rx_obj *rxo;
2332 int status, i;
2333
2334 status = be_rx_queues_setup(adapter);
2335 if (status)
2336 goto err;
2337
2338 napi_enable(&tx_eq->napi);
2339
2340 be_irq_register(adapter);
2341
2342 if (!lancer_chip(adapter))
2343 be_intr_set(adapter, true);
2344
2345 /* The evt queues are created in unarmed state; arm them */
2346 for_all_rx_queues(adapter, rxo, i) {
2347 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2348 be_cq_notify(adapter, rxo->cq.id, true, 0);
2349 }
2350 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2351
2352 /* Now that interrupts are on we can process async mcc */
2353 be_async_mcc_enable(adapter);
2354
2355 if (be_physfn(adapter)) {
2356 status = be_vid_config(adapter, false, 0);
2357 if (status)
2358 goto err;
2359
2360 status = be_cmd_set_flow_control(adapter,
2361 adapter->tx_fc, adapter->rx_fc);
2362 if (status)
2363 goto err;
2364 }
2365
2366 return 0;
2367 err:
2368 be_close(adapter->netdev);
2369 return -EIO;
2370 }
2371
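/* Enable or disable Wake-on-LAN. A DMA-coherent buffer is needed for the
 * magic-packet configuration command; enabling also sets the PM control
 * bits in PCI config space and arms wake-up from D3hot/D3cold, while
 * disabling programs a zeroed MAC and clears the wake capability.
 */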
2372 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2373 {
2374 struct be_dma_mem cmd;
2375 int status = 0;
2376 u8 mac[ETH_ALEN];
2377
2378 memset(mac, 0, ETH_ALEN);
2379
2380 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2381 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2382 GFP_KERNEL);
2383 if (cmd.va == NULL)
2384 return -1;
2385 memset(cmd.va, 0, cmd.size);
2386
2387 if (enable) {
2388 status = pci_write_config_dword(adapter->pdev,
2389 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2390 if (status) {
2391 dev_err(&adapter->pdev->dev,
2392 "Could not enable Wake-on-lan\n");
2393 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2394 cmd.dma);
2395 return status;
2396 }
2397 status = be_cmd_enable_magic_wol(adapter,
2398 adapter->netdev->dev_addr, &cmd);
2399 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2400 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2401 } else {
2402 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2403 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2404 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2405 }
2406
2407 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2408 return status;
2409 }
2410
2411 /*
2412 * Generate a seed MAC address from the PF MAC address using jhash.
2413 * MAC addresses for the VFs are assigned incrementally starting from the seed.
2414 * These addresses are programmed into the ASIC by the PF, and each VF driver
2415 * queries for its MAC address during probe.
2416 */
2417 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2418 {
2419 u32 vf = 0;
2420 int status = 0;
2421 u8 mac[ETH_ALEN];
2422
2423 be_vf_eth_addr_generate(adapter, mac);
2424
2425 for (vf = 0; vf < num_vfs; vf++) {
2426 status = be_cmd_pmac_add(adapter, mac,
2427 adapter->vf_cfg[vf].vf_if_handle,
2428 &adapter->vf_cfg[vf].vf_pmac_id,
2429 vf + 1);
2430 if (status)
2431 dev_err(&adapter->pdev->dev,
2432 "Mac address add failed for VF %d\n", vf);
2433 else
2434 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2435
2436 mac[5] += 1;
2437 }
2438 return status;
2439 }
2440
2441 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2442 {
2443 u32 vf;
2444
2445 for (vf = 0; vf < num_vfs; vf++) {
2446 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2447 be_cmd_pmac_del(adapter,
2448 adapter->vf_cfg[vf].vf_if_handle,
2449 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2450 }
2451 }
2452
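/* One-time resource setup: create the interface (with RSS and
 * promiscuous capabilities on the PF), create an interface per enabled
 * VF, query the MAC address when running as a VF, then create the TX,
 * RX and MCC queues. Errors unwind in reverse order of creation.
 */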
2453 static int be_setup(struct be_adapter *adapter)
2454 {
2455 struct net_device *netdev = adapter->netdev;
2456 u32 cap_flags, en_flags, vf = 0;
2457 int status;
2458 u8 mac[ETH_ALEN];
2459
2460 be_cmd_req_native_mode(adapter);
2461
2462 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2463 BE_IF_FLAGS_BROADCAST |
2464 BE_IF_FLAGS_MULTICAST;
2465
2466 if (be_physfn(adapter)) {
2467 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2468 BE_IF_FLAGS_PROMISCUOUS |
2469 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2470 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2471
2472 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2473 cap_flags |= BE_IF_FLAGS_RSS;
2474 en_flags |= BE_IF_FLAGS_RSS;
2475 }
2476 }
2477
2478 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2479 netdev->dev_addr, false/* pmac_invalid */,
2480 &adapter->if_handle, &adapter->pmac_id, 0);
2481 if (status != 0)
2482 goto do_none;
2483
2484 if (be_physfn(adapter)) {
2485 if (adapter->sriov_enabled) {
2486 while (vf < num_vfs) {
2487 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2488 BE_IF_FLAGS_BROADCAST;
2489 status = be_cmd_if_create(adapter, cap_flags,
2490 en_flags, mac, true,
2491 &adapter->vf_cfg[vf].vf_if_handle,
2492 NULL, vf+1);
2493 if (status) {
2494 dev_err(&adapter->pdev->dev,
2495 "Interface Create failed for VF %d\n",
2496 vf);
2497 goto if_destroy;
2498 }
2499 adapter->vf_cfg[vf].vf_pmac_id =
2500 BE_INVALID_PMAC_ID;
2501 vf++;
2502 }
2503 }
2504 } else {
2505 status = be_cmd_mac_addr_query(adapter, mac,
2506 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2507 if (!status) {
2508 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2509 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2510 }
2511 }
2512
2513 status = be_tx_queues_create(adapter);
2514 if (status != 0)
2515 goto if_destroy;
2516
2517 status = be_rx_queues_create(adapter);
2518 if (status != 0)
2519 goto tx_qs_destroy;
2520
2521 /* Allow all priorities by default. A GRP5 evt may modify this */
2522 adapter->vlan_prio_bmap = 0xff;
2523
2524 status = be_mcc_queues_create(adapter);
2525 if (status != 0)
2526 goto rx_qs_destroy;
2527
2528 adapter->link_speed = -1;
2529
2530 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2531
2532 pcie_set_readrq(adapter->pdev, 4096);
2533 return 0;
2534
2535 rx_qs_destroy:
2536 be_rx_queues_destroy(adapter);
2537 tx_qs_destroy:
2538 be_tx_queues_destroy(adapter);
2539 if_destroy:
2540 if (be_physfn(adapter) && adapter->sriov_enabled)
2541 for (vf = 0; vf < num_vfs; vf++)
2542 if (adapter->vf_cfg[vf].vf_if_handle)
2543 be_cmd_if_destroy(adapter,
2544 adapter->vf_cfg[vf].vf_if_handle,
2545 vf + 1);
2546 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2547 do_none:
2548 return status;
2549 }
2550
2551 static int be_clear(struct be_adapter *adapter)
2552 {
2553 int vf;
2554
2555 if (be_physfn(adapter) && adapter->sriov_enabled)
2556 be_vf_eth_addr_rem(adapter);
2557
2558 be_mcc_queues_destroy(adapter);
2559 be_rx_queues_destroy(adapter);
2560 be_tx_queues_destroy(adapter);
2561 adapter->eq_next_idx = 0;
2562
2563 if (be_physfn(adapter) && adapter->sriov_enabled)
2564 for (vf = 0; vf < num_vfs; vf++)
2565 if (adapter->vf_cfg[vf].vf_if_handle)
2566 be_cmd_if_destroy(adapter,
2567 adapter->vf_cfg[vf].vf_if_handle,
2568 vf + 1);
2569
2570 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2571
2572 adapter->be3_native = 0;
2573
2574 /* tell fw we're done with firing cmds */
2575 be_cmd_fw_clean(adapter);
2576 return 0;
2577 }
2578
2579
2580 #define FW_FILE_HDR_SIGN "ServerEngines Corp. "
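/* Decide whether the redboot (boot code) region needs reflashing by
 * comparing the CRC already stored in flash with the CRC carried in the
 * last four bytes of the image in the firmware file; flash only when
 * the two differ.
 */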
2581 static bool be_flash_redboot(struct be_adapter *adapter,
2582 const u8 *p, u32 img_start, int image_size,
2583 int hdr_size)
2584 {
2585 u32 crc_offset;
2586 u8 flashed_crc[4];
2587 int status;
2588
2589 crc_offset = hdr_size + img_start + image_size - 4;
2590
2591 p += crc_offset;
2592
2593 status = be_cmd_get_flash_crc(adapter, flashed_crc,
2594 (image_size - 4));
2595 if (status) {
2596 dev_err(&adapter->pdev->dev,
2597 "could not get crc from flash, not flashing redboot\n");
2598 return false;
2599 }
2600
2601 /* update redboot only if crc does not match */
2602 return memcmp(flashed_crc, p, 4) != 0;
2606 }
2607
2608 static bool phy_flashing_required(struct be_adapter *adapter)
2609 {
2610 int status = 0;
2611 struct be_phy_info phy_info;
2612
2613 status = be_cmd_get_phy_info(adapter, &phy_info);
2614 if (status)
2615 return false;
2616 if ((phy_info.phy_type == TN_8022) &&
2617 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2618 return true;
2619 }
2620 return false;
2621 }
2622
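/* Walk the per-generation flash layout table and write each firmware
 * component to its region in 32KB chunks. Intermediate chunks use a
 * "save" operation and the last chunk a "flash" operation, so a region
 * is committed only once it is complete. The NCSI image is skipped on
 * firmware older than 3.102.148.0 and PHY firmware is written only when
 * the attached PHY requires it.
 */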
2623 static int be_flash_data(struct be_adapter *adapter,
2624 const struct firmware *fw,
2625 struct be_dma_mem *flash_cmd, int num_of_images)
2627 {
2628 int status = 0, i, filehdr_size = 0;
2629 u32 total_bytes = 0, flash_op;
2630 int num_bytes;
2631 const u8 *p = fw->data;
2632 struct be_cmd_write_flashrom *req = flash_cmd->va;
2633 const struct flash_comp *pflashcomp;
2634 int num_comp;
2635
2636 static const struct flash_comp gen3_flash_types[10] = {
2637 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2638 FLASH_IMAGE_MAX_SIZE_g3},
2639 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2640 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2641 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2642 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2643 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2644 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2645 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2646 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2647 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2648 FLASH_IMAGE_MAX_SIZE_g3},
2649 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2650 FLASH_IMAGE_MAX_SIZE_g3},
2651 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2652 FLASH_IMAGE_MAX_SIZE_g3},
2653 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2654 FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2655 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2656 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
2657 };
2658 static const struct flash_comp gen2_flash_types[8] = {
2659 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2660 FLASH_IMAGE_MAX_SIZE_g2},
2661 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2662 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2663 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2664 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2665 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2666 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2667 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2668 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2669 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2670 FLASH_IMAGE_MAX_SIZE_g2},
2671 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2672 FLASH_IMAGE_MAX_SIZE_g2},
2673 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2674 FLASH_IMAGE_MAX_SIZE_g2}
2675 };
2676
2677 if (adapter->generation == BE_GEN3) {
2678 pflashcomp = gen3_flash_types;
2679 filehdr_size = sizeof(struct flash_file_hdr_g3);
2680 num_comp = ARRAY_SIZE(gen3_flash_types);
2681 } else {
2682 pflashcomp = gen2_flash_types;
2683 filehdr_size = sizeof(struct flash_file_hdr_g2);
2684 num_comp = ARRAY_SIZE(gen2_flash_types);
2685 }
2686 for (i = 0; i < num_comp; i++) {
2687 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2688 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2689 continue;
2690 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2691 if (!phy_flashing_required(adapter))
2692 continue;
2693 }
2694 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2695 (!be_flash_redboot(adapter, fw->data,
2696 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2697 (num_of_images * sizeof(struct image_hdr)))))
2698 continue;
2699 p = fw->data;
2700 p += filehdr_size + pflashcomp[i].offset
2701 + (num_of_images * sizeof(struct image_hdr));
2702 if (p + pflashcomp[i].size > fw->data + fw->size)
2703 return -1;
2704 total_bytes = pflashcomp[i].size;
2705 while (total_bytes) {
2706 if (total_bytes > 32*1024)
2707 num_bytes = 32*1024;
2708 else
2709 num_bytes = total_bytes;
2710 total_bytes -= num_bytes;
2711 if (!total_bytes) {
2712 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2713 flash_op = FLASHROM_OPER_PHY_FLASH;
2714 else
2715 flash_op = FLASHROM_OPER_FLASH;
2716 } else {
2717 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2718 flash_op = FLASHROM_OPER_PHY_SAVE;
2719 else
2720 flash_op = FLASHROM_OPER_SAVE;
2721 }
2722 memcpy(req->params.data_buf, p, num_bytes);
2723 p += num_bytes;
2724 status = be_cmd_write_flashrom(adapter, flash_cmd,
2725 pflashcomp[i].optype, flash_op, num_bytes);
2726 if (status) {
2727 if ((status == ILLEGAL_IOCTL_REQ) &&
2728 (pflashcomp[i].optype ==
2729 IMG_TYPE_PHY_FW))
2730 break;
2731 dev_err(&adapter->pdev->dev,
2732 "cmd to write to flash rom failed.\n");
2733 return -1;
2734 }
2735 }
2736 }
2737 return 0;
2738 }
2739
2740 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2741 {
2742 if (fhdr == NULL)
2743 return 0;
2744 if (fhdr->build[0] == '3')
2745 return BE_GEN3;
2746 else if (fhdr->build[0] == '2')
2747 return BE_GEN2;
2748 else
2749 return 0;
2750 }
2751
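/* Lancer firmware download: the image must be a multiple of 4 bytes and
 * is streamed to the "/prg" object in 32KB chunks via write_object
 * commands; a final zero-length write commits what has been written.
 */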
2752 static int lancer_fw_download(struct be_adapter *adapter,
2753 const struct firmware *fw)
2754 {
2755 #define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2756 #define LANCER_FW_DOWNLOAD_LOCATION "/prg"
2757 struct be_dma_mem flash_cmd;
2758 const u8 *data_ptr = NULL;
2759 u8 *dest_image_ptr = NULL;
2760 size_t image_size = 0;
2761 u32 chunk_size = 0;
2762 u32 data_written = 0;
2763 u32 offset = 0;
2764 int status = 0;
2765 u8 add_status = 0;
2766
2767 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2768 dev_err(&adapter->pdev->dev,
2769 "FW Image not properly aligned. "
2770 "Length must be 4 byte aligned.\n");
2771 status = -EINVAL;
2772 goto lancer_fw_exit;
2773 }
2774
2775 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2776 + LANCER_FW_DOWNLOAD_CHUNK;
2777 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2778 &flash_cmd.dma, GFP_KERNEL);
2779 if (!flash_cmd.va) {
2780 status = -ENOMEM;
2781 dev_err(&adapter->pdev->dev,
2782 "Memory allocation failure while flashing\n");
2783 goto lancer_fw_exit;
2784 }
2785
2786 dest_image_ptr = flash_cmd.va +
2787 sizeof(struct lancer_cmd_req_write_object);
2788 image_size = fw->size;
2789 data_ptr = fw->data;
2790
2791 while (image_size) {
2792 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2793
2794 /* Copy the image chunk content. */
2795 memcpy(dest_image_ptr, data_ptr, chunk_size);
2796
2797 status = lancer_cmd_write_object(adapter, &flash_cmd,
2798 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2799 &data_written, &add_status);
2800
2801 if (status)
2802 break;
2803
2804 offset += data_written;
2805 data_ptr += data_written;
2806 image_size -= data_written;
2807 }
2808
2809 if (!status) {
2810 /* Commit the FW written */
2811 status = lancer_cmd_write_object(adapter, &flash_cmd,
2812 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2813 &data_written, &add_status);
2814 }
2815
2816 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2817 flash_cmd.dma);
2818 if (status) {
2819 dev_err(&adapter->pdev->dev,
2820 "Firmware load error. "
2821 "Status code: 0x%x Additional Status: 0x%x\n",
2822 status, add_status);
2823 goto lancer_fw_exit;
2824 }
2825
2826 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2827 lancer_fw_exit:
2828 return status;
2829 }
2830
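/* Flash a UFI image on BE2/BE3 controllers. The file header identifies
 * the controller generation the image was built for and must match the
 * adapter; a gen3 file may carry several images, of which only the one
 * with image id 1 is flashed here.
 */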
2831 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2832 {
2833 struct flash_file_hdr_g2 *fhdr;
2834 struct flash_file_hdr_g3 *fhdr3;
2835 struct image_hdr *img_hdr_ptr = NULL;
2836 struct be_dma_mem flash_cmd;
2837 const u8 *p;
2838 int status = 0, i = 0, num_imgs = 0;
2839
2840 p = fw->data;
2841 fhdr = (struct flash_file_hdr_g2 *) p;
2842
2843 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2844 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2845 &flash_cmd.dma, GFP_KERNEL);
2846 if (!flash_cmd.va) {
2847 status = -ENOMEM;
2848 dev_err(&adapter->pdev->dev,
2849 "Memory allocation failure while flashing\n");
2850 goto be_fw_exit;
2851 }
2852
2853 if ((adapter->generation == BE_GEN3) &&
2854 (get_ufigen_type(fhdr) == BE_GEN3)) {
2855 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2856 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2857 for (i = 0; i < num_imgs; i++) {
2858 img_hdr_ptr = (struct image_hdr *) (fw->data +
2859 (sizeof(struct flash_file_hdr_g3) +
2860 i * sizeof(struct image_hdr)));
2861 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2862 status = be_flash_data(adapter, fw, &flash_cmd,
2863 num_imgs);
2864 }
2865 } else if ((adapter->generation == BE_GEN2) &&
2866 (get_ufigen_type(fhdr) == BE_GEN2)) {
2867 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2868 } else {
2869 dev_err(&adapter->pdev->dev,
2870 "UFI and Interface are not compatible for flashing\n");
2871 status = -1;
2872 }
2873
2874 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2875 flash_cmd.dma);
2876 if (status) {
2877 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2878 goto be_fw_exit;
2879 }
2880
2881 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2882
2883 be_fw_exit:
2884 return status;
2885 }
2886
2887 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2888 {
2889 const struct firmware *fw;
2890 int status;
2891
2892 if (!netif_running(adapter->netdev)) {
2893 dev_err(&adapter->pdev->dev,
2894 "Firmware load not allowed (interface is down)\n");
2895 return -1;
2896 }
2897
2898 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2899 if (status)
2900 goto fw_exit;
2901
2902 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2903
2904 if (lancer_chip(adapter))
2905 status = lancer_fw_download(adapter, fw);
2906 else
2907 status = be_fw_download(adapter, fw);
2908
2909 fw_exit:
2910 release_firmware(fw);
2911 return status;
2912 }
2913
2914 static struct net_device_ops be_netdev_ops = {
2915 .ndo_open = be_open,
2916 .ndo_stop = be_close,
2917 .ndo_start_xmit = be_xmit,
2918 .ndo_set_rx_mode = be_set_multicast_list,
2919 .ndo_set_mac_address = be_mac_addr_set,
2920 .ndo_change_mtu = be_change_mtu,
2921 .ndo_get_stats64 = be_get_stats64,
2922 .ndo_validate_addr = eth_validate_addr,
2923 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2924 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
2925 .ndo_set_vf_mac = be_set_vf_mac,
2926 .ndo_set_vf_vlan = be_set_vf_vlan,
2927 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
2928 .ndo_get_vf_config = be_get_vf_config
2929 };
2930
2931 static void be_netdev_init(struct net_device *netdev)
2932 {
2933 struct be_adapter *adapter = netdev_priv(netdev);
2934 struct be_rx_obj *rxo;
2935 int i;
2936
2937 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2938 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2939 NETIF_F_HW_VLAN_TX;
2940 if (be_multi_rxq(adapter))
2941 netdev->hw_features |= NETIF_F_RXHASH;
2942
2943 netdev->features |= netdev->hw_features |
2944 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
2945
2946 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2947 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2948
2949 netdev->flags |= IFF_MULTICAST;
2950
2951 /* Default settings for Rx and Tx flow control */
2952 adapter->rx_fc = true;
2953 adapter->tx_fc = true;
2954
2955 netif_set_gso_max_size(netdev, 65535);
2956
2957 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2958
2959 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2960
2961 for_all_rx_queues(adapter, rxo, i)
2962 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2963 BE_NAPI_WEIGHT);
2964
2965 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2966 BE_NAPI_WEIGHT);
2967 }
2968
2969 static void be_unmap_pci_bars(struct be_adapter *adapter)
2970 {
2971 if (adapter->csr)
2972 iounmap(adapter->csr);
2973 if (adapter->db)
2974 iounmap(adapter->db);
2975 }
2976
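/* Map the PCI BARs. Lancer exposes only a doorbell region (BAR 0). On
 * BE2/BE3 the PF additionally maps the CSR space from BAR 2, and the
 * doorbell region comes from BAR 4 (BAR 0 for a BE3 VF).
 */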
2977 static int be_map_pci_bars(struct be_adapter *adapter)
2978 {
2979 u8 __iomem *addr;
2980 int db_reg;
2981
2982 if (lancer_chip(adapter)) {
2983 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2984 pci_resource_len(adapter->pdev, 0));
2985 if (addr == NULL)
2986 return -ENOMEM;
2987 adapter->db = addr;
2988 return 0;
2989 }
2990
2991 if (be_physfn(adapter)) {
2992 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2993 pci_resource_len(adapter->pdev, 2));
2994 if (addr == NULL)
2995 return -ENOMEM;
2996 adapter->csr = addr;
2997 }
2998
2999 if (adapter->generation == BE_GEN2) {
3000 db_reg = 4;
3001 } else {
3002 if (be_physfn(adapter))
3003 db_reg = 4;
3004 else
3005 db_reg = 0;
3006 }
3007 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3008 pci_resource_len(adapter->pdev, db_reg));
3009 if (addr == NULL)
3010 goto pci_map_err;
3011 adapter->db = addr;
3012
3013 return 0;
3014 pci_map_err:
3015 be_unmap_pci_bars(adapter);
3016 return -ENOMEM;
3017 }
3018
3019
3020 static void be_ctrl_cleanup(struct be_adapter *adapter)
3021 {
3022 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3023
3024 be_unmap_pci_bars(adapter);
3025
3026 if (mem->va)
3027 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3028 mem->dma);
3029
3030 mem = &adapter->rx_filter;
3031 if (mem->va)
3032 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3033 mem->dma);
3034 }
3035
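/* Set up the structures used to talk to firmware: map the PCI BARs,
 * carve out a 16-byte-aligned MCC mailbox (hence the over-allocation
 * and PTR_ALIGN), allocate the RX filter command buffer and initialise
 * the mailbox and MCC locks.
 */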
3036 static int be_ctrl_init(struct be_adapter *adapter)
3037 {
3038 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3039 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3040 struct be_dma_mem *rx_filter = &adapter->rx_filter;
3041 int status;
3042
3043 status = be_map_pci_bars(adapter);
3044 if (status)
3045 goto done;
3046
3047 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3048 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3049 mbox_mem_alloc->size,
3050 &mbox_mem_alloc->dma,
3051 GFP_KERNEL);
3052 if (!mbox_mem_alloc->va) {
3053 status = -ENOMEM;
3054 goto unmap_pci_bars;
3055 }
3056 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3057 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3058 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3059 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3060
3061 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3062 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3063 &rx_filter->dma, GFP_KERNEL);
3064 if (rx_filter->va == NULL) {
3065 status = -ENOMEM;
3066 goto free_mbox;
3067 }
3068 memset(rx_filter->va, 0, rx_filter->size);
3069
3070 mutex_init(&adapter->mbox_lock);
3071 spin_lock_init(&adapter->mcc_lock);
3072 spin_lock_init(&adapter->mcc_cq_lock);
3073
3074 init_completion(&adapter->flash_compl);
3075 pci_save_state(adapter->pdev);
3076 return 0;
3077
3078 free_mbox:
3079 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3080 mbox_mem_alloc->va, mbox_mem_alloc->dma);
3081
3082 unmap_pci_bars:
3083 be_unmap_pci_bars(adapter);
3084
3085 done:
3086 return status;
3087 }
3088
3089 static void be_stats_cleanup(struct be_adapter *adapter)
3090 {
3091 struct be_dma_mem *cmd = &adapter->stats_cmd;
3092
3093 if (cmd->va)
3094 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3095 cmd->va, cmd->dma);
3096 }
3097
3098 static int be_stats_init(struct be_adapter *adapter)
3099 {
3100 struct be_dma_mem *cmd = &adapter->stats_cmd;
3101
3102 if (adapter->generation == BE_GEN2) {
3103 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3104 } else {
3105 if (lancer_chip(adapter))
3106 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3107 else
3108 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3109 }
3110 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3111 GFP_KERNEL);
3112 if (cmd->va == NULL)
3113 return -1;
3114 memset(cmd->va, 0, cmd->size);
3115 return 0;
3116 }
3117
3118 static void __devexit be_remove(struct pci_dev *pdev)
3119 {
3120 struct be_adapter *adapter = pci_get_drvdata(pdev);
3121
3122 if (!adapter)
3123 return;
3124
3125 cancel_delayed_work_sync(&adapter->work);
3126
3127 unregister_netdev(adapter->netdev);
3128
3129 be_clear(adapter);
3130
3131 be_stats_cleanup(adapter);
3132
3133 be_ctrl_cleanup(adapter);
3134
3135 kfree(adapter->vf_cfg);
3136 be_sriov_disable(adapter);
3137
3138 be_msix_disable(adapter);
3139
3140 pci_set_drvdata(pdev, NULL);
3141 pci_release_regions(pdev);
3142 pci_disable_device(pdev);
3143
3144 free_netdev(adapter->netdev);
3145 }
3146
3147 static int be_get_config(struct be_adapter *adapter)
3148 {
3149 int status;
3150 u8 mac[ETH_ALEN];
3151
3152 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3153 &adapter->function_mode, &adapter->function_caps);
3154 if (status)
3155 return status;
3156
3157 memset(mac, 0, ETH_ALEN);
3158
3159 /* A default permanent address is given to each VF for Lancer */
3160 if (be_physfn(adapter) || lancer_chip(adapter)) {
3161 status = be_cmd_mac_addr_query(adapter, mac,
3162 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
3163
3164 if (status)
3165 return status;
3166
3167 if (!is_valid_ether_addr(mac))
3168 return -EADDRNOTAVAIL;
3169
3170 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3171 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3172 }
3173
3174 if (adapter->function_mode & 0x400)
3175 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3176 else
3177 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3178
3179 status = be_cmd_get_cntl_attributes(adapter);
3180 if (status)
3181 return status;
3182
3183 if ((num_vfs && adapter->sriov_enabled) ||
3184 (adapter->function_mode & 0x400) ||
3185 lancer_chip(adapter) || !be_physfn(adapter)) {
3186 adapter->num_tx_qs = 1;
3187 netif_set_real_num_tx_queues(adapter->netdev,
3188 adapter->num_tx_qs);
3189 } else {
3190 adapter->num_tx_qs = MAX_TX_QS;
3191 }
3192
3193 return 0;
3194 }
3195
3196 static int be_dev_family_check(struct be_adapter *adapter)
3197 {
3198 struct pci_dev *pdev = adapter->pdev;
3199 u32 sli_intf = 0, if_type;
3200
3201 switch (pdev->device) {
3202 case BE_DEVICE_ID1:
3203 case OC_DEVICE_ID1:
3204 adapter->generation = BE_GEN2;
3205 break;
3206 case BE_DEVICE_ID2:
3207 case OC_DEVICE_ID2:
3208 adapter->generation = BE_GEN3;
3209 break;
3210 case OC_DEVICE_ID3:
3211 case OC_DEVICE_ID4:
3212 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3213 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3214 SLI_INTF_IF_TYPE_SHIFT;
3215
3216 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3217 if_type != 0x02) {
3218 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3219 return -EINVAL;
3220 }
3221 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3222 SLI_INTF_FAMILY_SHIFT);
3223 adapter->generation = BE_GEN3;
3224 break;
3225 default:
3226 adapter->generation = 0;
3227 }
3228 return 0;
3229 }
3230
3231 static int lancer_wait_ready(struct be_adapter *adapter)
3232 {
3233 #define SLIPORT_READY_TIMEOUT 500
3234 u32 sliport_status;
3235 int status = 0, i;
3236
3237 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3238 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3239 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3240 break;
3241
3242 msleep(20);
3243 }
3244
3245 if (i == SLIPORT_READY_TIMEOUT)
3246 status = -1;
3247
3248 return status;
3249 }
3250
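/* Wait for the Lancer SLIPORT to report ready. If the port reports an
 * error together with the reset-needed flag, kick off a port reset via
 * the control register and wait for readiness again; if only one of the
 * two flags is set the adapter is treated as unrecoverable.
 */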
3251 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3252 {
3253 int status;
3254 u32 sliport_status, err, reset_needed;
3255 status = lancer_wait_ready(adapter);
3256 if (!status) {
3257 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3258 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3259 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3260 if (err && reset_needed) {
3261 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3262 adapter->db + SLIPORT_CONTROL_OFFSET);
3263
3264 /* check adapter has corrected the error */
3265 status = lancer_wait_ready(adapter);
3266 sliport_status = ioread32(adapter->db +
3267 SLIPORT_STATUS_OFFSET);
3268 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3269 SLIPORT_STATUS_RN_MASK);
3270 if (status || sliport_status)
3271 status = -1;
3272 } else if (err || reset_needed) {
3273 status = -1;
3274 }
3275 }
3276 return status;
3277 }
3278
3279 static int __devinit be_probe(struct pci_dev *pdev,
3280 const struct pci_device_id *pdev_id)
3281 {
3282 int status = 0;
3283 struct be_adapter *adapter;
3284 struct net_device *netdev;
3285
3286 status = pci_enable_device(pdev);
3287 if (status)
3288 goto do_none;
3289
3290 status = pci_request_regions(pdev, DRV_NAME);
3291 if (status)
3292 goto disable_dev;
3293 pci_set_master(pdev);
3294
3295 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3296 if (netdev == NULL) {
3297 status = -ENOMEM;
3298 goto rel_reg;
3299 }
3300 adapter = netdev_priv(netdev);
3301 adapter->pdev = pdev;
3302 pci_set_drvdata(pdev, adapter);
3303
3304 status = be_dev_family_check(adapter);
3305 if (status)
3306 goto free_netdev;
3307
3308 adapter->netdev = netdev;
3309 SET_NETDEV_DEV(netdev, &pdev->dev);
3310
3311 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3312 if (!status) {
3313 netdev->features |= NETIF_F_HIGHDMA;
3314 } else {
3315 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3316 if (status) {
3317 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3318 goto free_netdev;
3319 }
3320 }
3321
3322 be_sriov_enable(adapter);
3323 if (adapter->sriov_enabled) {
3324 adapter->vf_cfg = kcalloc(num_vfs,
3325 sizeof(struct be_vf_cfg), GFP_KERNEL);
3326
3327 if (!adapter->vf_cfg)
3328 goto free_netdev;
3329 }
3330
3331 status = be_ctrl_init(adapter);
3332 if (status)
3333 goto free_vf_cfg;
3334
3335 if (lancer_chip(adapter)) {
3336 status = lancer_test_and_set_rdy_state(adapter);
3337 if (status) {
3338 dev_err(&pdev->dev, "Adapter in non recoverable error\n");
3339 goto ctrl_clean;
3340 }
3341 }
3342
3343 /* sync up with fw's ready state */
3344 if (be_physfn(adapter)) {
3345 status = be_cmd_POST(adapter);
3346 if (status)
3347 goto ctrl_clean;
3348 }
3349
3350 /* tell fw we're ready to fire cmds */
3351 status = be_cmd_fw_init(adapter);
3352 if (status)
3353 goto ctrl_clean;
3354
3355 status = be_cmd_reset_function(adapter);
3356 if (status)
3357 goto ctrl_clean;
3358
3359 status = be_stats_init(adapter);
3360 if (status)
3361 goto ctrl_clean;
3362
3363 status = be_get_config(adapter);
3364 if (status)
3365 goto stats_clean;
3366
3367 /* The INTR bit may be set in the card when probed by a kdump kernel
3368 * after a crash.
3369 */
3370 if (!lancer_chip(adapter))
3371 be_intr_set(adapter, false);
3372
3373 be_msix_enable(adapter);
3374
3375 INIT_DELAYED_WORK(&adapter->work, be_worker);
3376
3377 status = be_setup(adapter);
3378 if (status)
3379 goto msix_disable;
3380
3381 be_netdev_init(netdev);
3382 status = register_netdev(netdev);
3383 if (status != 0)
3384 goto unsetup;
3385
3386 if (be_physfn(adapter) && adapter->sriov_enabled) {
3387 u8 mac_speed;
3388 u16 vf, lnk_speed;
3389
3390 if (!lancer_chip(adapter)) {
3391 status = be_vf_eth_addr_config(adapter);
3392 if (status)
3393 goto unreg_netdev;
3394 }
3395
3396 for (vf = 0; vf < num_vfs; vf++) {
3397 status = be_cmd_link_status_query(adapter, &mac_speed,
3398 &lnk_speed, vf + 1);
3399 if (!status)
3400 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
3401 else
3402 goto unreg_netdev;
3403 }
3404 }
3405
3406 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3407
3408 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3409 return 0;
3410
3411 unreg_netdev:
3412 unregister_netdev(netdev);
3413 unsetup:
3414 be_clear(adapter);
3415 msix_disable:
3416 be_msix_disable(adapter);
3417 stats_clean:
3418 be_stats_cleanup(adapter);
3419 ctrl_clean:
3420 be_ctrl_cleanup(adapter);
3421 free_vf_cfg:
3422 kfree(adapter->vf_cfg);
3423 free_netdev:
3424 be_sriov_disable(adapter);
3425 free_netdev(netdev);
3426 pci_set_drvdata(pdev, NULL);
3427 rel_reg:
3428 pci_release_regions(pdev);
3429 disable_dev:
3430 pci_disable_device(pdev);
3431 do_none:
3432 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3433 return status;
3434 }
3435
3436 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3437 {
3438 struct be_adapter *adapter = pci_get_drvdata(pdev);
3439 struct net_device *netdev = adapter->netdev;
3440
3441 cancel_delayed_work_sync(&adapter->work);
3442 if (adapter->wol)
3443 be_setup_wol(adapter, true);
3444
3445 netif_device_detach(netdev);
3446 if (netif_running(netdev)) {
3447 rtnl_lock();
3448 be_close(netdev);
3449 rtnl_unlock();
3450 }
3451 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
3452 be_clear(adapter);
3453
3454 be_msix_disable(adapter);
3455 pci_save_state(pdev);
3456 pci_disable_device(pdev);
3457 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3458 return 0;
3459 }
3460
3461 static int be_resume(struct pci_dev *pdev)
3462 {
3463 int status = 0;
3464 struct be_adapter *adapter = pci_get_drvdata(pdev);
3465 struct net_device *netdev = adapter->netdev;
3466
3467 netif_device_detach(netdev);
3468
3469 status = pci_enable_device(pdev);
3470 if (status)
3471 return status;
3472
3473 pci_set_power_state(pdev, 0);
3474 pci_restore_state(pdev);
3475
3476 be_msix_enable(adapter);
3477 /* tell fw we're ready to fire cmds */
3478 status = be_cmd_fw_init(adapter);
3479 if (status)
3480 return status;
3481
3482 be_setup(adapter);
3483 if (netif_running(netdev)) {
3484 rtnl_lock();
3485 be_open(netdev);
3486 rtnl_unlock();
3487 }
3488 netif_device_attach(netdev);
3489
3490 if (adapter->wol)
3491 be_setup_wol(adapter, false);
3492
3493 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3494 return 0;
3495 }
3496
3497 /*
3498 * An FLR will stop BE from DMAing any data.
3499 */
3500 static void be_shutdown(struct pci_dev *pdev)
3501 {
3502 struct be_adapter *adapter = pci_get_drvdata(pdev);
3503
3504 if (!adapter)
3505 return;
3506
3507 cancel_delayed_work_sync(&adapter->work);
3508
3509 netif_device_detach(adapter->netdev);
3510
3511 if (adapter->wol)
3512 be_setup_wol(adapter, true);
3513
3514 be_cmd_reset_function(adapter);
3515
3516 pci_disable_device(pdev);
3517 }
3518
3519 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3520 pci_channel_state_t state)
3521 {
3522 struct be_adapter *adapter = pci_get_drvdata(pdev);
3523 struct net_device *netdev = adapter->netdev;
3524
3525 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3526
3527 adapter->eeh_err = true;
3528
3529 netif_device_detach(netdev);
3530
3531 if (netif_running(netdev)) {
3532 rtnl_lock();
3533 be_close(netdev);
3534 rtnl_unlock();
3535 }
3536 be_clear(adapter);
3537
3538 if (state == pci_channel_io_perm_failure)
3539 return PCI_ERS_RESULT_DISCONNECT;
3540
3541 pci_disable_device(pdev);
3542
3543 return PCI_ERS_RESULT_NEED_RESET;
3544 }
3545
3546 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3547 {
3548 struct be_adapter *adapter = pci_get_drvdata(pdev);
3549 int status;
3550
3551 dev_info(&adapter->pdev->dev, "EEH reset\n");
3552 adapter->eeh_err = false;
3553
3554 status = pci_enable_device(pdev);
3555 if (status)
3556 return PCI_ERS_RESULT_DISCONNECT;
3557
3558 pci_set_master(pdev);
3559 pci_set_power_state(pdev, 0);
3560 pci_restore_state(pdev);
3561
3562 /* Check if card is ok and fw is ready */
3563 status = be_cmd_POST(adapter);
3564 if (status)
3565 return PCI_ERS_RESULT_DISCONNECT;
3566
3567 return PCI_ERS_RESULT_RECOVERED;
3568 }
3569
3570 static void be_eeh_resume(struct pci_dev *pdev)
3571 {
3572 int status = 0;
3573 struct be_adapter *adapter = pci_get_drvdata(pdev);
3574 struct net_device *netdev = adapter->netdev;
3575
3576 dev_info(&adapter->pdev->dev, "EEH resume\n");
3577
3578 pci_save_state(pdev);
3579
3580 /* tell fw we're ready to fire cmds */
3581 status = be_cmd_fw_init(adapter);
3582 if (status)
3583 goto err;
3584
3585 status = be_setup(adapter);
3586 if (status)
3587 goto err;
3588
3589 if (netif_running(netdev)) {
3590 status = be_open(netdev);
3591 if (status)
3592 goto err;
3593 }
3594 netif_device_attach(netdev);
3595 return;
3596 err:
3597 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3598 }
3599
3600 static struct pci_error_handlers be_eeh_handlers = {
3601 .error_detected = be_eeh_err_detected,
3602 .slot_reset = be_eeh_reset,
3603 .resume = be_eeh_resume,
3604 };
3605
3606 static struct pci_driver be_driver = {
3607 .name = DRV_NAME,
3608 .id_table = be_dev_ids,
3609 .probe = be_probe,
3610 .remove = be_remove,
3611 .suspend = be_suspend,
3612 .resume = be_resume,
3613 .shutdown = be_shutdown,
3614 .err_handler = &be_eeh_handlers
3615 };
3616
3617 static int __init be_init_module(void)
3618 {
3619 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3620 rx_frag_size != 2048) {
3621 printk(KERN_WARNING DRV_NAME
3622 " : Module param rx_frag_size must be 2048/4096/8192."
3623 " Using 2048\n");
3624 rx_frag_size = 2048;
3625 }
3626
3627 return pci_register_driver(&be_driver);
3628 }
3629 module_init(be_init_module);
3630
3631 static void __exit be_exit_module(void)
3632 {
3633 pci_unregister_driver(&be_driver);
3634 }
3635 module_exit(be_exit_module);