qlcnic: remove unnecessary [kv][mcz]alloc casts
drivers/net/qlcnic/qlcnic_init.c
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#include <linux/netdevice.h>
26#include <linux/delay.h>
 27#include <linux/slab.h>
 28#include <linux/if_vlan.h>
29#include "qlcnic.h"
30
31struct crb_addr_pair {
32 u32 addr;
33 u32 data;
34};
35
36#define QLCNIC_MAX_CRB_XFORM 60
37static unsigned int crb_addr_xform[QLCNIC_MAX_CRB_XFORM];
38
39#define crb_addr_transform(name) \
40 (crb_addr_xform[QLCNIC_HW_PX_MAP_CRB_##name] = \
41 QLCNIC_HW_CRB_HUB_AGT_ADR_##name << 20)
42
43#define QLCNIC_ADDR_ERROR (0xffffffff)
44
45static void
46qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
47 struct qlcnic_host_rds_ring *rds_ring);
48
49static int
50qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter);
51
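/* Fill crb_addr_xform[] with each CRB block's hub-agent address; qlcnic_decode_crb_addr() uses this table to map flash CRB addresses to PCI CRB offsets. */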
52static void crb_addr_transform_setup(void)
53{
54 crb_addr_transform(XDMA);
55 crb_addr_transform(TIMR);
56 crb_addr_transform(SRE);
57 crb_addr_transform(SQN3);
58 crb_addr_transform(SQN2);
59 crb_addr_transform(SQN1);
60 crb_addr_transform(SQN0);
61 crb_addr_transform(SQS3);
62 crb_addr_transform(SQS2);
63 crb_addr_transform(SQS1);
64 crb_addr_transform(SQS0);
65 crb_addr_transform(RPMX7);
66 crb_addr_transform(RPMX6);
67 crb_addr_transform(RPMX5);
68 crb_addr_transform(RPMX4);
69 crb_addr_transform(RPMX3);
70 crb_addr_transform(RPMX2);
71 crb_addr_transform(RPMX1);
72 crb_addr_transform(RPMX0);
73 crb_addr_transform(ROMUSB);
74 crb_addr_transform(SN);
75 crb_addr_transform(QMN);
76 crb_addr_transform(QMS);
77 crb_addr_transform(PGNI);
78 crb_addr_transform(PGND);
79 crb_addr_transform(PGN3);
80 crb_addr_transform(PGN2);
81 crb_addr_transform(PGN1);
82 crb_addr_transform(PGN0);
83 crb_addr_transform(PGSI);
84 crb_addr_transform(PGSD);
85 crb_addr_transform(PGS3);
86 crb_addr_transform(PGS2);
87 crb_addr_transform(PGS1);
88 crb_addr_transform(PGS0);
89 crb_addr_transform(PS);
90 crb_addr_transform(PH);
91 crb_addr_transform(NIU);
92 crb_addr_transform(I2Q);
93 crb_addr_transform(EG);
94 crb_addr_transform(MN);
95 crb_addr_transform(MS);
96 crb_addr_transform(CAS2);
97 crb_addr_transform(CAS1);
98 crb_addr_transform(CAS0);
99 crb_addr_transform(CAM);
100 crb_addr_transform(C2C1);
101 crb_addr_transform(C2C0);
102 crb_addr_transform(SMB);
103 crb_addr_transform(OCM0);
104 crb_addr_transform(I2C0);
105}
106
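/* Unmap and free any skbs still attached to the RDS rings' rx buffers. */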
107void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter)
108{
109 struct qlcnic_recv_context *recv_ctx;
110 struct qlcnic_host_rds_ring *rds_ring;
111 struct qlcnic_rx_buffer *rx_buf;
112 int i, ring;
113
114 recv_ctx = &adapter->recv_ctx;
115 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
116 rds_ring = &recv_ctx->rds_rings[ring];
117 for (i = 0; i < rds_ring->num_desc; ++i) {
118 rx_buf = &(rds_ring->rx_buf_arr[i]);
 119 if (rx_buf->skb == NULL)
 120 continue;
 121
122 pci_unmap_single(adapter->pdev,
123 rx_buf->dma,
124 rds_ring->dma_size,
125 PCI_DMA_FROMDEVICE);
126
127 dev_kfree_skb_any(rx_buf->skb);
128 }
129 }
130}
131
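/* Rebuild each RDS ring's free list from its rx_buf array; the buffers themselves are reused, not freed. */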
132void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter)
133{
134 struct qlcnic_recv_context *recv_ctx;
135 struct qlcnic_host_rds_ring *rds_ring;
136 struct qlcnic_rx_buffer *rx_buf;
137 int i, ring;
138
139 recv_ctx = &adapter->recv_ctx;
140 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
141 rds_ring = &recv_ctx->rds_rings[ring];
142
143 INIT_LIST_HEAD(&rds_ring->free_list);
144
145 rx_buf = rds_ring->rx_buf_arr;
146 for (i = 0; i < rds_ring->num_desc; i++) {
147 list_add_tail(&rx_buf->list,
148 &rds_ring->free_list);
149 rx_buf++;
150 }
151 }
152}
153
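/* Unmap the DMA of every outstanding TX fragment and free any skb still held by a command buffer. */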
154void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter)
155{
156 struct qlcnic_cmd_buffer *cmd_buf;
157 struct qlcnic_skb_frag *buffrag;
158 int i, j;
159 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
160
161 cmd_buf = tx_ring->cmd_buf_arr;
162 for (i = 0; i < tx_ring->num_desc; i++) {
163 buffrag = cmd_buf->frag_array;
164 if (buffrag->dma) {
165 pci_unmap_single(adapter->pdev, buffrag->dma,
166 buffrag->length, PCI_DMA_TODEVICE);
167 buffrag->dma = 0ULL;
168 }
169 for (j = 0; j < cmd_buf->frag_count; j++) {
170 buffrag++;
171 if (buffrag->dma) {
172 pci_unmap_page(adapter->pdev, buffrag->dma,
173 buffrag->length,
174 PCI_DMA_TODEVICE);
175 buffrag->dma = 0ULL;
176 }
177 }
178 if (cmd_buf->skb) {
179 dev_kfree_skb_any(cmd_buf->skb);
180 cmd_buf->skb = NULL;
181 }
182 cmd_buf++;
183 }
184}
185
186void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
187{
188 struct qlcnic_recv_context *recv_ctx;
189 struct qlcnic_host_rds_ring *rds_ring;
190 struct qlcnic_host_tx_ring *tx_ring;
191 int ring;
192
193 recv_ctx = &adapter->recv_ctx;
194
195 if (recv_ctx->rds_rings == NULL)
196 goto skip_rds;
197
198 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
199 rds_ring = &recv_ctx->rds_rings[ring];
200 vfree(rds_ring->rx_buf_arr);
201 rds_ring->rx_buf_arr = NULL;
202 }
203 kfree(recv_ctx->rds_rings);
204
205skip_rds:
206 if (adapter->tx_ring == NULL)
207 return;
208
209 tx_ring = adapter->tx_ring;
210 vfree(tx_ring->cmd_buf_arr);
 211 tx_ring->cmd_buf_arr = NULL;
 212 kfree(adapter->tx_ring);
 213 adapter->tx_ring = NULL;
214}
215
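/* Allocate host-side ring state: the TX ring and its command buffer array, the RDS rings and their rx buffer arrays, and the per-SDS-ring free lists. */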
216int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
217{
218 struct qlcnic_recv_context *recv_ctx;
219 struct qlcnic_host_rds_ring *rds_ring;
220 struct qlcnic_host_sds_ring *sds_ring;
221 struct qlcnic_host_tx_ring *tx_ring;
222 struct qlcnic_rx_buffer *rx_buf;
223 int ring, i, size;
224
225 struct qlcnic_cmd_buffer *cmd_buf_arr;
226 struct net_device *netdev = adapter->netdev;
227
228 size = sizeof(struct qlcnic_host_tx_ring);
229 tx_ring = kzalloc(size, GFP_KERNEL);
230 if (tx_ring == NULL) {
231 dev_err(&netdev->dev, "failed to allocate tx ring struct\n");
232 return -ENOMEM;
233 }
234 adapter->tx_ring = tx_ring;
235
236 tx_ring->num_desc = adapter->num_txd;
237 tx_ring->txq = netdev_get_tx_queue(netdev, 0);
238
 239 cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
240 if (cmd_buf_arr == NULL) {
241 dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n");
 242 goto err_out;
 243 }
244 tx_ring->cmd_buf_arr = cmd_buf_arr;
245
246 recv_ctx = &adapter->recv_ctx;
247
248 size = adapter->max_rds_rings * sizeof(struct qlcnic_host_rds_ring);
249 rds_ring = kzalloc(size, GFP_KERNEL);
250 if (rds_ring == NULL) {
251 dev_err(&netdev->dev, "failed to allocate rds ring struct\n");
 252 goto err_out;
253 }
254 recv_ctx->rds_rings = rds_ring;
255
256 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
257 rds_ring = &recv_ctx->rds_rings[ring];
258 switch (ring) {
259 case RCV_RING_NORMAL:
260 rds_ring->num_desc = adapter->num_rxd;
 261 rds_ring->dma_size = QLCNIC_P3P_RX_BUF_MAX_LEN;
 262 rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
263 break;
264
265 case RCV_RING_JUMBO:
266 rds_ring->num_desc = adapter->num_jumbo_rxd;
267 rds_ring->dma_size =
 268 QLCNIC_P3P_RX_JUMBO_BUF_MAX_LEN;
269
270 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
271 rds_ring->dma_size += QLCNIC_LRO_BUFFER_EXTRA;
272
273 rds_ring->skb_size =
274 rds_ring->dma_size + NET_IP_ALIGN;
275 break;
 276 }
 277 rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
278 if (rds_ring->rx_buf_arr == NULL) {
279 dev_err(&netdev->dev, "Failed to allocate "
280 "rx buffer ring %d\n", ring);
281 goto err_out;
282 }
283 INIT_LIST_HEAD(&rds_ring->free_list);
284 /*
285 * Now go through all of them, set reference handles
286 * and put them in the queues.
287 */
288 rx_buf = rds_ring->rx_buf_arr;
289 for (i = 0; i < rds_ring->num_desc; i++) {
290 list_add_tail(&rx_buf->list,
291 &rds_ring->free_list);
292 rx_buf->ref_handle = i;
293 rx_buf++;
294 }
295 spin_lock_init(&rds_ring->lock);
296 }
297
298 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
299 sds_ring = &recv_ctx->sds_rings[ring];
300 sds_ring->irq = adapter->msix_entries[ring].vector;
301 sds_ring->adapter = adapter;
302 sds_ring->num_desc = adapter->num_rxd;
303
304 for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
305 INIT_LIST_HEAD(&sds_ring->free_list[i]);
306 }
307
308 return 0;
309
310err_out:
311 qlcnic_free_sw_resources(adapter);
312 return -ENOMEM;
313}
314
315/*
316 * Utility to translate from internal Phantom CRB address
317 * to external PCI CRB address.
318 */
319static u32 qlcnic_decode_crb_addr(u32 addr)
320{
321 int i;
322 u32 base_addr, offset, pci_base;
323
324 crb_addr_transform_setup();
325
326 pci_base = QLCNIC_ADDR_ERROR;
327 base_addr = addr & 0xfff00000;
328 offset = addr & 0x000fffff;
329
330 for (i = 0; i < QLCNIC_MAX_CRB_XFORM; i++) {
331 if (crb_addr_xform[i] == base_addr) {
332 pci_base = i << 20;
333 break;
334 }
335 }
336 if (pci_base == QLCNIC_ADDR_ERROR)
337 return pci_base;
338 else
339 return pci_base + offset;
340}
341
342#define QLCNIC_MAX_ROM_WAIT_USEC 100
343
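/* Poll the ROM glue status register for the done bit, giving up after QLCNIC_MAX_ROM_WAIT_USEC polls. */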
344static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
345{
346 long timeout = 0;
347 long done = 0;
348
349 cond_resched();
350
351 while (done == 0) {
352 done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS);
353 done &= 2;
354 if (++timeout >= QLCNIC_MAX_ROM_WAIT_USEC) {
355 dev_err(&adapter->pdev->dev,
356 "Timeout reached waiting for rom done");
357 return -EIO;
358 }
359 udelay(1);
360 }
361 return 0;
362}
363
364static int do_rom_fast_read(struct qlcnic_adapter *adapter,
365 int addr, int *valp)
366{
367 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr);
368 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
369 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 3);
370 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_INSTR_OPCODE, 0xb);
371 if (qlcnic_wait_rom_done(adapter)) {
372 dev_err(&adapter->pdev->dev, "Error waiting for rom done\n");
373 return -EIO;
374 }
375 /* reset abyte_cnt and dummy_byte_cnt */
376 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 0);
377 udelay(10);
378 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
379
380 *valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA);
381 return 0;
382}
383
384static int do_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
385 u8 *bytes, size_t size)
386{
387 int addridx;
388 int ret = 0;
389
390 for (addridx = addr; addridx < (addr + size); addridx += 4) {
391 int v;
392 ret = do_rom_fast_read(adapter, addridx, &v);
393 if (ret != 0)
394 break;
395 *(__le32 *)bytes = cpu_to_le32(v);
396 bytes += 4;
397 }
398
399 return ret;
400}
401
402int
403qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
404 u8 *bytes, size_t size)
405{
406 int ret;
407
408 ret = qlcnic_rom_lock(adapter);
409 if (ret < 0)
410 return ret;
411
412 ret = do_rom_fast_read_words(adapter, addr, bytes, size);
413
414 qlcnic_rom_unlock(adapter);
415 return ret;
416}
417
418int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp)
419{
420 int ret;
421
422 if (qlcnic_rom_lock(adapter) != 0)
423 return -EIO;
424
425 ret = do_rom_fast_read(adapter, addr, valp);
426 qlcnic_rom_unlock(adapter);
427 return ret;
428}
429
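/* Issue a software reset, replay the CRB init (addr, data) pairs stored in flash (skipping registers that must not be rewritten), then reinitialize the protocol engine (PEG) registers. */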
430int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
431{
432 int addr, val;
433 int i, n, init_delay;
434 struct crb_addr_pair *buf;
435 unsigned offset;
436 u32 off;
437 struct pci_dev *pdev = adapter->pdev;
438
439 QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
440 QLCWR32(adapter, CRB_RCVPEG_STATE, 0);
441
 442 qlcnic_rom_lock(adapter);
 443 QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff);
444 qlcnic_rom_unlock(adapter);
445
 446 /* Init HW CRB block */
447 if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) ||
448 qlcnic_rom_fast_read(adapter, 4, &n) != 0) {
449 dev_err(&pdev->dev, "ERROR Reading crb_init area: val:%x\n", n);
450 return -EIO;
451 }
452 offset = n & 0xffffU;
453 n = (n >> 16) & 0xffffU;
454
455 if (n >= 1024) {
456 dev_err(&pdev->dev, "QLOGIC card flash not initialized.\n");
457 return -EIO;
458 }
459
460 buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
461 if (buf == NULL) {
462 dev_err(&pdev->dev, "Unable to calloc memory for rom read.\n");
463 return -ENOMEM;
464 }
465
466 for (i = 0; i < n; i++) {
467 if (qlcnic_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
468 qlcnic_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
469 kfree(buf);
470 return -EIO;
471 }
472
473 buf[i].addr = addr;
474 buf[i].data = val;
475 }
476
477 for (i = 0; i < n; i++) {
478
479 off = qlcnic_decode_crb_addr(buf[i].addr);
480 if (off == QLCNIC_ADDR_ERROR) {
481 dev_err(&pdev->dev, "CRB init value out of range %x\n",
482 buf[i].addr);
483 continue;
484 }
485 off += QLCNIC_PCI_CRBSPACE;
486
487 if (off & 1)
488 continue;
489
490 /* skipping cold reboot MAGIC */
491 if (off == QLCNIC_CAM_RAM(0x1fc))
492 continue;
493 if (off == (QLCNIC_CRB_I2C0 + 0x1c))
494 continue;
495 if (off == (ROMUSB_GLB + 0xbc)) /* do not reset PCI */
496 continue;
497 if (off == (ROMUSB_GLB + 0xa8))
498 continue;
499 if (off == (ROMUSB_GLB + 0xc8)) /* core clock */
500 continue;
501 if (off == (ROMUSB_GLB + 0x24)) /* MN clock */
502 continue;
503 if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
504 continue;
505 if ((off & 0x0ff00000) == QLCNIC_CRB_DDR_NET)
506 continue;
507 /* skip the function enable register */
508 if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION))
509 continue;
510 if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION2))
511 continue;
512 if ((off & 0x0ff00000) == QLCNIC_CRB_SMB)
513 continue;
514
515 init_delay = 1;
516 /* After writing this register, HW needs time for CRB */
517 /* to quiet down (else crb_window returns 0xffffffff) */
518 if (off == QLCNIC_ROMUSB_GLB_SW_RESET)
519 init_delay = 1000;
520
521 QLCWR32(adapter, off, buf[i].data);
522
523 msleep(init_delay);
524 }
525 kfree(buf);
526
 527 /* Initialize protocol process engine */
 528 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0xec, 0x1e);
529 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0x4c, 8);
530 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_I + 0x4c, 8);
531 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x8, 0);
532 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0xc, 0);
533 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x8, 0);
534 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0xc, 0);
535 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x8, 0);
536 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0xc, 0);
537 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x8, 0);
538 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0);
539 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x8, 0);
540 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0xc, 0);
541 msleep(1);
542 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
543 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
544 return 0;
545}
546
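/* Poll CRB_CMDPEG_STATE until the command PEG reports initialization complete (or acked), failing after QLCNIC_CMDPEG_CHECK_RETRY_COUNT attempts. */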
 547static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter)
 548{
549 u32 val;
550 int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT;
 551
 552 do {
553 val = QLCRD32(adapter, CRB_CMDPEG_STATE);
554
555 switch (val) {
556 case PHAN_INITIALIZE_COMPLETE:
557 case PHAN_INITIALIZE_ACK:
558 return 0;
559 case PHAN_INITIALIZE_FAILED:
560 goto out_err;
561 default:
562 break;
 563 }
564
565 msleep(QLCNIC_CMDPEG_CHECK_DELAY);
566
567 } while (--retries);
568
569 QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
570
571out_err:
572 dev_err(&adapter->pdev->dev, "Command Peg initialization not "
573 "complete, state: 0x%x.\n", val);
574 return -EIO;
575}
576
577static int
578qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter)
579{
580 u32 val;
581 int retries = QLCNIC_RCVPEG_CHECK_RETRY_COUNT;
582
583 do {
584 val = QLCRD32(adapter, CRB_RCVPEG_STATE);
585
586 if (val == PHAN_PEG_RCV_INITIALIZED)
587 return 0;
588
589 msleep(QLCNIC_RCVPEG_CHECK_DELAY);
590
591 } while (--retries);
592
593 if (!retries) {
594 dev_err(&adapter->pdev->dev, "Receive Peg initialization not "
595 "complete, state: 0x%x.\n", val);
596 return -EIO;
597 }
598
599 return 0;
600}
601
602int
603qlcnic_check_fw_status(struct qlcnic_adapter *adapter)
604{
605 int err;
606
607 err = qlcnic_cmd_peg_ready(adapter);
608 if (err)
609 return err;
610
611 err = qlcnic_receive_peg_ready(adapter);
612 if (err)
613 return err;
614
615 QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
616
617 return err;
618}
619
 620int
621qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
622
623 int timeo;
624 u32 val;
625
626 val = QLCRD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO);
627 val = QLC_DEV_GET_DRV(val, adapter->portnum);
628 if ((val & 0x3) != QLCNIC_TYPE_NIC) {
629 dev_err(&adapter->pdev->dev,
630 "Not an Ethernet NIC func=%u\n", val);
631 return -EIO;
 632 }
 633 adapter->physical_port = (val >> 2);
 634 if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo))
 635 timeo = QLCNIC_INIT_TIMEOUT_SECS;
636
637 adapter->dev_init_timeo = timeo;
638
639 if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DRV_RESET_TIMEOUT, &timeo))
 640 timeo = QLCNIC_RESET_TIMEOUT_SECS;
641
642 adapter->reset_ack_timeo = timeo;
643
644 return 0;
645}
646
 647int
648qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter)
649{
650 u32 ver = -1, min_ver;
651
652 qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, (int *)&ver);
653
654 ver = QLCNIC_DECODE_VERSION(ver);
655 min_ver = QLCNIC_MIN_FW_VERSION;
656
657 if (ver < min_ver) {
658 dev_err(&adapter->pdev->dev,
659 "firmware version %d.%d.%d unsupported."
660 "Min supported version %d.%d.%d\n",
661 _major(ver), _minor(ver), _build(ver),
662 _major(min_ver), _minor(min_ver), _build(min_ver));
663 return -EINVAL;
664 }
665
666 return 0;
667}
668
669static int
670qlcnic_has_mn(struct qlcnic_adapter *adapter)
671{
 672 u32 capability;
673 capability = 0;
674
675 capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY);
676 if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
677 return 1;
 678
679 return 0;
680}
681
682static
683struct uni_table_desc *qlcnic_get_table_desc(const u8 *unirom, int section)
684{
685 u32 i;
686 struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
687 __le32 entries = cpu_to_le32(directory->num_entries);
688
689 for (i = 0; i < entries; i++) {
690
691 __le32 offs = cpu_to_le32(directory->findex) +
692 (i * cpu_to_le32(directory->entry_size));
693 __le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8));
694
695 if (tab_type == section)
696 return (struct uni_table_desc *) &unirom[offs];
697 }
698
699 return NULL;
700}
701
702#define FILEHEADER_SIZE (14 * 4)
703
 704static int
 705qlcnic_validate_header(struct qlcnic_adapter *adapter)
 706{
 707 const u8 *unirom = adapter->fw->data;
708 struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
709 __le32 fw_file_size = adapter->fw->size;
 710 __le32 entries;
711 __le32 entry_size;
712 __le32 tab_size;
713
714 if (fw_file_size < FILEHEADER_SIZE)
715 return -EINVAL;
716
717 entries = cpu_to_le32(directory->num_entries);
718 entry_size = cpu_to_le32(directory->entry_size);
719 tab_size = cpu_to_le32(directory->findex) + (entries * entry_size);
720
721 if (fw_file_size < tab_size)
722 return -EINVAL;
723
724 return 0;
725}
726
727static int
728qlcnic_validate_bootld(struct qlcnic_adapter *adapter)
729{
730 struct uni_table_desc *tab_desc;
731 struct uni_data_desc *descr;
732 const u8 *unirom = adapter->fw->data;
733 int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
734 QLCNIC_UNI_BOOTLD_IDX_OFF));
735 __le32 offs;
736 __le32 tab_size;
737 __le32 data_size;
738
739 tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_BOOTLD);
740
741 if (!tab_desc)
742 return -EINVAL;
743
744 tab_size = cpu_to_le32(tab_desc->findex) +
 745 (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
746
747 if (adapter->fw->size < tab_size)
748 return -EINVAL;
749
750 offs = cpu_to_le32(tab_desc->findex) +
751 (cpu_to_le32(tab_desc->entry_size) * (idx));
752 descr = (struct uni_data_desc *)&unirom[offs];
753
 754 data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
755
756 if (adapter->fw->size < data_size)
757 return -EINVAL;
758
759 return 0;
760}
761
762static int
763qlcnic_validate_fw(struct qlcnic_adapter *adapter)
764{
765 struct uni_table_desc *tab_desc;
766 struct uni_data_desc *descr;
767 const u8 *unirom = adapter->fw->data;
768 int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
769 QLCNIC_UNI_FIRMWARE_IDX_OFF));
770 __le32 offs;
771 __le32 tab_size;
772 __le32 data_size;
773
774 tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_FW);
775
776 if (!tab_desc)
777 return -EINVAL;
778
779 tab_size = cpu_to_le32(tab_desc->findex) +
 780 (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
781
782 if (adapter->fw->size < tab_size)
783 return -EINVAL;
784
785 offs = cpu_to_le32(tab_desc->findex) +
786 (cpu_to_le32(tab_desc->entry_size) * (idx));
787 descr = (struct uni_data_desc *)&unirom[offs];
 788 data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
789
790 if (adapter->fw->size < data_size)
791 return -EINVAL;
792
793 return 0;
794}
795
796static int
797qlcnic_validate_product_offs(struct qlcnic_adapter *adapter)
798{
799 struct uni_table_desc *ptab_descr;
800 const u8 *unirom = adapter->fw->data;
 801 int mn_present = qlcnic_has_mn(adapter);
802 __le32 entries;
803 __le32 entry_size;
804 __le32 tab_size;
805 u32 i;
806
807 ptab_descr = qlcnic_get_table_desc(unirom,
808 QLCNIC_UNI_DIR_SECT_PRODUCT_TBL);
809 if (!ptab_descr)
810 return -EINVAL;
811
812 entries = cpu_to_le32(ptab_descr->num_entries);
813 entry_size = cpu_to_le32(ptab_descr->entry_size);
814 tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size);
815
816 if (adapter->fw->size < tab_size)
817 return -EINVAL;
818
819nomn:
820 for (i = 0; i < entries; i++) {
821
822 __le32 flags, file_chiprev, offs;
823 u8 chiprev = adapter->ahw.revision_id;
824 u32 flagbit;
825
826 offs = cpu_to_le32(ptab_descr->findex) +
827 (i * cpu_to_le32(ptab_descr->entry_size));
828 flags = cpu_to_le32(*((int *)&unirom[offs] +
829 QLCNIC_UNI_FLAGS_OFF));
830 file_chiprev = cpu_to_le32(*((int *)&unirom[offs] +
831 QLCNIC_UNI_CHIP_REV_OFF));
832
833 flagbit = mn_present ? 1 : 2;
834
835 if ((chiprev == file_chiprev) &&
836 ((1ULL << flagbit) & flags)) {
837 adapter->file_prd_off = offs;
838 return 0;
839 }
840 }
841 if (mn_present) {
842 mn_present = 0;
843 goto nomn;
844 }
845 return -EINVAL;
846}
847
848static int
849qlcnic_validate_unified_romimage(struct qlcnic_adapter *adapter)
850{
851 if (qlcnic_validate_header(adapter)) {
852 dev_err(&adapter->pdev->dev,
853 "unified image: header validation failed\n");
854 return -EINVAL;
855 }
856
857 if (qlcnic_validate_product_offs(adapter)) {
858 dev_err(&adapter->pdev->dev,
859 "unified image: product validation failed\n");
860 return -EINVAL;
861 }
862
863 if (qlcnic_validate_bootld(adapter)) {
864 dev_err(&adapter->pdev->dev,
865 "unified image: bootld validation failed\n");
866 return -EINVAL;
867 }
868
869 if (qlcnic_validate_fw(adapter)) {
870 dev_err(&adapter->pdev->dev,
871 "unified image: firmware validation failed\n");
872 return -EINVAL;
873 }
874
875 return 0;
876}
877
878static
879struct uni_data_desc *qlcnic_get_data_desc(struct qlcnic_adapter *adapter,
880 u32 section, u32 idx_offset)
881{
882 const u8 *unirom = adapter->fw->data;
883 int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
884 idx_offset));
885 struct uni_table_desc *tab_desc;
886 __le32 offs;
887
888 tab_desc = qlcnic_get_table_desc(unirom, section);
889
890 if (tab_desc == NULL)
891 return NULL;
892
893 offs = cpu_to_le32(tab_desc->findex) +
894 (cpu_to_le32(tab_desc->entry_size) * idx);
895
896 return (struct uni_data_desc *)&unirom[offs];
897}
898
899static u8 *
900qlcnic_get_bootld_offs(struct qlcnic_adapter *adapter)
901{
902 u32 offs = QLCNIC_BOOTLD_START;
903
904 if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
905 offs = cpu_to_le32((qlcnic_get_data_desc(adapter,
906 QLCNIC_UNI_DIR_SECT_BOOTLD,
907 QLCNIC_UNI_BOOTLD_IDX_OFF))->findex);
908
909 return (u8 *)&adapter->fw->data[offs];
910}
911
912static u8 *
913qlcnic_get_fw_offs(struct qlcnic_adapter *adapter)
914{
915 u32 offs = QLCNIC_IMAGE_START;
916
917 if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
918 offs = cpu_to_le32((qlcnic_get_data_desc(adapter,
919 QLCNIC_UNI_DIR_SECT_FW,
920 QLCNIC_UNI_FIRMWARE_IDX_OFF))->findex);
921
922 return (u8 *)&adapter->fw->data[offs];
923}
924
925static __le32
926qlcnic_get_fw_size(struct qlcnic_adapter *adapter)
927{
928 if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
929 return cpu_to_le32((qlcnic_get_data_desc(adapter,
930 QLCNIC_UNI_DIR_SECT_FW,
931 QLCNIC_UNI_FIRMWARE_IDX_OFF))->size);
932 else
933 return cpu_to_le32(
934 *(u32 *)&adapter->fw->data[QLCNIC_FW_SIZE_OFFSET]);
935}
936
937static __le32
938qlcnic_get_fw_version(struct qlcnic_adapter *adapter)
939{
940 struct uni_data_desc *fw_data_desc;
941 const struct firmware *fw = adapter->fw;
942 __le32 major, minor, sub;
943 const u8 *ver_str;
944 int i, ret;
945
946 if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE)
947 return cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_VERSION_OFFSET]);
948
949 fw_data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW,
950 QLCNIC_UNI_FIRMWARE_IDX_OFF);
951 ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) +
952 cpu_to_le32(fw_data_desc->size) - 17;
953
954 for (i = 0; i < 12; i++) {
955 if (!strncmp(&ver_str[i], "REV=", 4)) {
956 ret = sscanf(&ver_str[i+4], "%u.%u.%u ",
957 &major, &minor, &sub);
958 if (ret != 3)
959 return 0;
960 else
961 return major + (minor << 8) + (sub << 16);
962 }
963 }
964
965 return 0;
966}
967
968static __le32
969qlcnic_get_bios_version(struct qlcnic_adapter *adapter)
970{
971 const struct firmware *fw = adapter->fw;
972 __le32 bios_ver, prd_off = adapter->file_prd_off;
973
974 if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE)
975 return cpu_to_le32(
976 *(u32 *)&fw->data[QLCNIC_BIOS_VERSION_OFFSET]);
977
978 bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
979 + QLCNIC_UNI_BIOS_VERSION_OFF));
980
 981 return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24);
982}
983
984static void qlcnic_rom_lock_recovery(struct qlcnic_adapter *adapter)
985{
986 if (qlcnic_pcie_sem_lock(adapter, 2, QLCNIC_ROM_LOCK_ID))
987 dev_info(&adapter->pdev->dev, "Resetting rom_lock\n");
988
989 qlcnic_pcie_sem_unlock(adapter, 2);
990}
991
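/* Firmware is considered alive if QLCNIC_PEG_ALIVE_COUNTER changes between polls; otherwise report failure. */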
992static int
993qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter)
994{
995 u32 heartbeat, ret = -EIO;
996 int retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT;
997
998 adapter->heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
999
1000 do {
1001 msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS);
1002 heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
1003 if (heartbeat != adapter->heartbeat) {
1004 ret = QLCNIC_RCODE_SUCCESS;
1005 break;
1006 }
1007 } while (--retries);
1008
1009 return ret;
1010}
1011
1012int
1013qlcnic_need_fw_reset(struct qlcnic_adapter *adapter)
1014{
 1015 if (qlcnic_check_fw_hearbeat(adapter)) {
 1016 qlcnic_rom_lock_recovery(adapter);
 1017 return 1;
 1018 }
 1019
 1020 if (adapter->need_fw_reset)
1021 return 1;
1022
1023 if (adapter->fw)
1024 return 1;
1025
1026 return 0;
1027}
1028
1029static const char *fw_name[] = {
1030 QLCNIC_UNIFIED_ROMIMAGE_NAME,
1031 QLCNIC_FLASH_ROMIMAGE_NAME,
1032};
1033
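/* Download bootloader and firmware into adapter memory, either from the request_firmware() image or word by word from flash. */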
1034int
1035qlcnic_load_firmware(struct qlcnic_adapter *adapter)
1036{
1037 u64 *ptr64;
1038 u32 i, flashaddr, size;
1039 const struct firmware *fw = adapter->fw;
1040 struct pci_dev *pdev = adapter->pdev;
1041
1042 dev_info(&pdev->dev, "loading firmware from %s\n",
1043 fw_name[adapter->fw_type]);
1044
1045 if (fw) {
1046 __le64 data;
1047
1048 size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
1049
1050 ptr64 = (u64 *)qlcnic_get_bootld_offs(adapter);
1051 flashaddr = QLCNIC_BOOTLD_START;
1052
1053 for (i = 0; i < size; i++) {
1054 data = cpu_to_le64(ptr64[i]);
1055
1056 if (qlcnic_pci_mem_write_2M(adapter, flashaddr, data))
1057 return -EIO;
1058
1059 flashaddr += 8;
1060 }
1061
1062 size = (__force u32)qlcnic_get_fw_size(adapter) / 8;
1063
1064 ptr64 = (u64 *)qlcnic_get_fw_offs(adapter);
1065 flashaddr = QLCNIC_IMAGE_START;
1066
1067 for (i = 0; i < size; i++) {
1068 data = cpu_to_le64(ptr64[i]);
1069
1070 if (qlcnic_pci_mem_write_2M(adapter,
1071 flashaddr, data))
1072 return -EIO;
1073
1074 flashaddr += 8;
1075 }
1076
1077 size = (__force u32)qlcnic_get_fw_size(adapter) % 8;
1078 if (size) {
1079 data = cpu_to_le64(ptr64[i]);
1080
1081 if (qlcnic_pci_mem_write_2M(adapter,
1082 flashaddr, data))
1083 return -EIO;
1084 }
1085
1086 } else {
1087 u64 data;
1088 u32 hi, lo;
1089
1090 size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
1091 flashaddr = QLCNIC_BOOTLD_START;
1092
1093 for (i = 0; i < size; i++) {
1094 if (qlcnic_rom_fast_read(adapter,
1095 flashaddr, (int *)&lo) != 0)
1096 return -EIO;
1097 if (qlcnic_rom_fast_read(adapter,
1098 flashaddr + 4, (int *)&hi) != 0)
1099 return -EIO;
1100
1101 data = (((u64)hi << 32) | lo);
1102
1103 if (qlcnic_pci_mem_write_2M(adapter,
1104 flashaddr, data))
1105 return -EIO;
1106
1107 flashaddr += 8;
1108 }
1109 }
1110 msleep(1);
1111
1112 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x18, 0x1020);
1113 QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0x80001e);
1114 return 0;
1115}
1116
1117static int
1118qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
1119{
1120 __le32 val;
 1121 u32 ver, bios, min_size;
1122 struct pci_dev *pdev = adapter->pdev;
1123 const struct firmware *fw = adapter->fw;
1124 u8 fw_type = adapter->fw_type;
1125
1126 if (fw_type == QLCNIC_UNIFIED_ROMIMAGE) {
 1127 if (qlcnic_validate_unified_romimage(adapter))
1128 return -EINVAL;
1129
1130 min_size = QLCNIC_UNI_FW_MIN_SIZE;
1131 } else {
1132 val = cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_MAGIC_OFFSET]);
1133 if ((__force u32)val != QLCNIC_BDINFO_MAGIC)
1134 return -EINVAL;
1135
1136 min_size = QLCNIC_FW_MIN_SIZE;
1137 }
1138
1139 if (fw->size < min_size)
1140 return -EINVAL;
1141
1142 val = qlcnic_get_fw_version(adapter);
1143 ver = QLCNIC_DECODE_VERSION(val);
1144
 1145 if (ver < QLCNIC_MIN_FW_VERSION) {
1146 dev_err(&pdev->dev,
1147 "%s: firmware version %d.%d.%d unsupported\n",
1148 fw_name[fw_type], _major(ver), _minor(ver), _build(ver));
1149 return -EINVAL;
1150 }
1151
1152 val = qlcnic_get_bios_version(adapter);
1153 qlcnic_rom_fast_read(adapter, QLCNIC_BIOS_VERSION_OFFSET, (int *)&bios);
1154 if ((__force u32)val != bios) {
1155 dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
1156 fw_name[fw_type]);
1157 return -EINVAL;
1158 }
1159
1160 QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
1161 return 0;
1162}
1163
1164static void
1165qlcnic_get_next_fwtype(struct qlcnic_adapter *adapter)
1166{
1167 u8 fw_type;
1168
1169 switch (adapter->fw_type) {
1170 case QLCNIC_UNKNOWN_ROMIMAGE:
1171 fw_type = QLCNIC_UNIFIED_ROMIMAGE;
1172 break;
1173
1174 case QLCNIC_UNIFIED_ROMIMAGE:
1175 default:
1176 fw_type = QLCNIC_FLASH_ROMIMAGE;
1177 break;
1178 }
1179
1180 adapter->fw_type = fw_type;
1181}
1182
1183
1184
1185void qlcnic_request_firmware(struct qlcnic_adapter *adapter)
1186{
1187 struct pci_dev *pdev = adapter->pdev;
1188 int rc;
1189
1190 adapter->fw_type = QLCNIC_UNKNOWN_ROMIMAGE;
1191
1192next:
1193 qlcnic_get_next_fwtype(adapter);
1194
1195 if (adapter->fw_type == QLCNIC_FLASH_ROMIMAGE) {
1196 adapter->fw = NULL;
1197 } else {
1198 rc = request_firmware(&adapter->fw,
1199 fw_name[adapter->fw_type], &pdev->dev);
1200 if (rc != 0)
1201 goto next;
1202
1203 rc = qlcnic_validate_firmware(adapter);
1204 if (rc != 0) {
1205 release_firmware(adapter->fw);
1206 msleep(1);
1207 goto next;
1208 }
1209 }
1210}
1211
1212
1213void
1214qlcnic_release_firmware(struct qlcnic_adapter *adapter)
1215{
1216 if (adapter->fw)
1217 release_firmware(adapter->fw);
1218 adapter->fw = NULL;
1219}
1220
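/* Decode a firmware link event: cable OUI/length, link state, speed, duplex and autoneg, then propagate the link change. */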
1221static void
1222qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
1223 struct qlcnic_fw_msg *msg)
1224{
1225 u32 cable_OUI;
1226 u16 cable_len;
1227 u16 link_speed;
1228 u8 link_status, module, duplex, autoneg;
1229 struct net_device *netdev = adapter->netdev;
1230
1231 adapter->has_link_events = 1;
1232
1233 cable_OUI = msg->body[1] & 0xffffffff;
1234 cable_len = (msg->body[1] >> 32) & 0xffff;
1235 link_speed = (msg->body[1] >> 48) & 0xffff;
1236
1237 link_status = msg->body[2] & 0xff;
1238 duplex = (msg->body[2] >> 16) & 0xff;
1239 autoneg = (msg->body[2] >> 24) & 0xff;
1240
1241 module = (msg->body[2] >> 8) & 0xff;
1242 if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
1243 dev_info(&netdev->dev, "unsupported cable: OUI 0x%x, "
1244 "length %d\n", cable_OUI, cable_len);
1245 else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
1246 dev_info(&netdev->dev, "unsupported cable length %d\n",
1247 cable_len);
1248
1249 qlcnic_advert_link_change(adapter, link_status);
1250
1251 if (duplex == LINKEVENT_FULL_DUPLEX)
1252 adapter->link_duplex = DUPLEX_FULL;
1253 else
1254 adapter->link_duplex = DUPLEX_HALF;
1255
1256 adapter->module_type = module;
1257 adapter->link_autoneg = autoneg;
1258 adapter->link_speed = link_speed;
1259}
1260
1261static void
1262qlcnic_handle_fw_message(int desc_cnt, int index,
1263 struct qlcnic_host_sds_ring *sds_ring)
1264{
1265 struct qlcnic_fw_msg msg;
1266 struct status_desc *desc;
1267 int i = 0, opcode;
1268
1269 while (desc_cnt > 0 && i < 8) {
1270 desc = &sds_ring->desc_head[index];
1271 msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
1272 msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);
1273
1274 index = get_next_index(index, sds_ring->num_desc);
1275 desc_cnt--;
1276 }
1277
1278 opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);
1279 switch (opcode) {
1280 case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
1281 qlcnic_handle_linkevent(sds_ring->adapter, &msg);
1282 break;
1283 default:
1284 break;
1285 }
1286}
1287
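/* Allocate an skb of the ring's skb_size, reserve NET_IP_ALIGN and DMA-map it for receive. */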
1288static int
1289qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
1290 struct qlcnic_host_rds_ring *rds_ring,
1291 struct qlcnic_rx_buffer *buffer)
1292{
1293 struct sk_buff *skb;
1294 dma_addr_t dma;
1295 struct pci_dev *pdev = adapter->pdev;
1296
1297 skb = dev_alloc_skb(rds_ring->skb_size);
1298 if (!skb) {
 1299 adapter->stats.skb_alloc_failure++;
 1300 return -ENOMEM;
 1301 }
 1302
 1303 skb_reserve(skb, NET_IP_ALIGN);
1304
1305 dma = pci_map_single(pdev, skb->data,
1306 rds_ring->dma_size, PCI_DMA_FROMDEVICE);
1307
1308 if (pci_dma_mapping_error(pdev, dma)) {
 1309 adapter->stats.rx_dma_map_error++;
 1310 dev_kfree_skb_any(skb);
1311 return -ENOMEM;
1312 }
1313
1314 buffer->skb = skb;
1315 buffer->dma = dma;
1316
1317 return 0;
1318}
1319
1320static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
1321 struct qlcnic_host_rds_ring *rds_ring, u16 index, u16 cksum)
1322{
1323 struct qlcnic_rx_buffer *buffer;
1324 struct sk_buff *skb;
1325
1326 buffer = &rds_ring->rx_buf_arr[index];
1327
1328 if (unlikely(buffer->skb == NULL)) {
1329 WARN_ON(1);
1330 return NULL;
1331 }
1332
1333 pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
1334 PCI_DMA_FROMDEVICE);
1335
1336 skb = buffer->skb;
 1337
1338 if (likely(adapter->rx_csum && (cksum == STATUS_CKSUM_OK ||
1339 cksum == STATUS_CKSUM_LOOP))) {
1340 adapter->stats.csummed++;
1341 skb->ip_summed = CHECKSUM_UNNECESSARY;
1342 } else {
 1343 skb_checksum_none_assert(skb);
1344 }
1345
1346 skb->dev = adapter->netdev;
1347
1348 buffer->skb = NULL;
 1349
1350 return skb;
1351}
1352
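/* Strip the 802.1Q header, if any, and decide from the PVID/tagging settings whether the frame should be dropped. */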
 1353static int
1354qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter, struct sk_buff *skb,
1355 u16 *vlan_tag)
 1356{
1357 struct ethhdr *eth_hdr;
1358
1359 if (!__vlan_get_tag(skb, vlan_tag)) {
1360 eth_hdr = (struct ethhdr *) skb->data;
1361 memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
1362 skb_pull(skb, VLAN_HLEN);
1363 }
1364 if (!adapter->pvid)
1365 return 0;
1366
1367 if (*vlan_tag == adapter->pvid) {
1368 /* Outer vlan tag. Packet should follow non-vlan path */
1369 *vlan_tag = 0xffff;
1370 return 0;
1371 }
1372 if (adapter->flags & QLCNIC_TAGGING_ENABLED)
1373 return 0;
1374
 1375 return -EINVAL;
1376}
1377
1378static struct qlcnic_rx_buffer *
1379qlcnic_process_rcv(struct qlcnic_adapter *adapter,
1380 struct qlcnic_host_sds_ring *sds_ring,
1381 int ring, u64 sts_data0)
1382{
1383 struct net_device *netdev = adapter->netdev;
1384 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
1385 struct qlcnic_rx_buffer *buffer;
1386 struct sk_buff *skb;
1387 struct qlcnic_host_rds_ring *rds_ring;
1388 int index, length, cksum, pkt_offset;
 1389 u16 vid = 0xffff;
1390
1391 if (unlikely(ring >= adapter->max_rds_rings))
1392 return NULL;
1393
1394 rds_ring = &recv_ctx->rds_rings[ring];
1395
1396 index = qlcnic_get_sts_refhandle(sts_data0);
1397 if (unlikely(index >= rds_ring->num_desc))
1398 return NULL;
1399
1400 buffer = &rds_ring->rx_buf_arr[index];
1401
1402 length = qlcnic_get_sts_totallength(sts_data0);
1403 cksum = qlcnic_get_sts_status(sts_data0);
1404 pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
1405
1406 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
1407 if (!skb)
1408 return buffer;
1409
1410 if (length > rds_ring->skb_size)
1411 skb_put(skb, rds_ring->skb_size);
1412 else
1413 skb_put(skb, length);
1414
1415 if (pkt_offset)
1416 skb_pull(skb, pkt_offset);
1417
1418 if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
1419 adapter->stats.rxdropped++;
1420 dev_kfree_skb(skb);
1421 return buffer;
1422 }
1423
1424 skb->protocol = eth_type_trans(skb, netdev);
1425
 1426 if ((vid != 0xffff) && adapter->vlgrp)
 1427 vlan_gro_receive(&sds_ring->napi, adapter->vlgrp, vid, skb);
1428 else
1429 napi_gro_receive(&sds_ring->napi, skb);
1430
1431 adapter->stats.rx_pkts++;
1432 adapter->stats.rxbytes += length;
1433
1434 return buffer;
1435}
1436
1437#define QLC_TCP_HDR_SIZE 20
1438#define QLC_TCP_TS_OPTION_SIZE 12
1439#define QLC_TCP_TS_HDR_SIZE (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)
1440
1441static struct qlcnic_rx_buffer *
1442qlcnic_process_lro(struct qlcnic_adapter *adapter,
1443 struct qlcnic_host_sds_ring *sds_ring,
1444 int ring, u64 sts_data0, u64 sts_data1)
1445{
1446 struct net_device *netdev = adapter->netdev;
1447 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
1448 struct qlcnic_rx_buffer *buffer;
1449 struct sk_buff *skb;
1450 struct qlcnic_host_rds_ring *rds_ring;
1451 struct iphdr *iph;
1452 struct tcphdr *th;
1453 bool push, timestamp;
1454 int l2_hdr_offset, l4_hdr_offset;
1455 int index;
1456 u16 lro_length, length, data_offset;
1457 u32 seq_number;
 1458 u16 vid = 0xffff;
1459
1460 if (unlikely(ring > adapter->max_rds_rings))
1461 return NULL;
1462
1463 rds_ring = &recv_ctx->rds_rings[ring];
1464
1465 index = qlcnic_get_lro_sts_refhandle(sts_data0);
1466 if (unlikely(index > rds_ring->num_desc))
1467 return NULL;
1468
1469 buffer = &rds_ring->rx_buf_arr[index];
1470
1471 timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
1472 lro_length = qlcnic_get_lro_sts_length(sts_data0);
1473 l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
1474 l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
1475 push = qlcnic_get_lro_sts_push_flag(sts_data0);
1476 seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);
1477
1478 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
1479 if (!skb)
1480 return buffer;
1481
1482 if (timestamp)
1483 data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
1484 else
1485 data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;
1486
1487 skb_put(skb, lro_length + data_offset);
1488
 1489 skb_pull(skb, l2_hdr_offset);
 1490
1491 if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
1492 adapter->stats.rxdropped++;
1493 dev_kfree_skb(skb);
1494 return buffer;
 1495 }
 1496
1497 skb->protocol = eth_type_trans(skb, netdev);
1498
1499 iph = (struct iphdr *)skb->data;
1500 th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
1501
1502 length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
1503 iph->tot_len = htons(length);
1504 iph->check = 0;
1505 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
1506 th->psh = push;
1507 th->seq = htonl(seq_number);
1508
1509 length = skb->len;
1510
1511 if ((vid != 0xffff) && adapter->vlgrp)
1512 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, vid);
1513 else
1514 netif_receive_skb(skb);
1515
1516 adapter->stats.lro_pkts++;
 1517 adapter->stats.lrobytes += length;
1518
1519 return buffer;
1520}
1521
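/* Receive path: walk the status (SDS) ring, process each completed descriptor, then replenish the RDS rings and advance the consumer index. */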
1522int
1523qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
1524{
1525 struct qlcnic_adapter *adapter = sds_ring->adapter;
1526 struct list_head *cur;
1527 struct status_desc *desc;
1528 struct qlcnic_rx_buffer *rxbuf;
1529 u64 sts_data0, sts_data1;
1530
1531 int count = 0;
1532 int opcode, ring, desc_cnt;
1533 u32 consumer = sds_ring->consumer;
1534
1535 while (count < max) {
1536 desc = &sds_ring->desc_head[consumer];
1537 sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
1538
1539 if (!(sts_data0 & STATUS_OWNER_HOST))
1540 break;
1541
1542 desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
1543 opcode = qlcnic_get_sts_opcode(sts_data0);
1544
1545 switch (opcode) {
1546 case QLCNIC_RXPKT_DESC:
1547 case QLCNIC_OLD_RXPKT_DESC:
1548 case QLCNIC_SYN_OFFLOAD:
1549 ring = qlcnic_get_sts_type(sts_data0);
1550 rxbuf = qlcnic_process_rcv(adapter, sds_ring,
1551 ring, sts_data0);
1552 break;
1553 case QLCNIC_LRO_DESC:
1554 ring = qlcnic_get_lro_sts_type(sts_data0);
1555 sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
1556 rxbuf = qlcnic_process_lro(adapter, sds_ring,
1557 ring, sts_data0, sts_data1);
1558 break;
1559 case QLCNIC_RESPONSE_DESC:
1560 qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
1561 default:
1562 goto skip;
1563 }
1564
1565 WARN_ON(desc_cnt > 1);
1566
 1567 if (likely(rxbuf))
 1568 list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
1569 else
1570 adapter->stats.null_rxbuf++;
1571
1572skip:
1573 for (; desc_cnt > 0; desc_cnt--) {
1574 desc = &sds_ring->desc_head[consumer];
1575 desc->status_desc_data[0] =
1576 cpu_to_le64(STATUS_OWNER_PHANTOM);
1577 consumer = get_next_index(consumer, sds_ring->num_desc);
1578 }
1579 count++;
1580 }
1581
1582 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1583 struct qlcnic_host_rds_ring *rds_ring =
1584 &adapter->recv_ctx.rds_rings[ring];
1585
1586 if (!list_empty(&sds_ring->free_list[ring])) {
1587 list_for_each(cur, &sds_ring->free_list[ring]) {
1588 rxbuf = list_entry(cur,
1589 struct qlcnic_rx_buffer, list);
1590 qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
1591 }
1592 spin_lock(&rds_ring->lock);
1593 list_splice_tail_init(&sds_ring->free_list[ring],
1594 &rds_ring->free_list);
1595 spin_unlock(&rds_ring->lock);
1596 }
1597
1598 qlcnic_post_rx_buffers_nodb(adapter, rds_ring);
1599 }
1600
1601 if (count) {
1602 sds_ring->consumer = consumer;
1603 writel(consumer, sds_ring->crb_sts_consumer);
1604 }
1605
1606 return count;
1607}
1608
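/* Post buffers from the free list into the RDS ring and update the hardware producer index. */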
1609void
1610qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
1611 struct qlcnic_host_rds_ring *rds_ring)
1612{
1613 struct rcv_desc *pdesc;
1614 struct qlcnic_rx_buffer *buffer;
1615 int producer, count = 0;
1616 struct list_head *head;
1617
1618 producer = rds_ring->producer;
1619
1620 head = &rds_ring->free_list;
1621 while (!list_empty(head)) {
1622
1623 buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
1624
1625 if (!buffer->skb) {
1626 if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
1627 break;
1628 }
1629
1630 count++;
1631 list_del(&buffer->list);
1632
1633 /* make a rcv descriptor */
1634 pdesc = &rds_ring->desc_head[producer];
1635 pdesc->addr_buffer = cpu_to_le64(buffer->dma);
1636 pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
1637 pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
1638
1639 producer = get_next_index(producer, rds_ring->num_desc);
1640 }
1641
1642 if (count) {
1643 rds_ring->producer = producer;
1644 writel((producer-1) & (rds_ring->num_desc-1),
1645 rds_ring->crb_rcv_producer);
1646 }
1647}
1648
1649static void
1650qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
1651 struct qlcnic_host_rds_ring *rds_ring)
1652{
1653 struct rcv_desc *pdesc;
1654 struct qlcnic_rx_buffer *buffer;
1655 int producer, count = 0;
1656 struct list_head *head;
1657
1658 if (!spin_trylock(&rds_ring->lock))
1659 return;
1660
1661 producer = rds_ring->producer;
1662
1663 head = &rds_ring->free_list;
1664 while (!list_empty(head)) {
1665
1666 buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
1667
1668 if (!buffer->skb) {
1669 if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
1670 break;
1671 }
1672
1673 count++;
1674 list_del(&buffer->list);
1675
1676 /* make a rcv descriptor */
1677 pdesc = &rds_ring->desc_head[producer];
1678 pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
1679 pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
1680 pdesc->addr_buffer = cpu_to_le64(buffer->dma);
1681
1682 producer = get_next_index(producer, rds_ring->num_desc);
1683 }
1684
1685 if (count) {
1686 rds_ring->producer = producer;
1687 writel((producer - 1) & (rds_ring->num_desc - 1),
1688 rds_ring->crb_rcv_producer);
1689 }
1690 spin_unlock(&rds_ring->lock);
1691}
1692
1693static void dump_skb(struct sk_buff *skb)
1694{
1695 int i;
1696 unsigned char *data = skb->data;
1697
1698 for (i = 0; i < skb->len; i++) {
1699 printk("%02x ", data[i]);
1700 if ((i & 0x0f) == 8)
1701 printk("\n");
1702 }
1703}
1704
1705static struct qlcnic_rx_buffer *
1706qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter,
1707 struct qlcnic_host_sds_ring *sds_ring,
1708 int ring, u64 sts_data0)
1709{
1710 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
1711 struct qlcnic_rx_buffer *buffer;
1712 struct sk_buff *skb;
1713 struct qlcnic_host_rds_ring *rds_ring;
1714 int index, length, cksum, pkt_offset;
1715
1716 if (unlikely(ring >= adapter->max_rds_rings))
1717 return NULL;
1718
1719 rds_ring = &recv_ctx->rds_rings[ring];
1720
1721 index = qlcnic_get_sts_refhandle(sts_data0);
1722 if (unlikely(index >= rds_ring->num_desc))
1723 return NULL;
1724
1725 buffer = &rds_ring->rx_buf_arr[index];
1726
1727 length = qlcnic_get_sts_totallength(sts_data0);
1728 cksum = qlcnic_get_sts_status(sts_data0);
1729 pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
1730
1731 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
1732 if (!skb)
1733 return buffer;
1734
1735 if (length > rds_ring->skb_size)
1736 skb_put(skb, rds_ring->skb_size);
1737 else
1738 skb_put(skb, length);
1739
1740 if (pkt_offset)
1741 skb_pull(skb, pkt_offset);
1742
1743 if (!qlcnic_check_loopback_buff(skb->data))
1744 adapter->diag_cnt++;
1745 else
1746 dump_skb(skb);
1747
1748 dev_kfree_skb_any(skb);
1749 adapter->stats.rx_pkts++;
1750 adapter->stats.rxbytes += length;
1751
1752 return buffer;
1753}
1754
1755void
1756qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
1757{
1758 struct qlcnic_adapter *adapter = sds_ring->adapter;
1759 struct status_desc *desc;
1760 struct qlcnic_rx_buffer *rxbuf;
1761 u64 sts_data0;
1762
1763 int opcode, ring, desc_cnt;
1764 u32 consumer = sds_ring->consumer;
1765
1766 desc = &sds_ring->desc_head[consumer];
1767 sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
1768
1769 if (!(sts_data0 & STATUS_OWNER_HOST))
1770 return;
1771
1772 desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
1773 opcode = qlcnic_get_sts_opcode(sts_data0);
1774
1775 ring = qlcnic_get_sts_type(sts_data0);
1776 rxbuf = qlcnic_process_rcv_diag(adapter, sds_ring,
1777 ring, sts_data0);
1778
1779 desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
1780 consumer = get_next_index(consumer, sds_ring->num_desc);
1781
1782 sds_ring->consumer = consumer;
1783 writel(consumer, sds_ring->crb_sts_consumer);
1784}
1785
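/* Build a 6-byte MAC address from two 32-bit CRB registers; alt_mac selects the shifted layout used for the alternate address. */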
1786void
1787qlcnic_fetch_mac(struct qlcnic_adapter *adapter, u32 off1, u32 off2,
1788 u8 alt_mac, u8 *mac)
1789{
1790 u32 mac_low, mac_high;
1791 int i;
1792
1793 mac_low = QLCRD32(adapter, off1);
1794 mac_high = QLCRD32(adapter, off2);
1795
1796 if (alt_mac) {
1797 mac_low |= (mac_low >> 16) | (mac_high << 16);
1798 mac_high >>= 16;
1799 }
1800
1801 for (i = 0; i < 2; i++)
1802 mac[i] = (u8)(mac_high >> ((1 - i) * 8));
1803 for (i = 2; i < 6; i++)
1804 mac[i] = (u8)(mac_low >> ((5 - i) * 8));
1805}