/* drivers/net/ethernet/emulex/benet/be_cmds.c */
/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"

static struct be_cmd_priv_map cmd_priv_map[] = {
	{
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_SET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_ETH_GET_PPORT_STATS,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_PHY_DETAILS,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	}
};

static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode,
			   u8 subsystem)
{
	int i;
	int num_entries = ARRAY_SIZE(cmd_priv_map);
	u32 cmd_privileges = adapter->cmd_privileges;

	for (i = 0; i < num_entries; i++)
		if (opcode == cmd_priv_map[i].opcode &&
		    subsystem == cmd_priv_map[i].subsystem)
			if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
				return false;

	return true;
}

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

static void be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	if (be_error(adapter))
		return;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
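/* Note: a cleared completion word is zero in any byte order, so the
 * flags != 0 pre-check below is endian-safe; only after it succeeds is the
 * word byte-swapped and the valid bit tested.
 */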
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	u32 flags;

	if (compl->flags != 0) {
		flags = le32_to_cpu(compl->flags);
		if (flags & CQE_FLAGS_VALID_MASK) {
			compl->flags = flags;
			return true;
		}
	}
	return false;
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}

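/* The WRB tags carry the request's virtual address, split into two 32-bit
 * halves by be_wrb_cmd_hdr_prepare() (tag0 = low word, tag1 = high word);
 * the helper below rebuilds the original pointer. The double shift
 * ((addr << 16) << 16) avoids an undefined-behaviour 32-bit shift on
 * platforms where unsigned long is 32 bits wide (there it yields 0, leaving
 * just tag0).
 */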
static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
{
	unsigned long addr;

	addr = tag1;
	addr = ((addr << 16) << 16) | tag0;
	return (void *)addr;
}

static int be_mcc_compl_process(struct be_adapter *adapter,
				struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;
	struct be_cmd_resp_hdr *resp_hdr;
	u8 opcode = 0, subsystem = 0;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
		       CQE_STATUS_COMPL_MASK;

	resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);

	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
	     (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
	    (subsystem == CMD_SUBSYSTEM_COMMON)) {
		adapter->flash_status = compl_status;
		complete(&adapter->flash_compl);
	}

	if (compl_status == MCC_STATUS_SUCCESS) {
		if (((opcode == OPCODE_ETH_GET_STATISTICS) ||
		     (opcode == OPCODE_ETH_GET_PPORT_STATS)) &&
		    (subsystem == CMD_SUBSYSTEM_ETH)) {
			be_parse_stats(adapter);
			adapter->stats_cmd_sent = false;
		}
		if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
		    subsystem == CMD_SUBSYSTEM_COMMON) {
			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
				(void *)resp_hdr;
			adapter->drv_stats.be_on_die_temperature =
				resp->on_die_temperature;
		}
	} else {
		if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
			adapter->be_get_temp_freq = 0;

		if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
		    compl_status == MCC_STATUS_ILLEGAL_REQUEST)
			goto done;

		if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
			dev_warn(&adapter->pdev->dev,
				 "VF is not privileged to issue opcode %d-%d\n",
				 opcode, subsystem);
		} else {
			extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
				      CQE_STATUS_EXTD_MASK;
			dev_err(&adapter->pdev->dev,
				"opcode %d-%d failed:status %d-%d\n",
				opcode, subsystem, compl_status, extd_status);
		}
	}
done:
	return compl_status;
}

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
					struct be_async_event_link_state *evt)
{
	/* When link status changes, link speed must be re-queried from FW */
	adapter->phy.link_speed = -1;

	/* Ignore physical link event */
	if (lancer_chip(adapter) &&
	    !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
		return;

	/* For the initial link status do not rely on the ASYNC event as
	 * it may not be received in some cases.
	 */
	if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
		be_link_status_update(adapter, evt->port_link_status);
}

/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
		struct be_async_event_grp5_cos_priority *evt)
{
	if (evt->valid) {
		adapter->vlan_prio_bmap = evt->available_priority_bmap;
		adapter->recommended_prio &= ~VLAN_PRIO_MASK;
		adapter->recommended_prio =
			evt->reco_default_priority << VLAN_PRIO_SHIFT;
	}
}

/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
		struct be_async_event_grp5_qos_link_speed *evt)
{
	if (adapter->phy.link_speed >= 0 &&
	    evt->physical_port == adapter->port_num)
		adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
}

/* Grp5 PVID evt */
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
		struct be_async_event_grp5_pvid_state *evt)
{
	if (evt->enabled)
		adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
	else
		adapter->pvid = 0;
}

static void be_async_grp5_evt_process(struct be_adapter *adapter,
				      u32 trailer, struct be_mcc_compl *evt)
{
	u8 event_type = 0;

	event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
		     ASYNC_TRAILER_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_EVENT_COS_PRIORITY:
		be_async_grp5_cos_priority_process(adapter,
			(struct be_async_event_grp5_cos_priority *)evt);
		break;
	case ASYNC_EVENT_QOS_SPEED:
		be_async_grp5_qos_speed_process(adapter,
			(struct be_async_event_grp5_qos_link_speed *)evt);
		break;
	case ASYNC_EVENT_PVID_STATE:
		be_async_grp5_pvid_state_process(adapter,
			(struct be_async_event_grp5_pvid_state *)evt);
		break;
	default:
		dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
		break;
	}
}

static void be_async_dbg_evt_process(struct be_adapter *adapter,
				     u32 trailer, struct be_mcc_compl *cmp)
{
	u8 event_type = 0;
	struct be_async_event_qnq *evt = (struct be_async_event_qnq *)cmp;

	event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
		     ASYNC_TRAILER_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_DEBUG_EVENT_TYPE_QNQ:
		if (evt->valid)
			adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
		adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
		break;
	default:
		dev_warn(&adapter->pdev->dev, "Unknown debug event\n");
		break;
	}
}

static inline bool is_link_state_evt(u32 trailer)
{
	return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
		ASYNC_EVENT_CODE_LINK_STATE;
}

static inline bool is_grp5_evt(u32 trailer)
{
	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
		ASYNC_EVENT_CODE_GRP_5);
}

static inline bool is_dbg_evt(u32 trailer)
{
	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
		ASYNC_EVENT_CODE_QNQ);
}

static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

void be_async_mcc_disable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	adapter->mcc_obj.rearm_cq = false;
	be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

int be_process_mcc(struct be_adapter *adapter)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(compl->flags))
				be_async_link_state_process(adapter,
					(struct be_async_event_link_state *)compl);
			else if (is_grp5_evt(compl->flags))
				be_async_grp5_evt_process(adapter,
					compl->flags, compl);
			else if (is_dbg_evt(compl->flags))
				be_async_dbg_evt_process(adapter,
					compl->flags, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			status = be_mcc_compl_process(adapter, compl);
			atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	if (num)
		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);

	spin_unlock(&adapter->mcc_cq_lock);
	return status;
}

/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout 120000 /* 12s timeout */
	int i, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	for (i = 0; i < mcc_timeout; i++) {
		if (be_error(adapter))
			return -EIO;

		local_bh_disable();
		status = be_process_mcc(adapter);
		local_bh_enable();

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "FW not responding\n");
		adapter->fw_timeout = true;
		return -EIO;
	}
	return status;
}

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	int status;
	struct be_mcc_wrb *wrb;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
	u16 index = mcc_obj->q.head;
	struct be_cmd_resp_hdr *resp;

	index_dec(&index, mcc_obj->q.len);
	wrb = queue_index_node(&mcc_obj->q, index);

	resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);

	be_mcc_notify(adapter);

	status = be_mcc_wait_compl(adapter);
	if (status == -EIO)
		goto out;

	status = resp->status;
out:
	return status;
}

static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	do {
		if (be_error(adapter))
			return -EIO;

		ready = ioread32(db);
		if (ready == 0xffffffff)
			return -1;

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "FW not responding\n");
			adapter->fw_timeout = true;
			be_detect_error(adapter);
			return -1;
		}

		msleep(1);
		msecs++;
	} while (true);

	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
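/* The mailbox is 16-byte aligned, so DMA address bits 0-3 are zero and the
 * two doorbell writes below (address bits 34-63 first, then bits 4-33)
 * together convey the complete address; the MPU_MAILBOX_DB_HI_MASK bit tells
 * the firmware which half is being written.
 */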
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}

static u16 be_POST_stage_get(struct be_adapter *adapter)
{
	u32 sem;

	if (BEx_chip(adapter))
		sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
	else
		pci_read_config_dword(adapter->pdev,
				      SLIPORT_SEMAPHORE_OFFSET_SH, &sem);

	return sem & POST_STAGE_MASK;
}

int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(1000);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

static bool lancer_provisioning_error(struct be_adapter *adapter)
{
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
		sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);

		if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
		    sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
			return true;
	}
	return false;
}

int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	bool resource_error;

	resource_error = lancer_provisioning_error(adapter);
	if (resource_error)
		return -1;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check if the adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
					   SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	/* Stop error recovery if the error is not recoverable.
	 * A "no resource" error is temporary and will go away once the PF
	 * provisions resources.
	 */
	resource_error = lancer_provisioning_error(adapter);
	if (status == -1 && !resource_error)
		adapter->eeh_error = true;

	return status;
}

int be_fw_wait_ready(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;
	struct device *dev = &adapter->pdev->dev;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		return status;
	}

	do {
		stage = be_POST_stage_get(adapter);
		if (stage == POST_STAGE_ARMFW_RDY)
			return 0;

		dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
		if (msleep_interruptible(2000)) {
			dev_err(dev, "Waiting for POST aborted\n");
			return -EINTR;
		}
		timeout += 2;
	} while (timeout < 60);

	dev_err(dev, "POST timeout; stage=0x%x\n", stage);
	return -1;
}

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				   u8 subsystem, u8 opcode, int cmd_len,
				   struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
{
	struct be_sge *sge;
	unsigned long addr = (unsigned long)req_hdr;
	u64 req_addr = addr;

	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;

	wrb->tag0 = req_addr & 0xFFFFFFFF;
	wrb->tag1 = upper_32_bits(req_addr);

	wrb->payload_length = cmd_len;
	if (mem) {
		wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
			MCC_WRB_SGE_CNT_SHIFT;
		sge = nonembedded_sgl(wrb);
		sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
		sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
		sge->len = cpu_to_le32(mem->size);
	} else
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	be_dws_cpu_to_le(wrb, 8);
}

static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
				      struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

/* Converts interrupt delay in microseconds to multiplier value */
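/* Worked example: usec_delay = 96 gives interrupt_rate = 1000000/96 = 10416,
 * so multiplier = ((651042 - 10416) * 10 / 10416 + 5) / 10 = 62 (capped at
 * 1023); usec_delay == 0 maps to multiplier 0, i.e. no coalescing delay.
 */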
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE 651042
	const u32 round = 10;
	u32 multiplier;

	if (usec_delay == 0)
		multiplier = 0;
	else {
		u32 interrupt_rate = 1000000 / usec_delay;
		/* Max delay, corresponding to the lowest interrupt rate */
		if (interrupt_rate == 0)
			multiplier = 1023;
		else {
			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
			multiplier /= interrupt_rate;
			/* Round the multiplier to the closest value. */
			multiplier = (multiplier + round / 2) / round;
			multiplier = min(multiplier, (u32)1023);
		}
	}
	return multiplier;
}

static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;

	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (!mccq->created)
		return NULL;

	if (atomic_read(&mccq->used) >= mccq->len)
		return NULL;

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0x12;
	*wrb++ = 0x34;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0x56;
	*wrb++ = 0x78;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_eq_create(struct be_adapter *adapter,
		     struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4-byte EQE */
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
		      __ilog2_u32(eq->len / 256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
		      eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Use MCC */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			  bool permanent, u32 if_handle, u32 pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
	req->type = MAC_ADDRESS_TYPE_NETWORK;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16)if_handle);
		req->pmac_id = cpu_to_le32(pmac_id);
		req->permanent = 0;
	}

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		    u32 if_id, u32 *pmac_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL);

	req->hdr.domain = domain;
	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);

	if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
		status = -EPERM;

	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	if (pmac_id == -1)
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL);

	req->hdr.domain = dom;
	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
		     struct be_queue_info *eq, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	if (BEx_chip(adapter)) {
		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
			      coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
			      ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
	} else {
		req->hdr.version = 2;
		req->page_size = 1; /* 1 for 4K */
		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
			      no_delay);
		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eventable,
			      ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eqid,
			      ctxt, eq->id);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}

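/* Example: q_len = 256 gives fls(256) = 9, i.e. log2(len) + 1; an encoding
 * of 16 (q_len = 32768) wraps to 0, presumably because the hardware ring
 * size context field holds only 4 bits.
 */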
static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */
	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}

int be_cmd_mccq_ext_create(struct be_adapter *adapter,
			   struct be_queue_info *mccq,
			   struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_ext_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (lancer_chip(adapter)) {
		req->hdr.version = 1;
		req->cq_id = cpu_to_le16(cq->id);

		AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
			      be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
			      ctxt, cq->id);
		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
			      ctxt, 1);
	} else {
		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
			      be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
	}

	/* Subscribe to Link State and Group 5 events (bits 1 and 5 set) */
	req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
	req->async_event_bitmap[0] |= cpu_to_le32(1 << ASYNC_EVENT_CODE_QNQ);
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&adapter->mbox_lock);

	return status;
}

int be_cmd_mccq_org_create(struct be_adapter *adapter,
			   struct be_queue_info *mccq,
			   struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
		      be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_mccq_create(struct be_adapter *adapter,
		       struct be_queue_info *mccq,
		       struct be_queue_info *cq)
{
	int status;

	status = be_cmd_mccq_ext_create(adapter, mccq, cq);
	if (status && !lancer_chip(adapter)) {
		dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
			 "or newer to avoid conflicting priorities between NIC "
			 "and FCoE traffic\n");
		status = be_cmd_mccq_org_create(adapter, mccq, cq);
	}
	return status;
}

int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_tx_create *req;
	struct be_queue_info *txq = &txo->q;
	struct be_queue_info *cq = &txo->cq;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	int status, ver = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL);

	if (lancer_chip(adapter)) {
		req->hdr.version = 1;
		req->if_id = cpu_to_le16(adapter->if_handle);
	} else if (BEx_chip(adapter)) {
		if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
			req->hdr.version = 2;
	} else { /* For SH */
		req->hdr.version = 2;
	}

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;
	req->cq_id = cpu_to_le16(cq->id);
	req->queue_size = be_encoded_q_len(txq->len);
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	ver = req->hdr.version;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
		txq->id = le16_to_cpu(resp->cid);
		if (ver == 2)
			txo->db_offset = le32_to_cpu(resp->db_offset);
		else
			txo->db_offset = DB_TXULP1_OFFSET;
		txq->created = true;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);

	return status;
}

/* Uses MCC */
int be_cmd_rxq_create(struct be_adapter *adapter,
		      struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		      u32 if_id, u32 rss, u8 *rss_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);

	req->cq_id = cpu_to_le16(cq_id);
	req->frag_size = fls(frag_size) - 1;
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
		*rss_id = resp->rss_id;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		     int queue_type)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	u8 subsys = 0, opcode = 0;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		BUG();
	}

	be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
			       NULL);
	req->id = cpu_to_le16(q->id);

	status = be_mbox_notify_wait(adapter);
	q->created = false;

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses MCC */
int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
	req->id = cpu_to_le16(q->id);

	status = be_mcc_notify_wait(adapter);
	q->created = false;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Create an rx filtering policy configuration on an i/f
 * Uses MCCQ
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		     u32 *if_handle, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_create *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), wrb, NULL);
	req->hdr.domain = domain;
	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);

	req->pmac_invalid = true;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
		*if_handle = le32_to_cpu(resp->interface_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses MCCQ */
int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_destroy *req;
	int status;

	if (interface_id == -1)
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL);
	req->hdr.domain = domain;
	req->interface_id = cpu_to_le32(interface_id);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *hdr;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	hdr = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);

	/* version 1 of the cmd is supported by all chips except BE2 */
	if (!BE2_chip(adapter))
		hdr->version = 1;

	be_mcc_notify(adapter);
	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Lancer Stats */
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
			       struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_pport_stats *req;
	int status = 0;

	if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
			    CMD_SUBSYSTEM_ETH))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
			       nonemb_cmd);

	req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
	req->cmd_params.params.reset_stats = 0;

	be_mcc_notify(adapter);
	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

static int be_mac_to_link_speed(int mac_speed)
{
	switch (mac_speed) {
	case PHY_LINK_SPEED_ZERO:
		return 0;
	case PHY_LINK_SPEED_10MBPS:
		return 10;
	case PHY_LINK_SPEED_100MBPS:
		return 100;
	case PHY_LINK_SPEED_1GBPS:
		return 1000;
	case PHY_LINK_SPEED_10GBPS:
		return 10000;
	}
	return 0;
}

/* Uses synchronous mcc
 * Returns link_speed in Mbps
 */
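/* Note: resp->link_speed is in units of 10 Mbps (e.g. a value of 1000 means
 * 10 Gbps); when it is zero, the speed is derived from the PHY's mac_speed
 * enum via be_mac_to_link_speed() above.
 */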
1470 int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
1471 u8 *link_status, u32 dom)
1472 {
1473 struct be_mcc_wrb *wrb;
1474 struct be_cmd_req_link_status *req;
1475 int status;
1476
1477 spin_lock_bh(&adapter->mcc_lock);
1478
1479 if (link_status)
1480 *link_status = LINK_DOWN;
1481
1482 wrb = wrb_from_mccq(adapter);
1483 if (!wrb) {
1484 status = -EBUSY;
1485 goto err;
1486 }
1487 req = embedded_payload(wrb);
1488
1489 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1490 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
1491
1492 /* version 1 of the cmd is not supported only by BE2 */
1493 if (!BE2_chip(adapter))
1494 req->hdr.version = 1;
1495
1496 req->hdr.domain = dom;
1497
1498 status = be_mcc_notify_wait(adapter);
1499 if (!status) {
1500 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
1501 if (link_speed) {
1502 *link_speed = resp->link_speed ?
1503 le16_to_cpu(resp->link_speed) * 10 :
1504 be_mac_to_link_speed(resp->mac_speed);
1505
1506 if (!resp->logical_link_status)
1507 *link_speed = 0;
1508 }
1509 if (link_status)
1510 *link_status = resp->logical_link_status;
1511 }
1512
1513 err:
1514 spin_unlock_bh(&adapter->mcc_lock);
1515 return status;
1516 }
1517
1518 /* Uses synchronous mcc */
1519 int be_cmd_get_die_temperature(struct be_adapter *adapter)
1520 {
1521 struct be_mcc_wrb *wrb;
1522 struct be_cmd_req_get_cntl_addnl_attribs *req;
1523 int status;
1524
1525 spin_lock_bh(&adapter->mcc_lock);
1526
1527 wrb = wrb_from_mccq(adapter);
1528 if (!wrb) {
1529 status = -EBUSY;
1530 goto err;
1531 }
1532 req = embedded_payload(wrb);
1533
1534 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1535 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
1536 wrb, NULL);
1537
1538 be_mcc_notify(adapter);
1539
1540 err:
1541 spin_unlock_bh(&adapter->mcc_lock);
1542 return status;
1543 }
1544
1545 /* Uses synchronous mcc */
1546 int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
1547 {
1548 struct be_mcc_wrb *wrb;
1549 struct be_cmd_req_get_fat *req;
1550 int status;
1551
1552 spin_lock_bh(&adapter->mcc_lock);
1553
1554 wrb = wrb_from_mccq(adapter);
1555 if (!wrb) {
1556 status = -EBUSY;
1557 goto err;
1558 }
1559 req = embedded_payload(wrb);
1560
1561 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1562 OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL);
1563 req->fat_operation = cpu_to_le32(QUERY_FAT);
1564 status = be_mcc_notify_wait(adapter);
1565 if (!status) {
1566 struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
1567 if (log_size && resp->log_size)
1568 *log_size = le32_to_cpu(resp->log_size) -
1569 sizeof(u32);
1570 }
1571 err:
1572 spin_unlock_bh(&adapter->mcc_lock);
1573 return status;
1574 }
1575
1576 void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1577 {
1578 struct be_dma_mem get_fat_cmd;
1579 struct be_mcc_wrb *wrb;
1580 struct be_cmd_req_get_fat *req;
1581 u32 offset = 0, total_size, buf_size,
1582 log_offset = sizeof(u32), payload_len;
1583 int status;
1584
1585 if (buf_len == 0)
1586 return;
1587
1588 total_size = buf_len;
1589
1590 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1591 get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
1592 get_fat_cmd.size,
1593 &get_fat_cmd.dma);
1594 if (!get_fat_cmd.va) {
1595 status = -ENOMEM;
1596 dev_err(&adapter->pdev->dev,
1597 "Memory allocation failure while retrieving FAT data\n");
1598 return;
1599 }
1600
1601 spin_lock_bh(&adapter->mcc_lock);
1602
1603 while (total_size) {
1604 buf_size = min(total_size, (u32)60*1024);
1605 total_size -= buf_size;
1606
1607 wrb = wrb_from_mccq(adapter);
1608 if (!wrb) {
1609 status = -EBUSY;
1610 goto err;
1611 }
1612 req = get_fat_cmd.va;
1613
1614 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
1615 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1616 OPCODE_COMMON_MANAGE_FAT, payload_len, wrb,
1617 &get_fat_cmd);
1618
1619 req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1620 req->read_log_offset = cpu_to_le32(log_offset);
1621 req->read_log_length = cpu_to_le32(buf_size);
1622 req->data_buffer_size = cpu_to_le32(buf_size);
1623
1624 status = be_mcc_notify_wait(adapter);
1625 if (!status) {
1626 struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1627 memcpy(buf + offset,
1628 resp->data_buffer,
1629 le32_to_cpu(resp->read_log_length));
1630 } else {
1631 dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
1632 goto err;
1633 }
1634 offset += buf_size;
1635 log_offset += buf_size;
1636 }
1637 err:
1638 pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1639 get_fat_cmd.va,
1640 get_fat_cmd.dma);
1641 spin_unlock_bh(&adapter->mcc_lock);
1642 }
1643
1644 /* Uses synchronous mcc */
1645 int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
1646 char *fw_on_flash)
1647 {
1648 struct be_mcc_wrb *wrb;
1649 struct be_cmd_req_get_fw_version *req;
1650 int status;
1651
1652 spin_lock_bh(&adapter->mcc_lock);
1653
1654 wrb = wrb_from_mccq(adapter);
1655 if (!wrb) {
1656 status = -EBUSY;
1657 goto err;
1658 }
1659
1660 req = embedded_payload(wrb);
1661
1662 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1663 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL);
1664 status = be_mcc_notify_wait(adapter);
1665 if (!status) {
1666 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1667 strcpy(fw_ver, resp->firmware_version_string);
1668 if (fw_on_flash)
1669 strcpy(fw_on_flash, resp->fw_on_flash_version_string);
1670 }
1671 err:
1672 spin_unlock_bh(&adapter->mcc_lock);
1673 return status;
1674 }
1675
1676 /* set the EQ delay interval of an EQ to specified value
1677 * Uses async mcc
1678 */
1679 int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
1680 {
1681 struct be_mcc_wrb *wrb;
1682 struct be_cmd_req_modify_eq_delay *req;
1683 int status = 0;
1684
1685 spin_lock_bh(&adapter->mcc_lock);
1686
1687 wrb = wrb_from_mccq(adapter);
1688 if (!wrb) {
1689 status = -EBUSY;
1690 goto err;
1691 }
1692 req = embedded_payload(wrb);
1693
1694 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1695 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);
1696
1697 req->num_eq = cpu_to_le32(1);
1698 req->delay[0].eq_id = cpu_to_le32(eq_id);
1699 req->delay[0].phase = 0;
1700 req->delay[0].delay_multiplier = cpu_to_le32(eqd);
1701
1702 be_mcc_notify(adapter);
1703
1704 err:
1705 spin_unlock_bh(&adapter->mcc_lock);
1706 return status;
1707 }
1708
1709 /* Uses sycnhronous mcc */
1710 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1711 u32 num, bool untagged, bool promiscuous)
1712 {
1713 struct be_mcc_wrb *wrb;
1714 struct be_cmd_req_vlan_config *req;
1715 int status;
1716
1717 spin_lock_bh(&adapter->mcc_lock);
1718
1719 wrb = wrb_from_mccq(adapter);
1720 if (!wrb) {
1721 status = -EBUSY;
1722 goto err;
1723 }
1724 req = embedded_payload(wrb);
1725
1726 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1727 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL);
1728
1729 req->interface_id = if_id;
1730 req->promiscuous = promiscuous;
1731 req->untagged = untagged;
1732 req->num_vlan = num;
1733 if (!promiscuous) {
1734 memcpy(req->normal_vlan, vtag_array,
1735 req->num_vlan * sizeof(vtag_array[0]));
1736 }
1737
1738 status = be_mcc_notify_wait(adapter);
1739
1740 err:
1741 spin_unlock_bh(&adapter->mcc_lock);
1742 return status;
1743 }
1744
1745 int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1746 {
1747 struct be_mcc_wrb *wrb;
1748 struct be_dma_mem *mem = &adapter->rx_filter;
1749 struct be_cmd_req_rx_filter *req = mem->va;
1750 int status;
1751
1752 spin_lock_bh(&adapter->mcc_lock);
1753
1754 wrb = wrb_from_mccq(adapter);
1755 if (!wrb) {
1756 status = -EBUSY;
1757 goto err;
1758 }
1759 memset(req, 0, sizeof(*req));
1760 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1761 OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
1762 wrb, mem);
1763
1764 req->if_id = cpu_to_le32(adapter->if_handle);
1765 if (flags & IFF_PROMISC) {
1766 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1767 BE_IF_FLAGS_VLAN_PROMISCUOUS |
1768 BE_IF_FLAGS_MCAST_PROMISCUOUS);
1769 if (value == ON)
1770 req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1771 BE_IF_FLAGS_VLAN_PROMISCUOUS |
1772 BE_IF_FLAGS_MCAST_PROMISCUOUS);
1773 } else if (flags & IFF_ALLMULTI) {
1774 req->if_flags_mask = req->if_flags =
1775 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
1776 } else {
1777 struct netdev_hw_addr *ha;
1778 int i = 0;
1779
1780 req->if_flags_mask = req->if_flags =
1781 cpu_to_le32(BE_IF_FLAGS_MULTICAST);
1782
1783 /* Reset mcast promisc mode if already set by setting mask
1784 * and not setting flags field
1785 */
1786 req->if_flags_mask |=
1787 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
1788 adapter->if_cap_flags);
1789
1790 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
1791 netdev_for_each_mc_addr(ha, adapter->netdev)
1792 memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
1793 }
1794
1795 status = be_mcc_notify_wait(adapter);
1796 err:
1797 spin_unlock_bh(&adapter->mcc_lock);
1798 return status;
1799 }
1800
1801 /* Uses synchrounous mcc */
1802 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1803 {
1804 struct be_mcc_wrb *wrb;
1805 struct be_cmd_req_set_flow_control *req;
1806 int status;
1807
1808 if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
1809 CMD_SUBSYSTEM_COMMON))
1810 return -EPERM;
1811
1812 spin_lock_bh(&adapter->mcc_lock);
1813
1814 wrb = wrb_from_mccq(adapter);
1815 if (!wrb) {
1816 status = -EBUSY;
1817 goto err;
1818 }
1819 req = embedded_payload(wrb);
1820
1821 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1822 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
1823
1824 req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1825 req->rx_flow_control = cpu_to_le16((u16)rx_fc);
1826
1827 status = be_mcc_notify_wait(adapter);
1828
1829 err:
1830 spin_unlock_bh(&adapter->mcc_lock);
1831 return status;
1832 }
1833
1834 /* Uses sycn mcc */
1835 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
1836 {
1837 struct be_mcc_wrb *wrb;
1838 struct be_cmd_req_get_flow_control *req;
1839 int status;
1840
1841 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
1842 CMD_SUBSYSTEM_COMMON))
1843 return -EPERM;
1844
1845 spin_lock_bh(&adapter->mcc_lock);
1846
1847 wrb = wrb_from_mccq(adapter);
1848 if (!wrb) {
1849 status = -EBUSY;
1850 goto err;
1851 }
1852 req = embedded_payload(wrb);
1853
1854 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1855 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
1856
1857 status = be_mcc_notify_wait(adapter);
1858 if (!status) {
1859 struct be_cmd_resp_get_flow_control *resp =
1860 embedded_payload(wrb);
1861 *tx_fc = le16_to_cpu(resp->tx_flow_control);
1862 *rx_fc = le16_to_cpu(resp->rx_flow_control);
1863 }
1864
1865 err:
1866 spin_unlock_bh(&adapter->mcc_lock);
1867 return status;
1868 }
1869
1870 /* Uses mbox */
1871 int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
1872 u32 *mode, u32 *caps, u16 *asic_rev)
1873 {
1874 struct be_mcc_wrb *wrb;
1875 struct be_cmd_req_query_fw_cfg *req;
1876 int status;
1877
1878 if (mutex_lock_interruptible(&adapter->mbox_lock))
1879 return -1;
1880
1881 wrb = wrb_from_mbox(adapter);
1882 req = embedded_payload(wrb);
1883
1884 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1885 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL);
1886
1887 status = be_mbox_notify_wait(adapter);
1888 if (!status) {
1889 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
1890 *port_num = le32_to_cpu(resp->phys_port);
1891 *mode = le32_to_cpu(resp->function_mode);
1892 *caps = le32_to_cpu(resp->function_caps);
1893 *asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
1894 }
1895
1896 mutex_unlock(&adapter->mbox_lock);
1897 return status;
1898 }
1899
1900 /* Uses mbox */
1901 int be_cmd_reset_function(struct be_adapter *adapter)
1902 {
1903 struct be_mcc_wrb *wrb;
1904 struct be_cmd_req_hdr *req;
1905 int status;
1906
1907 if (lancer_chip(adapter)) {
1908 status = lancer_wait_ready(adapter);
1909 if (!status) {
1910 iowrite32(SLI_PORT_CONTROL_IP_MASK,
1911 adapter->db + SLIPORT_CONTROL_OFFSET);
1912 status = lancer_test_and_set_rdy_state(adapter);
1913 }
1914 if (status) {
1915 dev_err(&adapter->pdev->dev,
1916 "Adapter in non recoverable error\n");
1917 }
1918 return status;
1919 }
1920
1921 if (mutex_lock_interruptible(&adapter->mbox_lock))
1922 return -1;
1923
1924 wrb = wrb_from_mbox(adapter);
1925 req = embedded_payload(wrb);
1926
1927 be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
1928 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL);
1929
1930 status = be_mbox_notify_wait(adapter);
1931
1932 mutex_unlock(&adapter->mbox_lock);
1933 return status;
1934 }
1935
1936 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
1937 u32 rss_hash_opts, u16 table_size)
1938 {
1939 struct be_mcc_wrb *wrb;
1940 struct be_cmd_req_rss_config *req;
1941 u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
1942 0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
1943 0x3ea83c02, 0x4a110304};
1944 int status;
1945
1946 if (mutex_lock_interruptible(&adapter->mbox_lock))
1947 return -1;
1948
1949 wrb = wrb_from_mbox(adapter);
1950 req = embedded_payload(wrb);
1951
1952 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1953 OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
1954
1955 req->if_id = cpu_to_le32(adapter->if_handle);
1956 req->enable_rss = cpu_to_le16(rss_hash_opts);
1957 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
1958
1959 if (lancer_chip(adapter) || skyhawk_chip(adapter))
1960 req->hdr.version = 1;
1961
1962 memcpy(req->cpu_table, rsstable, table_size);
1963 memcpy(req->hash, myhash, sizeof(myhash));
1964 be_dws_cpu_to_le(req->hash, sizeof(req->hash));
1965
1966 status = be_mbox_notify_wait(adapter);
1967
1968 mutex_unlock(&adapter->mbox_lock);
1969 return status;
1970 }
1971
1972 /* Uses sync mcc */
1973 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
1974 u8 bcn, u8 sts, u8 state)
1975 {
1976 struct be_mcc_wrb *wrb;
1977 struct be_cmd_req_enable_disable_beacon *req;
1978 int status;
1979
1980 spin_lock_bh(&adapter->mcc_lock);
1981
1982 wrb = wrb_from_mccq(adapter);
1983 if (!wrb) {
1984 status = -EBUSY;
1985 goto err;
1986 }
1987 req = embedded_payload(wrb);
1988
1989 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1990 OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL);
1991
1992 req->port_num = port_num;
1993 req->beacon_state = state;
1994 req->beacon_duration = bcn;
1995 req->status_duration = sts;
1996
1997 status = be_mcc_notify_wait(adapter);
1998
1999 err:
2000 spin_unlock_bh(&adapter->mcc_lock);
2001 return status;
2002 }
2003
2004 /* Uses sync mcc */
2005 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
2006 {
2007 struct be_mcc_wrb *wrb;
2008 struct be_cmd_req_get_beacon_state *req;
2009 int status;
2010
2011 spin_lock_bh(&adapter->mcc_lock);
2012
2013 wrb = wrb_from_mccq(adapter);
2014 if (!wrb) {
2015 status = -EBUSY;
2016 goto err;
2017 }
2018 req = embedded_payload(wrb);
2019
2020 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2021 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL);
2022
2023 req->port_num = port_num;
2024
2025 status = be_mcc_notify_wait(adapter);
2026 if (!status) {
2027 struct be_cmd_resp_get_beacon_state *resp =
2028 embedded_payload(wrb);
2029 *state = resp->beacon_state;
2030 }
2031
2032 err:
2033 spin_unlock_bh(&adapter->mcc_lock);
2034 return status;
2035 }
2036
2037 int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2038 u32 data_size, u32 data_offset,
2039 const char *obj_name, u32 *data_written,
2040 u8 *change_status, u8 *addn_status)
2041 {
2042 struct be_mcc_wrb *wrb;
2043 struct lancer_cmd_req_write_object *req;
2044 struct lancer_cmd_resp_write_object *resp;
2045 void *ctxt = NULL;
2046 int status;
2047
2048 spin_lock_bh(&adapter->mcc_lock);
2049 adapter->flash_status = 0;
2050
2051 wrb = wrb_from_mccq(adapter);
2052 if (!wrb) {
2053 status = -EBUSY;
2054 goto err_unlock;
2055 }
2056
2057 req = embedded_payload(wrb);
2058
2059 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2060 OPCODE_COMMON_WRITE_OBJECT,
2061 sizeof(struct lancer_cmd_req_write_object), wrb,
2062 NULL);
2063
2064 ctxt = &req->context;
2065 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2066 write_length, ctxt, data_size);
2067
2068 if (data_size == 0)
2069 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2070 eof, ctxt, 1);
2071 else
2072 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2073 eof, ctxt, 0);
2074
2075 be_dws_cpu_to_le(ctxt, sizeof(req->context));
2076 req->write_offset = cpu_to_le32(data_offset);
2077 strcpy(req->object_name, obj_name);
2078 req->descriptor_count = cpu_to_le32(1);
2079 req->buf_len = cpu_to_le32(data_size);
2080 req->addr_low = cpu_to_le32((cmd->dma +
2081 sizeof(struct lancer_cmd_req_write_object))
2082 & 0xFFFFFFFF);
2083 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
2084 sizeof(struct lancer_cmd_req_write_object)));
2085
2086 be_mcc_notify(adapter);
2087 spin_unlock_bh(&adapter->mcc_lock);
2088
2089 if (!wait_for_completion_timeout(&adapter->flash_compl,
2090 msecs_to_jiffies(60000)))
2091 status = -ETIMEDOUT;
2092 else
2093 status = adapter->flash_status;
2094
2095 resp = embedded_payload(wrb);
2096 if (!status) {
2097 *data_written = le32_to_cpu(resp->actual_write_len);
2098 *change_status = resp->change_status;
2099 } else {
2100 *addn_status = resp->additional_status;
2101 }
2102
2103 return status;
2104
2105 err_unlock:
2106 spin_unlock_bh(&adapter->mcc_lock);
2107 return status;
2108 }
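
/*
 * Editor's sketch: driving lancer_cmd_write_object() from a flash-update
 * loop. The firmware reports the bytes it accepted in *data_written, and a
 * zero-length final call sets the EOF bit (see the context setup above).
 * LANCER_FW_DOWNLOAD_CHUNK and LANCER_FW_DOWNLOAD_LOCATION are defined in
 * be_main.c; the example_ name is an assumption. The sketch assumes cmd->va
 * is a DMA buffer with one chunk of room past the request header, matching
 * the addr_low/addr_high layout used by the function above.
 */
static inline int example_write_fw(struct be_adapter *adapter,
                                   struct be_dma_mem *cmd,
                                   const u8 *fw, u32 fw_len)
{
        u32 offset = 0, chunk, written = 0;
        u8 change, add_status;
        int status;

        while (offset < fw_len) {
                chunk = min(fw_len - offset,
                            (u32)LANCER_FW_DOWNLOAD_CHUNK);
                memcpy(cmd->va + sizeof(struct lancer_cmd_req_write_object),
                       fw + offset, chunk);
                status = lancer_cmd_write_object(adapter, cmd, chunk, offset,
                                                 LANCER_FW_DOWNLOAD_LOCATION,
                                                 &written, &change,
                                                 &add_status);
                if (status)
                        return status;
                offset += written;
        }
        /* zero-length write signals EOF to the firmware */
        return lancer_cmd_write_object(adapter, cmd, 0, offset,
                                       LANCER_FW_DOWNLOAD_LOCATION,
                                       &written, &change, &add_status);
}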
2109
2110 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2111 u32 data_size, u32 data_offset, const char *obj_name,
2112 u32 *data_read, u32 *eof, u8 *addn_status)
2113 {
2114 struct be_mcc_wrb *wrb;
2115 struct lancer_cmd_req_read_object *req;
2116 struct lancer_cmd_resp_read_object *resp;
2117 int status;
2118
2119 spin_lock_bh(&adapter->mcc_lock);
2120
2121 wrb = wrb_from_mccq(adapter);
2122 if (!wrb) {
2123 status = -EBUSY;
2124 goto err_unlock;
2125 }
2126
2127 req = embedded_payload(wrb);
2128
2129 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2130 OPCODE_COMMON_READ_OBJECT,
2131 sizeof(struct lancer_cmd_req_read_object), wrb,
2132 NULL);
2133
2134 req->desired_read_len = cpu_to_le32(data_size);
2135 req->read_offset = cpu_to_le32(data_offset);
2136 strcpy(req->object_name, obj_name);
2137 req->descriptor_count = cpu_to_le32(1);
2138 req->buf_len = cpu_to_le32(data_size);
2139 req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
2140 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
2141
2142 status = be_mcc_notify_wait(adapter);
2143
2144 resp = embedded_payload(wrb);
2145 if (!status) {
2146 *data_read = le32_to_cpu(resp->actual_read_len);
2147 *eof = le32_to_cpu(resp->eof);
2148 } else {
2149 *addn_status = resp->additional_status;
2150 }
2151
2152 err_unlock:
2153 spin_unlock_bh(&adapter->mcc_lock);
2154 return status;
2155 }
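
/*
 * Editor's sketch: reading an object back until the firmware reports EOF,
 * the pattern used to fetch a flash dump with lancer_cmd_read_object().
 * Each pass reuses the same DMA buffer, so a real caller must consume
 * "read" bytes from cmd->va before the next iteration; example_ names are
 * assumptions.
 */
static inline int example_read_object(struct be_adapter *adapter,
                                      struct be_dma_mem *cmd,
                                      const char *obj_name)
{
        u32 offset = 0, read = 0, eof = 0;
        u8 add_status;
        int status;

        while (!eof) {
                status = lancer_cmd_read_object(adapter, cmd, cmd->size,
                                                offset, obj_name, &read,
                                                &eof, &add_status);
                if (status)
                        return status;
                /* copy "read" bytes out of cmd->va here */
                offset += read;
        }
        return 0;
}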
2156
2157 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
2158 u32 flash_type, u32 flash_opcode, u32 buf_size)
2159 {
2160 struct be_mcc_wrb *wrb;
2161 struct be_cmd_write_flashrom *req;
2162 int status;
2163
2164 spin_lock_bh(&adapter->mcc_lock);
2165 adapter->flash_status = 0;
2166
2167 wrb = wrb_from_mccq(adapter);
2168 if (!wrb) {
2169 status = -EBUSY;
2170 goto err_unlock;
2171 }
2172 req = cmd->va;
2173
2174 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2175 OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd);
2176
2177 req->params.op_type = cpu_to_le32(flash_type);
2178 req->params.op_code = cpu_to_le32(flash_opcode);
2179 req->params.data_buf_size = cpu_to_le32(buf_size);
2180
2181 be_mcc_notify(adapter);
2182 spin_unlock_bh(&adapter->mcc_lock);
2183
2184 if (!wait_for_completion_timeout(&adapter->flash_compl,
2185 msecs_to_jiffies(40000)))
2186 status = -ETIMEDOUT;
2187 else
2188 status = adapter->flash_status;
2189
2190 return status;
2191
2192 err_unlock:
2193 spin_unlock_bh(&adapter->mcc_lock);
2194 return status;
2195 }
2196
2197 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2198 int offset)
2199 {
2200 struct be_mcc_wrb *wrb;
2201 struct be_cmd_read_flash_crc *req;
2202 int status;
2203
2204 spin_lock_bh(&adapter->mcc_lock);
2205
2206 wrb = wrb_from_mccq(adapter);
2207 if (!wrb) {
2208 status = -EBUSY;
2209 goto err;
2210 }
2211 req = embedded_payload(wrb);
2212
2213 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2214 OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
2215 wrb, NULL);
2216
2217 req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT);
2218 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
2219 req->params.offset = cpu_to_le32(offset);
2220 req->params.data_buf_size = cpu_to_le32(0x4);
2221
2222 status = be_mcc_notify_wait(adapter);
2223 if (!status)
2224 memcpy(flashed_crc, req->crc, 4);
2225
2226 err:
2227 spin_unlock_bh(&adapter->mcc_lock);
2228 return status;
2229 }
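
/*
 * Editor's sketch: the usual reason to read the flashed CRC is to skip
 * re-flashing an identical image. The stored 4-byte CRC is compared with
 * the trailing 4 bytes of the candidate image, as the driver's flashing
 * path does for the redboot section; names here are assumptions.
 */
static inline bool example_image_is_current(struct be_adapter *adapter,
                                            const u8 *img, u32 img_size,
                                            int crc_offset)
{
        u8 flashed_crc[4];

        if (be_cmd_get_flash_crc(adapter, flashed_crc, crc_offset))
                return false;

        /* an image carries its CRC in its last 4 bytes */
        return !memcmp(flashed_crc, img + img_size - 4, 4);
}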
2230
2231 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
2232 struct be_dma_mem *nonemb_cmd)
2233 {
2234 struct be_mcc_wrb *wrb;
2235 struct be_cmd_req_acpi_wol_magic_config *req;
2236 int status;
2237
2238 spin_lock_bh(&adapter->mcc_lock);
2239
2240 wrb = wrb_from_mccq(adapter);
2241 if (!wrb) {
2242 status = -EBUSY;
2243 goto err;
2244 }
2245 req = nonemb_cmd->va;
2246
2247 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2248 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb,
2249 nonemb_cmd);
2250 memcpy(req->magic_mac, mac, ETH_ALEN);
2251
2252 status = be_mcc_notify_wait(adapter);
2253
2254 err:
2255 spin_unlock_bh(&adapter->mcc_lock);
2256 return status;
2257 }
2258
2259 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
2260 u8 loopback_type, u8 enable)
2261 {
2262 struct be_mcc_wrb *wrb;
2263 struct be_cmd_req_set_lmode *req;
2264 int status;
2265
2266 spin_lock_bh(&adapter->mcc_lock);
2267
2268 wrb = wrb_from_mccq(adapter);
2269 if (!wrb) {
2270 status = -EBUSY;
2271 goto err;
2272 }
2273
2274 req = embedded_payload(wrb);
2275
2276 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2277 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb,
2278 NULL);
2279
2280 req->src_port = port_num;
2281 req->dest_port = port_num;
2282 req->loopback_type = loopback_type;
2283 req->loopback_state = enable;
2284
2285 status = be_mcc_notify_wait(adapter);
2286 err:
2287 spin_unlock_bh(&adapter->mcc_lock);
2288 return status;
2289 }
2290
2291 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2292 u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
2293 {
2294 struct be_mcc_wrb *wrb;
2295 struct be_cmd_req_loopback_test *req;
2296 int status;
2297
2298 spin_lock_bh(&adapter->mcc_lock);
2299
2300 wrb = wrb_from_mccq(adapter);
2301 if (!wrb) {
2302 status = -EBUSY;
2303 goto err;
2304 }
2305
2306 req = embedded_payload(wrb);
2307
2308 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2309 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
2310 req->hdr.timeout = cpu_to_le32(4);
2311
2312 req->pattern = cpu_to_le64(pattern);
2313 req->src_port = cpu_to_le32(port_num);
2314 req->dest_port = cpu_to_le32(port_num);
2315 req->pkt_size = cpu_to_le32(pkt_size);
2316 req->num_pkts = cpu_to_le32(num_pkts);
2317 req->loopback_type = cpu_to_le32(loopback_type);
2318
2319 status = be_mcc_notify_wait(adapter);
2320 if (!status) {
2321 struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
2322 status = le32_to_cpu(resp->status);
2323 }
2324
2325 err:
2326 spin_unlock_bh(&adapter->mcc_lock);
2327 return status;
2328 }
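
/*
 * Editor's sketch: a self-test invocation as the ethtool path issues it.
 * The loopback-type constants (BE_MAC_LOOPBACK et al.) live in
 * be_ethtool.c; packet size, count, and pattern are arbitrary example
 * values. A non-zero return is either a command failure or the per-test
 * status reported by the firmware.
 */
static inline int example_mac_loopback(struct be_adapter *adapter)
{
        return be_cmd_loopback_test(adapter, adapter->hba_port_num,
                                    BE_MAC_LOOPBACK, 1500, 2, 0xFFFFULL);
}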
2329
2330 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
2331 u32 byte_cnt, struct be_dma_mem *cmd)
2332 {
2333 struct be_mcc_wrb *wrb;
2334 struct be_cmd_req_ddrdma_test *req;
2335 int status;
2336 int i, j = 0;
2337
2338 spin_lock_bh(&adapter->mcc_lock);
2339
2340 wrb = wrb_from_mccq(adapter);
2341 if (!wrb) {
2342 status = -EBUSY;
2343 goto err;
2344 }
2345 req = cmd->va;
2346 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2347 OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);
2348
2349 req->pattern = cpu_to_le64(pattern);
2350 req->byte_count = cpu_to_le32(byte_cnt);
2351 for (i = 0; i < byte_cnt; i++) {
2352 req->snd_buff[i] = (u8)(pattern >> (j*8));
2353 j++;
2354 if (j > 7)
2355 j = 0;
2356 }
2357
2358 status = be_mcc_notify_wait(adapter);
2359
2360 if (!status) {
2361 struct be_cmd_resp_ddrdma_test *resp;
2362 resp = cmd->va;
2363 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
2364 resp->snd_err) {
2365 status = -1;
2366 }
2367 }
2368
2369 err:
2370 spin_unlock_bh(&adapter->mcc_lock);
2371 return status;
2372 }
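
/*
 * Editor's sketch of the layout the DDR DMA test above expects: the 64-bit
 * pattern is replicated byte-by-byte, least-significant byte first, so
 * byte i of snd_buff carries bits ((i % 8) * 8) .. ((i % 8) * 8 + 7) of
 * the pattern. The firmware echoes the buffer back and rcv_buff must
 * match exactly.
 */
static inline u8 example_ddrdma_byte(u64 pattern, u32 i)
{
        return (u8)(pattern >> ((i % 8) * 8));
}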
2373
2374 int be_cmd_get_seeprom_data(struct be_adapter *adapter,
2375 struct be_dma_mem *nonemb_cmd)
2376 {
2377 struct be_mcc_wrb *wrb;
2378 struct be_cmd_req_seeprom_read *req;
2379 int status;
2380
2381 spin_lock_bh(&adapter->mcc_lock);
2382
2383 wrb = wrb_from_mccq(adapter);
2384 if (!wrb) {
2385 status = -EBUSY;
2386 goto err;
2387 }
2388 req = nonemb_cmd->va;
2389
2390 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2391 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
2392 nonemb_cmd);
2393
2394 status = be_mcc_notify_wait(adapter);
2395
2396 err:
2397 spin_unlock_bh(&adapter->mcc_lock);
2398 return status;
2399 }
2400
2401 int be_cmd_get_phy_info(struct be_adapter *adapter)
2402 {
2403 struct be_mcc_wrb *wrb;
2404 struct be_cmd_req_get_phy_info *req;
2405 struct be_dma_mem cmd;
2406 int status;
2407
2408 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
2409 CMD_SUBSYSTEM_COMMON))
2410 return -EPERM;
2411
2412 spin_lock_bh(&adapter->mcc_lock);
2413
2414 wrb = wrb_from_mccq(adapter);
2415 if (!wrb) {
2416 status = -EBUSY;
2417 goto err;
2418 }
2419 cmd.size = sizeof(struct be_cmd_req_get_phy_info);
2420 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2421 &cmd.dma);
2422 if (!cmd.va) {
2423 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2424 status = -ENOMEM;
2425 goto err;
2426 }
2427
2428 req = cmd.va;
2429
2430 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2431 OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
2432 wrb, &cmd);
2433
2434 status = be_mcc_notify_wait(adapter);
2435 if (!status) {
2436 struct be_phy_info *resp_phy_info =
2437 cmd.va + sizeof(struct be_cmd_req_hdr);
2438 adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
2439 adapter->phy.interface_type =
2440 le16_to_cpu(resp_phy_info->interface_type);
2441 adapter->phy.auto_speeds_supported =
2442 le16_to_cpu(resp_phy_info->auto_speeds_supported);
2443 adapter->phy.fixed_speeds_supported =
2444 le16_to_cpu(resp_phy_info->fixed_speeds_supported);
2445 adapter->phy.misc_params =
2446 le32_to_cpu(resp_phy_info->misc_params);
2447 }
2448 pci_free_consistent(adapter->pdev, cmd.size,
2449 cmd.va, cmd.dma);
2450 err:
2451 spin_unlock_bh(&adapter->mcc_lock);
2452 return status;
2453 }
2454
2455 int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
2456 {
2457 struct be_mcc_wrb *wrb;
2458 struct be_cmd_req_set_qos *req;
2459 int status;
2460
2461 spin_lock_bh(&adapter->mcc_lock);
2462
2463 wrb = wrb_from_mccq(adapter);
2464 if (!wrb) {
2465 status = -EBUSY;
2466 goto err;
2467 }
2468
2469 req = embedded_payload(wrb);
2470
2471 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2472 OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
2473
2474 req->hdr.domain = domain;
2475 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
2476 req->max_bps_nic = cpu_to_le32(bps);
2477
2478 status = be_mcc_notify_wait(adapter);
2479
2480 err:
2481 spin_unlock_bh(&adapter->mcc_lock);
2482 return status;
2483 }
2484
2485 int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2486 {
2487 struct be_mcc_wrb *wrb;
2488 struct be_cmd_req_cntl_attribs *req;
2489 struct be_cmd_resp_cntl_attribs *resp;
2490 int status;
2491 int payload_len = max(sizeof(*req), sizeof(*resp));
2492 struct mgmt_controller_attrib *attribs;
2493 struct be_dma_mem attribs_cmd;
2494
2495 if (mutex_lock_interruptible(&adapter->mbox_lock))
2496 return -1;
2497
2498 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2499 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2500 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
2501 &attribs_cmd.dma);
2502 if (!attribs_cmd.va) {
2503 dev_err(&adapter->pdev->dev,
2504 "Memory allocation failure\n");
2505 status = -ENOMEM;
2506 goto err;
2507 }
2508
2509 wrb = wrb_from_mbox(adapter);
2510 if (!wrb) {
2511 status = -EBUSY;
2512 goto err;
2513 }
2514 req = attribs_cmd.va;
2515
2516 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2517 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb,
2518 &attribs_cmd);
2519
2520 status = be_mbox_notify_wait(adapter);
2521 if (!status) {
2522 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
2523 adapter->hba_port_num = attribs->hba_attribs.phy_port;
2524 }
2525
2526 err:
2527 mutex_unlock(&adapter->mbox_lock);
2528 if (attribs_cmd.va)
2529 pci_free_consistent(adapter->pdev, attribs_cmd.size,
2530 attribs_cmd.va, attribs_cmd.dma);
2531 return status;
2532 }
2533
2534 /* Uses mbox */
2535 int be_cmd_req_native_mode(struct be_adapter *adapter)
2536 {
2537 struct be_mcc_wrb *wrb;
2538 struct be_cmd_req_set_func_cap *req;
2539 int status;
2540
2541 if (mutex_lock_interruptible(&adapter->mbox_lock))
2542 return -1;
2543
2544 wrb = wrb_from_mbox(adapter);
2545 if (!wrb) {
2546 status = -EBUSY;
2547 goto err;
2548 }
2549
2550 req = embedded_payload(wrb);
2551
2552 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2553 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL);
2554
2555 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
2556 CAPABILITY_BE3_NATIVE_ERX_API);
2557 req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
2558
2559 status = be_mbox_notify_wait(adapter);
2560 if (!status) {
2561 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
2562 adapter->be3_native = le32_to_cpu(resp->cap_flags) &
2563 CAPABILITY_BE3_NATIVE_ERX_API;
2564 if (!adapter->be3_native)
2565 dev_warn(&adapter->pdev->dev,
2566 "adapter not in advanced mode\n");
2567 }
2568 err:
2569 mutex_unlock(&adapter->mbox_lock);
2570 return status;
2571 }
2572
2573 /* Get privilege(s) for a function */
2574 int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
2575 u32 domain)
2576 {
2577 struct be_mcc_wrb *wrb;
2578 struct be_cmd_req_get_fn_privileges *req;
2579 int status;
2580
2581 spin_lock_bh(&adapter->mcc_lock);
2582
2583 wrb = wrb_from_mccq(adapter);
2584 if (!wrb) {
2585 status = -EBUSY;
2586 goto err;
2587 }
2588
2589 req = embedded_payload(wrb);
2590
2591 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2592 OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
2593 wrb, NULL);
2594
2595 req->hdr.domain = domain;
2596
2597 status = be_mcc_notify_wait(adapter);
2598 if (!status) {
2599 struct be_cmd_resp_get_fn_privileges *resp =
2600 embedded_payload(wrb);
2601 *privilege = le32_to_cpu(resp->privilege_mask);
2602 }
2603
2604 err:
2605 spin_unlock_bh(&adapter->mcc_lock);
2606 return status;
2607 }
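
/*
 * Editor's sketch: caching the privilege mask at init time so that
 * be_cmd_allowed() can veto privileged opcodes for this function later.
 * Domain 0 queries the calling function's own privileges.
 */
static inline int example_cache_privileges(struct be_adapter *adapter)
{
        return be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
}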
2608
2609 /* Uses synchronous MCCQ */
2610 int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
2611 bool *pmac_id_active, u32 *pmac_id, u8 domain)
2612 {
2613 struct be_mcc_wrb *wrb;
2614 struct be_cmd_req_get_mac_list *req;
2615 int status;
2616 int mac_count;
2617 struct be_dma_mem get_mac_list_cmd;
2618 int i;
2619
2620 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
2621 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
2622 get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
2623 get_mac_list_cmd.size,
2624 &get_mac_list_cmd.dma);
2625
2626 if (!get_mac_list_cmd.va) {
2627 dev_err(&adapter->pdev->dev,
2628 "Memory allocation failure during GET_MAC_LIST\n");
2629 return -ENOMEM;
2630 }
2631
2632 spin_lock_bh(&adapter->mcc_lock);
2633
2634 wrb = wrb_from_mccq(adapter);
2635 if (!wrb) {
2636 status = -EBUSY;
2637 goto out;
2638 }
2639
2640 req = get_mac_list_cmd.va;
2641
2642 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2643 OPCODE_COMMON_GET_MAC_LIST,
2644 get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
2645 req->hdr.domain = domain;
2646 req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
2647 req->perm_override = 1;
2648
2649 status = be_mcc_notify_wait(adapter);
2650 if (!status) {
2651 struct be_cmd_resp_get_mac_list *resp =
2652 get_mac_list_cmd.va;
2653 mac_count = resp->true_mac_count + resp->pseudo_mac_count;
2654 /* The MAC list returned may contain one or more active mac_ids
2655 * and/or one or more true or pseudo permanent MAC addresses.
2656 * If an active mac_id is present, return the first active mac_id
2657 * found.
2658 */
2659 for (i = 0; i < mac_count; i++) {
2660 struct get_list_macaddr *mac_entry;
2661 u16 mac_addr_size;
2662 u32 mac_id;
2663
2664 mac_entry = &resp->macaddr_list[i];
2665 mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
2666 /* mac_id is a 32-bit value while a MAC address
2667 * is 6 bytes
2668 */
2669 if (mac_addr_size == sizeof(u32)) {
2670 *pmac_id_active = true;
2671 mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
2672 *pmac_id = le32_to_cpu(mac_id);
2673 goto out;
2674 }
2675 }
2676 /* If no active mac_id found, return first mac addr */
2677 *pmac_id_active = false;
2678 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
2679 ETH_ALEN);
2680 }
2681
2682 out:
2683 spin_unlock_bh(&adapter->mcc_lock);
2684 pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
2685 get_mac_list_cmd.va, get_mac_list_cmd.dma);
2686 return status;
2687 }
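
/*
 * Editor's sketch: handling the two outcomes of be_cmd_get_mac_from_list().
 * When an active mac_id is found, only *pmac_id is valid and mac[] is left
 * untouched; otherwise mac[] holds the first permanent address. The
 * example_ name is an assumption.
 */
static inline int example_query_vf_mac(struct be_adapter *adapter,
                                       u8 *mac, u8 domain)
{
        bool active = false;
        u32 pmac_id = 0;
        int status;

        status = be_cmd_get_mac_from_list(adapter, mac, &active,
                                          &pmac_id, domain);
        if (status)
                return status;

        if (active)
                /* resolve pmac_id with a follow-up MAC-address query */
                dev_dbg(&adapter->pdev->dev, "active mac_id %u\n", pmac_id);
        return 0;
}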
2688
2689 /* Uses synchronous MCCQ */
2690 int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
2691 u8 mac_count, u32 domain)
2692 {
2693 struct be_mcc_wrb *wrb;
2694 struct be_cmd_req_set_mac_list *req;
2695 int status;
2696 struct be_dma_mem cmd;
2697
2698 memset(&cmd, 0, sizeof(struct be_dma_mem));
2699 cmd.size = sizeof(struct be_cmd_req_set_mac_list);
2700 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
2701 &cmd.dma, GFP_KERNEL);
2702 if (!cmd.va)
2703 return -ENOMEM;
2704
2705 spin_lock_bh(&adapter->mcc_lock);
2706
2707 wrb = wrb_from_mccq(adapter);
2708 if (!wrb) {
2709 status = -EBUSY;
2710 goto err;
2711 }
2712
2713 req = cmd.va;
2714 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2715 OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
2716 wrb, &cmd);
2717
2718 req->hdr.domain = domain;
2719 req->mac_count = mac_count;
2720 if (mac_count)
2721 memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
2722
2723 status = be_mcc_notify_wait(adapter);
2724
2725 err:
2726 dma_free_coherent(&adapter->pdev->dev, cmd.size,
2727 cmd.va, cmd.dma);
2728 spin_unlock_bh(&adapter->mcc_lock);
2729 return status;
2730 }
2731
2732 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
2733 u32 domain, u16 intf_id)
2734 {
2735 struct be_mcc_wrb *wrb;
2736 struct be_cmd_req_set_hsw_config *req;
2737 void *ctxt;
2738 int status;
2739
2740 spin_lock_bh(&adapter->mcc_lock);
2741
2742 wrb = wrb_from_mccq(adapter);
2743 if (!wrb) {
2744 status = -EBUSY;
2745 goto err;
2746 }
2747
2748 req = embedded_payload(wrb);
2749 ctxt = &req->context;
2750
2751 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2752 OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL);
2753
2754 req->hdr.domain = domain;
2755 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
2756 if (pvid) {
2757 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
2758 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
2759 }
2760
2761 be_dws_cpu_to_le(req->context, sizeof(req->context));
2762 status = be_mcc_notify_wait(adapter);
2763
2764 err:
2765 spin_unlock_bh(&adapter->mcc_lock);
2766 return status;
2767 }
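
/*
 * Editor's sketch: programming a port VLAN (PVID) for a VF through the
 * hypervisor-switch command above and reading it back. A pvid of 0 leaves
 * pvid_valid clear, so only the non-zero case is shown; vf + 1 follows the
 * driver's domain numbering and vf_cfg->if_handle is the VF's interface.
 */
static inline int example_set_vf_pvid(struct be_adapter *adapter,
                                      struct be_vf_cfg *vf_cfg,
                                      int vf, u16 vlan)
{
        u16 readback = 0;
        int status;

        status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                                       vf_cfg->if_handle);
        if (status)
                return status;

        /* confirm what the switch actually stored */
        return be_cmd_get_hsw_config(adapter, &readback, vf + 1,
                                     vf_cfg->if_handle);
}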
2768
2769 /* Get the hypervisor switch (HSW) config */
2770 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
2771 u32 domain, u16 intf_id)
2772 {
2773 struct be_mcc_wrb *wrb;
2774 struct be_cmd_req_get_hsw_config *req;
2775 void *ctxt;
2776 int status;
2777 u16 vid;
2778
2779 spin_lock_bh(&adapter->mcc_lock);
2780
2781 wrb = wrb_from_mccq(adapter);
2782 if (!wrb) {
2783 status = -EBUSY;
2784 goto err;
2785 }
2786
2787 req = embedded_payload(wrb);
2788 ctxt = &req->context;
2789
2790 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2791 OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);
2792
2793 req->hdr.domain = domain;
2794 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt,
2795 intf_id);
2796 AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
2797 be_dws_cpu_to_le(req->context, sizeof(req->context));
2798
2799 status = be_mcc_notify_wait(adapter);
2800 if (!status) {
2801 struct be_cmd_resp_get_hsw_config *resp =
2802 embedded_payload(wrb);
2803 be_dws_le_to_cpu(&resp->context,
2804 sizeof(resp->context));
2805 vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
2806 pvid, &resp->context);
2807 *pvid = le16_to_cpu(vid);
2808 }
2809
2810 err:
2811 spin_unlock_bh(&adapter->mcc_lock);
2812 return status;
2813 }
2814
2815 int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
2816 {
2817 struct be_mcc_wrb *wrb;
2818 struct be_cmd_req_acpi_wol_magic_config_v1 *req;
2819 int status;
2820 int payload_len = sizeof(*req);
2821 struct be_dma_mem cmd;
2822
2823 if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
2824 CMD_SUBSYSTEM_ETH))
2825 return -EPERM;
2826
2827 if (mutex_lock_interruptible(&adapter->mbox_lock))
2828 return -1;
2829
2830 memset(&cmd, 0, sizeof(struct be_dma_mem));
2831 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
2832 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2833 &cmd.dma);
2834 if (!cmd.va) {
2835 dev_err(&adapter->pdev->dev,
2836 "Memory allocation failure\n");
2837 status = -ENOMEM;
2838 goto err;
2839 }
2840
2841 wrb = wrb_from_mbox(adapter);
2842 if (!wrb) {
2843 status = -EBUSY;
2844 goto err;
2845 }
2846
2847 req = cmd.va;
2848
2849 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2850 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
2851 payload_len, wrb, &cmd);
2852
2853 req->hdr.version = 1;
2854 req->query_options = BE_GET_WOL_CAP;
2855
2856 status = be_mbox_notify_wait(adapter);
2857 if (!status) {
2858 struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
2859 resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va;
2860
2861 /* The command can succeed misleadingly on old firmware that is not
2862 * aware of the V1 request; detect this via the response length. */
2863 if (resp->hdr.response_length < payload_len) {
2864 status = -1;
2865 goto err;
2866 }
2867 adapter->wol_cap = resp->wol_settings;
2868 }
2869 err:
2870 mutex_unlock(&adapter->mbox_lock);
2871 if (cmd.va)
2872 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2873 return status;
2874 }
2875 
2876 int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
2877 struct be_dma_mem *cmd)
2878 {
2879 struct be_mcc_wrb *wrb;
2880 struct be_cmd_req_get_ext_fat_caps *req;
2881 int status;
2882
2883 if (mutex_lock_interruptible(&adapter->mbox_lock))
2884 return -1;
2885
2886 wrb = wrb_from_mbox(adapter);
2887 if (!wrb) {
2888 status = -EBUSY;
2889 goto err;
2890 }
2891
2892 req = cmd->va;
2893 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2894 OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
2895 cmd->size, wrb, cmd);
2896 req->parameter_type = cpu_to_le32(1);
2897
2898 status = be_mbox_notify_wait(adapter);
2899 err:
2900 mutex_unlock(&adapter->mbox_lock);
2901 return status;
2902 }
2903
2904 int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
2905 struct be_dma_mem *cmd,
2906 struct be_fat_conf_params *configs)
2907 {
2908 struct be_mcc_wrb *wrb;
2909 struct be_cmd_req_set_ext_fat_caps *req;
2910 int status;
2911
2912 spin_lock_bh(&adapter->mcc_lock);
2913
2914 wrb = wrb_from_mccq(adapter);
2915 if (!wrb) {
2916 status = -EBUSY;
2917 goto err;
2918 }
2919
2920 req = cmd->va;
2921 memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
2922 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2923 OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
2924 cmd->size, wrb, cmd);
2925
2926 status = be_mcc_notify_wait(adapter);
2927 err:
2928 spin_unlock_bh(&adapter->mcc_lock);
2929 return status;
2930 }
2931
2932 int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name)
2933 {
2934 struct be_mcc_wrb *wrb;
2935 struct be_cmd_req_get_port_name *req;
2936 int status;
2937
2938 if (!lancer_chip(adapter)) {
2939 *port_name = adapter->hba_port_num + '0';
2940 return 0;
2941 }
2942
2943 spin_lock_bh(&adapter->mcc_lock);
2944
2945 wrb = wrb_from_mccq(adapter);
2946 if (!wrb) {
2947 status = -EBUSY;
2948 goto err;
2949 }
2950
2951 req = embedded_payload(wrb);
2952
2953 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2954 OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
2955 NULL);
2956 req->hdr.version = 1;
2957
2958 status = be_mcc_notify_wait(adapter);
2959 if (!status) {
2960 struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
2961 *port_name = resp->port_name[adapter->hba_port_num];
2962 } else {
2963 *port_name = adapter->hba_port_num + '0';
2964 }
2965 err:
2966 spin_unlock_bh(&adapter->mcc_lock);
2967 return status;
2968 }
2969
2970 static struct be_nic_resource_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
2971 u32 max_buf_size)
2972 {
2973 struct be_nic_resource_desc *desc = (struct be_nic_resource_desc *)buf;
2974 int i;
2975
2976 for (i = 0; i < desc_count; i++) {
2977 desc->desc_len = desc->desc_len ? : RESOURCE_DESC_SIZE;
2978 if (((void *)desc + desc->desc_len) >
2979 (void *)(buf + max_buf_size)) {
2980 desc = NULL;
2981 break;
2982 }
2983
2984 if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
2985 desc->desc_type == NIC_RESOURCE_DESC_TYPE_V1)
2986 break;
2987
2988 desc = (void *)desc + desc->desc_len;
2989 }
2990
2991 if (!desc || i == MAX_RESOURCE_DESC)
2992 return NULL;
2993
2994 return desc;
2995 }
2996
2997 /* Uses Mbox */
2998 int be_cmd_get_func_config(struct be_adapter *adapter)
2999 {
3000 struct be_mcc_wrb *wrb;
3001 struct be_cmd_req_get_func_config *req;
3002 int status;
3003 struct be_dma_mem cmd;
3004
3005 if (mutex_lock_interruptible(&adapter->mbox_lock))
3006 return -1;
3007
3008 memset(&cmd, 0, sizeof(struct be_dma_mem));
3009 cmd.size = sizeof(struct be_cmd_resp_get_func_config);
3010 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
3011 &cmd.dma);
3012 if (!cmd.va) {
3013 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3014 status = -ENOMEM;
3015 goto err;
3016 }
3017
3018 wrb = wrb_from_mbox(adapter);
3019 if (!wrb) {
3020 status = -EBUSY;
3021 goto err;
3022 }
3023
3024 req = cmd.va;
3025
3026 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3027 OPCODE_COMMON_GET_FUNC_CONFIG,
3028 cmd.size, wrb, &cmd);
3029
3030 if (skyhawk_chip(adapter))
3031 req->hdr.version = 1;
3032
3033 status = be_mbox_notify_wait(adapter);
3034 if (!status) {
3035 struct be_cmd_resp_get_func_config *resp = cmd.va;
3036 u32 desc_count = le32_to_cpu(resp->desc_count);
3037 struct be_nic_resource_desc *desc;
3038
3039 desc = be_get_nic_desc(resp->func_param, desc_count,
3040 sizeof(resp->func_param));
3041 if (!desc) {
3042 status = -EINVAL;
3043 goto err;
3044 }
3045
3046 adapter->pf_number = desc->pf_num;
3047 adapter->max_pmac_cnt = le16_to_cpu(desc->unicast_mac_count);
3048 adapter->max_vlans = le16_to_cpu(desc->vlan_count);
3049 adapter->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
3050 adapter->max_tx_queues = le16_to_cpu(desc->txq_count);
3051 adapter->max_rss_queues = le16_to_cpu(desc->rssq_count);
3052 adapter->max_rx_queues = le16_to_cpu(desc->rq_count);
3053
3054 adapter->max_event_queues = le16_to_cpu(desc->eq_count);
3055 adapter->if_cap_flags = le32_to_cpu(desc->cap_flags);
3056 }
3057 err:
3058 mutex_unlock(&adapter->mbox_lock);
3059 if (cmd.va)
3060 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
3061 return status;
3062 }
3063
3064 /* Uses mbox */
3065 int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
3066 u8 domain, struct be_dma_mem *cmd)
3067 {
3068 struct be_mcc_wrb *wrb;
3069 struct be_cmd_req_get_profile_config *req;
3070 int status;
3071
3072 if (mutex_lock_interruptible(&adapter->mbox_lock))
3073 return -1;
3074 wrb = wrb_from_mbox(adapter);
3075
3076 req = cmd->va;
3077 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3078 OPCODE_COMMON_GET_PROFILE_CONFIG,
3079 cmd->size, wrb, cmd);
3080
3081 req->type = ACTIVE_PROFILE_TYPE;
3082 req->hdr.domain = domain;
3083 if (!lancer_chip(adapter))
3084 req->hdr.version = 1;
3085
3086 status = be_mbox_notify_wait(adapter);
3087
3088 mutex_unlock(&adapter->mbox_lock);
3089 return status;
3090 }
3091
3092 /* Uses sync mcc */
3093 int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
3094 u8 domain, struct be_dma_mem *cmd)
3095 {
3096 struct be_mcc_wrb *wrb;
3097 struct be_cmd_req_get_profile_config *req;
3098 int status;
3099
3100 spin_lock_bh(&adapter->mcc_lock);
3101
3102 wrb = wrb_from_mccq(adapter);
3103 if (!wrb) {
3104 status = -EBUSY;
3105 goto err;
3106 }
3107
3108 req = cmd->va;
3109 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3110 OPCODE_COMMON_GET_PROFILE_CONFIG,
3111 cmd->size, wrb, cmd);
3112
3113 req->type = ACTIVE_PROFILE_TYPE;
3114 req->hdr.domain = domain;
3115 if (!lancer_chip(adapter))
3116 req->hdr.version = 1;
3117
3118 status = be_mcc_notify_wait(adapter);
3119
3120 err:
3121 spin_unlock_bh(&adapter->mcc_lock);
3122 return status;
3123 }
3124
3125 /* Uses the sync MCCQ if it has already been created; otherwise uses mbox */
3126 int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
3127 u16 *txq_count, u8 domain)
3128 {
3129 struct be_queue_info *mccq = &adapter->mcc_obj.q;
3130 struct be_dma_mem cmd;
3131 int status;
3132
3133 memset(&cmd, 0, sizeof(struct be_dma_mem));
3134 if (!lancer_chip(adapter))
3135 cmd.size = sizeof(struct be_cmd_resp_get_profile_config_v1);
3136 else
3137 cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
3138 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
3139 &cmd.dma);
3140 if (!cmd.va) {
3141 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3142 return -ENOMEM;
3143 }
3144
3145 if (!mccq->created)
3146 status = be_cmd_get_profile_config_mbox(adapter, domain, &cmd);
3147 else
3148 status = be_cmd_get_profile_config_mccq(adapter, domain, &cmd);
3149 if (!status) {
3150 struct be_cmd_resp_get_profile_config *resp = cmd.va;
3151 u32 desc_count = le32_to_cpu(resp->desc_count);
3152 struct be_nic_resource_desc *desc;
3153
3154 desc = be_get_nic_desc(resp->func_param, desc_count,
3155 sizeof(resp->func_param));
3156
3157 if (!desc) {
3158 status = -EINVAL;
3159 goto err;
3160 }
3161 if (cap_flags)
3162 *cap_flags = le32_to_cpu(desc->cap_flags);
3163 if (txq_count)
3164 *txq_count = le16_to_cpu(desc->txq_count);
3165 }
3166 err:
3167 if (cmd.va)
3168 pci_free_consistent(adapter->pdev, cmd.size,
3169 cmd.va, cmd.dma);
3170 return status;
3171 }
3172
3173 /* Uses sync mcc */
3174 int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
3175 u8 domain)
3176 {
3177 struct be_mcc_wrb *wrb;
3178 struct be_cmd_req_set_profile_config *req;
3179 int status;
3180
3181 spin_lock_bh(&adapter->mcc_lock);
3182
3183 wrb = wrb_from_mccq(adapter);
3184 if (!wrb) {
3185 status = -EBUSY;
3186 goto err;
3187 }
3188
3189 req = embedded_payload(wrb);
3190
3191 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3192 OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req),
3193 wrb, NULL);
3194
3195 req->hdr.domain = domain;
3196 req->desc_count = cpu_to_le32(1);
3197
3198 req->nic_desc.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
3199 req->nic_desc.desc_len = RESOURCE_DESC_SIZE;
3200 req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV);
3201 req->nic_desc.pf_num = adapter->pf_number;
3202 req->nic_desc.vf_num = domain;
3203
3204 /* Mark fields invalid */
3205 req->nic_desc.unicast_mac_count = 0xFFFF;
3206 req->nic_desc.mcc_count = 0xFFFF;
3207 req->nic_desc.vlan_count = 0xFFFF;
3208 req->nic_desc.mcast_mac_count = 0xFFFF;
3209 req->nic_desc.txq_count = 0xFFFF;
3210 req->nic_desc.rq_count = 0xFFFF;
3211 req->nic_desc.rssq_count = 0xFFFF;
3212 req->nic_desc.lro_count = 0xFFFF;
3213 req->nic_desc.cq_count = 0xFFFF;
3214 req->nic_desc.toe_conn_count = 0xFFFF;
3215 req->nic_desc.eq_count = 0xFFFF;
3216 req->nic_desc.link_param = 0xFF;
3218 req->nic_desc.acpi_params = 0xFF;
3219 req->nic_desc.wol_param = 0x0F;
3220
3221 /* Change BW */
3222 req->nic_desc.bw_min = cpu_to_le32(bps);
3223 req->nic_desc.bw_max = cpu_to_le32(bps);
3224 status = be_mcc_notify_wait(adapter);
3225 err:
3226 spin_unlock_bh(&adapter->mcc_lock);
3227 return status;
3228 }
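
/*
 * Editor's sketch: the profile command above is how the SR-IOV path caps a
 * VF's TX bandwidth on Lancer; bw_min and bw_max are set equal so the rate
 * acts as a hard cap. The units of "bps" are whatever the firmware expects
 * (the caller converts from the Mbps value given to ndo_set_vf_tx_rate);
 * the example_ name is an assumption.
 */
static inline int example_cap_vf_rate(struct be_adapter *adapter,
                                      int vf, u32 rate)
{
        /* domain 0 is the PF itself; VF n is domain n + 1 */
        return be_cmd_set_profile_config(adapter, rate, vf + 1);
}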
3229
3230 int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
3231 int vf_num)
3232 {
3233 struct be_mcc_wrb *wrb;
3234 struct be_cmd_req_get_iface_list *req;
3235 struct be_cmd_resp_get_iface_list *resp;
3236 int status;
3237
3238 spin_lock_bh(&adapter->mcc_lock);
3239
3240 wrb = wrb_from_mccq(adapter);
3241 if (!wrb) {
3242 status = -EBUSY;
3243 goto err;
3244 }
3245 req = embedded_payload(wrb);
3246
3247 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3248 OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
3249 wrb, NULL);
3250 req->hdr.domain = vf_num + 1;
3251
3252 status = be_mcc_notify_wait(adapter);
3253 if (!status) {
3254 resp = (struct be_cmd_resp_get_iface_list *)req;
3255 vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
3256 }
3257
3258 err:
3259 spin_unlock_bh(&adapter->mcc_lock);
3260 return status;
3261 }
3262
3263 /* Uses sync mcc */
3264 int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
3265 {
3266 struct be_mcc_wrb *wrb;
3267 struct be_cmd_enable_disable_vf *req;
3268 int status;
3269
3270 if (!lancer_chip(adapter))
3271 return 0;
3272
3273 spin_lock_bh(&adapter->mcc_lock);
3274
3275 wrb = wrb_from_mccq(adapter);
3276 if (!wrb) {
3277 status = -EBUSY;
3278 goto err;
3279 }
3280
3281 req = embedded_payload(wrb);
3282
3283 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3284 OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
3285 wrb, NULL);
3286
3287 req->hdr.domain = domain;
3288 req->enable = 1;
3289 status = be_mcc_notify_wait(adapter);
3290 err:
3291 spin_unlock_bh(&adapter->mcc_lock);
3292 return status;
3293 }
3294
3295 int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
3296 {
3297 struct be_mcc_wrb *wrb;
3298 struct be_cmd_req_intr_set *req;
3299 int status;
3300
3301 if (mutex_lock_interruptible(&adapter->mbox_lock))
3302 return -1;
3303
3304 wrb = wrb_from_mbox(adapter);
3305
3306 req = embedded_payload(wrb);
3307
3308 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3309 OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
3310 wrb, NULL);
3311
3312 req->intr_enabled = intr_enable;
3313
3314 status = be_mbox_notify_wait(adapter);
3315
3316 mutex_unlock(&adapter->mbox_lock);
3317 return status;
3318 }
3319
3320 int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
3321 int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
3322 {
3323 struct be_adapter *adapter = netdev_priv(netdev_handle);
3324 struct be_mcc_wrb *wrb;
3325 struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) wrb_payload;
3326 struct be_cmd_req_hdr *req;
3327 struct be_cmd_resp_hdr *resp;
3328 int status;
3329
3330 spin_lock_bh(&adapter->mcc_lock);
3331
3332 wrb = wrb_from_mccq(adapter);
3333 if (!wrb) {
3334 status = -EBUSY;
3335 goto err;
3336 }
3337 req = embedded_payload(wrb);
3338 resp = embedded_payload(wrb);
3339
3340 be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
3341 hdr->opcode, wrb_payload_size, wrb, NULL);
3342 memcpy(req, wrb_payload, wrb_payload_size);
3343 be_dws_cpu_to_le(req, wrb_payload_size);
3344
3345 status = be_mcc_notify_wait(adapter);
3346 if (cmd_status)
3347 *cmd_status = (status & 0xffff);
3348 if (ext_status)
3349 *ext_status = 0;
3350 memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
3351 be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
3352 err:
3353 spin_unlock_bh(&adapter->mcc_lock);
3354 return status;
3355 }
3356 EXPORT_SYMBOL(be_roce_mcc_cmd);
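
/*
 * Editor's sketch: how an attached RoCE driver could push a raw MCC command
 * through the exported hook above. The caller fills the request header the
 * same way be_wrb_cmd_hdr_prepare() will re-derive it (subsystem + opcode);
 * the opcode chosen here and the example_ name are illustrative only.
 */
static inline int example_roce_query(struct net_device *netdev)
{
        struct {
                struct be_cmd_req_hdr hdr;
                u32 rsvd[4];            /* command-specific words */
        } payload = {};
        u16 cmd_status = 0, ext_status = 0;

        payload.hdr.subsystem = CMD_SUBSYSTEM_COMMON;
        payload.hdr.opcode = OPCODE_COMMON_GET_CNTL_ATTRIBUTES;
        return be_roce_mcc_cmd(netdev, &payload, sizeof(payload),
                               &cmd_status, &ext_status);
}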