/* bnx2x_sp.c: Broadcom Everest network driver.
 *
 * Copyright 2011 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Vladislav Zolotarov
 *
 */
#include <linux/module.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32c.h>
#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"

#define BNX2X_MAX_EMUL_MULTI		16

/**** Exe Queue interfaces ****/

/**
 * bnx2x_exe_queue_init - init the Exe Queue object
 *
 * @o:		pointer to the object
 * @exe_len:	length of an execution chunk
 * @owner:	pointer to the owner
 * @validate:	validate function pointer
 * @optimize:	optimize function pointer
 * @exec:	execute function pointer
 * @get:	get function pointer
 */
static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
					struct bnx2x_exe_queue_obj *o,
					int exe_len,
					union bnx2x_qable_obj *owner,
					exe_q_validate validate,
					exe_q_optimize optimize,
					exe_q_execute exec,
					exe_q_get get)
{
	memset(o, 0, sizeof(*o));

	INIT_LIST_HEAD(&o->exe_queue);
	INIT_LIST_HEAD(&o->pending_comp);

	spin_lock_init(&o->lock);

	o->exe_chunk_len = exe_len;
	o->owner = owner;

	/* Owner specific callbacks */
	o->validate = validate;
	o->optimize = optimize;
	o->execute = exec;
	o->get = get;

	DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk "
			 "length of %d\n", exe_len);
}
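
/*
 * Illustrative life cycle of an execution queue object (a sketch, not part
 * of the driver flow itself): the callbacks named here are the ones wired
 * up for MAC rules in bnx2x_init_mac_obj() below; the surrounding setup is
 * assumed.
 *
 *	bnx2x_exe_queue_init(bp, &mac_obj->exe_queue, 1, qable_obj,
 *			     bnx2x_validate_vlan_mac,
 *			     bnx2x_optimize_vlan_mac,
 *			     bnx2x_execute_vlan_mac,
 *			     bnx2x_exeq_get_mac);
 *
 *	elem = bnx2x_exe_queue_alloc_elem(bp);
 *	(fill elem->cmd_data and elem->cmd_len...)
 *	rc = bnx2x_exe_queue_add(bp, &mac_obj->exe_queue, elem, false);
 *	rc = bnx2x_exe_queue_step(bp, &mac_obj->exe_queue, &ramrod_flags);
 */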

static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
					     struct bnx2x_exeq_elem *elem)
{
	DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
	kfree(elem);
}

static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
{
	struct bnx2x_exeq_elem *elem;
	int cnt = 0;

	spin_lock_bh(&o->lock);

	list_for_each_entry(elem, &o->exe_queue, link)
		cnt++;

	spin_unlock_bh(&o->lock);

	return cnt;
}

/**
 * bnx2x_exe_queue_add - add a new element to the execution queue
 *
 * @bp:		driver handle
 * @o:		queue
 * @elem:	new command to add
 * @restore:	true - do not optimize the command
 *
 * If the element is optimized or is illegal, frees it.
 */
static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
				      struct bnx2x_exe_queue_obj *o,
				      struct bnx2x_exeq_elem *elem,
				      bool restore)
{
	int rc;

	spin_lock_bh(&o->lock);

	if (!restore) {
		/* Try to optimize this element away (e.g. an ADD cancelled
		 * by a pending DEL for the same address)
		 */
		rc = o->optimize(bp, o->owner, elem);
		if (rc)
			goto free_and_exit;

		/* Check if this request is ok */
		rc = o->validate(bp, o->owner, elem);
		if (rc) {
			BNX2X_ERR("Preamble failed: %d\n", rc);
			goto free_and_exit;
		}
	}

	/* The command is legal - add it to the execution queue */
	list_add_tail(&elem->link, &o->exe_queue);

	spin_unlock_bh(&o->lock);

	return 0;

free_and_exit:
	bnx2x_exe_queue_free_elem(bp, elem);

	spin_unlock_bh(&o->lock);

	return rc;
}

static inline void __bnx2x_exe_queue_reset_pending(
	struct bnx2x *bp,
	struct bnx2x_exe_queue_obj *o)
{
	struct bnx2x_exeq_elem *elem;

	while (!list_empty(&o->pending_comp)) {
		elem = list_first_entry(&o->pending_comp,
					struct bnx2x_exeq_elem, link);

		list_del(&elem->link);
		bnx2x_exe_queue_free_elem(bp, elem);
	}
}

static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
						 struct bnx2x_exe_queue_obj *o)
{
	spin_lock_bh(&o->lock);

	__bnx2x_exe_queue_reset_pending(bp, o);

	spin_unlock_bh(&o->lock);
}

/**
 * bnx2x_exe_queue_step - execute one execution chunk atomically
 *
 * @bp:			driver handle
 * @o:			queue
 * @ramrod_flags:	flags
 *
 * (Atomicity is ensured using the exe_queue->lock).
 */
static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
				       struct bnx2x_exe_queue_obj *o,
				       unsigned long *ramrod_flags)
{
	struct bnx2x_exeq_elem *elem, spacer;
	int cur_len = 0, rc;

	memset(&spacer, 0, sizeof(spacer));

	spin_lock_bh(&o->lock);

	/*
	 * Next step should not be performed until the current is finished,
	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
	 * properly clear object internals without sending any command to the
	 * FW, which also implies there won't be any completion to clear the
	 * 'pending' list.
	 */
	if (!list_empty(&o->pending_comp)) {
		if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
			DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: "
					 "resetting pending_comp\n");
			__bnx2x_exe_queue_reset_pending(bp, o);
		} else {
			spin_unlock_bh(&o->lock);
			return 1;
		}
	}

	/*
	 * Run through the pending commands list and create a next
	 * execution chunk.
	 */
	while (!list_empty(&o->exe_queue)) {
		elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
					link);
		WARN_ON(!elem->cmd_len);

		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
			cur_len += elem->cmd_len;
			/*
			 * Prevent both lists from being empty while moving an
			 * element. This will allow the call of
			 * bnx2x_exe_queue_empty() without locking.
			 */
			list_add_tail(&spacer.link, &o->pending_comp);
			mb();
			list_del(&elem->link);
			list_add_tail(&elem->link, &o->pending_comp);
			list_del(&spacer.link);
		} else
			break;
	}

	/* Sanity check */
	if (!cur_len) {
		spin_unlock_bh(&o->lock);
		return 0;
	}

	rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
	if (rc < 0)
		/*
		 * In case of an error return the commands back to the queue
		 * and reset the pending_comp.
		 */
		list_splice_init(&o->pending_comp, &o->exe_queue);
	else if (!rc)
		/*
		 * If zero is returned, it means there are no outstanding
		 * pending completions and we may dismiss the pending list.
		 */
		__bnx2x_exe_queue_reset_pending(bp, o);

	spin_unlock_bh(&o->lock);
	return rc;
}

static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
{
	bool empty = list_empty(&o->exe_queue);

	/* Don't reorder!!! */
	mb();

	return empty && list_empty(&o->pending_comp);
}

static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
	struct bnx2x *bp)
{
	DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
	return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
}

/************************ raw_obj functions ***********************************/
static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
{
	return !!test_bit(o->state, o->pstate);
}

static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_clear_bit();
	clear_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}

static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_clear_bit();
	set_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}

/**
 * bnx2x_state_wait - wait until the given bit (state) is cleared
 *
 * @bp:		device handle
 * @state:	state which is to be cleared
 * @pstate:	state buffer
 *
 */
static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
				   unsigned long *pstate)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	if (CHIP_REV_IS_EMUL(bp))
		cnt *= 20;

	DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);

	might_sleep();
	while (cnt--) {
		if (!test_bit(state, pstate)) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(BNX2X_MSG_SP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		usleep_range(1000, 1000);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout waiting for state %d\n", state);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
{
	return bnx2x_state_wait(bp, raw->state, raw->pstate);
}

/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
/* credit handling callbacks */
static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	WARN_ON(!mp);

	return mp->get_entry(mp, offset);
}

static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	WARN_ON(!mp);

	return mp->get(mp, 1);
}

static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	WARN_ON(!vp);

	return vp->get_entry(vp, offset);
}

static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	WARN_ON(!vp);

	return vp->get(vp, 1);
}

static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	if (!mp->get(mp, 1))
		return false;

	if (!vp->get(vp, 1)) {
		mp->put(mp, 1);
		return false;
	}

	return true;
}

static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	return mp->put_entry(mp, offset);
}

static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	return mp->put(mp, 1);
}

static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	return vp->put_entry(vp, offset);
}

static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	return vp->put(vp, 1);
}

static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	if (!mp->put(mp, 1))
		return false;

	if (!vp->put(vp, 1)) {
		mp->get(mp, 1);
		return false;
	}

	return true;
}

/* check_add() callbacks */
static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o,
			       union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	if (!is_valid_ether_addr(data->mac.mac))
		return -EINVAL;

	/* Check if a requested MAC already exists */
	list_for_each_entry(pos, &o->head, link)
		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
			return -EEXIST;

	return 0;
}

static int bnx2x_check_vlan_add(struct bnx2x_vlan_mac_obj *o,
				union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if (data->vlan.vlan == pos->u.vlan.vlan)
			return -EEXIST;

	return 0;
}

static int bnx2x_check_vlan_mac_add(struct bnx2x_vlan_mac_obj *o,
				    union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
			     ETH_ALEN)))
			return -EEXIST;

	return 0;
}


/* check_del() callbacks */
static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_mac_del(struct bnx2x_vlan_mac_obj *o,
			    union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
			return pos;

	return NULL;
}

static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_vlan_del(struct bnx2x_vlan_mac_obj *o,
			     union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if (data->vlan.vlan == pos->u.vlan.vlan)
			return pos;

	return NULL;
}

static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_vlan_mac_del(struct bnx2x_vlan_mac_obj *o,
				 union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
			     ETH_ALEN)))
			return pos;

	return NULL;
}

/* check_move() callback */
static bool bnx2x_check_move(struct bnx2x_vlan_mac_obj *src_o,
			     struct bnx2x_vlan_mac_obj *dst_o,
			     union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	int rc;

	/* Check if we can delete the requested configuration from the first
	 * object.
	 */
	pos = src_o->check_del(src_o, data);

	/* check if configuration can be added */
	rc = dst_o->check_add(dst_o, data);

	/* If this classification cannot be added (it is already set)
	 * or cannot be deleted - return an error.
	 */
	if (rc || !pos)
		return false;

	return true;
}

static bool bnx2x_check_move_always_err(
	struct bnx2x_vlan_mac_obj *src_o,
	struct bnx2x_vlan_mac_obj *dst_o,
	union bnx2x_classification_ramrod_data *data)
{
	return false;
}


static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	u8 rx_tx_flag = 0;

	if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;

	if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;

	return rx_tx_flag;
}

/* LLH CAM line allocations */
enum {
	LLH_CAM_ISCSI_ETH_LINE = 0,
	LLH_CAM_ETH_LINE,
	LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
};

static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp,
				 bool add, unsigned char *dev_addr, int index)
{
	u32 wb_data[2];
	u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
			 NIG_REG_LLH0_FUNC_MEM;

	if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
		return;

	DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
	   (add ? "ADD" : "DELETE"), index);

	if (add) {
		/* LLH_FUNC_MEM is a u64 WB register */
		reg_offset += 8*index;

		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
			      (dev_addr[4] << 8) | dev_addr[5]);
		wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);

		REG_WR_DMAE(bp, reg_offset, wb_data, 2);
	}

	REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
		    NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
}
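
/*
 * Worked example of the WB packing above (purely illustrative): for
 * dev_addr == 00:11:22:33:44:55 the two 32-bit words written to the u64
 * LLH_FUNC_MEM entry are
 *
 *	wb_data[0] = 0x22334455;	- MAC bytes 2..5
 *	wb_data[1] = 0x00000011;	- MAC bytes 0..1
 *
 * i.e. the MAC address is held right-aligned in the 64-bit register.
 */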

/**
 * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
 *
 * @bp:		device handle
 * @o:		queue for which we want to configure this rule
 * @add:	if true the command is an ADD command, DEL otherwise
 * @opcode:	CLASSIFY_RULE_OPCODE_XXX
 * @hdr:	pointer to a header to setup
 *
 */
static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
	struct eth_classify_cmd_header *hdr)
{
	struct bnx2x_raw_obj *raw = &o->raw;

	hdr->client_id = raw->cl_id;
	hdr->func_id = raw->func_id;

	/* Rx or/and Tx (internal switching) configuration ? */
	hdr->cmd_general_data |=
		bnx2x_vlan_mac_get_rx_tx_flag(o);

	if (add)
		hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;

	hdr->cmd_general_data |=
		(opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
}

/**
 * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
 *
 * @cid:	connection id
 * @type:	BNX2X_FILTER_XXX_PENDING
 * @hdr:	pointer to header to setup
 * @rule_cnt:	number of rules in this ramrod data
 *
 * Currently we always configure one rule; the echo field is set to contain
 * a CID and an opcode type.
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
				struct eth_classify_header *hdr, int rule_cnt)
{
	hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT);
	hdr->rule_cnt = (u8)rule_cnt;
}
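
/*
 * Sketch of how the echo encoding above is consumed (illustrative; the
 * actual completion flow lives elsewhere in the driver): given an echo
 * built from a software CID and a BNX2X_FILTER_XXX_PENDING type, both
 * fields can be recovered with the same mask/shift pair:
 *
 *	cid  = hdr->echo & BNX2X_SWCID_MASK;
 *	type = hdr->echo >> BNX2X_SWCID_SHIFT;
 */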

/* hw_config() callbacks */
static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
				 struct bnx2x_vlan_mac_obj *o,
				 struct bnx2x_exeq_elem *elem, int rule_idx,
				 int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
	u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;

	/*
	 * Set LLH CAM entry: currently only iSCSI and ETH macs are
	 * relevant. In addition, current implementation is tuned for a
	 * single ETH MAC.
	 *
	 * When a PF configuration with multiple unicast ETH MACs in
	 * switch-independent mode is required (NetQ, multiple netdev MACs,
	 * etc.), consider better utilisation of the 8 per-function MAC
	 * entries in the LLH register. There are also
	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that bring the total
	 * number of CAM entries to 16.
	 *
	 * Currently we won't configure NIG for MACs other than a primary ETH
	 * MAC and iSCSI L2 MAC.
	 *
	 * If this MAC is moving from one Queue to another, no need to change
	 * the NIG configuration.
	 */
	if (cmd != BNX2X_VLAN_MAC_MOVE) {
		if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac,
					     LLH_CAM_ISCSI_ETH_LINE);
		else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac, LLH_CAM_ETH_LINE);
	}

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Setup a command header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
				      &rule_entry->mac.header);

	DP(BNX2X_MSG_SP, "About to %s MAC "BNX2X_MAC_FMT" for "
			 "Queue %d\n", (add ? "add" : "delete"),
	   BNX2X_MAC_PRN_LIST(mac), raw->cl_id);

	/* Set a MAC itself */
	bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
			      &rule_entry->mac.mac_mid,
			      &rule_entry->mac.mac_lsb, mac);

	/* MOVE: Add a rule that will add this MAC to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					true, CLASSIFY_RULE_OPCODE_MAC,
					&rule_entry->mac.header);

		/* Set a MAC itself */
		bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
				      &rule_entry->mac.mac_mid,
				      &rule_entry->mac.mac_lsb, mac);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
	   writing */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

/**
 * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
 *
 * @bp:		device handle
 * @o:		queue
 * @type:	BNX2X_FILTER_XXX_PENDING
 * @cam_offset:	offset in cam memory
 * @hdr:	pointer to a header to setup
 *
 * E1/E1H
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
	struct mac_configuration_hdr *hdr)
{
	struct bnx2x_raw_obj *r = &o->raw;

	hdr->length = 1;
	hdr->offset = (u8)cam_offset;
	hdr->client_id = 0xff;
	hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT));
}

static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
	u16 vlan_id, struct mac_configuration_entry *cfg_entry)
{
	struct bnx2x_raw_obj *r = &o->raw;
	u32 cl_bit_vec = (1 << r->cl_id);

	cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
	cfg_entry->pf_id = r->func_id;
	cfg_entry->vlan_id = cpu_to_le16(vlan_id);

	if (add) {
		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);
		SET_FLAG(cfg_entry->flags,
			 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);

		/* Set a MAC in a ramrod data */
		bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
				      &cfg_entry->middle_mac_addr,
				      &cfg_entry->lsb_mac_addr, mac);
	} else
		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);
}

static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
	u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
{
	struct mac_configuration_entry *cfg_entry = &config->config_table[0];
	struct bnx2x_raw_obj *raw = &o->raw;

	bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
					 &config->hdr);
	bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
					 cfg_entry);

	DP(BNX2X_MSG_SP, "%s MAC "BNX2X_MAC_FMT" CLID %d CAM offset %d\n",
			 (add ? "setting" : "clearing"),
	   BNX2X_MAC_PRN_LIST(mac), raw->cl_id, cam_offset);
}

/**
 * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
 *
 * @bp:		device handle
 * @o:		bnx2x_vlan_mac_obj
 * @elem:	bnx2x_exeq_elem
 * @rule_idx:	rule_idx
 * @cam_offset: cam_offset
 */
static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  struct bnx2x_exeq_elem *elem, int rule_idx,
				  int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)(raw->rdata);
	/*
	 * 57710 and 57711 do not support MOVE command,
	 * so it's either ADD or DEL
	 */
	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		true : false;

	/* Reset the ramrod data buffer */
	memset(config, 0, sizeof(*config));

	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_MAC_PENDING,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.mac.mac, 0,
				     ETH_VLAN_FILTER_ANY_VLAN, config);
}

static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  struct bnx2x_exeq_elem *elem, int rule_idx,
				  int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	int cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Set a rule header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
				      &rule_entry->vlan.header);

	DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
			 vlan);

	/* Set a VLAN itself */
	rule_entry->vlan.vlan = cpu_to_le16(vlan);

	/* MOVE: Add a rule that will add this VLAN to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					true, CLASSIFY_RULE_OPCODE_VLAN,
					&rule_entry->vlan.header);

		/* Set a VLAN itself */
		rule_entry->vlan.vlan = cpu_to_le16(vlan);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
	   writing */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
				      struct bnx2x_vlan_mac_obj *o,
				      struct bnx2x_exeq_elem *elem,
				      int rule_idx, int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	int cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
	u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Set a rule header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
				      &rule_entry->pair.header);

	/* Set VLAN and MAC themselves */
	rule_entry->pair.vlan = cpu_to_le16(vlan);
	bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
			      &rule_entry->pair.mac_mid,
			      &rule_entry->pair.mac_lsb, mac);

	/* MOVE: Add a rule that will add this pair to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					true, CLASSIFY_RULE_OPCODE_PAIR,
					&rule_entry->pair.header);

		/* Set a VLAN-MAC pair itself */
		rule_entry->pair.vlan = cpu_to_le16(vlan);
		bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
				      &rule_entry->pair.mac_mid,
				      &rule_entry->pair.mac_lsb, mac);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
	   writing */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

/**
 * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data (E1H)
 *
 * @bp:		device handle
 * @o:		bnx2x_vlan_mac_obj
 * @elem:	bnx2x_exeq_elem
 * @rule_idx:	rule_idx
 * @cam_offset:	cam_offset
 */
static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
				       struct bnx2x_vlan_mac_obj *o,
				       struct bnx2x_exeq_elem *elem,
				       int rule_idx, int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)(raw->rdata);
	/*
	 * 57710 and 57711 do not support MOVE command,
	 * so it's either ADD or DEL
	 */
	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		true : false;

	/* Reset the ramrod data buffer */
	memset(config, 0, sizeof(*config));

	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
				     ETH_VLAN_FILTER_CLASSIFY, config);
}

#define list_next_entry(pos, member) \
	list_entry((pos)->member.next, typeof(*(pos)), member)

/**
 * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
 *
 * @bp:		device handle
 * @p:		command parameters
 * @ppos:	pointer to the cookie
 *
 * reconfigure next MAC/VLAN/VLAN-MAC element from the
 * previously configured elements list.
 *
 * from command parameters only the RAMROD_COMP_WAIT bit in ramrod_flags is
 * taken into account
 *
 * pointer to the cookie - it should be given back in the next call to make
 * the function handle the next element. If *ppos is set to NULL it will
 * restart the iterator. If the returned *ppos == NULL this means that the
 * last element has been handled.
 *
 */
static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
			   struct bnx2x_vlan_mac_ramrod_params *p,
			   struct bnx2x_vlan_mac_registry_elem **ppos)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;

	/* If list is empty - there is nothing to do here */
	if (list_empty(&o->head)) {
		*ppos = NULL;
		return 0;
	}

	/* make a step... */
	if (*ppos == NULL)
		*ppos = list_first_entry(&o->head,
					 struct bnx2x_vlan_mac_registry_elem,
					 link);
	else
		*ppos = list_next_entry(*ppos, link);

	pos = *ppos;

	/* If it's the last step - return NULL */
	if (list_is_last(&pos->link, &o->head))
		*ppos = NULL;

	/* Prepare a 'user_req' */
	memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));

	/* Set the command */
	p->user_req.cmd = BNX2X_VLAN_MAC_ADD;

	/* Set vlan_mac_flags */
	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;

	/* Set a restore bit */
	__set_bit(RAMROD_RESTORE, &p->ramrod_flags);

	return bnx2x_config_vlan_mac(bp, p);
}
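
/*
 * Illustrative usage of the restore iterator above (a sketch under the
 * assumption that RAMROD_COMP_WAIT is set in p.ramrod_flags, so each call
 * completes before returning): pass a NULL cookie on the first call and
 * keep feeding back the returned cookie until it comes back NULL:
 *
 *	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
 *	int rc;
 *
 *	do {
 *		rc = bnx2x_vlan_mac_restore(bp, &p, &pos);
 *	} while (rc >= 0 && pos);
 */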

/*
 * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
 * pointer to an element with specific criteria and NULL if such an element
 * hasn't been found.
 */
static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_vlan_mac_ramrod_data *data =
		&elem->cmd_data.vlan_mac.u.vlan_mac;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

/**
 * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
 *
 * @bp:		device handle
 * @qo:		bnx2x_qable_obj
 * @elem:	bnx2x_exeq_elem
 *
 * Checks that the requested configuration can be added. If yes and if
 * requested, consume CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 *
 */
static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
					      union bnx2x_qable_obj *qo,
					      struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	int rc;

	/* Check the registry */
	rc = o->check_add(o, &elem->cmd_data.vlan_mac.u);
	if (rc) {
		DP(BNX2X_MSG_SP, "ADD command is not allowed considering "
				 "current registry state\n");
		return rc;
	}

	/*
	 * Check if there is a pending ADD command for this
	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
	 */
	if (exeq->get(exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
		return -EEXIST;
	}

	/*
	 * TODO: Check the pending MOVE from other objects where this
	 * object is a destination object.
	 */

	/* Consume the credit, unless explicitly asked not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->get_credit(o)))
		return -EINVAL;

	return 0;
}

/**
 * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
 *
 * @bp:		device handle
 * @qo:		qable object to check
 * @elem:	element that needs to be deleted
 *
 * Checks that the requested configuration can be deleted. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
					      union bnx2x_qable_obj *qo,
					      struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_vlan_mac_registry_elem *pos;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_exeq_elem query_elem;

	/* If this classification cannot be deleted (doesn't exist)
	 * - return -EEXIST.
	 */
	pos = o->check_del(o, &elem->cmd_data.vlan_mac.u);
	if (!pos) {
		DP(BNX2X_MSG_SP, "DEL command is not allowed considering "
				 "current registry state\n");
		return -EEXIST;
	}

	/*
	 * Check if there are pending DEL or MOVE commands for this
	 * MAC/VLAN/VLAN-MAC. Return an error if so.
	 */
	memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check for MOVE commands */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
	if (exeq->get(exeq, &query_elem)) {
		BNX2X_ERR("There is a pending MOVE command already\n");
		return -EINVAL;
	}

	/* Check for DEL commands */
	if (exeq->get(exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
		return -EEXIST;
	}

	/* Return the credit to the credit pool, unless asked not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->put_credit(o))) {
		BNX2X_ERR("Failed to return a credit\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
 *
 * @bp:		device handle
 * @qo:		qable object to check (source)
 * @elem:	element that needs to be moved
 *
 * Checks that the requested configuration can be moved. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
					       union bnx2x_qable_obj *qo,
					       struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
	struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
	struct bnx2x_exeq_elem query_elem;
	struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
	struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;

	/*
	 * Check if we can perform this operation based on the current registry
	 * state.
	 */
	if (!src_o->check_move(src_o, dest_o, &elem->cmd_data.vlan_mac.u)) {
		DP(BNX2X_MSG_SP, "MOVE command is not allowed considering "
				 "current registry state\n");
		return -EINVAL;
	}

	/*
	 * Check if there is an already pending DEL or MOVE command for the
	 * source object or an ADD command for a destination object. Return an
	 * error if so.
	 */
	memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check DEL on source */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
	if (src_exeq->get(src_exeq, &query_elem)) {
		BNX2X_ERR("There is a pending DEL command on the source "
			  "queue already\n");
		return -EINVAL;
	}

	/* Check MOVE on source */
	if (src_exeq->get(src_exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
		return -EEXIST;
	}

	/* Check ADD on destination */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
	if (dest_exeq->get(dest_exeq, &query_elem)) {
		BNX2X_ERR("There is a pending ADD command on the "
			  "destination queue already\n");
		return -EINVAL;
	}

	/* Consume the credit, unless explicitly asked not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      dest_o->get_credit(dest_o)))
		return -EINVAL;

	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      src_o->put_credit(src_o))) {
		/* return the credit taken from dest... */
		dest_o->put_credit(dest_o);
		return -EINVAL;
	}

	return 0;
}

static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
				   union bnx2x_qable_obj *qo,
				   struct bnx2x_exeq_elem *elem)
{
	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
		return bnx2x_validate_vlan_mac_add(bp, qo, elem);
	case BNX2X_VLAN_MAC_DEL:
		return bnx2x_validate_vlan_mac_del(bp, qo, elem);
	case BNX2X_VLAN_MAC_MOVE:
		return bnx2x_validate_vlan_mac_move(bp, qo, elem);
	default:
		return -EINVAL;
	}
}

/**
 * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
 *
 * @bp:		device handle
 * @o:		bnx2x_vlan_mac_obj
 *
 */
static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
			       struct bnx2x_vlan_mac_obj *o)
{
	int cnt = 5000, rc;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_raw_obj *raw = &o->raw;

	while (cnt--) {
		/* Wait for the current command to complete */
		rc = raw->wait_comp(bp, raw);
		if (rc)
			return rc;

		/* Wait until there are no pending commands */
		if (!bnx2x_exe_queue_empty(exeq))
			usleep_range(1000, 1000);
		else
			return 0;
	}

	return -EBUSY;
}

/**
 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
 *
 * @bp:			device handle
 * @o:			bnx2x_vlan_mac_obj
 * @cqe:		completion element
 * @ramrod_flags:	if RAMROD_CONT is set, the next bulk of pending
 *			commands will be executed
 *
 */
static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
				   struct bnx2x_vlan_mac_obj *o,
				   union event_ring_elem *cqe,
				   unsigned long *ramrod_flags)
{
	struct bnx2x_raw_obj *r = &o->raw;
	int rc;

	/* Reset pending list */
	bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);

	/* Clear pending */
	r->clear_pending(r);

	/* If ramrod failed this is most likely a SW bug */
	if (cqe->message.error)
		return -EINVAL;

	/* Run the next bulk of pending commands if requested */
	if (test_bit(RAMROD_CONT, ramrod_flags)) {
		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* If there is more work to do return PENDING */
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
		return 1;

	return 0;
}

/**
 * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
 *
 * @bp:		device handle
 * @qo:		bnx2x_qable_obj
 * @elem:	bnx2x_exeq_elem
 */
static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
				   union bnx2x_qable_obj *qo,
				   struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem query, *pos;
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;

	memcpy(&query, elem, sizeof(query));

	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
		break;
	case BNX2X_VLAN_MAC_DEL:
		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
		break;
	default:
		/* Don't handle anything other than ADD or DEL */
		return 0;
	}

	/* If we found the appropriate element - delete it */
	pos = exeq->get(exeq, &query);
	if (pos) {

		/* Return the credit of the optimized command */
		if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
			      &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
			if ((query.cmd_data.vlan_mac.cmd ==
			     BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
				BNX2X_ERR("Failed to return the credit for the "
					  "optimized ADD command\n");
				return -EINVAL;
			} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
				BNX2X_ERR("Failed to recover the credit from "
					  "the optimized DEL command\n");
				return -EINVAL;
			}
		}

		DP(BNX2X_MSG_SP, "Optimizing %s command\n",
		   (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		   "ADD" : "DEL");

		list_del(&pos->link);
		bnx2x_exe_queue_free_elem(bp, pos);
		return 1;
	}

	return 0;
}
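
/*
 * Example of the optimization above (illustrative): if the execution queue
 * still holds a not-yet-executed ADD for some MAC and a DEL for the same
 * MAC arrives, the pending ADD is removed from the queue and its CAM credit
 * is returned instead of sending both commands to the FW. The returned 1
 * then makes bnx2x_exe_queue_add() free the new DEL element as well, so the
 * pair cancels out completely.
 */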

/**
 * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
 *
 * @bp:		device handle
 * @o:		vlan_mac object the element belongs to
 * @elem:	execution queue element describing the command
 * @restore:	true if this is a restore flow
 * @re:		output parameter: the prepared registry element
 *
 * prepare a registry element according to the current command request.
 */
static inline int bnx2x_vlan_mac_get_registry_elem(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o,
	struct bnx2x_exeq_elem *elem,
	bool restore,
	struct bnx2x_vlan_mac_registry_elem **re)
{
	int cmd = elem->cmd_data.vlan_mac.cmd;
	struct bnx2x_vlan_mac_registry_elem *reg_elem;

	/* Allocate a new registry element if needed. */
	if (!restore &&
	    ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
		reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
		if (!reg_elem)
			return -ENOMEM;

		/* Get a new CAM offset */
		if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
			/*
			 * This should never happen, because we have checked
			 * the CAM availability in the 'validate'.
			 */
			WARN_ON(1);
			kfree(reg_elem);
			return -EINVAL;
		}

		DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);

		/* Set a VLAN-MAC data */
		memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
		       sizeof(reg_elem->u));

		/* Copy the flags (needed for DEL and RESTORE flows) */
		reg_elem->vlan_mac_flags =
			elem->cmd_data.vlan_mac.vlan_mac_flags;
	} else /* DEL, RESTORE */
		reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);

	*re = reg_elem;
	return 0;
}

/**
 * bnx2x_execute_vlan_mac - execute vlan mac command
 *
 * @bp:			device handle
 * @qo:			qable object (the vlan_mac object)
 * @exe_chunk:		chunk of commands to execute
 * @ramrod_flags:	execution flags
 *
 * go and send a ramrod!
 */
static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
				  union bnx2x_qable_obj *qo,
				  struct list_head *exe_chunk,
				  unsigned long *ramrod_flags)
{
	struct bnx2x_exeq_elem *elem;
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
	struct bnx2x_raw_obj *r = &o->raw;
	int rc, idx = 0;
	bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
	bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
	struct bnx2x_vlan_mac_registry_elem *reg_elem;
	int cmd;

	/*
	 * If DRIVER_ONLY execution is requested, cleanup a registry
	 * and exit. Otherwise send a ramrod to FW.
	 */
	if (!drv_only) {
		WARN_ON(r->check_pending(r));

		/* Set pending */
		r->set_pending(r);

		/* Fill the ramrod data */
		list_for_each_entry(elem, exe_chunk, link) {
			cmd = elem->cmd_data.vlan_mac.cmd;
			/*
			 * We will add to the target object in MOVE command, so
			 * change the object for a CAM search.
			 */
			if (cmd == BNX2X_VLAN_MAC_MOVE)
				cam_obj = elem->cmd_data.vlan_mac.target_obj;
			else
				cam_obj = o;

			rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
							      elem, restore,
							      &reg_elem);
			if (rc)
				goto error_exit;

			WARN_ON(!reg_elem);

			/* Push a new entry into the registry */
			if (!restore &&
			    ((cmd == BNX2X_VLAN_MAC_ADD) ||
			    (cmd == BNX2X_VLAN_MAC_MOVE)))
				list_add(&reg_elem->link, &cam_obj->head);

			/* Configure a single command in a ramrod data buffer */
			o->set_one_rule(bp, o, elem, idx,
					reg_elem->cam_offset);

			/* MOVE command consumes 2 entries in the ramrod data */
			if (cmd == BNX2X_VLAN_MAC_MOVE)
				idx += 2;
			else
				idx++;
		}

		/*
		 * No need for an explicit memory barrier here: we would only
		 * need to ensure the ordering of writing to the SPQ element
		 * and updating of the SPQ producer, which involves a memory
		 * read, and the required full memory barrier is already there
		 * (inside bnx2x_sp_post()).
		 */

		rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
				   U64_HI(r->rdata_mapping),
				   U64_LO(r->rdata_mapping),
				   ETH_CONNECTION_TYPE);
		if (rc)
			goto error_exit;
	}

	/* Now, when we are done with the ramrod - clean up the registry */
	list_for_each_entry(elem, exe_chunk, link) {
		cmd = elem->cmd_data.vlan_mac.cmd;
		if ((cmd == BNX2X_VLAN_MAC_DEL) ||
		    (cmd == BNX2X_VLAN_MAC_MOVE)) {
			reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);

			WARN_ON(!reg_elem);

			o->put_cam_offset(o, reg_elem->cam_offset);
			list_del(&reg_elem->link);
			kfree(reg_elem);
		}
	}

	if (!drv_only)
		return 1;
	else
		return 0;

error_exit:
	r->clear_pending(r);

	/* Cleanup a registry in case of a failure */
	list_for_each_entry(elem, exe_chunk, link) {
		cmd = elem->cmd_data.vlan_mac.cmd;

		if (cmd == BNX2X_VLAN_MAC_MOVE)
			cam_obj = elem->cmd_data.vlan_mac.target_obj;
		else
			cam_obj = o;

		/* Delete all newly added above entries */
		if (!restore &&
		    ((cmd == BNX2X_VLAN_MAC_ADD) ||
		    (cmd == BNX2X_VLAN_MAC_MOVE))) {
			reg_elem = o->check_del(cam_obj,
						&elem->cmd_data.vlan_mac.u);
			if (reg_elem) {
				list_del(&reg_elem->link);
				kfree(reg_elem);
			}
		}
	}

	return rc;
}

static inline int bnx2x_vlan_mac_push_new_cmd(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_ramrod_params *p)
{
	struct bnx2x_exeq_elem *elem;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
	bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);

	/* Allocate the execution queue element */
	elem = bnx2x_exe_queue_alloc_elem(bp);
	if (!elem)
		return -ENOMEM;

	/* Set the command 'length' */
	switch (p->user_req.cmd) {
	case BNX2X_VLAN_MAC_MOVE:
		elem->cmd_len = 2;
		break;
	default:
		elem->cmd_len = 1;
	}

	/* Fill the object specific info */
	memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));

	/* Try to add a new command to the pending list */
	return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
}

/**
 * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
 *
 * @bp:	device handle
 * @p:	command parameters
 *
 */
int bnx2x_config_vlan_mac(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_ramrod_params *p)
{
	int rc = 0;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
	unsigned long *ramrod_flags = &p->ramrod_flags;
	bool cont = test_bit(RAMROD_CONT, ramrod_flags);
	struct bnx2x_raw_obj *raw = &o->raw;

	/*
	 * Add new elements to the execution list for commands that require it.
	 */
	if (!cont) {
		rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
		if (rc)
			return rc;
	}

	/*
	 * If nothing will be executed further in this iteration we want to
	 * return PENDING if there are pending commands
	 */
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
		rc = 1;

	if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
		DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: "
				 "clearing a pending bit.\n");
		raw->clear_pending(raw);
	}

	/* Execute commands if required */
	if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
	    test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/*
	 * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set,
	 * then the user wants to wait until the last command is done.
	 */
	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
		/*
		 * Wait maximum for the current exe_queue length iterations plus
		 * one (for the current pending command).
		 */
		int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;

		while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
		       max_iterations--) {

			/* Wait for the current command to complete */
			rc = raw->wait_comp(bp, raw);
			if (rc)
				return rc;

			/* Make a next step */
			rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
						  ramrod_flags);
			if (rc < 0)
				return rc;
		}

		return 0;
	}

	return rc;
}
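
/*
 * Usage sketch for bnx2x_config_vlan_mac() (illustrative only; 'mac_obj'
 * and 'addr' are assumed to come from the caller, and the object is
 * assumed to have been set up via bnx2x_init_mac_obj()):
 *
 *	struct bnx2x_vlan_mac_ramrod_params p;
 *
 *	memset(&p, 0, sizeof(p));
 *	p.vlan_mac_obj = mac_obj;
 *	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
 *	memcpy(p.user_req.u.mac.mac, addr, ETH_ALEN);
 *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *
 *	rc = bnx2x_config_vlan_mac(bp, &p);	- 0 on success
 */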


/**
 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
 *
 * @bp:			device handle
 * @o:			vlan_mac object
 * @vlan_mac_flags:	vlan_mac_flags spec of the elements to delete
 * @ramrod_flags:	execution flags to be used for this deletion
 *
 * Returns 0 if the last operation has completed successfully and there are no
 * more elements left, a positive value if the last operation has completed
 * successfully and there are more previously configured elements, and a
 * negative value if the current operation has failed.
 */
static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  unsigned long *vlan_mac_flags,
				  unsigned long *ramrod_flags)
{
	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
	int rc = 0;
	struct bnx2x_vlan_mac_ramrod_params p;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;

	/* Clear pending commands first */

	spin_lock_bh(&exeq->lock);

	list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
		if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
		    *vlan_mac_flags)
			list_del(&exeq_pos->link);
	}

	spin_unlock_bh(&exeq->lock);

	/* Prepare a command request */
	memset(&p, 0, sizeof(p));
	p.vlan_mac_obj = o;
	p.ramrod_flags = *ramrod_flags;
	p.user_req.cmd = BNX2X_VLAN_MAC_DEL;

	/*
	 * Add all but the last VLAN-MAC to the execution queue without actually
	 * executing anything.
	 */
	__clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
	__clear_bit(RAMROD_EXEC, &p.ramrod_flags);
	__clear_bit(RAMROD_CONT, &p.ramrod_flags);

	list_for_each_entry(pos, &o->head, link) {
		if (pos->vlan_mac_flags == *vlan_mac_flags) {
			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
			memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
			rc = bnx2x_config_vlan_mac(bp, &p);
			if (rc < 0) {
				BNX2X_ERR("Failed to add a new DEL command\n");
				return rc;
			}
		}
	}

	p.ramrod_flags = *ramrod_flags;
	__set_bit(RAMROD_CONT, &p.ramrod_flags);

	return bnx2x_config_vlan_mac(bp, &p);
}

static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
	u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
	unsigned long *pstate, bnx2x_obj_type type)
{
	raw->func_id = func_id;
	raw->cid = cid;
	raw->cl_id = cl_id;
	raw->rdata = rdata;
	raw->rdata_mapping = rdata_mapping;
	raw->state = state;
	raw->pstate = pstate;
	raw->obj_type = type;
	raw->check_pending = bnx2x_raw_check_pending;
	raw->clear_pending = bnx2x_raw_clear_pending;
	raw->set_pending = bnx2x_raw_set_pending;
	raw->wait_comp = bnx2x_raw_wait;
}

static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
	u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
	int state, unsigned long *pstate, bnx2x_obj_type type,
	struct bnx2x_credit_pool_obj *macs_pool,
	struct bnx2x_credit_pool_obj *vlans_pool)
{
	INIT_LIST_HEAD(&o->head);

	o->macs_pool = macs_pool;
	o->vlans_pool = vlans_pool;

	o->delete_all = bnx2x_vlan_mac_del_all;
	o->restore = bnx2x_vlan_mac_restore;
	o->complete = bnx2x_complete_vlan_mac;
	o->wait = bnx2x_wait_vlan_mac;

	bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
			   state, pstate, type);
}


void bnx2x_init_mac_obj(struct bnx2x *bp,
			struct bnx2x_vlan_mac_obj *mac_obj,
			u8 cl_id, u32 cid, u8 func_id, void *rdata,
			dma_addr_t rdata_mapping, int state,
			unsigned long *pstate, bnx2x_obj_type type,
			struct bnx2x_credit_pool_obj *macs_pool)
{
	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;

	bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
				   rdata_mapping, state, pstate, type,
				   macs_pool, NULL);

	/* CAM credit pool handling */
	mac_obj->get_credit = bnx2x_get_credit_mac;
	mac_obj->put_credit = bnx2x_put_credit_mac;
	mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
	mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;

	if (CHIP_IS_E1x(bp)) {
		mac_obj->set_one_rule = bnx2x_set_one_mac_e1x;
		mac_obj->check_del = bnx2x_check_mac_del;
		mac_obj->check_add = bnx2x_check_mac_add;
		mac_obj->check_move = bnx2x_check_move_always_err;
		mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &mac_obj->exe_queue, 1, qable_obj,
				     bnx2x_validate_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_mac);
	} else {
		mac_obj->set_one_rule = bnx2x_set_one_mac_e2;
		mac_obj->check_del = bnx2x_check_mac_del;
		mac_obj->check_add = bnx2x_check_mac_add;
		mac_obj->check_move = bnx2x_check_move;
		mac_obj->ramrod_cmd =
			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
				     qable_obj, bnx2x_validate_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_mac);
	}
}

void bnx2x_init_vlan_obj(struct bnx2x *bp,
			 struct bnx2x_vlan_mac_obj *vlan_obj,
			 u8 cl_id, u32 cid, u8 func_id, void *rdata,
			 dma_addr_t rdata_mapping, int state,
			 unsigned long *pstate, bnx2x_obj_type type,
			 struct bnx2x_credit_pool_obj *vlans_pool)
{
	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;

	bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
				   rdata_mapping, state, pstate, type, NULL,
				   vlans_pool);

	vlan_obj->get_credit = bnx2x_get_credit_vlan;
	vlan_obj->put_credit = bnx2x_put_credit_vlan;
	vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
	vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;

	if (CHIP_IS_E1x(bp)) {
		BNX2X_ERR("Do not support chips other than E2 and newer\n");
		BUG();
	} else {
		vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2;
		vlan_obj->check_del = bnx2x_check_vlan_del;
		vlan_obj->check_add = bnx2x_check_vlan_add;
		vlan_obj->check_move = bnx2x_check_move;
		vlan_obj->ramrod_cmd =
			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
				     qable_obj, bnx2x_validate_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_vlan);
	}
}
1936
1937 void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
1938 struct bnx2x_vlan_mac_obj *vlan_mac_obj,
1939 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1940 dma_addr_t rdata_mapping, int state,
1941 unsigned long *pstate, bnx2x_obj_type type,
1942 struct bnx2x_credit_pool_obj *macs_pool,
1943 struct bnx2x_credit_pool_obj *vlans_pool)
1944 {
1945 union bnx2x_qable_obj *qable_obj =
1946 (union bnx2x_qable_obj *)vlan_mac_obj;
1947
1948 bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
1949 rdata_mapping, state, pstate, type,
1950 macs_pool, vlans_pool);
1951
1952 /* CAM pool handling */
1953 vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
1954 vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
1955 /*
1956 * CAM offset is relevant for 57710 and 57711 chips only which have a
1957 * single CAM for both MACs and VLAN-MAC pairs. So the offset
1958 * will be taken from MACs' pool object only.
1959 */
1960 vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1961 vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1962
1963 if (CHIP_IS_E1(bp)) {
1964 BNX2X_ERR("VLAN-MAC pairs are not supported on E1 chips\n");
1965 BUG();
1966 } else if (CHIP_IS_E1H(bp)) {
1967 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
1968 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
1969 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
1970 vlan_mac_obj->check_move = bnx2x_check_move_always_err;
1971 vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
1972
1973 /* Exe Queue */
1974 bnx2x_exe_queue_init(bp,
1975 &vlan_mac_obj->exe_queue, 1, qable_obj,
1976 bnx2x_validate_vlan_mac,
1977 bnx2x_optimize_vlan_mac,
1978 bnx2x_execute_vlan_mac,
1979 bnx2x_exeq_get_vlan_mac);
1980 } else {
1981 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
1982 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
1983 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
1984 vlan_mac_obj->check_move = bnx2x_check_move;
1985 vlan_mac_obj->ramrod_cmd =
1986 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1987
1988 /* Exe Queue */
1989 bnx2x_exe_queue_init(bp,
1990 &vlan_mac_obj->exe_queue,
1991 CLASSIFY_RULES_COUNT,
1992 qable_obj, bnx2x_validate_vlan_mac,
1993 bnx2x_optimize_vlan_mac,
1994 bnx2x_execute_vlan_mac,
1995 bnx2x_exeq_get_vlan_mac);
1996 }
1997
1998 }
1999
2000 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2001 static inline void __storm_memset_mac_filters(struct bnx2x *bp,
2002 struct tstorm_eth_mac_filter_config *mac_filters,
2003 u16 pf_id)
2004 {
2005 size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2006
2007 u32 addr = BAR_TSTRORM_INTMEM +
2008 TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2009
2010 __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2011 }
2012
2013 static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
2014 struct bnx2x_rx_mode_ramrod_params *p)
2015 {
2016 /* update the bp MAC filter structure */
2017 u32 mask = (1 << p->cl_id);
2018
2019 struct tstorm_eth_mac_filter_config *mac_filters =
2020 (struct tstorm_eth_mac_filter_config *)p->rdata;
2021
2022 /* initial setting is drop-all */
2023 u8 drop_all_ucast = 1, drop_all_mcast = 1;
2024 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2025 u8 unmatched_unicast = 0;
2026
2027 /* In E1x we take into account only the Rx accept flag, since Tx switching
2028 * isn't enabled. */
2029 if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
2030 /* accept matched ucast */
2031 drop_all_ucast = 0;
2032
2033 if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
2034 /* accept matched mcast */
2035 drop_all_mcast = 0;
2036
2037 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
2038 /* accept all ucast */
2039 drop_all_ucast = 0;
2040 accp_all_ucast = 1;
2041 }
2042 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
2043 /* accept all mcast */
2044 drop_all_mcast = 0;
2045 accp_all_mcast = 1;
2046 }
2047 if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
2048 /* accept (all) bcast */
2049 accp_all_bcast = 1;
2050 if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2051 /* accept unmatched unicasts */
2052 unmatched_unicast = 1;
2053
2054 mac_filters->ucast_drop_all = drop_all_ucast ?
2055 mac_filters->ucast_drop_all | mask :
2056 mac_filters->ucast_drop_all & ~mask;
2057
2058 mac_filters->mcast_drop_all = drop_all_mcast ?
2059 mac_filters->mcast_drop_all | mask :
2060 mac_filters->mcast_drop_all & ~mask;
2061
2062 mac_filters->ucast_accept_all = accp_all_ucast ?
2063 mac_filters->ucast_accept_all | mask :
2064 mac_filters->ucast_accept_all & ~mask;
2065
2066 mac_filters->mcast_accept_all = accp_all_mcast ?
2067 mac_filters->mcast_accept_all | mask :
2068 mac_filters->mcast_accept_all & ~mask;
2069
2070 mac_filters->bcast_accept_all = accp_all_bcast ?
2071 mac_filters->bcast_accept_all | mask :
2072 mac_filters->bcast_accept_all & ~mask;
2073
2074 mac_filters->unmatched_unicast = unmatched_unicast ?
2075 mac_filters->unmatched_unicast | mask :
2076 mac_filters->unmatched_unicast & ~mask;
2077
2078 DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\naccp_ucast 0x%x\n"
2079 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
2080 mac_filters->ucast_drop_all,
2081 mac_filters->mcast_drop_all,
2082 mac_filters->ucast_accept_all,
2083 mac_filters->mcast_accept_all,
2084 mac_filters->bcast_accept_all);
2085
2086 /* write the MAC filter structure*/
2087 __storm_memset_mac_filters(bp, mac_filters, p->func_id);
2088
2089 /* The operation is completed */
2090 clear_bit(p->state, p->pstate);
2091 smp_mb__after_clear_bit();
2092
2093 return 0;
2094 }
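/* Illustrative sketch (not driver code): each *_drop_all / *_accept_all
 * word above keeps one bit per client, so every conditional expression
 * reduces to a set-bit/clear-bit pattern. For a hypothetical cl_id of 3:
 *
 *	u32 mask = 1 << 3;
 *	if (drop_all_ucast)
 *		mac_filters->ucast_drop_all |= mask;
 *	else
 *		mac_filters->ucast_drop_all &= ~mask;
 */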
2095
2096 /* Setup ramrod data */
2097 static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2098 struct eth_classify_header *hdr,
2099 u8 rule_cnt)
2100 {
2101 hdr->echo = cid;
2102 hdr->rule_cnt = rule_cnt;
2103 }
2104
2105 static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
2106 unsigned long accept_flags,
2107 struct eth_filter_rules_cmd *cmd,
2108 bool clear_accept_all)
2109 {
2110 u16 state;
2111
2112 /* start with 'drop-all' */
2113 state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2114 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2115
2116 if (accept_flags) {
2117 if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags))
2118 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2119
2120 if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags))
2121 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2122
2123 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) {
2124 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2125 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2126 }
2127
2128 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) {
2129 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2130 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2131 }
2132 if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags))
2133 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2134
2135 if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) {
2136 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2137 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2138 }
2139 if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags))
2140 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2141 }
2142
2143 /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2144 if (clear_accept_all) {
2145 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2146 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2147 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2148 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2149 }
2150
2151 cmd->state = cpu_to_le16(state);
2152
2153 }
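/* Illustrative example: for a promiscuous Rx configuration the caller
 * would typically pass BNX2X_ACCEPT_ALL_UNICAST, BNX2X_ACCEPT_ALL_MULTICAST
 * and BNX2X_ACCEPT_BROADCAST, which the code above folds into
 *
 *	state = ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL |
 *		ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL |
 *		ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
 *
 * i.e. both DROP_ALL bits cleared and all three ACCEPT_ALL bits set.
 */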
2154
2155 static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2156 struct bnx2x_rx_mode_ramrod_params *p)
2157 {
2158 struct eth_filter_rules_ramrod_data *data = p->rdata;
2159 int rc;
2160 u8 rule_idx = 0;
2161
2162 /* Reset the ramrod data buffer */
2163 memset(data, 0, sizeof(*data));
2164
2165 /* Setup ramrod data */
2166
2167 /* Tx (internal switching) */
2168 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2169 data->rules[rule_idx].client_id = p->cl_id;
2170 data->rules[rule_idx].func_id = p->func_id;
2171
2172 data->rules[rule_idx].cmd_general_data =
2173 ETH_FILTER_RULES_CMD_TX_CMD;
2174
2175 bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
2176 &(data->rules[rule_idx++]), false);
2177 }
2178
2179 /* Rx */
2180 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2181 data->rules[rule_idx].client_id = p->cl_id;
2182 data->rules[rule_idx].func_id = p->func_id;
2183
2184 data->rules[rule_idx].cmd_general_data =
2185 ETH_FILTER_RULES_CMD_RX_CMD;
2186
2187 bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
2188 &(data->rules[rule_idx++]), false);
2189 }
2190
2191
2192 /*
2193 * If FCoE Queue configuration has been requested, configure the Rx and
2194 * internal switching modes for this queue in separate rules.
2195 *
2196 * The FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
2197 * MCAST_ALL, UCAST_ALL, BCAST_ALL or UNMATCHED.
2198 */
2199 if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2200 /* Tx (internal switching) */
2201 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2202 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2203 data->rules[rule_idx].func_id = p->func_id;
2204
2205 data->rules[rule_idx].cmd_general_data =
2206 ETH_FILTER_RULES_CMD_TX_CMD;
2207
2208 bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
2209 &(data->rules[rule_idx++]),
2210 true);
2211 }
2212
2213 /* Rx */
2214 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2215 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2216 data->rules[rule_idx].func_id = p->func_id;
2217
2218 data->rules[rule_idx].cmd_general_data =
2219 ETH_FILTER_RULES_CMD_RX_CMD;
2220
2221 bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
2222 &(data->rules[rule_idx++]),
2223 true);
2224 }
2225 }
2226
2227 /*
2228 * Set the ramrod header (most importantly - number of rules to
2229 * configure).
2230 */
2231 bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2232
2233 DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, "
2234 "tx_accept_flags 0x%lx\n",
2235 data->header.rule_cnt, p->rx_accept_flags,
2236 p->tx_accept_flags);
2237
2238 /*
2239 * No need for an explicit memory barrier here: the ordering of
2240 * writing to the SPQ element and updating of the SPQ producer
2241 * (which involves a memory read) is guaranteed by the full
2242 * memory barrier inside bnx2x_sp_post().
2243 */
2245
2246 /* Send a ramrod */
2247 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
2248 U64_HI(p->rdata_mapping),
2249 U64_LO(p->rdata_mapping),
2250 ETH_CONNECTION_TYPE);
2251 if (rc)
2252 return rc;
2253
2254 /* Ramrod completion is pending */
2255 return 1;
2256 }
2257
2258 static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
2259 struct bnx2x_rx_mode_ramrod_params *p)
2260 {
2261 return bnx2x_state_wait(bp, p->state, p->pstate);
2262 }
2263
2264 static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
2265 struct bnx2x_rx_mode_ramrod_params *p)
2266 {
2267 /* Do nothing */
2268 return 0;
2269 }
2270
2271 int bnx2x_config_rx_mode(struct bnx2x *bp,
2272 struct bnx2x_rx_mode_ramrod_params *p)
2273 {
2274 int rc;
2275
2276 /* Configure the new classification in the chip */
2277 rc = p->rx_mode_obj->config_rx_mode(bp, p);
2278 if (rc < 0)
2279 return rc;
2280
2281 /* Wait for a ramrod completion if was requested */
2282 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2283 rc = p->rx_mode_obj->wait_comp(bp, p);
2284 if (rc)
2285 return rc;
2286 }
2287
2288 return rc;
2289 }
2290
2291 void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2292 struct bnx2x_rx_mode_obj *o)
2293 {
2294 if (CHIP_IS_E1x(bp)) {
2295 o->wait_comp = bnx2x_empty_rx_mode_wait;
2296 o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2297 } else {
2298 o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
2299 o->config_rx_mode = bnx2x_set_rx_mode_e2;
2300 }
2301 }
2302
2303 /********************* Multicast verbs: SET, CLEAR ****************************/
2304 static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2305 {
2306 return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
2307 }
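/* Illustrative example: the bin is the most significant byte of the
 * little-endian CRC32c of the 6-byte MAC, i.e. a value in 0..255.
 * For a hypothetical multicast MAC:
 *
 *	u8 mac[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
 *	u8 bin = bnx2x_mcast_bin_from_mac(mac);	-- some bin in 0..255
 */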
2308
2309 struct bnx2x_mcast_mac_elem {
2310 struct list_head link;
2311 u8 mac[ETH_ALEN];
2312 u8 pad[2]; /* For a natural alignment of the following buffer */
2313 };
2314
2315 struct bnx2x_pending_mcast_cmd {
2316 struct list_head link;
2317 int type; /* BNX2X_MCAST_CMD_X */
2318 union {
2319 struct list_head macs_head;
2320 u32 macs_num; /* Needed for DEL command */
2321 int next_bin; /* Needed for RESTORE flow with aprox match */
2322 } data;
2323
2324 bool done; /* set to true, when the command has been handled,
2325 * practically used in 57712 handling only, where one pending
2326 * command may be handled in a few operations. As long as for
2327 * other chips every operation handling is completed in a
2328 * single ramrod, there is no need to utilize this field.
2329 */
2330 };
2331
2332 static int bnx2x_mcast_wait(struct bnx2x *bp,
2333 struct bnx2x_mcast_obj *o)
2334 {
2335 if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2336 o->raw.wait_comp(bp, &o->raw))
2337 return -EBUSY;
2338
2339 return 0;
2340 }
2341
2342 static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2343 struct bnx2x_mcast_obj *o,
2344 struct bnx2x_mcast_ramrod_params *p,
2345 int cmd)
2346 {
2347 int total_sz;
2348 struct bnx2x_pending_mcast_cmd *new_cmd;
2349 struct bnx2x_mcast_mac_elem *cur_mac = NULL;
2350 struct bnx2x_mcast_list_elem *pos;
2351 int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
2352 p->mcast_list_len : 0);
2353
2354 /* If the command is empty ("handle pending commands only"), return */
2355 if (!p->mcast_list_len)
2356 return 0;
2357
2358 total_sz = sizeof(*new_cmd) +
2359 macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
2360
2361 /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2362 new_cmd = kzalloc(total_sz, GFP_ATOMIC);
2363
2364 if (!new_cmd)
2365 return -ENOMEM;
2366
2367 DP(BNX2X_MSG_SP, "About to enqueue a new command of type %d. "
2368 "macs_list_len=%d\n", cmd, macs_list_len);
2369
2370 INIT_LIST_HEAD(&new_cmd->data.macs_head);
2371
2372 new_cmd->type = cmd;
2373 new_cmd->done = false;
2374
2375 switch (cmd) {
2376 case BNX2X_MCAST_CMD_ADD:
2377 cur_mac = (struct bnx2x_mcast_mac_elem *)
2378 ((u8 *)new_cmd + sizeof(*new_cmd));
2379
2380 /* Push the MACs of the current command into the pending command's
2381 * MACs list: FIFO
2382 */
2383 list_for_each_entry(pos, &p->mcast_list, link) {
2384 memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
2385 list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
2386 cur_mac++;
2387 }
2388
2389 break;
2390
2391 case BNX2X_MCAST_CMD_DEL:
2392 new_cmd->data.macs_num = p->mcast_list_len;
2393 break;
2394
2395 case BNX2X_MCAST_CMD_RESTORE:
2396 new_cmd->data.next_bin = 0;
2397 break;
2398
2399 default:
2400 BNX2X_ERR("Unknown command: %d\n", cmd);
2401 return -EINVAL;
2402 }
2403
2404 /* Push the new pending command to the tail of the pending list: FIFO */
2405 list_add_tail(&new_cmd->link, &o->pending_cmds_head);
2406
2407 o->set_sched(o);
2408
2409 return 1;
2410 }
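/* Layout note (illustrative): for an ADD command the single kzalloc()
 * above yields one contiguous block holding the command header followed
 * by the MAC array, e.g. for two MACs:
 *
 *	+--------------------------------+  <- new_cmd
 *	| struct bnx2x_pending_mcast_cmd |
 *	+--------------------------------+  <- cur_mac
 *	| struct bnx2x_mcast_mac_elem[0] |
 *	| struct bnx2x_mcast_mac_elem[1] |
 *	+--------------------------------+
 *
 * so freeing the command later releases its MAC elements as well.
 */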
2411
2412 /**
2413 * bnx2x_mcast_get_next_bin - get the next set bin (index)
2414 *
2415 * @o: multicast object to scan
2416 * @last: index to start looking from (inclusive)
2417 *
2418 * returns the next found (set) bin or a negative value if none is found.
2419 */
2420 static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2421 {
2422 int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2423
2424 for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2425 if (o->registry.aprox_match.vec[i])
2426 for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2427 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2428 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2429 vec, cur_bit)) {
2430 return cur_bit;
2431 }
2432 }
2433 inner_start = 0;
2434 }
2435
2436 /* None found */
2437 return -1;
2438 }
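/* Usage sketch: the typical pattern for iterating over all set bins is
 *
 *	for (bin = bnx2x_mcast_get_next_bin(o, 0); bin >= 0;
 *	     bin = bnx2x_mcast_get_next_bin(o, bin + 1))
 *		... handle bin ...
 *
 * as done, for instance, in bnx2x_mcast_hdl_restore_e1h() below.
 */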
2439
2440 /**
2441 * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
2442 *
2443 * @o: multicast object to scan
2444 *
2445 * returns the index of the found bin or -1 if none is found
2446 */
2447 static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2448 {
2449 int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2450
2451 if (cur_bit >= 0)
2452 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2453
2454 return cur_bit;
2455 }
2456
2457 static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2458 {
2459 struct bnx2x_raw_obj *raw = &o->raw;
2460 u8 rx_tx_flag = 0;
2461
2462 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2463 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2464 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2465
2466 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2467 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2468 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2469
2470 return rx_tx_flag;
2471 }
2472
2473 static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
2474 struct bnx2x_mcast_obj *o, int idx,
2475 union bnx2x_mcast_config_data *cfg_data,
2476 int cmd)
2477 {
2478 struct bnx2x_raw_obj *r = &o->raw;
2479 struct eth_multicast_rules_ramrod_data *data =
2480 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2481 u8 func_id = r->func_id;
2482 u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
2483 int bin;
2484
2485 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2486 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2487
2488 data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2489
2490 /* Get a bin and update a bins' vector */
2491 switch (cmd) {
2492 case BNX2X_MCAST_CMD_ADD:
2493 bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
2494 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2495 break;
2496
2497 case BNX2X_MCAST_CMD_DEL:
2498 /* If there were no more bins to clear
2499 * (bnx2x_mcast_clear_first_bin() returns -1) the rule will
2500 * clear the dummy 0xff bin (-1 cast to u8 below).
2501 * See bnx2x_mcast_validate_e2() for an explanation of when
2502 * this may happen.
2503 */
2504 bin = bnx2x_mcast_clear_first_bin(o);
2505 break;
2506
2507 case BNX2X_MCAST_CMD_RESTORE:
2508 bin = cfg_data->bin;
2509 break;
2510
2511 default:
2512 BNX2X_ERR("Unknown command: %d\n", cmd);
2513 return;
2514 }
2515
2516 DP(BNX2X_MSG_SP, "%s bin %d\n",
2517 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2518 "Setting" : "Clearing"), bin);
2519
2520 data->rules[idx].bin_id = (u8)bin;
2521 data->rules[idx].func_id = func_id;
2522 data->rules[idx].engine_id = o->engine_id;
2523 }
2524
2525 /**
2526 * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2527 *
2528 * @bp: device handle
2529 * @o: multicast object
2530 * @start_bin: index in the registry to start from (including)
2531 * @rdata_idx: index in the ramrod data to start from
2532 *
2533 * returns last handled bin index or -1 if all bins have been handled
2534 */
2535 static inline int bnx2x_mcast_handle_restore_cmd_e2(
2536 struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_bin,
2537 int *rdata_idx)
2538 {
2539 int cur_bin, cnt = *rdata_idx;
2540 union bnx2x_mcast_config_data cfg_data = {0};
2541
2542 /* go through the registry and configure the bins from it */
2543 for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2544 cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
2545
2546 cfg_data.bin = (u8)cur_bin;
2547 o->set_one_rule(bp, o, cnt, &cfg_data,
2548 BNX2X_MCAST_CMD_RESTORE);
2549
2550 cnt++;
2551
2552 DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
2553
2554 /* Break if we reached the maximum number
2555 * of rules.
2556 */
2557 if (cnt >= o->max_cmd_len)
2558 break;
2559 }
2560
2561 *rdata_idx = cnt;
2562
2563 return cur_bin;
2564 }
2565
2566 static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
2567 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2568 int *line_idx)
2569 {
2570 struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2571 int cnt = *line_idx;
2572 union bnx2x_mcast_config_data cfg_data = {0};
2573
2574 list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
2575 link) {
2576
2577 cfg_data.mac = &pmac_pos->mac[0];
2578 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
2579
2580 cnt++;
2581
2582 DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
2583 " mcast MAC\n",
2584 BNX2X_MAC_PRN_LIST(pmac_pos->mac));
2585
2586 list_del(&pmac_pos->link);
2587
2588 /* Break if we reached the maximum number
2589 * of rules.
2590 */
2591 if (cnt >= o->max_cmd_len)
2592 break;
2593 }
2594
2595 *line_idx = cnt;
2596
2597 /* if no more MACs to configure - we are done */
2598 if (list_empty(&cmd_pos->data.macs_head))
2599 cmd_pos->done = true;
2600 }
2601
2602 static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2603 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2604 int *line_idx)
2605 {
2606 int cnt = *line_idx;
2607
2608 while (cmd_pos->data.macs_num) {
2609 o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2610
2611 cnt++;
2612
2613 cmd_pos->data.macs_num--;
2614
2615 DP(BNX2X_MSG_SP, "Deleting MAC. %d left, cnt is %d\n",
2616 cmd_pos->data.macs_num, cnt);
2617
2618 /* Break if we reached the maximum
2619 * number of rules.
2620 */
2621 if (cnt >= o->max_cmd_len)
2622 break;
2623 }
2624
2625 *line_idx = cnt;
2626
2627 /* If we cleared all bins - we are done */
2628 if (!cmd_pos->data.macs_num)
2629 cmd_pos->done = true;
2630 }
2631
2632 static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2633 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2634 int *line_idx)
2635 {
2636 cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2637 line_idx);
2638
2639 if (cmd_pos->data.next_bin < 0)
2640 /* If o->set_restore returned -1 we are done */
2641 cmd_pos->done = true;
2642 else
2643 /* Start from the next bin next time */
2644 cmd_pos->data.next_bin++;
2645 }
2646
2647 static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
2648 struct bnx2x_mcast_ramrod_params *p)
2649 {
2650 struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2651 int cnt = 0;
2652 struct bnx2x_mcast_obj *o = p->mcast_obj;
2653
2654 list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
2655 link) {
2656 switch (cmd_pos->type) {
2657 case BNX2X_MCAST_CMD_ADD:
2658 bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
2659 break;
2660
2661 case BNX2X_MCAST_CMD_DEL:
2662 bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
2663 break;
2664
2665 case BNX2X_MCAST_CMD_RESTORE:
2666 bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
2667 &cnt);
2668 break;
2669
2670 default:
2671 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
2672 return -EINVAL;
2673 }
2674
2675 /* If the command has been completed - remove it from the list
2676 * and free the memory
2677 */
2678 if (cmd_pos->done) {
2679 list_del(&cmd_pos->link);
2680 kfree(cmd_pos);
2681 }
2682
2683 /* Break if we reached the maximum number of rules */
2684 if (cnt >= o->max_cmd_len)
2685 break;
2686 }
2687
2688 return cnt;
2689 }
2690
2691 static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2692 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2693 int *line_idx)
2694 {
2695 struct bnx2x_mcast_list_elem *mlist_pos;
2696 union bnx2x_mcast_config_data cfg_data = {0};
2697 int cnt = *line_idx;
2698
2699 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2700 cfg_data.mac = mlist_pos->mac;
2701 o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
2702
2703 cnt++;
2704
2705 DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
2706 " mcast MAC\n",
2707 BNX2X_MAC_PRN_LIST(mlist_pos->mac));
2708 }
2709
2710 *line_idx = cnt;
2711 }
2712
2713 static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
2714 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2715 int *line_idx)
2716 {
2717 int cnt = *line_idx, i;
2718
2719 for (i = 0; i < p->mcast_list_len; i++) {
2720 o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
2721
2722 cnt++;
2723
2724 DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
2725 p->mcast_list_len - i - 1);
2726 }
2727
2728 *line_idx = cnt;
2729 }
2730
2731 /**
2732 * bnx2x_mcast_handle_current_cmd - handle the current (non-pending) command
2733 *
2734 * @bp: device handle
2735 * @p: multicast ramrod parameters
2736 * @cmd: command to handle (BNX2X_MCAST_CMD_X)
2737 * @start_cnt: first line in the ramrod data that may be used
2738 *
2739 * This function is called only if there is enough room for the current
2740 * command in the ramrod data.
2741 * Returns number of lines filled in the ramrod data in total.
2742 */
2743 static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
2744 struct bnx2x_mcast_ramrod_params *p, int cmd,
2745 int start_cnt)
2746 {
2747 struct bnx2x_mcast_obj *o = p->mcast_obj;
2748 int cnt = start_cnt;
2749
2750 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
2751
2752 switch (cmd) {
2753 case BNX2X_MCAST_CMD_ADD:
2754 bnx2x_mcast_hdl_add(bp, o, p, &cnt);
2755 break;
2756
2757 case BNX2X_MCAST_CMD_DEL:
2758 bnx2x_mcast_hdl_del(bp, o, p, &cnt);
2759 break;
2760
2761 case BNX2X_MCAST_CMD_RESTORE:
2762 o->hdl_restore(bp, o, 0, &cnt);
2763 break;
2764
2765 default:
2766 BNX2X_ERR("Unknown command: %d\n", cmd);
2767 return -EINVAL;
2768 }
2769
2770 /* The current command has been handled */
2771 p->mcast_list_len = 0;
2772
2773 return cnt;
2774 }
2775
2776 static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
2777 struct bnx2x_mcast_ramrod_params *p,
2778 int cmd)
2779 {
2780 struct bnx2x_mcast_obj *o = p->mcast_obj;
2781 int reg_sz = o->get_registry_size(o);
2782
2783 switch (cmd) {
2784 /* DEL command deletes all currently configured MACs */
2785 case BNX2X_MCAST_CMD_DEL:
2786 o->set_registry_size(o, 0);
2787 /* Don't break */
2788
2789 /* RESTORE command will restore the entire multicast configuration */
2790 case BNX2X_MCAST_CMD_RESTORE:
2791 /* Here we set the approximate amount of work to do, which may
2792 * in fact turn out to be less, as some MACs in postponed ADD
2793 * command(s) scheduled before this command may fall into
2794 * the same bin and the actual number of bins set in the
2795 * registry would be less than we estimated here. See
2796 * bnx2x_mcast_set_one_rule_e2() for further details.
2797 */
2798 p->mcast_list_len = reg_sz;
2799 break;
2800
2801 case BNX2X_MCAST_CMD_ADD:
2802 case BNX2X_MCAST_CMD_CONT:
2803 /* Here we assume that all new MACs will fall into new bins.
2804 * However we will correct the real registry size after we
2805 * handle all pending commands.
2806 */
2807 o->set_registry_size(o, reg_sz + p->mcast_list_len);
2808 break;
2809
2810 default:
2811 BNX2X_ERR("Unknown command: %d\n", cmd);
2812 return -EINVAL;
2813
2814 }
2815
2816 /* Increase the total number of MACs pending to be configured */
2817 o->total_pending_num += p->mcast_list_len;
2818
2819 return 0;
2820 }
2821
2822 static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
2823 struct bnx2x_mcast_ramrod_params *p,
2824 int old_num_bins)
2825 {
2826 struct bnx2x_mcast_obj *o = p->mcast_obj;
2827
2828 o->set_registry_size(o, old_num_bins);
2829 o->total_pending_num -= p->mcast_list_len;
2830 }
2831
2832 /**
2833 * bnx2x_mcast_set_rdata_hdr_e2 - set the ramrod data header values
2834 *
2835 * @bp: device handle
2836 * @p: multicast ramrod parameters
2837 * @len: number of rules to handle
2838 */
2839 static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
2840 struct bnx2x_mcast_ramrod_params *p,
2841 u8 len)
2842 {
2843 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
2844 struct eth_multicast_rules_ramrod_data *data =
2845 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2846
2847 data->header.echo = ((r->cid & BNX2X_SWCID_MASK) |
2848 (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
2849 data->header.rule_cnt = len;
2850 }
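/* Illustrative example: the echo field packs the SW connection id in its
 * low bits and the pending-filter state above them. For a hypothetical
 * cid of 0x12 this yields
 *
 *	echo = (0x12 & BNX2X_SWCID_MASK) |
 *	       (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT);
 *
 * so the completion path can recover both values from the ramrod echo.
 */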
2851
2852 /**
2853 * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2854 *
2855 * @bp: device handle
2856 * @o: multicast object
2857 *
2858 * Recalculate the actual number of set bins in the registry using Brian
2859 * Kernighan's algorithm: its runtime is proportional to the number of set bins.
2860 *
2861 * returns 0 for the compliance with bnx2x_mcast_refresh_registry_e1().
2862 */
2863 static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
2864 struct bnx2x_mcast_obj *o)
2865 {
2866 int i, cnt = 0;
2867 u64 elem;
2868
2869 for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
2870 elem = o->registry.aprox_match.vec[i];
2871 for (; elem; cnt++)
2872 elem &= elem - 1;
2873 }
2874
2875 o->set_registry_size(o, cnt);
2876
2877 return 0;
2878 }
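/* Worked example of the Kernighan loop above, for elem == 0b101100:
 *
 *	0b101100 & 0b101011 = 0b101000	(cnt = 1)
 *	0b101000 & 0b100111 = 0b100000	(cnt = 2)
 *	0b100000 & 0b011111 = 0		(cnt = 3)
 *
 * Each `elem &= elem - 1` clears exactly the lowest set bit, so the loop
 * iterates once per set bin rather than once per bit position.
 */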
2879
2880 static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
2881 struct bnx2x_mcast_ramrod_params *p,
2882 int cmd)
2883 {
2884 struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
2885 struct bnx2x_mcast_obj *o = p->mcast_obj;
2886 struct eth_multicast_rules_ramrod_data *data =
2887 (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
2888 int cnt = 0, rc;
2889
2890 /* Reset the ramrod data buffer */
2891 memset(data, 0, sizeof(*data));
2892
2893 cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
2894
2895 /* If there are no more pending commands - clear SCHEDULED state */
2896 if (list_empty(&o->pending_cmds_head))
2897 o->clear_sched(o);
2898
2899 /* The below may be true iff there was enough room in ramrod
2900 * data for all pending commands and for the current
2901 * command. Otherwise the current command would have been added
2902 * to the pending commands and p->mcast_list_len would have been
2903 * zeroed.
2904 */
2905 if (p->mcast_list_len > 0)
2906 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
2907
2908 /* We've pulled out some MACs - update the total number of
2909 * outstanding.
2910 */
2911 o->total_pending_num -= cnt;
2912
2913 /* send a ramrod */
2914 WARN_ON(o->total_pending_num < 0);
2915 WARN_ON(cnt > o->max_cmd_len);
2916
2917 bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
2918
2919 /* Update a registry size if there are no more pending operations.
2920 *
2921 * We don't want to change the value of the registry size if there are
2922 * pending operations because we want it to always be equal to the
2923 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
2924 * set bins after the last requested operation in order to properly
2925 * evaluate the size of the next DEL/RESTORE operation.
2926 *
2927 * Note that we update the registry itself during command(s) handling
2928 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
2929 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
2930 * with a limited amount of update commands (per MAC/bin) and we don't
2931 * know in this scope what the actual state of bins configuration is
2932 * going to be after this ramrod.
2933 */
2934 if (!o->total_pending_num)
2935 bnx2x_mcast_refresh_registry_e2(bp, o);
2936
2937 /*
2938 * If CLEAR_ONLY was requested - don't send a ramrod and clear
2939 * RAMROD_PENDING status immediately.
2940 */
2941 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
2942 raw->clear_pending(raw);
2943 return 0;
2944 } else {
2945 /*
2946 * No need for an explicit memory barrier here: the ordering of
2947 * writing to the SPQ element and updating of the SPQ producer
2948 * (which involves a memory read) is guaranteed by the full
2949 * memory barrier inside bnx2x_sp_post().
2950 */
2952
2953 /* Send a ramrod */
2954 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
2955 raw->cid, U64_HI(raw->rdata_mapping),
2956 U64_LO(raw->rdata_mapping),
2957 ETH_CONNECTION_TYPE);
2958 if (rc)
2959 return rc;
2960
2961 /* Ramrod completion is pending */
2962 return 1;
2963 }
2964 }
2965
2966 static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
2967 struct bnx2x_mcast_ramrod_params *p,
2968 int cmd)
2969 {
2970 /* Mark that there is work to do */
2971 if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2972 p->mcast_list_len = 1;
2973
2974 return 0;
2975 }
2976
2977 static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
2978 struct bnx2x_mcast_ramrod_params *p,
2979 int old_num_bins)
2980 {
2981 /* Do nothing */
2982 }
2983
2984 #define BNX2X_57711_SET_MC_FILTER(filter, bit) \
2985 do { \
2986 (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
2987 } while (0)
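/* Illustrative example: the macro maps a bin number to a 32-bit word and
 * a bit within it. Bin 200 lands in word 200 >> 5 == 6, bit
 * 200 & 0x1f == 8, i.e. it expands to
 *
 *	mc_filter[6] |= 1 << 8;
 */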
2988
2989 static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
2990 struct bnx2x_mcast_obj *o,
2991 struct bnx2x_mcast_ramrod_params *p,
2992 u32 *mc_filter)
2993 {
2994 struct bnx2x_mcast_list_elem *mlist_pos;
2995 int bit;
2996
2997 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2998 bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
2999 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3000
3001 DP(BNX2X_MSG_SP, "About to configure "
3002 BNX2X_MAC_FMT" mcast MAC, bin %d\n",
3003 BNX2X_MAC_PRN_LIST(mlist_pos->mac), bit);
3004
3005 /* bookkeeping... */
3006 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3007 bit);
3008 }
3009 }
3010
3011 static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3012 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3013 u32 *mc_filter)
3014 {
3015 int bit;
3016
3017 for (bit = bnx2x_mcast_get_next_bin(o, 0);
3018 bit >= 0;
3019 bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3020 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3021 DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3022 }
3023 }
3024
3025 /* On 57711 we write the multicast MACs' approximate match
3026 * table directly into TSTORM's internal RAM, so no
3027 * special tricks are needed to make it work.
3028 */
3029 static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
3030 struct bnx2x_mcast_ramrod_params *p,
3031 int cmd)
3032 {
3033 int i;
3034 struct bnx2x_mcast_obj *o = p->mcast_obj;
3035 struct bnx2x_raw_obj *r = &o->raw;
3036
3037 /* If CLEAR_ONLY has been requested we only clear the registry
3038 * and the pending bit; otherwise build and write the new filter.
3039 */
3040 if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3041 u32 mc_filter[MC_HASH_SIZE] = {0};
3042
3043 /* Set the multicast filter bits before writing it into
3044 * the internal memory.
3045 */
3046 switch (cmd) {
3047 case BNX2X_MCAST_CMD_ADD:
3048 bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
3049 break;
3050
3051 case BNX2X_MCAST_CMD_DEL:
3052 DP(BNX2X_MSG_SP, "Invalidating multicast "
3053 "MACs configuration\n");
3054
3055 /* clear the registry */
3056 memset(o->registry.aprox_match.vec, 0,
3057 sizeof(o->registry.aprox_match.vec));
3058 break;
3059
3060 case BNX2X_MCAST_CMD_RESTORE:
3061 bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
3062 break;
3063
3064 default:
3065 BNX2X_ERR("Unknown command: %d\n", cmd);
3066 return -EINVAL;
3067 }
3068
3069 /* Set the mcast filter in the internal memory */
3070 for (i = 0; i < MC_HASH_SIZE; i++)
3071 REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
3072 } else
3073 /* clear the registry */
3074 memset(o->registry.aprox_match.vec, 0,
3075 sizeof(o->registry.aprox_match.vec));
3076
3077 /* We are done */
3078 r->clear_pending(r);
3079
3080 return 0;
3081 }
3082
3083 static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
3084 struct bnx2x_mcast_ramrod_params *p,
3085 int cmd)
3086 {
3087 struct bnx2x_mcast_obj *o = p->mcast_obj;
3088 int reg_sz = o->get_registry_size(o);
3089
3090 switch (cmd) {
3091 /* DEL command deletes all currently configured MACs */
3092 case BNX2X_MCAST_CMD_DEL:
3093 o->set_registry_size(o, 0);
3094 /* Don't break */
3095
3096 /* RESTORE command will restore the entire multicast configuration */
3097 case BNX2X_MCAST_CMD_RESTORE:
3098 p->mcast_list_len = reg_sz;
3099 DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
3100 cmd, p->mcast_list_len);
3101 break;
3102
3103 case BNX2X_MCAST_CMD_ADD:
3104 case BNX2X_MCAST_CMD_CONT:
3105 /* Multicast MACs on 57710 are configured as unicast MACs and
3106 * there is only a limited number of CAM entries for that
3107 * matter.
3108 */
3109 if (p->mcast_list_len > o->max_cmd_len) {
3110 BNX2X_ERR("Can't configure more than %d multicast MACs "
3111 "on 57710\n", o->max_cmd_len);
3112 return -EINVAL;
3113 }
3114 /* Every configured MAC should be cleared if DEL command is
3115 * called. Only the last ADD command is relevant, since
3116 * every ADD command overrides the previous configuration.
3117 */
3118 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3119 if (p->mcast_list_len > 0)
3120 o->set_registry_size(o, p->mcast_list_len);
3121
3122 break;
3123
3124 default:
3125 BNX2X_ERR("Unknown command: %d\n", cmd);
3126 return -EINVAL;
3127
3128 }
3129
3130 /* We want to ensure that commands are executed one by one for 57710.
3131 * Therefore each non-empty command will consume o->max_cmd_len.
3132 */
3133 if (p->mcast_list_len)
3134 o->total_pending_num += o->max_cmd_len;
3135
3136 return 0;
3137 }
3138
3139 static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3140 struct bnx2x_mcast_ramrod_params *p,
3141 int old_num_macs)
3142 {
3143 struct bnx2x_mcast_obj *o = p->mcast_obj;
3144
3145 o->set_registry_size(o, old_num_macs);
3146
3147 /* If the current command hasn't been handled yet, reaching this
3148 * point means it is meant to be dropped, so update the number of
3149 * outstanding MACs accordingly.
3150 */
3151 if (p->mcast_list_len)
3152 o->total_pending_num -= o->max_cmd_len;
3153 }
3154
3155 static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
3156 struct bnx2x_mcast_obj *o, int idx,
3157 union bnx2x_mcast_config_data *cfg_data,
3158 int cmd)
3159 {
3160 struct bnx2x_raw_obj *r = &o->raw;
3161 struct mac_configuration_cmd *data =
3162 (struct mac_configuration_cmd *)(r->rdata);
3163
3164 /* copy mac */
3165 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
3166 bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3167 &data->config_table[idx].middle_mac_addr,
3168 &data->config_table[idx].lsb_mac_addr,
3169 cfg_data->mac);
3170
3171 data->config_table[idx].vlan_id = 0;
3172 data->config_table[idx].pf_id = r->func_id;
3173 data->config_table[idx].clients_bit_vector =
3174 cpu_to_le32(1 << r->cl_id);
3175
3176 SET_FLAG(data->config_table[idx].flags,
3177 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3178 T_ETH_MAC_COMMAND_SET);
3179 }
3180 }
3181
3182 /**
3183 * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
3184 *
3185 * @bp: device handle
3186 * @p:
3187 * @len: number of rules to handle
3188 */
3189 static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
3190 struct bnx2x_mcast_ramrod_params *p,
3191 u8 len)
3192 {
3193 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3194 struct mac_configuration_cmd *data =
3195 (struct mac_configuration_cmd *)(r->rdata);
3196
3197 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
3198 BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
3199 BNX2X_MAX_MULTICAST*(1 + r->func_id));
3200
3201 data->hdr.offset = offset;
3202 data->hdr.client_id = 0xff;
3203 data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) |
3204 (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
3205 data->hdr.length = len;
3206 }
3207
3208 /**
3209 * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
3210 *
3211 * @bp: device handle
3212 * @o: multicast object
3213 * @start_idx: index in the registry to start from
3214 * @rdata_idx: index in the ramrod data to start from
3215 *
3216 * restore command for 57710 is like all other commands - always a stand-alone
3217 * command - start_idx and rdata_idx will always be 0. This function will always
3218 * succeed.
3219 * returns -1 to comply with 57712 variant.
3220 */
3221 static inline int bnx2x_mcast_handle_restore_cmd_e1(
3222 struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_idx,
3223 int *rdata_idx)
3224 {
3225 struct bnx2x_mcast_mac_elem *elem;
3226 int i = 0;
3227 union bnx2x_mcast_config_data cfg_data = {0};
3228
3229 /* go through the registry and configure the MACs from it. */
3230 list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3231 cfg_data.mac = &elem->mac[0];
3232 o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3233
3234 i++;
3235
3236 DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
3237 " mcast MAC\n",
3238 BNX2X_MAC_PRN_LIST(cfg_data.mac));
3239 }
3240
3241 *rdata_idx = i;
3242
3243 return -1;
3244 }
3245
3246
3247 static inline int bnx2x_mcast_handle_pending_cmds_e1(
3248 struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
3249 {
3250 struct bnx2x_pending_mcast_cmd *cmd_pos;
3251 struct bnx2x_mcast_mac_elem *pmac_pos;
3252 struct bnx2x_mcast_obj *o = p->mcast_obj;
3253 union bnx2x_mcast_config_data cfg_data = {0};
3254 int cnt = 0;
3255
3256
3257 /* If nothing to be done - return */
3258 if (list_empty(&o->pending_cmds_head))
3259 return 0;
3260
3261 /* Handle the first command */
3262 cmd_pos = list_first_entry(&o->pending_cmds_head,
3263 struct bnx2x_pending_mcast_cmd, link);
3264
3265 switch (cmd_pos->type) {
3266 case BNX2X_MCAST_CMD_ADD:
3267 list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
3268 cfg_data.mac = &pmac_pos->mac[0];
3269 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
3270
3271 cnt++;
3272
3273 DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
3274 " mcast MAC\n",
3275 BNX2X_MAC_PRN_LIST(pmac_pos->mac));
3276 }
3277 break;
3278
3279 case BNX2X_MCAST_CMD_DEL:
3280 cnt = cmd_pos->data.macs_num;
3281 DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
3282 break;
3283
3284 case BNX2X_MCAST_CMD_RESTORE:
3285 o->hdl_restore(bp, o, 0, &cnt);
3286 break;
3287
3288 default:
3289 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
3290 return -EINVAL;
3291 }
3292
3293 list_del(&cmd_pos->link);
3294 kfree(cmd_pos);
3295
3296 return cnt;
3297 }
3298
3299 /**
3300 * bnx2x_get_fw_mac_addr - inverse of bnx2x_set_fw_mac_addr().
3301 *
3302 * @fw_hi: two most significant MAC bytes in FW order
3303 * @fw_mid: two middle MAC bytes in FW order
3304 * @fw_lo: two least significant MAC bytes in FW order
3305 * @mac: buffer to store the MAC address in wire order
3306 */
3307 static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3308 __le16 *fw_lo, u8 *mac)
3309 {
3310 mac[1] = ((u8 *)fw_hi)[0];
3311 mac[0] = ((u8 *)fw_hi)[1];
3312 mac[3] = ((u8 *)fw_mid)[0];
3313 mac[2] = ((u8 *)fw_mid)[1];
3314 mac[5] = ((u8 *)fw_lo)[0];
3315 mac[4] = ((u8 *)fw_lo)[1];
3316 }
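/* Illustrative example: the firmware stores the MAC as three 16-bit
 * halves with the two bytes of each half swapped, so for the (made-up)
 * address 00:11:22:33:44:55 the code above recovers
 *
 *	mac[0]:mac[1] == 00:11 from fw_hi,
 *	mac[2]:mac[3] == 22:33 from fw_mid,
 *	mac[4]:mac[5] == 44:55 from fw_lo.
 */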
3317
3318 /**
3319 * bnx2x_mcast_refresh_registry_e1 - update the exact-match multicast registry
3320 *
3321 * @bp: device handle
3322 * @o: multicast object
3323 *
3324 * Check the ramrod data first entry flag to see if it's a DELETE or ADD command
3325 * and update the registry correspondingly: if ADD - allocate a memory and add
3326 * the entries to the registry (list), if DELETE - clear the registry and free
3327 * the memory.
3328 */
3329 static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
3330 struct bnx2x_mcast_obj *o)
3331 {
3332 struct bnx2x_raw_obj *raw = &o->raw;
3333 struct bnx2x_mcast_mac_elem *elem;
3334 struct mac_configuration_cmd *data =
3335 (struct mac_configuration_cmd *)(raw->rdata);
3336
3337 /* If first entry contains a SET bit - the command was ADD,
3338 * otherwise - DEL_ALL
3339 */
3340 if (GET_FLAG(data->config_table[0].flags,
3341 MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3342 int i, len = data->hdr.length;
3343
3344 /* Break if it was a RESTORE command */
3345 if (!list_empty(&o->registry.exact_match.macs))
3346 return 0;
3347
3348 elem = kzalloc(sizeof(*elem)*len, GFP_ATOMIC);
3349 if (!elem) {
3350 BNX2X_ERR("Failed to allocate registry memory\n");
3351 return -ENOMEM;
3352 }
3353
3354 for (i = 0; i < len; i++, elem++) {
3355 bnx2x_get_fw_mac_addr(
3356 &data->config_table[i].msb_mac_addr,
3357 &data->config_table[i].middle_mac_addr,
3358 &data->config_table[i].lsb_mac_addr,
3359 elem->mac);
3360 DP(BNX2X_MSG_SP, "Adding registry entry for ["
3361 BNX2X_MAC_FMT"]\n",
3362 BNX2X_MAC_PRN_LIST(elem->mac));
3363 list_add_tail(&elem->link,
3364 &o->registry.exact_match.macs);
3365 }
3366 } else {
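/* The registry below was allocated as one block in the ADD branch
 * above, and the first list entry is the start of that block, so a
 * single kfree() releases all the entries at once.
 */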
3367 elem = list_first_entry(&o->registry.exact_match.macs,
3368 struct bnx2x_mcast_mac_elem, link);
3369 DP(BNX2X_MSG_SP, "Deleting a registry\n");
3370 kfree(elem);
3371 INIT_LIST_HEAD(&o->registry.exact_match.macs);
3372 }
3373
3374 return 0;
3375 }
3376
3377 static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
3378 struct bnx2x_mcast_ramrod_params *p,
3379 int cmd)
3380 {
3381 struct bnx2x_mcast_obj *o = p->mcast_obj;
3382 struct bnx2x_raw_obj *raw = &o->raw;
3383 struct mac_configuration_cmd *data =
3384 (struct mac_configuration_cmd *)(raw->rdata);
3385 int cnt = 0, i, rc;
3386
3387 /* Reset the ramrod data buffer */
3388 memset(data, 0, sizeof(*data));
3389
3390 /* First set all entries as invalid */
3391 for (i = 0; i < o->max_cmd_len ; i++)
3392 SET_FLAG(data->config_table[i].flags,
3393 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3394 T_ETH_MAC_COMMAND_INVALIDATE);
3395
3396 /* Handle pending commands first */
3397 cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
3398
3399 /* If there are no more pending commands - clear SCHEDULED state */
3400 if (list_empty(&o->pending_cmds_head))
3401 o->clear_sched(o);
3402
3403 /* The below may be true iff there were no pending commands */
3404 if (!cnt)
3405 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
3406
3407 /* For 57710 every command has o->max_cmd_len length to ensure that
3408 * commands are done one at a time.
3409 */
3410 o->total_pending_num -= o->max_cmd_len;
3411
3412 /* send a ramrod */
3413
3414 WARN_ON(cnt > o->max_cmd_len);
3415
3416 /* Set ramrod header (in particular, a number of entries to update) */
3417 bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
3418
3419 /* update a registry: we need the registry contents to be always up
3420 * to date in order to be able to execute a RESTORE opcode. Here
3421 * we use the fact that for 57710 we send one command at a time
3422 * hence we may take the registry update out of the command handling
3423 * and do it in a simpler way here.
3424 */
3425 rc = bnx2x_mcast_refresh_registry_e1(bp, o);
3426 if (rc)
3427 return rc;
3428
3429 /*
3430 * If CLEAR_ONLY was requested - don't send a ramrod and clear
3431 * RAMROD_PENDING status immediately.
3432 */
3433 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3434 raw->clear_pending(raw);
3435 return 0;
3436 } else {
3437 /*
3438 * No need for an explicit memory barrier here: the ordering of
3439 * writing to the SPQ element and updating of the SPQ producer
3440 * (which involves a memory read) is guaranteed by the full
3441 * memory barrier inside bnx2x_sp_post().
3442 */
3444
3445 /* Send a ramrod */
3446 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
3447 U64_HI(raw->rdata_mapping),
3448 U64_LO(raw->rdata_mapping),
3449 ETH_CONNECTION_TYPE);
3450 if (rc)
3451 return rc;
3452
3453 /* Ramrod completion is pending */
3454 return 1;
3455 }
3456
3457 }
3458
3459 static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
3460 {
3461 return o->registry.exact_match.num_macs_set;
3462 }
3463
3464 static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
3465 {
3466 return o->registry.aprox_match.num_bins_set;
3467 }
3468
3469 static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
3470 int n)
3471 {
3472 o->registry.exact_match.num_macs_set = n;
3473 }
3474
3475 static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
3476 int n)
3477 {
3478 o->registry.aprox_match.num_bins_set = n;
3479 }
3480
3481 int bnx2x_config_mcast(struct bnx2x *bp,
3482 struct bnx2x_mcast_ramrod_params *p,
3483 int cmd)
3484 {
3485 struct bnx2x_mcast_obj *o = p->mcast_obj;
3486 struct bnx2x_raw_obj *r = &o->raw;
3487 int rc = 0, old_reg_size;
3488
3489 /* This is needed to recover number of currently configured mcast macs
3490 * in case of failure.
3491 */
3492 old_reg_size = o->get_registry_size(o);
3493
3494 /* Do some calculations and checks */
3495 rc = o->validate(bp, p, cmd);
3496 if (rc)
3497 return rc;
3498
3499 /* Return if there is no work to do */
3500 if ((!p->mcast_list_len) && (!o->check_sched(o)))
3501 return 0;
3502
3503 DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d "
3504 "o->max_cmd_len=%d\n", o->total_pending_num,
3505 p->mcast_list_len, o->max_cmd_len);
3506
3507 /* Enqueue the current command to the pending list if we can't complete
3508 * it in the current iteration
3509 */
3510 if (r->check_pending(r) ||
3511 ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3512 rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
3513 if (rc < 0)
3514 goto error_exit1;
3515
3516 /* As long as the current command is in a command list we
3517 * don't need to handle it separately.
3518 */
3519 p->mcast_list_len = 0;
3520 }
3521
3522 if (!r->check_pending(r)) {
3523
3524 /* Set 'pending' state */
3525 r->set_pending(r);
3526
3527 /* Configure the new classification in the chip */
3528 rc = o->config_mcast(bp, p, cmd);
3529 if (rc < 0)
3530 goto error_exit2;
3531
3532 /* Wait for a ramrod completion if was requested */
3533 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
3534 rc = o->wait_comp(bp, o);
3535 }
3536
3537 return rc;
3538
3539 error_exit2:
3540 r->clear_pending(r);
3541
3542 error_exit1:
3543 o->revert(bp, p, old_reg_size);
3544
3545 return rc;
3546 }
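/* Usage sketch (illustrative, with hypothetical locals): a caller builds
 * the ramrod parameters and issues a command through this entry point:
 *
 *	struct bnx2x_mcast_ramrod_params rparam = {0};
 *
 *	rparam.mcast_obj = &bp->mcast_obj;
 *	__set_bit(RAMROD_COMP_WAIT, &rparam.ramrod_flags);
 *	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
 *	if (rc < 0)
 *		BNX2X_ERR("Failed to clear multicast configuration\n");
 */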
3547
3548 static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
3549 {
3550 smp_mb__before_clear_bit();
3551 clear_bit(o->sched_state, o->raw.pstate);
3552 smp_mb__after_clear_bit();
3553 }
3554
3555 static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
3556 {
3557 smp_mb__before_clear_bit();
3558 set_bit(o->sched_state, o->raw.pstate);
3559 smp_mb__after_clear_bit();
3560 }
3561
3562 static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
3563 {
3564 return !!test_bit(o->sched_state, o->raw.pstate);
3565 }
3566
3567 static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
3568 {
3569 return o->raw.check_pending(&o->raw) || o->check_sched(o);
3570 }
3571
3572 void bnx2x_init_mcast_obj(struct bnx2x *bp,
3573 struct bnx2x_mcast_obj *mcast_obj,
3574 u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
3575 u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
3576 int state, unsigned long *pstate, bnx2x_obj_type type)
3577 {
3578 memset(mcast_obj, 0, sizeof(*mcast_obj));
3579
3580 bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3581 rdata, rdata_mapping, state, pstate, type);
3582
3583 mcast_obj->engine_id = engine_id;
3584
3585 INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
3586
3587 mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
3588 mcast_obj->check_sched = bnx2x_mcast_check_sched;
3589 mcast_obj->set_sched = bnx2x_mcast_set_sched;
3590 mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
3591
3592 if (CHIP_IS_E1(bp)) {
3593 mcast_obj->config_mcast = bnx2x_mcast_setup_e1;
3594 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3595 mcast_obj->hdl_restore =
3596 bnx2x_mcast_handle_restore_cmd_e1;
3597 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3598
3599 if (CHIP_REV_IS_SLOW(bp))
3600 mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
3601 else
3602 mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
3603
3604 mcast_obj->wait_comp = bnx2x_mcast_wait;
3605 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1;
3606 mcast_obj->validate = bnx2x_mcast_validate_e1;
3607 mcast_obj->revert = bnx2x_mcast_revert_e1;
3608 mcast_obj->get_registry_size =
3609 bnx2x_mcast_get_registry_size_exact;
3610 mcast_obj->set_registry_size =
3611 bnx2x_mcast_set_registry_size_exact;
3612
3613 /* 57710 is the only chip that uses the exact match for mcast
3614 * at the moment.
3615 */
3616 INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
3617
3618 } else if (CHIP_IS_E1H(bp)) {
3619 mcast_obj->config_mcast = bnx2x_mcast_setup_e1h;
3620 mcast_obj->enqueue_cmd = NULL;
3621 mcast_obj->hdl_restore = NULL;
3622 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3623
3624 /* 57711 doesn't send a ramrod, so it has unlimited credit
3625 * for one command.
3626 */
3627 mcast_obj->max_cmd_len = -1;
3628 mcast_obj->wait_comp = bnx2x_mcast_wait;
3629 mcast_obj->set_one_rule = NULL;
3630 mcast_obj->validate = bnx2x_mcast_validate_e1h;
3631 mcast_obj->revert = bnx2x_mcast_revert_e1h;
3632 mcast_obj->get_registry_size =
3633 bnx2x_mcast_get_registry_size_aprox;
3634 mcast_obj->set_registry_size =
3635 bnx2x_mcast_set_registry_size_aprox;
3636 } else {
3637 mcast_obj->config_mcast = bnx2x_mcast_setup_e2;
3638 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3639 mcast_obj->hdl_restore =
3640 bnx2x_mcast_handle_restore_cmd_e2;
3641 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3642 /* TODO: There should be a proper HSI define for this number!!!
3643 */
3644 mcast_obj->max_cmd_len = 16;
3645 mcast_obj->wait_comp = bnx2x_mcast_wait;
3646 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2;
3647 mcast_obj->validate = bnx2x_mcast_validate_e2;
3648 mcast_obj->revert = bnx2x_mcast_revert_e2;
3649 mcast_obj->get_registry_size =
3650 bnx2x_mcast_get_registry_size_aprox;
3651 mcast_obj->set_registry_size =
3652 bnx2x_mcast_set_registry_size_aprox;
3653 }
3654 }
3655
3656 /*************************** Credit handling **********************************/
3657
3658 /**
3659 * __atomic_add_ifless - add if the result is less than a given value.
3660 *
3661 * @v: pointer of type atomic_t
3662 * @a: the amount to add to v...
3663 * @u: ...if (v + a) is less than u.
3664 *
3665 * returns true if (v + a) was less than u, and false otherwise.
3666 *
3667 */
3668 static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
3669 {
3670 int c, old;
3671
3672 c = atomic_read(v);
3673 for (;;) {
3674 if (unlikely(c + a >= u))
3675 return false;
3676
3677 old = atomic_cmpxchg((v), c, c + a);
3678 if (likely(old == c))
3679 break;
3680 c = old;
3681 }
3682
3683 return true;
3684 }
3685
3686 /**
3687 * __atomic_dec_ifmoe - decrement if the result is greater than or equal to a given value.
3688 *
3689 * @v: pointer of type atomic_t
3690 * @a: the amount to dec from v...
3691 * @u: ...if (v - a) is greater than or equal to u.
3692 *
3693 * returns true if (v - a) was greater than or equal to u, and false
3694 * otherwise.
3695 */
3696 static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
3697 {
3698 int c, old;
3699
3700 c = atomic_read(v);
3701 for (;;) {
3702 if (unlikely(c - a < u))
3703 return false;
3704
3705 old = atomic_cmpxchg((v), c, c - a);
3706 if (likely(old == c))
3707 break;
3708 c = old;
3709 }
3710
3711 return true;
3712 }
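/* Illustrative example: together these helpers implement a bounded
 * counter. Assuming a pool of size 8 with credit == 8:
 *
 *	__atomic_dec_ifmoe(&o->credit, 3, 0);	-- ok, credit becomes 5
 *	__atomic_dec_ifmoe(&o->credit, 6, 0);	-- fails: 5 - 6 < 0
 *	__atomic_add_ifless(&o->credit, 3, 9);	-- ok, credit back to 8
 *	__atomic_add_ifless(&o->credit, 1, 9);	-- fails: 8 + 1 >= 9
 *
 * i.e. get() can never take the credit below zero and put() can never
 * refill beyond pool_sz (u == pool_sz + 1 in bnx2x_credit_pool_put()).
 */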
3713
3714 static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
3715 {
3716 bool rc;
3717
3718 smp_mb();
3719 rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
3720 smp_mb();
3721
3722 return rc;
3723 }
3724
3725 static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
3726 {
3727 bool rc;
3728
3729 smp_mb();
3730
3731 /* Don't refill if credit + cnt > pool_sz */
3732 rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
3733
3734 smp_mb();
3735
3736 return rc;
3737 }
3738
3739 static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
3740 {
3741 int cur_credit;
3742
3743 smp_mb();
3744 cur_credit = atomic_read(&o->credit);
3745
3746 return cur_credit;
3747 }
3748
3749 static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
3750 int cnt)
3751 {
3752 return true;
3753 }
3754
3755
3756 static bool bnx2x_credit_pool_get_entry(
3757 struct bnx2x_credit_pool_obj *o,
3758 int *offset)
3759 {
3760 int idx, vec, i;
3761
3762 *offset = -1;
3763
3764 /* Find an "internal cam-offset", then add the object's base to it... */
3765 for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
3766
3767 /* Skip the current vector if there are no free entries in it */
3768 if (!o->pool_mirror[vec])
3769 continue;
3770
3771 /* If we've got here we are going to find a free entry */
3772 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
3773 i < BIT_VEC64_ELEM_SZ; idx++, i++)
3774
3775 if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3776 /* Got one!! */
3777 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3778 *offset = o->base_pool_offset + idx;
3779 return true;
3780 }
3781 }
3782
3783 return false;
3784 }
3785
3786 static bool bnx2x_credit_pool_put_entry(
3787 struct bnx2x_credit_pool_obj *o,
3788 int offset)
3789 {
3790 if (offset < o->base_pool_offset)
3791 return false;
3792
3793 offset -= o->base_pool_offset;
3794
3795 if (offset >= o->pool_sz)
3796 return false;
3797
3798 /* Return the entry to the pool */
3799 BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3800
3801 return true;
3802 }
3803
3804 static bool bnx2x_credit_pool_put_entry_always_true(
3805 struct bnx2x_credit_pool_obj *o,
3806 int offset)
3807 {
3808 return true;
3809 }
3810
3811 static bool bnx2x_credit_pool_get_entry_always_true(
3812 struct bnx2x_credit_pool_obj *o,
3813 int *offset)
3814 {
3815 *offset = -1;
3816 return true;
3817 }
3818 /**
3819 * bnx2x_init_credit_pool - initialize credit pool internals.
3820 *
3821 * @p: credit pool object to initialize
3822 * @base: Base entry in the CAM to use.
3823 * @credit: pool size.
3824 *
3825 * If base is negative no CAM entries handling will be performed.
3826 * If credit is negative pool operations will always succeed (unlimited pool).
3827 *
3828 */
3829 static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
3830 int base, int credit)
3831 {
3832 /* Zero the object first */
3833 memset(p, 0, sizeof(*p));
3834
3835 /* Set the table to all 1s */
3836 memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
3837
3838 /* Init a pool as full */
3839 atomic_set(&p->credit, credit);
3840
3841 /* The total pool size */
3842 p->pool_sz = credit;
3843
3844 p->base_pool_offset = base;
3845
3846 /* Commit the change */
3847 smp_mb();
3848
3849 p->check = bnx2x_credit_pool_check;
3850
3851 /* if pool credit is negative - disable the checks */
3852 if (credit >= 0) {
3853 p->put = bnx2x_credit_pool_put;
3854 p->get = bnx2x_credit_pool_get;
3855 p->put_entry = bnx2x_credit_pool_put_entry;
3856 p->get_entry = bnx2x_credit_pool_get_entry;
3857 } else {
3858 p->put = bnx2x_credit_pool_always_true;
3859 p->get = bnx2x_credit_pool_always_true;
3860 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3861 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3862 }
3863
3864 /* If base is negative - disable entries handling */
3865 if (base < 0) {
3866 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3867 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3868 }
3869 }
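/*
 * Illustrative sketch (not part of the driver): the three initialization
 * modes described above; the sizes are hypothetical:
 *
 *	bnx2x_init_credit_pool(p, 16, 8);	(8 credits, CAM entries 16..23)
 *	bnx2x_init_credit_pool(p, -1, 8);	(8 credits, no CAM entry handling)
 *	bnx2x_init_credit_pool(p, 0, -1);	(unlimited pool, all ops succeed)
 */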
3870
3871 void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
3872 struct bnx2x_credit_pool_obj *p, u8 func_id,
3873 u8 func_num)
3874 {
3875 /* TODO: this will be defined in consts as well... */
3876 #define BNX2X_CAM_SIZE_EMUL 5
3877
3878 int cam_sz;
3879
3880 if (CHIP_IS_E1(bp)) {
3881 /* In E1, multicast MACs are stored in the CAM as well... */
3882 if (!CHIP_REV_IS_SLOW(bp))
3883 cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
3884 else
3885 cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
3886
3887 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3888
3889 } else if (CHIP_IS_E1H(bp)) {
3890 /* CAM credit is equally divided between all active functions
3891 * on the PORT.
3892 */
3893 if (func_num > 0) {
3894 if (!CHIP_REV_IS_SLOW(bp))
3895 cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
3896 else
3897 cam_sz = BNX2X_CAM_SIZE_EMUL;
3898 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3899 } else {
3900 /* this should never happen! Block MAC operations. */
3901 bnx2x_init_credit_pool(p, 0, 0);
3902 }
3903
3904 } else {
3905
3906 /*
3907 * CAM credit is equally divided between all active functions
3908 * on the PATH.
3909 */
3910 if (func_num > 0) {
3911 if (!CHIP_REV_IS_SLOW(bp))
3912 cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
3913 else
3914 cam_sz = BNX2X_CAM_SIZE_EMUL;
3915
3916 /*
3917 * No need for CAM entries handling for 57712 and
3918 * newer.
3919 */
3920 bnx2x_init_credit_pool(p, -1, cam_sz);
3921 } else {
3922 /* this should never happen! Block MAC operations. */
3923 bnx2x_init_credit_pool(p, 0, 0);
3924 }
3925
3926 }
3927 }
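/*
 * Worked example (illustrative; assumes MAX_MAC_CREDIT_E1H is 256): on an
 * E1H chip with 4 active functions on the port,
 *
 *	cam_sz = 256 / (2 * 4) = 32,
 *
 * so function 2 is given the credit/CAM range [2 * 32, 2 * 32 + 31],
 * i.e. entries 64..95.
 */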
3928
3929 void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
3930 struct bnx2x_credit_pool_obj *p,
3931 u8 func_id,
3932 u8 func_num)
3933 {
3934 if (CHIP_IS_E1x(bp)) {
3935 /*
3936 * There is no VLAN credit in HW on 57710 and 57711;
3937 * only MAC / MAC-VLAN pairs can be configured
3938 */
3939 bnx2x_init_credit_pool(p, 0, -1);
3940 } else {
3941 /*
3942 * CAM credit is equally divided between all active functions
3943 * on the PATH.
3944 */
3945 if (func_num > 0) {
3946 int credit = MAX_VLAN_CREDIT_E2 / func_num;
3947 bnx2x_init_credit_pool(p, func_id * credit, credit);
3948 } else
3949 /* this should never happen! Block VLAN operations. */
3950 bnx2x_init_credit_pool(p, 0, 0);
3951 }
3952 }
3953
3954 /****************** RSS Configuration ******************/
3955 /**
3956 * bnx2x_debug_print_ind_table - prints the indirection table configuration.
3957 *
3958 * @bp: driver handle
3959 * @p: pointer to rss configuration
3960 *
3961 * Prints the table when the NETIF_MSG_IFUP debug level is configured.
3962 */
3963 static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
3964 struct bnx2x_config_rss_params *p)
3965 {
3966 int i;
3967
3968 DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
3969 DP(BNX2X_MSG_SP, "0x0000: ");
3970 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
3971 DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
3972
3973 /* Print 4 bytes in a line */
3974 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
3975 (((i + 1) & 0x3) == 0)) {
3976 DP_CONT(BNX2X_MSG_SP, "\n");
3977 DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
3978 }
3979 }
3980
3981 DP_CONT(BNX2X_MSG_SP, "\n");
3982 }
3983
3984 /**
3985 * bnx2x_setup_rss - configure RSS
3986 *
3987 * @bp: device handle
3988 * @p: rss configuration
3989 *
3990 * Sends an RSS UPDATE ramrod.
3991 */
3992 static int bnx2x_setup_rss(struct bnx2x *bp,
3993 struct bnx2x_config_rss_params *p)
3994 {
3995 struct bnx2x_rss_config_obj *o = p->rss_obj;
3996 struct bnx2x_raw_obj *r = &o->raw;
3997 struct eth_rss_update_ramrod_data *data =
3998 (struct eth_rss_update_ramrod_data *)(r->rdata);
3999 u8 rss_mode = 0;
4000 int rc;
4001
4002 memset(data, 0, sizeof(*data));
4003
4004 DP(BNX2X_MSG_SP, "Configuring RSS\n");
4005
4006 /* Set an echo field */
4007 data->echo = (r->cid & BNX2X_SWCID_MASK) |
4008 (r->state << BNX2X_SWCID_SHIFT);
4009
4010 /* RSS mode */
4011 if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
4012 rss_mode = ETH_RSS_MODE_DISABLED;
4013 else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
4014 rss_mode = ETH_RSS_MODE_REGULAR;
4015 else if (test_bit(BNX2X_RSS_MODE_VLAN_PRI, &p->rss_flags))
4016 rss_mode = ETH_RSS_MODE_VLAN_PRI;
4017 else if (test_bit(BNX2X_RSS_MODE_E1HOV_PRI, &p->rss_flags))
4018 rss_mode = ETH_RSS_MODE_E1HOV_PRI;
4019 else if (test_bit(BNX2X_RSS_MODE_IP_DSCP, &p->rss_flags))
4020 rss_mode = ETH_RSS_MODE_IP_DSCP;
4021
4022 data->rss_mode = rss_mode;
4023
4024 DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
4025
4026 /* RSS capabilities */
4027 if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
4028 data->capabilities |=
4029 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4030
4031 if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
4032 data->capabilities |=
4033 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4034
4035 if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
4036 data->capabilities |=
4037 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4038
4039 if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
4040 data->capabilities |=
4041 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4042
4043 /* Hashing mask */
4044 data->rss_result_mask = p->rss_result_mask;
4045
4046 /* RSS engine ID */
4047 data->rss_engine_id = o->engine_id;
4048
4049 DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
4050
4051 /* Indirection table */
4052 memcpy(data->indirection_table, p->ind_table,
4053 T_ETH_INDIRECTION_TABLE_SIZE);
4054
4055 /* Remember the last configuration */
4056 memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4057
4058 /* Print the indirection table */
4059 if (netif_msg_ifup(bp))
4060 bnx2x_debug_print_ind_table(bp, p);
4061
4062 /* RSS keys */
4063 if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
4064 memcpy(&data->rss_key[0], &p->rss_key[0],
4065 sizeof(data->rss_key));
4066 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4067 }
4068
4069 /*
4070 * No need for an explicit memory barrier here: the ordering
4071 * between writing to the SPQ element above and updating the
4072 * SPQ producer (which involves a memory read) is guaranteed
4073 * by the full memory barrier that bnx2x_sp_post() executes
4074 * before it updates the producer.
4075 */
4076
4077 /* Send a ramrod */
4078 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
4079 U64_HI(r->rdata_mapping),
4080 U64_LO(r->rdata_mapping),
4081 ETH_CONNECTION_TYPE);
4082
4083 if (rc < 0)
4084 return rc;
4085
4086 return 1;
4087 }
4088
4089 void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
4090 u8 *ind_table)
4091 {
4092 memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
4093 }
4094
4095 int bnx2x_config_rss(struct bnx2x *bp,
4096 struct bnx2x_config_rss_params *p)
4097 {
4098 int rc;
4099 struct bnx2x_rss_config_obj *o = p->rss_obj;
4100 struct bnx2x_raw_obj *r = &o->raw;
4101
4102 /* Do nothing if only driver cleanup was requested */
4103 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
4104 return 0;
4105
4106 r->set_pending(r);
4107
4108 rc = o->config_rss(bp, p);
4109 if (rc < 0) {
4110 r->clear_pending(r);
4111 return rc;
4112 }
4113
4114 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
4115 rc = r->wait_comp(bp, r);
4116
4117 return rc;
4118 }
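/*
 * Illustrative sketch (not part of the driver): a minimal caller filling
 * struct bnx2x_config_rss_params. The flag choices and the rss_conf_obj
 * field are assumptions for the example:
 *
 *	struct bnx2x_config_rss_params params = {NULL};
 *
 *	params.rss_obj = &bp->rss_conf_obj;
 *	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
 *	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
 *	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
 *	(fill params.ind_table[] and params.rss_result_mask as well)
 *	rc = bnx2x_config_rss(bp, &params);
 */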
4119
4120
4121 void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4122 struct bnx2x_rss_config_obj *rss_obj,
4123 u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
4124 void *rdata, dma_addr_t rdata_mapping,
4125 int state, unsigned long *pstate,
4126 bnx2x_obj_type type)
4127 {
4128 bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4129 rdata_mapping, state, pstate, type);
4130
4131 rss_obj->engine_id = engine_id;
4132 rss_obj->config_rss = bnx2x_setup_rss;
4133 }
4134
4135 /********************** Queue state object ***********************************/
4136
4137 /**
4138 * bnx2x_queue_state_change - perform Queue state change transition
4139 *
4140 * @bp: device handle
4141 * @params: parameters to perform the transition
4142 *
4143 * returns 0 in case of a successfully completed transition, a negative
4144 * error code in case of failure, or a positive (EBUSY) value if there is
4145 * a completion that is still pending (possible only if RAMROD_COMP_WAIT
4146 * is not set in params->ramrod_flags for asynchronous commands).
4147 *
4148 */
4149 int bnx2x_queue_state_change(struct bnx2x *bp,
4150 struct bnx2x_queue_state_params *params)
4151 {
4152 struct bnx2x_queue_sp_obj *o = params->q_obj;
4153 int rc, pending_bit;
4154 unsigned long *pending = &o->pending;
4155
4156 /* Check that the requested transition is legal */
4157 if (o->check_transition(bp, o, params))
4158 return -EINVAL;
4159
4160 /* Set "pending" bit */
4161 pending_bit = o->set_pending(o, params);
4162
4163 /* Don't send a command if only driver cleanup was requested */
4164 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
4165 o->complete_cmd(bp, o, pending_bit);
4166 else {
4167 /* Send a ramrod */
4168 rc = o->send_cmd(bp, params);
4169 if (rc) {
4170 o->next_state = BNX2X_Q_STATE_MAX;
4171 clear_bit(pending_bit, pending);
4172 smp_mb__after_clear_bit();
4173 return rc;
4174 }
4175
4176 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
4177 rc = o->wait_comp(bp, o, pending_bit);
4178 if (rc)
4179 return rc;
4180
4181 return 0;
4182 }
4183 }
4184
4185 return !!test_bit(pending_bit, pending);
4186 }
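/*
 * Illustrative sketch (not part of the driver): a synchronous HALT request
 * on a queue object; the q_obj pointer is hypothetical:
 *
 *	struct bnx2x_queue_state_params q_params = {NULL};
 *
 *	q_params.q_obj = &fp->q_obj;
 *	q_params.cmd = BNX2X_Q_CMD_HALT;
 *	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 *	rc = bnx2x_queue_state_change(bp, &q_params);
 *	(rc == 0 once the HALT completion has arrived)
 */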
4187
4188
4189 static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4190 struct bnx2x_queue_state_params *params)
4191 {
4192 enum bnx2x_queue_cmd cmd = params->cmd, bit;
4193
4194 /* ACTIVATE and DEACTIVATE commands are implemented on top of
4195 * UPDATE command.
4196 */
4197 if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4198 (cmd == BNX2X_Q_CMD_DEACTIVATE))
4199 bit = BNX2X_Q_CMD_UPDATE;
4200 else
4201 bit = cmd;
4202
4203 set_bit(bit, &obj->pending);
4204 return bit;
4205 }
4206
4207 static int bnx2x_queue_wait_comp(struct bnx2x *bp,
4208 struct bnx2x_queue_sp_obj *o,
4209 enum bnx2x_queue_cmd cmd)
4210 {
4211 return bnx2x_state_wait(bp, cmd, &o->pending);
4212 }
4213
4214 /**
4215 * bnx2x_queue_comp_cmd - complete the state change command.
4216 *
4217 * @bp: device handle
4218 * @o: queue state object
4219 * @cmd: command that has completed
4220 *
4221 * Checks that the arrived completion is expected.
4222 */
4223 static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
4224 struct bnx2x_queue_sp_obj *o,
4225 enum bnx2x_queue_cmd cmd)
4226 {
4227 unsigned long cur_pending = o->pending;
4228
4229 if (!test_and_clear_bit(cmd, &cur_pending)) {
4230 BNX2X_ERR("Bad MC reply %d for queue %d in state %d "
4231 "pending 0x%lx, next_state %d\n", cmd,
4232 o->cids[BNX2X_PRIMARY_CID_INDEX],
4233 o->state, cur_pending, o->next_state);
4234 return -EINVAL;
4235 }
4236
4237 if (o->next_tx_only >= o->max_cos)
4238 /* >= because the tx-only count must always be smaller than
4239 * max_cos, since the primary connection supports CoS 0
4240 */
4241 BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d\n",
4242 o->next_tx_only, o->max_cos);
4243
4244 DP(BNX2X_MSG_SP, "Completing command %d for queue %d, "
4245 "setting state to %d\n", cmd,
4246 o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
4247
4248 if (o->next_tx_only) /* print num tx-only if any exist */
4249 DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d",
4250 o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
4251
4252 o->state = o->next_state;
4253 o->num_tx_only = o->next_tx_only;
4254 o->next_state = BNX2X_Q_STATE_MAX;
4255
4256 /* It's important that o->state and o->next_state are
4257 * updated before o->pending.
4258 */
4259 wmb();
4260
4261 clear_bit(cmd, &o->pending);
4262 smp_mb__after_clear_bit();
4263
4264 return 0;
4265 }
4266
4267 static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4268 struct bnx2x_queue_state_params *cmd_params,
4269 struct client_init_ramrod_data *data)
4270 {
4271 struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
4272
4273 /* Rx data */
4274
4275 /* IPv6 TPA supported for E2 and above only */
4276 data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
4277 CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4278 }
4279
4280 static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
4281 struct bnx2x_queue_sp_obj *o,
4282 struct bnx2x_general_setup_params *params,
4283 struct client_init_general_data *gen_data,
4284 unsigned long *flags)
4285 {
4286 gen_data->client_id = o->cl_id;
4287
4288 if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
4289 gen_data->statistics_counter_id =
4290 params->stat_id;
4291 gen_data->statistics_en_flg = 1;
4292 gen_data->statistics_zero_flg =
4293 test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
4294 } else
4295 gen_data->statistics_counter_id =
4296 DISABLE_STATISTIC_COUNTER_ID_VALUE;
4297
4298 gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
4299 gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
4300 gen_data->sp_client_id = params->spcl_id;
4301 gen_data->mtu = cpu_to_le16(params->mtu);
4302 gen_data->func_id = o->func_id;
4303
4304
4305 gen_data->cos = params->cos;
4306
4307 gen_data->traffic_type =
4308 test_bit(BNX2X_Q_FLG_FCOE, flags) ?
4309 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4310
4311 DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d",
4312 gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4313 }
4314
4315 static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4316 struct bnx2x_txq_setup_params *params,
4317 struct client_init_tx_data *tx_data,
4318 unsigned long *flags)
4319 {
4320 tx_data->enforce_security_flg =
4321 test_bit(BNX2X_Q_FLG_TX_SEC, flags);
4322 tx_data->default_vlan =
4323 cpu_to_le16(params->default_vlan);
4324 tx_data->default_vlan_flg =
4325 test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
4326 tx_data->tx_switching_flg =
4327 test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
4328 tx_data->anti_spoofing_flg =
4329 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
4330 tx_data->tx_status_block_id = params->fw_sb_id;
4331 tx_data->tx_sb_index_number = params->sb_cq_index;
4332 tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4333
4334 tx_data->tx_bd_page_base.lo =
4335 cpu_to_le32(U64_LO(params->dscr_map));
4336 tx_data->tx_bd_page_base.hi =
4337 cpu_to_le32(U64_HI(params->dscr_map));
4338
4339 /* Don't configure any Tx switching mode during queue SETUP */
4340 tx_data->state = 0;
4341 }
4342
4343 static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
4344 struct rxq_pause_params *params,
4345 struct client_init_rx_data *rx_data)
4346 {
4347 /* flow control data */
4348 rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
4349 rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
4350 rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
4351 rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
4352 rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
4353 rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
4354 rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
4355 }
4356
4357 static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
4358 struct bnx2x_rxq_setup_params *params,
4359 struct client_init_rx_data *rx_data,
4360 unsigned long *flags)
4361 {
4362 /* Rx data */
4363 rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
4364 CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
4365 rx_data->vmqueue_mode_en_flg = 0;
4366
4367 rx_data->cache_line_alignment_log_size =
4368 params->cache_line_log;
4369 rx_data->enable_dynamic_hc =
4370 test_bit(BNX2X_Q_FLG_DHC, flags);
4371 rx_data->max_sges_for_packet = params->max_sges_pkt;
4372 rx_data->client_qzone_id = params->cl_qzone_id;
4373 rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);
4374
4375 /* Always start in DROP_ALL mode */
4376 rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4377 CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4378
4379 /* We don't set drop flags */
4380 rx_data->drop_ip_cs_err_flg = 0;
4381 rx_data->drop_tcp_cs_err_flg = 0;
4382 rx_data->drop_ttl0_flg = 0;
4383 rx_data->drop_udp_cs_err_flg = 0;
4384 rx_data->inner_vlan_removal_enable_flg =
4385 test_bit(BNX2X_Q_FLG_VLAN, flags);
4386 rx_data->outer_vlan_removal_enable_flg =
4387 test_bit(BNX2X_Q_FLG_OV, flags);
4388 rx_data->status_block_id = params->fw_sb_id;
4389 rx_data->rx_sb_index_number = params->sb_cq_index;
4390 rx_data->max_tpa_queues = params->max_tpa_queues;
4391 rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
4392 rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
4393 rx_data->bd_page_base.lo =
4394 cpu_to_le32(U64_LO(params->dscr_map));
4395 rx_data->bd_page_base.hi =
4396 cpu_to_le32(U64_HI(params->dscr_map));
4397 rx_data->sge_page_base.lo =
4398 cpu_to_le32(U64_LO(params->sge_map));
4399 rx_data->sge_page_base.hi =
4400 cpu_to_le32(U64_HI(params->sge_map));
4401 rx_data->cqe_page_base.lo =
4402 cpu_to_le32(U64_LO(params->rcq_map));
4403 rx_data->cqe_page_base.hi =
4404 cpu_to_le32(U64_HI(params->rcq_map));
4405 rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
4406
4407 if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
4408 rx_data->approx_mcast_engine_id = o->func_id;
4409 rx_data->is_approx_mcast = 1;
4410 }
4411
4412 rx_data->rss_engine_id = params->rss_engine_id;
4413
4414 /* silent vlan removal */
4415 rx_data->silent_vlan_removal_flg =
4416 test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
4417 rx_data->silent_vlan_value =
4418 cpu_to_le16(params->silent_removal_value);
4419 rx_data->silent_vlan_mask =
4420 cpu_to_le16(params->silent_removal_mask);
4421
4422 }
4423
4424 /* initialize the general, tx and rx parts of a queue object */
4425 static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4426 struct bnx2x_queue_state_params *cmd_params,
4427 struct client_init_ramrod_data *data)
4428 {
4429 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4430 &cmd_params->params.setup.gen_params,
4431 &data->general,
4432 &cmd_params->params.setup.flags);
4433
4434 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4435 &cmd_params->params.setup.txq_params,
4436 &data->tx,
4437 &cmd_params->params.setup.flags);
4438
4439 bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
4440 &cmd_params->params.setup.rxq_params,
4441 &data->rx,
4442 &cmd_params->params.setup.flags);
4443
4444 bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
4445 &cmd_params->params.setup.pause_params,
4446 &data->rx);
4447 }
4448
4449 /* initialize the general and tx parts of a tx-only queue object */
4450 static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
4451 struct bnx2x_queue_state_params *cmd_params,
4452 struct tx_queue_init_ramrod_data *data)
4453 {
4454 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4455 &cmd_params->params.tx_only.gen_params,
4456 &data->general,
4457 &cmd_params->params.tx_only.flags);
4458
4459 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4460 &cmd_params->params.tx_only.txq_params,
4461 &data->tx,
4462 &cmd_params->params.tx_only.flags);
4463
4464 DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x",cmd_params->q_obj->cids[0],
4465 data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi);
4466 }
4467
4468 /**
4469 * bnx2x_q_init - init HW/FW queue
4470 *
4471 * @bp: device handle
4472 * @params: queue INIT parameters
4473 *
4474 * HW/FW initial Queue configuration:
4475 * - HC: Rx and Tx
4476 * - CDU context validation
4477 *
4478 */
4479 static inline int bnx2x_q_init(struct bnx2x *bp,
4480 struct bnx2x_queue_state_params *params)
4481 {
4482 struct bnx2x_queue_sp_obj *o = params->q_obj;
4483 struct bnx2x_queue_init_params *init = &params->params.init;
4484 u16 hc_usec;
4485 u8 cos;
4486
4487 /* Tx HC configuration */
4488 if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
4489 test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
4490 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4491
4492 bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
4493 init->tx.sb_cq_index,
4494 !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
4495 hc_usec);
4496 }
4497
4498 /* Rx HC configuration */
4499 if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
4500 test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
4501 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
4502
4503 bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
4504 init->rx.sb_cq_index,
4505 !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
4506 hc_usec);
4507 }
4508
4509 /* Set CDU context validation values */
4510 for (cos = 0; cos < o->max_cos; cos++) {
4511 DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
4512 o->cids[cos], cos);
4513 DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
4514 bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
4515 }
4516
4517 /* As no ramrod is sent, complete the command immediately */
4518 o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
4519
4520 mmiowb();
4521 smp_mb();
4522
4523 return 0;
4524 }
4525
4526 static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4527 struct bnx2x_queue_state_params *params)
4528 {
4529 struct bnx2x_queue_sp_obj *o = params->q_obj;
4530 struct client_init_ramrod_data *rdata =
4531 (struct client_init_ramrod_data *)o->rdata;
4532 dma_addr_t data_mapping = o->rdata_mapping;
4533 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4534
4535 /* Clear the ramrod data */
4536 memset(rdata, 0, sizeof(*rdata));
4537
4538 /* Fill the ramrod data */
4539 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4540
4541 /*
4542 * No need for an explicit memory barrier here: the ordering
4543 * between writing to the SPQ element above and updating the
4544 * SPQ producer (which involves a memory read) is guaranteed
4545 * by the full memory barrier that bnx2x_sp_post() executes
4546 * before it updates the producer.
4547 */
4548
4549 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4550 U64_HI(data_mapping),
4551 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4552 }
4553
4554 static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4555 struct bnx2x_queue_state_params *params)
4556 {
4557 struct bnx2x_queue_sp_obj *o = params->q_obj;
4558 struct client_init_ramrod_data *rdata =
4559 (struct client_init_ramrod_data *)o->rdata;
4560 dma_addr_t data_mapping = o->rdata_mapping;
4561 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4562
4563 /* Clear the ramrod data */
4564 memset(rdata, 0, sizeof(*rdata));
4565
4566 /* Fill the ramrod data */
4567 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4568 bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4569
4570 /*
4571 * No need for an explicit memory barrier here: the ordering
4572 * between writing to the SPQ element above and updating the
4573 * SPQ producer (which involves a memory read) is guaranteed
4574 * by the full memory barrier that bnx2x_sp_post() executes
4575 * before it updates the producer.
4576 */
4577
4578 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4579 U64_HI(data_mapping),
4580 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4581 }
4582
4583 static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4584 struct bnx2x_queue_state_params *params)
4585 {
4586 struct bnx2x_queue_sp_obj *o = params->q_obj;
4587 struct tx_queue_init_ramrod_data *rdata =
4588 (struct tx_queue_init_ramrod_data *)o->rdata;
4589 dma_addr_t data_mapping = o->rdata_mapping;
4590 int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4591 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
4592 &params->params.tx_only;
4593 u8 cid_index = tx_only_params->cid_index;
4594
4595
4596 if (cid_index >= o->max_cos) {
4597 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4598 o->cl_id, cid_index);
4599 return -EINVAL;
4600 }
4601
4602 DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
4603 tx_only_params->gen_params.cos,
4604 tx_only_params->gen_params.spcl_id);
4605
4606 /* Clear the ramrod data */
4607 memset(rdata, 0, sizeof(*rdata));
4608
4609 /* Fill the ramrod data */
4610 bnx2x_q_fill_setup_tx_only(bp, params, rdata);
4611
4612 DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, "
4613 "sp-client id %d, cos %d\n",
4614 o->cids[cid_index],
4615 rdata->general.client_id,
4616 rdata->general.sp_client_id, rdata->general.cos);
4617
4618 /*
4619 * No need for an explicit memory barrier here: the ordering
4620 * between writing to the SPQ element above and updating the
4621 * SPQ producer (which involves a memory read) is guaranteed
4622 * by the full memory barrier that bnx2x_sp_post() executes
4623 * before it updates the producer.
4624 */
4625
4626 return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
4627 U64_HI(data_mapping),
4628 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4629 }
4630
4631 static void bnx2x_q_fill_update_data(struct bnx2x *bp,
4632 struct bnx2x_queue_sp_obj *obj,
4633 struct bnx2x_queue_update_params *params,
4634 struct client_update_ramrod_data *data)
4635 {
4636 /* Client ID of the client to update */
4637 data->client_id = obj->cl_id;
4638
4639 /* Function ID of the client to update */
4640 data->func_id = obj->func_id;
4641
4642 /* Default VLAN value */
4643 data->default_vlan = cpu_to_le16(params->def_vlan);
4644
4645 /* Inner VLAN stripping */
4646 data->inner_vlan_removal_enable_flg =
4647 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4648 data->inner_vlan_removal_change_flg =
4649 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
4650 &params->update_flags);
4651
4652 /* Outer VLAN stripping */
4653 data->outer_vlan_removal_enable_flg =
4654 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4655 data->outer_vlan_removal_change_flg =
4656 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
4657 &params->update_flags);
4658
4659 /* Drop packets that have source MAC that doesn't belong to this
4660 * Queue.
4661 */
4662 data->anti_spoofing_enable_flg =
4663 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4664 data->anti_spoofing_change_flg =
4665 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
4666
4667 /* Activate/Deactivate */
4668 data->activate_flg =
4669 test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
4670 data->activate_change_flg =
4671 test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4672
4673 /* Enable default VLAN */
4674 data->default_vlan_enable_flg =
4675 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4676 data->default_vlan_change_flg =
4677 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
4678 &params->update_flags);
4679
4680 /* silent vlan removal */
4681 data->silent_vlan_change_flg =
4682 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4683 &params->update_flags);
4684 data->silent_vlan_removal_flg =
4685 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
4686 data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
4687 data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
4688 }
4689
4690 static inline int bnx2x_q_send_update(struct bnx2x *bp,
4691 struct bnx2x_queue_state_params *params)
4692 {
4693 struct bnx2x_queue_sp_obj *o = params->q_obj;
4694 struct client_update_ramrod_data *rdata =
4695 (struct client_update_ramrod_data *)o->rdata;
4696 dma_addr_t data_mapping = o->rdata_mapping;
4697 struct bnx2x_queue_update_params *update_params =
4698 &params->params.update;
4699 u8 cid_index = update_params->cid_index;
4700
4701 if (cid_index >= o->max_cos) {
4702 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4703 o->cl_id, cid_index);
4704 return -EINVAL;
4705 }
4706
4707
4708 /* Clear the ramrod data */
4709 memset(rdata, 0, sizeof(*rdata));
4710
4711 /* Fill the ramrod data */
4712 bnx2x_q_fill_update_data(bp, o, update_params, rdata);
4713
4714 /*
4715 * No need for an explicit memory barrier here: the ordering
4716 * between writing to the SPQ element above and updating the
4717 * SPQ producer (which involves a memory read) is guaranteed
4718 * by the full memory barrier that bnx2x_sp_post() executes
4719 * before it updates the producer.
4720 */
4721
4722 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4723 o->cids[cid_index], U64_HI(data_mapping),
4724 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4725 }
4726
4727 /**
4728 * bnx2x_q_send_deactivate - send DEACTIVATE command
4729 *
4730 * @bp: device handle
4731 * @params: queue state parameters
4732 *
4733 * implemented using the UPDATE command.
4734 */
4735 static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
4736 struct bnx2x_queue_state_params *params)
4737 {
4738 struct bnx2x_queue_update_params *update = &params->params.update;
4739
4740 memset(update, 0, sizeof(*update));
4741
4742 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4743
4744 return bnx2x_q_send_update(bp, params);
4745 }
4746
4747 /**
4748 * bnx2x_q_send_activate - send ACTIVATE command
4749 *
4750 * @bp: device handle
4751 * @params: queue state parameters
4752 *
4753 * implemented using the UPDATE command.
4754 */
4755 static inline int bnx2x_q_send_activate(struct bnx2x *bp,
4756 struct bnx2x_queue_state_params *params)
4757 {
4758 struct bnx2x_queue_update_params *update = &params->params.update;
4759
4760 memset(update, 0, sizeof(*update));
4761
4762 __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
4763 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4764
4765 return bnx2x_q_send_update(bp, params);
4766 }
4767
4768 static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
4769 struct bnx2x_queue_state_params *params)
4770 {
4771 /* TODO: Not implemented yet. */
4772 return -EINVAL;
4773 }
4774
4775 static inline int bnx2x_q_send_halt(struct bnx2x *bp,
4776 struct bnx2x_queue_state_params *params)
4777 {
4778 struct bnx2x_queue_sp_obj *o = params->q_obj;
4779
4780 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
4781 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
4782 ETH_CONNECTION_TYPE);
4783 }
4784
4785 static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
4786 struct bnx2x_queue_state_params *params)
4787 {
4788 struct bnx2x_queue_sp_obj *o = params->q_obj;
4789 u8 cid_idx = params->params.cfc_del.cid_index;
4790
4791 if (cid_idx >= o->max_cos) {
4792 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4793 o->cl_id, cid_idx);
4794 return -EINVAL;
4795 }
4796
4797 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
4798 o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
4799 }
4800
4801 static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
4802 struct bnx2x_queue_state_params *params)
4803 {
4804 struct bnx2x_queue_sp_obj *o = params->q_obj;
4805 u8 cid_index = params->params.terminate.cid_index;
4806
4807 if (cid_index >= o->max_cos) {
4808 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4809 o->cl_id, cid_index);
4810 return -EINVAL;
4811 }
4812
4813 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
4814 o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
4815 }
4816
4817 static inline int bnx2x_q_send_empty(struct bnx2x *bp,
4818 struct bnx2x_queue_state_params *params)
4819 {
4820 struct bnx2x_queue_sp_obj *o = params->q_obj;
4821
4822 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
4823 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
4824 ETH_CONNECTION_TYPE);
4825 }
4826
4827 static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
4828 struct bnx2x_queue_state_params *params)
4829 {
4830 switch (params->cmd) {
4831 case BNX2X_Q_CMD_INIT:
4832 return bnx2x_q_init(bp, params);
4833 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4834 return bnx2x_q_send_setup_tx_only(bp, params);
4835 case BNX2X_Q_CMD_DEACTIVATE:
4836 return bnx2x_q_send_deactivate(bp, params);
4837 case BNX2X_Q_CMD_ACTIVATE:
4838 return bnx2x_q_send_activate(bp, params);
4839 case BNX2X_Q_CMD_UPDATE:
4840 return bnx2x_q_send_update(bp, params);
4841 case BNX2X_Q_CMD_UPDATE_TPA:
4842 return bnx2x_q_send_update_tpa(bp, params);
4843 case BNX2X_Q_CMD_HALT:
4844 return bnx2x_q_send_halt(bp, params);
4845 case BNX2X_Q_CMD_CFC_DEL:
4846 return bnx2x_q_send_cfc_del(bp, params);
4847 case BNX2X_Q_CMD_TERMINATE:
4848 return bnx2x_q_send_terminate(bp, params);
4849 case BNX2X_Q_CMD_EMPTY:
4850 return bnx2x_q_send_empty(bp, params);
4851 default:
4852 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4853 return -EINVAL;
4854 }
4855 }
4856
4857 static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
4858 struct bnx2x_queue_state_params *params)
4859 {
4860 switch (params->cmd) {
4861 case BNX2X_Q_CMD_SETUP:
4862 return bnx2x_q_send_setup_e1x(bp, params);
4863 case BNX2X_Q_CMD_INIT:
4864 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4865 case BNX2X_Q_CMD_DEACTIVATE:
4866 case BNX2X_Q_CMD_ACTIVATE:
4867 case BNX2X_Q_CMD_UPDATE:
4868 case BNX2X_Q_CMD_UPDATE_TPA:
4869 case BNX2X_Q_CMD_HALT:
4870 case BNX2X_Q_CMD_CFC_DEL:
4871 case BNX2X_Q_CMD_TERMINATE:
4872 case BNX2X_Q_CMD_EMPTY:
4873 return bnx2x_queue_send_cmd_cmn(bp, params);
4874 default:
4875 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4876 return -EINVAL;
4877 }
4878 }
4879
4880 static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
4881 struct bnx2x_queue_state_params *params)
4882 {
4883 switch (params->cmd) {
4884 case BNX2X_Q_CMD_SETUP:
4885 return bnx2x_q_send_setup_e2(bp, params);
4886 case BNX2X_Q_CMD_INIT:
4887 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4888 case BNX2X_Q_CMD_DEACTIVATE:
4889 case BNX2X_Q_CMD_ACTIVATE:
4890 case BNX2X_Q_CMD_UPDATE:
4891 case BNX2X_Q_CMD_UPDATE_TPA:
4892 case BNX2X_Q_CMD_HALT:
4893 case BNX2X_Q_CMD_CFC_DEL:
4894 case BNX2X_Q_CMD_TERMINATE:
4895 case BNX2X_Q_CMD_EMPTY:
4896 return bnx2x_queue_send_cmd_cmn(bp, params);
4897 default:
4898 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4899 return -EINVAL;
4900 }
4901 }
4902
4903 /**
4904 * bnx2x_queue_chk_transition - check state machine of a regular Queue
4905 *
4906 * @bp: device handle
4907 * @o: queue state object
4908 * @params: state parameters
4909 *
4910 * Checks the state machine of a regular (non-Forwarding) Queue.
4911 * It both checks if the requested command is legal in the current
4912 * state and, if it's legal, sets a `next_state' in the object
4913 * that will be used in the completion flow to set the `state'
4914 * of the object.
4915 *
4916 * returns 0 if a requested command is a legal transition,
4917 * -EINVAL otherwise.
4918 */
4919 static int bnx2x_queue_chk_transition(struct bnx2x *bp,
4920 struct bnx2x_queue_sp_obj *o,
4921 struct bnx2x_queue_state_params *params)
4922 {
4923 enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
4924 enum bnx2x_queue_cmd cmd = params->cmd;
4925 struct bnx2x_queue_update_params *update_params =
4926 &params->params.update;
4927 u8 next_tx_only = o->num_tx_only;
4928
4929 /*
4930 * Forget all pending for completion commands if a driver only state
4931 * transition has been requested.
4932 */
4933 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
4934 o->pending = 0;
4935 o->next_state = BNX2X_Q_STATE_MAX;
4936 }
4937
4938 /*
4939 * Don't allow a next state transition if we are in the middle of
4940 * the previous one.
4941 */
4942 if (o->pending)
4943 return -EBUSY;
4944
4945 switch (state) {
4946 case BNX2X_Q_STATE_RESET:
4947 if (cmd == BNX2X_Q_CMD_INIT)
4948 next_state = BNX2X_Q_STATE_INITIALIZED;
4949
4950 break;
4951 case BNX2X_Q_STATE_INITIALIZED:
4952 if (cmd == BNX2X_Q_CMD_SETUP) {
4953 if (test_bit(BNX2X_Q_FLG_ACTIVE,
4954 &params->params.setup.flags))
4955 next_state = BNX2X_Q_STATE_ACTIVE;
4956 else
4957 next_state = BNX2X_Q_STATE_INACTIVE;
4958 }
4959
4960 break;
4961 case BNX2X_Q_STATE_ACTIVE:
4962 if (cmd == BNX2X_Q_CMD_DEACTIVATE)
4963 next_state = BNX2X_Q_STATE_INACTIVE;
4964
4965 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
4966 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
4967 next_state = BNX2X_Q_STATE_ACTIVE;
4968
4969 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
4970 next_state = BNX2X_Q_STATE_MULTI_COS;
4971 next_tx_only = 1;
4972 }
4973
4974 else if (cmd == BNX2X_Q_CMD_HALT)
4975 next_state = BNX2X_Q_STATE_STOPPED;
4976
4977 else if (cmd == BNX2X_Q_CMD_UPDATE) {
4978 /* If "active" state change is requested, update the
4979 * state accordingly.
4980 */
4981 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
4982 &update_params->update_flags) &&
4983 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
4984 &update_params->update_flags))
4985 next_state = BNX2X_Q_STATE_INACTIVE;
4986 else
4987 next_state = BNX2X_Q_STATE_ACTIVE;
4988 }
4989
4990 break;
4991 case BNX2X_Q_STATE_MULTI_COS:
4992 if (cmd == BNX2X_Q_CMD_TERMINATE)
4993 next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
4994
4995 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
4996 next_state = BNX2X_Q_STATE_MULTI_COS;
4997 next_tx_only = o->num_tx_only + 1;
4998 }
4999
5000 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5001 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5002 next_state = BNX2X_Q_STATE_MULTI_COS;
5003
5004 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5005 /* If "active" state change is requested, update the
5006 * state accordingly.
5007 */
5008 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5009 &update_params->update_flags) &&
5010 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5011 &update_params->update_flags))
5012 next_state = BNX2X_Q_STATE_INACTIVE;
5013 else
5014 next_state = BNX2X_Q_STATE_MULTI_COS;
5015 }
5016
5017 break;
5018 case BNX2X_Q_STATE_MCOS_TERMINATED:
5019 if (cmd == BNX2X_Q_CMD_CFC_DEL) {
5020 next_tx_only = o->num_tx_only - 1;
5021 if (next_tx_only == 0)
5022 next_state = BNX2X_Q_STATE_ACTIVE;
5023 else
5024 next_state = BNX2X_Q_STATE_MULTI_COS;
5025 }
5026
5027 break;
5028 case BNX2X_Q_STATE_INACTIVE:
5029 if (cmd == BNX2X_Q_CMD_ACTIVATE)
5030 next_state = BNX2X_Q_STATE_ACTIVE;
5031
5032 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5033 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5034 next_state = BNX2X_Q_STATE_INACTIVE;
5035
5036 else if (cmd == BNX2X_Q_CMD_HALT)
5037 next_state = BNX2X_Q_STATE_STOPPED;
5038
5039 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5040 /* If "active" state change is requested, update the
5041 * state accordingly.
5042 */
5043 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5044 &update_params->update_flags) &&
5045 test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5046 &update_params->update_flags)) {
5047 if (o->num_tx_only == 0)
5048 next_state = BNX2X_Q_STATE_ACTIVE;
5049 else /* tx only queues exist for this queue */
5050 next_state = BNX2X_Q_STATE_MULTI_COS;
5051 } else
5052 next_state = BNX2X_Q_STATE_INACTIVE;
5053 }
5054
5055 break;
5056 case BNX2X_Q_STATE_STOPPED:
5057 if (cmd == BNX2X_Q_CMD_TERMINATE)
5058 next_state = BNX2X_Q_STATE_TERMINATED;
5059
5060 break;
5061 case BNX2X_Q_STATE_TERMINATED:
5062 if (cmd == BNX2X_Q_CMD_CFC_DEL)
5063 next_state = BNX2X_Q_STATE_RESET;
5064
5065 break;
5066 default:
5067 BNX2X_ERR("Illegal state: %d\n", state);
5068 }
5069
5070 /* Transition is assured */
5071 if (next_state != BNX2X_Q_STATE_MAX) {
5072 DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
5073 state, cmd, next_state);
5074 o->next_state = next_state;
5075 o->next_tx_only = next_tx_only;
5076 return 0;
5077 }
5078
5079 DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
5080
5081 return -EINVAL;
5082 }
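/*
 * Summary of the main legal transitions checked above (UPDATE with the
 * ACTIVATE bit toggles between ACTIVE/MULTI_COS and INACTIVE; CFC_DEL from
 * MCOS_TERMINATED returns to ACTIVE or MULTI_COS depending on the number of
 * remaining tx-only connections):
 *
 *	RESET --INIT--> INITIALIZED --SETUP--> ACTIVE / INACTIVE
 *	ACTIVE --SETUP_TX_ONLY--> MULTI_COS --TERMINATE--> MCOS_TERMINATED
 *	ACTIVE / INACTIVE --HALT--> STOPPED --TERMINATE--> TERMINATED
 *	TERMINATED --CFC_DEL--> RESET
 */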
5083
5084 void bnx2x_init_queue_obj(struct bnx2x *bp,
5085 struct bnx2x_queue_sp_obj *obj,
5086 u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
5087 void *rdata,
5088 dma_addr_t rdata_mapping, unsigned long type)
5089 {
5090 memset(obj, 0, sizeof(*obj));
5091
5092 /* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
5093 BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
5094
5095 memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5096 obj->max_cos = cid_cnt;
5097 obj->cl_id = cl_id;
5098 obj->func_id = func_id;
5099 obj->rdata = rdata;
5100 obj->rdata_mapping = rdata_mapping;
5101 obj->type = type;
5102 obj->next_state = BNX2X_Q_STATE_MAX;
5103
5104 if (CHIP_IS_E1x(bp))
5105 obj->send_cmd = bnx2x_queue_send_cmd_e1x;
5106 else
5107 obj->send_cmd = bnx2x_queue_send_cmd_e2;
5108
5109 obj->check_transition = bnx2x_queue_chk_transition;
5110
5111 obj->complete_cmd = bnx2x_queue_comp_cmd;
5112 obj->wait_comp = bnx2x_queue_wait_comp;
5113 obj->set_pending = bnx2x_queue_set_pending;
5114 }
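/*
 * Illustrative sketch (not part of the driver): initializing a queue object
 * with a single CoS; 'cid', 'rdata' and 'rdata_mapping' are hypothetical:
 *
 *	unsigned long q_type = 0;
 *	u32 cids[1] = { cid };
 *
 *	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
 *	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
 *	bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, 1,
 *			     BP_FUNC(bp), rdata, rdata_mapping, q_type);
 */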
5115
5116 void bnx2x_queue_set_cos_cid(struct bnx2x *bp,
5117 struct bnx2x_queue_sp_obj *obj,
5118 u32 cid, u8 index)
5119 {
5120 obj->cids[index] = cid;
5121 }
5122
5123 /********************** Function state object *********************************/
5124 enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
5125 struct bnx2x_func_sp_obj *o)
5126 {
5127 /* in the middle of a transaction - return INVALID state */
5128 if (o->pending)
5129 return BNX2X_F_STATE_MAX;
5130
5131 /*
5132 * Ensure the ordering of reading o->pending and o->state:
5133 * o->pending should be read first.
5134 */
5135 rmb();
5136
5137 return o->state;
5138 }
5139
5140 static int bnx2x_func_wait_comp(struct bnx2x *bp,
5141 struct bnx2x_func_sp_obj *o,
5142 enum bnx2x_func_cmd cmd)
5143 {
5144 return bnx2x_state_wait(bp, cmd, &o->pending);
5145 }
5146
5147 /**
5148 * bnx2x_func_state_change_comp - complete the state machine transition
5149 *
5150 * @bp: device handle
5151 * @o: function state object
5152 * @cmd: command being completed
5153 *
5154 * Called on state change transition. Completes the state
5155 * machine transition only - no HW interaction.
5156 */
5157 static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
5158 struct bnx2x_func_sp_obj *o,
5159 enum bnx2x_func_cmd cmd)
5160 {
5161 unsigned long cur_pending = o->pending;
5162
5163 if (!test_and_clear_bit(cmd, &cur_pending)) {
5164 BNX2X_ERR("Bad MC reply %d for func %d in state %d "
5165 "pending 0x%lx, next_state %d\n", cmd, BP_FUNC(bp),
5166 o->state, cur_pending, o->next_state);
5167 return -EINVAL;
5168 }
5169
5170 DP(BNX2X_MSG_SP, "Completing command %d for func %d, setting state to "
5171 "%d\n", cmd, BP_FUNC(bp), o->next_state);
5172
5173 o->state = o->next_state;
5174 o->next_state = BNX2X_F_STATE_MAX;
5175
5176 /* It's important that o->state and o->next_state are
5177 * updated before o->pending.
5178 */
5179 wmb();
5180
5181 clear_bit(cmd, &o->pending);
5182 smp_mb__after_clear_bit();
5183
5184 return 0;
5185 }
5186
5187 /**
5188 * bnx2x_func_comp_cmd - complete the state change command
5189 *
5190 * @bp: device handle
5191 * @o: function state object
5192 * @cmd: command that has completed
5193 *
5194 * Checks that the arrived completion is expected.
5195 */
5196 static int bnx2x_func_comp_cmd(struct bnx2x *bp,
5197 struct bnx2x_func_sp_obj *o,
5198 enum bnx2x_func_cmd cmd)
5199 {
5200 /* Complete the state machine part first, check if it's a
5201 * legal completion.
5202 */
5203 int rc = bnx2x_func_state_change_comp(bp, o, cmd);
5204 return rc;
5205 }
5206
5207 /**
5208 * bnx2x_func_chk_transition - perform function state machine transition
5209 *
5210 * @bp: device handle
5211 * @o: function state object
5212 * @params: state parameters
5213 *
5214 * It both checks if the requested command is legal in a current
5215 * state and, if it's legal, sets a `next_state' in the object
5216 * that will be used in the completion flow to set the `state'
5217 * of the object.
5218 *
5219 * returns 0 if a requested command is a legal transition,
5220 * -EINVAL otherwise.
5221 */
5222 static int bnx2x_func_chk_transition(struct bnx2x *bp,
5223 struct bnx2x_func_sp_obj *o,
5224 struct bnx2x_func_state_params *params)
5225 {
5226 enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
5227 enum bnx2x_func_cmd cmd = params->cmd;
5228
5229 /*
5230 * Forget all pending for completion commands if a driver only state
5231 * transition has been requested.
5232 */
5233 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5234 o->pending = 0;
5235 o->next_state = BNX2X_F_STATE_MAX;
5236 }
5237
5238 /*
5239 * Don't allow a next state transition if we are in the middle of
5240 * the previous one.
5241 */
5242 if (o->pending)
5243 return -EBUSY;
5244
5245 switch (state) {
5246 case BNX2X_F_STATE_RESET:
5247 if (cmd == BNX2X_F_CMD_HW_INIT)
5248 next_state = BNX2X_F_STATE_INITIALIZED;
5249
5250 break;
5251 case BNX2X_F_STATE_INITIALIZED:
5252 if (cmd == BNX2X_F_CMD_START)
5253 next_state = BNX2X_F_STATE_STARTED;
5254
5255 else if (cmd == BNX2X_F_CMD_HW_RESET)
5256 next_state = BNX2X_F_STATE_RESET;
5257
5258 break;
5259 case BNX2X_F_STATE_STARTED:
5260 if (cmd == BNX2X_F_CMD_STOP)
5261 next_state = BNX2X_F_STATE_INITIALIZED;
5262 else if (cmd == BNX2X_F_CMD_TX_STOP)
5263 next_state = BNX2X_F_STATE_TX_STOPPED;
5264
5265 break;
5266 case BNX2X_F_STATE_TX_STOPPED:
5267 if (cmd == BNX2X_F_CMD_TX_START)
5268 next_state = BNX2X_F_STATE_STARTED;
5269
5270 break;
5271 default:
5272 BNX2X_ERR("Unknown state: %d\n", state);
5273 }
5274
5275 /* Transition is assured */
5276 if (next_state != BNX2X_F_STATE_MAX) {
5277 DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
5278 state, cmd, next_state);
5279 o->next_state = next_state;
5280 return 0;
5281 }
5282
5283 DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
5284 state, cmd);
5285
5286 return -EINVAL;
5287 }
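/*
 * Summary of the legal function transitions checked above:
 *
 *	RESET --HW_INIT--> INITIALIZED --START--> STARTED
 *	STARTED --TX_STOP--> TX_STOPPED --TX_START--> STARTED
 *	STARTED --STOP--> INITIALIZED --HW_RESET--> RESET
 */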
5288
5289 /**
5290 * bnx2x_func_init_func - performs HW init at function stage
5291 *
5292 * @bp: device handle
5293 * @drv: driver-specific operations
5294 *
5295 * Init HW when the current phase is
5296 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize FUNCTION-only
5297 * HW blocks.
5298 */
5299 static inline int bnx2x_func_init_func(struct bnx2x *bp,
5300 const struct bnx2x_func_sp_drv_ops *drv)
5301 {
5302 return drv->init_hw_func(bp);
5303 }
5304
5305 /**
5306 * bnx2x_func_init_port - performs HW init at port stage
5307 *
5308 * @bp: device handle
5309 * @drv: driver-specific operations
5310 *
5311 * Init HW when the current phase is
5312 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
5313 * FUNCTION-only HW blocks.
5314 *
5315 */
5316 static inline int bnx2x_func_init_port(struct bnx2x *bp,
5317 const struct bnx2x_func_sp_drv_ops *drv)
5318 {
5319 int rc = drv->init_hw_port(bp);
5320 if (rc)
5321 return rc;
5322
5323 return bnx2x_func_init_func(bp, drv);
5324 }
5325
5326 /**
5327 * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
5328 *
5329 * @bp: device handle
5330 * @drv: driver-specific operations
5331 *
5332 * Init HW when the current phase is
5333 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5334 * PORT-only and FUNCTION-only HW blocks.
5335 */
5336 static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
5337 const struct bnx2x_func_sp_drv_ops *drv)
5338 {
5339 int rc = drv->init_hw_cmn_chip(bp);
5340 if (rc)
5341 return rc;
5342
5343 return bnx2x_func_init_port(bp, drv);
5344 }
5345
5346 /**
5347 * bnx2x_func_init_cmn - performs HW init at common stage
5348 *
5349 * @bp: device handle
5350 * @drv: driver-specific operations
5351 *
5352 * Init HW when the current phase is
5353 * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
5354 * PORT-only and FUNCTION-only HW blocks.
5355 */
5356 static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
5357 const struct bnx2x_func_sp_drv_ops *drv)
5358 {
5359 int rc = drv->init_hw_cmn(bp);
5360 if (rc)
5361 return rc;
5362
5363 return bnx2x_func_init_port(bp, drv);
5364 }
5365
5366 static int bnx2x_func_hw_init(struct bnx2x *bp,
5367 struct bnx2x_func_state_params *params)
5368 {
5369 u32 load_code = params->params.hw_init.load_phase;
5370 struct bnx2x_func_sp_obj *o = params->f_obj;
5371 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5372 int rc = 0;
5373
5374 DP(BNX2X_MSG_SP, "function %d load_code %x\n",
5375 BP_ABS_FUNC(bp), load_code);
5376
5377 /* Prepare buffers for unzipping the FW */
5378 rc = drv->gunzip_init(bp);
5379 if (rc)
5380 return rc;
5381
5382 /* Prepare FW */
5383 rc = drv->init_fw(bp);
5384 if (rc) {
5385 BNX2X_ERR("Error loading firmware\n");
5386 goto fw_init_err;
5387 }
5388
5389 /* Handle the beginning of COMMON_XXX phases separately... */
5390 switch (load_code) {
5391 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5392 rc = bnx2x_func_init_cmn_chip(bp, drv);
5393 if (rc)
5394 goto init_hw_err;
5395
5396 break;
5397 case FW_MSG_CODE_DRV_LOAD_COMMON:
5398 rc = bnx2x_func_init_cmn(bp, drv);
5399 if (rc)
5400 goto init_hw_err;
5401
5402 break;
5403 case FW_MSG_CODE_DRV_LOAD_PORT:
5404 rc = bnx2x_func_init_port(bp, drv);
5405 if (rc)
5406 goto init_hw_err;
5407
5408 break;
5409 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5410 rc = bnx2x_func_init_func(bp, drv);
5411 if (rc)
5412 goto init_hw_err;
5413
5414 break;
5415 default:
5416 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5417 rc = -EINVAL;
5418 }
5419
5420 init_hw_err:
5421 drv->release_fw(bp);
5422
5423 fw_init_err:
5424 drv->gunzip_end(bp);
5425
5426 /* In case of success, complete the command immediately: no ramrods
5427 * have been sent.
5428 */
5429 if (!rc)
5430 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
5431
5432 return rc;
5433 }
5434
5435 /**
5436 * bnx2x_func_reset_func - reset HW at function stage
5437 *
5438 * @bp: device handle
5439 * @drv: driver-specific operations
5440 *
5441 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
5442 * FUNCTION-only HW blocks.
5443 */
5444 static inline void bnx2x_func_reset_func(struct bnx2x *bp,
5445 const struct bnx2x_func_sp_drv_ops *drv)
5446 {
5447 drv->reset_hw_func(bp);
5448 }
5449
5450 /**
5451 * bnx2x_func_reset_port - reset HW at port stage
5452 *
5453 * @bp: device handle
5454 * @drv: driver-specific operations
5455 *
5456 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5457 * FUNCTION-only and PORT-only HW blocks.
5458 *
5459 * !!!IMPORTANT!!!
5460 *
5461 * It's important to call reset_port before reset_func() as the last thing
5462 * reset_func does is pf_disable() thus disabling PGLUE_B, which
5463 * makes any DMAE transactions impossible.
5464 */
5465 static inline void bnx2x_func_reset_port(struct bnx2x *bp,
5466 const struct bnx2x_func_sp_drv_ops *drv)
5467 {
5468 drv->reset_hw_port(bp);
5469 bnx2x_func_reset_func(bp, drv);
5470 }
5471
5472 /**
5473 * bnx2x_func_reset_cmn - reset HW at common stage
5474 *
5475 * @bp: device handle
5476 * @drv: driver-specific operations
5477 *
5478 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5479 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5480 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5481 */
5482 static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
5483 const struct bnx2x_func_sp_drv_ops *drv)
5484 {
5485 bnx2x_func_reset_port(bp, drv);
5486 drv->reset_hw_cmn(bp);
5487 }
5488
5489
5490 static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
5491 struct bnx2x_func_state_params *params)
5492 {
5493 u32 reset_phase = params->params.hw_reset.reset_phase;
5494 struct bnx2x_func_sp_obj *o = params->f_obj;
5495 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5496
5497 DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp),
5498 reset_phase);
5499
5500 switch (reset_phase) {
5501 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5502 bnx2x_func_reset_cmn(bp, drv);
5503 break;
5504 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5505 bnx2x_func_reset_port(bp, drv);
5506 break;
5507 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5508 bnx2x_func_reset_func(bp, drv);
5509 break;
5510 default:
5511 BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
5512 reset_phase);
5513 break;
5514 }
5515
5516 /* Complete the command immediately: no ramrods have been sent. */
5517 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
5518
5519 return 0;
5520 }
5521
5522 static inline int bnx2x_func_send_start(struct bnx2x *bp,
5523 struct bnx2x_func_state_params *params)
5524 {
5525 struct bnx2x_func_sp_obj *o = params->f_obj;
5526 struct function_start_data *rdata =
5527 (struct function_start_data *)o->rdata;
5528 dma_addr_t data_mapping = o->rdata_mapping;
5529 struct bnx2x_func_start_params *start_params = &params->params.start;
5530
5531 memset(rdata, 0, sizeof(*rdata));
5532
5533 /* Fill the ramrod data with provided parameters */
5534 rdata->function_mode = cpu_to_le16(start_params->mf_mode);
5535 rdata->sd_vlan_tag = start_params->sd_vlan_tag;
5536 rdata->path_id = BP_PATH(bp);
5537 rdata->network_cos_mode = start_params->network_cos_mode;
5538
5539 /*
5540 * No need for an explicit memory barrier here: the ordering
5541 * between writing to the SPQ element above and updating the
5542 * SPQ producer (which involves a memory read) is guaranteed
5543 * by the full memory barrier that bnx2x_sp_post() executes
5544 * before it updates the producer.
5545 */
5546
5547 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5548 U64_HI(data_mapping),
5549 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5550 }
5551
5552 static inline int bnx2x_func_send_stop(struct bnx2x *bp,
5553 struct bnx2x_func_state_params *params)
5554 {
5555 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
5556 NONE_CONNECTION_TYPE);
5557 }
5558
5559 static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
5560 struct bnx2x_func_state_params *params)
5561 {
5562 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
5563 NONE_CONNECTION_TYPE);
5564 }
5565 static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
5566 struct bnx2x_func_state_params *params)
5567 {
5568 struct bnx2x_func_sp_obj *o = params->f_obj;
5569 struct flow_control_configuration *rdata =
5570 (struct flow_control_configuration *)o->rdata;
5571 dma_addr_t data_mapping = o->rdata_mapping;
5572 struct bnx2x_func_tx_start_params *tx_start_params =
5573 &params->params.tx_start;
5574 int i;
5575
5576 memset(rdata, 0, sizeof(*rdata));
5577
5578 rdata->dcb_enabled = tx_start_params->dcb_enabled;
5579 rdata->dcb_version = tx_start_params->dcb_version;
5580 rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
5581
5582 for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
5583 rdata->traffic_type_to_priority_cos[i] =
5584 tx_start_params->traffic_type_to_priority_cos[i];
5585
5586 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
5587 U64_HI(data_mapping),
5588 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5589 }
5590
5591 static int bnx2x_func_send_cmd(struct bnx2x *bp,
5592 struct bnx2x_func_state_params *params)
5593 {
5594 switch (params->cmd) {
5595 case BNX2X_F_CMD_HW_INIT:
5596 return bnx2x_func_hw_init(bp, params);
5597 case BNX2X_F_CMD_START:
5598 return bnx2x_func_send_start(bp, params);
5599 case BNX2X_F_CMD_STOP:
5600 return bnx2x_func_send_stop(bp, params);
5601 case BNX2X_F_CMD_HW_RESET:
5602 return bnx2x_func_hw_reset(bp, params);
5603 case BNX2X_F_CMD_TX_STOP:
5604 return bnx2x_func_send_tx_stop(bp, params);
5605 case BNX2X_F_CMD_TX_START:
5606 return bnx2x_func_send_tx_start(bp, params);
5607 default:
5608 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5609 return -EINVAL;
5610 }
5611 }
5612
5613 void bnx2x_init_func_obj(struct bnx2x *bp,
5614 struct bnx2x_func_sp_obj *obj,
5615 void *rdata, dma_addr_t rdata_mapping,
5616 struct bnx2x_func_sp_drv_ops *drv_iface)
5617 {
5618 memset(obj, 0, sizeof(*obj));
5619
5620 mutex_init(&obj->one_pending_mutex);
5621
5622 obj->rdata = rdata;
5623 obj->rdata_mapping = rdata_mapping;
5624
5625 obj->send_cmd = bnx2x_func_send_cmd;
5626 obj->check_transition = bnx2x_func_chk_transition;
5627 obj->complete_cmd = bnx2x_func_comp_cmd;
5628 obj->wait_comp = bnx2x_func_wait_comp;
5629
5630 obj->drv = drv_iface;
5631 }
5632
5633 /**
5634 * bnx2x_func_state_change - perform Function state change transition
5635 *
5636 * @bp: device handle
5637 * @params: parameters to perform the transition
5638 *
5639 * returns 0 in case of successfully completed transition,
5640 * negative error code in case of failure, positive
5641 * (EBUSY) value if there is a completion that is
5642 * still pending (possible only if RAMROD_COMP_WAIT is
5643 * not set in params->ramrod_flags for asynchronous
5644 * commands).
5645 */
5646 int bnx2x_func_state_change(struct bnx2x *bp,
5647 struct bnx2x_func_state_params *params)
5648 {
5649 struct bnx2x_func_sp_obj *o = params->f_obj;
5650 int rc;
5651 enum bnx2x_func_cmd cmd = params->cmd;
5652 unsigned long *pending = &o->pending;
5653
5654 mutex_lock(&o->one_pending_mutex);
5655
5656 /* Check that the requested transition is legal */
5657 if (o->check_transition(bp, o, params)) {
5658 mutex_unlock(&o->one_pending_mutex);
5659 return -EINVAL;
5660 }
5661
5662 /* Set "pending" bit */
5663 set_bit(cmd, pending);
5664
5665 /* Don't send a command if only driver cleanup was requested */
5666 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5667 bnx2x_func_state_change_comp(bp, o, cmd);
5668 mutex_unlock(&o->one_pending_mutex);
5669 } else {
5670 /* Send a ramrod */
5671 rc = o->send_cmd(bp, params);
5672
5673 mutex_unlock(&o->one_pending_mutex);
5674
5675 if (rc) {
5676 o->next_state = BNX2X_F_STATE_MAX;
5677 clear_bit(cmd, pending);
5678 smp_mb__after_clear_bit();
5679 return rc;
5680 }
5681
5682 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
5683 rc = o->wait_comp(bp, o, cmd);
5684 if (rc)
5685 return rc;
5686
5687 return 0;
5688 }
5689 }
5690
5691 return !!test_bit(cmd, pending);
5692 }
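/*
 * Illustrative sketch (not part of the driver): a synchronous function
 * START request; the mf_mode value is hypothetical:
 *
 *	struct bnx2x_func_state_params func_params = {NULL};
 *
 *	func_params.f_obj = &bp->func_obj;
 *	func_params.cmd = BNX2X_F_CMD_START;
 *	func_params.params.start.mf_mode = 0;	(single-function mode)
 *	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
 *	rc = bnx2x_func_state_change(bp, &func_params);
 */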