/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/string.h>
#include <linux/etherdevice.h>

#include <linux/mlx4/cmd.h>
#include <linux/export.h>

#include "mlx4.h"

#define MGM_QPN_MASK       0x00FFFFFF
#define MGM_BLCK_LB_BIT    30

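/*
 * Each qp[] word in an MGM entry carries a QP number in its low 24 bits
 * (MGM_QPN_MASK); bit MGM_BLCK_LB_BIT in that word is set when multicast
 * loopback should be blocked for that QP, as done in mlx4_qp_attach_common()
 * when block_mcast_loopback is requested.
 */
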
static const u8 zero_gid[16];	/* automatically initialized to 0 */

struct mlx4_mgm {
	__be32			next_gid_index;
	__be32			members_count;
	u32			reserved[2];
	u8			gid[16];
	__be32			qp[MLX4_MAX_QP_PER_MGM];
};

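/*
 * Field encodings, as used throughout this file: members_count carries the
 * number of valid qp[] entries in its low 24 bits and the mlx4_protocol value
 * in bits 30-31; next_gid_index stores the index of the next AMGM entry in
 * the hash chain shifted left by 6 bits (see find_entry() and the
 * attach/detach paths, which apply "<< 6" / ">> 6" accordingly).
 */
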
int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
{
	return min((1 << mlx4_log_num_mgm_entry_size), MLX4_MAX_MGM_ENTRY_SIZE);
}

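/*
 * An MGM entry starts with a 32-byte header (two 16-byte lines: the control
 * words plus the GID); every remaining 16-byte line holds four 32-bit QP
 * words, hence 4 * (entry_size / 16 - 2) QPs per entry.
 */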
int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
{
	return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
}

static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
			   struct mlx4_cmd_mailbox *mailbox)
{
	return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}

static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
			    struct mlx4_cmd_mailbox *mailbox)
{
	return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}

static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer,
			      struct mlx4_cmd_mailbox *mailbox)
{
	u32 in_mod;

	in_mod = (u32) port << 16 | steer << 1;
	return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
			MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_NATIVE);
}

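/*
 * Note on mlx4_WRITE_PROMISC: the input modifier packs the port into bits
 * 16-23 and the steering type starting at bit 1, with the op modifier set to
 * 1.  The callers below appear to use it to rewrite the per-port default
 * steering entry with the current list of promiscuous QPs (see
 * add_promisc_qp() and remove_promisc_qp()).
 */
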
static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 u16 *hash, u8 op_mod)
{
	u64 imm;
	int err;

	err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
			   MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);

	if (!err)
		*hash = imm;

	return err;
}

static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num,
					      enum mlx4_steer_type steer,
					      u32 qpn)
{
	struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[pf_num];
	struct mlx4_promisc_qp *pqp;

	list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
		if (pqp->qpn == qpn)
			return pqp;
	}
	/* not found */
	return NULL;
}
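
/*
 * Every caller in this file currently passes 0 for pf_num, so lookups always
 * go through steer[0], whereas the add/remove paths index the steer array by
 * (port - 1); keep that asymmetry in mind when reading the code below.
 */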

/*
 * Add new entry to steering data structure.
 * All promisc QPs should be added as well
 */
static int new_steering_entry(struct mlx4_dev *dev, u8 port,
			      enum mlx4_steer_type steer,
			      unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	struct mlx4_steer_index *new_entry;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp = NULL;
	u32 prot;
	int err;

	s_steer = &mlx4_priv(dev)->steer[port - 1];
	new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
	if (!new_entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&new_entry->duplicates);
	new_entry->index = index;
	list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]);

	/* If the given qpn is also a promisc qp,
	 * it should be inserted into the duplicates list
	 */
	pqp = get_promisc_qp(dev, 0, steer, qpn);
	if (pqp) {
		dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
		if (!dqp) {
			err = -ENOMEM;
			goto out_alloc;
		}
		dqp->qpn = qpn;
		list_add_tail(&dqp->list, &new_entry->duplicates);
	}

	/* if no promisc qps for this vep, we are done */
	if (list_empty(&s_steer->promisc_qps[steer]))
		return 0;

	/* now need to add all the promisc qps to the new
	 * steering entry, as they should also receive the packets
	 * destined to this address */
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = -ENOMEM;
		goto out_alloc;
	}
	mgm = mailbox->buf;

	err = mlx4_READ_ENTRY(dev, index, mailbox);
	if (err)
		goto out_mailbox;

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	prot = be32_to_cpu(mgm->members_count) >> 30;
	list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
		/* don't add an already existing qpn */
		if (pqp->qpn == qpn)
			continue;
		if (members_count == dev->caps.num_qp_per_mgm) {
			/* out of space */
			err = -ENOMEM;
			goto out_mailbox;
		}

		/* add the qpn */
		mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK);
	}
	/* update the qps count and update the entry with all the promisc qps */
	mgm->members_count = cpu_to_be32(members_count | (prot << 30));
	err = mlx4_WRITE_ENTRY(dev, index, mailbox);

out_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (!err)
		return 0;
out_alloc:
	if (dqp) {
		list_del(&dqp->list);
		kfree(dqp);
	}
	list_del(&new_entry->list);
	kfree(new_entry);
	return err;
}

/* update the data structures with an existing steering entry */
static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
				   enum mlx4_steer_type steer,
				   unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_steer_index *tmp_entry, *entry = NULL;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	pqp = get_promisc_qp(dev, 0, steer, qpn);
	if (!pqp)
		return 0; /* nothing to do */

	list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
		if (tmp_entry->index == index) {
			entry = tmp_entry;
			break;
		}
	}
	if (unlikely(!entry)) {
		mlx4_warn(dev, "Steering entry at index %x is not registered\n", index);
		return -EINVAL;
	}

	/* the given qpn is listed as a promisc qpn;
	 * we need to add it as a duplicate to this entry
	 * for future reference */
	list_for_each_entry(dqp, &entry->duplicates, list) {
		if (qpn == dqp->qpn)
			return 0; /* qp is already duplicated */
	}

	/* add the qp as a duplicate on this index */
	dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
	if (!dqp)
		return -ENOMEM;
	dqp->qpn = qpn;
	list_add_tail(&dqp->list, &entry->duplicates);

	return 0;
}

/* Check whether a qpn is a duplicate on a steering entry.
 * If so, it should not be removed from the mgm */
static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
				  enum mlx4_steer_type steer,
				  unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_steer_index *tmp_entry, *entry = NULL;
	struct mlx4_promisc_qp *dqp, *tmp_dqp;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	/* if the qp is not promisc, it cannot be a duplicate */
	if (!get_promisc_qp(dev, 0, steer, qpn))
		return false;

	/* The qp is a promisc qp, so it is a duplicate on this index.
	 * Find the index entry and remove the duplicate */
	list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
		if (tmp_entry->index == index) {
			entry = tmp_entry;
			break;
		}
	}
	if (unlikely(!entry)) {
		mlx4_warn(dev, "Steering entry for index %x is not registered\n", index);
		return false;
	}
	list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) {
		if (dqp->qpn == qpn) {
			list_del(&dqp->list);
			kfree(dqp);
		}
	}
	return true;
}

/* If a steering entry contains only promisc QPs, it can be removed. */
static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
				      enum mlx4_steer_type steer,
				      unsigned int index, u32 tqpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	struct mlx4_steer_index *entry = NULL, *tmp_entry;
	u32 qpn;
	u32 members_count;
	bool ret = false;
	int i;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return false;
	mgm = mailbox->buf;

	if (mlx4_READ_ENTRY(dev, index, mailbox))
		goto out;
	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	for (i = 0; i < members_count; i++) {
		qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
		if (!get_promisc_qp(dev, 0, steer, qpn) && qpn != tqpn) {
			/* the qp is not promisc, the entry can't be removed */
			goto out;
		}
	}
	/* All the qps currently registered for this entry are promiscuous;
	 * check for duplicates */
	ret = true;
	list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
		if (entry->index == index) {
			if (list_empty(&entry->duplicates)) {
				list_del(&entry->list);
				kfree(entry);
			} else {
				/* This entry contains duplicates so it shouldn't be removed */
				ret = false;
				goto out;
			}
		}
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}

static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
			  enum mlx4_steer_type steer, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	struct mlx4_steer_index *entry;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp;
	u32 members_count;
	u32 prot;
	int i;
	bool found;
	int last_index;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	mutex_lock(&priv->mcg_table.mutex);

	if (get_promisc_qp(dev, 0, steer, qpn)) {
		err = 0; /* Nothing to do, already exists */
		goto out_mutex;
	}

	pqp = kmalloc(sizeof *pqp, GFP_KERNEL);
	if (!pqp) {
		err = -ENOMEM;
		goto out_mutex;
	}
	pqp->qpn = qpn;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = -ENOMEM;
		goto out_alloc;
	}
	mgm = mailbox->buf;

	/* The promisc qp needs to be added to each one of the steering
	 * entries; if it is already listed in an entry, it is added to
	 * that entry's duplicates instead */
	list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
		err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
		if (err)
			goto out_mailbox;

		members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
		prot = be32_to_cpu(mgm->members_count) >> 30;
		found = false;
		for (i = 0; i < members_count; i++) {
			if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
				/* Entry already exists, add to duplicates */
				dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
				if (!dqp) {
					err = -ENOMEM;
					goto out_mailbox;
				}
				dqp->qpn = qpn;
				list_add_tail(&dqp->list, &entry->duplicates);
				found = true;
			}
		}
		if (!found) {
			/* Need to add the qpn to mgm */
			if (members_count == dev->caps.num_qp_per_mgm) {
				/* entry is full */
				err = -ENOMEM;
				goto out_mailbox;
			}
			mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK);
			mgm->members_count = cpu_to_be32(members_count | (prot << 30));
			err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
			if (err)
				goto out_mailbox;
		}
		last_index = entry->index;
	}

	/* add the new qpn to the list of promisc qps */
	list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
	/* now need to add all the promisc qps to the default entry */
	memset(mgm, 0, sizeof *mgm);
	members_count = 0;
	list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
		mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
	mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);

	err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
	if (err)
		goto out_list;

	mlx4_free_cmd_mailbox(dev, mailbox);
	mutex_unlock(&priv->mcg_table.mutex);
	return 0;

out_list:
	list_del(&pqp->list);
out_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
out_alloc:
	kfree(pqp);
out_mutex:
	mutex_unlock(&priv->mcg_table.mutex);
	return err;
}

static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
			     enum mlx4_steer_type steer, u32 qpn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	struct mlx4_steer_index *entry;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp;
	u32 members_count;
	bool found;
	bool back_to_list = false;
	int loc, i;
	int err;

	s_steer = &mlx4_priv(dev)->steer[port - 1];
	mutex_lock(&priv->mcg_table.mutex);

	pqp = get_promisc_qp(dev, 0, steer, qpn);
	if (unlikely(!pqp)) {
		mlx4_warn(dev, "QP %x is not a promiscuous QP\n", qpn);
		/* nothing to do */
		err = 0;
		goto out_mutex;
	}

	/* remove from the list of promisc qps */
	list_del(&pqp->list);

	/* set the default entry not to include the removed one */
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = -ENOMEM;
		back_to_list = true;
		goto out_list;
	}
	mgm = mailbox->buf;
	memset(mgm, 0, sizeof *mgm);
	members_count = 0;
	list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
		mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
	mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);

	err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
	if (err)
		goto out_mailbox;

	/* remove the qp from all the steering entries */
	list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
		found = false;
		list_for_each_entry(dqp, &entry->duplicates, list) {
			if (dqp->qpn == qpn) {
				found = true;
				break;
			}
		}
		if (found) {
			/* a duplicate, no need to change the mgm,
			 * only update the duplicates list */
			list_del(&dqp->list);
			kfree(dqp);
		} else {
			err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
			if (err)
				goto out_mailbox;
			members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
			for (loc = -1, i = 0; i < members_count; ++i)
				if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn)
					loc = i;

			/* guard against a qpn that is missing from this entry */
			if (loc < 0) {
				mlx4_err(dev, "QP %06x wasn't found in entry %x\n",
					 qpn, entry->index);
				err = -EINVAL;
				goto out_mailbox;
			}

			mgm->members_count = cpu_to_be32(--members_count |
							 (MLX4_PROT_ETH << 30));
			mgm->qp[loc] = mgm->qp[i - 1];
			mgm->qp[i - 1] = 0;

			err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
			if (err)
				goto out_mailbox;
		}

	}

out_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
out_list:
	if (back_to_list)
		list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
	else
		kfree(pqp);
out_mutex:
	mutex_unlock(&priv->mcg_table.mutex);
	return err;
}

/*
 * Caller must hold the MCG table mutex.  gid and mgm parameters must
 * be properly aligned for command interface.
 *
 * Returns 0 unless a firmware command error occurs.
 *
 * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1
 * and *mgm holds MGM entry.
 *
 * If GID is found in AMGM, *index = index in AMGM, *prev = index of
 * previous entry in hash chain and *mgm holds AMGM entry.
 *
 * If no AMGM exists for the given gid, *index = -1, *prev = index of last
 * entry in hash chain and *mgm holds end of hash chain.
 */
static int find_entry(struct mlx4_dev *dev, u8 port,
		      u8 *gid, enum mlx4_protocol prot,
		      struct mlx4_cmd_mailbox *mgm_mailbox,
		      int *prev, int *index)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm = mgm_mailbox->buf;
	u8 *mgid;
	int err;
	u16 hash;
	u8 op_mod = (prot == MLX4_PROT_ETH) ?
		!!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;
	mgid = mailbox->buf;

	memcpy(mgid, gid, 16);

	err = mlx4_GID_HASH(dev, mailbox, &hash, op_mod);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		return err;

	if (0)
		mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, hash);

	*index = hash;
	*prev  = -1;

	do {
		err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox);
		if (err)
			return err;

		if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
			if (*index != hash) {
				mlx4_err(dev, "Found zero MGID in AMGM.\n");
				err = -EINVAL;
			}
			return err;
		}

		if (!memcmp(mgm->gid, gid, 16) &&
		    be32_to_cpu(mgm->members_count) >> 30 == prot)
			return err;

		*prev = *index;
		*index = be32_to_cpu(mgm->next_gid_index) >> 6;
	} while (*index);

	*index = -1;
	return err;
}

int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  int block_mcast_loopback, enum mlx4_protocol prot,
			  enum mlx4_steer_type steer)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	int index, prev;
	int link = 0;
	int i;
	int err;
	u8 port = gid[5];
	u8 new_entry = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mgm = mailbox->buf;

	mutex_lock(&priv->mcg_table.mutex);
	err = find_entry(dev, port, gid, prot,
			 mailbox, &prev, &index);
	if (err)
		goto out;

	if (index != -1) {
		if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
			new_entry = 1;
			memcpy(mgm->gid, gid, 16);
		}
	} else {
		link = 1;

		index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap);
		if (index == -1) {
			mlx4_err(dev, "No AMGM entries left\n");
			err = -ENOMEM;
			goto out;
		}
		index += dev->caps.num_mgms;

		new_entry = 1;
		memset(mgm, 0, sizeof *mgm);
		memcpy(mgm->gid, gid, 16);
	}

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	if (members_count == dev->caps.num_qp_per_mgm) {
		mlx4_err(dev, "MGM at index %x is full.\n", index);
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < members_count; ++i)
		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
			mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn);
			err = 0;
			goto out;
		}

	if (block_mcast_loopback)
		mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |
						       (1U << MGM_BLCK_LB_BIT));
	else
		mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);

	mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30);

	err = mlx4_WRITE_ENTRY(dev, index, mailbox);
	if (err)
		goto out;

	if (!link)
		goto out;

	err = mlx4_READ_ENTRY(dev, prev, mailbox);
	if (err)
		goto out;

	mgm->next_gid_index = cpu_to_be32(index << 6);

	err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
	if (err)
		goto out;

out:
	if (prot == MLX4_PROT_ETH) {
		/* manage the steering entry for promisc mode */
		if (new_entry)
			new_steering_entry(dev, port, steer, index, qp->qpn);
		else
			existing_steering_entry(dev, port, steer,
						index, qp->qpn);
	}
	if (err && link && index != -1) {
		if (index < dev->caps.num_mgms)
			mlx4_warn(dev, "Got AMGM index %d < %d\n",
				  index, dev->caps.num_mgms);
		else
			mlx4_bitmap_free(&priv->mcg_table.bitmap,
					 index - dev->caps.num_mgms);
	}
	mutex_unlock(&priv->mcg_table.mutex);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol prot, enum mlx4_steer_type steer)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	int prev, index;
	int i, loc;
	int err;
	u8 port = gid[5];
	bool removed_entry = false;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mgm = mailbox->buf;

	mutex_lock(&priv->mcg_table.mutex);

	err = find_entry(dev, port, gid, prot,
			 mailbox, &prev, &index);
	if (err)
		goto out;

	if (index == -1) {
		mlx4_err(dev, "MGID %pI6 not found\n", gid);
		err = -EINVAL;
		goto out;
	}

	/* if this qp is also a promisc qp, it shouldn't be removed */
	if (prot == MLX4_PROT_ETH &&
	    check_duplicate_entry(dev, port, steer, index, qp->qpn))
		goto out;

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	for (loc = -1, i = 0; i < members_count; ++i)
		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
			loc = i;

	if (loc == -1) {
		mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn);
		err = -EINVAL;
		goto out;
	}

	mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30);
	mgm->qp[loc] = mgm->qp[i - 1];
	mgm->qp[i - 1] = 0;

	if (prot == MLX4_PROT_ETH)
		removed_entry = can_remove_steering_entry(dev, port, steer,
							  index, qp->qpn);
	if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		goto out;
	}

	/* We are going to delete the entry, members count should be 0 */
	mgm->members_count = cpu_to_be32((u32) prot << 30);

	if (prev == -1) {
		/* Remove entry from MGM */
		int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;
		if (amgm_index) {
			err = mlx4_READ_ENTRY(dev, amgm_index, mailbox);
			if (err)
				goto out;
		} else
			memset(mgm->gid, 0, 16);

		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		if (err)
			goto out;

		if (amgm_index) {
			if (amgm_index < dev->caps.num_mgms)
				mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d\n",
					  index, amgm_index, dev->caps.num_mgms);
			else
				mlx4_bitmap_free(&priv->mcg_table.bitmap,
						 amgm_index - dev->caps.num_mgms);
		}
	} else {
		/* Remove entry from AMGM */
		int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
		err = mlx4_READ_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);

		err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		if (index < dev->caps.num_mgms)
			mlx4_warn(dev, "entry %d had next AMGM index %d < %d\n",
				  prev, index, dev->caps.num_mgms);
		else
			mlx4_bitmap_free(&priv->mcg_table.bitmap,
					 index - dev->caps.num_mgms);
	}

out:
	mutex_unlock(&priv->mcg_table.mutex);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
			  u8 gid[16], u8 attach, u8 block_loopback,
			  enum mlx4_protocol prot)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err = 0;
	int qpn;

	if (!mlx4_is_mfunc(dev))
		return -EBADF;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, gid, 16);
	qpn = qp->qpn;
	qpn |= (prot << 28);
	if (attach && block_loopback)
		qpn |= (1 << 31);

	err = mlx4_cmd(dev, mailbox->dma, qpn, attach,
		       MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  int block_mcast_loopback, enum mlx4_protocol prot)
{
	enum mlx4_steer_type steer;

	steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;

	if (prot == MLX4_PROT_ETH &&
	    !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
		return 0;

	if (prot == MLX4_PROT_ETH)
		gid[7] |= (steer << 1);

	if (mlx4_is_mfunc(dev))
		return mlx4_QP_ATTACH(dev, qp, gid, 1,
				      block_mcast_loopback, prot);

	return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
				     prot, steer);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_attach);

int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol prot)
{
	enum mlx4_steer_type steer;

	steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;

	if (prot == MLX4_PROT_ETH &&
	    !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
		return 0;

	if (prot == MLX4_PROT_ETH)
		gid[7] |= (steer << 1);

	if (mlx4_is_mfunc(dev))
		return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);

	return mlx4_qp_detach_common(dev, qp, gid, prot, steer);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
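
/*
 * For MLX4_PROT_ETH the 16-byte "gid" passed to the attach/detach helpers is
 * not an IB GID but a packed steering descriptor: the code above and in
 * mlx4_qp_attach_common()/mlx4_qp_detach_common() reads the port from gid[5],
 * ORs the steering type into gid[7], and treats gid[10..15] as the Ethernet
 * MAC (see the is_valid_ether_addr(&gid[10]) checks).  The meaning of the
 * remaining bytes is left to the callers that build these GIDs.
 */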

int mlx4_unicast_attach(struct mlx4_dev *dev,
			struct mlx4_qp *qp, u8 gid[16],
			int block_mcast_loopback, enum mlx4_protocol prot)
{
	if (prot == MLX4_PROT_ETH &&
	    !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
		return 0;

	if (prot == MLX4_PROT_ETH)
		gid[7] |= (MLX4_UC_STEER << 1);

	if (mlx4_is_mfunc(dev))
		return mlx4_QP_ATTACH(dev, qp, gid, 1,
				      block_mcast_loopback, prot);

	return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
				     prot, MLX4_UC_STEER);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_attach);

int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
			u8 gid[16], enum mlx4_protocol prot)
{
	if (prot == MLX4_PROT_ETH &&
	    !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
		return 0;

	if (prot == MLX4_PROT_ETH)
		gid[7] |= (MLX4_UC_STEER << 1);

	if (mlx4_is_mfunc(dev))
		return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);

	return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_detach);

int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	u32 qpn = (u32) vhcr->in_param & 0xffffffff;
	u8 port = vhcr->in_param >> 62;
	enum mlx4_steer_type steer = vhcr->in_modifier;

	/* Promiscuous unicast is not allowed in mfunc */
	if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)
		return 0;

	if (vhcr->op_modifier)
		return add_promisc_qp(dev, port, steer, qpn);
	else
		return remove_promisc_qp(dev, port, steer, qpn);
}

static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn,
			enum mlx4_steer_type steer, u8 add, u8 port)
{
	return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add,
			MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}
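
/*
 * mlx4_PROMISC() and mlx4_PROMISC_wrapper() agree on the command encoding:
 * the QP number travels in the low bits of in_param with the port in bits
 * 62-63, the steering type in the input modifier, and the add/remove flag in
 * the op modifier, so multi-function slaves end up in the same
 * add_promisc_qp()/remove_promisc_qp() paths used natively.
 */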

int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
		return 0;

	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port);

	return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);

int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
		return 0;

	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port);

	return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);

int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
		return 0;

	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port);

	return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);

int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
		return 0;

	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port);

	return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);

int mlx4_init_mcg_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
			       dev->caps.num_amgms - 1, 0, 0);
	if (err)
		return err;

	mutex_init(&priv->mcg_table.mutex);

	return 0;
}
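
/*
 * The mcg_table bitmap only tracks AMGM (hash-chain overflow) entries; when
 * an index is handed out it is biased by dev->caps.num_mgms before being used
 * as an MCG table index, and the same offset is subtracted again when the
 * entry is freed (see mlx4_qp_attach_common() and mlx4_qp_detach_common()).
 */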

void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
{
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
}