Merge tag 'for-linus-v3.10-rc3' of git://oss.sgi.com/xfs/xfs
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / net / ethernet / mellanox / mlx4 / resource_tracker.c
CommitLineData
c82e9aa0
EC
1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
4 * All rights reserved.
5 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36#include <linux/sched.h>
37#include <linux/pci.h>
38#include <linux/errno.h>
39#include <linux/kernel.h>
40#include <linux/io.h>
e143a1ad 41#include <linux/slab.h>
c82e9aa0
EC
42#include <linux/mlx4/cmd.h>
43#include <linux/mlx4/qp.h>
af22d9de 44#include <linux/if_ether.h>
7fb40f87 45#include <linux/etherdevice.h>
c82e9aa0
EC
46
47#include "mlx4.h"
48#include "fw.h"
49
50#define MLX4_MAC_VALID (1ull << 63)
c82e9aa0
EC
51
52struct mac_res {
53 struct list_head list;
54 u64 mac;
55 u8 port;
56};
57
58struct res_common {
59 struct list_head list;
4af1c048 60 struct rb_node node;
aa1ec3dd 61 u64 res_id;
c82e9aa0
EC
62 int owner;
63 int state;
64 int from_state;
65 int to_state;
66 int removing;
67};
68
69enum {
70 RES_ANY_BUSY = 1
71};
72
73struct res_gid {
74 struct list_head list;
75 u8 gid[16];
76 enum mlx4_protocol prot;
9f5b6c63 77 enum mlx4_steer_type steer;
fab1e24a 78 u64 reg_id;
c82e9aa0
EC
79};
80
81enum res_qp_states {
82 RES_QP_BUSY = RES_ANY_BUSY,
83
84 /* QP number was allocated */
85 RES_QP_RESERVED,
86
87 /* ICM memory for QP context was mapped */
88 RES_QP_MAPPED,
89
90 /* QP is in hw ownership */
91 RES_QP_HW
92};
93
c82e9aa0
EC
94struct res_qp {
95 struct res_common com;
96 struct res_mtt *mtt;
97 struct res_cq *rcq;
98 struct res_cq *scq;
99 struct res_srq *srq;
100 struct list_head mcg_list;
101 spinlock_t mcg_spl;
102 int local_qpn;
2c473ae7 103 atomic_t ref_count;
c82e9aa0
EC
104};
105
106enum res_mtt_states {
107 RES_MTT_BUSY = RES_ANY_BUSY,
108 RES_MTT_ALLOCATED,
109};
110
111static inline const char *mtt_states_str(enum res_mtt_states state)
112{
113 switch (state) {
114 case RES_MTT_BUSY: return "RES_MTT_BUSY";
115 case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
116 default: return "Unknown";
117 }
118}
119
120struct res_mtt {
121 struct res_common com;
122 int order;
123 atomic_t ref_count;
124};
125
126enum res_mpt_states {
127 RES_MPT_BUSY = RES_ANY_BUSY,
128 RES_MPT_RESERVED,
129 RES_MPT_MAPPED,
130 RES_MPT_HW,
131};
132
133struct res_mpt {
134 struct res_common com;
135 struct res_mtt *mtt;
136 int key;
137};
138
139enum res_eq_states {
140 RES_EQ_BUSY = RES_ANY_BUSY,
141 RES_EQ_RESERVED,
142 RES_EQ_HW,
143};
144
145struct res_eq {
146 struct res_common com;
147 struct res_mtt *mtt;
148};
149
150enum res_cq_states {
151 RES_CQ_BUSY = RES_ANY_BUSY,
152 RES_CQ_ALLOCATED,
153 RES_CQ_HW,
154};
155
156struct res_cq {
157 struct res_common com;
158 struct res_mtt *mtt;
159 atomic_t ref_count;
160};
161
162enum res_srq_states {
163 RES_SRQ_BUSY = RES_ANY_BUSY,
164 RES_SRQ_ALLOCATED,
165 RES_SRQ_HW,
166};
167
c82e9aa0
EC
168struct res_srq {
169 struct res_common com;
170 struct res_mtt *mtt;
171 struct res_cq *cq;
172 atomic_t ref_count;
173};
174
175enum res_counter_states {
176 RES_COUNTER_BUSY = RES_ANY_BUSY,
177 RES_COUNTER_ALLOCATED,
178};
179
c82e9aa0
EC
180struct res_counter {
181 struct res_common com;
182 int port;
183};
184
ba062d52
JM
185enum res_xrcdn_states {
186 RES_XRCD_BUSY = RES_ANY_BUSY,
187 RES_XRCD_ALLOCATED,
188};
189
190struct res_xrcdn {
191 struct res_common com;
192 int port;
193};
194
1b9c6b06
HHZ
195enum res_fs_rule_states {
196 RES_FS_RULE_BUSY = RES_ANY_BUSY,
197 RES_FS_RULE_ALLOCATED,
198};
199
200struct res_fs_rule {
201 struct res_common com;
2c473ae7 202 int qpn;
1b9c6b06
HHZ
203};
204
4af1c048
HHZ
205static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
206{
207 struct rb_node *node = root->rb_node;
208
209 while (node) {
210 struct res_common *res = container_of(node, struct res_common,
211 node);
212
213 if (res_id < res->res_id)
214 node = node->rb_left;
215 else if (res_id > res->res_id)
216 node = node->rb_right;
217 else
218 return res;
219 }
220 return NULL;
221}
222
223static int res_tracker_insert(struct rb_root *root, struct res_common *res)
224{
225 struct rb_node **new = &(root->rb_node), *parent = NULL;
226
227 /* Figure out where to put new node */
228 while (*new) {
229 struct res_common *this = container_of(*new, struct res_common,
230 node);
231
232 parent = *new;
233 if (res->res_id < this->res_id)
234 new = &((*new)->rb_left);
235 else if (res->res_id > this->res_id)
236 new = &((*new)->rb_right);
237 else
238 return -EEXIST;
239 }
240
241 /* Add new node and rebalance tree. */
242 rb_link_node(&res->node, parent, new);
243 rb_insert_color(&res->node, root);
244
245 return 0;
246}
247
54679e14
JM
248enum qp_transition {
249 QP_TRANS_INIT2RTR,
250 QP_TRANS_RTR2RTS,
251 QP_TRANS_RTS2RTS,
252 QP_TRANS_SQERR2RTS,
253 QP_TRANS_SQD2SQD,
254 QP_TRANS_SQD2RTS
255};
256
c82e9aa0
EC
257/* For Debug uses */
258static const char *ResourceType(enum mlx4_resource rt)
259{
260 switch (rt) {
261 case RES_QP: return "RES_QP";
262 case RES_CQ: return "RES_CQ";
263 case RES_SRQ: return "RES_SRQ";
264 case RES_MPT: return "RES_MPT";
265 case RES_MTT: return "RES_MTT";
266 case RES_MAC: return "RES_MAC";
267 case RES_EQ: return "RES_EQ";
268 case RES_COUNTER: return "RES_COUNTER";
1b9c6b06 269 case RES_FS_RULE: return "RES_FS_RULE";
ba062d52 270 case RES_XRCD: return "RES_XRCD";
c82e9aa0
EC
271 default: return "Unknown resource type !!!";
272 };
273}
274
c82e9aa0
EC
275int mlx4_init_resource_tracker(struct mlx4_dev *dev)
276{
277 struct mlx4_priv *priv = mlx4_priv(dev);
278 int i;
279 int t;
280
281 priv->mfunc.master.res_tracker.slave_list =
282 kzalloc(dev->num_slaves * sizeof(struct slave_list),
283 GFP_KERNEL);
284 if (!priv->mfunc.master.res_tracker.slave_list)
285 return -ENOMEM;
286
287 for (i = 0 ; i < dev->num_slaves; i++) {
288 for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
289 INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
290 slave_list[i].res_list[t]);
291 mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
292 }
293
294 mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
295 dev->num_slaves);
296 for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
4af1c048 297 priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
c82e9aa0
EC
298
299 spin_lock_init(&priv->mfunc.master.res_tracker.lock);
300 return 0 ;
301}
302
b8924951
JM
303void mlx4_free_resource_tracker(struct mlx4_dev *dev,
304 enum mlx4_res_tracker_free_type type)
c82e9aa0
EC
305{
306 struct mlx4_priv *priv = mlx4_priv(dev);
307 int i;
308
309 if (priv->mfunc.master.res_tracker.slave_list) {
b8924951
JM
310 if (type != RES_TR_FREE_STRUCTS_ONLY)
311 for (i = 0 ; i < dev->num_slaves; i++)
312 if (type == RES_TR_FREE_ALL ||
313 dev->caps.function != i)
314 mlx4_delete_all_resources_for_slave(dev, i);
315
316 if (type != RES_TR_FREE_SLAVES_ONLY) {
317 kfree(priv->mfunc.master.res_tracker.slave_list);
318 priv->mfunc.master.res_tracker.slave_list = NULL;
319 }
c82e9aa0
EC
320 }
321}
322
54679e14
JM
323static void update_pkey_index(struct mlx4_dev *dev, int slave,
324 struct mlx4_cmd_mailbox *inbox)
c82e9aa0 325{
54679e14
JM
326 u8 sched = *(u8 *)(inbox->buf + 64);
327 u8 orig_index = *(u8 *)(inbox->buf + 35);
328 u8 new_index;
329 struct mlx4_priv *priv = mlx4_priv(dev);
330 int port;
331
332 port = (sched >> 6 & 1) + 1;
333
334 new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
335 *(u8 *)(inbox->buf + 35) = new_index;
54679e14
JM
336}
337
338static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
339 u8 slave)
340{
341 struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
342 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
343 u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
c82e9aa0
EC
344
345 if (MLX4_QP_ST_UD == ts)
346 qp_ctx->pri_path.mgid_index = 0x80 | slave;
347
54679e14
JM
348 if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
349 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
350 qp_ctx->pri_path.mgid_index = slave & 0x7F;
351 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
352 qp_ctx->alt_path.mgid_index = slave & 0x7F;
353 }
c82e9aa0
EC
354}
355
3f7fb021
RE
356static int update_vport_qp_param(struct mlx4_dev *dev,
357 struct mlx4_cmd_mailbox *inbox,
358 u8 slave)
359{
360 struct mlx4_qp_context *qpc = inbox->buf + 8;
361 struct mlx4_vport_oper_state *vp_oper;
362 struct mlx4_priv *priv;
363 u32 qp_type;
364 int port;
365
366 port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
367 priv = mlx4_priv(dev);
368 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
369
370 if (MLX4_VGT != vp_oper->state.default_vlan) {
371 qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
372 if (MLX4_QP_ST_RC == qp_type)
373 return -EINVAL;
374
7677fc96
RE
375 /* force strip vlan by clear vsd */
376 qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
377 if (0 != vp_oper->state.default_vlan) {
378 qpc->pri_path.vlan_control =
379 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
380 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
381 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
382 } else { /* priority tagged */
383 qpc->pri_path.vlan_control =
384 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
385 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
386 }
387
388 qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
3f7fb021 389 qpc->pri_path.vlan_index = vp_oper->vlan_idx;
7677fc96
RE
390 qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
391 qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
3f7fb021
RE
392 qpc->pri_path.sched_queue &= 0xC7;
393 qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
3f7fb021 394 }
e6b6a231 395 if (vp_oper->state.spoofchk) {
7677fc96 396 qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
e6b6a231 397 qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
e6b6a231 398 }
3f7fb021
RE
399 return 0;
400}
401
c82e9aa0
EC
402static int mpt_mask(struct mlx4_dev *dev)
403{
404 return dev->caps.num_mpts - 1;
405}
406
1e3f7b32 407static void *find_res(struct mlx4_dev *dev, u64 res_id,
c82e9aa0
EC
408 enum mlx4_resource type)
409{
410 struct mlx4_priv *priv = mlx4_priv(dev);
411
4af1c048
HHZ
412 return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
413 res_id);
c82e9aa0
EC
414}
415
aa1ec3dd 416static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
c82e9aa0
EC
417 enum mlx4_resource type,
418 void *res)
419{
420 struct res_common *r;
421 int err = 0;
422
423 spin_lock_irq(mlx4_tlock(dev));
424 r = find_res(dev, res_id, type);
425 if (!r) {
426 err = -ENONET;
427 goto exit;
428 }
429
430 if (r->state == RES_ANY_BUSY) {
431 err = -EBUSY;
432 goto exit;
433 }
434
435 if (r->owner != slave) {
436 err = -EPERM;
437 goto exit;
438 }
439
440 r->from_state = r->state;
441 r->state = RES_ANY_BUSY;
c82e9aa0
EC
442
443 if (res)
444 *((struct res_common **)res) = r;
445
446exit:
447 spin_unlock_irq(mlx4_tlock(dev));
448 return err;
449}
450
451int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
452 enum mlx4_resource type,
aa1ec3dd 453 u64 res_id, int *slave)
c82e9aa0
EC
454{
455
456 struct res_common *r;
457 int err = -ENOENT;
458 int id = res_id;
459
460 if (type == RES_QP)
461 id &= 0x7fffff;
996b0541 462 spin_lock(mlx4_tlock(dev));
c82e9aa0
EC
463
464 r = find_res(dev, id, type);
465 if (r) {
466 *slave = r->owner;
467 err = 0;
468 }
996b0541 469 spin_unlock(mlx4_tlock(dev));
c82e9aa0
EC
470
471 return err;
472}
473
aa1ec3dd 474static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
c82e9aa0
EC
475 enum mlx4_resource type)
476{
477 struct res_common *r;
478
479 spin_lock_irq(mlx4_tlock(dev));
480 r = find_res(dev, res_id, type);
481 if (r)
482 r->state = r->from_state;
483 spin_unlock_irq(mlx4_tlock(dev));
484}
485
486static struct res_common *alloc_qp_tr(int id)
487{
488 struct res_qp *ret;
489
490 ret = kzalloc(sizeof *ret, GFP_KERNEL);
491 if (!ret)
492 return NULL;
493
494 ret->com.res_id = id;
495 ret->com.state = RES_QP_RESERVED;
2531188b 496 ret->local_qpn = id;
c82e9aa0
EC
497 INIT_LIST_HEAD(&ret->mcg_list);
498 spin_lock_init(&ret->mcg_spl);
2c473ae7 499 atomic_set(&ret->ref_count, 0);
c82e9aa0
EC
500
501 return &ret->com;
502}
503
504static struct res_common *alloc_mtt_tr(int id, int order)
505{
506 struct res_mtt *ret;
507
508 ret = kzalloc(sizeof *ret, GFP_KERNEL);
509 if (!ret)
510 return NULL;
511
512 ret->com.res_id = id;
513 ret->order = order;
514 ret->com.state = RES_MTT_ALLOCATED;
515 atomic_set(&ret->ref_count, 0);
516
517 return &ret->com;
518}
519
520static struct res_common *alloc_mpt_tr(int id, int key)
521{
522 struct res_mpt *ret;
523
524 ret = kzalloc(sizeof *ret, GFP_KERNEL);
525 if (!ret)
526 return NULL;
527
528 ret->com.res_id = id;
529 ret->com.state = RES_MPT_RESERVED;
530 ret->key = key;
531
532 return &ret->com;
533}
534
535static struct res_common *alloc_eq_tr(int id)
536{
537 struct res_eq *ret;
538
539 ret = kzalloc(sizeof *ret, GFP_KERNEL);
540 if (!ret)
541 return NULL;
542
543 ret->com.res_id = id;
544 ret->com.state = RES_EQ_RESERVED;
545
546 return &ret->com;
547}
548
549static struct res_common *alloc_cq_tr(int id)
550{
551 struct res_cq *ret;
552
553 ret = kzalloc(sizeof *ret, GFP_KERNEL);
554 if (!ret)
555 return NULL;
556
557 ret->com.res_id = id;
558 ret->com.state = RES_CQ_ALLOCATED;
559 atomic_set(&ret->ref_count, 0);
560
561 return &ret->com;
562}
563
564static struct res_common *alloc_srq_tr(int id)
565{
566 struct res_srq *ret;
567
568 ret = kzalloc(sizeof *ret, GFP_KERNEL);
569 if (!ret)
570 return NULL;
571
572 ret->com.res_id = id;
573 ret->com.state = RES_SRQ_ALLOCATED;
574 atomic_set(&ret->ref_count, 0);
575
576 return &ret->com;
577}
578
579static struct res_common *alloc_counter_tr(int id)
580{
581 struct res_counter *ret;
582
583 ret = kzalloc(sizeof *ret, GFP_KERNEL);
584 if (!ret)
585 return NULL;
586
587 ret->com.res_id = id;
588 ret->com.state = RES_COUNTER_ALLOCATED;
589
590 return &ret->com;
591}
592
ba062d52
JM
593static struct res_common *alloc_xrcdn_tr(int id)
594{
595 struct res_xrcdn *ret;
596
597 ret = kzalloc(sizeof *ret, GFP_KERNEL);
598 if (!ret)
599 return NULL;
600
601 ret->com.res_id = id;
602 ret->com.state = RES_XRCD_ALLOCATED;
603
604 return &ret->com;
605}
606
2c473ae7 607static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
1b9c6b06
HHZ
608{
609 struct res_fs_rule *ret;
610
611 ret = kzalloc(sizeof *ret, GFP_KERNEL);
612 if (!ret)
613 return NULL;
614
615 ret->com.res_id = id;
616 ret->com.state = RES_FS_RULE_ALLOCATED;
2c473ae7 617 ret->qpn = qpn;
1b9c6b06
HHZ
618 return &ret->com;
619}
620
aa1ec3dd 621static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
c82e9aa0
EC
622 int extra)
623{
624 struct res_common *ret;
625
626 switch (type) {
627 case RES_QP:
628 ret = alloc_qp_tr(id);
629 break;
630 case RES_MPT:
631 ret = alloc_mpt_tr(id, extra);
632 break;
633 case RES_MTT:
634 ret = alloc_mtt_tr(id, extra);
635 break;
636 case RES_EQ:
637 ret = alloc_eq_tr(id);
638 break;
639 case RES_CQ:
640 ret = alloc_cq_tr(id);
641 break;
642 case RES_SRQ:
643 ret = alloc_srq_tr(id);
644 break;
645 case RES_MAC:
646 printk(KERN_ERR "implementation missing\n");
647 return NULL;
648 case RES_COUNTER:
649 ret = alloc_counter_tr(id);
650 break;
ba062d52
JM
651 case RES_XRCD:
652 ret = alloc_xrcdn_tr(id);
653 break;
1b9c6b06 654 case RES_FS_RULE:
2c473ae7 655 ret = alloc_fs_rule_tr(id, extra);
1b9c6b06 656 break;
c82e9aa0
EC
657 default:
658 return NULL;
659 }
660 if (ret)
661 ret->owner = slave;
662
663 return ret;
664}
665
aa1ec3dd 666static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
c82e9aa0
EC
667 enum mlx4_resource type, int extra)
668{
669 int i;
670 int err;
671 struct mlx4_priv *priv = mlx4_priv(dev);
672 struct res_common **res_arr;
673 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4af1c048 674 struct rb_root *root = &tracker->res_tree[type];
c82e9aa0
EC
675
676 res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
677 if (!res_arr)
678 return -ENOMEM;
679
680 for (i = 0; i < count; ++i) {
681 res_arr[i] = alloc_tr(base + i, type, slave, extra);
682 if (!res_arr[i]) {
683 for (--i; i >= 0; --i)
684 kfree(res_arr[i]);
685
686 kfree(res_arr);
687 return -ENOMEM;
688 }
689 }
690
691 spin_lock_irq(mlx4_tlock(dev));
692 for (i = 0; i < count; ++i) {
693 if (find_res(dev, base + i, type)) {
694 err = -EEXIST;
695 goto undo;
696 }
4af1c048 697 err = res_tracker_insert(root, res_arr[i]);
c82e9aa0
EC
698 if (err)
699 goto undo;
700 list_add_tail(&res_arr[i]->list,
701 &tracker->slave_list[slave].res_list[type]);
702 }
703 spin_unlock_irq(mlx4_tlock(dev));
704 kfree(res_arr);
705
706 return 0;
707
708undo:
709 for (--i; i >= base; --i)
4af1c048 710 rb_erase(&res_arr[i]->node, root);
c82e9aa0
EC
711
712 spin_unlock_irq(mlx4_tlock(dev));
713
714 for (i = 0; i < count; ++i)
715 kfree(res_arr[i]);
716
717 kfree(res_arr);
718
719 return err;
720}
721
722static int remove_qp_ok(struct res_qp *res)
723{
2c473ae7
HHZ
724 if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
725 !list_empty(&res->mcg_list)) {
726 pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
727 res->com.state, atomic_read(&res->ref_count));
c82e9aa0 728 return -EBUSY;
2c473ae7 729 } else if (res->com.state != RES_QP_RESERVED) {
c82e9aa0 730 return -EPERM;
2c473ae7 731 }
c82e9aa0
EC
732
733 return 0;
734}
735
736static int remove_mtt_ok(struct res_mtt *res, int order)
737{
738 if (res->com.state == RES_MTT_BUSY ||
739 atomic_read(&res->ref_count)) {
740 printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
741 __func__, __LINE__,
742 mtt_states_str(res->com.state),
743 atomic_read(&res->ref_count));
744 return -EBUSY;
745 } else if (res->com.state != RES_MTT_ALLOCATED)
746 return -EPERM;
747 else if (res->order != order)
748 return -EINVAL;
749
750 return 0;
751}
752
753static int remove_mpt_ok(struct res_mpt *res)
754{
755 if (res->com.state == RES_MPT_BUSY)
756 return -EBUSY;
757 else if (res->com.state != RES_MPT_RESERVED)
758 return -EPERM;
759
760 return 0;
761}
762
763static int remove_eq_ok(struct res_eq *res)
764{
765 if (res->com.state == RES_MPT_BUSY)
766 return -EBUSY;
767 else if (res->com.state != RES_MPT_RESERVED)
768 return -EPERM;
769
770 return 0;
771}
772
773static int remove_counter_ok(struct res_counter *res)
774{
775 if (res->com.state == RES_COUNTER_BUSY)
776 return -EBUSY;
777 else if (res->com.state != RES_COUNTER_ALLOCATED)
778 return -EPERM;
779
780 return 0;
781}
782
ba062d52
JM
783static int remove_xrcdn_ok(struct res_xrcdn *res)
784{
785 if (res->com.state == RES_XRCD_BUSY)
786 return -EBUSY;
787 else if (res->com.state != RES_XRCD_ALLOCATED)
788 return -EPERM;
789
790 return 0;
791}
792
1b9c6b06
HHZ
793static int remove_fs_rule_ok(struct res_fs_rule *res)
794{
795 if (res->com.state == RES_FS_RULE_BUSY)
796 return -EBUSY;
797 else if (res->com.state != RES_FS_RULE_ALLOCATED)
798 return -EPERM;
799
800 return 0;
801}
802
c82e9aa0
EC
803static int remove_cq_ok(struct res_cq *res)
804{
805 if (res->com.state == RES_CQ_BUSY)
806 return -EBUSY;
807 else if (res->com.state != RES_CQ_ALLOCATED)
808 return -EPERM;
809
810 return 0;
811}
812
813static int remove_srq_ok(struct res_srq *res)
814{
815 if (res->com.state == RES_SRQ_BUSY)
816 return -EBUSY;
817 else if (res->com.state != RES_SRQ_ALLOCATED)
818 return -EPERM;
819
820 return 0;
821}
822
823static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
824{
825 switch (type) {
826 case RES_QP:
827 return remove_qp_ok((struct res_qp *)res);
828 case RES_CQ:
829 return remove_cq_ok((struct res_cq *)res);
830 case RES_SRQ:
831 return remove_srq_ok((struct res_srq *)res);
832 case RES_MPT:
833 return remove_mpt_ok((struct res_mpt *)res);
834 case RES_MTT:
835 return remove_mtt_ok((struct res_mtt *)res, extra);
836 case RES_MAC:
837 return -ENOSYS;
838 case RES_EQ:
839 return remove_eq_ok((struct res_eq *)res);
840 case RES_COUNTER:
841 return remove_counter_ok((struct res_counter *)res);
ba062d52
JM
842 case RES_XRCD:
843 return remove_xrcdn_ok((struct res_xrcdn *)res);
1b9c6b06
HHZ
844 case RES_FS_RULE:
845 return remove_fs_rule_ok((struct res_fs_rule *)res);
c82e9aa0
EC
846 default:
847 return -EINVAL;
848 }
849}
850
aa1ec3dd 851static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
c82e9aa0
EC
852 enum mlx4_resource type, int extra)
853{
aa1ec3dd 854 u64 i;
c82e9aa0
EC
855 int err;
856 struct mlx4_priv *priv = mlx4_priv(dev);
857 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
858 struct res_common *r;
859
860 spin_lock_irq(mlx4_tlock(dev));
861 for (i = base; i < base + count; ++i) {
4af1c048 862 r = res_tracker_lookup(&tracker->res_tree[type], i);
c82e9aa0
EC
863 if (!r) {
864 err = -ENOENT;
865 goto out;
866 }
867 if (r->owner != slave) {
868 err = -EPERM;
869 goto out;
870 }
871 err = remove_ok(r, type, extra);
872 if (err)
873 goto out;
874 }
875
876 for (i = base; i < base + count; ++i) {
4af1c048
HHZ
877 r = res_tracker_lookup(&tracker->res_tree[type], i);
878 rb_erase(&r->node, &tracker->res_tree[type]);
c82e9aa0
EC
879 list_del(&r->list);
880 kfree(r);
881 }
882 err = 0;
883
884out:
885 spin_unlock_irq(mlx4_tlock(dev));
886
887 return err;
888}
889
890static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
891 enum res_qp_states state, struct res_qp **qp,
892 int alloc)
893{
894 struct mlx4_priv *priv = mlx4_priv(dev);
895 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
896 struct res_qp *r;
897 int err = 0;
898
899 spin_lock_irq(mlx4_tlock(dev));
4af1c048 900 r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
c82e9aa0
EC
901 if (!r)
902 err = -ENOENT;
903 else if (r->com.owner != slave)
904 err = -EPERM;
905 else {
906 switch (state) {
907 case RES_QP_BUSY:
aa1ec3dd 908 mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
c82e9aa0
EC
909 __func__, r->com.res_id);
910 err = -EBUSY;
911 break;
912
913 case RES_QP_RESERVED:
914 if (r->com.state == RES_QP_MAPPED && !alloc)
915 break;
916
aa1ec3dd 917 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
c82e9aa0
EC
918 err = -EINVAL;
919 break;
920
921 case RES_QP_MAPPED:
922 if ((r->com.state == RES_QP_RESERVED && alloc) ||
923 r->com.state == RES_QP_HW)
924 break;
925 else {
aa1ec3dd 926 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
c82e9aa0
EC
927 r->com.res_id);
928 err = -EINVAL;
929 }
930
931 break;
932
933 case RES_QP_HW:
934 if (r->com.state != RES_QP_MAPPED)
935 err = -EINVAL;
936 break;
937 default:
938 err = -EINVAL;
939 }
940
941 if (!err) {
942 r->com.from_state = r->com.state;
943 r->com.to_state = state;
944 r->com.state = RES_QP_BUSY;
945 if (qp)
64699336 946 *qp = r;
c82e9aa0
EC
947 }
948 }
949
950 spin_unlock_irq(mlx4_tlock(dev));
951
952 return err;
953}
954
955static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
956 enum res_mpt_states state, struct res_mpt **mpt)
957{
958 struct mlx4_priv *priv = mlx4_priv(dev);
959 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
960 struct res_mpt *r;
961 int err = 0;
962
963 spin_lock_irq(mlx4_tlock(dev));
4af1c048 964 r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
c82e9aa0
EC
965 if (!r)
966 err = -ENOENT;
967 else if (r->com.owner != slave)
968 err = -EPERM;
969 else {
970 switch (state) {
971 case RES_MPT_BUSY:
972 err = -EINVAL;
973 break;
974
975 case RES_MPT_RESERVED:
976 if (r->com.state != RES_MPT_MAPPED)
977 err = -EINVAL;
978 break;
979
980 case RES_MPT_MAPPED:
981 if (r->com.state != RES_MPT_RESERVED &&
982 r->com.state != RES_MPT_HW)
983 err = -EINVAL;
984 break;
985
986 case RES_MPT_HW:
987 if (r->com.state != RES_MPT_MAPPED)
988 err = -EINVAL;
989 break;
990 default:
991 err = -EINVAL;
992 }
993
994 if (!err) {
995 r->com.from_state = r->com.state;
996 r->com.to_state = state;
997 r->com.state = RES_MPT_BUSY;
998 if (mpt)
64699336 999 *mpt = r;
c82e9aa0
EC
1000 }
1001 }
1002
1003 spin_unlock_irq(mlx4_tlock(dev));
1004
1005 return err;
1006}
1007
1008static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1009 enum res_eq_states state, struct res_eq **eq)
1010{
1011 struct mlx4_priv *priv = mlx4_priv(dev);
1012 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1013 struct res_eq *r;
1014 int err = 0;
1015
1016 spin_lock_irq(mlx4_tlock(dev));
4af1c048 1017 r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
c82e9aa0
EC
1018 if (!r)
1019 err = -ENOENT;
1020 else if (r->com.owner != slave)
1021 err = -EPERM;
1022 else {
1023 switch (state) {
1024 case RES_EQ_BUSY:
1025 err = -EINVAL;
1026 break;
1027
1028 case RES_EQ_RESERVED:
1029 if (r->com.state != RES_EQ_HW)
1030 err = -EINVAL;
1031 break;
1032
1033 case RES_EQ_HW:
1034 if (r->com.state != RES_EQ_RESERVED)
1035 err = -EINVAL;
1036 break;
1037
1038 default:
1039 err = -EINVAL;
1040 }
1041
1042 if (!err) {
1043 r->com.from_state = r->com.state;
1044 r->com.to_state = state;
1045 r->com.state = RES_EQ_BUSY;
1046 if (eq)
1047 *eq = r;
1048 }
1049 }
1050
1051 spin_unlock_irq(mlx4_tlock(dev));
1052
1053 return err;
1054}
1055
1056static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1057 enum res_cq_states state, struct res_cq **cq)
1058{
1059 struct mlx4_priv *priv = mlx4_priv(dev);
1060 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1061 struct res_cq *r;
1062 int err;
1063
1064 spin_lock_irq(mlx4_tlock(dev));
4af1c048 1065 r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
c82e9aa0
EC
1066 if (!r)
1067 err = -ENOENT;
1068 else if (r->com.owner != slave)
1069 err = -EPERM;
1070 else {
1071 switch (state) {
1072 case RES_CQ_BUSY:
1073 err = -EBUSY;
1074 break;
1075
1076 case RES_CQ_ALLOCATED:
1077 if (r->com.state != RES_CQ_HW)
1078 err = -EINVAL;
1079 else if (atomic_read(&r->ref_count))
1080 err = -EBUSY;
1081 else
1082 err = 0;
1083 break;
1084
1085 case RES_CQ_HW:
1086 if (r->com.state != RES_CQ_ALLOCATED)
1087 err = -EINVAL;
1088 else
1089 err = 0;
1090 break;
1091
1092 default:
1093 err = -EINVAL;
1094 }
1095
1096 if (!err) {
1097 r->com.from_state = r->com.state;
1098 r->com.to_state = state;
1099 r->com.state = RES_CQ_BUSY;
1100 if (cq)
1101 *cq = r;
1102 }
1103 }
1104
1105 spin_unlock_irq(mlx4_tlock(dev));
1106
1107 return err;
1108}
1109
1110static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1111 enum res_cq_states state, struct res_srq **srq)
1112{
1113 struct mlx4_priv *priv = mlx4_priv(dev);
1114 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1115 struct res_srq *r;
1116 int err = 0;
1117
1118 spin_lock_irq(mlx4_tlock(dev));
4af1c048 1119 r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
c82e9aa0
EC
1120 if (!r)
1121 err = -ENOENT;
1122 else if (r->com.owner != slave)
1123 err = -EPERM;
1124 else {
1125 switch (state) {
1126 case RES_SRQ_BUSY:
1127 err = -EINVAL;
1128 break;
1129
1130 case RES_SRQ_ALLOCATED:
1131 if (r->com.state != RES_SRQ_HW)
1132 err = -EINVAL;
1133 else if (atomic_read(&r->ref_count))
1134 err = -EBUSY;
1135 break;
1136
1137 case RES_SRQ_HW:
1138 if (r->com.state != RES_SRQ_ALLOCATED)
1139 err = -EINVAL;
1140 break;
1141
1142 default:
1143 err = -EINVAL;
1144 }
1145
1146 if (!err) {
1147 r->com.from_state = r->com.state;
1148 r->com.to_state = state;
1149 r->com.state = RES_SRQ_BUSY;
1150 if (srq)
1151 *srq = r;
1152 }
1153 }
1154
1155 spin_unlock_irq(mlx4_tlock(dev));
1156
1157 return err;
1158}
1159
1160static void res_abort_move(struct mlx4_dev *dev, int slave,
1161 enum mlx4_resource type, int id)
1162{
1163 struct mlx4_priv *priv = mlx4_priv(dev);
1164 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1165 struct res_common *r;
1166
1167 spin_lock_irq(mlx4_tlock(dev));
4af1c048 1168 r = res_tracker_lookup(&tracker->res_tree[type], id);
c82e9aa0
EC
1169 if (r && (r->owner == slave))
1170 r->state = r->from_state;
1171 spin_unlock_irq(mlx4_tlock(dev));
1172}
1173
1174static void res_end_move(struct mlx4_dev *dev, int slave,
1175 enum mlx4_resource type, int id)
1176{
1177 struct mlx4_priv *priv = mlx4_priv(dev);
1178 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1179 struct res_common *r;
1180
1181 spin_lock_irq(mlx4_tlock(dev));
4af1c048 1182 r = res_tracker_lookup(&tracker->res_tree[type], id);
c82e9aa0
EC
1183 if (r && (r->owner == slave))
1184 r->state = r->to_state;
1185 spin_unlock_irq(mlx4_tlock(dev));
1186}
1187
1188static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1189{
e2c76824
JM
1190 return mlx4_is_qp_reserved(dev, qpn) &&
1191 (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
c82e9aa0
EC
1192}
1193
54679e14
JM
1194static int fw_reserved(struct mlx4_dev *dev, int qpn)
1195{
1196 return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
c82e9aa0
EC
1197}
1198
1199static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1200 u64 in_param, u64 *out_param)
1201{
1202 int err;
1203 int count;
1204 int align;
1205 int base;
1206 int qpn;
1207
1208 switch (op) {
1209 case RES_OP_RESERVE:
1210 count = get_param_l(&in_param);
1211 align = get_param_h(&in_param);
1212 err = __mlx4_qp_reserve_range(dev, count, align, &base);
1213 if (err)
1214 return err;
1215
1216 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1217 if (err) {
1218 __mlx4_qp_release_range(dev, base, count);
1219 return err;
1220 }
1221 set_param_l(out_param, base);
1222 break;
1223 case RES_OP_MAP_ICM:
1224 qpn = get_param_l(&in_param) & 0x7fffff;
1225 if (valid_reserved(dev, slave, qpn)) {
1226 err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1227 if (err)
1228 return err;
1229 }
1230
1231 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1232 NULL, 1);
1233 if (err)
1234 return err;
1235
54679e14 1236 if (!fw_reserved(dev, qpn)) {
c82e9aa0
EC
1237 err = __mlx4_qp_alloc_icm(dev, qpn);
1238 if (err) {
1239 res_abort_move(dev, slave, RES_QP, qpn);
1240 return err;
1241 }
1242 }
1243
1244 res_end_move(dev, slave, RES_QP, qpn);
1245 break;
1246
1247 default:
1248 err = -EINVAL;
1249 break;
1250 }
1251 return err;
1252}
1253
1254static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1255 u64 in_param, u64 *out_param)
1256{
1257 int err = -EINVAL;
1258 int base;
1259 int order;
1260
1261 if (op != RES_OP_RESERVE_AND_MAP)
1262 return err;
1263
1264 order = get_param_l(&in_param);
1265 base = __mlx4_alloc_mtt_range(dev, order);
1266 if (base == -1)
1267 return -ENOMEM;
1268
1269 err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1270 if (err)
1271 __mlx4_free_mtt_range(dev, base, order);
1272 else
1273 set_param_l(out_param, base);
1274
1275 return err;
1276}
1277
1278static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1279 u64 in_param, u64 *out_param)
1280{
1281 int err = -EINVAL;
1282 int index;
1283 int id;
1284 struct res_mpt *mpt;
1285
1286 switch (op) {
1287 case RES_OP_RESERVE:
b20e519a 1288 index = __mlx4_mpt_reserve(dev);
c82e9aa0
EC
1289 if (index == -1)
1290 break;
1291 id = index & mpt_mask(dev);
1292
1293 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1294 if (err) {
b20e519a 1295 __mlx4_mpt_release(dev, index);
c82e9aa0
EC
1296 break;
1297 }
1298 set_param_l(out_param, index);
1299 break;
1300 case RES_OP_MAP_ICM:
1301 index = get_param_l(&in_param);
1302 id = index & mpt_mask(dev);
1303 err = mr_res_start_move_to(dev, slave, id,
1304 RES_MPT_MAPPED, &mpt);
1305 if (err)
1306 return err;
1307
b20e519a 1308 err = __mlx4_mpt_alloc_icm(dev, mpt->key);
c82e9aa0
EC
1309 if (err) {
1310 res_abort_move(dev, slave, RES_MPT, id);
1311 return err;
1312 }
1313
1314 res_end_move(dev, slave, RES_MPT, id);
1315 break;
1316 }
1317 return err;
1318}
1319
1320static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1321 u64 in_param, u64 *out_param)
1322{
1323 int cqn;
1324 int err;
1325
1326 switch (op) {
1327 case RES_OP_RESERVE_AND_MAP:
1328 err = __mlx4_cq_alloc_icm(dev, &cqn);
1329 if (err)
1330 break;
1331
1332 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1333 if (err) {
1334 __mlx4_cq_free_icm(dev, cqn);
1335 break;
1336 }
1337
1338 set_param_l(out_param, cqn);
1339 break;
1340
1341 default:
1342 err = -EINVAL;
1343 }
1344
1345 return err;
1346}
1347
1348static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1349 u64 in_param, u64 *out_param)
1350{
1351 int srqn;
1352 int err;
1353
1354 switch (op) {
1355 case RES_OP_RESERVE_AND_MAP:
1356 err = __mlx4_srq_alloc_icm(dev, &srqn);
1357 if (err)
1358 break;
1359
1360 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1361 if (err) {
1362 __mlx4_srq_free_icm(dev, srqn);
1363 break;
1364 }
1365
1366 set_param_l(out_param, srqn);
1367 break;
1368
1369 default:
1370 err = -EINVAL;
1371 }
1372
1373 return err;
1374}
1375
1376static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
1377{
1378 struct mlx4_priv *priv = mlx4_priv(dev);
1379 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1380 struct mac_res *res;
1381
1382 res = kzalloc(sizeof *res, GFP_KERNEL);
1383 if (!res)
1384 return -ENOMEM;
1385 res->mac = mac;
1386 res->port = (u8) port;
1387 list_add_tail(&res->list,
1388 &tracker->slave_list[slave].res_list[RES_MAC]);
1389 return 0;
1390}
1391
1392static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1393 int port)
1394{
1395 struct mlx4_priv *priv = mlx4_priv(dev);
1396 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1397 struct list_head *mac_list =
1398 &tracker->slave_list[slave].res_list[RES_MAC];
1399 struct mac_res *res, *tmp;
1400
1401 list_for_each_entry_safe(res, tmp, mac_list, list) {
1402 if (res->mac == mac && res->port == (u8) port) {
1403 list_del(&res->list);
1404 kfree(res);
1405 break;
1406 }
1407 }
1408}
1409
1410static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1411{
1412 struct mlx4_priv *priv = mlx4_priv(dev);
1413 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1414 struct list_head *mac_list =
1415 &tracker->slave_list[slave].res_list[RES_MAC];
1416 struct mac_res *res, *tmp;
1417
1418 list_for_each_entry_safe(res, tmp, mac_list, list) {
1419 list_del(&res->list);
1420 __mlx4_unregister_mac(dev, res->port, res->mac);
1421 kfree(res);
1422 }
1423}
1424
1425static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1426 u64 in_param, u64 *out_param)
1427{
1428 int err = -EINVAL;
1429 int port;
1430 u64 mac;
1431
1432 if (op != RES_OP_RESERVE_AND_MAP)
1433 return err;
1434
1435 port = get_param_l(out_param);
1436 mac = in_param;
1437
1438 err = __mlx4_register_mac(dev, port, mac);
1439 if (err >= 0) {
1440 set_param_l(out_param, err);
1441 err = 0;
1442 }
1443
1444 if (!err) {
1445 err = mac_add_to_slave(dev, slave, mac, port);
1446 if (err)
1447 __mlx4_unregister_mac(dev, port, mac);
1448 }
1449 return err;
1450}
1451
ffe455ad
EE
1452static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1453 u64 in_param, u64 *out_param)
1454{
1455 return 0;
1456}
1457
ba062d52
JM
1458static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1459 u64 in_param, u64 *out_param)
1460{
1461 u32 index;
1462 int err;
1463
1464 if (op != RES_OP_RESERVE)
1465 return -EINVAL;
1466
1467 err = __mlx4_counter_alloc(dev, &index);
1468 if (err)
1469 return err;
1470
1471 err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1472 if (err)
1473 __mlx4_counter_free(dev, index);
1474 else
1475 set_param_l(out_param, index);
1476
1477 return err;
1478}
1479
1480static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1481 u64 in_param, u64 *out_param)
1482{
1483 u32 xrcdn;
1484 int err;
1485
1486 if (op != RES_OP_RESERVE)
1487 return -EINVAL;
1488
1489 err = __mlx4_xrcd_alloc(dev, &xrcdn);
1490 if (err)
1491 return err;
1492
1493 err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
1494 if (err)
1495 __mlx4_xrcd_free(dev, xrcdn);
1496 else
1497 set_param_l(out_param, xrcdn);
1498
1499 return err;
1500}
1501
c82e9aa0
EC
1502int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
1503 struct mlx4_vhcr *vhcr,
1504 struct mlx4_cmd_mailbox *inbox,
1505 struct mlx4_cmd_mailbox *outbox,
1506 struct mlx4_cmd_info *cmd)
1507{
1508 int err;
1509 int alop = vhcr->op_modifier;
1510
1511 switch (vhcr->in_modifier) {
1512 case RES_QP:
1513 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
1514 vhcr->in_param, &vhcr->out_param);
1515 break;
1516
1517 case RES_MTT:
1518 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1519 vhcr->in_param, &vhcr->out_param);
1520 break;
1521
1522 case RES_MPT:
1523 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1524 vhcr->in_param, &vhcr->out_param);
1525 break;
1526
1527 case RES_CQ:
1528 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1529 vhcr->in_param, &vhcr->out_param);
1530 break;
1531
1532 case RES_SRQ:
1533 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1534 vhcr->in_param, &vhcr->out_param);
1535 break;
1536
1537 case RES_MAC:
1538 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
1539 vhcr->in_param, &vhcr->out_param);
1540 break;
1541
ffe455ad
EE
1542 case RES_VLAN:
1543 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
1544 vhcr->in_param, &vhcr->out_param);
1545 break;
1546
ba062d52
JM
1547 case RES_COUNTER:
1548 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
1549 vhcr->in_param, &vhcr->out_param);
1550 break;
1551
1552 case RES_XRCD:
1553 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
1554 vhcr->in_param, &vhcr->out_param);
1555 break;
1556
c82e9aa0
EC
1557 default:
1558 err = -EINVAL;
1559 break;
1560 }
1561
1562 return err;
1563}
1564
1565static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1566 u64 in_param)
1567{
1568 int err;
1569 int count;
1570 int base;
1571 int qpn;
1572
1573 switch (op) {
1574 case RES_OP_RESERVE:
1575 base = get_param_l(&in_param) & 0x7fffff;
1576 count = get_param_h(&in_param);
1577 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
1578 if (err)
1579 break;
1580 __mlx4_qp_release_range(dev, base, count);
1581 break;
1582 case RES_OP_MAP_ICM:
1583 qpn = get_param_l(&in_param) & 0x7fffff;
1584 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
1585 NULL, 0);
1586 if (err)
1587 return err;
1588
54679e14 1589 if (!fw_reserved(dev, qpn))
c82e9aa0
EC
1590 __mlx4_qp_free_icm(dev, qpn);
1591
1592 res_end_move(dev, slave, RES_QP, qpn);
1593
1594 if (valid_reserved(dev, slave, qpn))
1595 err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
1596 break;
1597 default:
1598 err = -EINVAL;
1599 break;
1600 }
1601 return err;
1602}
1603
1604static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1605 u64 in_param, u64 *out_param)
1606{
1607 int err = -EINVAL;
1608 int base;
1609 int order;
1610
1611 if (op != RES_OP_RESERVE_AND_MAP)
1612 return err;
1613
1614 base = get_param_l(&in_param);
1615 order = get_param_h(&in_param);
1616 err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
1617 if (!err)
1618 __mlx4_free_mtt_range(dev, base, order);
1619 return err;
1620}
1621
1622static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1623 u64 in_param)
1624{
1625 int err = -EINVAL;
1626 int index;
1627 int id;
1628 struct res_mpt *mpt;
1629
1630 switch (op) {
1631 case RES_OP_RESERVE:
1632 index = get_param_l(&in_param);
1633 id = index & mpt_mask(dev);
1634 err = get_res(dev, slave, id, RES_MPT, &mpt);
1635 if (err)
1636 break;
1637 index = mpt->key;
1638 put_res(dev, slave, id, RES_MPT);
1639
1640 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
1641 if (err)
1642 break;
b20e519a 1643 __mlx4_mpt_release(dev, index);
c82e9aa0
EC
1644 break;
1645 case RES_OP_MAP_ICM:
1646 index = get_param_l(&in_param);
1647 id = index & mpt_mask(dev);
1648 err = mr_res_start_move_to(dev, slave, id,
1649 RES_MPT_RESERVED, &mpt);
1650 if (err)
1651 return err;
1652
b20e519a 1653 __mlx4_mpt_free_icm(dev, mpt->key);
c82e9aa0
EC
1654 res_end_move(dev, slave, RES_MPT, id);
1655 return err;
1656 break;
1657 default:
1658 err = -EINVAL;
1659 break;
1660 }
1661 return err;
1662}
1663
1664static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1665 u64 in_param, u64 *out_param)
1666{
1667 int cqn;
1668 int err;
1669
1670 switch (op) {
1671 case RES_OP_RESERVE_AND_MAP:
1672 cqn = get_param_l(&in_param);
1673 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1674 if (err)
1675 break;
1676
1677 __mlx4_cq_free_icm(dev, cqn);
1678 break;
1679
1680 default:
1681 err = -EINVAL;
1682 break;
1683 }
1684
1685 return err;
1686}
1687
1688static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1689 u64 in_param, u64 *out_param)
1690{
1691 int srqn;
1692 int err;
1693
1694 switch (op) {
1695 case RES_OP_RESERVE_AND_MAP:
1696 srqn = get_param_l(&in_param);
1697 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1698 if (err)
1699 break;
1700
1701 __mlx4_srq_free_icm(dev, srqn);
1702 break;
1703
1704 default:
1705 err = -EINVAL;
1706 break;
1707 }
1708
1709 return err;
1710}
1711
1712static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1713 u64 in_param, u64 *out_param)
1714{
1715 int port;
1716 int err = 0;
1717
1718 switch (op) {
1719 case RES_OP_RESERVE_AND_MAP:
1720 port = get_param_l(out_param);
1721 mac_del_from_slave(dev, slave, in_param, port);
1722 __mlx4_unregister_mac(dev, port, in_param);
1723 break;
1724 default:
1725 err = -EINVAL;
1726 break;
1727 }
1728
1729 return err;
1730
1731}
1732
ffe455ad
EE
1733static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1734 u64 in_param, u64 *out_param)
1735{
1736 return 0;
1737}
1738
ba062d52
JM
1739static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1740 u64 in_param, u64 *out_param)
1741{
1742 int index;
1743 int err;
1744
1745 if (op != RES_OP_RESERVE)
1746 return -EINVAL;
1747
1748 index = get_param_l(&in_param);
1749 err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1750 if (err)
1751 return err;
1752
1753 __mlx4_counter_free(dev, index);
1754
1755 return err;
1756}
1757
1758static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1759 u64 in_param, u64 *out_param)
1760{
1761 int xrcdn;
1762 int err;
1763
1764 if (op != RES_OP_RESERVE)
1765 return -EINVAL;
1766
1767 xrcdn = get_param_l(&in_param);
1768 err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
1769 if (err)
1770 return err;
1771
1772 __mlx4_xrcd_free(dev, xrcdn);
1773
1774 return err;
1775}
1776
c82e9aa0
EC
1777int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
1778 struct mlx4_vhcr *vhcr,
1779 struct mlx4_cmd_mailbox *inbox,
1780 struct mlx4_cmd_mailbox *outbox,
1781 struct mlx4_cmd_info *cmd)
1782{
1783 int err = -EINVAL;
1784 int alop = vhcr->op_modifier;
1785
1786 switch (vhcr->in_modifier) {
1787 case RES_QP:
1788 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
1789 vhcr->in_param);
1790 break;
1791
1792 case RES_MTT:
1793 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
1794 vhcr->in_param, &vhcr->out_param);
1795 break;
1796
1797 case RES_MPT:
1798 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
1799 vhcr->in_param);
1800 break;
1801
1802 case RES_CQ:
1803 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
1804 vhcr->in_param, &vhcr->out_param);
1805 break;
1806
1807 case RES_SRQ:
1808 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
1809 vhcr->in_param, &vhcr->out_param);
1810 break;
1811
1812 case RES_MAC:
1813 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
1814 vhcr->in_param, &vhcr->out_param);
1815 break;
1816
ffe455ad
EE
1817 case RES_VLAN:
1818 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
1819 vhcr->in_param, &vhcr->out_param);
1820 break;
1821
ba062d52
JM
1822 case RES_COUNTER:
1823 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
1824 vhcr->in_param, &vhcr->out_param);
1825 break;
1826
1827 case RES_XRCD:
1828 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
1829 vhcr->in_param, &vhcr->out_param);
1830
c82e9aa0
EC
1831 default:
1832 break;
1833 }
1834 return err;
1835}
1836
1837/* ugly but other choices are uglier */
1838static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
1839{
1840 return (be32_to_cpu(mpt->flags) >> 9) & 1;
1841}
1842
2b8fb286 1843static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
c82e9aa0 1844{
2b8fb286 1845 return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
c82e9aa0
EC
1846}
1847
1848static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
1849{
1850 return be32_to_cpu(mpt->mtt_sz);
1851}
1852
cc1ade94
SM
1853static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
1854{
1855 return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
1856}
1857
1858static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
1859{
1860 return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
1861}
1862
1863static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
1864{
1865 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
1866}
1867
1868static int mr_is_region(struct mlx4_mpt_entry *mpt)
1869{
1870 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
1871}
1872
2b8fb286 1873static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
c82e9aa0
EC
1874{
1875 return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
1876}
1877
2b8fb286 1878static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
c82e9aa0
EC
1879{
1880 return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
1881}
1882
1883static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
1884{
1885 int page_shift = (qpc->log_page_size & 0x3f) + 12;
1886 int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
1887 int log_sq_sride = qpc->sq_size_stride & 7;
1888 int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
1889 int log_rq_stride = qpc->rq_size_stride & 7;
1890 int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
1891 int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
1892 int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
1893 int sq_size;
1894 int rq_size;
1895 int total_pages;
1896 int total_mem;
1897 int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
1898
1899 sq_size = 1 << (log_sq_size + log_sq_sride + 4);
1900 rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
1901 total_mem = sq_size + rq_size;
1902 total_pages =
1903 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
1904 page_shift);
1905
1906 return total_pages;
1907}
1908
c82e9aa0
EC
1909static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
1910 int size, struct res_mtt *mtt)
1911{
2b8fb286
MA
1912 int res_start = mtt->com.res_id;
1913 int res_size = (1 << mtt->order);
c82e9aa0
EC
1914
1915 if (start < res_start || start + size > res_start + res_size)
1916 return -EPERM;
1917 return 0;
1918}
1919
1920int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
1921 struct mlx4_vhcr *vhcr,
1922 struct mlx4_cmd_mailbox *inbox,
1923 struct mlx4_cmd_mailbox *outbox,
1924 struct mlx4_cmd_info *cmd)
1925{
1926 int err;
1927 int index = vhcr->in_modifier;
1928 struct res_mtt *mtt;
1929 struct res_mpt *mpt;
2b8fb286 1930 int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
c82e9aa0
EC
1931 int phys;
1932 int id;
cc1ade94
SM
1933 u32 pd;
1934 int pd_slave;
c82e9aa0
EC
1935
1936 id = index & mpt_mask(dev);
1937 err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
1938 if (err)
1939 return err;
1940
cc1ade94
SM
1941 /* Disable memory windows for VFs. */
1942 if (!mr_is_region(inbox->buf)) {
1943 err = -EPERM;
1944 goto ex_abort;
1945 }
1946
1947 /* Make sure that the PD bits related to the slave id are zeros. */
1948 pd = mr_get_pd(inbox->buf);
1949 pd_slave = (pd >> 17) & 0x7f;
1950 if (pd_slave != 0 && pd_slave != slave) {
1951 err = -EPERM;
1952 goto ex_abort;
1953 }
1954
1955 if (mr_is_fmr(inbox->buf)) {
1956 /* FMR and Bind Enable are forbidden in slave devices. */
1957 if (mr_is_bind_enabled(inbox->buf)) {
1958 err = -EPERM;
1959 goto ex_abort;
1960 }
1961 /* FMR and Memory Windows are also forbidden. */
1962 if (!mr_is_region(inbox->buf)) {
1963 err = -EPERM;
1964 goto ex_abort;
1965 }
1966 }
1967
c82e9aa0
EC
1968 phys = mr_phys_mpt(inbox->buf);
1969 if (!phys) {
2b8fb286 1970 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
c82e9aa0
EC
1971 if (err)
1972 goto ex_abort;
1973
1974 err = check_mtt_range(dev, slave, mtt_base,
1975 mr_get_mtt_size(inbox->buf), mtt);
1976 if (err)
1977 goto ex_put;
1978
1979 mpt->mtt = mtt;
1980 }
1981
c82e9aa0
EC
1982 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1983 if (err)
1984 goto ex_put;
1985
1986 if (!phys) {
1987 atomic_inc(&mtt->ref_count);
1988 put_res(dev, slave, mtt->com.res_id, RES_MTT);
1989 }
1990
1991 res_end_move(dev, slave, RES_MPT, id);
1992 return 0;
1993
1994ex_put:
1995 if (!phys)
1996 put_res(dev, slave, mtt->com.res_id, RES_MTT);
1997ex_abort:
1998 res_abort_move(dev, slave, RES_MPT, id);
1999
2000 return err;
2001}
2002
2003int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2004 struct mlx4_vhcr *vhcr,
2005 struct mlx4_cmd_mailbox *inbox,
2006 struct mlx4_cmd_mailbox *outbox,
2007 struct mlx4_cmd_info *cmd)
2008{
2009 int err;
2010 int index = vhcr->in_modifier;
2011 struct res_mpt *mpt;
2012 int id;
2013
2014 id = index & mpt_mask(dev);
2015 err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2016 if (err)
2017 return err;
2018
2019 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2020 if (err)
2021 goto ex_abort;
2022
2023 if (mpt->mtt)
2024 atomic_dec(&mpt->mtt->ref_count);
2025
2026 res_end_move(dev, slave, RES_MPT, id);
2027 return 0;
2028
2029ex_abort:
2030 res_abort_move(dev, slave, RES_MPT, id);
2031
2032 return err;
2033}
2034
2035int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2036 struct mlx4_vhcr *vhcr,
2037 struct mlx4_cmd_mailbox *inbox,
2038 struct mlx4_cmd_mailbox *outbox,
2039 struct mlx4_cmd_info *cmd)
2040{
2041 int err;
2042 int index = vhcr->in_modifier;
2043 struct res_mpt *mpt;
2044 int id;
2045
2046 id = index & mpt_mask(dev);
2047 err = get_res(dev, slave, id, RES_MPT, &mpt);
2048 if (err)
2049 return err;
2050
2051 if (mpt->com.from_state != RES_MPT_HW) {
2052 err = -EBUSY;
2053 goto out;
2054 }
2055
2056 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2057
2058out:
2059 put_res(dev, slave, id, RES_MPT);
2060 return err;
2061}
2062
2063static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2064{
2065 return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2066}
2067
2068static int qp_get_scqn(struct mlx4_qp_context *qpc)
2069{
2070 return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2071}
2072
2073static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2074{
2075 return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2076}
2077
54679e14
JM
2078static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2079 struct mlx4_qp_context *context)
2080{
2081 u32 qpn = vhcr->in_modifier & 0xffffff;
2082 u32 qkey = 0;
2083
2084 if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2085 return;
2086
2087 /* adjust qkey in qp context */
2088 context->qkey = cpu_to_be32(qkey);
2089}
2090
c82e9aa0
EC
2091int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2092 struct mlx4_vhcr *vhcr,
2093 struct mlx4_cmd_mailbox *inbox,
2094 struct mlx4_cmd_mailbox *outbox,
2095 struct mlx4_cmd_info *cmd)
2096{
2097 int err;
2098 int qpn = vhcr->in_modifier & 0x7fffff;
2099 struct res_mtt *mtt;
2100 struct res_qp *qp;
2101 struct mlx4_qp_context *qpc = inbox->buf + 8;
2b8fb286 2102 int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
c82e9aa0
EC
2103 int mtt_size = qp_get_mtt_size(qpc);
2104 struct res_cq *rcq;
2105 struct res_cq *scq;
2106 int rcqn = qp_get_rcqn(qpc);
2107 int scqn = qp_get_scqn(qpc);
2108 u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2109 int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2110 struct res_srq *srq;
2111 int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2112
2113 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2114 if (err)
2115 return err;
2116 qp->local_qpn = local_qpn;
2117
2b8fb286 2118 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
c82e9aa0
EC
2119 if (err)
2120 goto ex_abort;
2121
2122 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2123 if (err)
2124 goto ex_put_mtt;
2125
c82e9aa0
EC
2126 err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2127 if (err)
2128 goto ex_put_mtt;
2129
2130 if (scqn != rcqn) {
2131 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2132 if (err)
2133 goto ex_put_rcq;
2134 } else
2135 scq = rcq;
2136
2137 if (use_srq) {
2138 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2139 if (err)
2140 goto ex_put_scq;
2141 }
2142
54679e14
JM
2143 adjust_proxy_tun_qkey(dev, vhcr, qpc);
2144 update_pkey_index(dev, slave, inbox);
c82e9aa0
EC
2145 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2146 if (err)
2147 goto ex_put_srq;
2148 atomic_inc(&mtt->ref_count);
2149 qp->mtt = mtt;
2150 atomic_inc(&rcq->ref_count);
2151 qp->rcq = rcq;
2152 atomic_inc(&scq->ref_count);
2153 qp->scq = scq;
2154
2155 if (scqn != rcqn)
2156 put_res(dev, slave, scqn, RES_CQ);
2157
2158 if (use_srq) {
2159 atomic_inc(&srq->ref_count);
2160 put_res(dev, slave, srqn, RES_SRQ);
2161 qp->srq = srq;
2162 }
2163 put_res(dev, slave, rcqn, RES_CQ);
2b8fb286 2164 put_res(dev, slave, mtt_base, RES_MTT);
c82e9aa0
EC
2165 res_end_move(dev, slave, RES_QP, qpn);
2166
2167 return 0;
2168
2169ex_put_srq:
2170 if (use_srq)
2171 put_res(dev, slave, srqn, RES_SRQ);
2172ex_put_scq:
2173 if (scqn != rcqn)
2174 put_res(dev, slave, scqn, RES_CQ);
2175ex_put_rcq:
2176 put_res(dev, slave, rcqn, RES_CQ);
2177ex_put_mtt:
2b8fb286 2178 put_res(dev, slave, mtt_base, RES_MTT);
c82e9aa0
EC
2179ex_abort:
2180 res_abort_move(dev, slave, RES_QP, qpn);
2181
2182 return err;
2183}
2184
2b8fb286 2185static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
c82e9aa0
EC
2186{
2187 return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2188}
2189
2190static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2191{
2192 int log_eq_size = eqc->log_eq_size & 0x1f;
2193 int page_shift = (eqc->log_page_size & 0x3f) + 12;
2194
2195 if (log_eq_size + 5 < page_shift)
2196 return 1;
2197
2198 return 1 << (log_eq_size + 5 - page_shift);
2199}
2200
2b8fb286 2201static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
c82e9aa0
EC
2202{
2203 return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2204}
2205
2206static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2207{
2208 int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2209 int page_shift = (cqc->log_page_size & 0x3f) + 12;
2210
2211 if (log_cq_size + 5 < page_shift)
2212 return 1;
2213
2214 return 1 << (log_cq_size + 5 - page_shift);
2215}
2216
2217int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2218 struct mlx4_vhcr *vhcr,
2219 struct mlx4_cmd_mailbox *inbox,
2220 struct mlx4_cmd_mailbox *outbox,
2221 struct mlx4_cmd_info *cmd)
2222{
2223 int err;
2224 int eqn = vhcr->in_modifier;
2225 int res_id = (slave << 8) | eqn;
2226 struct mlx4_eq_context *eqc = inbox->buf;
2b8fb286 2227 int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
c82e9aa0
EC
2228 int mtt_size = eq_get_mtt_size(eqc);
2229 struct res_eq *eq;
2230 struct res_mtt *mtt;
2231
2232 err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2233 if (err)
2234 return err;
2235 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2236 if (err)
2237 goto out_add;
2238
2239 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2240 if (err)
2241 goto out_move;
2242
2243 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2244 if (err)
2245 goto out_put;
2246
2247 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2248 if (err)
2249 goto out_put;
2250
2251 atomic_inc(&mtt->ref_count);
2252 eq->mtt = mtt;
2253 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2254 res_end_move(dev, slave, RES_EQ, res_id);
2255 return 0;
2256
2257out_put:
2258 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2259out_move:
2260 res_abort_move(dev, slave, RES_EQ, res_id);
2261out_add:
2262 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2263 return err;
2264}
2265
2266static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2267 int len, struct res_mtt **res)
2268{
2269 struct mlx4_priv *priv = mlx4_priv(dev);
2270 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2271 struct res_mtt *mtt;
2272 int err = -EINVAL;
2273
2274 spin_lock_irq(mlx4_tlock(dev));
2275 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2276 com.list) {
2277 if (!check_mtt_range(dev, slave, start, len, mtt)) {
2278 *res = mtt;
2279 mtt->com.from_state = mtt->com.state;
2280 mtt->com.state = RES_MTT_BUSY;
2281 err = 0;
2282 break;
2283 }
2284 }
2285 spin_unlock_irq(mlx4_tlock(dev));
2286
2287 return err;
2288}
2289
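/*
 * verify_qp_parameters() below screens QP state transitions requested by
 * a slave.  For RC/UC transitions that may rewrite an address path, a
 * non-master function may only use GID index 0 in the primary/alternate
 * path, since per-slave GIDs are virtualized by the master.
 */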
2290static int verify_qp_parameters(struct mlx4_dev *dev,
2291 struct mlx4_cmd_mailbox *inbox,
2292 enum qp_transition transition, u8 slave)
2293{
2294 u32 qp_type;
2295 struct mlx4_qp_context *qp_ctx;
2296 enum mlx4_qp_optpar optpar;
2297
2298 qp_ctx = inbox->buf + 8;
2299 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2300 optpar = be32_to_cpu(*(__be32 *) inbox->buf);
2301
2302 switch (qp_type) {
2303 case MLX4_QP_ST_RC:
2304 case MLX4_QP_ST_UC:
2305 switch (transition) {
2306 case QP_TRANS_INIT2RTR:
2307 case QP_TRANS_RTR2RTS:
2308 case QP_TRANS_RTS2RTS:
2309 case QP_TRANS_SQD2SQD:
2310 case QP_TRANS_SQD2RTS:
2311 if (slave != mlx4_master_func_num(dev))
2312 /* slaves have only gid index 0 */
2313 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
2314 if (qp_ctx->pri_path.mgid_index)
2315 return -EINVAL;
2316 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
2317 if (qp_ctx->alt_path.mgid_index)
2318 return -EINVAL;
2319 break;
2320 default:
2321 break;
2322 }
2323
2324 break;
2325 default:
2326 break;
2327 }
2328
2329 return 0;
2330}
2331
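/*
 * WRITE_MTT mailbox layout as consumed below: page_list[0] carries the
 * starting MTT offset and the page addresses begin at index 2.  The
 * wrapper first verifies that the whole range lies inside an MTT segment
 * owned by the slave, then replays the write through the software path
 * (__mlx4_write_mtt) after clearing the low bit of each address.
 */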
2332int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
2333 struct mlx4_vhcr *vhcr,
2334 struct mlx4_cmd_mailbox *inbox,
2335 struct mlx4_cmd_mailbox *outbox,
2336 struct mlx4_cmd_info *cmd)
2337{
2338 struct mlx4_mtt mtt;
2339 __be64 *page_list = inbox->buf;
2340 u64 *pg_list = (u64 *)page_list;
2341 int i;
2342 struct res_mtt *rmtt = NULL;
2343 int start = be64_to_cpu(page_list[0]);
2344 int npages = vhcr->in_modifier;
2345 int err;
2346
2347 err = get_containing_mtt(dev, slave, start, npages, &rmtt);
2348 if (err)
2349 return err;
2350
2351 /* Call the SW implementation of write_mtt:
2352 * - Prepare a dummy mtt struct
2353 * - Translate inbox contents to simple addresses in host endianness */
2354 mtt.offset = 0; /* TBD: offset is not handled here, since it is
2355 not actually used by this path */
2356 mtt.order = 0;
2357 mtt.page_shift = 0;
2358 for (i = 0; i < npages; ++i)
2359 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
2360
2361 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
2362 ((u64 *)page_list + 2));
2363
2364 if (rmtt)
2365 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
2366
2367 return err;
2368}
2369
2370int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2371 struct mlx4_vhcr *vhcr,
2372 struct mlx4_cmd_mailbox *inbox,
2373 struct mlx4_cmd_mailbox *outbox,
2374 struct mlx4_cmd_info *cmd)
2375{
2376 int eqn = vhcr->in_modifier;
2377 int res_id = eqn | (slave << 8);
2378 struct res_eq *eq;
2379 int err;
2380
2381 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
2382 if (err)
2383 return err;
2384
2385 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
2386 if (err)
2387 goto ex_abort;
2388
2389 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2390 if (err)
2391 goto ex_put;
2392
2393 atomic_dec(&eq->mtt->ref_count);
2394 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2395 res_end_move(dev, slave, RES_EQ, res_id);
2396 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2397
2398 return 0;
2399
2400ex_put:
2401 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2402ex_abort:
2403 res_abort_move(dev, slave, RES_EQ, res_id);
2404
2405 return err;
2406}
2407
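/*
 * mlx4_GEN_EQE() forwards an asynchronous event to a slave by injecting
 * an EQE into the event EQ that slave registered for this event type.
 * The 28-byte EQE payload is copied into a mailbox and delivered via the
 * GEN_EQE command; in_modifier carries the slave number in the low byte
 * and the target EQN in bits 16-23.
 */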
2408int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2409{
2410 struct mlx4_priv *priv = mlx4_priv(dev);
2411 struct mlx4_slave_event_eq_info *event_eq;
2412 struct mlx4_cmd_mailbox *mailbox;
2413 u32 in_modifier = 0;
2414 int err;
2415 int res_id;
2416 struct res_eq *req;
2417
2418 if (!priv->mfunc.master.slave_state)
2419 return -EINVAL;
2420
2421 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
2422
2423 /* Create the event only if the slave is registered */
2424 if (event_eq->eqn < 0)
2425 return 0;
2426
2427 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2428 res_id = (slave << 8) | event_eq->eqn;
2429 err = get_res(dev, slave, res_id, RES_EQ, &req);
2430 if (err)
2431 goto unlock;
2432
2433 if (req->com.from_state != RES_EQ_HW) {
2434 err = -EINVAL;
2435 goto put;
2436 }
2437
2438 mailbox = mlx4_alloc_cmd_mailbox(dev);
2439 if (IS_ERR(mailbox)) {
2440 err = PTR_ERR(mailbox);
2441 goto put;
2442 }
2443
2444 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
2445 ++event_eq->token;
2446 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
2447 }
2448
2449 memcpy(mailbox->buf, (u8 *) eqe, 28);
2450
2451 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
2452
2453 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
2454 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
2455 MLX4_CMD_NATIVE);
2456
2457 put_res(dev, slave, res_id, RES_EQ);
2458 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2459 mlx4_free_cmd_mailbox(dev, mailbox);
2460 return err;
2461
2462put:
2463 put_res(dev, slave, res_id, RES_EQ);
2464
2465unlock:
2466 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2467 return err;
2468}
2469
2470int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
2471 struct mlx4_vhcr *vhcr,
2472 struct mlx4_cmd_mailbox *inbox,
2473 struct mlx4_cmd_mailbox *outbox,
2474 struct mlx4_cmd_info *cmd)
2475{
2476 int eqn = vhcr->in_modifier;
2477 int res_id = eqn | (slave << 8);
2478 struct res_eq *eq;
2479 int err;
2480
2481 err = get_res(dev, slave, res_id, RES_EQ, &eq);
2482 if (err)
2483 return err;
2484
2485 if (eq->com.from_state != RES_EQ_HW) {
2486 err = -EINVAL;
2487 goto ex_put;
2488 }
2489
2490 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2491
2492ex_put:
2493 put_res(dev, slave, res_id, RES_EQ);
2494 return err;
2495}
2496
2497int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2498 struct mlx4_vhcr *vhcr,
2499 struct mlx4_cmd_mailbox *inbox,
2500 struct mlx4_cmd_mailbox *outbox,
2501 struct mlx4_cmd_info *cmd)
2502{
2503 int err;
2504 int cqn = vhcr->in_modifier;
2505 struct mlx4_cq_context *cqc = inbox->buf;
2506 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2507 struct res_cq *cq;
2508 struct res_mtt *mtt;
2509
2510 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
2511 if (err)
2512 return err;
2513 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2514 if (err)
2515 goto out_move;
2516 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2517 if (err)
2518 goto out_put;
2519 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2520 if (err)
2521 goto out_put;
2522 atomic_inc(&mtt->ref_count);
2523 cq->mtt = mtt;
2524 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2525 res_end_move(dev, slave, RES_CQ, cqn);
2526 return 0;
2527
2528out_put:
2529 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2530out_move:
2531 res_abort_move(dev, slave, RES_CQ, cqn);
2532 return err;
2533}
2534
2535int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2536 struct mlx4_vhcr *vhcr,
2537 struct mlx4_cmd_mailbox *inbox,
2538 struct mlx4_cmd_mailbox *outbox,
2539 struct mlx4_cmd_info *cmd)
2540{
2541 int err;
2542 int cqn = vhcr->in_modifier;
2543 struct res_cq *cq;
2544
2545 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
2546 if (err)
2547 return err;
2548 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2549 if (err)
2550 goto out_move;
2551 atomic_dec(&cq->mtt->ref_count);
2552 res_end_move(dev, slave, RES_CQ, cqn);
2553 return 0;
2554
2555out_move:
2556 res_abort_move(dev, slave, RES_CQ, cqn);
2557 return err;
2558}
2559
2560int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2561 struct mlx4_vhcr *vhcr,
2562 struct mlx4_cmd_mailbox *inbox,
2563 struct mlx4_cmd_mailbox *outbox,
2564 struct mlx4_cmd_info *cmd)
2565{
2566 int cqn = vhcr->in_modifier;
2567 struct res_cq *cq;
2568 int err;
2569
2570 err = get_res(dev, slave, cqn, RES_CQ, &cq);
2571 if (err)
2572 return err;
2573
2574 if (cq->com.from_state != RES_CQ_HW)
2575 goto ex_put;
2576
2577 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2578ex_put:
2579 put_res(dev, slave, cqn, RES_CQ);
2580
2581 return err;
2582}
2583
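/*
 * MODIFY_CQ with op_modifier == 0 is a CQ resize.  handle_resize() below
 * swaps the tracked MTT: it verifies the CQ still points at its original
 * MTT, validates the new MTT range given in the mailbox, and on success
 * moves the reference count from the old MTT to the new one.
 */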
2584static int handle_resize(struct mlx4_dev *dev, int slave,
2585 struct mlx4_vhcr *vhcr,
2586 struct mlx4_cmd_mailbox *inbox,
2587 struct mlx4_cmd_mailbox *outbox,
2588 struct mlx4_cmd_info *cmd,
2589 struct res_cq *cq)
2590{
2591 int err;
2592 struct res_mtt *orig_mtt;
2593 struct res_mtt *mtt;
2594 struct mlx4_cq_context *cqc = inbox->buf;
2595 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2596
2597 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
2598 if (err)
2599 return err;
2600
2601 if (orig_mtt != cq->mtt) {
2602 err = -EINVAL;
2603 goto ex_put;
2604 }
2605
2606 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2607 if (err)
2608 goto ex_put;
2609
2610 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2611 if (err)
2612 goto ex_put1;
2613 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2614 if (err)
2615 goto ex_put1;
2616 atomic_dec(&orig_mtt->ref_count);
2617 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2618 atomic_inc(&mtt->ref_count);
2619 cq->mtt = mtt;
2620 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2621 return 0;
2622
2623ex_put1:
2624 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2625ex_put:
2626 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2627
2628 return err;
2629
2630}
2631
2632int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2633 struct mlx4_vhcr *vhcr,
2634 struct mlx4_cmd_mailbox *inbox,
2635 struct mlx4_cmd_mailbox *outbox,
2636 struct mlx4_cmd_info *cmd)
2637{
2638 int cqn = vhcr->in_modifier;
2639 struct res_cq *cq;
2640 int err;
2641
2642 err = get_res(dev, slave, cqn, RES_CQ, &cq);
2643 if (err)
2644 return err;
2645
2646 if (cq->com.from_state != RES_CQ_HW)
2647 goto ex_put;
2648
2649 if (vhcr->op_modifier == 0) {
2650 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
2651 goto ex_put;
2652 }
2653
2654 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2655ex_put:
2656 put_res(dev, slave, cqn, RES_CQ);
2657
2658 return err;
2659}
2660
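/*
 * srq_get_mtt_size(): an SRQ buffer spans (1 << log_srq_size) WQEs of
 * 16 << log_rq_stride bytes each (hence the "+ 4").  A hedged,
 * illustrative example with assumed values: 1024 WQEs of 64 bytes with
 * 4 KB pages -> 1 << (10 + 2 + 4 - 12) = 16 MTT entries.
 */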
2661static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
2662{
2663 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
2664 int log_rq_stride = srqc->logstride & 7;
2665 int page_shift = (srqc->log_page_size & 0x3f) + 12;
2666
2667 if (log_srq_size + log_rq_stride + 4 < page_shift)
2668 return 1;
2669
2670 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
2671}
2672
2673int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2674 struct mlx4_vhcr *vhcr,
2675 struct mlx4_cmd_mailbox *inbox,
2676 struct mlx4_cmd_mailbox *outbox,
2677 struct mlx4_cmd_info *cmd)
2678{
2679 int err;
2680 int srqn = vhcr->in_modifier;
2681 struct res_mtt *mtt;
2682 struct res_srq *srq;
2683 struct mlx4_srq_context *srqc = inbox->buf;
2684 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
2685
2686 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
2687 return -EINVAL;
2688
2689 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
2690 if (err)
2691 return err;
2692 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2693 if (err)
2694 goto ex_abort;
2695 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
2696 mtt);
2697 if (err)
2698 goto ex_put_mtt;
2699
2700 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2701 if (err)
2702 goto ex_put_mtt;
2703
2704 atomic_inc(&mtt->ref_count);
2705 srq->mtt = mtt;
2706 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2707 res_end_move(dev, slave, RES_SRQ, srqn);
2708 return 0;
2709
2710ex_put_mtt:
2711 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2712ex_abort:
2713 res_abort_move(dev, slave, RES_SRQ, srqn);
2714
2715 return err;
2716}
2717
2718int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2719 struct mlx4_vhcr *vhcr,
2720 struct mlx4_cmd_mailbox *inbox,
2721 struct mlx4_cmd_mailbox *outbox,
2722 struct mlx4_cmd_info *cmd)
2723{
2724 int err;
2725 int srqn = vhcr->in_modifier;
2726 struct res_srq *srq;
2727
2728 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
2729 if (err)
2730 return err;
2731 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2732 if (err)
2733 goto ex_abort;
2734 atomic_dec(&srq->mtt->ref_count);
2735 if (srq->cq)
2736 atomic_dec(&srq->cq->ref_count);
2737 res_end_move(dev, slave, RES_SRQ, srqn);
2738
2739 return 0;
2740
2741ex_abort:
2742 res_abort_move(dev, slave, RES_SRQ, srqn);
2743
2744 return err;
2745}
2746
2747int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2748 struct mlx4_vhcr *vhcr,
2749 struct mlx4_cmd_mailbox *inbox,
2750 struct mlx4_cmd_mailbox *outbox,
2751 struct mlx4_cmd_info *cmd)
2752{
2753 int err;
2754 int srqn = vhcr->in_modifier;
2755 struct res_srq *srq;
2756
2757 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2758 if (err)
2759 return err;
2760 if (srq->com.from_state != RES_SRQ_HW) {
2761 err = -EBUSY;
2762 goto out;
2763 }
2764 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2765out:
2766 put_res(dev, slave, srqn, RES_SRQ);
2767 return err;
2768}
2769
2770int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2771 struct mlx4_vhcr *vhcr,
2772 struct mlx4_cmd_mailbox *inbox,
2773 struct mlx4_cmd_mailbox *outbox,
2774 struct mlx4_cmd_info *cmd)
2775{
2776 int err;
2777 int srqn = vhcr->in_modifier;
2778 struct res_srq *srq;
2779
2780 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2781 if (err)
2782 return err;
2783
2784 if (srq->com.from_state != RES_SRQ_HW) {
2785 err = -EBUSY;
2786 goto out;
2787 }
2788
2789 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2790out:
2791 put_res(dev, slave, srqn, RES_SRQ);
2792 return err;
2793}
2794
2795int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
2796 struct mlx4_vhcr *vhcr,
2797 struct mlx4_cmd_mailbox *inbox,
2798 struct mlx4_cmd_mailbox *outbox,
2799 struct mlx4_cmd_info *cmd)
2800{
2801 int err;
2802 int qpn = vhcr->in_modifier & 0x7fffff;
2803 struct res_qp *qp;
2804
2805 err = get_res(dev, slave, qpn, RES_QP, &qp);
2806 if (err)
2807 return err;
2808 if (qp->com.from_state != RES_QP_HW) {
2809 err = -EBUSY;
2810 goto out;
2811 }
2812
2813 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2814out:
2815 put_res(dev, slave, qpn, RES_QP);
2816 return err;
2817}
2818
2819int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2820 struct mlx4_vhcr *vhcr,
2821 struct mlx4_cmd_mailbox *inbox,
2822 struct mlx4_cmd_mailbox *outbox,
2823 struct mlx4_cmd_info *cmd)
2824{
2825 struct mlx4_qp_context *context = inbox->buf + 8;
2826 adjust_proxy_tun_qkey(dev, vhcr, context);
2827 update_pkey_index(dev, slave, inbox);
2828 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2829}
2830
2831int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
2832 struct mlx4_vhcr *vhcr,
2833 struct mlx4_cmd_mailbox *inbox,
2834 struct mlx4_cmd_mailbox *outbox,
2835 struct mlx4_cmd_info *cmd)
2836{
2837 int err;
2838 struct mlx4_qp_context *qpc = inbox->buf + 8;
2839
2840 err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
2841 if (err)
2842 return err;
2843
2844 update_pkey_index(dev, slave, inbox);
2845 update_gid(dev, inbox, (u8)slave);
2846 adjust_proxy_tun_qkey(dev, vhcr, qpc);
2847 err = update_vport_qp_param(dev, inbox, slave);
2848 if (err)
2849 return err;
2850
2851 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2852}
2853
2854int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2855 struct mlx4_vhcr *vhcr,
2856 struct mlx4_cmd_mailbox *inbox,
2857 struct mlx4_cmd_mailbox *outbox,
2858 struct mlx4_cmd_info *cmd)
2859{
2860 int err;
2861 struct mlx4_qp_context *context = inbox->buf + 8;
2862
2863 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
2864 if (err)
2865 return err;
2866
2867 update_pkey_index(dev, slave, inbox);
2868 update_gid(dev, inbox, (u8)slave);
2869 adjust_proxy_tun_qkey(dev, vhcr, context);
2870 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2871}
2872
2873int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2874 struct mlx4_vhcr *vhcr,
2875 struct mlx4_cmd_mailbox *inbox,
2876 struct mlx4_cmd_mailbox *outbox,
2877 struct mlx4_cmd_info *cmd)
2878{
2879 int err;
2880 struct mlx4_qp_context *context = inbox->buf + 8;
2881
2882 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
2883 if (err)
2884 return err;
2885
2886 update_pkey_index(dev, slave, inbox);
2887 update_gid(dev, inbox, (u8)slave);
2888 adjust_proxy_tun_qkey(dev, vhcr, context);
2889 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2890}
2891
2892
2893int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2894 struct mlx4_vhcr *vhcr,
2895 struct mlx4_cmd_mailbox *inbox,
2896 struct mlx4_cmd_mailbox *outbox,
2897 struct mlx4_cmd_info *cmd)
2898{
2899 struct mlx4_qp_context *context = inbox->buf + 8;
2900 adjust_proxy_tun_qkey(dev, vhcr, context);
2901 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2902}
2903
2904int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
2905 struct mlx4_vhcr *vhcr,
2906 struct mlx4_cmd_mailbox *inbox,
2907 struct mlx4_cmd_mailbox *outbox,
2908 struct mlx4_cmd_info *cmd)
2909{
2910 int err;
2911 struct mlx4_qp_context *context = inbox->buf + 8;
2912
2913 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
2914 if (err)
2915 return err;
2916
2917 adjust_proxy_tun_qkey(dev, vhcr, context);
2918 update_gid(dev, inbox, (u8)slave);
2919 update_pkey_index(dev, slave, inbox);
2920 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2921}
2922
2923int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2924 struct mlx4_vhcr *vhcr,
2925 struct mlx4_cmd_mailbox *inbox,
2926 struct mlx4_cmd_mailbox *outbox,
2927 struct mlx4_cmd_info *cmd)
2928{
2929 int err;
2930 struct mlx4_qp_context *context = inbox->buf + 8;
2931
2932 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
2933 if (err)
2934 return err;
2935
2936 adjust_proxy_tun_qkey(dev, vhcr, context);
2937 update_gid(dev, inbox, (u8)slave);
2938 update_pkey_index(dev, slave, inbox);
2939 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2940}
2941
2942int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
2943 struct mlx4_vhcr *vhcr,
2944 struct mlx4_cmd_mailbox *inbox,
2945 struct mlx4_cmd_mailbox *outbox,
2946 struct mlx4_cmd_info *cmd)
2947{
2948 int err;
2949 int qpn = vhcr->in_modifier & 0x7fffff;
2950 struct res_qp *qp;
2951
2952 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
2953 if (err)
2954 return err;
2955 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2956 if (err)
2957 goto ex_abort;
2958
2959 atomic_dec(&qp->mtt->ref_count);
2960 atomic_dec(&qp->rcq->ref_count);
2961 atomic_dec(&qp->scq->ref_count);
2962 if (qp->srq)
2963 atomic_dec(&qp->srq->ref_count);
2964 res_end_move(dev, slave, RES_QP, qpn);
2965 return 0;
2966
2967ex_abort:
2968 res_abort_move(dev, slave, RES_QP, qpn);
2969
2970 return err;
2971}
2972
2973static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
2974 struct res_qp *rqp, u8 *gid)
2975{
2976 struct res_gid *res;
2977
2978 list_for_each_entry(res, &rqp->mcg_list, list) {
2979 if (!memcmp(res->gid, gid, 16))
2980 return res;
2981 }
2982 return NULL;
2983}
2984
2985static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2986 u8 *gid, enum mlx4_protocol prot,
2987 enum mlx4_steer_type steer, u64 reg_id)
2988{
2989 struct res_gid *res;
2990 int err;
2991
2992 res = kzalloc(sizeof *res, GFP_KERNEL);
2993 if (!res)
2994 return -ENOMEM;
2995
2996 spin_lock_irq(&rqp->mcg_spl);
2997 if (find_gid(dev, slave, rqp, gid)) {
2998 kfree(res);
2999 err = -EEXIST;
3000 } else {
3001 memcpy(res->gid, gid, 16);
3002 res->prot = prot;
3003 res->steer = steer;
3004 res->reg_id = reg_id;
3005 list_add_tail(&res->list, &rqp->mcg_list);
3006 err = 0;
3007 }
3008 spin_unlock_irq(&rqp->mcg_spl);
3009
3010 return err;
3011}
3012
3013static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3014 u8 *gid, enum mlx4_protocol prot,
3015 enum mlx4_steer_type steer, u64 *reg_id)
3016{
3017 struct res_gid *res;
3018 int err;
3019
3020 spin_lock_irq(&rqp->mcg_spl);
3021 res = find_gid(dev, slave, rqp, gid);
3022 if (!res || res->prot != prot || res->steer != steer)
3023 err = -EINVAL;
3024 else {
3025 *reg_id = res->reg_id;
3026 list_del(&res->list);
3027 kfree(res);
3028 err = 0;
3029 }
3030 spin_unlock_irq(&rqp->mcg_spl);
3031
3032 return err;
3033}
3034
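/*
 * Multicast attach/detach is dispatched on the device steering mode:
 * device-managed flow steering (DMFS) turns the request into a flow rule
 * and hands back a reg_id used later for detach, while B0 steering goes
 * through the classic mlx4_qp_attach/detach_common path keyed by GID and
 * steer type.
 */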
3035static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3036 int block_loopback, enum mlx4_protocol prot,
3037 enum mlx4_steer_type type, u64 *reg_id)
3038{
3039 switch (dev->caps.steering_mode) {
3040 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3041 return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5],
3042 block_loopback, prot,
3043 reg_id);
3044 case MLX4_STEERING_MODE_B0:
3045 return mlx4_qp_attach_common(dev, qp, gid,
3046 block_loopback, prot, type);
3047 default:
3048 return -EINVAL;
3049 }
3050}
3051
3052static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3053 enum mlx4_protocol prot, enum mlx4_steer_type type,
3054 u64 reg_id)
3055{
3056 switch (dev->caps.steering_mode) {
3057 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3058 return mlx4_flow_detach(dev, reg_id);
3059 case MLX4_STEERING_MODE_B0:
3060 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
3061 default:
3062 return -EINVAL;
3063 }
3064}
3065
3066int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3067 struct mlx4_vhcr *vhcr,
3068 struct mlx4_cmd_mailbox *inbox,
3069 struct mlx4_cmd_mailbox *outbox,
3070 struct mlx4_cmd_info *cmd)
3071{
3072 struct mlx4_qp qp; /* dummy for calling attach/detach */
3073 u8 *gid = inbox->buf;
3074 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
3075 int err;
3076 int qpn;
3077 struct res_qp *rqp;
3078 u64 reg_id = 0;
3079 int attach = vhcr->op_modifier;
3080 int block_loopback = vhcr->in_modifier >> 31;
3081 u8 steer_type_mask = 2;
3082 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
3083
3084 qpn = vhcr->in_modifier & 0xffffff;
3085 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3086 if (err)
3087 return err;
3088
3089 qp.qpn = qpn;
3090 if (attach) {
3091 err = qp_attach(dev, &qp, gid, block_loopback, prot,
3092 type, &reg_id);
3093 if (err) {
3094 pr_err("Failed to attach rule to qp 0x%x\n", qpn);
3095 goto ex_put;
3096 }
3097 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
3098 if (err)
3099 goto ex_detach;
3100 } else {
3101 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3102 if (err)
3103 goto ex_put;
3104
3105 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3106 if (err)
3107 pr_err("Failed to detach rule from qp 0x%x reg_id = 0x%llx\n",
3108 qpn, reg_id);
3109 }
3110 put_res(dev, slave, qpn, RES_QP);
3111 return err;
3112
3113ex_detach:
3114 qp_detach(dev, &qp, gid, prot, type, reg_id);
3115ex_put:
3116 put_res(dev, slave, qpn, RES_QP);
3117 return err;
3118}
3119
3120/*
3121 * MAC validation for Flow Steering rules.
3122 * VF can attach rules only with a mac address which is assigned to it.
3123 */
3124static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3125 struct list_head *rlist)
3126{
3127 struct mac_res *res, *tmp;
3128 __be64 be_mac;
3129
3130 /* make sure it isn't a multicast or broadcast mac */
3131 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3132 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3133 list_for_each_entry_safe(res, tmp, rlist, list) {
3134 be_mac = cpu_to_be64(res->mac << 16);
3135 if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
3136 return 0;
3137 }
3138 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
3139 eth_header->eth.dst_mac, slave);
3140 return -EINVAL;
3141 }
3142 return 0;
3143}
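/*
 * Note on the MAC comparison above (also used by add_eth_header() below):
 * the tracker stores MACs as 48-bit integers, so cpu_to_be64(mac << 16)
 * places the six address bytes at the start of the __be64, which is why
 * a memcmp() of ETH_ALEN bytes against the packed dst_mac field works.
 */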
3144
3145/*
3146 * In case of missing eth header, append eth header with a MAC address
3147 * assigned to the VF.
3148 */
3149static int add_eth_header(struct mlx4_dev *dev, int slave,
3150 struct mlx4_cmd_mailbox *inbox,
3151 struct list_head *rlist, int header_id)
3152{
3153 struct mac_res *res, *tmp;
3154 u8 port;
3155 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3156 struct mlx4_net_trans_rule_hw_eth *eth_header;
3157 struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3158 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3159 __be64 be_mac = 0;
3160 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3161
3162 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3163 port = ctrl->port;
3164 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3165
3166 /* Clear a space in the inbox for eth header */
3167 switch (header_id) {
3168 case MLX4_NET_TRANS_RULE_ID_IPV4:
3169 ip_header =
3170 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3171 memmove(ip_header, eth_header,
3172 sizeof(*ip_header) + sizeof(*l4_header));
3173 break;
3174 case MLX4_NET_TRANS_RULE_ID_TCP:
3175 case MLX4_NET_TRANS_RULE_ID_UDP:
3176 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3177 (eth_header + 1);
3178 memmove(l4_header, eth_header, sizeof(*l4_header));
3179 break;
3180 default:
3181 return -EINVAL;
3182 }
3183 list_for_each_entry_safe(res, tmp, rlist, list) {
3184 if (port == res->port) {
3185 be_mac = cpu_to_be64(res->mac << 16);
3186 break;
3187 }
3188 }
3189 if (!be_mac) {
3190 pr_err("Failed adding eth header to FS rule, can't find matching MAC for port %d\n",
3191 port);
3192 return -EINVAL;
3193 }
3194
3195 memset(eth_header, 0, sizeof(*eth_header));
3196 eth_header->size = sizeof(*eth_header) >> 2;
3197 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
3198 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
3199 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
3200
3201 return 0;
3202
3203}
3204
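/*
 * Flow-steering attach on behalf of a slave: the QP named by the rule is
 * looked up and held, the L2 header is validated (or synthesized from the
 * slave's MAC for L3/L4-only rules), the rule is pushed to firmware, and
 * the returned handle is registered as a RES_FS_RULE tied to the QP so it
 * can be torn down if the slave dies.
 */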
3205int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3206 struct mlx4_vhcr *vhcr,
3207 struct mlx4_cmd_mailbox *inbox,
3208 struct mlx4_cmd_mailbox *outbox,
3209 struct mlx4_cmd_info *cmd)
3210{
3211
3212 struct mlx4_priv *priv = mlx4_priv(dev);
3213 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3214 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
3215 int err;
3216 int qpn;
3217 struct res_qp *rqp;
3218 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3219 struct _rule_hw *rule_header;
3220 int header_id;
3221
3222 if (dev->caps.steering_mode !=
3223 MLX4_STEERING_MODE_DEVICE_MANAGED)
3224 return -EOPNOTSUPP;
3225
3226 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3227 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
3228 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3229 if (err) {
3230 pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
3231 return err;
3232 }
3233 rule_header = (struct _rule_hw *)(ctrl + 1);
3234 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
3235
3236 switch (header_id) {
3237 case MLX4_NET_TRANS_RULE_ID_ETH:
3238 if (validate_eth_header_mac(slave, rule_header, rlist)) {
3239 err = -EINVAL;
3240 goto err_put;
3241 }
3242 break;
3243 case MLX4_NET_TRANS_RULE_ID_IB:
3244 break;
3245 case MLX4_NET_TRANS_RULE_ID_IPV4:
3246 case MLX4_NET_TRANS_RULE_ID_TCP:
3247 case MLX4_NET_TRANS_RULE_ID_UDP:
3248 pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
3249 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
3250 err = -EINVAL;
3251 goto err_put;
3252 }
3253 vhcr->in_modifier +=
3254 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
3255 break;
3256 default:
3257 pr_err("Corrupted mailbox.\n");
3258 err = -EINVAL;
3259 goto err_put;
3260 }
3261
3262 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
3263 vhcr->in_modifier, 0,
3264 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
3265 MLX4_CMD_NATIVE);
3266 if (err)
3267 goto err_put;
3268
3269 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
3270 if (err) {
3271 mlx4_err(dev, "Failed to add flow steering resources\n");
3272 /* detach rule*/
3273 mlx4_cmd(dev, vhcr->out_param, 0, 0,
3274 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3275 MLX4_CMD_NATIVE);
3276 goto err_put;
3277 }
3278 atomic_inc(&rqp->ref_count);
3279err_put:
3280 put_res(dev, slave, qpn, RES_QP);
3281 return err;
3282}
3283
3284int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
3285 struct mlx4_vhcr *vhcr,
3286 struct mlx4_cmd_mailbox *inbox,
3287 struct mlx4_cmd_mailbox *outbox,
3288 struct mlx4_cmd_info *cmd)
3289{
3290 int err;
3291 struct res_qp *rqp;
3292 struct res_fs_rule *rrule;
3293
3294 if (dev->caps.steering_mode !=
3295 MLX4_STEERING_MODE_DEVICE_MANAGED)
3296 return -EOPNOTSUPP;
3297
3298 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
3299 if (err)
3300 return err;
3301 /* Release the rule from busy state before removal */
3302 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
3303 err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
3304 if (err)
3305 return err;
3306
3307 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
3308 if (err) {
3309 mlx4_err(dev, "Failed to remove flow steering resources\n");
3310 goto out;
3311 }
3312
3313 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
3314 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3315 MLX4_CMD_NATIVE);
3316 if (!err)
3317 atomic_dec(&rqp->ref_count);
3318out:
3319 put_res(dev, slave, rrule->qpn, RES_QP);
3320 return err;
3321}
3322
3323enum {
3324 BUSY_MAX_RETRIES = 10
3325};
3326
3327int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
3328 struct mlx4_vhcr *vhcr,
3329 struct mlx4_cmd_mailbox *inbox,
3330 struct mlx4_cmd_mailbox *outbox,
3331 struct mlx4_cmd_info *cmd)
3332{
3333 int err;
3334 int index = vhcr->in_modifier & 0xffff;
3335
3336 err = get_res(dev, slave, index, RES_COUNTER, NULL);
3337 if (err)
3338 return err;
3339
3340 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3341 put_res(dev, slave, index, RES_COUNTER);
3342 return err;
3343}
3344
3345static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
3346{
3347 struct res_gid *rgid;
3348 struct res_gid *tmp;
3349 struct mlx4_qp qp; /* dummy for calling attach/detach */
3350
3351 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
3352 switch (dev->caps.steering_mode) {
3353 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3354 mlx4_flow_detach(dev, rgid->reg_id);
3355 break;
3356 case MLX4_STEERING_MODE_B0:
3357 qp.qpn = rqp->local_qpn;
3358 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
3359 rgid->prot, rgid->steer);
3360 break;
3361 }
3362 list_del(&rgid->list);
3363 kfree(rgid);
3364 }
3365}
3366
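/*
 * Slave teardown starts by forcing every tracked resource of a given type
 * into the BUSY state.  move_all_busy() retries for up to five seconds
 * (5 * HZ) so in-flight commands holding a resource can complete;
 * whatever is still busy after that is reported via mlx4_dbg() and
 * reclaimed anyway.
 */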
3367static int _move_all_busy(struct mlx4_dev *dev, int slave,
3368 enum mlx4_resource type, int print)
3369{
3370 struct mlx4_priv *priv = mlx4_priv(dev);
3371 struct mlx4_resource_tracker *tracker =
3372 &priv->mfunc.master.res_tracker;
3373 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
3374 struct res_common *r;
3375 struct res_common *tmp;
3376 int busy;
3377
3378 busy = 0;
3379 spin_lock_irq(mlx4_tlock(dev));
3380 list_for_each_entry_safe(r, tmp, rlist, list) {
3381 if (r->owner == slave) {
3382 if (!r->removing) {
3383 if (r->state == RES_ANY_BUSY) {
3384 if (print)
3385 mlx4_dbg(dev,
3386 "%s id 0x%llx is busy\n",
3387 ResourceType(type),
3388 r->res_id);
3389 ++busy;
3390 } else {
3391 r->from_state = r->state;
3392 r->state = RES_ANY_BUSY;
3393 r->removing = 1;
3394 }
3395 }
3396 }
3397 }
3398 spin_unlock_irq(mlx4_tlock(dev));
3399
3400 return busy;
3401}
3402
3403static int move_all_busy(struct mlx4_dev *dev, int slave,
3404 enum mlx4_resource type)
3405{
3406 unsigned long begin;
3407 int busy;
3408
3409 begin = jiffies;
3410 do {
3411 busy = _move_all_busy(dev, slave, type, 0);
3412 if (time_after(jiffies, begin + 5 * HZ))
3413 break;
3414 if (busy)
3415 cond_resched();
3416 } while (busy);
3417
3418 if (busy)
3419 busy = _move_all_busy(dev, slave, type, 1);
3420
3421 return busy;
3422}
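/*
 * Each rem_slave_*() walker below unwinds a resource one state at a time.
 * A QP found in RES_QP_HW, for example, is first reset with 2RST_QP
 * (dropping its CQ/SRQ/MTT references), then has its ICM freed in
 * RES_QP_MAPPED, and is finally erased from the tracker tree and list
 * once back in RES_QP_RESERVED.
 */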
3423static void rem_slave_qps(struct mlx4_dev *dev, int slave)
3424{
3425 struct mlx4_priv *priv = mlx4_priv(dev);
3426 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3427 struct list_head *qp_list =
3428 &tracker->slave_list[slave].res_list[RES_QP];
3429 struct res_qp *qp;
3430 struct res_qp *tmp;
3431 int state;
3432 u64 in_param;
3433 int qpn;
3434 int err;
3435
3436 err = move_all_busy(dev, slave, RES_QP);
3437 if (err)
3438 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy "
3439 "for slave %d\n", slave);
3440
3441 spin_lock_irq(mlx4_tlock(dev));
3442 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
3443 spin_unlock_irq(mlx4_tlock(dev));
3444 if (qp->com.owner == slave) {
3445 qpn = qp->com.res_id;
3446 detach_qp(dev, slave, qp);
3447 state = qp->com.from_state;
3448 while (state != 0) {
3449 switch (state) {
3450 case RES_QP_RESERVED:
3451 spin_lock_irq(mlx4_tlock(dev));
3452 rb_erase(&qp->com.node,
3453 &tracker->res_tree[RES_QP]);
3454 list_del(&qp->com.list);
3455 spin_unlock_irq(mlx4_tlock(dev));
3456 kfree(qp);
3457 state = 0;
3458 break;
3459 case RES_QP_MAPPED:
3460 if (!valid_reserved(dev, slave, qpn))
3461 __mlx4_qp_free_icm(dev, qpn);
3462 state = RES_QP_RESERVED;
3463 break;
3464 case RES_QP_HW:
3465 in_param = slave;
3466 err = mlx4_cmd(dev, in_param,
3467 qp->local_qpn, 2,
3468 MLX4_CMD_2RST_QP,
3469 MLX4_CMD_TIME_CLASS_A,
3470 MLX4_CMD_NATIVE);
3471 if (err)
3472 mlx4_dbg(dev, "rem_slave_qps: failed"
3473 " to move slave %d qpn %d to"
3474 " reset\n", slave,
3475 qp->local_qpn);
3476 atomic_dec(&qp->rcq->ref_count);
3477 atomic_dec(&qp->scq->ref_count);
3478 atomic_dec(&qp->mtt->ref_count);
3479 if (qp->srq)
3480 atomic_dec(&qp->srq->ref_count);
3481 state = RES_QP_MAPPED;
3482 break;
3483 default:
3484 state = 0;
3485 }
3486 }
3487 }
3488 spin_lock_irq(mlx4_tlock(dev));
3489 }
3490 spin_unlock_irq(mlx4_tlock(dev));
3491}
3492
3493static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
3494{
3495 struct mlx4_priv *priv = mlx4_priv(dev);
3496 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3497 struct list_head *srq_list =
3498 &tracker->slave_list[slave].res_list[RES_SRQ];
3499 struct res_srq *srq;
3500 struct res_srq *tmp;
3501 int state;
3502 u64 in_param;
3503 LIST_HEAD(tlist);
3504 int srqn;
3505 int err;
3506
3507 err = move_all_busy(dev, slave, RES_SRQ);
3508 if (err)
3509 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
3510 "busy for slave %d\n", slave);
3511
3512 spin_lock_irq(mlx4_tlock(dev));
3513 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
3514 spin_unlock_irq(mlx4_tlock(dev));
3515 if (srq->com.owner == slave) {
3516 srqn = srq->com.res_id;
3517 state = srq->com.from_state;
3518 while (state != 0) {
3519 switch (state) {
3520 case RES_SRQ_ALLOCATED:
3521 __mlx4_srq_free_icm(dev, srqn);
3522 spin_lock_irq(mlx4_tlock(dev));
3523 rb_erase(&srq->com.node,
3524 &tracker->res_tree[RES_SRQ]);
3525 list_del(&srq->com.list);
3526 spin_unlock_irq(mlx4_tlock(dev));
3527 kfree(srq);
3528 state = 0;
3529 break;
3530
3531 case RES_SRQ_HW:
3532 in_param = slave;
3533 err = mlx4_cmd(dev, in_param, srqn, 1,
3534 MLX4_CMD_HW2SW_SRQ,
3535 MLX4_CMD_TIME_CLASS_A,
3536 MLX4_CMD_NATIVE);
3537 if (err)
3538 mlx4_dbg(dev, "rem_slave_srqs: failed"
3539 " to move slave %d srq %d to"
3540 " SW ownership\n",
3541 slave, srqn);
3542
3543 atomic_dec(&srq->mtt->ref_count);
3544 if (srq->cq)
3545 atomic_dec(&srq->cq->ref_count);
3546 state = RES_SRQ_ALLOCATED;
3547 break;
3548
3549 default:
3550 state = 0;
3551 }
3552 }
3553 }
3554 spin_lock_irq(mlx4_tlock(dev));
3555 }
3556 spin_unlock_irq(mlx4_tlock(dev));
3557}
3558
3559static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
3560{
3561 struct mlx4_priv *priv = mlx4_priv(dev);
3562 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3563 struct list_head *cq_list =
3564 &tracker->slave_list[slave].res_list[RES_CQ];
3565 struct res_cq *cq;
3566 struct res_cq *tmp;
3567 int state;
3568 u64 in_param;
3569 LIST_HEAD(tlist);
3570 int cqn;
3571 int err;
3572
3573 err = move_all_busy(dev, slave, RES_CQ);
3574 if (err)
3575 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
3576 "busy for slave %d\n", slave);
3577
3578 spin_lock_irq(mlx4_tlock(dev));
3579 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
3580 spin_unlock_irq(mlx4_tlock(dev));
3581 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
3582 cqn = cq->com.res_id;
3583 state = cq->com.from_state;
3584 while (state != 0) {
3585 switch (state) {
3586 case RES_CQ_ALLOCATED:
3587 __mlx4_cq_free_icm(dev, cqn);
3588 spin_lock_irq(mlx4_tlock(dev));
3589 rb_erase(&cq->com.node,
3590 &tracker->res_tree[RES_CQ]);
3591 list_del(&cq->com.list);
3592 spin_unlock_irq(mlx4_tlock(dev));
3593 kfree(cq);
3594 state = 0;
3595 break;
3596
3597 case RES_CQ_HW:
3598 in_param = slave;
3599 err = mlx4_cmd(dev, in_param, cqn, 1,
3600 MLX4_CMD_HW2SW_CQ,
3601 MLX4_CMD_TIME_CLASS_A,
3602 MLX4_CMD_NATIVE);
3603 if (err)
3604 mlx4_dbg(dev, "rem_slave_cqs: failed"
3605 " to move slave %d cq %d to"
3606 " SW ownership\n",
3607 slave, cqn);
3608 atomic_dec(&cq->mtt->ref_count);
3609 state = RES_CQ_ALLOCATED;
3610 break;
3611
3612 default:
3613 state = 0;
3614 }
3615 }
3616 }
3617 spin_lock_irq(mlx4_tlock(dev));
3618 }
3619 spin_unlock_irq(mlx4_tlock(dev));
3620}
3621
3622static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
3623{
3624 struct mlx4_priv *priv = mlx4_priv(dev);
3625 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3626 struct list_head *mpt_list =
3627 &tracker->slave_list[slave].res_list[RES_MPT];
3628 struct res_mpt *mpt;
3629 struct res_mpt *tmp;
3630 int state;
3631 u64 in_param;
3632 LIST_HEAD(tlist);
3633 int mptn;
3634 int err;
3635
3636 err = move_all_busy(dev, slave, RES_MPT);
3637 if (err)
3638 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
3639 "busy for slave %d\n", slave);
3640
3641 spin_lock_irq(mlx4_tlock(dev));
3642 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
3643 spin_unlock_irq(mlx4_tlock(dev));
3644 if (mpt->com.owner == slave) {
3645 mptn = mpt->com.res_id;
3646 state = mpt->com.from_state;
3647 while (state != 0) {
3648 switch (state) {
3649 case RES_MPT_RESERVED:
3650 __mlx4_mpt_release(dev, mpt->key);
3651 spin_lock_irq(mlx4_tlock(dev));
3652 rb_erase(&mpt->com.node,
3653 &tracker->res_tree[RES_MPT]);
3654 list_del(&mpt->com.list);
3655 spin_unlock_irq(mlx4_tlock(dev));
3656 kfree(mpt);
3657 state = 0;
3658 break;
3659
3660 case RES_MPT_MAPPED:
3661 __mlx4_mpt_free_icm(dev, mpt->key);
3662 state = RES_MPT_RESERVED;
3663 break;
3664
3665 case RES_MPT_HW:
3666 in_param = slave;
3667 err = mlx4_cmd(dev, in_param, mptn, 0,
3668 MLX4_CMD_HW2SW_MPT,
3669 MLX4_CMD_TIME_CLASS_A,
3670 MLX4_CMD_NATIVE);
3671 if (err)
3672 mlx4_dbg(dev, "rem_slave_mrs: failed"
3673 " to move slave %d mpt %d to"
3674 " SW ownership\n",
3675 slave, mptn);
3676 if (mpt->mtt)
3677 atomic_dec(&mpt->mtt->ref_count);
3678 state = RES_MPT_MAPPED;
3679 break;
3680 default:
3681 state = 0;
3682 }
3683 }
3684 }
3685 spin_lock_irq(mlx4_tlock(dev));
3686 }
3687 spin_unlock_irq(mlx4_tlock(dev));
3688}
3689
3690static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
3691{
3692 struct mlx4_priv *priv = mlx4_priv(dev);
3693 struct mlx4_resource_tracker *tracker =
3694 &priv->mfunc.master.res_tracker;
3695 struct list_head *mtt_list =
3696 &tracker->slave_list[slave].res_list[RES_MTT];
3697 struct res_mtt *mtt;
3698 struct res_mtt *tmp;
3699 int state;
3700 LIST_HEAD(tlist);
3701 int base;
3702 int err;
3703
3704 err = move_all_busy(dev, slave, RES_MTT);
3705 if (err)
3706 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
3707 "busy for slave %d\n", slave);
3708
3709 spin_lock_irq(mlx4_tlock(dev));
3710 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
3711 spin_unlock_irq(mlx4_tlock(dev));
3712 if (mtt->com.owner == slave) {
3713 base = mtt->com.res_id;
3714 state = mtt->com.from_state;
3715 while (state != 0) {
3716 switch (state) {
3717 case RES_MTT_ALLOCATED:
3718 __mlx4_free_mtt_range(dev, base,
3719 mtt->order);
3720 spin_lock_irq(mlx4_tlock(dev));
3721 rb_erase(&mtt->com.node,
3722 &tracker->res_tree[RES_MTT]);
3723 list_del(&mtt->com.list);
3724 spin_unlock_irq(mlx4_tlock(dev));
3725 kfree(mtt);
3726 state = 0;
3727 break;
3728
3729 default:
3730 state = 0;
3731 }
3732 }
3733 }
3734 spin_lock_irq(mlx4_tlock(dev));
3735 }
3736 spin_unlock_irq(mlx4_tlock(dev));
3737}
3738
3739static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
3740{
3741 struct mlx4_priv *priv = mlx4_priv(dev);
3742 struct mlx4_resource_tracker *tracker =
3743 &priv->mfunc.master.res_tracker;
3744 struct list_head *fs_rule_list =
3745 &tracker->slave_list[slave].res_list[RES_FS_RULE];
3746 struct res_fs_rule *fs_rule;
3747 struct res_fs_rule *tmp;
3748 int state;
3749 u64 base;
3750 int err;
3751
3752 err = move_all_busy(dev, slave, RES_FS_RULE);
3753 if (err)
3754 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
3755 slave);
3756
3757 spin_lock_irq(mlx4_tlock(dev));
3758 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
3759 spin_unlock_irq(mlx4_tlock(dev));
3760 if (fs_rule->com.owner == slave) {
3761 base = fs_rule->com.res_id;
3762 state = fs_rule->com.from_state;
3763 while (state != 0) {
3764 switch (state) {
3765 case RES_FS_RULE_ALLOCATED:
3766 /* detach rule */
3767 err = mlx4_cmd(dev, base, 0, 0,
3768 MLX4_QP_FLOW_STEERING_DETACH,
3769 MLX4_CMD_TIME_CLASS_A,
3770 MLX4_CMD_NATIVE);
3771
3772 spin_lock_irq(mlx4_tlock(dev));
3773 rb_erase(&fs_rule->com.node,
3774 &tracker->res_tree[RES_FS_RULE]);
3775 list_del(&fs_rule->com.list);
3776 spin_unlock_irq(mlx4_tlock(dev));
3777 kfree(fs_rule);
3778 state = 0;
3779 break;
3780
3781 default:
3782 state = 0;
3783 }
3784 }
3785 }
3786 spin_lock_irq(mlx4_tlock(dev));
3787 }
3788 spin_unlock_irq(mlx4_tlock(dev));
3789}
3790
3791static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
3792{
3793 struct mlx4_priv *priv = mlx4_priv(dev);
3794 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3795 struct list_head *eq_list =
3796 &tracker->slave_list[slave].res_list[RES_EQ];
3797 struct res_eq *eq;
3798 struct res_eq *tmp;
3799 int err;
3800 int state;
3801 LIST_HEAD(tlist);
3802 int eqn;
3803 struct mlx4_cmd_mailbox *mailbox;
3804
3805 err = move_all_busy(dev, slave, RES_EQ);
3806 if (err)
3807 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
3808 "busy for slave %d\n", slave);
3809
3810 spin_lock_irq(mlx4_tlock(dev));
3811 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
3812 spin_unlock_irq(mlx4_tlock(dev));
3813 if (eq->com.owner == slave) {
3814 eqn = eq->com.res_id;
3815 state = eq->com.from_state;
3816 while (state != 0) {
3817 switch (state) {
3818 case RES_EQ_RESERVED:
3819 spin_lock_irq(mlx4_tlock(dev));
3820 rb_erase(&eq->com.node,
3821 &tracker->res_tree[RES_EQ]);
3822 list_del(&eq->com.list);
3823 spin_unlock_irq(mlx4_tlock(dev));
3824 kfree(eq);
3825 state = 0;
3826 break;
3827
3828 case RES_EQ_HW:
3829 mailbox = mlx4_alloc_cmd_mailbox(dev);
3830 if (IS_ERR(mailbox)) {
3831 cond_resched();
3832 continue;
3833 }
3834 err = mlx4_cmd_box(dev, slave, 0,
3835 eqn & 0xff, 0,
3836 MLX4_CMD_HW2SW_EQ,
3837 MLX4_CMD_TIME_CLASS_A,
3838 MLX4_CMD_NATIVE);
3839 if (err)
3840 mlx4_dbg(dev, "rem_slave_eqs: failed"
3841 " to move slave %d eqs %d to"
3842 " SW ownership\n", slave, eqn);
3843 mlx4_free_cmd_mailbox(dev, mailbox);
3844 atomic_dec(&eq->mtt->ref_count);
3845 state = RES_EQ_RESERVED;
3846 break;
3847
3848 default:
3849 state = 0;
3850 }
3851 }
3852 }
3853 spin_lock_irq(mlx4_tlock(dev));
3854 }
3855 spin_unlock_irq(mlx4_tlock(dev));
3856}
3857
3858static void rem_slave_counters(struct mlx4_dev *dev, int slave)
3859{
3860 struct mlx4_priv *priv = mlx4_priv(dev);
3861 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3862 struct list_head *counter_list =
3863 &tracker->slave_list[slave].res_list[RES_COUNTER];
3864 struct res_counter *counter;
3865 struct res_counter *tmp;
3866 int err;
3867 int index;
3868
3869 err = move_all_busy(dev, slave, RES_COUNTER);
3870 if (err)
3871 mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
3872 "busy for slave %d\n", slave);
3873
3874 spin_lock_irq(mlx4_tlock(dev));
3875 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
3876 if (counter->com.owner == slave) {
3877 index = counter->com.res_id;
3878 rb_erase(&counter->com.node,
3879 &tracker->res_tree[RES_COUNTER]);
3880 list_del(&counter->com.list);
3881 kfree(counter);
3882 __mlx4_counter_free(dev, index);
3883 }
3884 }
3885 spin_unlock_irq(mlx4_tlock(dev));
3886}
3887
3888static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
3889{
3890 struct mlx4_priv *priv = mlx4_priv(dev);
3891 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3892 struct list_head *xrcdn_list =
3893 &tracker->slave_list[slave].res_list[RES_XRCD];
3894 struct res_xrcdn *xrcd;
3895 struct res_xrcdn *tmp;
3896 int err;
3897 int xrcdn;
3898
3899 err = move_all_busy(dev, slave, RES_XRCD);
3900 if (err)
3901 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
3902 "busy for slave %d\n", slave);
3903
3904 spin_lock_irq(mlx4_tlock(dev));
3905 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
3906 if (xrcd->com.owner == slave) {
3907 xrcdn = xrcd->com.res_id;
3908 rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
3909 list_del(&xrcd->com.list);
3910 kfree(xrcd);
3911 __mlx4_xrcd_free(dev, xrcdn);
3912 }
3913 }
3914 spin_unlock_irq(mlx4_tlock(dev));
3915}
3916
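/*
 * Full cleanup for a departing slave.  Ordering matters: MAC
 * registrations, flow rules and QPs are removed first since they hold
 * references on CQs, SRQs and MTTs; EQs and MTTs follow, and counters
 * and XRC domains are released last, all under the per-slave tracker
 * mutex.
 */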
3917void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3918{
3919 struct mlx4_priv *priv = mlx4_priv(dev);
3920
3921 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3922 /*VLAN*/
3923 rem_slave_macs(dev, slave);
3924 rem_slave_fs_rule(dev, slave);
3925 rem_slave_qps(dev, slave);
3926 rem_slave_srqs(dev, slave);
3927 rem_slave_cqs(dev, slave);
3928 rem_slave_mrs(dev, slave);
3929 rem_slave_eqs(dev, slave);
3930 rem_slave_mtts(dev, slave);
3931 rem_slave_counters(dev, slave);
3932 rem_slave_xrcdns(dev, slave);
3933 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3934}