/*
 * Copyright (c) 2005 Voltaire Inc. All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
 *
 * This Software is licensed under one of the following licenses:
 *
 * 1) under the terms of the "Common Public License 1.0" a copy of which is
 *    available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/cpl.php.
 *
 * 2) under the terms of the "The BSD License" a copy of which is
 *    available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/bsd-license.php.
 *
 * 3) under the terms of the "GNU General Public License (GPL) Version 2" a
 *    copy of which is available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/gpl-license.php.
 *
 * Licensee has the right to choose one of the above licenses.
 *
 * Redistributions of source code must retain the above copyright
 * notice and one of the license notices.
 *
 * Redistributions in binary form must reproduce both the above copyright
 * notice, one of the license notices in the documentation
 * and/or other materials provided with the distribution.
 *
 */

#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>

#include <net/tcp.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15

static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device);

static struct ib_client cma_client = {
        .name   = "cma",
        .add    = cma_add_one,
        .remove = cma_remove_one
};

static struct ib_sa_client sa_client;
static struct rdma_addr_client addr_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static DEFINE_IDR(sdp_ps);
static DEFINE_IDR(tcp_ps);
static DEFINE_IDR(udp_ps);
static DEFINE_IDR(ipoib_ps);
static int next_port;

struct cma_device {
        struct list_head        list;
        struct ib_device        *device;
        struct completion       comp;
        atomic_t                refcount;
        struct list_head        id_list;
};

enum cma_state {
        CMA_IDLE,
        CMA_ADDR_QUERY,
        CMA_ADDR_RESOLVED,
        CMA_ROUTE_QUERY,
        CMA_ROUTE_RESOLVED,
        CMA_CONNECT,
        CMA_DISCONNECT,
        CMA_ADDR_BOUND,
        CMA_LISTEN,
        CMA_DEVICE_REMOVAL,
        CMA_DESTROYING
};

struct rdma_bind_list {
        struct idr              *ps;
        struct hlist_head       owners;
        unsigned short          port;
};

/*
 * Device removal can occur at any time, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in
 * progress, and reporting it after the callback completes.
 */
struct rdma_id_private {
        struct rdma_cm_id       id;

        struct rdma_bind_list   *bind_list;
        struct hlist_node       node;
        struct list_head        list;
        struct list_head        listen_list;
        struct cma_device       *cma_dev;
        struct list_head        mc_list;

        enum cma_state          state;
        spinlock_t              lock;
        struct completion       comp;
        atomic_t                refcount;
        wait_queue_head_t       wait_remove;
        atomic_t                dev_remove;

        int                     backlog;
        int                     timeout_ms;
        struct ib_sa_query      *query;
        int                     query_id;
        union {
                struct ib_cm_id *ib;
                struct iw_cm_id *iw;
        } cm_id;

        u32                     seq_num;
        u32                     qkey;
        u32                     qp_num;
        u8                      srq;
};

struct cma_multicast {
        struct rdma_id_private *id_priv;
        union {
                struct ib_sa_multicast *ib;
        } multicast;
        struct list_head        list;
        void                    *context;
        struct sockaddr         addr;
        u8                      pad[sizeof(struct sockaddr_in6) -
                                    sizeof(struct sockaddr)];
};

struct cma_work {
        struct work_struct      work;
        struct rdma_id_private  *id;
        enum cma_state          old_state;
        enum cma_state          new_state;
        struct rdma_cm_event    event;
};

union cma_ip_addr {
        struct in6_addr ip6;
        struct {
                __u32 pad[3];
                __u32 addr;
        } ip4;
};

struct cma_hdr {
        u8 cma_version;
        u8 ip_version;  /* IP version: 7:4 */
        __u16 port;
        union cma_ip_addr src_addr;
        union cma_ip_addr dst_addr;
};

struct sdp_hh {
        u8 bsdh[16];
        u8 sdp_version; /* Major version: 7:4 */
        u8 ip_version;  /* IP version: 7:4 */
        u8 sdp_specific1[10];
        __u16 port;
        __u16 sdp_specific2;
        union cma_ip_addr src_addr;
        union cma_ip_addr dst_addr;
};

struct sdp_hah {
        u8 bsdh[16];
        u8 sdp_version;
};

#define CMA_VERSION 0x00
#define SDP_MAJ_VERSION 0x2

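/*
 * State accessors: cma_comp() tests the current state, cma_comp_exch()
 * atomically moves from @comp to @exch only if the id is currently in
 * @comp, and cma_exch() unconditionally swaps in a new state, returning
 * the old one.  All take the per-id spinlock so checks and transitions
 * are atomic with respect to each other.
 */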
static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&id_priv->lock, flags);
        ret = (id_priv->state == comp);
        spin_unlock_irqrestore(&id_priv->lock, flags);
        return ret;
}

static int cma_comp_exch(struct rdma_id_private *id_priv,
                         enum cma_state comp, enum cma_state exch)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&id_priv->lock, flags);
        if ((ret = (id_priv->state == comp)))
                id_priv->state = exch;
        spin_unlock_irqrestore(&id_priv->lock, flags);
        return ret;
}

static enum cma_state cma_exch(struct rdma_id_private *id_priv,
                               enum cma_state exch)
{
        unsigned long flags;
        enum cma_state old;

        spin_lock_irqsave(&id_priv->lock, flags);
        old = id_priv->state;
        id_priv->state = exch;
        spin_unlock_irqrestore(&id_priv->lock, flags);
        return old;
}

static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
{
        return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
        hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}

static inline u8 sdp_get_majv(u8 sdp_version)
{
        return sdp_version >> 4;
}

static inline u8 sdp_get_ip_ver(struct sdp_hh *hh)
{
        return hh->ip_version >> 4;
}

static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
{
        hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
}

static inline int cma_is_ud_ps(enum rdma_port_space ps)
{
        return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
}

static void cma_attach_to_dev(struct rdma_id_private *id_priv,
                              struct cma_device *cma_dev)
{
        atomic_inc(&cma_dev->refcount);
        id_priv->cma_dev = cma_dev;
        id_priv->id.device = cma_dev->device;
        list_add_tail(&id_priv->list, &cma_dev->id_list);
}

static inline void cma_deref_dev(struct cma_device *cma_dev)
{
        if (atomic_dec_and_test(&cma_dev->refcount))
                complete(&cma_dev->comp);
}

static void cma_detach_from_dev(struct rdma_id_private *id_priv)
{
        list_del(&id_priv->list);
        cma_deref_dev(id_priv->cma_dev);
        id_priv->cma_dev = NULL;
}

static int cma_set_qkey(struct ib_device *device, u8 port_num,
                        enum rdma_port_space ps,
                        struct rdma_dev_addr *dev_addr, u32 *qkey)
{
        struct ib_sa_mcmember_rec rec;
        int ret = 0;

        switch (ps) {
        case RDMA_PS_UDP:
                *qkey = RDMA_UDP_QKEY;
                break;
        case RDMA_PS_IPOIB:
                ib_addr_get_mgid(dev_addr, &rec.mgid);
                ret = ib_sa_get_mcmember_rec(device, port_num, &rec.mgid, &rec);
                *qkey = be32_to_cpu(rec.qkey);
                break;
        default:
                break;
        }
        return ret;
}

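/*
 * Bind the id to the first registered device that has a port with the
 * id's source GID in its GID cache, deriving the QKEY for UD port
 * spaces along the way.  Callers hold the global 'lock' mutex, since
 * this walks dev_list.
 */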
static int cma_acquire_dev(struct rdma_id_private *id_priv)
{
        struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
        struct cma_device *cma_dev;
        union ib_gid gid;
        int ret = -ENODEV;

        switch (rdma_node_get_transport(dev_addr->dev_type)) {
        case RDMA_TRANSPORT_IB:
                ib_addr_get_sgid(dev_addr, &gid);
                break;
        case RDMA_TRANSPORT_IWARP:
                iw_addr_get_sgid(dev_addr, &gid);
                break;
        default:
                return -ENODEV;
        }

        list_for_each_entry(cma_dev, &dev_list, list) {
                ret = ib_find_cached_gid(cma_dev->device, &gid,
                                         &id_priv->id.port_num, NULL);
                if (!ret) {
                        ret = cma_set_qkey(cma_dev->device,
                                           id_priv->id.port_num,
                                           id_priv->id.ps, dev_addr,
                                           &id_priv->qkey);
                        if (!ret)
                                cma_attach_to_dev(id_priv, cma_dev);
                        break;
                }
        }
        return ret;
}

static void cma_deref_id(struct rdma_id_private *id_priv)
{
        if (atomic_dec_and_test(&id_priv->refcount))
                complete(&id_priv->comp);
}

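/*
 * cma_disable_remove() blocks device-removal reporting while an event
 * callback runs: it bumps dev_remove only if the id is still in the
 * expected state, and fails with -EINVAL otherwise.  cma_enable_remove()
 * drops the count and wakes anyone waiting in the removal path.
 */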
static int cma_disable_remove(struct rdma_id_private *id_priv,
                              enum cma_state state)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&id_priv->lock, flags);
        if (id_priv->state == state) {
                atomic_inc(&id_priv->dev_remove);
                ret = 0;
        } else
                ret = -EINVAL;
        spin_unlock_irqrestore(&id_priv->lock, flags);
        return ret;
}

static void cma_enable_remove(struct rdma_id_private *id_priv)
{
        if (atomic_dec_and_test(&id_priv->dev_remove))
                wake_up(&id_priv->wait_remove);
}

static int cma_has_cm_dev(struct rdma_id_private *id_priv)
{
        return (id_priv->id.device && id_priv->cm_id.ib);
}

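/*
 * Allocate a new rdma_cm_id in the CMA_IDLE state.  The returned id is
 * embedded in an rdma_id_private that carries the state machine, the
 * reference count, and the device-removal bookkeeping described above.
 */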
struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
                                  void *context, enum rdma_port_space ps)
{
        struct rdma_id_private *id_priv;

        id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
        if (!id_priv)
                return ERR_PTR(-ENOMEM);

        id_priv->state = CMA_IDLE;
        id_priv->id.context = context;
        id_priv->id.event_handler = event_handler;
        id_priv->id.ps = ps;
        spin_lock_init(&id_priv->lock);
        init_completion(&id_priv->comp);
        atomic_set(&id_priv->refcount, 1);
        init_waitqueue_head(&id_priv->wait_remove);
        atomic_set(&id_priv->dev_remove, 0);
        INIT_LIST_HEAD(&id_priv->listen_list);
        INIT_LIST_HEAD(&id_priv->mc_list);
        get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);

        return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);

static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        qp_attr.qp_state = IB_QPS_INIT;
        ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
        if (ret)
                return ret;

        ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
        if (ret)
                return ret;

        qp_attr.qp_state = IB_QPS_RTR;
        ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
        if (ret)
                return ret;

        qp_attr.qp_state = IB_QPS_RTS;
        qp_attr.sq_psn = 0;
        ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

        return ret;
}

static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        qp_attr.qp_state = IB_QPS_INIT;
        ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
        if (ret)
                return ret;

        return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}

int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
                   struct ib_qp_init_attr *qp_init_attr)
{
        struct rdma_id_private *id_priv;
        struct ib_qp *qp;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (id->device != pd->device)
                return -EINVAL;

        qp = ib_create_qp(pd, qp_init_attr);
        if (IS_ERR(qp))
                return PTR_ERR(qp);

        if (cma_is_ud_ps(id_priv->id.ps))
                ret = cma_init_ud_qp(id_priv, qp);
        else
                ret = cma_init_conn_qp(id_priv, qp);
        if (ret)
                goto err;

        id->qp = qp;
        id_priv->qp_num = qp->qp_num;
        id_priv->srq = (qp->srq != NULL);
        return 0;
err:
        ib_destroy_qp(qp);
        return ret;
}
EXPORT_SYMBOL(rdma_create_qp);

void rdma_destroy_qp(struct rdma_cm_id *id)
{
        ib_destroy_qp(id->qp);
}
EXPORT_SYMBOL(rdma_destroy_qp);

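/*
 * QP state helpers: walk a connected QP through INIT -> RTR (-> RTS),
 * or push it into the error state, filling the attributes for each hop
 * from rdma_init_qp_attr().  All are no-ops for ids created without a
 * QP.
 */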
static int cma_modify_qp_rtr(struct rdma_cm_id *id)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        if (!id->qp)
                return 0;

        /* Need to update QP attributes from default values. */
        qp_attr.qp_state = IB_QPS_INIT;
        ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
        if (ret)
                return ret;

        ret = ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);
        if (ret)
                return ret;

        qp_attr.qp_state = IB_QPS_RTR;
        ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
        if (ret)
                return ret;

        return ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);
}

static int cma_modify_qp_rts(struct rdma_cm_id *id)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        if (!id->qp)
                return 0;

        qp_attr.qp_state = IB_QPS_RTS;
        ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
        if (ret)
                return ret;

        return ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);
}

static int cma_modify_qp_err(struct rdma_cm_id *id)
{
        struct ib_qp_attr qp_attr;

        if (!id->qp)
                return 0;

        qp_attr.qp_state = IB_QPS_ERR;
        return ib_modify_qp(id->qp, &qp_attr, IB_QP_STATE);
}

static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
                               struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
        struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
        int ret;

        ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
                                  ib_addr_get_pkey(dev_addr),
                                  &qp_attr->pkey_index);
        if (ret)
                return ret;

        qp_attr->port_num = id_priv->id.port_num;
        *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

        if (cma_is_ud_ps(id_priv->id.ps)) {
                qp_attr->qkey = id_priv->qkey;
                *qp_attr_mask |= IB_QP_QKEY;
        } else {
                qp_attr->qp_access_flags = 0;
                *qp_attr_mask |= IB_QP_ACCESS_FLAGS;
        }
        return 0;
}

int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
                      int *qp_attr_mask)
{
        struct rdma_id_private *id_priv;
        int ret = 0;

        id_priv = container_of(id, struct rdma_id_private, id);
        switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
        case RDMA_TRANSPORT_IB:
                if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps))
                        ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
                else
                        ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
                                                 qp_attr_mask);
                if (qp_attr->qp_state == IB_QPS_RTR)
                        qp_attr->rq_psn = id_priv->seq_num;
                break;
        case RDMA_TRANSPORT_IWARP:
                if (!id_priv->cm_id.iw) {
                        qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
                        *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
                } else
                        ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
                                                 qp_attr_mask);
                break;
        default:
                ret = -ENOSYS;
                break;
        }

        return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);

static inline int cma_zero_addr(struct sockaddr *addr)
{
        struct in6_addr *ip6;

        if (addr->sa_family == AF_INET)
                return ZERONET(((struct sockaddr_in *) addr)->sin_addr.s_addr);
        else {
                ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
                return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
                        ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
        }
}

static inline int cma_loopback_addr(struct sockaddr *addr)
{
        return LOOPBACK(((struct sockaddr_in *) addr)->sin_addr.s_addr);
}

static inline int cma_any_addr(struct sockaddr *addr)
{
        return cma_zero_addr(addr) || cma_loopback_addr(addr);
}

static inline __be16 cma_port(struct sockaddr *addr)
{
        if (addr->sa_family == AF_INET)
                return ((struct sockaddr_in *) addr)->sin_port;
        else
                return ((struct sockaddr_in6 *) addr)->sin6_port;
}

static inline int cma_any_port(struct sockaddr *addr)
{
        return !cma_port(addr);
}

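/*
 * Pull the IP version, port, and source/destination addresses out of
 * the private-data header carried in a connection request.  SDP uses
 * its own hello-header layout; every other port space uses struct
 * cma_hdr.  Only IPv4 and IPv6 are accepted.
 */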
static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
                            u8 *ip_ver, __u16 *port,
                            union cma_ip_addr **src, union cma_ip_addr **dst)
{
        switch (ps) {
        case RDMA_PS_SDP:
                if (sdp_get_majv(((struct sdp_hh *) hdr)->sdp_version) !=
                    SDP_MAJ_VERSION)
                        return -EINVAL;

                *ip_ver = sdp_get_ip_ver(hdr);
                *port   = ((struct sdp_hh *) hdr)->port;
                *src    = &((struct sdp_hh *) hdr)->src_addr;
                *dst    = &((struct sdp_hh *) hdr)->dst_addr;
                break;
        default:
                if (((struct cma_hdr *) hdr)->cma_version != CMA_VERSION)
                        return -EINVAL;

                *ip_ver = cma_get_ip_ver(hdr);
                *port   = ((struct cma_hdr *) hdr)->port;
                *src    = &((struct cma_hdr *) hdr)->src_addr;
                *dst    = &((struct cma_hdr *) hdr)->dst_addr;
                break;
        }

        if (*ip_ver != 4 && *ip_ver != 6)
                return -EINVAL;
        return 0;
}

static void cma_save_net_info(struct rdma_addr *addr,
                              struct rdma_addr *listen_addr,
                              u8 ip_ver, __u16 port,
                              union cma_ip_addr *src, union cma_ip_addr *dst)
{
        struct sockaddr_in *listen4, *ip4;
        struct sockaddr_in6 *listen6, *ip6;

        switch (ip_ver) {
        case 4:
                listen4 = (struct sockaddr_in *) &listen_addr->src_addr;
                ip4 = (struct sockaddr_in *) &addr->src_addr;
                ip4->sin_family = listen4->sin_family;
                ip4->sin_addr.s_addr = dst->ip4.addr;
                ip4->sin_port = listen4->sin_port;

                ip4 = (struct sockaddr_in *) &addr->dst_addr;
                ip4->sin_family = listen4->sin_family;
                ip4->sin_addr.s_addr = src->ip4.addr;
                ip4->sin_port = port;
                break;
        case 6:
                listen6 = (struct sockaddr_in6 *) &listen_addr->src_addr;
                ip6 = (struct sockaddr_in6 *) &addr->src_addr;
                ip6->sin6_family = listen6->sin6_family;
                ip6->sin6_addr = dst->ip6;
                ip6->sin6_port = listen6->sin6_port;

                ip6 = (struct sockaddr_in6 *) &addr->dst_addr;
                ip6->sin6_family = listen6->sin6_family;
                ip6->sin6_addr = src->ip6;
                ip6->sin6_port = port;
                break;
        default:
                break;
        }
}

static inline int cma_user_data_offset(enum rdma_port_space ps)
{
        switch (ps) {
        case RDMA_PS_SDP:
                return 0;
        default:
                return sizeof(struct cma_hdr);
        }
}

static void cma_cancel_route(struct rdma_id_private *id_priv)
{
        switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
        case RDMA_TRANSPORT_IB:
                if (id_priv->query)
                        ib_sa_cancel_query(id_priv->query_id, id_priv->query);
                break;
        default:
                break;
        }
}

static inline int cma_internal_listen(struct rdma_id_private *id_priv)
{
        return (id_priv->state == CMA_LISTEN) && id_priv->cma_dev &&
               cma_any_addr(&id_priv->id.route.addr.src_addr);
}

static void cma_destroy_listen(struct rdma_id_private *id_priv)
{
        cma_exch(id_priv, CMA_DESTROYING);

        if (id_priv->cma_dev) {
                switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
                case RDMA_TRANSPORT_IB:
                        if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
                                ib_destroy_cm_id(id_priv->cm_id.ib);
                        break;
                case RDMA_TRANSPORT_IWARP:
                        if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
                                iw_destroy_cm_id(id_priv->cm_id.iw);
                        break;
                default:
                        break;
                }
                cma_detach_from_dev(id_priv);
        }
        list_del(&id_priv->listen_list);

        cma_deref_id(id_priv);
        wait_for_completion(&id_priv->comp);

        kfree(id_priv);
}

static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
        struct rdma_id_private *dev_id_priv;

        mutex_lock(&lock);
        list_del(&id_priv->list);

        while (!list_empty(&id_priv->listen_list)) {
                dev_id_priv = list_entry(id_priv->listen_list.next,
                                         struct rdma_id_private, listen_list);
                cma_destroy_listen(dev_id_priv);
        }
        mutex_unlock(&lock);
}

static void cma_cancel_operation(struct rdma_id_private *id_priv,
                                 enum cma_state state)
{
        switch (state) {
        case CMA_ADDR_QUERY:
                rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
                break;
        case CMA_ROUTE_QUERY:
                cma_cancel_route(id_priv);
                break;
        case CMA_LISTEN:
                if (cma_any_addr(&id_priv->id.route.addr.src_addr) &&
                    !id_priv->cma_dev)
                        cma_cancel_listens(id_priv);
                break;
        default:
                break;
        }
}

static void cma_release_port(struct rdma_id_private *id_priv)
{
        struct rdma_bind_list *bind_list = id_priv->bind_list;

        if (!bind_list)
                return;

        mutex_lock(&lock);
        hlist_del(&id_priv->node);
        if (hlist_empty(&bind_list->owners)) {
                idr_remove(bind_list->ps, bind_list->port);
                kfree(bind_list);
        }
        mutex_unlock(&lock);
}

static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
        struct cma_multicast *mc;

        while (!list_empty(&id_priv->mc_list)) {
                mc = container_of(id_priv->mc_list.next,
                                  struct cma_multicast, list);
                list_del(&mc->list);
                ib_sa_free_multicast(mc->multicast.ib);
                kfree(mc);
        }
}

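/*
 * Tear down an id: move it to CMA_DESTROYING, cancel whatever operation
 * was in flight for the old state, destroy the underlying IB or iWARP
 * CM id, leave multicast groups, release the bound port, and wait for
 * the reference count to reach zero before freeing.
 */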
void rdma_destroy_id(struct rdma_cm_id *id)
{
        struct rdma_id_private *id_priv;
        enum cma_state state;

        id_priv = container_of(id, struct rdma_id_private, id);
        state = cma_exch(id_priv, CMA_DESTROYING);
        cma_cancel_operation(id_priv, state);

        mutex_lock(&lock);
        if (id_priv->cma_dev) {
                mutex_unlock(&lock);
                switch (rdma_node_get_transport(id->device->node_type)) {
                case RDMA_TRANSPORT_IB:
                        if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
                                ib_destroy_cm_id(id_priv->cm_id.ib);
                        break;
                case RDMA_TRANSPORT_IWARP:
                        if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
                                iw_destroy_cm_id(id_priv->cm_id.iw);
                        break;
                default:
                        break;
                }
                cma_leave_mc_groups(id_priv);
                mutex_lock(&lock);
                cma_detach_from_dev(id_priv);
        }
        mutex_unlock(&lock);

        cma_release_port(id_priv);
        cma_deref_id(id_priv);
        wait_for_completion(&id_priv->comp);

        kfree(id_priv->id.route.path_rec);
        kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);

static int cma_rep_recv(struct rdma_id_private *id_priv)
{
        int ret;

        ret = cma_modify_qp_rtr(&id_priv->id);
        if (ret)
                goto reject;

        ret = cma_modify_qp_rts(&id_priv->id);
        if (ret)
                goto reject;

        ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
        if (ret)
                goto reject;

        return 0;
reject:
        cma_modify_qp_err(&id_priv->id);
        ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
                       NULL, 0, NULL, 0);
        return ret;
}

static int cma_verify_rep(struct rdma_id_private *id_priv, void *data)
{
        if (id_priv->id.ps == RDMA_PS_SDP &&
            sdp_get_majv(((struct sdp_hah *) data)->sdp_version) !=
            SDP_MAJ_VERSION)
                return -EINVAL;

        return 0;
}

static void cma_set_rep_event_data(struct rdma_cm_event *event,
                                   struct ib_cm_rep_event_param *rep_data,
                                   void *private_data)
{
        event->param.conn.private_data = private_data;
        event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
        event->param.conn.responder_resources = rep_data->responder_resources;
        event->param.conn.initiator_depth = rep_data->initiator_depth;
        event->param.conn.flow_control = rep_data->flow_control;
        event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
        event->param.conn.srq = rep_data->srq;
        event->param.conn.qp_num = rep_data->remote_qpn;
}

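/*
 * IB CM event handler for ids in the CMA_CONNECT state (the active side
 * and passive children created by cma_req_handler): translate IB CM
 * events into RDMA CM events and deliver them to the user's event
 * handler.  A non-zero return from that handler destroys the id.
 */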
static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
        struct rdma_id_private *id_priv = cm_id->context;
        struct rdma_cm_event event;
        int ret = 0;

        if (cma_disable_remove(id_priv, CMA_CONNECT))
                return 0;

        memset(&event, 0, sizeof event);
        switch (ib_event->event) {
        case IB_CM_REQ_ERROR:
        case IB_CM_REP_ERROR:
                event.event = RDMA_CM_EVENT_UNREACHABLE;
                event.status = -ETIMEDOUT;
                break;
        case IB_CM_REP_RECEIVED:
                event.status = cma_verify_rep(id_priv, ib_event->private_data);
                if (event.status)
                        event.event = RDMA_CM_EVENT_CONNECT_ERROR;
                else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
                        event.status = cma_rep_recv(id_priv);
                        event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
                                                     RDMA_CM_EVENT_ESTABLISHED;
                } else
                        event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
                cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
                                       ib_event->private_data);
                break;
        case IB_CM_RTU_RECEIVED:
        case IB_CM_USER_ESTABLISHED:
                event.event = RDMA_CM_EVENT_ESTABLISHED;
                break;
        case IB_CM_DREQ_ERROR:
                event.status = -ETIMEDOUT; /* fall through */
        case IB_CM_DREQ_RECEIVED:
        case IB_CM_DREP_RECEIVED:
                if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
                        goto out;
                event.event = RDMA_CM_EVENT_DISCONNECTED;
                break;
        case IB_CM_TIMEWAIT_EXIT:
        case IB_CM_MRA_RECEIVED:
                /* ignore event */
                goto out;
        case IB_CM_REJ_RECEIVED:
                cma_modify_qp_err(&id_priv->id);
                event.status = ib_event->param.rej_rcvd.reason;
                event.event = RDMA_CM_EVENT_REJECTED;
                event.param.conn.private_data = ib_event->private_data;
                event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
                break;
        default:
                printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d",
                       ib_event->event);
                goto out;
        }

        ret = id_priv->id.event_handler(&id_priv->id, &event);
        if (ret) {
                /* Destroy the CM ID by returning a non-zero value. */
                id_priv->cm_id.ib = NULL;
                cma_exch(id_priv, CMA_DESTROYING);
                cma_enable_remove(id_priv);
                rdma_destroy_id(&id_priv->id);
                return ret;
        }
out:
        cma_enable_remove(id_priv);
        return ret;
}

static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
                                               struct ib_cm_event *ib_event)
{
        struct rdma_id_private *id_priv;
        struct rdma_cm_id *id;
        struct rdma_route *rt;
        union cma_ip_addr *src, *dst;
        __u16 port;
        u8 ip_ver;

        if (cma_get_net_info(ib_event->private_data, listen_id->ps,
                             &ip_ver, &port, &src, &dst))
                goto err;

        id = rdma_create_id(listen_id->event_handler, listen_id->context,
                            listen_id->ps);
        if (IS_ERR(id))
                goto err;

        cma_save_net_info(&id->route.addr, &listen_id->route.addr,
                          ip_ver, port, src, dst);

        rt = &id->route;
        rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
        rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
                               GFP_KERNEL);
        if (!rt->path_rec)
                goto destroy_id;

        rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
        if (rt->num_paths == 2)
                rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

        ib_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
        ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
        ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
        rt->addr.dev_addr.dev_type = RDMA_NODE_IB_CA;

        id_priv = container_of(id, struct rdma_id_private, id);
        id_priv->state = CMA_CONNECT;
        return id_priv;

destroy_id:
        rdma_destroy_id(id);
err:
        return NULL;
}

static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
                                              struct ib_cm_event *ib_event)
{
        struct rdma_id_private *id_priv;
        struct rdma_cm_id *id;
        union cma_ip_addr *src, *dst;
        __u16 port;
        u8 ip_ver;
        int ret;

        id = rdma_create_id(listen_id->event_handler, listen_id->context,
                            listen_id->ps);
        if (IS_ERR(id))
                return NULL;

        if (cma_get_net_info(ib_event->private_data, listen_id->ps,
                             &ip_ver, &port, &src, &dst))
                goto err;

        cma_save_net_info(&id->route.addr, &listen_id->route.addr,
                          ip_ver, port, src, dst);

        ret = rdma_translate_ip(&id->route.addr.src_addr,
                                &id->route.addr.dev_addr);
        if (ret)
                goto err;

        id_priv = container_of(id, struct rdma_id_private, id);
        id_priv->state = CMA_CONNECT;
        return id_priv;
err:
        rdma_destroy_id(id);
        return NULL;
}

static void cma_set_req_event_data(struct rdma_cm_event *event,
                                   struct ib_cm_req_event_param *req_data,
                                   void *private_data, int offset)
{
        event->param.conn.private_data = private_data + offset;
        event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
        event->param.conn.responder_resources = req_data->responder_resources;
        event->param.conn.initiator_depth = req_data->initiator_depth;
        event->param.conn.flow_control = req_data->flow_control;
        event->param.conn.retry_count = req_data->retry_count;
        event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
        event->param.conn.srq = req_data->srq;
        event->param.conn.qp_num = req_data->remote_qpn;
}

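/*
 * Passive-side IB CM handler for incoming connection requests on a
 * listening id: build a new child id from the request's private data,
 * bind it to a local device, and report RDMA_CM_EVENT_CONNECT_REQUEST
 * to the listener's event handler.
 */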
static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
        struct rdma_id_private *listen_id, *conn_id;
        struct rdma_cm_event event;
        int offset, ret;

        listen_id = cm_id->context;
        if (cma_disable_remove(listen_id, CMA_LISTEN))
                return -ECONNABORTED;

        memset(&event, 0, sizeof event);
        offset = cma_user_data_offset(listen_id->id.ps);
        event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
        if (cma_is_ud_ps(listen_id->id.ps)) {
                conn_id = cma_new_udp_id(&listen_id->id, ib_event);
                event.param.ud.private_data = ib_event->private_data + offset;
                event.param.ud.private_data_len =
                                IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
        } else {
                conn_id = cma_new_conn_id(&listen_id->id, ib_event);
                cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
                                       ib_event->private_data, offset);
        }
        if (!conn_id) {
                ret = -ENOMEM;
                goto out;
        }

        atomic_inc(&conn_id->dev_remove);
        mutex_lock(&lock);
        ret = cma_acquire_dev(conn_id);
        mutex_unlock(&lock);
        if (ret)
                goto release_conn_id;

        conn_id->cm_id.ib = cm_id;
        cm_id->context = conn_id;
        cm_id->cm_handler = cma_ib_handler;

        ret = conn_id->id.event_handler(&conn_id->id, &event);
        if (!ret)
                goto out;

        /* Destroy the CM ID by returning a non-zero value. */
        conn_id->cm_id.ib = NULL;

release_conn_id:
        cma_exch(conn_id, CMA_DESTROYING);
        cma_enable_remove(conn_id);
        rdma_destroy_id(&conn_id->id);

out:
        cma_enable_remove(listen_id);
        return ret;
}

static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
{
        return cpu_to_be64(((u64)ps << 16) + be16_to_cpu(cma_port(addr)));
}

static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
                                 struct ib_cm_compare_data *compare)
{
        struct cma_hdr *cma_data, *cma_mask;
        struct sdp_hh *sdp_data, *sdp_mask;
        __u32 ip4_addr;
        struct in6_addr ip6_addr;

        memset(compare, 0, sizeof *compare);
        cma_data = (void *) compare->data;
        cma_mask = (void *) compare->mask;
        sdp_data = (void *) compare->data;
        sdp_mask = (void *) compare->mask;

        switch (addr->sa_family) {
        case AF_INET:
                ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
                if (ps == RDMA_PS_SDP) {
                        sdp_set_ip_ver(sdp_data, 4);
                        sdp_set_ip_ver(sdp_mask, 0xF);
                        sdp_data->dst_addr.ip4.addr = ip4_addr;
                        sdp_mask->dst_addr.ip4.addr = ~0;
                } else {
                        cma_set_ip_ver(cma_data, 4);
                        cma_set_ip_ver(cma_mask, 0xF);
                        cma_data->dst_addr.ip4.addr = ip4_addr;
                        cma_mask->dst_addr.ip4.addr = ~0;
                }
                break;
        case AF_INET6:
                ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
                if (ps == RDMA_PS_SDP) {
                        sdp_set_ip_ver(sdp_data, 6);
                        sdp_set_ip_ver(sdp_mask, 0xF);
                        sdp_data->dst_addr.ip6 = ip6_addr;
                        memset(&sdp_mask->dst_addr.ip6, 0xFF,
                               sizeof sdp_mask->dst_addr.ip6);
                } else {
                        cma_set_ip_ver(cma_data, 6);
                        cma_set_ip_ver(cma_mask, 0xF);
                        cma_data->dst_addr.ip6 = ip6_addr;
                        memset(&cma_mask->dst_addr.ip6, 0xFF,
                               sizeof cma_mask->dst_addr.ip6);
                }
                break;
        default:
                break;
        }
}

static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
        struct rdma_id_private *id_priv = iw_id->context;
        struct rdma_cm_event event;
        struct sockaddr_in *sin;
        int ret = 0;

        if (cma_disable_remove(id_priv, CMA_CONNECT))
                return 0;

        memset(&event, 0, sizeof event);
        switch (iw_event->event) {
        case IW_CM_EVENT_CLOSE:
                event.event = RDMA_CM_EVENT_DISCONNECTED;
                break;
        case IW_CM_EVENT_CONNECT_REPLY:
                sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
                *sin = iw_event->local_addr;
                sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
                *sin = iw_event->remote_addr;
                switch (iw_event->status) {
                case 0:
                        event.event = RDMA_CM_EVENT_ESTABLISHED;
                        break;
                case -ECONNRESET:
                case -ECONNREFUSED:
                        event.event = RDMA_CM_EVENT_REJECTED;
                        break;
                case -ETIMEDOUT:
                        event.event = RDMA_CM_EVENT_UNREACHABLE;
                        break;
                default:
                        event.event = RDMA_CM_EVENT_CONNECT_ERROR;
                        break;
                }
                break;
        case IW_CM_EVENT_ESTABLISHED:
                event.event = RDMA_CM_EVENT_ESTABLISHED;
                break;
        default:
                BUG_ON(1);
        }

        event.status = iw_event->status;
        event.param.conn.private_data = iw_event->private_data;
        event.param.conn.private_data_len = iw_event->private_data_len;
        ret = id_priv->id.event_handler(&id_priv->id, &event);
        if (ret) {
                /* Destroy the CM ID by returning a non-zero value. */
                id_priv->cm_id.iw = NULL;
                cma_exch(id_priv, CMA_DESTROYING);
                cma_enable_remove(id_priv);
                rdma_destroy_id(&id_priv->id);
                return ret;
        }

        cma_enable_remove(id_priv);
        return ret;
}

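/*
 * Passive-side iWARP handler: create a new RDMA id for each incoming
 * iWARP connection request, resolve the local net_device from the
 * request's destination IP, and hand the request to the listener.
 */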
static int iw_conn_req_handler(struct iw_cm_id *cm_id,
                               struct iw_cm_event *iw_event)
{
        struct rdma_cm_id *new_cm_id;
        struct rdma_id_private *listen_id, *conn_id;
        struct sockaddr_in *sin;
        struct net_device *dev = NULL;
        struct rdma_cm_event event;
        int ret;

        listen_id = cm_id->context;
        if (cma_disable_remove(listen_id, CMA_LISTEN))
                return -ECONNABORTED;

        /* Create a new RDMA id for the new IW CM ID */
        new_cm_id = rdma_create_id(listen_id->id.event_handler,
                                   listen_id->id.context,
                                   RDMA_PS_TCP);
        if (!new_cm_id) {
                ret = -ENOMEM;
                goto out;
        }
        conn_id = container_of(new_cm_id, struct rdma_id_private, id);
        atomic_inc(&conn_id->dev_remove);
        conn_id->state = CMA_CONNECT;

        dev = ip_dev_find(iw_event->local_addr.sin_addr.s_addr);
        if (!dev) {
                ret = -EADDRNOTAVAIL;
                cma_enable_remove(conn_id);
                rdma_destroy_id(new_cm_id);
                goto out;
        }
        ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
        if (ret) {
                cma_enable_remove(conn_id);
                rdma_destroy_id(new_cm_id);
                goto out;
        }

        mutex_lock(&lock);
        ret = cma_acquire_dev(conn_id);
        mutex_unlock(&lock);
        if (ret) {
                cma_enable_remove(conn_id);
                rdma_destroy_id(new_cm_id);
                goto out;
        }

        conn_id->cm_id.iw = cm_id;
        cm_id->context = conn_id;
        cm_id->cm_handler = cma_iw_handler;

        sin = (struct sockaddr_in *) &new_cm_id->route.addr.src_addr;
        *sin = iw_event->local_addr;
        sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
        *sin = iw_event->remote_addr;

        memset(&event, 0, sizeof event);
        event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
        event.param.conn.private_data = iw_event->private_data;
        event.param.conn.private_data_len = iw_event->private_data_len;
        ret = conn_id->id.event_handler(&conn_id->id, &event);
        if (ret) {
                /* User wants to destroy the CM ID */
                conn_id->cm_id.iw = NULL;
                cma_exch(conn_id, CMA_DESTROYING);
                cma_enable_remove(conn_id);
                rdma_destroy_id(&conn_id->id);
        }

out:
        if (dev)
                dev_put(dev);
        cma_enable_remove(listen_id);
        return ret;
}

static int cma_ib_listen(struct rdma_id_private *id_priv)
{
        struct ib_cm_compare_data compare_data;
        struct sockaddr *addr;
        __be64 svc_id;
        int ret;

        id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler,
                                            id_priv);
        if (IS_ERR(id_priv->cm_id.ib))
                return PTR_ERR(id_priv->cm_id.ib);

        addr = &id_priv->id.route.addr.src_addr;
        svc_id = cma_get_service_id(id_priv->id.ps, addr);
        if (cma_any_addr(addr))
                ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
        else {
                cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
                ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
        }

        if (ret) {
                ib_destroy_cm_id(id_priv->cm_id.ib);
                id_priv->cm_id.ib = NULL;
        }

        return ret;
}

static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
        int ret;
        struct sockaddr_in *sin;

        id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device,
                                            iw_conn_req_handler,
                                            id_priv);
        if (IS_ERR(id_priv->cm_id.iw))
                return PTR_ERR(id_priv->cm_id.iw);

        sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
        id_priv->cm_id.iw->local_addr = *sin;

        ret = iw_cm_listen(id_priv->cm_id.iw, backlog);

        if (ret) {
                iw_destroy_cm_id(id_priv->cm_id.iw);
                id_priv->cm_id.iw = NULL;
        }

        return ret;
}

static int cma_listen_handler(struct rdma_cm_id *id,
                              struct rdma_cm_event *event)
{
        struct rdma_id_private *id_priv = id->context;

        id->context = id_priv->id.context;
        id->event_handler = id_priv->id.event_handler;
        return id_priv->id.event_handler(id, event);
}

static void cma_listen_on_dev(struct rdma_id_private *id_priv,
                              struct cma_device *cma_dev)
{
        struct rdma_id_private *dev_id_priv;
        struct rdma_cm_id *id;
        int ret;

        id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps);
        if (IS_ERR(id))
                return;

        dev_id_priv = container_of(id, struct rdma_id_private, id);

        dev_id_priv->state = CMA_ADDR_BOUND;
        memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
               ip_addr_size(&id_priv->id.route.addr.src_addr));

        cma_attach_to_dev(dev_id_priv, cma_dev);
        list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);

        ret = rdma_listen(id, id_priv->backlog);
        if (ret)
                goto err;

        return;
err:
        cma_destroy_listen(dev_id_priv);
}

static void cma_listen_on_all(struct rdma_id_private *id_priv)
{
        struct cma_device *cma_dev;

        mutex_lock(&lock);
        list_add_tail(&id_priv->list, &listen_any_list);
        list_for_each_entry(cma_dev, &dev_list, list)
                cma_listen_on_dev(id_priv, cma_dev);
        mutex_unlock(&lock);
}

static int cma_bind_any(struct rdma_cm_id *id, sa_family_t af)
{
        struct sockaddr_in addr_in;

        memset(&addr_in, 0, sizeof addr_in);
        addr_in.sin_family = af;
        return rdma_bind_addr(id, (struct sockaddr *) &addr_in);
}

int rdma_listen(struct rdma_cm_id *id, int backlog)
{
        struct rdma_id_private *id_priv;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (id_priv->state == CMA_IDLE) {
                ret = cma_bind_any(id, AF_INET);
                if (ret)
                        return ret;
        }

        if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
                return -EINVAL;

        id_priv->backlog = backlog;
        if (id->device) {
                switch (rdma_node_get_transport(id->device->node_type)) {
                case RDMA_TRANSPORT_IB:
                        ret = cma_ib_listen(id_priv);
                        if (ret)
                                goto err;
                        break;
                case RDMA_TRANSPORT_IWARP:
                        ret = cma_iw_listen(id_priv, backlog);
                        if (ret)
                                goto err;
                        break;
                default:
                        ret = -ENOSYS;
                        goto err;
                }
        } else
                cma_listen_on_all(id_priv);

        return 0;
err:
        id_priv->backlog = 0;
        cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
        return ret;
}
EXPORT_SYMBOL(rdma_listen);

static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
                              void *context)
{
        struct cma_work *work = context;
        struct rdma_route *route;

        route = &work->id->id.route;

        if (!status) {
                route->num_paths = 1;
                *route->path_rec = *path_rec;
        } else {
                work->old_state = CMA_ROUTE_QUERY;
                work->new_state = CMA_ADDR_RESOLVED;
                work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
                work->event.status = status;
        }

        queue_work(cma_wq, &work->work);
}

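/*
 * Kick off an SA path-record query for the bound SGID/DGID pair.  The
 * result is delivered asynchronously to cma_query_handler(), which
 * queues a cma_work item to complete (or fail) route resolution.
 */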
static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
                              struct cma_work *work)
{
        struct rdma_dev_addr *addr = &id_priv->id.route.addr.dev_addr;
        struct ib_sa_path_rec path_rec;

        memset(&path_rec, 0, sizeof path_rec);
        ib_addr_get_sgid(addr, &path_rec.sgid);
        ib_addr_get_dgid(addr, &path_rec.dgid);
        path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(addr));
        path_rec.numb_path = 1;
        path_rec.reversible = 1;

        id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
                                id_priv->id.port_num, &path_rec,
                                IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
                                IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
                                IB_SA_PATH_REC_REVERSIBLE,
                                timeout_ms, GFP_KERNEL,
                                cma_query_handler, work, &id_priv->query);

        return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}

static void cma_work_handler(struct work_struct *_work)
{
        struct cma_work *work = container_of(_work, struct cma_work, work);
        struct rdma_id_private *id_priv = work->id;
        int destroy = 0;

        atomic_inc(&id_priv->dev_remove);
        if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
                goto out;

        if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
                cma_exch(id_priv, CMA_DESTROYING);
                destroy = 1;
        }
out:
        cma_enable_remove(id_priv);
        cma_deref_id(id_priv);
        if (destroy)
                rdma_destroy_id(&id_priv->id);
        kfree(work);
}

static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
        struct rdma_route *route = &id_priv->id.route;
        struct cma_work *work;
        int ret;

        work = kzalloc(sizeof *work, GFP_KERNEL);
        if (!work)
                return -ENOMEM;

        work->id = id_priv;
        INIT_WORK(&work->work, cma_work_handler);
        work->old_state = CMA_ROUTE_QUERY;
        work->new_state = CMA_ROUTE_RESOLVED;
        work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;

        route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
        if (!route->path_rec) {
                ret = -ENOMEM;
                goto err1;
        }

        ret = cma_query_ib_route(id_priv, timeout_ms, work);
        if (ret)
                goto err2;

        return 0;
err2:
        kfree(route->path_rec);
        route->path_rec = NULL;
err1:
        kfree(work);
        return ret;
}

int rdma_set_ib_paths(struct rdma_cm_id *id,
                      struct ib_sa_path_rec *path_rec, int num_paths)
{
        struct rdma_id_private *id_priv;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
                return -EINVAL;

        id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL);
        if (!id->route.path_rec) {
                ret = -ENOMEM;
                goto err;
        }

        memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths);
        return 0;
err:
        cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED);
        return ret;
}
EXPORT_SYMBOL(rdma_set_ib_paths);

static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
{
        struct cma_work *work;

        work = kzalloc(sizeof *work, GFP_KERNEL);
        if (!work)
                return -ENOMEM;

        work->id = id_priv;
        INIT_WORK(&work->work, cma_work_handler);
        work->old_state = CMA_ROUTE_QUERY;
        work->new_state = CMA_ROUTE_RESOLVED;
        work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
        queue_work(cma_wq, &work->work);
        return 0;
}

int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
        struct rdma_id_private *id_priv;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
                return -EINVAL;

        atomic_inc(&id_priv->refcount);
        switch (rdma_node_get_transport(id->device->node_type)) {
        case RDMA_TRANSPORT_IB:
                ret = cma_resolve_ib_route(id_priv, timeout_ms);
                break;
        case RDMA_TRANSPORT_IWARP:
                ret = cma_resolve_iw_route(id_priv, timeout_ms);
                break;
        default:
                ret = -ENOSYS;
                break;
        }
        if (ret)
                goto err;

        return 0;
err:
        cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED);
        cma_deref_id(id_priv);
        return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);

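/*
 * For wildcard/loopback destinations: pick the first active port on any
 * registered device (falling back to the first port of the first
 * device), then bind the id to that port's GID and PKey and attach it.
 */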
static int cma_bind_loopback(struct rdma_id_private *id_priv)
{
        struct cma_device *cma_dev;
        struct ib_port_attr port_attr;
        union ib_gid gid;
        u16 pkey;
        int ret;
        u8 p;

        mutex_lock(&lock);
        if (list_empty(&dev_list)) {
                ret = -ENODEV;
                goto out;
        }
        list_for_each_entry(cma_dev, &dev_list, list)
                for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
                        if (!ib_query_port(cma_dev->device, p, &port_attr) &&
                            port_attr.state == IB_PORT_ACTIVE)
                                goto port_found;

        p = 1;
        cma_dev = list_entry(dev_list.next, struct cma_device, list);

port_found:
        ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
        if (ret)
                goto out;

        ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
        if (ret)
                goto out;

        ib_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
        ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
        id_priv->id.port_num = p;
        cma_attach_to_dev(id_priv, cma_dev);
out:
        mutex_unlock(&lock);
        return ret;
}

static void addr_handler(int status, struct sockaddr *src_addr,
                         struct rdma_dev_addr *dev_addr, void *context)
{
        struct rdma_id_private *id_priv = context;
        struct rdma_cm_event event;

        memset(&event, 0, sizeof event);
        atomic_inc(&id_priv->dev_remove);

        /*
         * Grab mutex to block rdma_destroy_id() from removing the device while
         * we're trying to acquire it.
         */
        mutex_lock(&lock);
        if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) {
                mutex_unlock(&lock);
                goto out;
        }

        if (!status && !id_priv->cma_dev)
                status = cma_acquire_dev(id_priv);
        mutex_unlock(&lock);

        if (status) {
                if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
                        goto out;
                event.event = RDMA_CM_EVENT_ADDR_ERROR;
                event.status = status;
        } else {
                memcpy(&id_priv->id.route.addr.src_addr, src_addr,
                       ip_addr_size(src_addr));
                event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
        }

        if (id_priv->id.event_handler(&id_priv->id, &event)) {
                cma_exch(id_priv, CMA_DESTROYING);
                cma_enable_remove(id_priv);
                cma_deref_id(id_priv);
                rdma_destroy_id(&id_priv->id);
                return;
        }
out:
        cma_enable_remove(id_priv);
        cma_deref_id(id_priv);
}

static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
        struct cma_work *work;
        struct sockaddr_in *src_in, *dst_in;
        union ib_gid gid;
        int ret;

        work = kzalloc(sizeof *work, GFP_KERNEL);
        if (!work)
                return -ENOMEM;

        if (!id_priv->cma_dev) {
                ret = cma_bind_loopback(id_priv);
                if (ret)
                        goto err;
        }

        ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
        ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);

        if (cma_zero_addr(&id_priv->id.route.addr.src_addr)) {
                src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr;
                dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr;
                src_in->sin_family = dst_in->sin_family;
                src_in->sin_addr.s_addr = dst_in->sin_addr.s_addr;
        }

        work->id = id_priv;
        INIT_WORK(&work->work, cma_work_handler);
        work->old_state = CMA_ADDR_QUERY;
        work->new_state = CMA_ADDR_RESOLVED;
        work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
        queue_work(cma_wq, &work->work);
        return 0;
err:
        kfree(work);
        return ret;
}

static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
                         struct sockaddr *dst_addr)
{
        if (src_addr && src_addr->sa_family)
                return rdma_bind_addr(id, src_addr);
        else
                return cma_bind_any(id, dst_addr->sa_family);
}

int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
                      struct sockaddr *dst_addr, int timeout_ms)
{
        struct rdma_id_private *id_priv;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (id_priv->state == CMA_IDLE) {
                ret = cma_bind_addr(id, src_addr, dst_addr);
                if (ret)
                        return ret;
        }

        if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
                return -EINVAL;

        atomic_inc(&id_priv->refcount);
        memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr));
        if (cma_any_addr(dst_addr))
                ret = cma_resolve_loopback(id_priv);
        else
                ret = rdma_resolve_ip(&addr_client, &id->route.addr.src_addr,
                                      dst_addr, &id->route.addr.dev_addr,
                                      timeout_ms, addr_handler, id_priv);
        if (ret)
                goto err;

        return 0;
err:
        cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND);
        cma_deref_id(id_priv);
        return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);

static void cma_bind_port(struct rdma_bind_list *bind_list,
                          struct rdma_id_private *id_priv)
{
        struct sockaddr_in *sin;

        sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
        sin->sin_port = htons(bind_list->port);
        id_priv->bind_list = bind_list;
        hlist_add_head(&id_priv->node, &bind_list->owners);
}

static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
                          unsigned short snum)
{
        struct rdma_bind_list *bind_list;
        int port, ret;

        bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
        if (!bind_list)
                return -ENOMEM;

        do {
                ret = idr_get_new_above(ps, bind_list, snum, &port);
        } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));

        if (ret)
                goto err1;

        if (port != snum) {
                ret = -EADDRNOTAVAIL;
                goto err2;
        }

        bind_list->ps = ps;
        bind_list->port = (unsigned short) port;
        cma_bind_port(bind_list, id_priv);
        return 0;
err2:
        idr_remove(ps, port);
err1:
        kfree(bind_list);
        return ret;
}

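/*
 * Allocate an ephemeral port: scan forward from next_port within the
 * local port range sysctl, wrapping around once to the start of the
 * range before giving up with -EADDRNOTAVAIL.
 */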
static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
{
        struct rdma_bind_list *bind_list;
        int port, ret;

        bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
        if (!bind_list)
                return -ENOMEM;

retry:
        do {
                ret = idr_get_new_above(ps, bind_list, next_port, &port);
        } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));

        if (ret)
                goto err1;

        if (port > sysctl_local_port_range[1]) {
                if (next_port != sysctl_local_port_range[0]) {
                        idr_remove(ps, port);
                        next_port = sysctl_local_port_range[0];
                        goto retry;
                }
                ret = -EADDRNOTAVAIL;
                goto err2;
        }

        if (port == sysctl_local_port_range[1])
                next_port = sysctl_local_port_range[0];
        else
                next_port = port + 1;

        bind_list->ps = ps;
        bind_list->port = (unsigned short) port;
        cma_bind_port(bind_list, id_priv);
        return 0;
err2:
        idr_remove(ps, port);
err1:
        kfree(bind_list);
        return ret;
}

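/*
 * Bind to a specific port: reuse an existing bind_list for that port if
 * the addresses don't conflict (no wildcard/specific-address mix and no
 * exact duplicate), otherwise allocate a fresh one.  Privileged ports
 * require CAP_NET_BIND_SERVICE.
 */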
static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
{
        struct rdma_id_private *cur_id;
        struct sockaddr_in *sin, *cur_sin;
        struct rdma_bind_list *bind_list;
        struct hlist_node *node;
        unsigned short snum;

        sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
        snum = ntohs(sin->sin_port);
        if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
                return -EACCES;

        bind_list = idr_find(ps, snum);
        if (!bind_list)
                return cma_alloc_port(ps, id_priv, snum);

        /*
         * We don't support binding to any address if anyone is bound to
         * a specific address on the same port.
         */
        if (cma_any_addr(&id_priv->id.route.addr.src_addr))
                return -EADDRNOTAVAIL;

        hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
                if (cma_any_addr(&cur_id->id.route.addr.src_addr))
                        return -EADDRNOTAVAIL;

                cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
                if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr)
                        return -EADDRINUSE;
        }

        cma_bind_port(bind_list, id_priv);
        return 0;
}

static int cma_get_port(struct rdma_id_private *id_priv)
{
        struct idr *ps;
        int ret;

        switch (id_priv->id.ps) {
        case RDMA_PS_SDP:
                ps = &sdp_ps;
                break;
        case RDMA_PS_TCP:
                ps = &tcp_ps;
                break;
        case RDMA_PS_UDP:
                ps = &udp_ps;
                break;
        case RDMA_PS_IPOIB:
                ps = &ipoib_ps;
                break;
        default:
                return -EPROTONOSUPPORT;
        }

        mutex_lock(&lock);
        if (cma_any_port(&id_priv->id.route.addr.src_addr))
                ret = cma_alloc_any_port(ps, id_priv);
        else
                ret = cma_use_port(ps, id_priv);
        mutex_unlock(&lock);

        return ret;
}

int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
        struct rdma_id_private *id_priv;
        int ret;

        if (addr->sa_family != AF_INET)
                return -EAFNOSUPPORT;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
                return -EINVAL;

        if (!cma_any_addr(addr)) {
                ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
                if (ret)
                        goto err1;

                mutex_lock(&lock);
                ret = cma_acquire_dev(id_priv);
                mutex_unlock(&lock);
                if (ret)
                        goto err1;
        }

        memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
        ret = cma_get_port(id_priv);
        if (ret)
                goto err2;

        return 0;
err2:
        if (!cma_any_addr(addr)) {
                mutex_lock(&lock);
                cma_detach_from_dev(id_priv);
                mutex_unlock(&lock);
        }
err1:
        cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
        return ret;
}
EXPORT_SYMBOL(rdma_bind_addr);

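/*
 * Fill in the private-data header sent with a connection request: the
 * SDP hello header for RDMA_PS_SDP, otherwise struct cma_hdr.  Only
 * IPv4 addresses are formatted here.
 */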
2020 static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
2021 struct rdma_route *route)
2022 {
2023 struct sockaddr_in *src4, *dst4;
2024 struct cma_hdr *cma_hdr;
2025 struct sdp_hh *sdp_hdr;
2026
2027 src4 = (struct sockaddr_in *) &route->addr.src_addr;
2028 dst4 = (struct sockaddr_in *) &route->addr.dst_addr;
2029
2030 switch (ps) {
2031 case RDMA_PS_SDP:
2032 sdp_hdr = hdr;
2033 if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
2034 return -EINVAL;
2035 sdp_set_ip_ver(sdp_hdr, 4);
2036 sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
2037 sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
2038 sdp_hdr->port = src4->sin_port;
2039 break;
2040 default:
2041 cma_hdr = hdr;
2042 cma_hdr->cma_version = CMA_VERSION;
2043 cma_set_ip_ver(cma_hdr, 4);
2044 cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
2045 cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
2046 cma_hdr->port = src4->sin_port;
2047 break;
2048 }
2049 return 0;
2050 }
2051
2052 static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
2053 struct ib_cm_event *ib_event)
2054 {
2055 struct rdma_id_private *id_priv = cm_id->context;
2056 struct rdma_cm_event event;
2057 struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
2058 int ret = 0;
2059
2060 if (cma_disable_remove(id_priv, CMA_CONNECT))
2061 return 0;
2062
2063 memset(&event, 0, sizeof event);
2064 switch (ib_event->event) {
2065 case IB_CM_SIDR_REQ_ERROR:
2066 event.event = RDMA_CM_EVENT_UNREACHABLE;
2067 event.status = -ETIMEDOUT;
2068 break;
2069 case IB_CM_SIDR_REP_RECEIVED:
2070 event.param.ud.private_data = ib_event->private_data;
2071 event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
2072 if (rep->status != IB_SIDR_SUCCESS) {
2073 event.event = RDMA_CM_EVENT_UNREACHABLE;
2074 event.status = rep->status;
2075 break;
2076 }
2077 if (id_priv->qkey != rep->qkey) {
2078 event.event = RDMA_CM_EVENT_UNREACHABLE;
2079 event.status = -EINVAL;
2080 break;
2081 }
2082 ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
2083 id_priv->id.route.path_rec,
2084 &event.param.ud.ah_attr);
2085 event.param.ud.qp_num = rep->qpn;
2086 event.param.ud.qkey = rep->qkey;
2087 event.event = RDMA_CM_EVENT_ESTABLISHED;
2088 event.status = 0;
2089 break;
2090 default:
2091 printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
2092 ib_event->event);
2093 goto out;
2094 }
2095
2096 ret = id_priv->id.event_handler(&id_priv->id, &event);
2097 if (ret) {
2098 /* The IB CM destroys the CM ID when we return non-zero, so clear our reference first. */
2099 id_priv->cm_id.ib = NULL;
2100 cma_exch(id_priv, CMA_DESTROYING);
2101 cma_enable_remove(id_priv);
2102 rdma_destroy_id(&id_priv->id);
2103 return ret;
2104 }
2105 out:
2106 cma_enable_remove(id_priv);
2107 return ret;
2108 }
2109
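/*
 * Send a SIDR REQ to resolve the remote UD QP.  The private data is the
 * header written by cma_format_hdr() followed by the user's data, and the
 * CM timeout of 4.096us * 2^CMA_CM_RESPONSE_TIMEOUT is approximated in
 * milliseconds by 1 << (CMA_CM_RESPONSE_TIMEOUT - 8).
 */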
2110 static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
2111 struct rdma_conn_param *conn_param)
2112 {
2113 struct ib_cm_sidr_req_param req;
2114 struct rdma_route *route;
2115 int ret;
2116
memset(&req, 0, sizeof req);	/* avoid passing uninitialized fields to ib_send_cm_sidr_req() */
2117 req.private_data_len = sizeof(struct cma_hdr) +
2118 conn_param->private_data_len;
2119 req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
2120 if (!req.private_data)
2121 return -ENOMEM;
2122
2123 if (conn_param->private_data && conn_param->private_data_len)
2124 memcpy((void *) req.private_data + sizeof(struct cma_hdr),
2125 conn_param->private_data, conn_param->private_data_len);
2126
2127 route = &id_priv->id.route;
2128 ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route);
2129 if (ret)
2130 goto out;
2131
2132 id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device,
2133 cma_sidr_rep_handler, id_priv);
2134 if (IS_ERR(id_priv->cm_id.ib)) {
2135 ret = PTR_ERR(id_priv->cm_id.ib);
2136 goto out;
2137 }
2138
2139 req.path = route->path_rec;
2140 req.service_id = cma_get_service_id(id_priv->id.ps,
2141 &route->addr.dst_addr);
2142 req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
2143 req.max_cm_retries = CMA_MAX_CM_RETRIES;
2144
2145 ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
2146 if (ret) {
2147 ib_destroy_cm_id(id_priv->cm_id.ib);
2148 id_priv->cm_id.ib = NULL;
2149 }
2150 out:
2151 kfree(req.private_data);
2152 return ret;
2153 }
2154
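/*
 * Active-side connect over the IB CM: prefix the user's private data with
 * the CMA/SDP header, create a CM id, and send a CM REQ over the resolved
 * primary path (and the alternate path, when two were resolved).
 */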
2155 static int cma_connect_ib(struct rdma_id_private *id_priv,
2156 struct rdma_conn_param *conn_param)
2157 {
2158 struct ib_cm_req_param req;
2159 struct rdma_route *route;
2160 void *private_data;
2161 int offset, ret;
2162
2163 memset(&req, 0, sizeof req);
2164 offset = cma_user_data_offset(id_priv->id.ps);
2165 req.private_data_len = offset + conn_param->private_data_len;
2166 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
2167 if (!private_data)
2168 return -ENOMEM;
2169
2170 if (conn_param->private_data && conn_param->private_data_len)
2171 memcpy(private_data + offset, conn_param->private_data,
2172 conn_param->private_data_len);
2173
2174 id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler,
2175 id_priv);
2176 if (IS_ERR(id_priv->cm_id.ib)) {
2177 ret = PTR_ERR(id_priv->cm_id.ib);
2178 goto out;
2179 }
2180
2181 route = &id_priv->id.route;
2182 ret = cma_format_hdr(private_data, id_priv->id.ps, route);
2183 if (ret)
2184 goto out;
2185 req.private_data = private_data;
2186
2187 req.primary_path = &route->path_rec[0];
2188 if (route->num_paths == 2)
2189 req.alternate_path = &route->path_rec[1];
2190
2191 req.service_id = cma_get_service_id(id_priv->id.ps,
2192 &route->addr.dst_addr);
2193 req.qp_num = id_priv->qp_num;
2194 req.qp_type = IB_QPT_RC;
2195 req.starting_psn = id_priv->seq_num;
2196 req.responder_resources = conn_param->responder_resources;
2197 req.initiator_depth = conn_param->initiator_depth;
2198 req.flow_control = conn_param->flow_control;
2199 req.retry_count = conn_param->retry_count;
2200 req.rnr_retry_count = conn_param->rnr_retry_count;
2201 req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
2202 req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
2203 req.max_cm_retries = CMA_MAX_CM_RETRIES;
2204 req.srq = id_priv->srq ? 1 : 0;
2205
2206 ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
2207 out:
2208 if (ret && !IS_ERR(id_priv->cm_id.ib)) {
2209 ib_destroy_cm_id(id_priv->cm_id.ib);
2210 id_priv->cm_id.ib = NULL;
2211 }
2212
2213 kfree(private_data);
2214 return ret;
2215 }
2216
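/*
 * Active-side iWARP connect: copy the bound addresses into a new
 * iw_cm_id, move the QP to RTR, then hand the request to iw_cm_connect().
 */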
2217 static int cma_connect_iw(struct rdma_id_private *id_priv,
2218 struct rdma_conn_param *conn_param)
2219 {
2220 struct iw_cm_id *cm_id;
2221 struct sockaddr_in *sin;
2222 int ret;
2223 struct iw_cm_conn_param iw_param;
2224
2225 cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
2226 if (IS_ERR(cm_id)) {
2227 ret = PTR_ERR(cm_id);
2228 goto out;
2229 }
2230
2231 id_priv->cm_id.iw = cm_id;
2232
2233 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
2234 cm_id->local_addr = *sin;
2235
2236 sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
2237 cm_id->remote_addr = *sin;
2238
2239 ret = cma_modify_qp_rtr(&id_priv->id);
2240 if (ret)
2241 goto out;
2242
2243 iw_param.ord = conn_param->initiator_depth;
2244 iw_param.ird = conn_param->responder_resources;
2245 iw_param.private_data = conn_param->private_data;
2246 iw_param.private_data_len = conn_param->private_data_len;
2247 if (id_priv->id.qp)
2248 iw_param.qpn = id_priv->qp_num;
2249 else
2250 iw_param.qpn = conn_param->qp_num;
2251 ret = iw_cm_connect(cm_id, &iw_param);
2252 out:
2253 if (ret && !IS_ERR(cm_id)) {
2254 iw_destroy_cm_id(cm_id);
2255 id_priv->cm_id.iw = NULL;
2256 }
2257 return ret;
2258 }
2259
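/**
 * rdma_connect - Initiate an active connection request.
 * @id: Connection identifier; the route must already be resolved.
 * @conn_param: Connection parameters.  If the id has no rdma_cm managed
 *	QP, this also supplies the QP number and SRQ flag to advertise.
 *
 * For the UD port spaces this performs a SIDR exchange to resolve the
 * remote QP rather than forming a connection.
 */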
2260 int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
2261 {
2262 struct rdma_id_private *id_priv;
2263 int ret;
2264
2265 id_priv = container_of(id, struct rdma_id_private, id);
2266 if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT))
2267 return -EINVAL;
2268
2269 if (!id->qp) {
2270 id_priv->qp_num = conn_param->qp_num;
2271 id_priv->srq = conn_param->srq;
2272 }
2273
2274 switch (rdma_node_get_transport(id->device->node_type)) {
2275 case RDMA_TRANSPORT_IB:
2276 if (cma_is_ud_ps(id->ps))
2277 ret = cma_resolve_ib_udp(id_priv, conn_param);
2278 else
2279 ret = cma_connect_ib(id_priv, conn_param);
2280 break;
2281 case RDMA_TRANSPORT_IWARP:
2282 ret = cma_connect_iw(id_priv, conn_param);
2283 break;
2284 default:
2285 ret = -ENOSYS;
2286 break;
2287 }
2288 if (ret)
2289 goto err;
2290
2291 return 0;
2292 err:
2293 cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED);
2294 return ret;
2295 }
2296 EXPORT_SYMBOL(rdma_connect);
2297
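/*
 * Passive-side IB accept: if the id owns its QP, transition it through
 * RTR to RTS using the attributes the IB CM derived for this connection,
 * then send the CM REP.
 */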
2298 static int cma_accept_ib(struct rdma_id_private *id_priv,
2299 struct rdma_conn_param *conn_param)
2300 {
2301 struct ib_cm_rep_param rep;
2302 struct ib_qp_attr qp_attr;
2303 int qp_attr_mask, ret;
2304
2305 if (id_priv->id.qp) {
2306 ret = cma_modify_qp_rtr(&id_priv->id);
2307 if (ret)
2308 goto out;
2309
2310 qp_attr.qp_state = IB_QPS_RTS;
2311 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, &qp_attr,
2312 &qp_attr_mask);
2313 if (ret)
2314 goto out;
2315
2316 qp_attr.max_rd_atomic = conn_param->initiator_depth;
2317 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
2318 if (ret)
2319 goto out;
2320 }
2321
2322 memset(&rep, 0, sizeof rep);
2323 rep.qp_num = id_priv->qp_num;
2324 rep.starting_psn = id_priv->seq_num;
2325 rep.private_data = conn_param->private_data;
2326 rep.private_data_len = conn_param->private_data_len;
2327 rep.responder_resources = conn_param->responder_resources;
2328 rep.initiator_depth = conn_param->initiator_depth;
2329 rep.failover_accepted = 0;
2330 rep.flow_control = conn_param->flow_control;
2331 rep.rnr_retry_count = conn_param->rnr_retry_count;
2332 rep.srq = id_priv->srq ? 1 : 0;
2333
2334 ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
2335 out:
2336 return ret;
2337 }
2338
2339 static int cma_accept_iw(struct rdma_id_private *id_priv,
2340 struct rdma_conn_param *conn_param)
2341 {
2342 struct iw_cm_conn_param iw_param;
2343 int ret;
2344
2345 ret = cma_modify_qp_rtr(&id_priv->id);
2346 if (ret)
2347 return ret;
2348
2349 iw_param.ord = conn_param->initiator_depth;
2350 iw_param.ird = conn_param->responder_resources;
2351 iw_param.private_data = conn_param->private_data;
2352 iw_param.private_data_len = conn_param->private_data_len;
2353 if (id_priv->id.qp)
2354 iw_param.qpn = id_priv->qp_num;
2355 else
2356 iw_param.qpn = conn_param->qp_num;
2357
2358 return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
2359 }
2360
2361 static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
2362 enum ib_cm_sidr_status status,
2363 const void *private_data, int private_data_len)
2364 {
2365 struct ib_cm_sidr_rep_param rep;
2366
2367 memset(&rep, 0, sizeof rep);
2368 rep.status = status;
2369 if (status == IB_SIDR_SUCCESS) {
2370 rep.qp_num = id_priv->qp_num;
2371 rep.qkey = id_priv->qkey;
2372 }
2373 rep.private_data = private_data;
2374 rep.private_data_len = private_data_len;
2375
2376 return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
2377 }
2378
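/**
 * rdma_accept - Accept a connection request on the passive side.
 * @id: Identifier in the connect state, taken from an
 *	RDMA_CM_EVENT_CONNECT_REQUEST event.
 * @conn_param: Reply parameters; NULL accepts an IB connection with the
 *	values carried in the request.
 *
 * If sending the reply fails, the request is rejected and the QP is moved
 * to the error state before returning.
 */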
2379 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
2380 {
2381 struct rdma_id_private *id_priv;
2382 int ret;
2383
2384 id_priv = container_of(id, struct rdma_id_private, id);
2385 if (!cma_comp(id_priv, CMA_CONNECT))
2386 return -EINVAL;
2387
2388 if (!id->qp && conn_param) {
2389 id_priv->qp_num = conn_param->qp_num;
2390 id_priv->srq = conn_param->srq;
2391 }
2392
2393 switch (rdma_node_get_transport(id->device->node_type)) {
2394 case RDMA_TRANSPORT_IB:
2395 if (cma_is_ud_ps(id->ps)) {
2396 if (conn_param)
ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
2397 conn_param->private_data,
2398 conn_param->private_data_len);
else
ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
NULL, 0);
2399 } else if (conn_param)
2400 ret = cma_accept_ib(id_priv, conn_param);
2401 else
2402 ret = cma_rep_recv(id_priv);
2403 break;
2404 case RDMA_TRANSPORT_IWARP:
2405 ret = cma_accept_iw(id_priv, conn_param);
2406 break;
2407 default:
2408 ret = -ENOSYS;
2409 break;
2410 }
2411
2412 if (ret)
2413 goto reject;
2414
2415 return 0;
2416 reject:
2417 cma_modify_qp_err(id);
2418 rdma_reject(id, NULL, 0);
2419 return ret;
2420 }
2421 EXPORT_SYMBOL(rdma_accept);
2422
2423 int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
2424 {
2425 struct rdma_id_private *id_priv;
2426 int ret;
2427
2428 id_priv = container_of(id, struct rdma_id_private, id);
2429 if (!cma_has_cm_dev(id_priv))
2430 return -EINVAL;
2431
2432 switch (id->device->node_type) {
2433 case RDMA_NODE_IB_CA:
2434 ret = ib_cm_notify(id_priv->cm_id.ib, event);
2435 break;
2436 default:
2437 ret = 0;
2438 break;
2439 }
2440 return ret;
2441 }
2442 EXPORT_SYMBOL(rdma_notify);
2443
2444 int rdma_reject(struct rdma_cm_id *id, const void *private_data,
2445 u8 private_data_len)
2446 {
2447 struct rdma_id_private *id_priv;
2448 int ret;
2449
2450 id_priv = container_of(id, struct rdma_id_private, id);
2451 if (!cma_has_cm_dev(id_priv))
2452 return -EINVAL;
2453
2454 switch (rdma_node_get_transport(id->device->node_type)) {
2455 case RDMA_TRANSPORT_IB:
2456 if (cma_is_ud_ps(id->ps))
2457 ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
2458 private_data, private_data_len);
2459 else
2460 ret = ib_send_cm_rej(id_priv->cm_id.ib,
2461 IB_CM_REJ_CONSUMER_DEFINED, NULL,
2462 0, private_data, private_data_len);
2463 break;
2464 case RDMA_TRANSPORT_IWARP:
2465 ret = iw_cm_reject(id_priv->cm_id.iw,
2466 private_data, private_data_len);
2467 break;
2468 default:
2469 ret = -ENOSYS;
2470 break;
2471 }
2472 return ret;
2473 }
2474 EXPORT_SYMBOL(rdma_reject);
2475
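/**
 * rdma_disconnect - Disconnect, or complete a peer's disconnect of, a
 *	connection.
 * @id: Connection identifier with an active CM id.
 *
 * On IB the QP is first moved to the error state; a DREQ is then sent,
 * falling back to a DREP if the peer's disconnect is already in progress.
 */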
2476 int rdma_disconnect(struct rdma_cm_id *id)
2477 {
2478 struct rdma_id_private *id_priv;
2479 int ret;
2480
2481 id_priv = container_of(id, struct rdma_id_private, id);
2482 if (!cma_has_cm_dev(id_priv))
2483 return -EINVAL;
2484
2485 switch (rdma_node_get_transport(id->device->node_type)) {
2486 case RDMA_TRANSPORT_IB:
2487 ret = cma_modify_qp_err(id);
2488 if (ret)
2489 goto out;
2490 /* Initiate or respond to a disconnect. */
2491 if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
2492 ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
2493 break;
2494 case RDMA_TRANSPORT_IWARP:
2495 ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
2496 break;
2497 default:
2498 ret = -EINVAL;
2499 break;
2500 }
2501 out:
2502 return ret;
2503 }
2504 EXPORT_SYMBOL(rdma_disconnect);
2505
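/*
 * Completion callback for ib_sa_join_multicast().  On success, attach any
 * rdma_cm managed QP to the group and report RDMA_CM_EVENT_MULTICAST_JOIN
 * with the AH attributes, the multicast QPN (0xFFFFFF) and the group
 * qkey; on failure report RDMA_CM_EVENT_MULTICAST_ERROR.
 */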
2506 static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
2507 {
2508 struct rdma_id_private *id_priv;
2509 struct cma_multicast *mc = multicast->context;
2510 struct rdma_cm_event event;
2511 int ret;
2512
2513 id_priv = mc->id_priv;
2514 if (cma_disable_remove(id_priv, CMA_ADDR_BOUND) &&
2515 cma_disable_remove(id_priv, CMA_ADDR_RESOLVED))
2516 return 0;
2517
2518 if (!status && id_priv->id.qp)
2519 status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
2520 multicast->rec.mlid);
2521
2522 memset(&event, 0, sizeof event);
2523 event.status = status;
2524 event.param.ud.private_data = mc->context;
2525 if (!status) {
2526 event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
2527 ib_init_ah_from_mcmember(id_priv->id.device,
2528 id_priv->id.port_num, &multicast->rec,
2529 &event.param.ud.ah_attr);
2530 event.param.ud.qp_num = 0xFFFFFF;
2531 event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
2532 } else
2533 event.event = RDMA_CM_EVENT_MULTICAST_ERROR;
2534
2535 ret = id_priv->id.event_handler(&id_priv->id, &event);
2536 if (ret) {
2537 cma_exch(id_priv, CMA_DESTROYING);
2538 cma_enable_remove(id_priv);
2539 rdma_destroy_id(&id_priv->id);
2540 return 0;
2541 }
2542
2543 cma_enable_remove(id_priv);
2544 return 0;
2545 }
2546
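/*
 * Derive the MGID to join: zero for a wildcard address, the raw IPv6
 * address when it already encodes an SA-assigned MGID, otherwise the
 * standard IPv4 multicast mapping stamped with the partition key (plus
 * the RDMA CM signature byte for RDMA_PS_UDP).
 */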
2547 static void cma_set_mgid(struct rdma_id_private *id_priv,
2548 struct sockaddr *addr, union ib_gid *mgid)
2549 {
2550 unsigned char mc_map[MAX_ADDR_LEN];
2551 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
2552 struct sockaddr_in *sin = (struct sockaddr_in *) addr;
2553 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;
2554
2555 if (cma_any_addr(addr)) {
2556 memset(mgid, 0, sizeof *mgid);
2557 } else if ((addr->sa_family == AF_INET6) &&
2558 ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
2559 0xFF10A01B)) {
2560 /* IPv6 address is an SA-assigned MGID. */
2561 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
2562 } else {
2563 ip_ib_mc_map(sin->sin_addr.s_addr, mc_map);
2564 if (id_priv->id.ps == RDMA_PS_UDP)
2565 mc_map[7] = 0x01; /* Use RDMA CM signature */
2566 mc_map[8] = ib_addr_get_pkey(dev_addr) >> 8;
2567 mc_map[9] = (unsigned char) ib_addr_get_pkey(dev_addr);
2568 *mgid = *(union ib_gid *) (mc_map + 4);
2569 }
2570 }
2571
2572 static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
2573 struct cma_multicast *mc)
2574 {
2575 struct ib_sa_mcmember_rec rec;
2576 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
2577 ib_sa_comp_mask comp_mask;
2578 int ret;
2579
2580 ib_addr_get_mgid(dev_addr, &rec.mgid);
2581 ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
2582 &rec.mgid, &rec);
2583 if (ret)
2584 return ret;
2585
2586 cma_set_mgid(id_priv, &mc->addr, &rec.mgid);
2587 if (id_priv->id.ps == RDMA_PS_UDP)
2588 rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
2589 ib_addr_get_sgid(dev_addr, &rec.port_gid);
2590 rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
2591 rec.join_state = 1;
2592
2593 comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
2594 IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
2595 IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
2596 IB_SA_MCMEMBER_REC_FLOW_LABEL |
2597 IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
2598
2599 mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
2600 id_priv->id.port_num, &rec,
2601 comp_mask, GFP_KERNEL,
2602 cma_ib_mc_handler, mc);
2603 if (IS_ERR(mc->multicast.ib))
2604 return PTR_ERR(mc->multicast.ib);
2605
2606 return 0;
2607 }
2608
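/**
 * rdma_join_multicast - Join an IB multicast group mapped from an IP
 *	address.
 * @id: Communication identifier whose address is bound or resolved.
 * @addr: Multicast (or wildcard) address identifying the group.
 * @context: User context returned as the join event's private_data.
 *
 * The join completes asynchronously; cma_ib_mc_handler() later reports
 * RDMA_CM_EVENT_MULTICAST_JOIN or RDMA_CM_EVENT_MULTICAST_ERROR.  A
 * minimal caller sketch (my_id and my_ctx are hypothetical, and
 * 224.0.1.22 is just an example group):
 *
 *	struct sockaddr_in maddr = {
 *		.sin_family = AF_INET,
 *		.sin_addr.s_addr = htonl(0xe0000116),
 *	};
 *	ret = rdma_join_multicast(my_id, (struct sockaddr *) &maddr, my_ctx);
 */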
2609 int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
2610 void *context)
2611 {
2612 struct rdma_id_private *id_priv;
2613 struct cma_multicast *mc;
2614 int ret;
2615
2616 id_priv = container_of(id, struct rdma_id_private, id);
2617 if (!cma_comp(id_priv, CMA_ADDR_BOUND) &&
2618 !cma_comp(id_priv, CMA_ADDR_RESOLVED))
2619 return -EINVAL;
2620
2621 mc = kmalloc(sizeof *mc, GFP_KERNEL);
2622 if (!mc)
2623 return -ENOMEM;
2624
2625 memcpy(&mc->addr, addr, ip_addr_size(addr));
2626 mc->context = context;
2627 mc->id_priv = id_priv;
2628
2629 spin_lock(&id_priv->lock);
2630 list_add(&mc->list, &id_priv->mc_list);
2631 spin_unlock(&id_priv->lock);
2632
2633 switch (rdma_node_get_transport(id->device->node_type)) {
2634 case RDMA_TRANSPORT_IB:
2635 ret = cma_join_ib_multicast(id_priv, mc);
2636 break;
2637 default:
2638 ret = -ENOSYS;
2639 break;
2640 }
2641
2642 if (ret) {
2643 spin_lock_irq(&id_priv->lock);
2644 list_del(&mc->list);
2645 spin_unlock_irq(&id_priv->lock);
2646 kfree(mc);
2647 }
2648 return ret;
2649 }
2650 EXPORT_SYMBOL(rdma_join_multicast);
2651
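/**
 * rdma_leave_multicast - Leave a multicast group joined through this id.
 * @id: Communication identifier used for the join.
 * @addr: Multicast address passed to rdma_join_multicast().
 *
 * Detaches any rdma_cm managed QP from the group and frees the SA
 * multicast join.
 */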
2652 void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
2653 {
2654 struct rdma_id_private *id_priv;
2655 struct cma_multicast *mc;
2656
2657 id_priv = container_of(id, struct rdma_id_private, id);
2658 spin_lock_irq(&id_priv->lock);
2659 list_for_each_entry(mc, &id_priv->mc_list, list) {
2660 if (!memcmp(&mc->addr, addr, ip_addr_size(addr))) {
2661 list_del(&mc->list);
2662 spin_unlock_irq(&id_priv->lock);
2663
2664 if (id->qp)
2665 ib_detach_mcast(id->qp,
2666 &mc->multicast.ib->rec.mgid,
2667 mc->multicast.ib->rec.mlid);
2668 ib_sa_free_multicast(mc->multicast.ib);
2669 kfree(mc);
2670 return;
2671 }
2672 }
2673 spin_unlock_irq(&id_priv->lock);
2674 }
2675 EXPORT_SYMBOL(rdma_leave_multicast);
2676
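/*
 * ib_client add callback: track the new device and re-issue every
 * wildcard listen against it so existing listeners see its ports.
 */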
2677 static void cma_add_one(struct ib_device *device)
2678 {
2679 struct cma_device *cma_dev;
2680 struct rdma_id_private *id_priv;
2681
2682 cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
2683 if (!cma_dev)
2684 return;
2685
2686 cma_dev->device = device;
2687
2688 init_completion(&cma_dev->comp);
2689 atomic_set(&cma_dev->refcount, 1);
2690 INIT_LIST_HEAD(&cma_dev->id_list);
2691 ib_set_client_data(device, &cma_client, cma_dev);
2692
2693 mutex_lock(&lock);
2694 list_add_tail(&cma_dev->list, &dev_list);
2695 list_for_each_entry(id_priv, &listen_any_list, list)
2696 cma_listen_on_dev(id_priv, cma_dev);
2697 mutex_unlock(&lock);
2698 }
2699
2700 static int cma_remove_id_dev(struct rdma_id_private *id_priv)
2701 {
2702 struct rdma_cm_event event;
2703 enum cma_state state;
2704
2705 /* Record that we want to remove the device */
2706 state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
2707 if (state == CMA_DESTROYING)
2708 return 0;
2709
2710 cma_cancel_operation(id_priv, state);
2711 wait_event(id_priv->wait_remove, !atomic_read(&id_priv->dev_remove));
2712
2713 /* Check for destruction from another callback. */
2714 if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
2715 return 0;
2716
2717 memset(&event, 0, sizeof event);
2718 event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
2719 return id_priv->id.event_handler(&id_priv->id, &event);
2720 }
2721
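/*
 * Tear down every id attached to a departing device: internal listens are
 * destroyed directly, other ids are notified via cma_remove_id_dev() and
 * destroyed if their handler asks for it.  Waits for the last reference
 * to the cma_device before returning.
 */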
2722 static void cma_process_remove(struct cma_device *cma_dev)
2723 {
2724 struct rdma_id_private *id_priv;
2725 int ret;
2726
2727 mutex_lock(&lock);
2728 while (!list_empty(&cma_dev->id_list)) {
2729 id_priv = list_entry(cma_dev->id_list.next,
2730 struct rdma_id_private, list);
2731
2732 if (cma_internal_listen(id_priv)) {
2733 cma_destroy_listen(id_priv);
2734 continue;
2735 }
2736
2737 list_del_init(&id_priv->list);
2738 atomic_inc(&id_priv->refcount);
2739 mutex_unlock(&lock);
2740
2741 ret = cma_remove_id_dev(id_priv);
2742 cma_deref_id(id_priv);
2743 if (ret)
2744 rdma_destroy_id(&id_priv->id);
2745
2746 mutex_lock(&lock);
2747 }
2748 mutex_unlock(&lock);
2749
2750 cma_deref_dev(cma_dev);
2751 wait_for_completion(&cma_dev->comp);
2752 }
2753
2754 static void cma_remove_one(struct ib_device *device)
2755 {
2756 struct cma_device *cma_dev;
2757
2758 cma_dev = ib_get_client_data(device, &cma_client);
2759 if (!cma_dev)
2760 return;
2761
2762 mutex_lock(&lock);
2763 list_del(&cma_dev->list);
2764 mutex_unlock(&lock);
2765
2766 cma_process_remove(cma_dev);
2767 kfree(cma_dev);
2768 }
2769
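/*
 * Module init: seed next_port at a random offset within the local port
 * range, create the single-threaded workqueue that serializes CMA work,
 * and register with the address resolution, SA, and IB client layers.
 */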
2770 static int cma_init(void)
2771 {
2772 int ret;
2773
2774 get_random_bytes(&next_port, sizeof next_port);
2775 next_port = ((unsigned int) next_port %
2776 (sysctl_local_port_range[1] - sysctl_local_port_range[0] + 1)) +
2777 sysctl_local_port_range[0];
2778 cma_wq = create_singlethread_workqueue("rdma_cm");
2779 if (!cma_wq)
2780 return -ENOMEM;
2781
2782 ib_sa_register_client(&sa_client);
2783 rdma_addr_register_client(&addr_client);
2784
2785 ret = ib_register_client(&cma_client);
2786 if (ret)
2787 goto err;
2788 return 0;
2789
2790 err:
2791 rdma_addr_unregister_client(&addr_client);
2792 ib_sa_unregister_client(&sa_client);
2793 destroy_workqueue(cma_wq);
2794 return ret;
2795 }
2796
2797 static void cma_cleanup(void)
2798 {
2799 ib_unregister_client(&cma_client);
2800 rdma_addr_unregister_client(&addr_client);
2801 ib_sa_unregister_client(&sa_client);
2802 destroy_workqueue(cma_wq);
2803 idr_destroy(&sdp_ps);
2804 idr_destroy(&tcp_ps);
2805 idr_destroy(&udp_ps);
2806 idr_destroy(&ipoib_ps);
2807 }
2808
2809 module_init(cma_init);
2810 module_exit(cma_cleanup);