/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This Software is licensed under one of the following licenses:
 *
 * 1) under the terms of the "Common Public License 1.0" a copy of which is
 *    available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/cpl.php.
 *
 * 2) under the terms of the "The BSD License" a copy of which is
 *    available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/bsd-license.php.
 *
 * 3) under the terms of the "GNU General Public License (GPL) Version 2" a
 *    copy of which is available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/gpl-license.php.
 *
 * Licensee has the right to choose one of the above licenses.
 *
 * Redistributions of source code must retain the above copyright
 * notice and one of the license notices.
 *
 * Redistributions in binary form must reproduce both the above copyright
 * notice, one of the license notices in the documentation
 * and/or other materials provided with the distribution.
 */
#include <linux/completion.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>

#include <net/tcp.h>	/* for sysctl_local_port_range and PROT_SOCK */

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device);

static struct ib_client cma_client = {
	.name   = "cma",
	.add    = cma_add_one,
	.remove = cma_remove_one
};

static struct ib_sa_client sa_client;
static struct rdma_addr_client addr_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static DEFINE_IDR(sdp_ps);
static DEFINE_IDR(tcp_ps);
static DEFINE_IDR(udp_ps);
static int next_port;
struct cma_device {
	struct list_head	list;
	struct ib_device	*device;
	__be64			node_guid;
	struct completion	comp;
	atomic_t		refcount;
	struct list_head	id_list;
};

enum cma_state {
	CMA_IDLE,
	CMA_ADDR_QUERY,
	CMA_ADDR_RESOLVED,
	CMA_ROUTE_QUERY,
	CMA_ROUTE_RESOLVED,
	CMA_CONNECT,
	CMA_DISCONNECT,
	CMA_ADDR_BOUND,
	CMA_LISTEN,
	CMA_DEVICE_REMOVAL,
	CMA_DESTROYING
};

struct rdma_bind_list {
	struct idr		*ps;
	struct hlist_head	owners;
	unsigned short		port;
};
/*
 * Device removal can occur at anytime, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */
struct rdma_id_private {
	struct rdma_cm_id	id;

	struct rdma_bind_list	*bind_list;
	struct hlist_node	node;
	struct list_head	list;
	struct list_head	listen_list;
	struct cma_device	*cma_dev;

	enum cma_state		state;
	spinlock_t		lock;
	struct completion	comp;
	atomic_t		refcount;
	wait_queue_head_t	wait_remove;
	atomic_t		dev_remove;

	int			backlog;
	struct ib_sa_query	*query;
	int			query_id;
	union {
		struct ib_cm_id	*ib;
		struct iw_cm_id	*iw;
	} cm_id;

	u32			seq_num;
	u32			qp_num;
	u8			srq;
};

struct cma_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	enum cma_state		old_state;
	enum cma_state		new_state;
	struct rdma_cm_event	event;
};
union cma_ip_addr {
	struct in6_addr ip6;
	struct {
		__u32 pad[3];
		__u32 addr;
	} ip4;
};

struct cma_hdr {
	u8 cma_version;
	u8 ip_version;	/* IP version: 7:4 */
	__u16 port;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

struct sdp_hh {
	u8 bsdh[16];
	u8 sdp_version; /* Major version: 7:4 */
	u8 ip_version;	/* IP version: 7:4 */
	u8 sdp_specific1[10];
	__u16 port;
	__u16 sdp_specific2;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

struct sdp_hah {
	u8 bsdh[16];
	u8 sdp_version;
};
#define CMA_VERSION 0x00
#define SDP_MAJ_VERSION 0x2
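
/*
 * State transitions below are serialized with id_priv->lock: cma_comp()
 * tests the current state, cma_comp_exch() atomically moves the id from
 * 'comp' to 'exch', and cma_exch() swaps in a new state unconditionally,
 * returning the old one.
 */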
static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	ret = (id_priv->state == comp);
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum cma_state comp, enum cma_state exch)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static enum cma_state cma_exch(struct rdma_id_private *id_priv,
			       enum cma_state exch)
{
	unsigned long flags;
	enum cma_state old;

	spin_lock_irqsave(&id_priv->lock, flags);
	old = id_priv->state;
	id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return old;
}
static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
{
	return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}

static inline u8 sdp_get_majv(u8 sdp_version)
{
	return sdp_version >> 4;
}

static inline u8 sdp_get_ip_ver(struct sdp_hh *hh)
{
	return hh->ip_version >> 4;
}

static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
{
	hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
}
static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	atomic_inc(&cma_dev->refcount);
	id_priv->cma_dev = cma_dev;
	id_priv->id.device = cma_dev->device;
	list_add_tail(&id_priv->list, &cma_dev->id_list);
}

static inline void cma_deref_dev(struct cma_device *cma_dev)
{
	if (atomic_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);
}

static void cma_detach_from_dev(struct rdma_id_private *id_priv)
{
	list_del(&id_priv->list);
	cma_deref_dev(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
}
static int cma_acquire_dev(struct rdma_id_private *id_priv)
{
	enum rdma_node_type dev_type = id_priv->id.route.addr.dev_addr.dev_type;
	struct cma_device *cma_dev;
	union ib_gid gid;
	int ret = -ENODEV;

	switch (rdma_node_get_transport(dev_type)) {
	case RDMA_TRANSPORT_IB:
		ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
		break;
	case RDMA_TRANSPORT_IWARP:
		iw_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
		break;
	default:
		return -ENODEV;
	}

	list_for_each_entry(cma_dev, &dev_list, list) {
		ret = ib_find_cached_gid(cma_dev->device, &gid,
					 &id_priv->id.port_num, NULL);
		if (!ret) {
			cma_attach_to_dev(id_priv, cma_dev);
			break;
		}
	}
	return ret;
}
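
/*
 * Note: callers of cma_acquire_dev() hold the global 'lock' mutex, which
 * keeps dev_list stable while the id's source GID is matched against each
 * registered device's cached GID table.
 */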
static void cma_deref_id(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}

static void cma_release_remove(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->dev_remove))
		wake_up(&id_priv->wait_remove);
}
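
/*
 * Callback entry points pair atomic_inc(&id_priv->dev_remove) with
 * cma_release_remove(); cma_remove_id_dev() waits on wait_remove until
 * the count drops to zero, so a device-removal notice is never delivered
 * while another callback is still in progress.
 */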
struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
				  void *context, enum rdma_port_space ps)
{
	struct rdma_id_private *id_priv;

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);

	id_priv->state = CMA_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
	spin_lock_init(&id_priv->lock);
	init_completion(&id_priv->comp);
	atomic_set(&id_priv->refcount, 1);
	init_waitqueue_head(&id_priv->wait_remove);
	atomic_set(&id_priv->dev_remove, 0);
	INIT_LIST_HEAD(&id_priv->listen_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);

	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);
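
/*
 * A minimal sketch of the intended active-side calling sequence (names
 * here are illustrative, and a consumer-supplied event handler drives
 * each step as the corresponding asynchronous event arrives):
 *
 *	id = rdma_create_id(handler, ctx, RDMA_PS_TCP);
 *	rdma_resolve_addr(id, NULL, dst, timeout);	-> ADDR_RESOLVED
 *	rdma_resolve_route(id, timeout);		-> ROUTE_RESOLVED
 *	rdma_create_qp(id, pd, &init_attr);
 *	rdma_connect(id, &conn_param);			-> ESTABLISHED
 */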
static int cma_init_ib_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	struct rdma_dev_addr *dev_addr;
	int ret;

	dev_addr = &id_priv->id.route.addr.dev_addr;
	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  ib_addr_get_pkey(dev_addr),
				  &qp_attr.pkey_index);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.qp_access_flags = 0;
	qp_attr.port_num = id_priv->id.port_num;
	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS |
					  IB_QP_PKEY_INDEX | IB_QP_PORT);
}

static int cma_init_iw_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;

	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;

	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS);
}

int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct rdma_id_private *id_priv;
	struct ib_qp *qp;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device)
		return -EINVAL;

	qp = ib_create_qp(pd, qp_init_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_init_ib_qp(id_priv, qp);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_init_iw_qp(id_priv, qp);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret)
		goto err;

	id->qp = qp;
	id_priv->qp_num = qp->qp_num;
	id_priv->srq = (qp->srq != NULL);
	return 0;
err:
	ib_destroy_qp(qp);
	return ret;
}
EXPORT_SYMBOL(rdma_create_qp);

void rdma_destroy_qp(struct rdma_cm_id *id)
{
	ib_destroy_qp(id->qp);
}
EXPORT_SYMBOL(rdma_destroy_qp);
static int cma_modify_qp_rtr(struct rdma_cm_id *id)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	if (!id->qp)
		return 0;

	/* Need to update QP attributes from default values. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	ret = ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);
}

static int cma_modify_qp_rts(struct rdma_cm_id *id)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	if (!id->qp)
		return 0;

	qp_attr.qp_state = IB_QPS_RTS;
	ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);
}

static int cma_modify_qp_err(struct rdma_cm_id *id)
{
	struct ib_qp_attr qp_attr;

	if (!id->qp)
		return 0;

	qp_attr.qp_state = IB_QPS_ERR;
	return ib_modify_qp(id->qp, &qp_attr, IB_QP_STATE);
}

int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
		      int *qp_attr_mask)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
					 qp_attr_mask);
		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
					 qp_attr_mask);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);
static inline int cma_zero_addr(struct sockaddr *addr)
{
	struct in6_addr *ip6;

	if (addr->sa_family == AF_INET)
		return ZERONET(((struct sockaddr_in *) addr)->sin_addr.s_addr);
	else {
		ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
		return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
			ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
	}
}

static inline int cma_loopback_addr(struct sockaddr *addr)
{
	return LOOPBACK(((struct sockaddr_in *) addr)->sin_addr.s_addr);
}

static inline int cma_any_addr(struct sockaddr *addr)
{
	return cma_zero_addr(addr) || cma_loopback_addr(addr);
}

static inline __be16 cma_port(struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET)
		return ((struct sockaddr_in *) addr)->sin_port;
	else
		return ((struct sockaddr_in6 *) addr)->sin6_port;
}

static inline int cma_any_port(struct sockaddr *addr)
{
	return !cma_port(addr);
}

static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
			    u8 *ip_ver, __u16 *port,
			    union cma_ip_addr **src, union cma_ip_addr **dst)
{
	switch (ps) {
	case RDMA_PS_SDP:
		if (sdp_get_majv(((struct sdp_hh *) hdr)->sdp_version) !=
		    SDP_MAJ_VERSION)
			return -EINVAL;

		*ip_ver	= sdp_get_ip_ver(hdr);
		*port	= ((struct sdp_hh *) hdr)->port;
		*src	= &((struct sdp_hh *) hdr)->src_addr;
		*dst	= &((struct sdp_hh *) hdr)->dst_addr;
		break;
	default:
		if (((struct cma_hdr *) hdr)->cma_version != CMA_VERSION)
			return -EINVAL;

		*ip_ver	= cma_get_ip_ver(hdr);
		*port	= ((struct cma_hdr *) hdr)->port;
		*src	= &((struct cma_hdr *) hdr)->src_addr;
		*dst	= &((struct cma_hdr *) hdr)->dst_addr;
		break;
	}

	if (*ip_ver != 4 && *ip_ver != 6)
		return -EINVAL;
	return 0;
}
static void cma_save_net_info(struct rdma_addr *addr,
			      struct rdma_addr *listen_addr,
			      u8 ip_ver, __u16 port,
			      union cma_ip_addr *src, union cma_ip_addr *dst)
{
	struct sockaddr_in *listen4, *ip4;
	struct sockaddr_in6 *listen6, *ip6;

	switch (ip_ver) {
	case 4:
		listen4 = (struct sockaddr_in *) &listen_addr->src_addr;
		ip4 = (struct sockaddr_in *) &addr->src_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = dst->ip4.addr;
		ip4->sin_port = listen4->sin_port;

		ip4 = (struct sockaddr_in *) &addr->dst_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = src->ip4.addr;
		ip4->sin_port = port;
		break;
	case 6:
		listen6 = (struct sockaddr_in6 *) &listen_addr->src_addr;
		ip6 = (struct sockaddr_in6 *) &addr->src_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = dst->ip6;
		ip6->sin6_port = listen6->sin6_port;

		ip6 = (struct sockaddr_in6 *) &addr->dst_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = src->ip6;
		ip6->sin6_port = port;
		break;
	default:
		break;
	}
}

static inline int cma_user_data_offset(enum rdma_port_space ps)
{
	switch (ps) {
	case RDMA_PS_SDP:
		return 0;
	default:
		return sizeof(struct cma_hdr);
	}
}
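
/*
 * The offset above sizes the transport header that cma_format_hdr()
 * prepends to the consumer's private data; SDP carries addressing in its
 * own hello header, so its offset is zero.
 */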
static void cma_cancel_route(struct rdma_id_private *id_priv)
{
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (id_priv->query)
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
		break;
	default:
		break;
	}
}

static inline int cma_internal_listen(struct rdma_id_private *id_priv)
{
	return (id_priv->state == CMA_LISTEN) && id_priv->cma_dev &&
	       cma_any_addr(&id_priv->id.route.addr.src_addr);
}

static void cma_destroy_listen(struct rdma_id_private *id_priv)
{
	cma_exch(id_priv, CMA_DESTROYING);

	if (id_priv->cma_dev) {
		switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
		case RDMA_TRANSPORT_IB:
			if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
				ib_destroy_cm_id(id_priv->cm_id.ib);
			break;
		case RDMA_TRANSPORT_IWARP:
			if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
				iw_destroy_cm_id(id_priv->cm_id.iw);
			break;
		default:
			break;
		}
		cma_detach_from_dev(id_priv);
	}
	list_del(&id_priv->listen_list);

	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);

	kfree(id_priv);
}

static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
	struct rdma_id_private *dev_id_priv;

	mutex_lock(&lock);
	list_del(&id_priv->list);

	while (!list_empty(&id_priv->listen_list)) {
		dev_id_priv = list_entry(id_priv->listen_list.next,
					 struct rdma_id_private, listen_list);
		cma_destroy_listen(dev_id_priv);
	}
	mutex_unlock(&lock);
}

static void cma_cancel_operation(struct rdma_id_private *id_priv,
				 enum cma_state state)
{
	switch (state) {
	case CMA_ADDR_QUERY:
		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
		break;
	case CMA_ROUTE_QUERY:
		cma_cancel_route(id_priv);
		break;
	case CMA_LISTEN:
		if (cma_any_addr(&id_priv->id.route.addr.src_addr) &&
		    !id_priv->cma_dev)
			cma_cancel_listens(id_priv);
		break;
	default:
		break;
	}
}

static void cma_release_port(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;

	if (!bind_list)
		return;

	mutex_lock(&lock);
	hlist_del(&id_priv->node);
	if (hlist_empty(&bind_list->owners)) {
		idr_remove(bind_list->ps, bind_list->port);
		kfree(bind_list);
	}
	mutex_unlock(&lock);
}

void rdma_destroy_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	enum cma_state state;

	id_priv = container_of(id, struct rdma_id_private, id);
	state = cma_exch(id_priv, CMA_DESTROYING);
	cma_cancel_operation(id_priv, state);

	mutex_lock(&lock);
	if (id_priv->cma_dev) {
		mutex_unlock(&lock);
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
			if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
				ib_destroy_cm_id(id_priv->cm_id.ib);
			break;
		case RDMA_TRANSPORT_IWARP:
			if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
				iw_destroy_cm_id(id_priv->cm_id.iw);
			break;
		default:
			break;
		}
		mutex_lock(&lock);
		cma_detach_from_dev(id_priv);
	}
	mutex_unlock(&lock);

	cma_release_port(id_priv);
	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);

	kfree(id_priv->id.route.path_rec);
	kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);
static int cma_rep_recv(struct rdma_id_private *id_priv)
{
	int ret;

	ret = cma_modify_qp_rtr(&id_priv->id);
	if (ret)
		goto reject;

	ret = cma_modify_qp_rts(&id_priv->id);
	if (ret)
		goto reject;

	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(&id_priv->id);
	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
		       NULL, 0, NULL, 0);
	return ret;
}

static int cma_verify_rep(struct rdma_id_private *id_priv, void *data)
{
	if (id_priv->id.ps == RDMA_PS_SDP &&
	    sdp_get_majv(((struct sdp_hah *) data)->sdp_version) !=
	    SDP_MAJ_VERSION)
		return -EINVAL;
	return 0;
}

static void cma_set_rep_event_data(struct rdma_cm_event *event,
				   struct ib_cm_rep_event_param *rep_data,
				   void *private_data)
{
	event->param.conn.private_data = private_data;
	event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
	event->param.conn.responder_resources = rep_data->responder_resources;
	event->param.conn.initiator_depth = rep_data->initiator_depth;
	event->param.conn.flow_control = rep_data->flow_control;
	event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
	event->param.conn.srq = rep_data->srq;
	event->param.conn.qp_num = rep_data->remote_qpn;
}
static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	int ret = 0;

	atomic_inc(&id_priv->dev_remove);
	if (!cma_comp(id_priv, CMA_CONNECT))
		goto out;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_REQ_ERROR:
	case IB_CM_REP_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_REP_RECEIVED:
		event.status = cma_verify_rep(id_priv, ib_event->private_data);
		if (event.status)
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
		else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
			event.status = cma_rep_recv(id_priv);
			event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
						     RDMA_CM_EVENT_ESTABLISHED;
		} else
			event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
		cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
				       ib_event->private_data);
		break;
	case IB_CM_RTU_RECEIVED:
	case IB_CM_USER_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	case IB_CM_DREQ_ERROR:
		event.status = -ETIMEDOUT; /* fall through */
	case IB_CM_DREQ_RECEIVED:
	case IB_CM_DREP_RECEIVED:
		if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
			goto out;
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IB_CM_TIMEWAIT_EXIT:
	case IB_CM_MRA_RECEIVED:
		/* ignore event */
		goto out;
	case IB_CM_REJ_RECEIVED:
		cma_modify_qp_err(&id_priv->id);
		event.status = ib_event->param.rej_rcvd.reason;
		event.event = RDMA_CM_EVENT_REJECTED;
		event.param.conn.private_data = ib_event->private_data;
		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		cma_release_remove(id_priv);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	cma_release_remove(id_priv);
	return ret;
}
static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
					       struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	struct rdma_route *rt;
	union cma_ip_addr *src, *dst;
	__u16 port;
	u8 ip_ver;

	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
			     &ip_ver, &port, &src, &dst))
		goto err;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps);
	if (IS_ERR(id))
		goto err;

	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
			  ip_ver, port, src, dst);

	rt = &id->route;
	rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
	rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
			       GFP_KERNEL);
	if (!rt->path_rec)
		goto destroy_id;

	rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
	if (rt->num_paths == 2)
		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

	ib_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
	ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
	ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
	rt->addr.dev_addr.dev_type = RDMA_NODE_IB_CA;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->state = CMA_CONNECT;
	return id_priv;

destroy_id:
	rdma_destroy_id(id);
err:
	return NULL;
}

static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
					      struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	union cma_ip_addr *src, *dst;
	__u16 port;
	u8 ip_ver;
	int ret;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps);
	if (IS_ERR(id))
		return NULL;

	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
			     &ip_ver, &port, &src, &dst))
		goto err;

	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
			  ip_ver, port, src, dst);

	ret = rdma_translate_ip(&id->route.addr.src_addr,
				&id->route.addr.dev_addr);
	if (ret)
		goto err;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->state = CMA_CONNECT;
	return id_priv;
err:
	rdma_destroy_id(id);
	return NULL;
}
static void cma_set_req_event_data(struct rdma_cm_event *event,
				   struct ib_cm_req_event_param *req_data,
				   void *private_data, int offset)
{
	event->param.conn.private_data = private_data + offset;
	event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
	event->param.conn.responder_resources = req_data->responder_resources;
	event->param.conn.initiator_depth = req_data->initiator_depth;
	event->param.conn.flow_control = req_data->flow_control;
	event->param.conn.retry_count = req_data->retry_count;
	event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
	event->param.conn.srq = req_data->srq;
	event->param.conn.qp_num = req_data->remote_qpn;
}
static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *listen_id, *conn_id;
	struct rdma_cm_event event;
	int offset, ret;

	listen_id = cm_id->context;
	atomic_inc(&listen_id->dev_remove);
	if (!cma_comp(listen_id, CMA_LISTEN)) {
		ret = -ECONNABORTED;
		goto out;
	}

	memset(&event, 0, sizeof event);
	offset = cma_user_data_offset(listen_id->id.ps);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	if (listen_id->id.ps == RDMA_PS_UDP) {
		conn_id = cma_new_udp_id(&listen_id->id, ib_event);
		event.param.ud.private_data = ib_event->private_data + offset;
		event.param.ud.private_data_len =
				IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
	} else {
		conn_id = cma_new_conn_id(&listen_id->id, ib_event);
		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
				       ib_event->private_data, offset);
	}
	if (!conn_id) {
		ret = -ENOMEM;
		goto out;
	}

	atomic_inc(&conn_id->dev_remove);
	mutex_lock(&lock);
	ret = cma_acquire_dev(conn_id);
	mutex_unlock(&lock);
	if (ret)
		goto release_conn_id;

	conn_id->cm_id.ib = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_ib_handler;

	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (!ret)
		goto out;

	/* Destroy the CM ID by returning a non-zero value. */
	conn_id->cm_id.ib = NULL;

release_conn_id:
	cma_exch(conn_id, CMA_DESTROYING);
	cma_release_remove(conn_id);
	rdma_destroy_id(&conn_id->id);

out:
	cma_release_remove(listen_id);
	return ret;
}

static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
{
	return cpu_to_be64(((u64) ps << 16) + be16_to_cpu(cma_port(addr)));
}
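
/*
 * The service ID packs the port space into the upper bits and the IP port
 * into the low 16; e.g. RDMA_PS_TCP (0x0106) with port 80 yields
 * ((u64) 0x0106 << 16) + 80 = 0x0000000001060050, byte-swapped to network
 * order by cpu_to_be64().
 */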
static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
				 struct ib_cm_compare_data *compare)
{
	struct cma_hdr *cma_data, *cma_mask;
	struct sdp_hh *sdp_data, *sdp_mask;
	__u32 ip4_addr;
	struct in6_addr ip6_addr;

	memset(compare, 0, sizeof *compare);
	cma_data = (void *) compare->data;
	cma_mask = (void *) compare->mask;
	sdp_data = (void *) compare->data;
	sdp_mask = (void *) compare->mask;

	switch (addr->sa_family) {
	case AF_INET:
		ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 4);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip4.addr = ip4_addr;
			sdp_mask->dst_addr.ip4.addr = ~0;
		} else {
			cma_set_ip_ver(cma_data, 4);
			cma_set_ip_ver(cma_mask, 0xF);
			cma_data->dst_addr.ip4.addr = ip4_addr;
			cma_mask->dst_addr.ip4.addr = ~0;
		}
		break;
	case AF_INET6:
		ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 6);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip6 = ip6_addr;
			memset(&sdp_mask->dst_addr.ip6, 0xFF,
			       sizeof sdp_mask->dst_addr.ip6);
		} else {
			cma_set_ip_ver(cma_data, 6);
			cma_set_ip_ver(cma_mask, 0xF);
			cma_data->dst_addr.ip6 = ip6_addr;
			memset(&cma_mask->dst_addr.ip6, 0xFF,
			       sizeof cma_mask->dst_addr.ip6);
		}
		break;
	default:
		break;
	}
}
static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
	struct rdma_id_private *id_priv = iw_id->context;
	struct rdma_cm_event event;
	struct sockaddr_in *sin;
	int ret = 0;

	memset(&event, 0, sizeof event);
	atomic_inc(&id_priv->dev_remove);

	switch (iw_event->event) {
	case IW_CM_EVENT_CLOSE:
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
		*sin = iw_event->local_addr;
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
		*sin = iw_event->remote_addr;
		switch (iw_event->status) {
		case 0:
			event.event = RDMA_CM_EVENT_ESTABLISHED;
			break;
		case -ECONNRESET:
		case -ECONNREFUSED:
			event.event = RDMA_CM_EVENT_REJECTED;
			break;
		case -ETIMEDOUT:
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			break;
		default:
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
			break;
		}
		break;
	case IW_CM_EVENT_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	default:
		BUG_ON(1);
	}

	event.status = iw_event->status;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.iw = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		cma_release_remove(id_priv);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}

	cma_release_remove(id_priv);
	return ret;
}
static int iw_conn_req_handler(struct iw_cm_id *cm_id,
			       struct iw_cm_event *iw_event)
{
	struct rdma_cm_id *new_cm_id;
	struct rdma_id_private *listen_id, *conn_id;
	struct sockaddr_in *sin;
	struct net_device *dev = NULL;
	struct rdma_cm_event event;
	int ret;

	listen_id = cm_id->context;
	atomic_inc(&listen_id->dev_remove);
	if (!cma_comp(listen_id, CMA_LISTEN)) {
		ret = -ECONNABORTED;
		goto out;
	}

	/* Create a new RDMA id for the new IW CM ID */
	new_cm_id = rdma_create_id(listen_id->id.event_handler,
				   listen_id->id.context,
				   RDMA_PS_TCP);
	if (IS_ERR(new_cm_id)) {
		ret = PTR_ERR(new_cm_id);
		goto out;
	}
	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
	atomic_inc(&conn_id->dev_remove);
	conn_id->state = CMA_CONNECT;

	dev = ip_dev_find(iw_event->local_addr.sin_addr.s_addr);
	if (!dev) {
		ret = -EADDRNOTAVAIL;
		cma_release_remove(conn_id);
		rdma_destroy_id(new_cm_id);
		goto out;
	}
	ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
	if (ret) {
		cma_release_remove(conn_id);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	mutex_lock(&lock);
	ret = cma_acquire_dev(conn_id);
	mutex_unlock(&lock);
	if (ret) {
		cma_release_remove(conn_id);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	conn_id->cm_id.iw = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_iw_handler;

	sin = (struct sockaddr_in *) &new_cm_id->route.addr.src_addr;
	*sin = iw_event->local_addr;
	sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
	*sin = iw_event->remote_addr;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (ret) {
		/* User wants to destroy the CM ID */
		conn_id->cm_id.iw = NULL;
		cma_exch(conn_id, CMA_DESTROYING);
		cma_release_remove(conn_id);
		rdma_destroy_id(&conn_id->id);
	}

out:
	if (dev)
		dev_put(dev);
	cma_release_remove(listen_id);
	return ret;
}
static int cma_ib_listen(struct rdma_id_private *id_priv)
{
	struct ib_cm_compare_data compare_data;
	struct sockaddr *addr;
	__be64 svc_id;
	int ret;

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.ib))
		return PTR_ERR(id_priv->cm_id.ib);

	addr = &id_priv->id.route.addr.src_addr;
	svc_id = cma_get_service_id(id_priv->id.ps, addr);
	if (cma_any_addr(addr))
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
	else {
		cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
	}

	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	return ret;
}

static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
	int ret;
	struct sockaddr_in *sin;

	id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device,
					    iw_conn_req_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.iw))
		return PTR_ERR(id_priv->cm_id.iw);

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	id_priv->cm_id.iw->local_addr = *sin;

	ret = iw_cm_listen(id_priv->cm_id.iw, backlog);

	if (ret) {
		iw_destroy_cm_id(id_priv->cm_id.iw);
		id_priv->cm_id.iw = NULL;
	}

	return ret;
}

static int cma_listen_handler(struct rdma_cm_id *id,
			      struct rdma_cm_event *event)
{
	struct rdma_id_private *id_priv = id->context;

	id->context = id_priv->id.context;
	id->event_handler = id_priv->id.event_handler;
	return id_priv->id.event_handler(id, event);
}
static void cma_listen_on_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	struct rdma_id_private *dev_id_priv;
	struct rdma_cm_id *id;
	int ret;

	id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps);
	if (IS_ERR(id))
		return;

	dev_id_priv = container_of(id, struct rdma_id_private, id);

	dev_id_priv->state = CMA_ADDR_BOUND;
	memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
	       ip_addr_size(&id_priv->id.route.addr.src_addr));

	cma_attach_to_dev(dev_id_priv, cma_dev);
	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);

	ret = rdma_listen(id, id_priv->backlog);
	if (ret)
		goto err;

	return;
err:
	cma_destroy_listen(dev_id_priv);
}

static void cma_listen_on_all(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;

	mutex_lock(&lock);
	list_add_tail(&id_priv->list, &listen_any_list);
	list_for_each_entry(cma_dev, &dev_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}

static int cma_bind_any(struct rdma_cm_id *id, sa_family_t af)
{
	struct sockaddr_in addr_in;

	memset(&addr_in, 0, sizeof addr_in);
	addr_in.sin_family = af;
	return rdma_bind_addr(id, (struct sockaddr *) &addr_in);
}
int rdma_listen(struct rdma_cm_id *id, int backlog)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == CMA_IDLE) {
		ret = cma_bind_any(id, AF_INET);
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
		return -EINVAL;

	id_priv->backlog = backlog;
	if (id->device) {
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
			ret = cma_ib_listen(id_priv);
			if (ret)
				goto err;
			break;
		case RDMA_TRANSPORT_IWARP:
			ret = cma_iw_listen(id_priv, backlog);
			if (ret)
				goto err;
			break;
		default:
			ret = -ENOSYS;
			goto err;
		}
	} else
		cma_listen_on_all(id_priv);

	return 0;
err:
	id_priv->backlog = 0;
	cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
	return ret;
}
EXPORT_SYMBOL(rdma_listen);
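
/*
 * Listening on a wildcard address defers device binding: cma_listen_on_all()
 * clones the id onto every current device (and cma_add_one() extends that to
 * devices added later), while an id bound to a specific address listens
 * through a single ib_cm/iw_cm id.
 */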
static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
			      void *context)
{
	struct cma_work *work = context;
	struct rdma_route *route;

	route = &work->id->id.route;

	if (!status) {
		route->num_paths = 1;
		*route->path_rec = *path_rec;
	} else {
		work->old_state = CMA_ROUTE_QUERY;
		work->new_state = CMA_ADDR_RESOLVED;
		work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
		work->event.status = status;
	}

	queue_work(cma_wq, &work->work);
}

static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
			      struct cma_work *work)
{
	struct rdma_dev_addr *addr = &id_priv->id.route.addr.dev_addr;
	struct ib_sa_path_rec path_rec;

	memset(&path_rec, 0, sizeof path_rec);
	ib_addr_get_sgid(addr, &path_rec.sgid);
	ib_addr_get_dgid(addr, &path_rec.dgid);
	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(addr));
	path_rec.numb_path = 1;

	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
					       id_priv->id.port_num, &path_rec,
					       IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
					       IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH,
					       timeout_ms, GFP_KERNEL,
					       cma_query_handler, work, &id_priv->query);

	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}
static void cma_work_handler(struct work_struct *_work)
{
	struct cma_work *work = container_of(_work, struct cma_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	atomic_inc(&id_priv->dev_remove);
	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		destroy = 1;
	}
out:
	cma_release_remove(id_priv);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}

static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct rdma_route *route = &id_priv->id.route;
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ROUTE_QUERY;
	work->new_state = CMA_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;

	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	ret = cma_query_ib_route(id_priv, timeout_ms, work);
	if (ret)
		goto err2;

	return 0;
err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}
int rdma_set_ib_paths(struct rdma_cm_id *id,
		      struct ib_sa_path_rec *path_rec, int num_paths)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
		return -EINVAL;

	id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL);
	if (!id->route.path_rec) {
		ret = -ENOMEM;
		goto err;
	}

	memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths);
	id->route.num_paths = num_paths;
	return 0;
err:
	cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_set_ib_paths);

static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct cma_work *work;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ROUTE_QUERY;
	work->new_state = CMA_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
}
int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_resolve_ib_route(id_priv, timeout_ms);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_resolve_iw_route(id_priv, timeout_ms);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);
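
/*
 * Route resolution is asynchronous for both transports: completion funnels
 * through cma_work_handler(), which moves the id from CMA_ROUTE_QUERY to
 * CMA_ROUTE_RESOLVED and reports RDMA_CM_EVENT_ROUTE_RESOLVED (or
 * RDMA_CM_EVENT_ROUTE_ERROR) to the consumer.
 */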
static int cma_bind_loopback(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;
	struct ib_port_attr port_attr;
	union ib_gid gid;
	u16 pkey;
	int ret;
	u8 p;

	mutex_lock(&lock);
	if (list_empty(&dev_list)) {
		ret = -ENODEV;
		goto out;
	}
	list_for_each_entry(cma_dev, &dev_list, list)
		for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
			if (!ib_query_port(cma_dev->device, p, &port_attr) &&
			    port_attr.state == IB_PORT_ACTIVE)
				goto port_found;

	p = 1;
	cma_dev = list_entry(dev_list.next, struct cma_device, list);

port_found:
	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
	if (ret)
		goto out;

	ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
	if (ret)
		goto out;

	ib_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
	id_priv->id.port_num = p;
	cma_attach_to_dev(id_priv, cma_dev);
out:
	mutex_unlock(&lock);
	return ret;
}
static void addr_handler(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *dev_addr, void *context)
{
	struct rdma_id_private *id_priv = context;
	struct rdma_cm_event event;

	memset(&event, 0, sizeof event);
	atomic_inc(&id_priv->dev_remove);

	/*
	 * Grab mutex to block rdma_destroy_id() from removing the device while
	 * we're trying to acquire it.
	 */
	mutex_lock(&lock);
	if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) {
		mutex_unlock(&lock);
		goto out;
	}

	if (!status && !id_priv->cma_dev)
		status = cma_acquire_dev(id_priv);
	mutex_unlock(&lock);

	if (status) {
		if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
			goto out;
		event.event = RDMA_CM_EVENT_ADDR_ERROR;
		event.status = status;
	} else {
		memcpy(&id_priv->id.route.addr.src_addr, src_addr,
		       ip_addr_size(src_addr));
		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	}

	if (id_priv->id.event_handler(&id_priv->id, &event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		cma_release_remove(id_priv);
		cma_deref_id(id_priv);
		rdma_destroy_id(&id_priv->id);
		return;
	}
out:
	cma_release_remove(id_priv);
	cma_deref_id(id_priv);
}

static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	struct sockaddr_in *src_in, *dst_in;
	union ib_gid gid;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_bind_loopback(id_priv);
		if (ret)
			goto err;
	}

	ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);

	if (cma_zero_addr(&id_priv->id.route.addr.src_addr)) {
		src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr;
		dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr;
		src_in->sin_family = dst_in->sin_family;
		src_in->sin_addr.s_addr = dst_in->sin_addr.s_addr;
	}

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ADDR_QUERY;
	work->new_state = CMA_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
err:
	kfree(work);
	return ret;
}
static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
			 struct sockaddr *dst_addr)
{
	if (src_addr && src_addr->sa_family)
		return rdma_bind_addr(id, src_addr);
	else
		return cma_bind_any(id, dst_addr->sa_family);
}
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
		      struct sockaddr *dst_addr, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == CMA_IDLE) {
		ret = cma_bind_addr(id, src_addr, dst_addr);
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr));
	if (cma_any_addr(dst_addr))
		ret = cma_resolve_loopback(id_priv);
	else
		ret = rdma_resolve_ip(&addr_client, &id->route.addr.src_addr,
				      dst_addr, &id->route.addr.dev_addr,
				      timeout_ms, addr_handler, id_priv);
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);
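
/*
 * Resolving to a wildcard/loopback destination short-circuits through
 * cma_resolve_loopback(); otherwise rdma_resolve_ip() performs the
 * ARP-based lookup and reports back through addr_handler().
 */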
static void cma_bind_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv)
{
	struct sockaddr_in *sin;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	sin->sin_port = htons(bind_list->port);
	id_priv->bind_list = bind_list;
	hlist_add_head(&id_priv->node, &bind_list->owners);
}
static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
			  unsigned short snum)
{
	struct rdma_bind_list *bind_list;
	int port, ret;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

	do {
		ret = idr_get_new_above(ps, bind_list, snum, &port);
	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));

	if (ret)
		goto err1;

	if (port != snum) {
		ret = -EADDRNOTAVAIL;
		goto err2;
	}

	bind_list->ps = ps;
	bind_list->port = (unsigned short) port;
	cma_bind_port(bind_list, id_priv);
	return 0;
err2:
	idr_remove(ps, port);
err1:
	kfree(bind_list);
	return ret;
}

static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list;
	int port, ret;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

retry:
	do {
		ret = idr_get_new_above(ps, bind_list, next_port, &port);
	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));

	if (ret)
		goto err1;

	if (port > sysctl_local_port_range[1]) {
		if (next_port != sysctl_local_port_range[0]) {
			idr_remove(ps, port);
			next_port = sysctl_local_port_range[0];
			goto retry;
		}
		ret = -EADDRNOTAVAIL;
		goto err2;
	}

	if (port == sysctl_local_port_range[1])
		next_port = sysctl_local_port_range[0];
	else
		next_port = port + 1;

	bind_list->ps = ps;
	bind_list->port = (unsigned short) port;
	cma_bind_port(bind_list, id_priv);
	return 0;
err2:
	idr_remove(ps, port);
err1:
	kfree(bind_list);
	return ret;
}
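
/*
 * next_port rotates through sysctl_local_port_range so ephemeral port
 * allocation starts from a random point (seeded in cma_init()) and wraps
 * to the bottom of the range once the top is passed; a full cycle without
 * a free slot yields -EADDRNOTAVAIL.
 */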
static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
{
	struct rdma_id_private *cur_id;
	struct sockaddr_in *sin, *cur_sin;
	struct rdma_bind_list *bind_list;
	struct hlist_node *node;
	unsigned short snum;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	snum = ntohs(sin->sin_port);
	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
		return -EACCES;

	bind_list = idr_find(ps, snum);
	if (!bind_list)
		return cma_alloc_port(ps, id_priv, snum);

	/*
	 * We don't support binding to any address if anyone is bound to
	 * a specific address on the same port.
	 */
	if (cma_any_addr(&id_priv->id.route.addr.src_addr))
		return -EADDRNOTAVAIL;

	hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
		if (cma_any_addr(&cur_id->id.route.addr.src_addr))
			return -EADDRNOTAVAIL;

		cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
		if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr)
			return -EADDRINUSE;
	}

	cma_bind_port(bind_list, id_priv);
	return 0;
}

static int cma_get_port(struct rdma_id_private *id_priv)
{
	struct idr *ps;
	int ret;

	switch (id_priv->id.ps) {
	case RDMA_PS_SDP:
		ps = &sdp_ps;
		break;
	case RDMA_PS_TCP:
		ps = &tcp_ps;
		break;
	case RDMA_PS_UDP:
		ps = &udp_ps;
		break;
	default:
		return -EPROTONOSUPPORT;
	}

	mutex_lock(&lock);
	if (cma_any_port(&id_priv->id.route.addr.src_addr))
		ret = cma_alloc_any_port(ps, id_priv);
	else
		ret = cma_use_port(ps, id_priv);
	mutex_unlock(&lock);

	return ret;
}
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	int ret;

	if (addr->sa_family != AF_INET)
		return -EAFNOSUPPORT;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
		return -EINVAL;

	if (!cma_any_addr(addr)) {
		ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
		if (ret)
			goto err1;

		mutex_lock(&lock);
		ret = cma_acquire_dev(id_priv);
		mutex_unlock(&lock);
		if (ret)
			goto err1;
	}

	memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
	ret = cma_get_port(id_priv);
	if (ret)
		goto err2;

	return 0;
err2:
	if (!cma_any_addr(addr)) {
		mutex_lock(&lock);
		cma_detach_from_dev(id_priv);
		mutex_unlock(&lock);
	}
err1:
	cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
	return ret;
}
EXPORT_SYMBOL(rdma_bind_addr);
static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
			  struct rdma_route *route)
{
	struct sockaddr_in *src4, *dst4;
	struct cma_hdr *cma_hdr;
	struct sdp_hh *sdp_hdr;

	src4 = (struct sockaddr_in *) &route->addr.src_addr;
	dst4 = (struct sockaddr_in *) &route->addr.dst_addr;

	switch (ps) {
	case RDMA_PS_SDP:
		sdp_hdr = hdr;
		if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
			return -EINVAL;
		sdp_set_ip_ver(sdp_hdr, 4);
		sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
		sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
		sdp_hdr->port = src4->sin_port;
		break;
	default:
		cma_hdr = hdr;
		cma_hdr->cma_version = CMA_VERSION;
		cma_set_ip_ver(cma_hdr, 4);
		cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
		cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
		cma_hdr->port = src4->sin_port;
		break;
	}
	return 0;
}
*cm_id
,
1939 struct ib_cm_event
*ib_event
)
1941 struct rdma_id_private
*id_priv
= cm_id
->context
;
1942 struct rdma_cm_event event
;
1943 struct ib_cm_sidr_rep_event_param
*rep
= &ib_event
->param
.sidr_rep_rcvd
;
1946 memset(&event
, 0, sizeof event
);
1947 atomic_inc(&id_priv
->dev_remove
);
1948 if (!cma_comp(id_priv
, CMA_CONNECT
))
1951 switch (ib_event
->event
) {
1952 case IB_CM_SIDR_REQ_ERROR
:
1953 event
.event
= RDMA_CM_EVENT_UNREACHABLE
;
1954 event
.status
= -ETIMEDOUT
;
1956 case IB_CM_SIDR_REP_RECEIVED
:
1957 event
.param
.ud
.private_data
= ib_event
->private_data
;
1958 event
.param
.ud
.private_data_len
= IB_CM_SIDR_REP_PRIVATE_DATA_SIZE
;
1959 if (rep
->status
!= IB_SIDR_SUCCESS
) {
1960 event
.event
= RDMA_CM_EVENT_UNREACHABLE
;
1961 event
.status
= ib_event
->param
.sidr_rep_rcvd
.status
;
1964 if (rep
->qkey
!= RDMA_UD_QKEY
) {
1965 event
.event
= RDMA_CM_EVENT_UNREACHABLE
;
1966 event
.status
= -EINVAL
;
1969 ib_init_ah_from_path(id_priv
->id
.device
, id_priv
->id
.port_num
,
1970 id_priv
->id
.route
.path_rec
,
1971 &event
.param
.ud
.ah_attr
);
1972 event
.param
.ud
.qp_num
= rep
->qpn
;
1973 event
.param
.ud
.qkey
= rep
->qkey
;
1974 event
.event
= RDMA_CM_EVENT_ESTABLISHED
;
1978 printk(KERN_ERR
"RDMA CMA: unexpected IB CM event: %d",
1983 ret
= id_priv
->id
.event_handler(&id_priv
->id
, &event
);
1985 /* Destroy the CM ID by returning a non-zero value. */
1986 id_priv
->cm_id
.ib
= NULL
;
1987 cma_exch(id_priv
, CMA_DESTROYING
);
1988 cma_release_remove(id_priv
);
1989 rdma_destroy_id(&id_priv
->id
);
1993 cma_release_remove(id_priv
);
static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
			      struct rdma_conn_param *conn_param)
{
	struct ib_cm_sidr_req_param req;
	struct rdma_route *route;
	int ret;

	req.private_data_len = sizeof(struct cma_hdr) +
			       conn_param->private_data_len;
	req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
	if (!req.private_data)
		return -ENOMEM;

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy((void *) req.private_data + sizeof(struct cma_hdr),
		       conn_param->private_data, conn_param->private_data_len);

	route = &id_priv->id.route;
	ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route);
	if (ret)
		goto out;

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device,
					    cma_sidr_rep_handler, id_priv);
	if (IS_ERR(id_priv->cm_id.ib)) {
		ret = PTR_ERR(id_priv->cm_id.ib);
		goto out;
	}

	req.path = route->path_rec;
	req.service_id = cma_get_service_id(id_priv->id.ps,
					    &route->addr.dst_addr);
	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
	req.max_cm_retries = CMA_MAX_CM_RETRIES;

	ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}
out:
	kfree(req.private_data);
	return ret;
}
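
/*
 * IB CM response timeouts are encoded as 4.096us << t, so
 * CMA_CM_RESPONSE_TIMEOUT (20) is roughly 4.3 seconds; the SIDR timeout
 * above, 1 << (20 - 8) = 4096ms, approximates the same interval in
 * milliseconds.
 */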
static int cma_connect_ib(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct ib_cm_req_param req;
	struct rdma_route *route;
	void *private_data;
	int offset, ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv->id.ps);
	req.private_data_len = offset + conn_param->private_data_len;
	private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
	if (!private_data)
		return -ENOMEM;

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.ib)) {
		ret = PTR_ERR(id_priv->cm_id.ib);
		goto out;
	}

	route = &id_priv->id.route;
	ret = cma_format_hdr(private_data, id_priv->id.ps, route);
	if (ret)
		goto out;
	req.private_data = private_data;

	req.primary_path = &route->path_rec[0];
	if (route->num_paths == 2)
		req.alternate_path = &route->path_rec[1];

	req.service_id = cma_get_service_id(id_priv->id.ps,
					    &route->addr.dst_addr);
	req.qp_num = id_priv->qp_num;
	req.qp_type = IB_QPT_RC;
	req.starting_psn = id_priv->seq_num;
	req.responder_resources = conn_param->responder_resources;
	req.initiator_depth = conn_param->initiator_depth;
	req.flow_control = conn_param->flow_control;
	req.retry_count = conn_param->retry_count;
	req.rnr_retry_count = conn_param->rnr_retry_count;
	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.max_cm_retries = CMA_MAX_CM_RETRIES;
	req.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
	if (ret && !IS_ERR(id_priv->cm_id.ib)) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	kfree(private_data);
	return ret;
}
static int cma_connect_iw(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct iw_cm_id *cm_id;
	struct sockaddr_in *sin;
	int ret;
	struct iw_cm_conn_param iw_param;

	cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
	if (IS_ERR(cm_id)) {
		ret = PTR_ERR(cm_id);
		goto out;
	}

	id_priv->cm_id.iw = cm_id;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	cm_id->local_addr = *sin;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
	cm_id->remote_addr = *sin;

	ret = cma_modify_qp_rtr(&id_priv->id);
	if (ret)
		goto out;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp)
		iw_param.qpn = id_priv->qp_num;
	else
		iw_param.qpn = conn_param->qp_num;
	ret = iw_cm_connect(cm_id, &iw_param);
out:
	if (ret && !IS_ERR(cm_id)) {
		iw_destroy_cm_id(cm_id);
		id_priv->cm_id.iw = NULL;
	}
	return ret;
}
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT))
		return -EINVAL;

	if (!id->qp) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (id->ps == RDMA_PS_UDP)
			ret = cma_resolve_ib_udp(id_priv, conn_param);
		else
			ret = cma_connect_ib(id_priv, conn_param);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_connect_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_connect);
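
/*
 * rdma_connect() assumes the route is already resolved. UDP port-space ids
 * use the SIDR protocol (cma_resolve_ib_udp) to exchange QPN/QKey for
 * unconnected datagrams, while RC connections send a full CM REQ via
 * cma_connect_ib().
 */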
static int cma_accept_ib(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct ib_cm_rep_param rep;
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	if (id_priv->id.qp) {
		ret = cma_modify_qp_rtr(&id_priv->id);
		if (ret)
			goto out;

		qp_attr.qp_state = IB_QPS_RTS;
		ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, &qp_attr,
					 &qp_attr_mask);
		if (ret)
			goto out;

		qp_attr.max_rd_atomic = conn_param->initiator_depth;
		ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
		if (ret)
			goto out;
	}

	memset(&rep, 0, sizeof rep);
	rep.qp_num = id_priv->qp_num;
	rep.starting_psn = id_priv->seq_num;
	rep.private_data = conn_param->private_data;
	rep.private_data_len = conn_param->private_data_len;
	rep.responder_resources = conn_param->responder_resources;
	rep.initiator_depth = conn_param->initiator_depth;
	rep.target_ack_delay = CMA_CM_RESPONSE_TIMEOUT;
	rep.failover_accepted = 0;
	rep.flow_control = conn_param->flow_control;
	rep.rnr_retry_count = conn_param->rnr_retry_count;
	rep.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
out:
	return ret;
}

static int cma_accept_iw(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct iw_cm_conn_param iw_param;
	int ret;

	ret = cma_modify_qp_rtr(&id_priv->id);
	if (ret)
		return ret;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp)
		iw_param.qpn = id_priv->qp_num;
	else
		iw_param.qpn = conn_param->qp_num;

	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
}
static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
			     enum ib_cm_sidr_status status,
			     const void *private_data, int private_data_len)
{
	struct ib_cm_sidr_rep_param rep;

	memset(&rep, 0, sizeof rep);
	rep.status = status;
	if (status == IB_SIDR_SUCCESS) {
		rep.qp_num = id_priv->qp_num;
		rep.qkey = RDMA_UD_QKEY;
	}
	rep.private_data = private_data;
	rep.private_data_len = private_data_len;

	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
}
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_CONNECT))
		return -EINVAL;

	if (!id->qp && conn_param) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (id->ps == RDMA_PS_UDP)
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
						conn_param->private_data,
						conn_param->private_data_len);
		else if (conn_param)
			ret = cma_accept_ib(id_priv, conn_param);
		else
			ret = cma_rep_recv(id_priv);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_accept_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id);
	rdma_reject(id, NULL, 0);
	return ret;
}
EXPORT_SYMBOL(rdma_accept);
int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_CONNECT))
		return -EINVAL;

	switch (id->device->node_type) {
	case RDMA_NODE_IB_CA:
		ret = ib_cm_notify(id_priv->cm_id.ib, event);
		break;
	default:
		ret = 0;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_notify);

int rdma_reject(struct rdma_cm_id *id, const void *private_data,
		u8 private_data_len)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_CONNECT))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (id->ps == RDMA_PS_UDP)
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
						private_data, private_data_len);
		else
			ret = ib_send_cm_rej(id_priv->cm_id.ib,
					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
					     0, private_data, private_data_len);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_reject(id_priv->cm_id.iw,
				   private_data, private_data_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_reject);
int rdma_disconnect(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_CONNECT) &&
	    !cma_comp(id_priv, CMA_DISCONNECT))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_modify_qp_err(id);
		if (ret)
			goto out;
		/* Initiate or respond to a disconnect. */
		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}
EXPORT_SYMBOL(rdma_disconnect);
static void cma_add_one(struct ib_device *device)
{
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;

	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
	if (!cma_dev)
		return;

	cma_dev->device = device;
	cma_dev->node_guid = device->node_guid;

	init_completion(&cma_dev->comp);
	atomic_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}

static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event;
	enum cma_state state;

	/* Record that we want to remove the device */
	state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
	if (state == CMA_DESTROYING)
		return 0;

	cma_cancel_operation(id_priv, state);
	wait_event(id_priv->wait_remove, !atomic_read(&id_priv->dev_remove));

	/* Check for destruction from another callback. */
	if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
		return 0;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	return id_priv->id.event_handler(&id_priv->id, &event);
}
static void cma_process_remove(struct cma_device *cma_dev)
{
	struct rdma_id_private *id_priv;
	int ret;

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		if (cma_internal_listen(id_priv)) {
			cma_destroy_listen(id_priv);
			continue;
		}

		list_del_init(&id_priv->list);
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}

static void cma_remove_one(struct ib_device *device)
{
	struct cma_device *cma_dev;

	cma_dev = ib_get_client_data(device, &cma_client);
	if (!cma_dev)
		return;

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev);
}
static int cma_init(void)
{
	int ret;

	get_random_bytes(&next_port, sizeof next_port);
	next_port = (next_port % (sysctl_local_port_range[1] -
				  sysctl_local_port_range[0])) +
		    sysctl_local_port_range[0];
	cma_wq = create_singlethread_workqueue("rdma_cm_wq");
	if (!cma_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);
	rdma_addr_register_client(&addr_client);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;
	return 0;

err:
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	return ret;
}
static void cma_cleanup(void)
{
	ib_unregister_client(&cma_client);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	idr_destroy(&sdp_ps);
	idr_destroy(&tcp_ps);
	idr_destroy(&udp_ps);
}

module_init(cma_init);
module_exit(cma_cleanup);