/*
 * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sysctl.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_addr.h>

#include "iwcm.h"

MODULE_AUTHOR("Tom Tucker");
MODULE_DESCRIPTION("iWARP CM");
MODULE_LICENSE("Dual BSD/GPL");

static struct workqueue_struct *iwcm_wq;
struct iwcm_work {
	struct work_struct	work;
	struct iwcm_id_private	*cm_id;
	struct list_head	list;
	struct iw_cm_event	event;
	struct list_head	free_list;
};

static unsigned int default_backlog = 256;

static struct ctl_table_header *iwcm_ctl_table_hdr;
static struct ctl_table iwcm_ctl_table[] = {
	{
		.procname	= "default_backlog",
		.data		= &default_backlog,
		.maxlen		= sizeof(default_backlog),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

/*
 * The following services provide a mechanism for pre-allocating iwcm_work
 * elements. The design pre-allocates them based on the cm_id type:
 *	LISTENING IDS:	Get enough elements preallocated to handle the
 *			listen backlog.
 *	ACTIVE IDS:	4: CONNECT_REPLY, ESTABLISHED, DISCONNECT, CLOSE
 *	PASSIVE IDS:	3: ESTABLISHED, DISCONNECT, CLOSE
 *
 * Allocating them in connect and listen avoids having to deal
 * with allocation failures on the event upcall from the provider (which
 * is called in the interrupt context).
 *
 * One exception is when creating the cm_id for incoming connection requests.
 * There are two cases:
 * 1) in the event upcall, cm_event_handler(), for a listening cm_id. If
 *    the backlog is exceeded, then no more connection request events will
 *    be processed. cm_event_handler() returns -ENOMEM in this case. It's
 *    up to the provider to reject the connection request.
 * 2) in the connection request workqueue handler, cm_conn_req_handler().
 *    If work elements cannot be allocated for the new connect request cm_id,
 *    then IWCM will call the provider reject method. This is ok since
 *    cm_conn_req_handler() runs in the workqueue thread context.
 */

static struct iwcm_work *get_work(struct iwcm_id_private *cm_id_priv)
{
	struct iwcm_work *work;

	if (list_empty(&cm_id_priv->work_free_list))
		return NULL;
	work = list_entry(cm_id_priv->work_free_list.next, struct iwcm_work,
			  free_list);
	list_del_init(&work->free_list);
	return work;
}

static void put_work(struct iwcm_work *work)
{
	list_add(&work->free_list, &work->cm_id->work_free_list);
}

static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
{
	struct list_head *e, *tmp;

	list_for_each_safe(e, tmp, &cm_id_priv->work_free_list)
		kfree(list_entry(e, struct iwcm_work, free_list));
}

static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
{
	struct iwcm_work *work;

	BUG_ON(!list_empty(&cm_id_priv->work_free_list));
	while (count--) {
		work = kmalloc(sizeof(struct iwcm_work), GFP_KERNEL);
		if (!work) {
			dealloc_work_entries(cm_id_priv);
			return -ENOMEM;
		}
		work->cm_id = cm_id_priv;
		INIT_LIST_HEAD(&work->list);
		put_work(work);
	}
	return 0;
}

/*
 * Save private data from incoming connection requests to
 * iw_cm_event, so the low-level driver doesn't have to. Adjust
 * the event ptr to point to the local copy.
 */
static int copy_private_data(struct iw_cm_event *event)
{
	void *p;

	p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC);
	if (!p)
		return -ENOMEM;
	event->private_data = p;
	return 0;
}

static void free_cm_id(struct iwcm_id_private *cm_id_priv)
{
	dealloc_work_entries(cm_id_priv);
	kfree(cm_id_priv);
}

/*
 * Release a reference on cm_id. If the last reference is released,
 * wake up the thread waiting in iw_destroy_cm_id (via destroy_comp)
 * and return 1 so the caller knows the cm_id may now be freed.
 */
static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
{
	BUG_ON(atomic_read(&cm_id_priv->refcount) == 0);
	if (atomic_dec_and_test(&cm_id_priv->refcount)) {
		BUG_ON(!list_empty(&cm_id_priv->work_list));
		complete(&cm_id_priv->destroy_comp);
		return 1;
	}

	return 0;
}

static void add_ref(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;
	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	atomic_inc(&cm_id_priv->refcount);
}

static void rem_ref(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;
	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	if (iwcm_deref_id(cm_id_priv) &&
	    test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags)) {
		BUG_ON(!list_empty(&cm_id_priv->work_list));
		free_cm_id(cm_id_priv);
	}
}

static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);

struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
				 iw_cm_handler cm_handler,
				 void *context)
{
	struct iwcm_id_private *cm_id_priv;

	cm_id_priv = kzalloc(sizeof(*cm_id_priv), GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->state = IW_CM_STATE_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.event_handler = cm_event_handler;
	cm_id_priv->id.add_ref = add_ref;
	cm_id_priv->id.rem_ref = rem_ref;
	spin_lock_init(&cm_id_priv->lock);
	atomic_set(&cm_id_priv->refcount, 1);
	init_waitqueue_head(&cm_id_priv->connect_wait);
	init_completion(&cm_id_priv->destroy_comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	INIT_LIST_HEAD(&cm_id_priv->work_free_list);

	return &cm_id_priv->id;
}
EXPORT_SYMBOL(iw_create_cm_id);

237 | ||
238 | static int iwcm_modify_qp_err(struct ib_qp *qp) | |
239 | { | |
240 | struct ib_qp_attr qp_attr; | |
241 | ||
242 | if (!qp) | |
243 | return -EINVAL; | |
244 | ||
245 | qp_attr.qp_state = IB_QPS_ERR; | |
246 | return ib_modify_qp(qp, &qp_attr, IB_QP_STATE); | |
247 | } | |
248 | ||
/*
 * This is really the RDMAC CLOSING state. It is most similar to the
 * IB SQD QP state.
 */
static int iwcm_modify_qp_sqd(struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;

	BUG_ON(qp == NULL);
	qp_attr.qp_state = IB_QPS_SQD;
	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}

/*
 * CM_ID <-- CLOSING
 *
 * Block if a passive or active connection is currently being processed. Then
 * process the event as follows:
 * - If we are ESTABLISHED, move to CLOSING and modify the QP state
 *   based on the abrupt flag
 * - If the connection is already in the CLOSING or IDLE state, the peer is
 *   disconnecting concurrently with us and we've already seen the
 *   DISCONNECT event -- ignore the request and return 0
 * - Disconnect on a listening endpoint returns -EINVAL
 */
int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;
	struct ib_qp *qp = NULL;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	/* Wait if we're currently in a connect or accept downcall */
	wait_event(cm_id_priv->connect_wait,
		   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_ESTABLISHED:
		cm_id_priv->state = IW_CM_STATE_CLOSING;

		/* QP could be NULL for a user-mode client */
		if (cm_id_priv->qp)
			qp = cm_id_priv->qp;
		else
			ret = -EINVAL;
		break;
	case IW_CM_STATE_LISTEN:
		ret = -EINVAL;
		break;
	case IW_CM_STATE_CLOSING:
		/* remote peer closed first */
	case IW_CM_STATE_IDLE:
		/* accept or connect returned !0 */
		break;
	case IW_CM_STATE_CONN_RECV:
		/*
		 * App called disconnect before/without calling accept after
		 * connect_request event delivered.
		 */
		break;
	case IW_CM_STATE_CONN_SENT:
		/* Can only get here if wait above fails */
	default:
		BUG();
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (qp) {
		if (abrupt)
			ret = iwcm_modify_qp_err(qp);
		else
			ret = iwcm_modify_qp_sqd(qp);

		/*
		 * If both sides are disconnecting the QP could
		 * already be in ERR or SQD states
		 */
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL(iw_cm_disconnect);

/*
 * CM_ID <-- DESTROYING
 *
 * Clean up all resources associated with the connection and release
 * the initial reference taken by iw_create_cm_id.
 */
static void destroy_cm_id(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	/*
	 * Wait if we're currently in a connect or accept downcall. A
	 * listening endpoint should never block here.
	 */
	wait_event(cm_id_priv->connect_wait,
		   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_LISTEN:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		/* destroy the listening endpoint */
		ret = cm_id->device->iwcm->destroy_listen(cm_id);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_ESTABLISHED:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		/* Abrupt close of the connection */
		(void)iwcm_modify_qp_err(cm_id_priv->qp);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CLOSING:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		break;
	case IW_CM_STATE_CONN_RECV:
		/*
		 * App called destroy before/without calling accept after
		 * receiving connection request event notification or
		 * returned non-zero from the event callback function.
		 * In either case, must tell the provider to reject.
		 */
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_id->device->iwcm->reject(cm_id, NULL, 0);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_DESTROYING:
	default:
		BUG();
		break;
	}
	if (cm_id_priv->qp) {
		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
		cm_id_priv->qp = NULL;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	(void)iwcm_deref_id(cm_id_priv);
}

/*
 * This function is only called by the application thread and cannot
 * be called by the event thread. The function will wait for all
 * references to be released on the cm_id and then free the cm_id
 * object.
 */
void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	BUG_ON(test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags));

	destroy_cm_id(cm_id);

	wait_for_completion(&cm_id_priv->destroy_comp);

	free_cm_id(cm_id_priv);
}
EXPORT_SYMBOL(iw_destroy_cm_id);

/*
 * CM_ID <-- LISTEN
 *
 * Start listening for connect requests. Generates one CONNECT_REQUEST
 * event for each inbound connect request.
 */
int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	if (!backlog)
		backlog = default_backlog;

	ret = alloc_work_entries(cm_id_priv, backlog);
	if (ret)
		return ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
		cm_id_priv->state = IW_CM_STATE_LISTEN;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = cm_id->device->iwcm->create_listen(cm_id, backlog);
		if (ret)
			cm_id_priv->state = IW_CM_STATE_IDLE;
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	default:
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}
EXPORT_SYMBOL(iw_cm_listen);

/*
 * CM_ID <-- IDLE
 *
 * Rejects an inbound connection request. No events are generated.
 */
int iw_cm_reject(struct iw_cm_id *cm_id,
		 const void *private_data,
		 u8 private_data_len)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}
	cm_id_priv->state = IW_CM_STATE_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = cm_id->device->iwcm->reject(cm_id, private_data,
					  private_data_len);

	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}
EXPORT_SYMBOL(iw_cm_reject);

/*
 * CM_ID <-- ESTABLISHED
 *
 * Accepts an inbound connection request and generates an ESTABLISHED
 * event. Callers of iw_cm_disconnect and iw_destroy_cm_id will block
 * until the ESTABLISHED event is received from the provider.
 */
int iw_cm_accept(struct iw_cm_id *cm_id,
		 struct iw_cm_conn_param *iw_param)
{
	struct iwcm_id_private *cm_id_priv;
	struct ib_qp *qp;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}
	/* Get the ib_qp given the QPN */
	qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
	if (!qp) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}
	cm_id->device->iwcm->add_ref(qp);
	cm_id_priv->qp = qp;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = cm_id->device->iwcm->accept(cm_id, iw_param);
	if (ret) {
		/* An error on accept precludes provider events */
		BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
		cm_id_priv->state = IW_CM_STATE_IDLE;
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		if (cm_id_priv->qp) {
			cm_id->device->iwcm->rem_ref(qp);
			cm_id_priv->qp = NULL;
		}
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
	}

	return ret;
}
EXPORT_SYMBOL(iw_cm_accept);

/*
 * Active Side: CM_ID <-- CONN_SENT
 *
 * If successful, results in the generation of a CONNECT_REPLY
 * event. iw_cm_disconnect and iw_cm_destroy will block until the
 * CONNECT_REPLY event is received from the provider.
 */
int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
	struct iwcm_id_private *cm_id_priv;
	int ret;
	unsigned long flags;
	struct ib_qp *qp;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	ret = alloc_work_entries(cm_id_priv, 4);
	if (ret)
		return ret;

	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	spin_lock_irqsave(&cm_id_priv->lock, flags);

	if (cm_id_priv->state != IW_CM_STATE_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}

	/* Get the ib_qp given the QPN */
	qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
	if (!qp) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}
	cm_id->device->iwcm->add_ref(qp);
	cm_id_priv->qp = qp;
	cm_id_priv->state = IW_CM_STATE_CONN_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = cm_id->device->iwcm->connect(cm_id, iw_param);
	if (ret) {
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		if (cm_id_priv->qp) {
			cm_id->device->iwcm->rem_ref(qp);
			cm_id_priv->qp = NULL;
		}
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
		cm_id_priv->state = IW_CM_STATE_IDLE;
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
	}

	return ret;
}
EXPORT_SYMBOL(iw_cm_connect);

/*
 * Passive Side: new CM_ID <-- CONN_RECV
 *
 * Handles an inbound connect request. The function creates a new
 * iw_cm_id to represent the new connection and inherits the client
 * callback function and other attributes from the listening parent.
 *
 * The work item contains a pointer to the listen_cm_id and the event. The
 * listen_cm_id contains the client cm_handler, context and
 * device. These are copied when the cm_id is cloned. The event
 * contains the new four tuple.
 *
 * An error on the child should not affect the parent, so this
 * function does not return a value.
 */
static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
				struct iw_cm_event *iw_event)
{
	unsigned long flags;
	struct iw_cm_id *cm_id;
	struct iwcm_id_private *cm_id_priv;
	int ret;

	/*
	 * The provider should never generate a connection request
	 * event with a bad status.
	 */
	BUG_ON(iw_event->status);

	cm_id = iw_create_cm_id(listen_id_priv->id.device,
				listen_id_priv->id.cm_handler,
				listen_id_priv->id.context);
	/* If the cm_id could not be created, ignore the request */
	if (IS_ERR(cm_id))
		goto out;

	cm_id->provider_data = iw_event->provider_data;
	cm_id->local_addr = iw_event->local_addr;
	cm_id->remote_addr = iw_event->remote_addr;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	cm_id_priv->state = IW_CM_STATE_CONN_RECV;

	/*
	 * We could be destroying the listening id. If so, ignore this
	 * upcall.
	 */
	spin_lock_irqsave(&listen_id_priv->lock, flags);
	if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
		spin_unlock_irqrestore(&listen_id_priv->lock, flags);
		iw_cm_reject(cm_id, NULL, 0);
		iw_destroy_cm_id(cm_id);
		goto out;
	}
	spin_unlock_irqrestore(&listen_id_priv->lock, flags);

	ret = alloc_work_entries(cm_id_priv, 3);
	if (ret) {
		iw_cm_reject(cm_id, NULL, 0);
		iw_destroy_cm_id(cm_id);
		goto out;
	}

	/* Call the client CM handler */
	ret = cm_id->cm_handler(cm_id, iw_event);
	if (ret) {
		iw_cm_reject(cm_id, NULL, 0);
		set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
		destroy_cm_id(cm_id);
		if (atomic_read(&cm_id_priv->refcount) == 0)
			free_cm_id(cm_id_priv);
	}

out:
	if (iw_event->private_data_len)
		kfree(iw_event->private_data);
}

/*
 * Passive Side: CM_ID <-- ESTABLISHED
 *
 * The provider generated an ESTABLISHED event which means that
 * the MPA negotiation has completed successfully and we are now in MPA
 * FPDU mode.
 *
 * This event can only be received in the CONN_RECV state. If the
 * remote peer closed, the ESTABLISHED event would be received followed
 * by the CLOSE event. If the app closes, it will block until we wake
 * it up after processing this event.
 */
static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
			       struct iw_cm_event *iw_event)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);

	/*
	 * We clear the CONNECT_WAIT bit here to allow the callback
	 * function to call iw_cm_disconnect. Calling iw_destroy_cm_id
	 * from a callback handler is not allowed.
	 */
	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
	cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}

/*
 * Active Side: CM_ID <-- ESTABLISHED
 *
 * The app has called connect and is waiting for the established event to
 * post its requests to the server. This event will wake up anyone
 * blocked in iw_cm_disconnect or iw_destroy_id.
 */
static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
			       struct iw_cm_event *iw_event)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	/*
	 * Clear the connect wait bit so a callback function calling
	 * iw_cm_disconnect will not wait and deadlock this thread
	 */
	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
	if (iw_event->status == 0) {
		cm_id_priv->id.local_addr = iw_event->local_addr;
		cm_id_priv->id.remote_addr = iw_event->remote_addr;
		cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
	} else {
		/* REJECTED or RESET */
		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
		cm_id_priv->qp = NULL;
		cm_id_priv->state = IW_CM_STATE_IDLE;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);

	if (iw_event->private_data_len)
		kfree(iw_event->private_data);

	/* Wake up waiters on connect complete */
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}

/*
 * CM_ID <-- CLOSING
 *
 * If in the ESTABLISHED state, move to CLOSING.
 */
static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
				  struct iw_cm_event *iw_event)
{
	unsigned long flags;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state == IW_CM_STATE_ESTABLISHED)
		cm_id_priv->state = IW_CM_STATE_CLOSING;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}

/*
 * CM_ID <-- IDLE
 *
 * If in the ESTABLISHED or CLOSING states, the QP will have been
 * moved by the provider to the ERR state. Disassociate the CM_ID from
 * the QP, move to IDLE, and remove the 'connected' reference.
 *
 * If in some other state, the cm_id was destroyed asynchronously.
 * This is the last reference that will result in waking up
 * the app thread blocked in iw_destroy_cm_id.
 */
static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
			    struct iw_cm_event *iw_event)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cm_id_priv->lock, flags);

	if (cm_id_priv->qp) {
		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
		cm_id_priv->qp = NULL;
	}
	switch (cm_id_priv->state) {
	case IW_CM_STATE_ESTABLISHED:
	case IW_CM_STATE_CLOSING:
		cm_id_priv->state = IW_CM_STATE_IDLE;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_DESTROYING:
		break;
	default:
		BUG();
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}

static int process_event(struct iwcm_id_private *cm_id_priv,
			 struct iw_cm_event *iw_event)
{
	int ret = 0;

	switch (iw_event->event) {
	case IW_CM_EVENT_CONNECT_REQUEST:
		cm_conn_req_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		ret = cm_conn_rep_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_ESTABLISHED:
		ret = cm_conn_est_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_DISCONNECT:
		cm_disconnect_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_CLOSE:
		ret = cm_close_handler(cm_id_priv, iw_event);
		break;
	default:
		BUG();
	}

	return ret;
}

/*
 * Process events on the work_list for the cm_id. If the callback
 * function requests that the cm_id be deleted, a flag is set in the
 * cm_id flags to indicate that when the last reference is
 * removed, the cm_id is to be destroyed. This is necessary to
 * distinguish between an object that will be destroyed by the app
 * thread asleep on the destroy_comp list vs. an object destroyed
 * here synchronously when the last reference is removed.
 */
static void cm_work_handler(struct work_struct *_work)
{
	struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
	struct iw_cm_event levent;
	struct iwcm_id_private *cm_id_priv = work->cm_id;
	unsigned long flags;
	int empty;
	int ret = 0;
	int destroy_id;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	empty = list_empty(&cm_id_priv->work_list);
	while (!empty) {
		work = list_entry(cm_id_priv->work_list.next,
				  struct iwcm_work, list);
		list_del_init(&work->list);
		empty = list_empty(&cm_id_priv->work_list);
		levent = work->event;
		put_work(work);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		ret = process_event(cm_id_priv, &levent);
		if (ret) {
			set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
			destroy_cm_id(&cm_id_priv->id);
		}
		BUG_ON(atomic_read(&cm_id_priv->refcount) == 0);
		destroy_id = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
		if (iwcm_deref_id(cm_id_priv)) {
			if (destroy_id) {
				BUG_ON(!list_empty(&cm_id_priv->work_list));
				free_cm_id(cm_id_priv);
			}
			return;
		}
		if (empty)
			return;
		spin_lock_irqsave(&cm_id_priv->lock, flags);
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}

/*
 * This function is called in interrupt context. Schedule events on
 * the iwcm_wq thread to allow callback functions to downcall into
 * the CM and/or block. Events are queued to a per-CM_ID
 * work_list. If this is the first event on the work_list, the work
 * element is also queued on the iwcm_wq thread.
 *
 * Each event holds a reference on the cm_id. Until the last posted
 * event has been delivered and processed, the cm_id cannot be
 * deleted.
 *
 * Returns:
 *	      0	- the event was handled.
 *	-ENOMEM	- the event was not handled due to lack of resources.
 */
static int cm_event_handler(struct iw_cm_id *cm_id,
			    struct iw_cm_event *iw_event)
{
	struct iwcm_work *work;
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	work = get_work(cm_id_priv);
	if (!work) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_WORK(&work->work, cm_work_handler);
	work->cm_id = cm_id_priv;
	work->event = *iw_event;

	if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
	     work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
	    work->event.private_data_len) {
		ret = copy_private_data(&work->event);
		if (ret) {
			put_work(work);
			goto out;
		}
	}

	atomic_inc(&cm_id_priv->refcount);
	if (list_empty(&cm_id_priv->work_list)) {
		list_add_tail(&work->list, &cm_id_priv->work_list);
		queue_work(iwcm_wq, &work->work);
	} else
		list_add_tail(&work->list, &cm_id_priv->work_list);
out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int iwcm_init_qp_init_attr(struct iwcm_id_private *cm_id_priv,
				  struct ib_qp_attr *qp_attr,
				  int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_CONN_RECV:
	case IW_CM_STATE_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE |
					   IB_ACCESS_REMOTE_READ;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int iwcm_init_qp_rts_attr(struct iwcm_id_private *cm_id_priv,
				 struct ib_qp_attr *qp_attr,
				 int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_CONN_RECV:
	case IW_CM_STATE_ESTABLISHED:
		*qp_attr_mask = 0;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

int iw_cm_init_qp_attr(struct iw_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct iwcm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
	case IB_QPS_RTR:
		ret = iwcm_init_qp_init_attr(cm_id_priv,
					     qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = iwcm_init_qp_rts_attr(cm_id_priv,
					    qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(iw_cm_init_qp_attr);

static int __init iw_cm_init(void)
{
	iwcm_wq = create_singlethread_workqueue("iw_cm_wq");
	if (!iwcm_wq)
		return -ENOMEM;

	iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm",
						 iwcm_ctl_table);
	if (!iwcm_ctl_table_hdr) {
		pr_err("iw_cm: couldn't register sysctl paths\n");
		destroy_workqueue(iwcm_wq);
		return -ENOMEM;
	}

	return 0;
}

static void __exit iw_cm_cleanup(void)
{
	unregister_net_sysctl_table(iwcm_ctl_table_hdr);
	destroy_workqueue(iwcm_wq);
}

module_init(iw_cm_init);
module_exit(iw_cm_cleanup);