/*
 * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/completion.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_addr.h>

#include "iwcm.h"

MODULE_AUTHOR("Tom Tucker");
MODULE_DESCRIPTION("iWARP CM");
MODULE_LICENSE("Dual BSD/GPL");

static struct workqueue_struct *iwcm_wq;
struct iwcm_work {
        struct work_struct work;
        struct iwcm_id_private *cm_id;
        struct list_head list;
        struct iw_cm_event event;
        struct list_head free_list;
};

/*
 * The following services provide a mechanism for pre-allocating iwcm_work
 * elements.  The design pre-allocates them based on the cm_id type:
 *      LISTENING IDS:  Get enough elements preallocated to handle the
 *                      listen backlog.
 *      ACTIVE IDS:     4: CONNECT_REPLY, ESTABLISHED, DISCONNECT, CLOSE
 *      PASSIVE IDS:    3: ESTABLISHED, DISCONNECT, CLOSE
 *
 * Allocating them in connect and listen avoids having to deal
 * with allocation failures on the event upcall from the provider (which
 * is called in the interrupt context).
 *
 * One exception is when creating the cm_id for incoming connection requests.
 * There are two cases:
 * 1) in the event upcall, cm_event_handler(), for a listening cm_id.  If
 *    the backlog is exceeded, then no more connection request events will
 *    be processed.  cm_event_handler() returns -ENOMEM in this case.  It's up
 *    to the provider to reject the connection request.
 * 2) in the connection request workqueue handler, cm_conn_req_handler().
 *    If work elements cannot be allocated for the new connect request cm_id,
 *    then IWCM will call the provider reject method.  This is ok since
 *    cm_conn_req_handler() runs in the workqueue thread context.
 */

static struct iwcm_work *get_work(struct iwcm_id_private *cm_id_priv)
{
        struct iwcm_work *work;

        if (list_empty(&cm_id_priv->work_free_list))
                return NULL;
        work = list_entry(cm_id_priv->work_free_list.next, struct iwcm_work,
                          free_list);
        list_del_init(&work->free_list);
        return work;
}

static void put_work(struct iwcm_work *work)
{
        list_add(&work->free_list, &work->cm_id->work_free_list);
}

static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
{
        struct list_head *e, *tmp;

        list_for_each_safe(e, tmp, &cm_id_priv->work_free_list)
                kfree(list_entry(e, struct iwcm_work, free_list));
}

static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
{
        struct iwcm_work *work;

        BUG_ON(!list_empty(&cm_id_priv->work_free_list));
        while (count--) {
                work = kmalloc(sizeof(struct iwcm_work), GFP_KERNEL);
                if (!work) {
                        dealloc_work_entries(cm_id_priv);
                        return -ENOMEM;
                }
                work->cm_id = cm_id_priv;
                INIT_LIST_HEAD(&work->list);
                put_work(work);
        }
        return 0;
}

/*
 * Save private data from incoming connection requests to
 * iw_cm_event, so the low level driver doesn't have to. Adjust
 * the event ptr to point to the local copy.
 */
static int copy_private_data(struct iw_cm_event *event)
{
        void *p;

        p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC);
        if (!p)
                return -ENOMEM;
        event->private_data = p;
        return 0;
}

static void free_cm_id(struct iwcm_id_private *cm_id_priv)
{
        dealloc_work_entries(cm_id_priv);
        kfree(cm_id_priv);
}

/*
 * Release a reference on cm_id. If the last reference is being
 * released, enable the waiting thread (in iw_destroy_cm_id) to
 * get woken up, and return 1 if a thread is already waiting.
 */
static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
{
        BUG_ON(atomic_read(&cm_id_priv->refcount)==0);
        if (atomic_dec_and_test(&cm_id_priv->refcount)) {
                BUG_ON(!list_empty(&cm_id_priv->work_list));
                complete(&cm_id_priv->destroy_comp);
                return 1;
        }

        return 0;
}

static void add_ref(struct iw_cm_id *cm_id)
{
        struct iwcm_id_private *cm_id_priv;
        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        atomic_inc(&cm_id_priv->refcount);
}

static void rem_ref(struct iw_cm_id *cm_id)
{
        struct iwcm_id_private *cm_id_priv;
        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        if (iwcm_deref_id(cm_id_priv) &&
            test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags)) {
                BUG_ON(!list_empty(&cm_id_priv->work_list));
                free_cm_id(cm_id_priv);
        }
}

static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);

struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
                                 iw_cm_handler cm_handler,
                                 void *context)
{
        struct iwcm_id_private *cm_id_priv;

        cm_id_priv = kzalloc(sizeof(*cm_id_priv), GFP_KERNEL);
        if (!cm_id_priv)
                return ERR_PTR(-ENOMEM);

        cm_id_priv->state = IW_CM_STATE_IDLE;
        cm_id_priv->id.device = device;
        cm_id_priv->id.cm_handler = cm_handler;
        cm_id_priv->id.context = context;
        cm_id_priv->id.event_handler = cm_event_handler;
        cm_id_priv->id.add_ref = add_ref;
        cm_id_priv->id.rem_ref = rem_ref;
        spin_lock_init(&cm_id_priv->lock);
        atomic_set(&cm_id_priv->refcount, 1);
        init_waitqueue_head(&cm_id_priv->connect_wait);
        init_completion(&cm_id_priv->destroy_comp);
        INIT_LIST_HEAD(&cm_id_priv->work_list);
        INIT_LIST_HEAD(&cm_id_priv->work_free_list);

        return &cm_id_priv->id;
}
EXPORT_SYMBOL(iw_create_cm_id);
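
/*
 * Example (a minimal sketch of hypothetical consumer code, not part of
 * this file): a ULP creates a cm_id bound to its own event handler.
 * The handler name "my_cm_handler" and the context pointer "my_ctx"
 * are illustrative assumptions.
 *
 *      static int my_cm_handler(struct iw_cm_id *cm_id,
 *                               struct iw_cm_event *event);
 *
 *      struct iw_cm_id *cm_id;
 *
 *      cm_id = iw_create_cm_id(device, my_cm_handler, my_ctx);
 *      if (IS_ERR(cm_id))
 *              return PTR_ERR(cm_id);
 */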


static int iwcm_modify_qp_err(struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;

        if (!qp)
                return -EINVAL;

        qp_attr.qp_state = IB_QPS_ERR;
        return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}

/*
 * This is really the RDMAC CLOSING state. It is most similar to the
 * IB SQD QP state.
 */
static int iwcm_modify_qp_sqd(struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;

        BUG_ON(qp == NULL);
        qp_attr.qp_state = IB_QPS_SQD;
        return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}

/*
 * CM_ID <-- CLOSING
 *
 * Block if a passive or active connection is currently being processed. Then
 * process the event as follows:
 * - If we are ESTABLISHED, move to CLOSING and modify the QP state
 *   based on the abrupt flag
 * - If the connection is already in the CLOSING or IDLE state, the peer is
 *   disconnecting concurrently with us and we've already seen the
 *   DISCONNECT event -- ignore the request and return 0
 * - Disconnect on a listening endpoint returns -EINVAL
 */
int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt)
{
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret = 0;
        struct ib_qp *qp = NULL;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        /* Wait if we're currently in a connect or accept downcall */
        wait_event(cm_id_priv->connect_wait,
                   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_ESTABLISHED:
                cm_id_priv->state = IW_CM_STATE_CLOSING;

                /* QP could be NULL for a user-mode client */
                if (cm_id_priv->qp)
                        qp = cm_id_priv->qp;
                else
                        ret = -EINVAL;
                break;
        case IW_CM_STATE_LISTEN:
                ret = -EINVAL;
                break;
        case IW_CM_STATE_CLOSING:
                /* remote peer closed first */
        case IW_CM_STATE_IDLE:
                /* accept or connect returned !0 */
                break;
        case IW_CM_STATE_CONN_RECV:
                /*
                 * App called disconnect before/without calling accept after
                 * connect_request event delivered.
                 */
                break;
        case IW_CM_STATE_CONN_SENT:
                /* Can only get here if wait above fails */
        default:
                BUG();
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (qp) {
                if (abrupt)
                        ret = iwcm_modify_qp_err(qp);
                else
                        ret = iwcm_modify_qp_sqd(qp);

                /*
                 * If both sides are disconnecting the QP could
                 * already be in ERR or SQD states
                 */
                ret = 0;
        }

        return ret;
}
EXPORT_SYMBOL(iw_cm_disconnect);
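
/*
 * Example (a hypothetical caller's sketch; "cm_id" is assumed to come
 * from iw_create_cm_id() above and to be in the ESTABLISHED state): a
 * graceful close passes abrupt == 0 and moves the QP toward SQD, while
 * a non-zero abrupt flag moves the QP to ERR.
 *
 *      ret = iw_cm_disconnect(cm_id, 0);       graceful close
 *      if (ret)
 *              ret = iw_cm_disconnect(cm_id, 1);       abort instead
 */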

/*
 * CM_ID <-- DESTROYING
 *
 * Clean up all resources associated with the connection and release
 * the initial reference taken by iw_create_cm_id.
 */
static void destroy_cm_id(struct iw_cm_id *cm_id)
{
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        /*
         * Wait if we're currently in a connect or accept downcall. A
         * listening endpoint should never block here.
         */
        wait_event(cm_id_priv->connect_wait,
                   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_LISTEN:
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                /* destroy the listening endpoint */
                ret = cm_id->device->iwcm->destroy_listen(cm_id);
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        case IW_CM_STATE_ESTABLISHED:
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                /* Abrupt close of the connection */
                (void)iwcm_modify_qp_err(cm_id_priv->qp);
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        case IW_CM_STATE_IDLE:
        case IW_CM_STATE_CLOSING:
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                break;
        case IW_CM_STATE_CONN_RECV:
                /*
                 * App called destroy before/without calling accept after
                 * receiving connection request event notification or
                 * returned non zero from the event callback function.
                 * In either case, must tell the provider to reject.
                 */
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                break;
        case IW_CM_STATE_CONN_SENT:
        case IW_CM_STATE_DESTROYING:
        default:
                BUG();
                break;
        }
        if (cm_id_priv->qp) {
                cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
                cm_id_priv->qp = NULL;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        (void)iwcm_deref_id(cm_id_priv);
}

/*
 * This function is only called by the application thread and cannot
 * be called by the event thread. The function will wait for all
 * references to be released on the cm_id and then kfree the cm_id
 * object.
 */
void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{
        struct iwcm_id_private *cm_id_priv;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        BUG_ON(test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags));

        destroy_cm_id(cm_id);

        wait_for_completion(&cm_id_priv->destroy_comp);

        free_cm_id(cm_id_priv);
}
EXPORT_SYMBOL(iw_destroy_cm_id);
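
/*
 * Example (an assumed typical teardown order in consumer code):
 * disconnect first, then destroy.  Per the comment above,
 * iw_destroy_cm_id() may only be called from the application thread,
 * never from the event callback.
 *
 *      (void) iw_cm_disconnect(cm_id, 0);
 *      iw_destroy_cm_id(cm_id);
 */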

/*
 * CM_ID <-- LISTEN
 *
 * Start listening for connect requests. Generates one CONNECT_REQUEST
 * event for each inbound connect request.
 */
int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
{
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

        ret = alloc_work_entries(cm_id_priv, backlog);
        if (ret)
                return ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_IDLE:
                cm_id_priv->state = IW_CM_STATE_LISTEN;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = cm_id->device->iwcm->create_listen(cm_id, backlog);
                if (ret)
                        cm_id_priv->state = IW_CM_STATE_IDLE;
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        default:
                ret = -EINVAL;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        return ret;
}
EXPORT_SYMBOL(iw_cm_listen);
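
/*
 * Example (a hypothetical server-side sketch; the backlog value of 8 is
 * an arbitrary assumption, and the consumer is assumed to have filled
 * in cm_id->local_addr beforehand): the backlog sizes the iwcm_work
 * pool, so each of the 8 pending connect requests can be queued.
 *
 *      ret = iw_cm_listen(cm_id, 8);
 *      if (ret)
 *              iw_destroy_cm_id(cm_id);
 */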

/*
 * CM_ID <-- IDLE
 *
 * Rejects an inbound connection request. No events are generated.
 */
int iw_cm_reject(struct iw_cm_id *cm_id,
                 const void *private_data,
                 u8 private_data_len)
{
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
                return -EINVAL;
        }
        cm_id_priv->state = IW_CM_STATE_IDLE;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = cm_id->device->iwcm->reject(cm_id, private_data,
                                          private_data_len);

        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        wake_up_all(&cm_id_priv->connect_wait);

        return ret;
}
EXPORT_SYMBOL(iw_cm_reject);

/*
 * CM_ID <-- ESTABLISHED
 *
 * Accepts an inbound connection request and generates an ESTABLISHED
 * event. Callers of iw_cm_disconnect and iw_destroy_cm_id will block
 * until the ESTABLISHED event is received from the provider.
 */
int iw_cm_accept(struct iw_cm_id *cm_id,
                 struct iw_cm_conn_param *iw_param)
{
        struct iwcm_id_private *cm_id_priv;
        struct ib_qp *qp;
        unsigned long flags;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
                return -EINVAL;
        }
        /* Get the ib_qp given the QPN */
        qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
        if (!qp) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                return -EINVAL;
        }
        cm_id->device->iwcm->add_ref(qp);
        cm_id_priv->qp = qp;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = cm_id->device->iwcm->accept(cm_id, iw_param);
        if (ret) {
                /* An error on accept precludes provider events */
                BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
                cm_id_priv->state = IW_CM_STATE_IDLE;
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                if (cm_id_priv->qp) {
                        cm_id->device->iwcm->rem_ref(qp);
                        cm_id_priv->qp = NULL;
                }
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
        }

        return ret;
}
EXPORT_SYMBOL(iw_cm_accept);
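
/*
 * Example (a hypothetical CONNECT_REQUEST branch inside a consumer's
 * cm_handler; "my_qp", "want_connection" and the ord/ird values are
 * illustrative assumptions): the passive-side cm_id arrives as the
 * callback's cm_id argument, and the consumer either accepts with its
 * QP number or rejects with iw_cm_reject().
 *
 *      struct iw_cm_conn_param param = {
 *              .private_data     = NULL,
 *              .private_data_len = 0,
 *              .ord              = 1,
 *              .ird              = 1,
 *              .qpn              = my_qp->qp_num,
 *      };
 *
 *      if (want_connection)
 *              ret = iw_cm_accept(cm_id, &param);
 *      else
 *              ret = iw_cm_reject(cm_id, NULL, 0);
 */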

/*
 * Active Side: CM_ID <-- CONN_SENT
 *
 * If successful, results in the generation of a CONNECT_REPLY
 * event. iw_cm_disconnect and iw_cm_destroy will block until the
 * CONNECT_REPLY event is received from the provider.
 */
int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
        struct iwcm_id_private *cm_id_priv;
        int ret;
        unsigned long flags;
        struct ib_qp *qp;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

        ret = alloc_work_entries(cm_id_priv, 4);
        if (ret)
                return ret;

        set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        spin_lock_irqsave(&cm_id_priv->lock, flags);

        if (cm_id_priv->state != IW_CM_STATE_IDLE) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
                return -EINVAL;
        }

        /* Get the ib_qp given the QPN */
        qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
        if (!qp) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                return -EINVAL;
        }
        cm_id->device->iwcm->add_ref(qp);
        cm_id_priv->qp = qp;
        cm_id_priv->state = IW_CM_STATE_CONN_SENT;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = cm_id->device->iwcm->connect(cm_id, iw_param);
        if (ret) {
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                if (cm_id_priv->qp) {
                        cm_id->device->iwcm->rem_ref(qp);
                        cm_id_priv->qp = NULL;
                }
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
                cm_id_priv->state = IW_CM_STATE_IDLE;
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
        }

        return ret;
}
EXPORT_SYMBOL(iw_cm_connect);
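
/*
 * Example (a hypothetical active-side sketch; "my_qp" and the ord/ird
 * values are assumptions, and the consumer is assumed to have filled in
 * cm_id->local_addr and cm_id->remote_addr beforehand): the QP number
 * travels in iw_param, and success or failure is reported later via the
 * CONNECT_REPLY event.
 *
 *      struct iw_cm_conn_param param = {
 *              .ord = 1,
 *              .ird = 1,
 *              .qpn = my_qp->qp_num,
 *      };
 *
 *      ret = iw_cm_connect(cm_id, &param);
 */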

/*
 * Passive Side: new CM_ID <-- CONN_RECV
 *
 * Handles an inbound connect request. The function creates a new
 * iw_cm_id to represent the new connection and inherits the client
 * callback function and other attributes from the listening parent.
 *
 * The work item contains a pointer to the listen_cm_id and the event. The
 * listen_cm_id contains the client cm_handler, context and
 * device. These are copied when the device is cloned. The event
 * contains the new four tuple.
 *
 * An error on the child should not affect the parent, so this
 * function does not return a value.
 */
static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
                                struct iw_cm_event *iw_event)
{
        unsigned long flags;
        struct iw_cm_id *cm_id;
        struct iwcm_id_private *cm_id_priv;
        int ret;

        /*
         * The provider should never generate a connection request
         * event with a bad status.
         */
        BUG_ON(iw_event->status);

        /*
         * We could be destroying the listening id. If so, ignore this
         * upcall.
         */
        spin_lock_irqsave(&listen_id_priv->lock, flags);
        if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
                spin_unlock_irqrestore(&listen_id_priv->lock, flags);
                goto out;
        }
        spin_unlock_irqrestore(&listen_id_priv->lock, flags);

        cm_id = iw_create_cm_id(listen_id_priv->id.device,
                                listen_id_priv->id.cm_handler,
                                listen_id_priv->id.context);
        /* If the cm_id could not be created, ignore the request */
        if (IS_ERR(cm_id))
                goto out;

        cm_id->provider_data = iw_event->provider_data;
        cm_id->local_addr = iw_event->local_addr;
        cm_id->remote_addr = iw_event->remote_addr;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        cm_id_priv->state = IW_CM_STATE_CONN_RECV;

        ret = alloc_work_entries(cm_id_priv, 3);
        if (ret) {
                iw_cm_reject(cm_id, NULL, 0);
                iw_destroy_cm_id(cm_id);
                goto out;
        }

        /* Call the client CM handler */
        ret = cm_id->cm_handler(cm_id, iw_event);
        if (ret) {
                iw_cm_reject(cm_id, NULL, 0);
                set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
                destroy_cm_id(cm_id);
                if (atomic_read(&cm_id_priv->refcount)==0)
                        free_cm_id(cm_id_priv);
        }

out:
        if (iw_event->private_data_len)
                kfree(iw_event->private_data);
}

/*
 * Passive Side: CM_ID <-- ESTABLISHED
 *
 * The provider generated an ESTABLISHED event which means that
 * the MPA negotiation has completed successfully and we are now in MPA
 * FPDU mode.
 *
 * This event can only be received in the CONN_RECV state. If the
 * remote peer closed, the ESTABLISHED event would be received followed
 * by the CLOSE event. If the app closes, it will block until we wake
 * it up after processing this event.
 */
static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
                               struct iw_cm_event *iw_event)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);

        /*
         * We clear the CONNECT_WAIT bit here to allow the callback
         * function to call iw_cm_disconnect. Calling iw_destroy_cm_id
         * from a callback handler is not allowed.
         */
        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
        cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
        wake_up_all(&cm_id_priv->connect_wait);

        return ret;
}

/*
 * Active Side: CM_ID <-- ESTABLISHED
 *
 * The app has called connect and is waiting for the established event to
 * post its requests to the server. This event will wake up anyone
 * blocked in iw_cm_disconnect or iw_destroy_id.
 */
static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
                               struct iw_cm_event *iw_event)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        /*
         * Clear the connect wait bit so a callback function calling
         * iw_cm_disconnect will not wait and deadlock this thread
         */
        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
        if (iw_event->status == IW_CM_EVENT_STATUS_ACCEPTED) {
                cm_id_priv->id.local_addr = iw_event->local_addr;
                cm_id_priv->id.remote_addr = iw_event->remote_addr;
                cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
        } else {
                /* REJECTED or RESET */
                cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
                cm_id_priv->qp = NULL;
                cm_id_priv->state = IW_CM_STATE_IDLE;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);

        if (iw_event->private_data_len)
                kfree(iw_event->private_data);

        /* Wake up waiters on connect complete */
        wake_up_all(&cm_id_priv->connect_wait);

        return ret;
}

/*
 * CM_ID <-- CLOSING
 *
 * If in the ESTABLISHED state, move to CLOSING.
 */
static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
                                  struct iw_cm_event *iw_event)
{
        unsigned long flags;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->state == IW_CM_STATE_ESTABLISHED)
                cm_id_priv->state = IW_CM_STATE_CLOSING;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}

/*
 * CM_ID <-- IDLE
 *
 * If in the ESTABLISHED or CLOSING states, the QP will have been
 * moved by the provider to the ERR state. Disassociate the CM_ID from
 * the QP, move to IDLE, and remove the 'connected' reference.
 *
 * If in some other state, the cm_id was destroyed asynchronously.
 * This is the last reference that will result in waking up
 * the app thread blocked in iw_destroy_cm_id.
 */
static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
                            struct iw_cm_event *iw_event)
{
        unsigned long flags;
        int ret = 0;
        spin_lock_irqsave(&cm_id_priv->lock, flags);

        if (cm_id_priv->qp) {
                cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
                cm_id_priv->qp = NULL;
        }
        switch (cm_id_priv->state) {
        case IW_CM_STATE_ESTABLISHED:
        case IW_CM_STATE_CLOSING:
                cm_id_priv->state = IW_CM_STATE_IDLE;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        case IW_CM_STATE_DESTROYING:
                break;
        default:
                BUG();
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        return ret;
}

static int process_event(struct iwcm_id_private *cm_id_priv,
                         struct iw_cm_event *iw_event)
{
        int ret = 0;

        switch (iw_event->event) {
        case IW_CM_EVENT_CONNECT_REQUEST:
                cm_conn_req_handler(cm_id_priv, iw_event);
                break;
        case IW_CM_EVENT_CONNECT_REPLY:
                ret = cm_conn_rep_handler(cm_id_priv, iw_event);
                break;
        case IW_CM_EVENT_ESTABLISHED:
                ret = cm_conn_est_handler(cm_id_priv, iw_event);
                break;
        case IW_CM_EVENT_DISCONNECT:
                cm_disconnect_handler(cm_id_priv, iw_event);
                break;
        case IW_CM_EVENT_CLOSE:
                ret = cm_close_handler(cm_id_priv, iw_event);
                break;
        default:
                BUG();
        }

        return ret;
}

/*
 * Process events on the work_list for the cm_id. If the callback
 * function requests that the cm_id be deleted, a flag is set in the
 * cm_id flags to indicate that when the last reference is
 * removed, the cm_id is to be destroyed. This is necessary to
 * distinguish between an object that will be destroyed by the app
 * thread asleep on the destroy_comp list vs. an object destroyed
 * here synchronously when the last reference is removed.
 */
static void cm_work_handler(struct work_struct *_work)
{
        struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
        struct iw_cm_event levent;
        struct iwcm_id_private *cm_id_priv = work->cm_id;
        unsigned long flags;
        int empty;
        int ret = 0;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        empty = list_empty(&cm_id_priv->work_list);
        while (!empty) {
                work = list_entry(cm_id_priv->work_list.next,
                                  struct iwcm_work, list);
                list_del_init(&work->list);
                empty = list_empty(&cm_id_priv->work_list);
                levent = work->event;
                put_work(work);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);

                ret = process_event(cm_id_priv, &levent);
                if (ret) {
                        set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
                        destroy_cm_id(&cm_id_priv->id);
                }
                BUG_ON(atomic_read(&cm_id_priv->refcount)==0);
                if (iwcm_deref_id(cm_id_priv)) {
                        if (test_bit(IWCM_F_CALLBACK_DESTROY,
                                     &cm_id_priv->flags)) {
                                BUG_ON(!list_empty(&cm_id_priv->work_list));
                                free_cm_id(cm_id_priv);
                        }
                        return;
                }
                spin_lock_irqsave(&cm_id_priv->lock, flags);
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}

/*
 * This function is called in interrupt context. Schedule events on
 * the iwcm_wq thread to allow callback functions to downcall into
 * the CM and/or block. Events are queued to a per-CM_ID
 * work_list. If this is the first event on the work_list, the work
 * element is also queued on the iwcm_wq thread.
 *
 * Each event holds a reference on the cm_id. Until the last posted
 * event has been delivered and processed, the cm_id cannot be
 * deleted.
 *
 * Returns:
 *      0       - the event was handled.
 *      -ENOMEM - the event was not handled due to lack of resources.
 */
static int cm_event_handler(struct iw_cm_id *cm_id,
                            struct iw_cm_event *iw_event)
{
        struct iwcm_work *work;
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret = 0;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        work = get_work(cm_id_priv);
        if (!work) {
                ret = -ENOMEM;
                goto out;
        }

        INIT_WORK(&work->work, cm_work_handler);
        work->cm_id = cm_id_priv;
        work->event = *iw_event;

        if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
             work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
            work->event.private_data_len) {
                ret = copy_private_data(&work->event);
                if (ret) {
                        put_work(work);
                        goto out;
                }
        }

        atomic_inc(&cm_id_priv->refcount);
        if (list_empty(&cm_id_priv->work_list)) {
                list_add_tail(&work->list, &cm_id_priv->work_list);
                queue_work(iwcm_wq, &work->work);
        } else
                list_add_tail(&work->list, &cm_id_priv->work_list);
out:
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
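
/*
 * Example (a hypothetical provider-side sketch, assuming the driver
 * saved the cm_id at connect/listen time): a provider reports an event
 * by filling an iw_cm_event and invoking the event_handler installed by
 * iw_create_cm_id(), i.e. cm_event_handler() above.  Since that path
 * may run in interrupt context, the provider must not block here.
 *
 *      struct iw_cm_event event = {
 *              .event  = IW_CM_EVENT_DISCONNECT,
 *              .status = 0,
 *      };
 *
 *      cm_id->event_handler(cm_id, &event);
 */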

static int iwcm_init_qp_init_attr(struct iwcm_id_private *cm_id_priv,
                                  struct ib_qp_attr *qp_attr,
                                  int *qp_attr_mask)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_IDLE:
        case IW_CM_STATE_CONN_SENT:
        case IW_CM_STATE_CONN_RECV:
        case IW_CM_STATE_ESTABLISHED:
                *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
                qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
                                           IB_ACCESS_REMOTE_WRITE |
                                           IB_ACCESS_REMOTE_READ;
                ret = 0;
                break;
        default:
                ret = -EINVAL;
                break;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}

static int iwcm_init_qp_rts_attr(struct iwcm_id_private *cm_id_priv,
                                 struct ib_qp_attr *qp_attr,
                                 int *qp_attr_mask)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_IDLE:
        case IW_CM_STATE_CONN_SENT:
        case IW_CM_STATE_CONN_RECV:
        case IW_CM_STATE_ESTABLISHED:
                *qp_attr_mask = 0;
                ret = 0;
                break;
        default:
                ret = -EINVAL;
                break;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}

int iw_cm_init_qp_attr(struct iw_cm_id *cm_id,
                       struct ib_qp_attr *qp_attr,
                       int *qp_attr_mask)
{
        struct iwcm_id_private *cm_id_priv;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        switch (qp_attr->qp_state) {
        case IB_QPS_INIT:
        case IB_QPS_RTR:
                ret = iwcm_init_qp_init_attr(cm_id_priv,
                                             qp_attr, qp_attr_mask);
                break;
        case IB_QPS_RTS:
                ret = iwcm_init_qp_rts_attr(cm_id_priv,
                                            qp_attr, qp_attr_mask);
                break;
        default:
                ret = -EINVAL;
                break;
        }
        return ret;
}
EXPORT_SYMBOL(iw_cm_init_qp_attr);
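
/*
 * Example (a hypothetical consumer sketch bringing its QP up; "my_qp"
 * is an assumption and error handling is trimmed): fill in the target
 * qp_state, let the CM supply the remaining attributes and mask, then
 * apply them with ib_modify_qp().
 *
 *      struct ib_qp_attr qp_attr;
 *      int qp_attr_mask;
 *
 *      qp_attr.qp_state = IB_QPS_INIT;
 *      ret = iw_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *      if (!ret)
 *              ret = ib_modify_qp(my_qp, &qp_attr, qp_attr_mask);
 */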

static int __init iw_cm_init(void)
{
        iwcm_wq = create_singlethread_workqueue("iw_cm_wq");
        if (!iwcm_wq)
                return -ENOMEM;

        return 0;
}

static void __exit iw_cm_cleanup(void)
{
        destroy_workqueue(iwcm_wq);
}

module_init(iw_cm_init);
module_exit(iw_cm_cleanup);