IB/srp: Preparation for transmit ring response allocation
[GitHub/mt8127/android_kernel_alcatel_ttab.git] drivers/infiniband/ulp/srp/ib_srp.c
/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <asm/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"0.2"
#define DRV_RELDATE	"November 1, 2005"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

static int srp_sg_tablesize = SRP_DEF_SG_TABLESIZE;
static int srp_max_iu_len;

module_param(srp_sg_tablesize, int, 0444);
MODULE_PARM_DESC(srp_sg_tablesize,
		 "Max number of gather/scatter entries per I/O (default is 12, max 255)");

static int topspin_workarounds = 1;

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

static int mellanox_workarounds = 1;

module_param(mellanox_workarounds, int, 0444);
MODULE_PARM_DESC(mellanox_workarounds,
		 "Enable workarounds for Mellanox SRP target bugs if != 0");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

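/*
 * The OUI checks below compare the top three bytes of the big-endian
 * IOC GUID against a vendor OUI; the high bytes of an EUI-64 GUID
 * carry the manufacturer prefix.
 */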
static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

static int srp_target_is_mellanox(struct srp_target_port *target)
{
	static const u8 mellanox_oui[3] = { 0x00, 0x02, 0xc9 };

	return mellanox_workarounds &&
		!memcmp(&target->ioc_guid, mellanox_oui, sizeof mellanox_oui);
}

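/*
 * An information unit (IU) is a single SRP message buffer.  Each IU is
 * kmalloc'ed and DMA-mapped once at ring setup time; the mapping lives
 * for the lifetime of the IU and is only synced around each use.
 */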
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	printk(KERN_ERR PFX "QP event %d\n", event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_pkey(target->srp_host->srp_dev->dev,
			   target->srp_host->port,
			   be16_to_cpu(target->path.pkey),
			   &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

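/*
 * Allocate a new CM id and, if one was already bound to this target,
 * destroy the old one.  A fresh CM id is needed both at target creation
 * time and when recovering from a stale connection, since the old id
 * may still be sitting in the CM timewait state.
 */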
static int srp_new_cm_id(struct srp_target_port *target)
{
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, target);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (target->cm_id)
		ib_destroy_cm_id(target->cm_id);
	target->cm_id = new_cm_id;

	return 0;
}

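/*
 * Create the per-target IB resources: a completion queue for receives
 * (sized SRP_RQ_SIZE), one for sends (sized SRP_SQ_SIZE), and a
 * reliable connected (RC) queue pair that uses them.  Only the receive
 * CQ is armed for notification here; send completions are reaped by
 * polling (see __srp_get_tx_iu()).
 */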
static int srp_create_target_ib(struct srp_target_port *target)
{
	struct ib_qp_init_attr *init_attr;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	target->recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
				       srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0);
	if (IS_ERR(target->recv_cq)) {
		ret = PTR_ERR(target->recv_cq);
		goto err;
	}

	target->send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
				       srp_send_completion, NULL, target, SRP_SQ_SIZE, 0);
	if (IS_ERR(target->send_cq)) {
		ret = PTR_ERR(target->send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(target->recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler    = srp_qp_event;
	init_attr->cap.max_send_wr  = SRP_SQ_SIZE;
	init_attr->cap.max_recv_wr  = SRP_RQ_SIZE;
	init_attr->cap.max_recv_sge = 1;
	init_attr->cap.max_send_sge = 1;
	init_attr->sq_sig_type      = IB_SIGNAL_ALL_WR;
	init_attr->qp_type          = IB_QPT_RC;
	init_attr->send_cq          = target->send_cq;
	init_attr->recv_cq          = target->recv_cq;

	target->qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
	if (IS_ERR(target->qp)) {
		ret = PTR_ERR(target->qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, target->qp);
	if (ret)
		goto err_qp;

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(target->qp);

err_send_cq:
	ib_destroy_cq(target->send_cq);

err_recv_cq:
	ib_destroy_cq(target->recv_cq);

err:
	kfree(init_attr);
	return ret;
}

static void srp_free_target_ib(struct srp_target_port *target)
{
	int i;

	ib_destroy_qp(target->qp);
	ib_destroy_cq(target->send_cq);
	ib_destroy_cq(target->recv_cq);

	for (i = 0; i < SRP_RQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->rx_ring[i]);
	for (i = 0; i < SRP_SQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->tx_ring[i]);
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	target->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		target->path = *pathrec;
	complete(&target->done);
}

static int srp_lookup_path(struct srp_target_port *target)
{
	target->path.numb_path = 1;

	init_completion(&target->done);

	target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
						   target->srp_host->srp_dev->dev,
						   target->srp_host->port,
						   &target->path,
						   IB_SA_PATH_REC_SERVICE_ID |
						   IB_SA_PATH_REC_DGID	     |
						   IB_SA_PATH_REC_SGID	     |
						   IB_SA_PATH_REC_NUMB_PATH  |
						   IB_SA_PATH_REC_PKEY,
						   SRP_PATH_REC_TIMEOUT_MS,
						   GFP_KERNEL,
						   srp_path_rec_completion,
						   target, &target->path_query);
	if (target->path_query_id < 0)
		return target->path_query_id;

	wait_for_completion(&target->done);

	if (target->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return target->status;
}

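/*
 * Build and send the CM REQ that carries the SRP_LOGIN_REQ.  The CM
 * parameters (retry counts, responder resources, timeouts) are fixed
 * defaults; the SRP payload advertises the maximum IU length we will
 * send and requests both direct and indirect buffer descriptor formats.
 */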
static int srp_send_req(struct srp_target_port *target)
{
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path     = &target->path;
	req->param.alternate_path   = NULL;
	req->param.service_id       = target->service_id;
	req->param.qp_num           = target->qp->qp_num;
	req->param.qp_type          = target->qp->qp_type;
	req->param.private_data     = &req->priv;
	req->param.private_data_len = sizeof req->priv;
	req->param.flow_control     = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn    &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count		      = 7;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(srp_max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(target->cm_id, &req->param);

	kfree(req);

	return status;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	/* XXX should send SRP_I_LOGOUT request */

	init_completion(&target->done);
	if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM DREQ failed\n");
		return;
	}
	wait_for_completion(&target->done);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, work);

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state != SRP_TARGET_DEAD) {
		spin_unlock_irq(target->scsi_host->host_lock);
		return;
	}
	target->state = SRP_TARGET_REMOVED;
	spin_unlock_irq(target->scsi_host->host_lock);

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	ib_destroy_cm_id(target->cm_id);
	srp_free_target_ib(target);
	scsi_host_put(target->scsi_host);
}

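/*
 * Connect to the target, retrying as directed by the REJ handling in
 * srp_cm_handler(): a port or LID redirect triggers a new attempt, and
 * a stale connection is retried up to three times with a fresh CM id.
 */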
static int srp_connect_target(struct srp_target_port *target)
{
	int retries = 3;
	int ret;

	ret = srp_lookup_path(target);
	if (ret)
		return ret;

	while (1) {
		init_completion(&target->done);
		ret = srp_send_req(target);
		if (ret)
			return ret;
		wait_for_completion(&target->done);

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (target->status) {
		case 0:
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(target);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			/* Our current CM id was stale, and is now in timewait.
			 * Try to reconnect with a new one.
			 */
			if (!retries-- || srp_new_cm_id(target)) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "giving up on stale connection\n");
				target->status = -ECONNRESET;
				return target->status;
			}

			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "retrying stale connection\n");
			break;

		default:
			return target->status;
		}
	}
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_target_port *target,
			   struct srp_request *req)
{
	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (req->fmr) {
		ib_fmr_pool_unmap(req->fmr);
		req->fmr = NULL;
	}

	ib_dma_unmap_sg(target->srp_host->srp_dev->dev, scsi_sglist(scmnd),
			scsi_sg_count(scmnd), scmnd->sc_data_direction);
}

static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
{
	srp_unmap_data(req->scmnd, target, req);
	list_move_tail(&req->list, &target->free_reqs);
}

static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
{
	req->scmnd->result = DID_RESET << 16;
	req->scmnd->scsi_done(req->scmnd);
	srp_remove_req(target, req);
}

static int srp_reconnect_target(struct srp_target_port *target)
{
	struct ib_qp_attr qp_attr;
	struct srp_request *req, *tmp;
	struct ib_wc wc;
	int ret;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state != SRP_TARGET_LIVE) {
		spin_unlock_irq(target->scsi_host->host_lock);
		return -EAGAIN;
	}
	target->state = SRP_TARGET_CONNECTING;
	spin_unlock_irq(target->scsi_host->host_lock);

	srp_disconnect_target(target);
	/*
	 * Now get a new local CM ID so that we avoid confusing the
	 * target in case things are really fouled up.
	 */
	ret = srp_new_cm_id(target);
	if (ret)
		goto err;

	qp_attr.qp_state = IB_QPS_RESET;
	ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
	if (ret)
		goto err;

	ret = srp_init_qp(target, target->qp);
	if (ret)
		goto err;

	while (ib_poll_cq(target->recv_cq, 1, &wc) > 0)
		; /* nothing */
	while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
		; /* nothing */

	spin_lock_irq(target->scsi_host->host_lock);
	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
		srp_reset_req(target, req);
	spin_unlock_irq(target->scsi_host->host_lock);

	target->rx_head = 0;
	target->tx_head = 0;
	target->tx_tail = 0;

	target->qp_in_error = 0;
	ret = srp_connect_target(target);
	if (ret)
		goto err;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		ret = 0;
		target->state = SRP_TARGET_LIVE;
	} else
		ret = -EAGAIN;
	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;

err:
	shost_printk(KERN_ERR, target->scsi_host,
		     PFX "reconnect failed (%d), removing target port.\n", ret);

	/*
	 * We couldn't reconnect, so kill our target port off.
	 * However, we have to defer the real removal because we might
	 * be in the context of the SCSI error handler now, which
	 * would deadlock if we call scsi_remove_host().
	 */
	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		target->state = SRP_TARGET_DEAD;
		INIT_WORK(&target->work, srp_remove_work);
		schedule_work(&target->work);
	}
	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;
}

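/*
 * Map a scatterlist through the FMR pool.  FMR mappings must be aligned
 * to the FMR page size, so only the first entry may start at a nonzero
 * page offset and only the last entry may end short of a page boundary;
 * anything else is rejected with -EINVAL, which makes srp_map_data()
 * fall back to an indirect descriptor.
 */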
static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
		       int sg_cnt, struct srp_request *req,
		       struct srp_direct_buf *buf)
{
	u64 io_addr = 0;
	u64 *dma_pages;
	u32 len;
	int page_cnt;
	int i, j;
	int ret;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct scatterlist *sg;

	if (!dev->fmr_pool)
		return -ENODEV;

	if (srp_target_is_mellanox(target) &&
	    (ib_sg_dma_address(ibdev, &scat[0]) & ~dev->fmr_page_mask))
		return -EINVAL;

	len = page_cnt = 0;
	scsi_for_each_sg(req->scmnd, sg, sg_cnt, i) {
		unsigned int dma_len = ib_sg_dma_len(ibdev, sg);

		if (ib_sg_dma_address(ibdev, sg) & ~dev->fmr_page_mask) {
			if (i > 0)
				return -EINVAL;
			else
				++page_cnt;
		}
		if ((ib_sg_dma_address(ibdev, sg) + dma_len) &
		    ~dev->fmr_page_mask) {
			if (i < sg_cnt - 1)
				return -EINVAL;
			else
				++page_cnt;
		}

		len += dma_len;
	}

	page_cnt += len >> dev->fmr_page_shift;
	if (page_cnt > SRP_FMR_SIZE)
		return -ENOMEM;

	dma_pages = kmalloc(sizeof (u64) * page_cnt, GFP_ATOMIC);
	if (!dma_pages)
		return -ENOMEM;

	page_cnt = 0;
	scsi_for_each_sg(req->scmnd, sg, sg_cnt, i) {
		unsigned int dma_len = ib_sg_dma_len(ibdev, sg);

		for (j = 0; j < dma_len; j += dev->fmr_page_size)
			dma_pages[page_cnt++] =
				(ib_sg_dma_address(ibdev, sg) &
				 dev->fmr_page_mask) + j;
	}

	req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool,
					dma_pages, page_cnt, io_addr);
	if (IS_ERR(req->fmr)) {
		ret = PTR_ERR(req->fmr);
		req->fmr = NULL;
		goto out;
	}

	buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, &scat[0]) &
			       ~dev->fmr_page_mask);
	buf->key = cpu_to_be32(req->fmr->fmr->rkey);
	buf->len = cpu_to_be32(len);

	ret = 0;

out:
	kfree(dma_pages);

	return ret;
}

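/*
 * Fill in the data descriptor of an SRP_CMD.  A single DMA-mapped entry
 * (or a successful FMR mapping) produces a direct descriptor; otherwise
 * one direct descriptor per scatterlist entry is emitted as an indirect
 * table.  Returns the total length of the IU payload, or a negative
 * errno.
 */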
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
			struct srp_request *req)
{
	struct scatterlist *scat;
	struct srp_cmd *cmd = req->cmd->buf;
	int len, nents, count;
	u8 fmt = SRP_DATA_DESC_DIRECT;
	struct srp_device *dev;
	struct ib_device *ibdev;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat  = scsi_sglist(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);

	if (count == 1) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(dev->mr->rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
	} else if (srp_map_fmr(target, scat, count, req,
			       (void *) cmd->add_data)) {
		/*
		 * FMR mapping failed, and the scatterlist has more
		 * than one entry.  Generate an indirect memory
		 * descriptor.
		 */
		struct srp_indirect_buf *buf = (void *) cmd->add_data;
		struct scatterlist *sg;
		u32 datalen = 0;
		int i;

		fmt = SRP_DATA_DESC_INDIRECT;
		len = sizeof (struct srp_cmd) +
			sizeof (struct srp_indirect_buf) +
			count * sizeof (struct srp_direct_buf);

		scsi_for_each_sg(scmnd, sg, count, i) {
			unsigned int dma_len = ib_sg_dma_len(ibdev, sg);

			buf->desc_list[i].va  =
				cpu_to_be64(ib_sg_dma_address(ibdev, sg));
			buf->desc_list[i].key =
				cpu_to_be32(dev->mr->rkey);
			buf->desc_list[i].len = cpu_to_be32(dma_len);
			datalen += dma_len;
		}

		if (scmnd->sc_data_direction == DMA_TO_DEVICE)
			cmd->data_out_desc_cnt = count;
		else
			cmd->data_in_desc_cnt = count;

		buf->table_desc.va  =
			cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf);
		buf->table_desc.key =
			cpu_to_be32(target->srp_host->srp_dev->mr->rkey);
		buf->table_desc.len =
			cpu_to_be32(count * sizeof (struct srp_direct_buf));

		buf->len = cpu_to_be32(datalen);
	}

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}

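/*
 * Repost one receive IU.  SRP_RQ_SIZE is a power of two, so masking
 * rx_head with SRP_RQ_MASK turns the monotonically increasing head
 * counter into a ring index; the index doubles as the work request id.
 */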
static int srp_post_recv(struct srp_target_port *target)
{
	unsigned long flags;
	struct srp_iu *iu;
	struct ib_sge list;
	struct ib_recv_wr wr, *bad_wr;
	unsigned int next;
	int ret;

	spin_lock_irqsave(target->scsi_host->host_lock, flags);

	next	 = target->rx_head & SRP_RQ_MASK;
	wr.wr_id = next;
	iu	 = target->rx_ring[next];

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->srp_host->srp_dev->mr->lkey;

	wr.next    = NULL;
	wr.sg_list = &list;
	wr.num_sge = 1;

	ret = ib_post_recv(target->qp, &wr, &bad_wr);
	if (!ret)
		++target->rx_head;

	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);

	return ret;
}

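/*
 * Process an SRP_RSP IU.  The req_lim delta carried in every response
 * replenishes our send credits.  The SRP_TAG_TSK_MGMT bit in the tag
 * selects between completing a task-management request and a normal
 * command.  Note that when req->scmnd is NULL only a warning is printed
 * before the pointer is dereferenced anyway; this revision does not
 * recover from that inconsistency.
 */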
static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;
	s32 delta;

	delta = (s32) be32_to_cpu(rsp->req_lim_delta);

	spin_lock_irqsave(target->scsi_host->host_lock, flags);

	target->req_lim += delta;

	req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT];

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		if (be32_to_cpu(rsp->resp_data_len) < 4)
			req->tsk_status = -1;
		else
			req->tsk_status = rsp->data[3];
		complete(&req->done);
	} else {
		scmnd = req->scmnd;
		if (!scmnd)
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %016llx\n",
				     (unsigned long long) rsp->tag);
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));

		if (!req->tsk_mgmt) {
			scmnd->host_scribble = (void *) -1L;
			scmnd->scsi_done(scmnd);

			srp_remove_req(target, req);
		} else
			req->cmd_done = 1;
	}

	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
}

static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
	struct ib_device *dev;
	struct srp_iu *iu;
	int res;
	u8 opcode;

	iu = target->rx_ring[wc->wr_id];

	dev = target->srp_host->srp_dev->dev;
	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "recv completion, opcode 0x%02x\n", opcode);
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
			       iu->buf, wc->byte_len, true);
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(target, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Got target logout request\n");
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
				      DMA_FROM_DEVICE);

	res = srp_post_recv(target);
	if (res != 0)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Recv failed with error code %d\n", res);
}

static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status) {
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "failed receive status %d\n",
				     wc.status);
			target->qp_in_error = 1;
			break;
		}

		srp_handle_recv(target, &wc);
	}
}

static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status) {
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "failed send status %d\n",
				     wc.status);
			target->qp_in_error = 1;
			break;
		}

		++target->tx_tail;
	}
}

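/*
 * A note on send-queue accounting: tx_head - tx_tail is the number of
 * send work requests in flight, bounded by SRP_SQ_SIZE.  In addition,
 * SRP_TSK_MGMT_SQ_SIZE request-limit credits are held back from normal
 * commands so that a task-management IU can always be sent.
 */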
/*
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and tx_head.  Lock cannot be dropped between call here and
 * call to __srp_post_send().
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
				      enum srp_request_type req_type)
{
	s32 rsv = (req_type == SRP_REQ_TASK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;

	srp_send_completion(target->send_cq, target);

	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
		return NULL;

	if (target->req_lim <= rsv) {
		++target->zero_req_lim;
		return NULL;
	}

	return target->tx_ring[target->tx_head & SRP_SQ_MASK];
}

/*
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and tx_head.
 */
static int __srp_post_send(struct srp_target_port *target,
			   struct srp_iu *iu, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;
	int ret = 0;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->srp_host->srp_dev->mr->lkey;

	wr.next       = NULL;
	wr.wr_id      = target->tx_head & SRP_SQ_MASK;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(target->qp, &wr, &bad_wr);

	if (!ret) {
		++target->tx_head;
		--target->req_lim;
	}

	return ret;
}

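/*
 * SCSI midlayer entry point.  Grabs a free request slot and a tx IU,
 * builds the SRP_CMD (the request index doubles as the SRP tag), maps
 * the data buffer and posts the send.  Returns SCSI_MLQUEUE_HOST_BUSY
 * so the midlayer retries when no IU or credit is available.
 */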
static int srp_queuecommand(struct scsi_cmnd *scmnd,
			    void (*done)(struct scsi_cmnd *))
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	struct ib_device *dev;
	int len;

	if (target->state == SRP_TARGET_CONNECTING)
		goto err;

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED) {
		scmnd->result = DID_BAD_TARGET << 16;
		done(scmnd);
		return 0;
	}

	iu = __srp_get_tx_iu(target, SRP_REQ_NORMAL);
	if (!iu)
		goto err;

	dev = target->srp_host->srp_dev->dev;
	ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
				   DMA_TO_DEVICE);

	req = list_entry(target->free_reqs.next, struct srp_request, list);

	scmnd->scsi_done     = done;
	scmnd->result        = 0;
	scmnd->host_scribble = (void *) (long) req->index;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
	cmd->tag    = req->index;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd    = scmnd;
	req->cmd      = iu;
	req->cmd_done = 0;
	req->tsk_mgmt = NULL;

	len = srp_map_data(scmnd, target, req);
	if (len < 0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Failed to map data\n");
		goto err;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
				      DMA_TO_DEVICE);

	if (__srp_post_send(target, iu, len)) {
		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
		goto err_unmap;
	}

	list_move_tail(&req->list, &target->req_queue);

	return 0;

err_unmap:
	srp_unmap_data(scmnd, target, req);

err:
	return SCSI_MLQUEUE_HOST_BUSY;
}

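/*
 * Allocate the receive and transmit rings.  This runs after a
 * successful login because the receive IUs are sized to max_ti_iu_len,
 * which the target only reports in its SRP_LOGIN_RSP.
 */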
static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
	int i;

	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		target->rx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_ti_iu_len,
						  GFP_KERNEL, DMA_FROM_DEVICE);
		if (!target->rx_ring[i])
			goto err;
	}

	for (i = 0; i < SRP_SQ_SIZE; ++i) {
		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
						  srp_max_iu_len,
						  GFP_KERNEL, DMA_TO_DEVICE);
		if (!target->tx_ring[i])
			goto err;
	}

	return 0;

err:
	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		srp_free_iu(target->srp_host, target->rx_ring[i]);
		target->rx_ring[i] = NULL;
	}

	for (i = 0; i < SRP_SQ_SIZE; ++i) {
		srp_free_iu(target->srp_host, target->tx_ring[i]);
		target->tx_ring[i] = NULL;
	}

	return -ENOMEM;
}

static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_target_port *target)
{
	struct Scsi_Host *shost = target->scsi_host;
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		target->path.dlid = cpi->redirect_lid;
		target->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);

		target->status = target->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (srp_target_is_topspin(target)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(target->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			shost_printk(KERN_DEBUG, shost,
				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
				     (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
				     (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));

			target->status = SRP_PORT_REDIRECT;
		} else {
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			target->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		shost_printk(KERN_WARNING, shost,
			     "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
		} else
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
				     " opcode 0x%02x\n", opcode);
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_STALE_CONN:
		shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
		target->status = SRP_STALE_CONN;
		break;

	default:
		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
			     event->param.rej_rcvd.reason);
		target->status = -ECONNRESET;
	}
}

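/*
 * Connection state machine.  On IB_CM_REP_RECEIVED the QP is walked
 * through RTR and RTS, the receive ring is allocated on first connect
 * and fully posted, and an RTU is sent.  Events that finish a connect
 * or disconnect attempt set "comp" so that the waiter in
 * srp_connect_target() or srp_disconnect_target() is woken up.
 */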
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_target_port *target = cm_id->context;
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int comp = 0;
	int opcode = 0;
	int i;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
		comp = 1;
		target->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		opcode = *(u8 *) event->private_data;

		if (opcode == SRP_LOGIN_RSP) {
			struct srp_login_rsp *rsp = event->private_data;

			target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len);
			target->req_lim       = be32_to_cpu(rsp->req_lim_delta);

			target->scsi_host->can_queue = min(target->req_lim,
							   target->scsi_host->can_queue);
		} else {
			shost_printk(KERN_WARNING, target->scsi_host,
				     PFX "Unhandled RSP opcode %#x\n", opcode);
			target->status = -ECONNRESET;
			break;
		}

		if (!target->rx_ring[0]) {
			target->status = srp_alloc_iu_bufs(target);
			if (target->status)
				break;
		}

		qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
		if (!qp_attr) {
			target->status = -ENOMEM;
			break;
		}

		qp_attr->qp_state = IB_QPS_RTR;
		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (target->status)
			break;

		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
		if (target->status)
			break;

		for (i = 0; i < SRP_RQ_SIZE; i++) {
			target->status = srp_post_recv(target);
			if (target->status)
				break;
		}
		if (target->status)
			break;

		qp_attr->qp_state = IB_QPS_RTS;
		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (target->status)
			break;

		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
		if (target->status)
			break;

		target->status = ib_send_cm_rtu(cm_id, NULL, 0);
		if (target->status)
			break;

		break;

	case IB_CM_REJ_RECEIVED:
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, target);
		break;

	case IB_CM_DREQ_RECEIVED:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "DREQ received - connection closed\n");
		if (ib_send_cm_drep(cm_id, NULL, 0))
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "Sending CM DREP failed\n");
		break;

	case IB_CM_TIMEWAIT_EXIT:
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");

		comp = 1;
		target->status = 0;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&target->done);

	kfree(qp_attr);

	return 0;
}

static int srp_send_tsk_mgmt(struct srp_target_port *target,
			     struct srp_request *req, u8 func)
{
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

	spin_lock_irq(target->scsi_host->host_lock);

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED) {
		req->scmnd->result = DID_BAD_TARGET << 16;
		goto out;
	}

	init_completion(&req->done);

	iu = __srp_get_tx_iu(target, SRP_REQ_TASK_MGMT);
	if (!iu)
		goto out;

	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode	= SRP_TSK_MGMT;
	tsk_mgmt->lun		= cpu_to_be64((u64) req->scmnd->device->lun << 48);
	tsk_mgmt->tag		= req->index | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag	= req->index;

	if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
		goto out;

	req->tsk_mgmt = iu;

	spin_unlock_irq(target->scsi_host->host_lock);

	if (!wait_for_completion_timeout(&req->done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return -1;

	return 0;

out:
	spin_unlock_irq(target->scsi_host->host_lock);
	return -1;
}

static int srp_find_req(struct srp_target_port *target,
			struct scsi_cmnd *scmnd,
			struct srp_request **req)
{
	if (scmnd->host_scribble == (void *) -1L)
		return -1;

	*req = &target->req_ring[(long) scmnd->host_scribble];

	return 0;
}

static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req;
	int ret = SUCCESS;

	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");

	if (target->qp_in_error)
		return FAILED;
	if (srp_find_req(target, scmnd, &req))
		return FAILED;
	if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
		return FAILED;

	spin_lock_irq(target->scsi_host->host_lock);

	if (req->cmd_done) {
		srp_remove_req(target, req);
		scmnd->scsi_done(scmnd);
	} else if (!req->tsk_status) {
		srp_remove_req(target, req);
		scmnd->result = DID_ABORT << 16;
	} else
		ret = FAILED;

	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;
}

static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req, *tmp;

	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");

	if (target->qp_in_error)
		return FAILED;
	if (srp_find_req(target, scmnd, &req))
		return FAILED;
	if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET))
		return FAILED;
	if (req->tsk_status)
		return FAILED;

	spin_lock_irq(target->scsi_host->host_lock);

	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
		if (req->scmnd->device == scmnd->device)
			srp_reset_req(target, req);

	spin_unlock_irq(target->scsi_host->host_lock);

	return SUCCESS;
}

static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	int ret = FAILED;

	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");

	if (!srp_reconnect_target(target))
		ret = SUCCESS;

	return ret;
}

static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->id_ext));
}

static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
}

static ssize_t show_service_id(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->service_id));
}

static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
}

static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "%pI6\n", target->path.dgid.raw);
}

static ssize_t show_orig_dgid(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "%pI6\n", target->orig_dgid);
}

static ssize_t show_req_lim(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "%d\n", target->req_lim);
}

static ssize_t show_zero_req_lim(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "%d\n", target->zero_req_lim);
}

static ssize_t show_local_ib_port(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->srp_host->port);
}

static ssize_t show_local_ib_device(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
}

static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);

static struct device_attribute *srp_host_attrs[] = {
	&dev_attr_id_ext,
	&dev_attr_ioc_guid,
	&dev_attr_service_id,
	&dev_attr_pkey,
	&dev_attr_dgid,
	&dev_attr_orig_dgid,
	&dev_attr_req_lim,
	&dev_attr_zero_req_lim,
	&dev_attr_local_ib_port,
	&dev_attr_local_ib_device,
	NULL
};

static struct scsi_host_template srp_template = {
	.module			 = THIS_MODULE,
	.name			 = "InfiniBand SRP initiator",
	.proc_name		 = DRV_NAME,
	.info			 = srp_target_info,
	.queuecommand		 = srp_queuecommand,
	.eh_abort_handler	 = srp_abort,
	.eh_device_reset_handler = srp_reset_device,
	.eh_host_reset_handler	 = srp_reset_host,
	.can_queue		 = SRP_CMD_SQ_SIZE,
	.this_id		 = -1,
	.cmd_per_lun		 = SRP_CMD_SQ_SIZE,
	.use_clustering		 = ENABLE_CLUSTERING,
	.shost_attrs		 = srp_host_attrs
};

static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	sprintf(target->target_name, "SRP.T10:%016llX",
		(unsigned long long) be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
		return -ENODEV;

	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport)) {
		scsi_remove_host(target->scsi_host);
		return PTR_ERR(rport);
	}

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	target->state = SRP_TARGET_LIVE;

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	return 0;
}

static void srp_release_dev(struct device *dev)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name        = "infiniband_srp",
	.dev_release = srp_release_dev
};

/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */
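/*
 * For example (all identifiers below are hypothetical and depend on the
 * local fabric, HCA name, and target):
 *
 *     echo "id_ext=200500A0B81146A1,ioc_guid=00A0B80200402BEC,\
 *     dgid=FE800000000000000002C903000E8ACD,pkey=ffff,\
 *     service_id=200500A0B81146A1" > \
 *         /sys/class/infiniband_srp/srp-mthca0-1/add_target
 */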
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_INITIATOR_EXT	= 1 << 8,
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
				   SRP_OPT_IOC_GUID	|
				   SRP_OPT_DGID		|
				   SRP_OPT_PKEY		|
				   SRP_OPT_SERVICE_ID),
};

static const match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s"		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s"		},
	{ SRP_OPT_DGID,			"dgid=%s"		},
	{ SRP_OPT_PKEY,			"pkey=%x"		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d"		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d"	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
	{ SRP_OPT_ERR,			NULL			}
};

1720static int srp_parse_options(const char *buf, struct srp_target_port *target)
1721{
1722 char *options, *sep_opt;
1723 char *p;
1724 char dgid[3];
1725 substring_t args[MAX_OPT_ARGS];
1726 int opt_mask = 0;
1727 int token;
1728 int ret = -EINVAL;
1729 int i;
1730
1731 options = kstrdup(buf, GFP_KERNEL);
1732 if (!options)
1733 return -ENOMEM;
1734
1735 sep_opt = options;
1736 while ((p = strsep(&sep_opt, ",")) != NULL) {
1737 if (!*p)
1738 continue;
1739
1740 token = match_token(p, srp_opt_tokens, args);
1741 opt_mask |= token;
1742
1743 switch (token) {
1744 case SRP_OPT_ID_EXT:
1745 p = match_strdup(args);
a20f3a6d
IR
1746 if (!p) {
1747 ret = -ENOMEM;
1748 goto out;
1749 }
aef9ec39
RD
1750 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
1751 kfree(p);
1752 break;
1753
1754 case SRP_OPT_IOC_GUID:
1755 p = match_strdup(args);
a20f3a6d
IR
1756 if (!p) {
1757 ret = -ENOMEM;
1758 goto out;
1759 }
aef9ec39
RD
1760 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
1761 kfree(p);
1762 break;
1763
1764 case SRP_OPT_DGID:
1765 p = match_strdup(args);
a20f3a6d
IR
1766 if (!p) {
1767 ret = -ENOMEM;
1768 goto out;
1769 }
aef9ec39
RD
1770 if (strlen(p) != 32) {
1771 printk(KERN_WARNING PFX "bad dest GID parameter '%s'\n", p);
ce1823f0 1772 kfree(p);
aef9ec39
RD
1773 goto out;
1774 }
1775
1776 for (i = 0; i < 16; ++i) {
1777 strlcpy(dgid, p + i * 2, 3);
1778 target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
1779 }
bf17c1c7 1780 kfree(p);
3633b3d0 1781 memcpy(target->orig_dgid, target->path.dgid.raw, 16);
aef9ec39
RD
1782 break;
1783
1784 case SRP_OPT_PKEY:
1785 if (match_hex(args, &token)) {
1786 printk(KERN_WARNING PFX "bad P_Key parameter '%s'\n", p);
1787 goto out;
1788 }
1789 target->path.pkey = cpu_to_be16(token);
1790 break;
1791
1792 case SRP_OPT_SERVICE_ID:
1793 p = match_strdup(args);
a20f3a6d
IR
1794 if (!p) {
1795 ret = -ENOMEM;
1796 goto out;
1797 }
aef9ec39 1798 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
247e020e 1799 target->path.service_id = target->service_id;
aef9ec39
RD
1800 kfree(p);
1801 break;
1802
1803 case SRP_OPT_MAX_SECT:
1804 if (match_int(args, &token)) {
1805 printk(KERN_WARNING PFX "bad max sect parameter '%s'\n", p);
1806 goto out;
1807 }
1808 target->scsi_host->max_sectors = token;
1809 break;
1810
52fb2b50
VP
1811 case SRP_OPT_MAX_CMD_PER_LUN:
1812 if (match_int(args, &token)) {
1813 printk(KERN_WARNING PFX "bad max cmd_per_lun parameter '%s'\n", p);
1814 goto out;
1815 }
dd5e6e38 1816 target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE);
52fb2b50
VP
1817 break;
1818
0c0450db
R
1819 case SRP_OPT_IO_CLASS:
1820 if (match_hex(args, &token)) {
1821 printk(KERN_WARNING PFX "bad IO class parameter '%s' \n", p);
1822 goto out;
1823 }
1824 if (token != SRP_REV10_IB_IO_CLASS &&
1825 token != SRP_REV16A_IB_IO_CLASS) {
1826 printk(KERN_WARNING PFX "unknown IO class parameter value"
1827 " %x specified (use %x or %x).\n",
1828 token, SRP_REV10_IB_IO_CLASS, SRP_REV16A_IB_IO_CLASS);
1829 goto out;
1830 }
1831 target->io_class = token;
1832 break;
1833
		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		default:
			printk(KERN_WARNING PFX "unknown parameter or missing value "
			       "'%s' in target creation request\n", p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				printk(KERN_WARNING PFX "target creation request is "
				       "missing parameter '%s'\n",
				       srp_opt_tokens[i].pattern);

out:
	kfree(options);
	return ret;
}

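/*
 * Store method for the per-port "add_target" attribute: parse the option
 * string, allocate and wire up a SCSI host, then log in to the target.
 * Returns the number of bytes consumed on success.
 */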
static ssize_t srp_create_target(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	int ret;
	int i;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->transportt = ib_srp_transport_template;
	target_host->max_lun = SRP_MAX_LUN;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;

	target = host_to_target(target_host);

	target->io_class = SRP_REV16A_IB_IO_CLASS;
	target->scsi_host = target_host;
	target->srp_host = host;

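	/*
	 * Every request slot starts out on the free list; slots move to
	 * req_queue while the corresponding SCSI command is outstanding.
	 */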
	INIT_LIST_HEAD(&target->free_reqs);
	INIT_LIST_HEAD(&target->req_queue);
	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
		target->req_ring[i].index = i;
		list_add_tail(&target->req_ring[i].list, &target->free_reqs);
	}

	ret = srp_parse_options(buf, target);
	if (ret)
		goto err;

	ib_query_gid(host->srp_dev->dev, host->port, 0, &target->path.sgid);

	shost_printk(KERN_DEBUG, target->scsi_host, PFX
		     "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
		     "service_id %016llx dgid %pI6\n",
		     (unsigned long long) be64_to_cpu(target->id_ext),
		     (unsigned long long) be64_to_cpu(target->ioc_guid),
		     be16_to_cpu(target->path.pkey),
		     (unsigned long long) be64_to_cpu(target->service_id),
		     target->path.dgid.raw);

	ret = srp_create_target_ib(target);
	if (ret)
		goto err;

	ret = srp_new_cm_id(target);
	if (ret)
		goto err_free;

	target->qp_in_error = 0;
	ret = srp_connect_target(target);
	if (ret) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Connection failed\n");
		goto err_cm_id;
	}

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	return count;

err_disconnect:
	srp_disconnect_target(target);

err_cm_id:
	ib_destroy_cm_id(target->cm_id);

err_free:
	srp_free_target_ib(target);

err:
	scsi_host_put(target_host);

	return ret;
}

static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
}

static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%d\n", host->port);
}

static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
f5358a17 1973static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
aef9ec39
RD
1974{
1975 struct srp_host *host;
1976
1977 host = kzalloc(sizeof *host, GFP_KERNEL);
1978 if (!host)
1979 return NULL;
1980
1981 INIT_LIST_HEAD(&host->target_list);
b3589fd4 1982 spin_lock_init(&host->target_lock);
aef9ec39 1983 init_completion(&host->released);
05321937 1984 host->srp_dev = device;
aef9ec39
RD
1985 host->port = port;
1986
ee959b00
TJ
1987 host->dev.class = &srp_class;
1988 host->dev.parent = device->dev->dma_device;
d927e38c 1989 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
aef9ec39 1990
ee959b00 1991 if (device_register(&host->dev))
f5358a17 1992 goto free_host;
ee959b00 1993 if (device_create_file(&host->dev, &dev_attr_add_target))
aef9ec39 1994 goto err_class;
ee959b00 1995 if (device_create_file(&host->dev, &dev_attr_ibdev))
aef9ec39 1996 goto err_class;
ee959b00 1997 if (device_create_file(&host->dev, &dev_attr_port))
aef9ec39
RD
1998 goto err_class;
1999
2000 return host;
2001
2002err_class:
ee959b00 2003 device_unregister(&host->dev);
aef9ec39 2004
f5358a17 2005free_host:
aef9ec39
RD
2006 kfree(host);
2007
2008 return NULL;
2009}
2010
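/*
 * IB client "add" callback: set up per-device resources (PD, MR and,
 * when supported, an FMR pool) and scan every port for SRP use.
 */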
static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct ib_fmr_pool_param fmr_param;
	struct srp_host *host;
	int s, e, p;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		printk(KERN_WARNING PFX "Query device failed for %s\n",
		       device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 512 bytes (which is the smallest sector that a
	 * SCSI command will ever carry).
	 */
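	/*
	 * For example, page_size_cap = 0xfffff000 gives ffs() - 1 = 12,
	 * i.e. 4 KB FMR pages; max(9, ...) clamps anything smaller to 512.
	 */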
	srp_dev->fmr_page_shift = max(9, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->fmr_page_size = 1 << srp_dev->fmr_page_shift;
	srp_dev->fmr_page_mask = ~((u64) srp_dev->fmr_page_size - 1);

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

	memset(&fmr_param, 0, sizeof fmr_param);
	fmr_param.pool_size = SRP_FMR_POOL_SIZE;
	fmr_param.dirty_watermark = SRP_FMR_DIRTY_SIZE;
	fmr_param.cache = 1;
	fmr_param.max_pages_per_fmr = SRP_FMR_SIZE;
	fmr_param.page_shift = srp_dev->fmr_page_shift;
	fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
			    IB_ACCESS_REMOTE_WRITE |
			    IB_ACCESS_REMOTE_READ);

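	/*
	 * FMR support is optional: on failure the pool pointer is simply
	 * left NULL and the driver runs without it.
	 */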
	srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
	if (IS_ERR(srp_dev->fmr_pool))
		srp_dev->fmr_pool = NULL;

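	/* A switch has the single management port 0; HCAs use ports 1..N. */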
	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}

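/*
 * IB client "remove" callback.  Teardown order matters: unregister the
 * sysfs entries first so no new targets can be added, mark existing
 * targets removed so nothing is requeued, flush pending work, then free
 * each target and finally the per-device resources.
 */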
static void srp_remove_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	LIST_HEAD(target_list);
	struct srp_target_port *target, *tmp_target;

	srp_dev = ib_get_client_data(device, &srp_client);

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Mark all target ports as removed, so we stop queueing
		 * commands and don't try to reconnect.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list) {
			spin_lock_irq(target->scsi_host->host_lock);
			target->state = SRP_TARGET_REMOVED;
			spin_unlock_irq(target->scsi_host->host_lock);
		}
		spin_unlock(&host->target_lock);

		/*
		 * Wait for any reconnection tasks that may have
		 * started before we marked our target ports as
		 * removed, and any target port removal tasks.
		 */
		flush_scheduled_work();

		list_for_each_entry_safe(target, tmp_target,
					 &host->target_list, list) {
			srp_remove_host(target->scsi_host);
			scsi_remove_host(target->scsi_host);
			srp_disconnect_target(target);
			ib_destroy_cm_id(target->cm_id);
			srp_free_target_ib(target);
			scsi_host_put(target->scsi_host);
		}

		kfree(host);
	}

	if (srp_dev->fmr_pool)
		ib_destroy_fmr_pool(srp_dev->fmr_pool);
	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}

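/*
 * No optional SRP transport callbacks are implemented yet; the empty
 * template is still needed so SCSI hosts attach to the SRP transport class.
 */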
static struct srp_function_template ib_srp_transport_functions = {
};

static int __init srp_init_module(void)
{
	int ret;

	BUILD_BUG_ON_NOT_POWER_OF_2(SRP_SQ_SIZE);
	BUILD_BUG_ON_NOT_POWER_OF_2(SRP_RQ_SIZE);

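	/*
	 * SRP_CMD carries its descriptor counts in single-byte fields, so
	 * more than 255 scatter/gather entries can never be expressed.
	 */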
	if (srp_sg_tablesize > 255) {
		printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n");
		srp_sg_tablesize = 255;
	}

	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		return -ENOMEM;

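	/*
	 * Largest SRP_CMD information unit we will ever send: the fixed
	 * command header, an indirect descriptor table header, and 16 bytes
	 * (one direct buffer descriptor) per scatter/gather element.
	 */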
	srp_template.sg_tablesize = srp_sg_tablesize;
	srp_max_iu_len = (sizeof (struct srp_cmd) +
			  sizeof (struct srp_indirect_buf) +
			  srp_sg_tablesize * 16);

	ret = class_register(&srp_class);
	if (ret) {
		printk(KERN_ERR PFX "couldn't register class infiniband_srp\n");
		srp_release_transport(ib_srp_transport_template);
		return ret;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		printk(KERN_ERR PFX "couldn't register IB client\n");
		srp_release_transport(ib_srp_transport_template);
		ib_sa_unregister_client(&srp_sa_client);
		class_unregister(&srp_class);
		return ret;
	}

	return 0;
}
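
/* Tear down in the reverse order of registration in srp_init_module(). */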
static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);