drivers/scsi/iscsi_tcp.c
1 /*
2 * iSCSI Initiator over TCP/IP Data-Path
3 *
4 * Copyright (C) 2004 Dmitry Yusupov
5 * Copyright (C) 2004 Alex Aizman
6 * Copyright (C) 2005 Mike Christie
7 * maintained by open-iscsi@googlegroups.com
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published
11 * by the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * See the file COPYING included with this distribution for more details.
20 *
21 * Credits:
22 * Christoph Hellwig
23 * FUJITA Tomonori
24 * Arne Redlich
25 * Zhenyu Wang
26 */
27
28 #include <linux/types.h>
29 #include <linux/list.h>
30 #include <linux/inet.h>
31 #include <linux/blkdev.h>
32 #include <linux/crypto.h>
33 #include <linux/delay.h>
34 #include <linux/kfifo.h>
35 #include <linux/scatterlist.h>
36 #include <net/tcp.h>
37 #include <scsi/scsi_cmnd.h>
38 #include <scsi/scsi_device.h>
39 #include <scsi/scsi_eh.h>
40 #include <scsi/scsi_request.h>
41 #include <scsi/scsi_tcq.h>
42 #include <scsi/scsi_host.h>
43 #include <scsi/scsi.h>
44 #include <scsi/scsi_transport_iscsi.h>
45
46 #include "iscsi_tcp.h"
47
48 MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, "
49 "Alex Aizman <itn780@yahoo.com>");
50 MODULE_DESCRIPTION("iSCSI/TCP data-path");
51 MODULE_LICENSE("GPL");
52 MODULE_VERSION("0:4.445");
53 /* #define DEBUG_TCP */
54 /* #define DEBUG_SCSI */
55 #define DEBUG_ASSERT
56
57 #ifdef DEBUG_TCP
58 #define debug_tcp(fmt...) printk(KERN_DEBUG "tcp: " fmt)
59 #else
60 #define debug_tcp(fmt...)
61 #endif
62
63 #ifdef DEBUG_SCSI
64 #define debug_scsi(fmt...) printk(KERN_DEBUG "scsi: " fmt)
65 #else
66 #define debug_scsi(fmt...)
67 #endif
68
69 #ifndef DEBUG_ASSERT
70 #ifdef BUG_ON
71 #undef BUG_ON
72 #endif
73 #define BUG_ON(expr)
74 #endif
75
76 #define INVALID_SN_DELTA 0xffff
77
78 static unsigned int iscsi_max_lun = 512;
79 module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
80
81 /* global data */
82 static kmem_cache_t *taskcache;
83
84 static inline void
85 iscsi_buf_init_virt(struct iscsi_buf *ibuf, char *vbuf, int size)
86 {
87 sg_init_one(&ibuf->sg, (u8 *)vbuf, size);
88 ibuf->sent = 0;
89 }
90
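/*
 * For "virtual" (non scatter-gather) buffers the sg.page field is
 * abused to hold the virtual address and sg.offset is set to -1;
 * iscsi_send() keys off that -1 to use kernel_sendmsg() instead of
 * sendpage().  See also iscsi_buf_iov_base().
 */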
91 static inline void
92 iscsi_buf_init_iov(struct iscsi_buf *ibuf, char *vbuf, int size)
93 {
94 ibuf->sg.page = (void*)vbuf;
95 ibuf->sg.offset = (unsigned int)-1;
96 ibuf->sg.length = size;
97 ibuf->sent = 0;
98 }
99
100 static inline void*
101 iscsi_buf_iov_base(struct iscsi_buf *ibuf)
102 {
103 return (char*)ibuf->sg.page + ibuf->sent;
104 }
105
106 static inline void
107 iscsi_buf_init_sg(struct iscsi_buf *ibuf, struct scatterlist *sg)
108 {
109 /*
110 * Fastpath: sg element fits into single page
111 */
112 if (sg->length + sg->offset <= PAGE_SIZE && page_count(sg->page) >= 2) {
113 ibuf->sg.page = sg->page;
114 ibuf->sg.offset = sg->offset;
115 ibuf->sg.length = sg->length;
116 } else
117 iscsi_buf_init_iov(ibuf, page_address(sg->page), sg->length);
118 ibuf->sent = 0;
119 }
120
121 static inline int
122 iscsi_buf_left(struct iscsi_buf *ibuf)
123 {
124 int rc;
125
126 rc = ibuf->sg.length - ibuf->sent;
127 BUG_ON(rc < 0);
128 return rc;
129 }
130
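/*
 * Compute the header digest into *crc and grow the header buffer by
 * sizeof(uint32_t) so the digest is transmitted together with the
 * header.  This assumes the digest area passed by the caller sits
 * directly after the header storage; the hdrext fields handed in by
 * the Tx paths below appear to be laid out that way.
 */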
131 static inline void
132 iscsi_hdr_digest(struct iscsi_conn *conn, struct iscsi_buf *buf,
133 u8* crc)
134 {
135 crypto_digest_digest(conn->tx_tfm, &buf->sg, 1, crc);
136 buf->sg.length += sizeof(uint32_t);
137 }
138
139 static void
140 iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
141 {
142 struct iscsi_session *session = conn->session;
143 unsigned long flags;
144
145 spin_lock_irqsave(&session->lock, flags);
146 if (session->conn_cnt == 1 || session->leadconn == conn)
147 session->state = ISCSI_STATE_FAILED;
148 spin_unlock_irqrestore(&session->lock, flags);
149 set_bit(SUSPEND_BIT, &conn->suspend_tx);
150 set_bit(SUSPEND_BIT, &conn->suspend_rx);
151 iscsi_conn_error(iscsi_handle(conn), err);
152 }
153
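/*
 * Sanity-check MaxCmdSN/ExpCmdSN from an incoming PDU and update the
 * session copies.  INVALID_SN_DELTA is the window used for 32-bit
 * serial number comparison: e.g. a MaxCmdSN below ExpCmdSN-1 but
 * within 0xffff of it is rejected with ISCSI_ERR_MAX_CMDSN, while a
 * value further below is assumed to have wrapped around.
 */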
154 static inline int
155 iscsi_check_assign_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
156 {
157 uint32_t max_cmdsn = be32_to_cpu(hdr->max_cmdsn);
158 uint32_t exp_cmdsn = be32_to_cpu(hdr->exp_cmdsn);
159
160 if (max_cmdsn < exp_cmdsn -1 &&
161 max_cmdsn > exp_cmdsn - INVALID_SN_DELTA)
162 return ISCSI_ERR_MAX_CMDSN;
163 if (max_cmdsn > session->max_cmdsn ||
164 max_cmdsn < session->max_cmdsn - INVALID_SN_DELTA)
165 session->max_cmdsn = max_cmdsn;
166 if (exp_cmdsn > session->exp_cmdsn ||
167 exp_cmdsn < session->exp_cmdsn - INVALID_SN_DELTA)
168 session->exp_cmdsn = exp_cmdsn;
169
170 return 0;
171 }
172
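/*
 * Extract the iSCSI PDU header from the current skb.  Fast path: when
 * the whole header is linear in the skb, conn->in.hdr just points into
 * skb->data (zero copy).  Otherwise the header is gathered piecemeal
 * into conn->hdr across calls; -EAGAIN means more skb data is needed,
 * 0 means conn->in.hdr is ready to be parsed.
 */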
173 static inline int
174 iscsi_hdr_extract(struct iscsi_conn *conn)
175 {
176 struct sk_buff *skb = conn->in.skb;
177
178 if (conn->in.copy >= conn->hdr_size &&
179 conn->in_progress == IN_PROGRESS_WAIT_HEADER) {
180 /*
181 * Zero-copy PDU Header: using connection context
182 * to store header pointer.
183 */
184 if (skb_shinfo(skb)->frag_list == NULL &&
185 !skb_shinfo(skb)->nr_frags)
186 conn->in.hdr = (struct iscsi_hdr *)
187 ((char*)skb->data + conn->in.offset);
188 else {
189 /* ignoring return code since we checked
190 * in.copy before */
191 skb_copy_bits(skb, conn->in.offset,
192 &conn->hdr, conn->hdr_size);
193 conn->in.hdr = &conn->hdr;
194 }
195 conn->in.offset += conn->hdr_size;
196 conn->in.copy -= conn->hdr_size;
197 } else {
198 int hdr_remains;
199 int copylen;
200
201 /*
202 * PDU header scattered across SKBs,
203 * copying it... This'll happen quite rarely.
204 */
205
206 if (conn->in_progress == IN_PROGRESS_WAIT_HEADER)
207 conn->in.hdr_offset = 0;
208
209 hdr_remains = conn->hdr_size - conn->in.hdr_offset;
210 BUG_ON(hdr_remains <= 0);
211
212 copylen = min(conn->in.copy, hdr_remains);
213 skb_copy_bits(skb, conn->in.offset,
214 (char*)&conn->hdr + conn->in.hdr_offset, copylen);
215
216 debug_tcp("PDU gather offset %d bytes %d in.offset %d "
217 "in.copy %d\n", conn->in.hdr_offset, copylen,
218 conn->in.offset, conn->in.copy);
219
220 conn->in.offset += copylen;
221 conn->in.copy -= copylen;
222 if (copylen < hdr_remains) {
223 conn->in_progress = IN_PROGRESS_HEADER_GATHER;
224 conn->in.hdr_offset += copylen;
225 return -EAGAIN;
226 }
227 conn->in.hdr = &conn->hdr;
228 conn->discontiguous_hdr_cnt++;
229 conn->in_progress = IN_PROGRESS_WAIT_HEADER;
230 }
231
232 return 0;
233 }
234
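/*
 * Return a command task to the session cmdpool once its SCSI command
 * is done: for writes, queued Data-Out descriptors are freed, then the
 * xmit state is reset and the task is put back on session->cmdpool.
 * Takes session->lock (and conn->lock for the write case) internally,
 * so it must be called without those locks held.
 */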
235 static inline void
236 iscsi_ctask_cleanup(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
237 {
238 struct scsi_cmnd *sc = ctask->sc;
239 struct iscsi_session *session = conn->session;
240
241 spin_lock(&session->lock);
242 if (unlikely(!sc)) {
243 spin_unlock(&session->lock);
244 return;
245 }
246 if (sc->sc_data_direction == DMA_TO_DEVICE) {
247 struct iscsi_data_task *dtask, *n;
248 /* WRITE: clean up Data-Outs, if any */
249 spin_lock(&conn->lock);
250 list_for_each_entry_safe(dtask, n, &ctask->dataqueue, item) {
251 list_del(&dtask->item);
252 mempool_free(dtask, ctask->datapool);
253 }
254 spin_unlock(&conn->lock);
255 }
256 ctask->xmstate = XMSTATE_IDLE;
257 ctask->r2t = NULL;
258 ctask->sc = NULL;
259 __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
260 spin_unlock(&session->lock);
261 }
262
263 /**
264 * iscsi_cmd_rsp - SCSI Command Response processing
265 * @conn: iscsi connection
266 * @ctask: scsi command task
267 **/
268 static int
269 iscsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
270 {
271 int rc;
272 struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)conn->in.hdr;
273 struct iscsi_session *session = conn->session;
274 struct scsi_cmnd *sc = ctask->sc;
275
276 rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
277 if (rc) {
278 sc->result = (DID_ERROR << 16);
279 goto out;
280 }
281
282 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
283
284 sc->result = (DID_OK << 16) | rhdr->cmd_status;
285
286 if (rhdr->response != ISCSI_STATUS_CMD_COMPLETED) {
287 sc->result = (DID_ERROR << 16);
288 goto out;
289 }
290
291 if (rhdr->cmd_status == SAM_STAT_CHECK_CONDITION && conn->senselen) {
292 int sensecopy = min(conn->senselen, SCSI_SENSE_BUFFERSIZE);
293
294 memcpy(sc->sense_buffer, conn->data + 2, sensecopy);
295 debug_scsi("copied %d bytes of sense\n", sensecopy);
296 }
297
298 if (sc->sc_data_direction == DMA_TO_DEVICE)
299 goto out;
300
301 if (rhdr->flags & ISCSI_FLAG_CMD_UNDERFLOW) {
302 int res_count = be32_to_cpu(rhdr->residual_count);
303
304 if (res_count > 0 && res_count <= sc->request_bufflen)
305 sc->resid = res_count;
306 else
307 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
308 } else if (rhdr->flags & ISCSI_FLAG_CMD_BIDI_UNDERFLOW)
309 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
310 else if (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW)
311 sc->resid = be32_to_cpu(rhdr->residual_count);
312
313 out:
314 debug_scsi("done [sc %lx res %d itt 0x%x]\n",
315 (long)sc, sc->result, ctask->itt);
316 conn->scsirsp_pdus_cnt++;
317 iscsi_ctask_cleanup(conn, ctask);
318 sc->scsi_done(sc);
319 return rc;
320 }
321
322 /**
323 * iscsi_data_rsp - SCSI Data-In Response processing
324 * @conn: iscsi connection
325 * @ctask: scsi command task
326 **/
327 static int
328 iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
329 {
330 int rc;
331 struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)conn->in.hdr;
332 struct iscsi_session *session = conn->session;
333 int datasn = be32_to_cpu(rhdr->datasn);
334
335 rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
336 if (rc)
337 return rc;
338 /*
339 * set up Data-In byte counter (gets decremented as data arrives)
340 */
341 ctask->data_count = conn->in.datalen;
342
343 if (conn->in.datalen == 0)
344 return 0;
345
346 if (ctask->datasn != datasn)
347 return ISCSI_ERR_DATASN;
348
349 ctask->datasn++;
350
351 ctask->data_offset = be32_to_cpu(rhdr->offset);
352 if (ctask->data_offset + conn->in.datalen > ctask->total_length)
353 return ISCSI_ERR_DATA_OFFSET;
354
355 if (rhdr->flags & ISCSI_FLAG_DATA_STATUS) {
356 struct scsi_cmnd *sc = ctask->sc;
357
358 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
359 if (rhdr->flags & ISCSI_FLAG_CMD_UNDERFLOW) {
360 int res_count = be32_to_cpu(rhdr->residual_count);
361
362 if (res_count > 0 &&
363 res_count <= sc->request_bufflen) {
364 sc->resid = res_count;
365 sc->result = (DID_OK << 16) | rhdr->cmd_status;
366 } else
367 sc->result = (DID_BAD_TARGET << 16) |
368 rhdr->cmd_status;
369 } else if (rhdr->flags & ISCSI_FLAG_CMD_BIDI_UNDERFLOW)
370 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
371 else if (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW) {
372 sc->resid = be32_to_cpu(rhdr->residual_count);
373 sc->result = (DID_OK << 16) | rhdr->cmd_status;
374 } else
375 sc->result = (DID_OK << 16) | rhdr->cmd_status;
376 }
377
378 conn->datain_pdus_cnt++;
379 return 0;
380 }
381
382 /**
383 * iscsi_solicit_data_init - initialize first Data-Out
384 * @conn: iscsi connection
385 * @ctask: scsi command task
386 * @r2t: R2T info
387 *
388 * Notes:
389 * Initializes the first Data-Out within this R2T sequence and finds
390 * the proper data_offset within this SCSI command.
391 *
392 * This function is called with connection lock taken.
393 **/
394 static void
395 iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
396 struct iscsi_r2t_info *r2t)
397 {
398 struct iscsi_data *hdr;
399 struct iscsi_data_task *dtask;
400 struct scsi_cmnd *sc = ctask->sc;
401
402 dtask = mempool_alloc(ctask->datapool, GFP_ATOMIC);
403 BUG_ON(!dtask);
404 hdr = &dtask->hdr;
405 memset(hdr, 0, sizeof(struct iscsi_data));
406 hdr->ttt = r2t->ttt;
407 hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
408 r2t->solicit_datasn++;
409 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
410 memcpy(hdr->lun, ctask->hdr.lun, sizeof(hdr->lun));
411 hdr->itt = ctask->hdr.itt;
412 hdr->exp_statsn = r2t->exp_statsn;
413 hdr->offset = cpu_to_be32(r2t->data_offset);
414 if (r2t->data_length > conn->max_xmit_dlength) {
415 hton24(hdr->dlength, conn->max_xmit_dlength);
416 r2t->data_count = conn->max_xmit_dlength;
417 hdr->flags = 0;
418 } else {
419 hton24(hdr->dlength, r2t->data_length);
420 r2t->data_count = r2t->data_length;
421 hdr->flags = ISCSI_FLAG_CMD_FINAL;
422 }
423 conn->dataout_pdus_cnt++;
424
425 r2t->sent = 0;
426
427 iscsi_buf_init_virt(&r2t->headbuf, (char*)hdr,
428 sizeof(struct iscsi_hdr));
429
430 r2t->dtask = dtask;
431
432 if (sc->use_sg) {
433 int i, sg_count = 0;
434 struct scatterlist *sg = sc->request_buffer;
435
436 r2t->sg = NULL;
437 for (i = 0; i < sc->use_sg; i++, sg += 1) {
438 /* FIXME: prefetch ? */
439 if (sg_count + sg->length > r2t->data_offset) {
440 int page_offset;
441
442 /* sg page found! */
443
444 /* offset within this page */
445 page_offset = r2t->data_offset - sg_count;
446
447 /* fill in this buffer */
448 iscsi_buf_init_sg(&r2t->sendbuf, sg);
449 r2t->sendbuf.sg.offset += page_offset;
450 r2t->sendbuf.sg.length -= page_offset;
451
452 /* xmit logic will continue with next one */
453 r2t->sg = sg + 1;
454 break;
455 }
456 sg_count += sg->length;
457 }
458 BUG_ON(r2t->sg == NULL);
459 } else
460 iscsi_buf_init_iov(&ctask->sendbuf,
461 (char*)sc->request_buffer + r2t->data_offset,
462 r2t->data_count);
463
464 list_add(&dtask->item, &ctask->dataqueue);
465 }
466
467 /**
468 * iscsi_r2t_rsp - iSCSI R2T Response processing
469 * @conn: iscsi connection
470 * @ctask: scsi command task
471 **/
472 static int
473 iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
474 {
475 struct iscsi_r2t_info *r2t;
476 struct iscsi_session *session = conn->session;
477 struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)conn->in.hdr;
478 int r2tsn = be32_to_cpu(rhdr->r2tsn);
479 int rc;
480
481 if (conn->in.ahslen)
482 return ISCSI_ERR_AHSLEN;
483
484 if (conn->in.datalen)
485 return ISCSI_ERR_DATALEN;
486
487 if (ctask->exp_r2tsn && ctask->exp_r2tsn != r2tsn)
488 return ISCSI_ERR_R2TSN;
489
490 rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
491 if (rc)
492 return rc;
493
494 /* FIXME: use R2TSN to detect missing R2T */
495
496 /* fill-in new R2T associated with the task */
497 spin_lock(&session->lock);
498 if (!ctask->sc || ctask->mtask ||
499 session->state != ISCSI_STATE_LOGGED_IN) {
500 printk(KERN_INFO "iscsi_tcp: dropping R2T itt %d in "
501 "recovery...\n", ctask->itt);
502 spin_unlock(&session->lock);
503 return 0;
504 }
505 rc = __kfifo_get(ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
506 BUG_ON(!rc);
507
508 r2t->exp_statsn = rhdr->statsn;
509 r2t->data_length = be32_to_cpu(rhdr->data_length);
510 if (r2t->data_length == 0 ||
511 r2t->data_length > session->max_burst) {
512 spin_unlock(&session->lock);
513 return ISCSI_ERR_DATALEN;
514 }
515
516 r2t->data_offset = be32_to_cpu(rhdr->data_offset);
517 if (r2t->data_offset + r2t->data_length > ctask->total_length) {
518 spin_unlock(&session->lock);
519 return ISCSI_ERR_DATALEN;
520 }
521
522 r2t->ttt = rhdr->ttt; /* no flip */
523 r2t->solicit_datasn = 0;
524
525 iscsi_solicit_data_init(conn, ctask, r2t);
526
527 ctask->exp_r2tsn = r2tsn + 1;
528 ctask->xmstate |= XMSTATE_SOL_HDR;
529 __kfifo_put(ctask->r2tqueue, (void*)&r2t, sizeof(void*));
530 __kfifo_put(conn->writequeue, (void*)&ctask, sizeof(void*));
531
532 schedule_work(&conn->xmitwork);
533 conn->r2t_pdus_cnt++;
534 spin_unlock(&session->lock);
535
536 return 0;
537 }
538
539 static int
540 iscsi_hdr_recv(struct iscsi_conn *conn)
541 {
542 int rc = 0;
543 struct iscsi_hdr *hdr;
544 struct iscsi_cmd_task *ctask;
545 struct iscsi_session *session = conn->session;
546 uint32_t cdgst, rdgst = 0;
547
548 hdr = conn->in.hdr;
549
550 /* verify PDU length */
551 conn->in.datalen = ntoh24(hdr->dlength);
552 if (conn->in.datalen > conn->max_recv_dlength) {
553 printk(KERN_ERR "iscsi_tcp: datalen %d > %d\n",
554 conn->in.datalen, conn->max_recv_dlength);
555 return ISCSI_ERR_DATALEN;
556 }
557 conn->data_copied = 0;
558
559 /* read AHS */
560 conn->in.ahslen = hdr->hlength * 4;
561 conn->in.offset += conn->in.ahslen;
562 conn->in.copy -= conn->in.ahslen;
563 if (conn->in.copy < 0) {
564 printk(KERN_ERR "iscsi_tcp: can't handle AHS with length "
565 "%d bytes\n", conn->in.ahslen);
566 return ISCSI_ERR_AHSLEN;
567 }
568
569 /* calculate read padding */
570 conn->in.padding = conn->in.datalen & (ISCSI_PAD_LEN-1);
571 if (conn->in.padding) {
572 conn->in.padding = ISCSI_PAD_LEN - conn->in.padding;
573 debug_scsi("read padding %d bytes\n", conn->in.padding);
574 }
575
576 if (conn->hdrdgst_en) {
577 struct scatterlist sg;
578
579 sg_init_one(&sg, (u8 *)hdr,
580 sizeof(struct iscsi_hdr) + conn->in.ahslen);
581 crypto_digest_digest(conn->rx_tfm, &sg, 1, (u8 *)&cdgst);
582 rdgst = *(uint32_t*)((char*)hdr + sizeof(struct iscsi_hdr) +
583 conn->in.ahslen);
584 if (cdgst != rdgst) {
585 printk(KERN_ERR "iscsi_tcp: itt %x: hdrdgst error "
586 "recv 0x%x calc 0x%x\n", conn->in.itt, rdgst,
587 cdgst);
588 return ISCSI_ERR_HDR_DGST;
589 }
590 }
591
592 /* save opcode for later */
593 conn->in.opcode = hdr->opcode & ISCSI_OPCODE_MASK;
594
595 /* verify itt (itt encoding: age+cid+itt) */
596 if (hdr->itt != cpu_to_be32(ISCSI_RESERVED_TAG)) {
597 if ((hdr->itt & AGE_MASK) !=
598 (session->age << AGE_SHIFT)) {
599 printk(KERN_ERR "iscsi_tcp: received itt %x expected "
600 "session age (%x)\n", hdr->itt,
601 session->age & AGE_MASK);
602 return ISCSI_ERR_BAD_ITT;
603 }
604
605 if ((hdr->itt & CID_MASK) != (conn->id << CID_SHIFT)) {
606 printk(KERN_ERR "iscsi_tcp: received itt %x, expected "
607 "CID (%x)\n", hdr->itt, conn->id);
608 return ISCSI_ERR_BAD_ITT;
609 }
610 conn->in.itt = hdr->itt & ITT_MASK;
611 } else
612 conn->in.itt = hdr->itt;
613
614 debug_tcp("opcode 0x%x offset %d copy %d ahslen %d datalen %d\n",
615 hdr->opcode, conn->in.offset, conn->in.copy,
616 conn->in.ahslen, conn->in.datalen);
617
618 if (conn->in.itt < session->cmds_max) {
619 ctask = (struct iscsi_cmd_task *)session->cmds[conn->in.itt];
620
621 if (!ctask->sc) {
622 printk(KERN_INFO "iscsi_tcp: dropping ctask with "
623 "itt 0x%x\n", ctask->itt);
624 conn->in.datalen = 0; /* force drop */
625 return 0;
626 }
627
628 if (ctask->sc->SCp.phase != session->age) {
629 printk(KERN_ERR "iscsi_tcp: ctask's session age %d, "
630 "expected %d\n", ctask->sc->SCp.phase,
631 session->age);
632 return ISCSI_ERR_SESSION_FAILED;
633 }
634
635 conn->in.ctask = ctask;
636
637 debug_scsi("rsp [op 0x%x cid %d sc %lx itt 0x%x len %d]\n",
638 hdr->opcode, conn->id, (long)ctask->sc,
639 ctask->itt, conn->in.datalen);
640
641 switch(conn->in.opcode) {
642 case ISCSI_OP_SCSI_CMD_RSP:
643 BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
644 if (!conn->in.datalen)
645 rc = iscsi_cmd_rsp(conn, ctask);
646 else
647 /*
648 * got sense or response data; copying PDU
649 * Header to the connection's header
650 * placeholder
651 */
652 memcpy(&conn->hdr, hdr,
653 sizeof(struct iscsi_hdr));
654 break;
655 case ISCSI_OP_SCSI_DATA_IN:
656 BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
657 /* save flags for non-exceptional status */
658 conn->in.flags = hdr->flags;
659 /* save cmd_status for sense data */
660 conn->in.cmd_status =
661 ((struct iscsi_data_rsp*)hdr)->cmd_status;
662 rc = iscsi_data_rsp(conn, ctask);
663 break;
664 case ISCSI_OP_R2T:
665 BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
666 if (ctask->sc->sc_data_direction == DMA_TO_DEVICE)
667 rc = iscsi_r2t_rsp(conn, ctask);
668 else
669 rc = ISCSI_ERR_PROTO;
670 break;
671 default:
672 rc = ISCSI_ERR_BAD_OPCODE;
673 break;
674 }
675 } else if (conn->in.itt >= ISCSI_MGMT_ITT_OFFSET &&
676 conn->in.itt < ISCSI_MGMT_ITT_OFFSET +
677 session->mgmtpool_max) {
678 struct iscsi_mgmt_task *mtask = (struct iscsi_mgmt_task *)
679 session->mgmt_cmds[conn->in.itt -
680 ISCSI_MGMT_ITT_OFFSET];
681
682 debug_scsi("immrsp [op 0x%x cid %d itt 0x%x len %d]\n",
683 conn->in.opcode, conn->id, mtask->itt,
684 conn->in.datalen);
685
686 switch(conn->in.opcode) {
687 case ISCSI_OP_LOGIN_RSP:
688 case ISCSI_OP_TEXT_RSP:
689 case ISCSI_OP_LOGOUT_RSP:
690 rc = iscsi_check_assign_cmdsn(session,
691 (struct iscsi_nopin*)hdr);
692 if (rc)
693 break;
694
695 if (!conn->in.datalen) {
696 rc = iscsi_recv_pdu(iscsi_handle(conn), hdr,
697 NULL, 0);
698 if (conn->login_mtask != mtask) {
699 spin_lock(&session->lock);
700 __kfifo_put(session->mgmtpool.queue,
701 (void*)&mtask, sizeof(void*));
702 spin_unlock(&session->lock);
703 }
704 }
705 break;
706 case ISCSI_OP_SCSI_TMFUNC_RSP:
707 rc = iscsi_check_assign_cmdsn(session,
708 (struct iscsi_nopin*)hdr);
709 if (rc)
710 break;
711
712 if (conn->in.datalen || conn->in.ahslen) {
713 rc = ISCSI_ERR_PROTO;
714 break;
715 }
716 conn->tmfrsp_pdus_cnt++;
717 spin_lock(&session->lock);
718 if (conn->tmabort_state == TMABORT_INITIAL) {
719 __kfifo_put(session->mgmtpool.queue,
720 (void*)&mtask, sizeof(void*));
721 conn->tmabort_state =
722 ((struct iscsi_tm_rsp *)hdr)->
723 response == ISCSI_TMF_RSP_COMPLETE ?
724 TMABORT_SUCCESS:TMABORT_FAILED;
725 /* unblock eh_abort() */
726 wake_up(&conn->ehwait);
727 }
728 spin_unlock(&session->lock);
729 break;
730 case ISCSI_OP_NOOP_IN:
731 if (hdr->ttt != ISCSI_RESERVED_TAG) {
732 rc = ISCSI_ERR_PROTO;
733 break;
734 }
735 rc = iscsi_check_assign_cmdsn(session,
736 (struct iscsi_nopin*)hdr);
737 if (rc)
738 break;
739 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
740
741 if (!conn->in.datalen) {
742 struct iscsi_mgmt_task *mtask;
743
744 rc = iscsi_recv_pdu(iscsi_handle(conn), hdr,
745 NULL, 0);
746 mtask = (struct iscsi_mgmt_task *)
747 session->mgmt_cmds[conn->in.itt -
748 ISCSI_MGMT_ITT_OFFSET];
749 if (conn->login_mtask != mtask) {
750 spin_lock(&session->lock);
751 __kfifo_put(session->mgmtpool.queue,
752 (void*)&mtask, sizeof(void*));
753 spin_unlock(&session->lock);
754 }
755 }
756 break;
757 default:
758 rc = ISCSI_ERR_BAD_OPCODE;
759 break;
760 }
761 } else if (conn->in.itt == ISCSI_RESERVED_TAG) {
762 switch(conn->in.opcode) {
763 case ISCSI_OP_NOOP_IN:
764 if (!conn->in.datalen) {
765 rc = iscsi_check_assign_cmdsn(session,
766 (struct iscsi_nopin*)hdr);
767 if (!rc && hdr->ttt != ISCSI_RESERVED_TAG)
768 rc = iscsi_recv_pdu(iscsi_handle(conn),
769 hdr, NULL, 0);
770 } else
771 rc = ISCSI_ERR_PROTO;
772 break;
773 case ISCSI_OP_REJECT:
774 /* we need something like iscsi_reject_rsp() */
775 case ISCSI_OP_ASYNC_EVENT:
776 /* we need something like iscsi_async_event_rsp() */
777 rc = ISCSI_ERR_BAD_OPCODE;
778 break;
779 default:
780 rc = ISCSI_ERR_BAD_OPCODE;
781 break;
782 }
783 } else
784 rc = ISCSI_ERR_BAD_ITT;
785
786 return rc;
787 }
788
789 /**
790 * iscsi_ctask_copy - copy skb bits to the destination cmd task
791 * @conn: iscsi connection
792 * @ctask: scsi command task
793 * @buf: buffer to copy to
794 * @buf_size: size of buffer
795 * @offset: offset within the buffer
796 *
797 * Notes:
798 * The function calls skb_copy_bits() and updates per-connection and
799 * per-cmd byte counters.
800 *
801 * Read counters (in bytes):
802 *
803 * conn->in.offset offset within in progress SKB
804 * conn->in.copy left to copy from in progress SKB
805 * including padding
806 * conn->in.copied copied already from in progress SKB
807 * conn->data_copied copied already from in progress buffer
808 * ctask->sent total bytes sent up to the MidLayer
809 * ctask->data_count left to copy from in progress Data-In
810 * buf_left left to copy from in progress buffer
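 *
 * Return values (as used by iscsi_scsi_data_in()):
 *	0	 destination buffer filled (and possibly the PDU as well)
 *	-EAGAIN	 current skb exhausted, continue with the next skb/PDU
 *	>0	 Data-In payload finished before the buffer was full;
 *		 the value is buf_size - conn->data_copied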
811 **/
812 static inline int
813 iscsi_ctask_copy(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
814 void *buf, int buf_size, int offset)
815 {
816 int buf_left = buf_size - (conn->data_copied + offset);
817 int size = min(conn->in.copy, buf_left);
818 int rc;
819
820 size = min(size, ctask->data_count);
821
822 debug_tcp("ctask_copy %d bytes at offset %d copied %d\n",
823 size, conn->in.offset, conn->in.copied);
824
825 BUG_ON(size <= 0);
826 BUG_ON(ctask->sent + size > ctask->total_length);
827
828 rc = skb_copy_bits(conn->in.skb, conn->in.offset,
829 (char*)buf + (offset + conn->data_copied), size);
830 /* must fit into skb->len */
831 BUG_ON(rc);
832
833 conn->in.offset += size;
834 conn->in.copy -= size;
835 conn->in.copied += size;
836 conn->data_copied += size;
837 ctask->sent += size;
838 ctask->data_count -= size;
839
840 BUG_ON(conn->in.copy < 0);
841 BUG_ON(ctask->data_count < 0);
842
843 if (buf_size != (conn->data_copied + offset)) {
844 if (!ctask->data_count) {
845 BUG_ON(buf_size - conn->data_copied < 0);
846 /* done with this PDU */
847 return buf_size - conn->data_copied;
848 }
849 return -EAGAIN;
850 }
851
852 /* done with this buffer or with both - PDU and buffer */
853 conn->data_copied = 0;
854 return 0;
855 }
856
857 /**
858 * iscsi_tcp_copy - copy skb bits to the destination buffer
859 * @conn: iscsi connection
860 * @buf: buffer to copy to
861 * @buf_size: number of bytes to copy
862 *
863 * Notes:
864 * The function calls skb_copy_bits() and updates per-connection
865 * byte counters.
866 **/
867 static inline int
868 iscsi_tcp_copy(struct iscsi_conn *conn, void *buf, int buf_size)
869 {
870 int buf_left = buf_size - conn->data_copied;
871 int size = min(conn->in.copy, buf_left);
872 int rc;
873
874 debug_tcp("tcp_copy %d bytes at offset %d copied %d\n",
875 size, conn->in.offset, conn->data_copied);
876 BUG_ON(size <= 0);
877
878 rc = skb_copy_bits(conn->in.skb, conn->in.offset,
879 (char*)buf + conn->data_copied, size);
880 BUG_ON(rc);
881
882 conn->in.offset += size;
883 conn->in.copy -= size;
884 conn->in.copied += size;
885 conn->data_copied += size;
886
887 if (buf_size != conn->data_copied)
888 return -EAGAIN;
889
890 return 0;
891 }
892
893 static inline void
894 partial_sg_digest_update(struct iscsi_conn *conn, struct scatterlist *sg,
895 int offset, int length)
896 {
897 struct scatterlist temp;
898
899 memcpy(&temp, sg, sizeof(struct scatterlist));
900 temp.offset = offset;
901 temp.length = length;
902 crypto_digest_update(conn->data_rx_tfm, &temp, 1);
903 }
904
905 static void
906 iscsi_recv_digest_update(struct iscsi_conn *conn, char* buf, int len)
907 {
908 struct scatterlist tmp;
909
910 sg_init_one(&tmp, buf, len);
911 crypto_digest_update(conn->data_rx_tfm, &tmp, 1);
912 }
913
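/*
 * Copy a Data-In payload into the SCSI command's buffer.  For
 * scatter-gather requests each element is kmap'ed and filled via
 * iscsi_ctask_copy(); ctask->sg_count remembers where to resume when
 * the payload spans several skbs or PDUs, and data digests are updated
 * per element as the copy progresses.
 */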
914 static int iscsi_scsi_data_in(struct iscsi_conn *conn)
915 {
916 struct iscsi_cmd_task *ctask = conn->in.ctask;
917 struct scsi_cmnd *sc = ctask->sc;
918 struct scatterlist *sg;
919 int i, offset, rc = 0;
920
921 BUG_ON((void*)ctask != sc->SCp.ptr);
922
923 /*
924 * copying Data-In into the Scsi_Cmnd
925 */
926 if (!sc->use_sg) {
927 i = ctask->data_count;
928 rc = iscsi_ctask_copy(conn, ctask, sc->request_buffer,
929 sc->request_bufflen, ctask->data_offset);
930 if (rc == -EAGAIN)
931 return rc;
932 if (conn->datadgst_en)
933 iscsi_recv_digest_update(conn, sc->request_buffer, i);
934 rc = 0;
935 goto done;
936 }
937
938 offset = ctask->data_offset;
939 sg = sc->request_buffer;
940
941 if (ctask->data_offset)
942 for (i = 0; i < ctask->sg_count; i++)
943 offset -= sg[i].length;
944 /* we've passed through a partial sg */
945 if (offset < 0)
946 offset = 0;
947
948 for (i = ctask->sg_count; i < sc->use_sg; i++) {
949 char *dest;
950
951 dest = kmap_atomic(sg[i].page, KM_SOFTIRQ0);
952 rc = iscsi_ctask_copy(conn, ctask, dest + sg[i].offset,
953 sg[i].length, offset);
954 kunmap_atomic(dest, KM_SOFTIRQ0);
955 if (rc == -EAGAIN)
956 /* continue with the next SKB/PDU */
957 return rc;
958 if (!rc) {
959 if (conn->datadgst_en) {
960 if (!offset)
961 crypto_digest_update(conn->data_rx_tfm,
962 &sg[i], 1);
963 else
964 partial_sg_digest_update(conn, &sg[i],
965 sg[i].offset + offset,
966 sg[i].length - offset);
967 }
968 offset = 0;
969 ctask->sg_count++;
970 }
971
972 if (!ctask->data_count) {
973 if (rc && conn->datadgst_en)
974 /*
975 * data-in is complete, but buffer not...
976 */
977 partial_sg_digest_update(conn, &sg[i],
978 sg[i].offset, sg[i].length-rc);
979 rc = 0;
980 break;
981 }
982
983 if (!conn->in.copy)
984 return -EAGAIN;
985 }
986 BUG_ON(ctask->data_count);
987
988 done:
989 /* check for non-exceptional status */
990 if (conn->in.flags & ISCSI_FLAG_DATA_STATUS) {
991 debug_scsi("done [sc %lx res %d itt 0x%x]\n",
992 (long)sc, sc->result, ctask->itt);
993 conn->scsirsp_pdus_cnt++;
994 iscsi_ctask_cleanup(conn, ctask);
995 sc->scsi_done(sc);
996 }
997
998 return rc;
999 }
1000
1001 static int
1002 iscsi_data_recv(struct iscsi_conn *conn)
1003 {
1004 struct iscsi_session *session = conn->session;
1005 int rc = 0;
1006
1007 switch(conn->in.opcode) {
1008 case ISCSI_OP_SCSI_DATA_IN:
1009 rc = iscsi_scsi_data_in(conn);
1010 break;
1011 case ISCSI_OP_SCSI_CMD_RSP: {
1012 /*
1013 * SCSI Sense Data:
1014 * copying the entire Data Segment.
1015 */
1016 if (iscsi_tcp_copy(conn, conn->data, conn->in.datalen)) {
1017 rc = -EAGAIN;
1018 goto exit;
1019 }
1020
1021 /*
1022 * check for sense
1023 */
1024 conn->in.hdr = &conn->hdr;
1025 conn->senselen = (conn->data[0] << 8) | conn->data[1];
1026 rc = iscsi_cmd_rsp(conn, conn->in.ctask);
1027 if (!rc && conn->datadgst_en)
1028 iscsi_recv_digest_update(conn, conn->data,
1029 conn->in.datalen);
1030 }
1031 break;
1032 case ISCSI_OP_TEXT_RSP:
1033 case ISCSI_OP_LOGIN_RSP:
1034 case ISCSI_OP_NOOP_IN: {
1035 struct iscsi_mgmt_task *mtask = NULL;
1036
1037 if (conn->in.itt != ISCSI_RESERVED_TAG)
1038 mtask = (struct iscsi_mgmt_task *)
1039 session->mgmt_cmds[conn->in.itt -
1040 ISCSI_MGMT_ITT_OFFSET];
1041
1042 /*
1043 * Collect data segment to the connection's data
1044 * placeholder
1045 */
1046 if (iscsi_tcp_copy(conn, conn->data, conn->in.datalen)) {
1047 rc = -EAGAIN;
1048 goto exit;
1049 }
1050
1051 rc = iscsi_recv_pdu(iscsi_handle(conn), conn->in.hdr,
1052 conn->data, conn->in.datalen);
1053
1054 if (!rc && conn->datadgst_en &&
1055 conn->in.opcode != ISCSI_OP_LOGIN_RSP)
1056 iscsi_recv_digest_update(conn, conn->data,
1057 conn->in.datalen);
1058
1059 if (mtask && conn->login_mtask != mtask) {
1060 spin_lock(&session->lock);
1061 __kfifo_put(session->mgmtpool.queue, (void*)&mtask,
1062 sizeof(void*));
1063 spin_unlock(&session->lock);
1064 }
1065 }
1066 break;
1067 case ISCSI_OP_ASYNC_EVENT:
1068 case ISCSI_OP_REJECT:
1069 default:
1070 BUG_ON(1);
1071 }
1072 exit:
1073 return rc;
1074 }
1075
1076 /**
1077 * iscsi_tcp_data_recv - TCP receive in sendfile fashion
1078 * @rd_desc: read descriptor
1079 * @skb: socket buffer
1080 * @offset: offset in skb
1081 * @len: skb->len - offset
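 *
 * Notes:
 *	Drives the per-connection receive state machine:
 *	IN_PROGRESS_WAIT_HEADER / IN_PROGRESS_HEADER_GATHER extract the
 *	PDU header, IN_PROGRESS_DATA_RECV consumes the data segment and
 *	IN_PROGRESS_DDIGEST_RECV reads the trailing data digest when
 *	data digests are enabled.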
1082 **/
1083 static int
1084 iscsi_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
1085 unsigned int offset, size_t len)
1086 {
1087 int rc;
1088 struct iscsi_conn *conn = rd_desc->arg.data;
1089 int processed;
1090 char pad[ISCSI_PAD_LEN];
1091 struct scatterlist sg;
1092
1093 /*
1094 * Save current SKB and its offset in the corresponding
1095 * connection context.
1096 */
1097 conn->in.copy = skb->len - offset;
1098 conn->in.offset = offset;
1099 conn->in.skb = skb;
1100 conn->in.len = conn->in.copy;
1101 BUG_ON(conn->in.copy <= 0);
1102 debug_tcp("in %d bytes\n", conn->in.copy);
1103
1104 more:
1105 conn->in.copied = 0;
1106 rc = 0;
1107
1108 if (unlikely(conn->suspend_rx)) {
1109 debug_tcp("conn %d Rx suspended!\n", conn->id);
1110 return 0;
1111 }
1112
1113 if (conn->in_progress == IN_PROGRESS_WAIT_HEADER ||
1114 conn->in_progress == IN_PROGRESS_HEADER_GATHER) {
1115 rc = iscsi_hdr_extract(conn);
1116 if (rc) {
1117 if (rc == -EAGAIN)
1118 goto nomore;
1119 else {
1120 iscsi_conn_failure(conn, rc);
1121 return 0;
1122 }
1123 }
1124
1125 /*
1126 * Verify and process incoming PDU header.
1127 */
1128 rc = iscsi_hdr_recv(conn);
1129 if (!rc && conn->in.datalen) {
1130 if (conn->datadgst_en) {
1131 BUG_ON(!conn->data_rx_tfm);
1132 crypto_digest_init(conn->data_rx_tfm);
1133 }
1134 conn->in_progress = IN_PROGRESS_DATA_RECV;
1135 } else if (rc) {
1136 iscsi_conn_failure(conn, rc);
1137 return 0;
1138 }
1139 }
1140
1141 if (conn->in_progress == IN_PROGRESS_DDIGEST_RECV) {
1142 uint32_t recv_digest;
1143 debug_tcp("extra data_recv offset %d copy %d\n",
1144 conn->in.offset, conn->in.copy);
1145 skb_copy_bits(conn->in.skb, conn->in.offset,
1146 &recv_digest, 4);
1147 conn->in.offset += 4;
1148 conn->in.copy -= 4;
1149 if (recv_digest != conn->in.datadgst) {
1150 debug_tcp("iscsi_tcp: data digest error! "
1151 "0x%x != 0x%x\n", recv_digest,
1152 conn->in.datadgst);
1153 iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
1154 return 0;
1155 } else {
1156 debug_tcp("iscsi_tcp: data digest match! "
1157 "0x%x == 0x%x\n", recv_digest,
1158 conn->in.datadgst);
1159 conn->in_progress = IN_PROGRESS_WAIT_HEADER;
1160 }
1161 }
1162
1163 if (conn->in_progress == IN_PROGRESS_DATA_RECV && conn->in.copy) {
1164
1165 debug_tcp("data_recv offset %d copy %d\n",
1166 conn->in.offset, conn->in.copy);
1167
1168 rc = iscsi_data_recv(conn);
1169 if (rc) {
1170 if (rc == -EAGAIN) {
1171 rd_desc->count = conn->in.datalen -
1172 conn->in.ctask->data_count;
1173 goto again;
1174 }
1175 iscsi_conn_failure(conn, rc);
1176 return 0;
1177 }
1178 conn->in.copy -= conn->in.padding;
1179 conn->in.offset += conn->in.padding;
1180 if (conn->datadgst_en) {
1181 if (conn->in.padding) {
1182 debug_tcp("padding -> %d\n", conn->in.padding);
1183 memset(pad, 0, conn->in.padding);
1184 sg_init_one(&sg, pad, conn->in.padding);
1185 crypto_digest_update(conn->data_rx_tfm, &sg, 1);
1186 }
1187 crypto_digest_final(conn->data_rx_tfm,
1188 (u8 *) & conn->in.datadgst);
1189 debug_tcp("rx digest 0x%x\n", conn->in.datadgst);
1190 conn->in_progress = IN_PROGRESS_DDIGEST_RECV;
1191 } else
1192 conn->in_progress = IN_PROGRESS_WAIT_HEADER;
1193 }
1194
1195 debug_tcp("f, processed %d from out of %d padding %d\n",
1196 conn->in.offset - offset, (int)len, conn->in.padding);
1197 BUG_ON(conn->in.offset - offset > len);
1198
1199 if (conn->in.offset - offset != len) {
1200 debug_tcp("continue to process %d bytes\n",
1201 (int)len - (conn->in.offset - offset));
1202 goto more;
1203 }
1204
1205 nomore:
1206 processed = conn->in.offset - offset;
1207 BUG_ON(processed == 0);
1208 return processed;
1209
1210 again:
1211 processed = conn->in.offset - offset;
1212 debug_tcp("c, processed %d from out of %d rd_desc_cnt %d\n",
1213 processed, (int)len, (int)rd_desc->count);
1214 BUG_ON(processed == 0);
1215 BUG_ON(processed > len);
1216
1217 conn->rxdata_octets += processed;
1218 return processed;
1219 }
1220
1221 static void
1222 iscsi_tcp_data_ready(struct sock *sk, int flag)
1223 {
1224 struct iscsi_conn *conn = sk->sk_user_data;
1225 read_descriptor_t rd_desc;
1226
1227 read_lock(&sk->sk_callback_lock);
1228
1229 /* use rd_desc to pass 'conn' to iscsi_tcp_data_recv */
1230 rd_desc.arg.data = conn;
1231 rd_desc.count = 0;
1232 tcp_read_sock(sk, &rd_desc, iscsi_tcp_data_recv);
1233
1234 read_unlock(&sk->sk_callback_lock);
1235 }
1236
1237 static void
1238 iscsi_tcp_state_change(struct sock *sk)
1239 {
1240 struct iscsi_conn *conn;
1241 struct iscsi_session *session;
1242 void (*old_state_change)(struct sock *);
1243
1244 read_lock(&sk->sk_callback_lock);
1245
1246 conn = (struct iscsi_conn*)sk->sk_user_data;
1247 session = conn->session;
1248
1249 if ((sk->sk_state == TCP_CLOSE_WAIT ||
1250 sk->sk_state == TCP_CLOSE) &&
1251 !atomic_read(&sk->sk_rmem_alloc)) {
1252 debug_tcp("iscsi_tcp_state_change: TCP_CLOSE|TCP_CLOSE_WAIT\n");
1253 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1254 }
1255
1256 old_state_change = conn->old_state_change;
1257
1258 read_unlock(&sk->sk_callback_lock);
1259
1260 old_state_change(sk);
1261 }
1262
1263 /**
1264 * iscsi_write_space - Called when more output buffer space is available
1265 * @sk: socket space is available for
1266 **/
1267 static void
1268 iscsi_write_space(struct sock *sk)
1269 {
1270 struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data;
1271 conn->old_write_space(sk);
1272 debug_tcp("iscsi_write_space: cid %d\n", conn->id);
1273 clear_bit(SUSPEND_BIT, &conn->suspend_tx);
1274 schedule_work(&conn->xmitwork);
1275 }
1276
1277 static void
1278 iscsi_conn_set_callbacks(struct iscsi_conn *conn)
1279 {
1280 struct sock *sk = conn->sock->sk;
1281
1282 /* assign new callbacks */
1283 write_lock_bh(&sk->sk_callback_lock);
1284 sk->sk_user_data = conn;
1285 conn->old_data_ready = sk->sk_data_ready;
1286 conn->old_state_change = sk->sk_state_change;
1287 conn->old_write_space = sk->sk_write_space;
1288 sk->sk_data_ready = iscsi_tcp_data_ready;
1289 sk->sk_state_change = iscsi_tcp_state_change;
1290 sk->sk_write_space = iscsi_write_space;
1291 write_unlock_bh(&sk->sk_callback_lock);
1292 }
1293
1294 static void
1295 iscsi_conn_restore_callbacks(struct iscsi_conn *conn)
1296 {
1297 struct sock *sk = conn->sock->sk;
1298
1299 /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
1300 write_lock_bh(&sk->sk_callback_lock);
1301 sk->sk_user_data = NULL;
1302 sk->sk_data_ready = conn->old_data_ready;
1303 sk->sk_state_change = conn->old_state_change;
1304 sk->sk_write_space = conn->old_write_space;
1305 sk->sk_no_check = 0;
1306 write_unlock_bh(&sk->sk_callback_lock);
1307 }
1308
1309 /**
1310 * iscsi_send - generic send routine
1311 * @sk: kernel's socket
1312 * @buf: buffer to write from
1313 * @size: actual size to write
1314 * @flags: socket's flags
1315 *
1316 * Notes:
1317 * Depending on the buffer type, uses tcp_sendpage() or tcp_sendmsg().
1318 * buf->sg.offset == -1 marks the buffer as non-S/G and forces the
1319 * use of tcp_sendmsg().
1320 */
1321 static inline int
1322 iscsi_send(struct socket *sk, struct iscsi_buf *buf, int size, int flags)
1323 {
1324 int res;
1325
1326 if ((int)buf->sg.offset >= 0) {
1327 int offset = buf->sg.offset + buf->sent;
1328
1329 /* tcp_sendpage */
1330 res = sk->ops->sendpage(sk, buf->sg.page, offset, size, flags);
1331 } else {
1332 struct msghdr msg;
1333
1334 buf->iov.iov_base = iscsi_buf_iov_base(buf);
1335 buf->iov.iov_len = size;
1336
1337 memset(&msg, 0, sizeof(struct msghdr));
1338
1339 /* tcp_sendmsg */
1340 res = kernel_sendmsg(sk, &msg, &buf->iov, 1, size);
1341 }
1342
1343 return res;
1344 }
1345
1346 /**
1347 * iscsi_sendhdr - send PDU Header via tcp_sendpage()
1348 * @conn: iscsi connection
1349 * @buf: buffer to write from
1350 * @datalen: length of data to be sent after the header
1351 *
1352 * Notes:
1353 * (Tx, Fast Path)
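 *	Returns 0 when the whole header has been sent, -EAGAIN on a
 *	partial send or socket backpressure (Tx stays suspended until
 *	write space becomes available), or another negative error code.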
1354 **/
1355 static inline int
1356 iscsi_sendhdr(struct iscsi_conn *conn, struct iscsi_buf *buf, int datalen)
1357 {
1358 struct socket *sk = conn->sock;
1359 int flags = 0; /* MSG_DONTWAIT; */
1360 int res, size;
1361
1362 size = buf->sg.length - buf->sent;
1363 BUG_ON(buf->sent + size > buf->sg.length);
1364 if (buf->sent + size != buf->sg.length || datalen)
1365 flags |= MSG_MORE;
1366
1367 res = iscsi_send(sk, buf, size, flags);
1368 debug_tcp("sendhdr %d bytes, sent %d res %d\n", size, buf->sent, res);
1369 if (res >= 0) {
1370 conn->txdata_octets += res;
1371 buf->sent += res;
1372 if (size != res)
1373 return -EAGAIN;
1374 return 0;
1375 } else if (res == -EAGAIN) {
1376 conn->sendpage_failures_cnt++;
1377 set_bit(SUSPEND_BIT, &conn->suspend_tx);
1378 } else if (res == -EPIPE)
1379 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1380
1381 return res;
1382 }
1383
1384 /**
1385 * iscsi_sendpage - send one page of iSCSI Data-Out.
1386 * @conn: iscsi connection
1387 * @buf: buffer to write from
1388 * @count: remaining data
1389 * @sent: number of bytes sent
1390 *
1391 * Notes:
1392 * (Tx, Fast Path)
1393 **/
1394 static inline int
1395 iscsi_sendpage(struct iscsi_conn *conn, struct iscsi_buf *buf,
1396 int *count, int *sent)
1397 {
1398 struct socket *sk = conn->sock;
1399 int flags = 0; /* MSG_DONTWAIT; */
1400 int res, size;
1401
1402 size = buf->sg.length - buf->sent;
1403 BUG_ON(buf->sent + size > buf->sg.length);
1404 if (size > *count)
1405 size = *count;
1406 if (buf->sent + size != buf->sg.length || *count != size)
1407 flags |= MSG_MORE;
1408
1409 res = iscsi_send(sk, buf, size, flags);
1410 debug_tcp("sendpage: %d bytes, sent %d left %d sent %d res %d\n",
1411 size, buf->sent, *count, *sent, res);
1412 if (res >= 0) {
1413 conn->txdata_octets += res;
1414 buf->sent += res;
1415 *count -= res;
1416 *sent += res;
1417 if (size != res)
1418 return -EAGAIN;
1419 return 0;
1420 } else if (res == -EAGAIN) {
1421 conn->sendpage_failures_cnt++;
1422 set_bit(SUSPEND_BIT, &conn->suspend_tx);
1423 } else if (res == -EPIPE)
1424 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1425
1426 return res;
1427 }
1428
1429 static inline void
1430 iscsi_data_digest_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1431 {
1432 BUG_ON(!conn->data_tx_tfm);
1433 crypto_digest_init(conn->data_tx_tfm);
1434 ctask->digest_count = 4;
1435 }
1436
1437 static inline void
1438 iscsi_buf_data_digest_update(struct iscsi_conn *conn, struct iscsi_buf *buf)
1439 {
1440 struct scatterlist sg;
1441
1442 if (buf->sg.offset != -1)
1443 crypto_digest_update(conn->data_tx_tfm, &buf->sg, 1);
1444 else {
1445 sg_init_one(&sg, (char *)buf->sg.page, buf->sg.length);
1446 crypto_digest_update(conn->data_tx_tfm, &sg, 1);
1447 }
1448 }
1449
1450 static inline int
1451 iscsi_digest_final_send(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1452 struct iscsi_buf *buf, uint32_t *digest, int final)
1453 {
1454 int rc = 0;
1455 int sent = 0;
1456
1457 if (final)
1458 crypto_digest_final(conn->data_tx_tfm, (u8*)digest);
1459
1460 iscsi_buf_init_virt(buf, (char*)digest, 4);
1461 rc = iscsi_sendpage(conn, buf, &ctask->digest_count, &sent);
1462 if (rc) {
1463 ctask->datadigest = *digest;
1464 ctask->xmstate |= XMSTATE_DATA_DIGEST;
1465 } else
1466 ctask->digest_count = 4;
1467 return rc;
1468 }
1469
1470 /**
1471 * iscsi_solicit_data_cont - initialize next Data-Out
1472 * @conn: iscsi connection
1473 * @ctask: scsi command task
1474 * @r2t: R2T info
1475 * @left: bytes left to transfer
1476 *
1477 * Notes:
1478 * Initializes the next Data-Out within this R2T sequence and continues
1479 * to process the next scatter-gather element (if any) of this SCSI command.
1480 *
1481 * Called under connection lock.
1482 **/
1483 static void
1484 iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1485 struct iscsi_r2t_info *r2t, int left)
1486 {
1487 struct iscsi_data *hdr;
1488 struct iscsi_data_task *dtask;
1489 struct scsi_cmnd *sc = ctask->sc;
1490 int new_offset;
1491
1492 dtask = mempool_alloc(ctask->datapool, GFP_ATOMIC);
1493 BUG_ON(!dtask);
1494 hdr = &dtask->hdr;
1495 memset(hdr, 0, sizeof(struct iscsi_data));
1496 hdr->ttt = r2t->ttt;
1497 hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
1498 r2t->solicit_datasn++;
1499 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
1500 memcpy(hdr->lun, ctask->hdr.lun, sizeof(hdr->lun));
1501 hdr->itt = ctask->hdr.itt;
1502 hdr->exp_statsn = r2t->exp_statsn;
1503 new_offset = r2t->data_offset + r2t->sent;
1504 hdr->offset = cpu_to_be32(new_offset);
1505 if (left > conn->max_xmit_dlength) {
1506 hton24(hdr->dlength, conn->max_xmit_dlength);
1507 r2t->data_count = conn->max_xmit_dlength;
1508 } else {
1509 hton24(hdr->dlength, left);
1510 r2t->data_count = left;
1511 hdr->flags = ISCSI_FLAG_CMD_FINAL;
1512 }
1513 conn->dataout_pdus_cnt++;
1514
1515 iscsi_buf_init_virt(&r2t->headbuf, (char*)hdr,
1516 sizeof(struct iscsi_hdr));
1517
1518 r2t->dtask = dtask;
1519
1520 if (sc->use_sg && !iscsi_buf_left(&r2t->sendbuf)) {
1521 BUG_ON(ctask->bad_sg == r2t->sg);
1522 iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg);
1523 r2t->sg += 1;
1524 } else
1525 iscsi_buf_init_iov(&ctask->sendbuf,
1526 (char*)sc->request_buffer + new_offset,
1527 r2t->data_count);
1528
1529 list_add(&dtask->item, &ctask->dataqueue);
1530 }
1531
1532 static void
1533 iscsi_unsolicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1534 {
1535 struct iscsi_data *hdr;
1536 struct iscsi_data_task *dtask;
1537
1538 dtask = mempool_alloc(ctask->datapool, GFP_ATOMIC);
1539 BUG_ON(!dtask);
1540 hdr = &dtask->hdr;
1541 memset(hdr, 0, sizeof(struct iscsi_data));
1542 hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
1543 hdr->datasn = cpu_to_be32(ctask->unsol_datasn);
1544 ctask->unsol_datasn++;
1545 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
1546 memcpy(hdr->lun, ctask->hdr.lun, sizeof(hdr->lun));
1547 hdr->itt = ctask->hdr.itt;
1548 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
1549 hdr->offset = cpu_to_be32(ctask->total_length -
1550 ctask->r2t_data_count -
1551 ctask->unsol_count);
1552 if (ctask->unsol_count > conn->max_xmit_dlength) {
1553 hton24(hdr->dlength, conn->max_xmit_dlength);
1554 ctask->data_count = conn->max_xmit_dlength;
1555 hdr->flags = 0;
1556 } else {
1557 hton24(hdr->dlength, ctask->unsol_count);
1558 ctask->data_count = ctask->unsol_count;
1559 hdr->flags = ISCSI_FLAG_CMD_FINAL;
1560 }
1561
1562 iscsi_buf_init_virt(&ctask->headbuf, (char*)hdr,
1563 sizeof(struct iscsi_hdr));
1564
1565 list_add(&dtask->item, &ctask->dataqueue);
1566
1567 ctask->dtask = dtask;
1568 }
1569
1570 /**
1571 * iscsi_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
1572 * @conn: iscsi connection
1573 * @ctask: scsi command task
1574 * @sc: scsi command
1575 **/
1576 static void
1577 iscsi_cmd_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1578 struct scsi_cmnd *sc)
1579 {
1580 struct iscsi_session *session = conn->session;
1581
1582 BUG_ON(__kfifo_len(ctask->r2tqueue));
1583
1584 ctask->sc = sc;
1585 ctask->conn = conn;
1586 ctask->hdr.opcode = ISCSI_OP_SCSI_CMD;
1587 ctask->hdr.flags = ISCSI_ATTR_SIMPLE;
1588 int_to_scsilun(sc->device->lun, (struct scsi_lun *)ctask->hdr.lun);
1589 ctask->hdr.itt = ctask->itt | (conn->id << CID_SHIFT) |
1590 (session->age << AGE_SHIFT);
1591 ctask->hdr.data_length = cpu_to_be32(sc->request_bufflen);
1592 ctask->hdr.cmdsn = cpu_to_be32(session->cmdsn); session->cmdsn++;
1593 ctask->hdr.exp_statsn = cpu_to_be32(conn->exp_statsn);
1594 memcpy(ctask->hdr.cdb, sc->cmnd, sc->cmd_len);
1595 memset(&ctask->hdr.cdb[sc->cmd_len], 0, MAX_COMMAND_SIZE - sc->cmd_len);
1596
1597 ctask->mtask = NULL;
1598 ctask->sent = 0;
1599 ctask->sg_count = 0;
1600
1601 ctask->total_length = sc->request_bufflen;
1602
1603 if (sc->sc_data_direction == DMA_TO_DEVICE) {
1604 ctask->exp_r2tsn = 0;
1605 ctask->hdr.flags |= ISCSI_FLAG_CMD_WRITE;
1606 BUG_ON(ctask->total_length == 0);
1607 if (sc->use_sg) {
1608 struct scatterlist *sg = sc->request_buffer;
1609
1610 iscsi_buf_init_sg(&ctask->sendbuf,
1611 &sg[ctask->sg_count++]);
1612 ctask->sg = sg;
1613 ctask->bad_sg = sg + sc->use_sg;
1614 } else {
1615 iscsi_buf_init_iov(&ctask->sendbuf, sc->request_buffer,
1616 sc->request_bufflen);
1617 }
1618
1619 /*
1620 * Write counters:
1621 *
1622 * imm_count bytes to be sent right after
1623 * SCSI PDU Header
1624 *
1625 * unsol_count bytes (as Data-Out) to be sent
1626 * without R2T ack right after
1627 * immediate data
1628 *
1629 * r2t_data_count bytes to be sent via R2T ack's
1630 *
1631 * pad_count bytes to be sent as zero-padding
1632 */
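/*
 * Illustrative example (assumed values): with imm_data_en set,
 * initial_r2t_en clear, first_burst 64k, max_xmit_dlength 8k and a
 * 128k write, the code below yields imm_count = 8k, unsol_count = 56k
 * and r2t_data_count = 64k.
 */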
1633 ctask->imm_count = 0;
1634 ctask->unsol_count = 0;
1635 ctask->unsol_datasn = 0;
1636 ctask->xmstate = XMSTATE_W_HDR;
1637 /* calculate write padding */
1638 ctask->pad_count = ctask->total_length & (ISCSI_PAD_LEN-1);
1639 if (ctask->pad_count) {
1640 ctask->pad_count = ISCSI_PAD_LEN - ctask->pad_count;
1641 debug_scsi("write padding %d bytes\n",
1642 ctask->pad_count);
1643 ctask->xmstate |= XMSTATE_W_PAD;
1644 }
1645 if (session->imm_data_en) {
1646 if (ctask->total_length >= session->first_burst)
1647 ctask->imm_count = min(session->first_burst,
1648 conn->max_xmit_dlength);
1649 else
1650 ctask->imm_count = min(ctask->total_length,
1651 conn->max_xmit_dlength);
1652 hton24(ctask->hdr.dlength, ctask->imm_count);
1653 ctask->xmstate |= XMSTATE_IMM_DATA;
1654 } else
1655 zero_data(ctask->hdr.dlength);
1656
1657 if (!session->initial_r2t_en)
1658 ctask->unsol_count = min(session->first_burst,
1659 ctask->total_length) - ctask->imm_count;
1660 if (!ctask->unsol_count)
1661 /* No unsolicited Data-Outs */
1662 ctask->hdr.flags |= ISCSI_FLAG_CMD_FINAL;
1663 else
1664 ctask->xmstate |= XMSTATE_UNS_HDR | XMSTATE_UNS_INIT;
1665
1666 ctask->r2t_data_count = ctask->total_length -
1667 ctask->imm_count -
1668 ctask->unsol_count;
1669
1670 debug_scsi("cmd [itt %x total %d imm %d imm_data %d "
1671 "r2t_data %d]\n",
1672 ctask->itt, ctask->total_length, ctask->imm_count,
1673 ctask->unsol_count, ctask->r2t_data_count);
1674 } else {
1675 ctask->hdr.flags |= ISCSI_FLAG_CMD_FINAL;
1676 if (sc->sc_data_direction == DMA_FROM_DEVICE)
1677 ctask->hdr.flags |= ISCSI_FLAG_CMD_READ;
1678 ctask->datasn = 0;
1679 ctask->xmstate = XMSTATE_R_HDR;
1680 zero_data(ctask->hdr.dlength);
1681 }
1682
1683 iscsi_buf_init_virt(&ctask->headbuf, (char*)&ctask->hdr,
1684 sizeof(struct iscsi_hdr));
1685 conn->scsicmd_pdus_cnt++;
1686 }
1687
1688 /**
1689 * iscsi_mtask_xmit - xmit management (immediate) task
1690 * @conn: iscsi connection
1691 * @mtask: task management task
1692 *
1693 * Notes:
1694 * The function can return -EAGAIN, in which case the caller must
1695 * call it again later or recover.  A return value of 0 means a
1696 * successful xmit.
1697 *
1698 * The management xmit state machine consists of two states:
1699 * XMSTATE_IMM_HDR - PDU header xmit in progress
1700 * XMSTATE_IMM_DATA - PDU data xmit in progress
1701 **/
1702 static int
1703 iscsi_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
1704 {
1705
1706 debug_scsi("mtask deq [cid %d state %x itt 0x%x]\n",
1707 conn->id, mtask->xmstate, mtask->itt);
1708
1709 if (mtask->xmstate & XMSTATE_IMM_HDR) {
1710 mtask->xmstate &= ~XMSTATE_IMM_HDR;
1711 if (mtask->data_count)
1712 mtask->xmstate |= XMSTATE_IMM_DATA;
1713 if (conn->c_stage != ISCSI_CONN_INITIAL_STAGE &&
1714 conn->stop_stage != STOP_CONN_RECOVER &&
1715 conn->hdrdgst_en)
1716 iscsi_hdr_digest(conn, &mtask->headbuf,
1717 (u8*)mtask->hdrext);
1718 if (iscsi_sendhdr(conn, &mtask->headbuf, mtask->data_count)) {
1719 mtask->xmstate |= XMSTATE_IMM_HDR;
1720 if (mtask->data_count)
1721 mtask->xmstate &= ~XMSTATE_IMM_DATA;
1722 return -EAGAIN;
1723 }
1724 }
1725
1726 if (mtask->xmstate & XMSTATE_IMM_DATA) {
1727 BUG_ON(!mtask->data_count);
1728 mtask->xmstate &= ~XMSTATE_IMM_DATA;
1729 /* FIXME: implement.
1730 * A virtual buffer could be spread across multiple pages...
1731 */
1732 do {
1733 if (iscsi_sendpage(conn, &mtask->sendbuf,
1734 &mtask->data_count, &mtask->sent)) {
1735 mtask->xmstate |= XMSTATE_IMM_DATA;
1736 return -EAGAIN;
1737 }
1738 } while (mtask->data_count);
1739 }
1740
1741 BUG_ON(mtask->xmstate != XMSTATE_IDLE);
1742 return 0;
1743 }
1744
1745 static inline int
1746 handle_xmstate_r_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1747 {
1748 ctask->xmstate &= ~XMSTATE_R_HDR;
1749 if (conn->hdrdgst_en)
1750 iscsi_hdr_digest(conn, &ctask->headbuf, (u8*)ctask->hdrext);
1751 if (!iscsi_sendhdr(conn, &ctask->headbuf, 0)) {
1752 BUG_ON(ctask->xmstate != XMSTATE_IDLE);
1753 return 0; /* wait for Data-In */
1754 }
1755 ctask->xmstate |= XMSTATE_R_HDR;
1756 return -EAGAIN;
1757 }
1758
1759 static inline int
1760 handle_xmstate_w_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1761 {
1762 ctask->xmstate &= ~XMSTATE_W_HDR;
1763 if (conn->hdrdgst_en)
1764 iscsi_hdr_digest(conn, &ctask->headbuf, (u8*)ctask->hdrext);
1765 if (iscsi_sendhdr(conn, &ctask->headbuf, ctask->imm_count)) {
1766 ctask->xmstate |= XMSTATE_W_HDR;
1767 return -EAGAIN;
1768 }
1769 return 0;
1770 }
1771
1772 static inline int
1773 handle_xmstate_data_digest(struct iscsi_conn *conn,
1774 struct iscsi_cmd_task *ctask)
1775 {
1776 ctask->xmstate &= ~XMSTATE_DATA_DIGEST;
1777 debug_tcp("resent data digest 0x%x\n", ctask->datadigest);
1778 if (iscsi_digest_final_send(conn, ctask, &ctask->immbuf,
1779 &ctask->datadigest, 0)) {
1780 ctask->xmstate |= XMSTATE_DATA_DIGEST;
1781 debug_tcp("resent data digest 0x%x fail!\n",
1782 ctask->datadigest);
1783 return -EAGAIN;
1784 }
1785 return 0;
1786 }
1787
1788 static inline int
1789 handle_xmstate_imm_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1790 {
1791 BUG_ON(!ctask->imm_count);
1792 ctask->xmstate &= ~XMSTATE_IMM_DATA;
1793
1794 if (conn->datadgst_en) {
1795 iscsi_data_digest_init(conn, ctask);
1796 ctask->immdigest = 0;
1797 }
1798
1799 for (;;) {
1800 if (iscsi_sendpage(conn, &ctask->sendbuf, &ctask->imm_count,
1801 &ctask->sent)) {
1802 ctask->xmstate |= XMSTATE_IMM_DATA;
1803 if (conn->datadgst_en) {
1804 crypto_digest_final(conn->data_tx_tfm,
1805 (u8*)&ctask->immdigest);
1806 debug_tcp("tx imm sendpage fail 0x%x\n",
1807 ctask->datadigest);
1808 }
1809 return -EAGAIN;
1810 }
1811 if (conn->datadgst_en)
1812 iscsi_buf_data_digest_update(conn, &ctask->sendbuf);
1813
1814 if (!ctask->imm_count)
1815 break;
1816 iscsi_buf_init_sg(&ctask->sendbuf,
1817 &ctask->sg[ctask->sg_count++]);
1818 }
1819
1820 if (conn->datadgst_en && !(ctask->xmstate & XMSTATE_W_PAD)) {
1821 if (iscsi_digest_final_send(conn, ctask, &ctask->immbuf,
1822 &ctask->immdigest, 1)) {
1823 debug_tcp("sending imm digest 0x%x fail!\n",
1824 ctask->immdigest);
1825 return -EAGAIN;
1826 }
1827 debug_tcp("sending imm digest 0x%x\n", ctask->immdigest);
1828 }
1829
1830 return 0;
1831 }
1832
1833 static inline int
1834 handle_xmstate_uns_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1835 {
1836 struct iscsi_data_task *dtask;
1837
1838 ctask->xmstate |= XMSTATE_UNS_DATA;
1839 if (ctask->xmstate & XMSTATE_UNS_INIT) {
1840 iscsi_unsolicit_data_init(conn, ctask);
1841 BUG_ON(!ctask->dtask);
1842 dtask = ctask->dtask;
1843 if (conn->hdrdgst_en)
1844 iscsi_hdr_digest(conn, &ctask->headbuf,
1845 (u8*)dtask->hdrext);
1846 ctask->xmstate &= ~XMSTATE_UNS_INIT;
1847 }
1848 if (iscsi_sendhdr(conn, &ctask->headbuf, ctask->data_count)) {
1849 ctask->xmstate &= ~XMSTATE_UNS_DATA;
1850 ctask->xmstate |= XMSTATE_UNS_HDR;
1851 return -EAGAIN;
1852 }
1853
1854 debug_scsi("uns dout [itt 0x%x dlen %d sent %d]\n",
1855 ctask->itt, ctask->unsol_count, ctask->sent);
1856 return 0;
1857 }
1858
1859 static inline int
1860 handle_xmstate_uns_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1861 {
1862 struct iscsi_data_task *dtask = ctask->dtask;
1863
1864 BUG_ON(!ctask->data_count);
1865 ctask->xmstate &= ~XMSTATE_UNS_DATA;
1866
1867 if (conn->datadgst_en) {
1868 iscsi_data_digest_init(conn, ctask);
1869 dtask->digest = 0;
1870 }
1871
1872 for (;;) {
1873 int start = ctask->sent;
1874
1875 if (iscsi_sendpage(conn, &ctask->sendbuf, &ctask->data_count,
1876 &ctask->sent)) {
1877 ctask->unsol_count -= ctask->sent - start;
1878 ctask->xmstate |= XMSTATE_UNS_DATA;
1879 /* will continue with this ctask later.. */
1880 if (conn->datadgst_en) {
1881 crypto_digest_final(conn->data_tx_tfm,
1882 (u8 *)&dtask->digest);
1883 debug_tcp("tx uns data fail 0x%x\n",
1884 dtask->digest);
1885 }
1886 return -EAGAIN;
1887 }
1888
1889 BUG_ON(ctask->sent > ctask->total_length);
1890 ctask->unsol_count -= ctask->sent - start;
1891
1892 /*
1893 * XXX: we may get here with an uninitialized sendbuf, so only
1894 * update the digest when data was actually sent from it
1895 */
1896 if (conn->datadgst_en && ctask->sent - start > 0)
1897 iscsi_buf_data_digest_update(conn, &ctask->sendbuf);
1898
1899 if (!ctask->data_count)
1900 break;
1901 iscsi_buf_init_sg(&ctask->sendbuf,
1902 &ctask->sg[ctask->sg_count++]);
1903 }
1904 BUG_ON(ctask->unsol_count < 0);
1905
1906 /*
1907 * Done with the Data-Out. Next, check if we need
1908 * to send another unsolicited Data-Out.
1909 */
1910 if (ctask->unsol_count) {
1911 if (conn->datadgst_en) {
1912 if (iscsi_digest_final_send(conn, ctask,
1913 &dtask->digestbuf,
1914 &dtask->digest, 1)) {
1915 debug_tcp("send uns digest 0x%x fail\n",
1916 dtask->digest);
1917 return -EAGAIN;
1918 }
1919 debug_tcp("sending uns digest 0x%x, more uns\n",
1920 dtask->digest);
1921 }
1922 ctask->xmstate |= XMSTATE_UNS_INIT;
1923 return 1;
1924 }
1925
1926 if (conn->datadgst_en && !(ctask->xmstate & XMSTATE_W_PAD)) {
1927 if (iscsi_digest_final_send(conn, ctask,
1928 &dtask->digestbuf,
1929 &dtask->digest, 1)) {
1930 debug_tcp("send last uns digest 0x%x fail\n",
1931 dtask->digest);
1932 return -EAGAIN;
1933 }
1934 debug_tcp("sending uns digest 0x%x\n",dtask->digest);
1935 }
1936
1937 return 0;
1938 }
1939
1940 static inline int
1941 handle_xmstate_sol_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1942 {
1943 struct iscsi_session *session = conn->session;
1944 struct iscsi_r2t_info *r2t = ctask->r2t;
1945 struct iscsi_data_task *dtask = r2t->dtask;
1946 int left;
1947
1948 ctask->xmstate &= ~XMSTATE_SOL_DATA;
1949 ctask->dtask = dtask;
1950
1951 if (conn->datadgst_en) {
1952 iscsi_data_digest_init(conn, ctask);
1953 dtask->digest = 0;
1954 }
1955 solicit_again:
1956 /*
1957 * send Data-Out within this R2T sequence.
1958 */
1959 if (!r2t->data_count)
1960 goto data_out_done;
1961
1962 if (iscsi_sendpage(conn, &r2t->sendbuf, &r2t->data_count, &r2t->sent)) {
1963 ctask->xmstate |= XMSTATE_SOL_DATA;
1964 /* will continue with this ctask later.. */
1965 if (conn->datadgst_en) {
1966 crypto_digest_final(conn->data_tx_tfm,
1967 (u8 *)&dtask->digest);
1968 debug_tcp("r2t data send fail 0x%x\n", dtask->digest);
1969 }
1970 return -EAGAIN;
1971 }
1972
1973 BUG_ON(r2t->data_count < 0);
1974 if (conn->datadgst_en)
1975 iscsi_buf_data_digest_update(conn, &r2t->sendbuf);
1976
1977 if (r2t->data_count) {
1978 BUG_ON(ctask->sc->use_sg == 0);
1979 if (!iscsi_buf_left(&r2t->sendbuf)) {
1980 BUG_ON(ctask->bad_sg == r2t->sg);
1981 iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg);
1982 r2t->sg += 1;
1983 }
1984 goto solicit_again;
1985 }
1986
1987 data_out_done:
1988 /*
1989 * Done with this Data-Out. Next, check if we have
1990 * to send another Data-Out for this R2T.
1991 */
1992 BUG_ON(r2t->data_length - r2t->sent < 0);
1993 left = r2t->data_length - r2t->sent;
1994 if (left) {
1995 if (conn->datadgst_en) {
1996 if (iscsi_digest_final_send(conn, ctask,
1997 &dtask->digestbuf,
1998 &dtask->digest, 1)) {
1999 debug_tcp("send r2t data digest 0x%x"
2000 "fail\n", dtask->digest);
2001 return -EAGAIN;
2002 }
2003 debug_tcp("r2t data send digest 0x%x\n",
2004 dtask->digest);
2005 }
2006 iscsi_solicit_data_cont(conn, ctask, r2t, left);
2007 ctask->xmstate |= XMSTATE_SOL_DATA;
2008 ctask->xmstate &= ~XMSTATE_SOL_HDR;
2009 return 1;
2010 }
2011
2012 /*
2013 * Done with this R2T. Check if there are more
2014 * outstanding R2Ts ready to be processed.
2015 */
2016 BUG_ON(ctask->r2t_data_count - r2t->data_length < 0);
2017 if (conn->datadgst_en) {
2018 if (iscsi_digest_final_send(conn, ctask, &dtask->digestbuf,
2019 &dtask->digest, 1)) {
2020 debug_tcp("send last r2t data digest 0x%x"
2021 "fail\n", dtask->digest);
2022 return -EAGAIN;
2023 }
2024 debug_tcp("r2t done dout digest 0x%x\n", dtask->digest);
2025 }
2026
2027 ctask->r2t_data_count -= r2t->data_length;
2028 ctask->r2t = NULL;
2029 spin_lock_bh(&session->lock);
2030 __kfifo_put(ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
2031 spin_unlock_bh(&session->lock);
2032 if (__kfifo_get(ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
2033 ctask->r2t = r2t;
2034 ctask->xmstate |= XMSTATE_SOL_DATA;
2035 ctask->xmstate &= ~XMSTATE_SOL_HDR;
2036 return 1;
2037 }
2038
2039 return 0;
2040 }
2041
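/*
 * Send the padding bytes that align the write data segment to a 4-byte
 * boundary.  With data digests enabled the pad bytes are folded into
 * the digest, which is then sent: for immediate data (no dtask) the
 * digest lives in ctask->immdigest, otherwise in dtask->digest.
 */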
2042 static inline int
2043 handle_xmstate_w_pad(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
2044 {
2045 struct iscsi_data_task *dtask = ctask->dtask;
2046 int sent;
2047
2048 ctask->xmstate &= ~XMSTATE_W_PAD;
2049 iscsi_buf_init_virt(&ctask->sendbuf, (char*)&ctask->pad,
2050 ctask->pad_count);
2051 if (iscsi_sendpage(conn, &ctask->sendbuf, &ctask->pad_count, &sent)) {
2052 ctask->xmstate |= XMSTATE_W_PAD;
2053 return -EAGAIN;
2054 }
2055
2056 if (conn->datadgst_en) {
2057 iscsi_buf_data_digest_update(conn, &ctask->sendbuf);
2058 /* imm data? */
2059 if (!dtask) {
2060 if (iscsi_digest_final_send(conn, ctask, &ctask->immbuf,
2061 &ctask->immdigest, 1)) {
2062 debug_tcp("send padding digest 0x%x"
2063 "fail!\n", ctask->immdigest);
2064 return -EAGAIN;
2065 }
2066 debug_tcp("done with padding, digest 0x%x\n",
2067 ctask->immdigest);
2068 } else {
2069 if (iscsi_digest_final_send(conn, ctask,
2070 &dtask->digestbuf,
2071 &dtask->digest, 1)) {
2072 debug_tcp("send padding digest 0x%x"
2073 "fail\n", dtask->digest);
2074 return -EAGAIN;
2075 }
2076 debug_tcp("done with padding, digest 0x%x\n",
2077 dtask->digest);
2078 }
2079 }
2080
2081 return 0;
2082 }
2083
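/*
 * Per-command transmit state machine.  Walks ctask->xmstate and
 * dispatches to the handle_xmstate_*() helpers in order: command
 * header, immediate data, unsolicited Data-Out, solicited (R2T)
 * Data-Out and, finally, write padding.  Returns -EAGAIN when the
 * socket backs up so the caller can retry the same task later.
 */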
2084 static int
2085 iscsi_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
2086 {
2087 int rc = 0;
2088
2089 debug_scsi("ctask deq [cid %d xmstate %x itt 0x%x]\n",
2090 conn->id, ctask->xmstate, ctask->itt);
2091
2092 /*
2093 * serialize with TMF AbortTask
2094 */
2095 if (ctask->mtask)
2096 return rc;
2097
2098 if (ctask->xmstate & XMSTATE_R_HDR) {
2099 rc = handle_xmstate_r_hdr(conn, ctask);
2100 return rc;
2101 }
2102
2103 if (ctask->xmstate & XMSTATE_W_HDR) {
2104 rc = handle_xmstate_w_hdr(conn, ctask);
2105 if (rc)
2106 return rc;
2107 }
2108
2109 /* XXX: for data digest xmit recovery */
2110 if (ctask->xmstate & XMSTATE_DATA_DIGEST) {
2111 rc = handle_xmstate_data_digest(conn, ctask);
2112 if (rc)
2113 return rc;
2114 }
2115
2116 if (ctask->xmstate & XMSTATE_IMM_DATA) {
2117 rc = handle_xmstate_imm_data(conn, ctask);
2118 if (rc)
2119 return rc;
2120 }
2121
2122 if (ctask->xmstate & XMSTATE_UNS_HDR) {
2123 BUG_ON(!ctask->unsol_count);
2124 ctask->xmstate &= ~XMSTATE_UNS_HDR;
2125 unsolicit_head_again:
2126 rc = handle_xmstate_uns_hdr(conn, ctask);
2127 if (rc)
2128 return rc;
2129 }
2130
2131 if (ctask->xmstate & XMSTATE_UNS_DATA) {
2132 rc = handle_xmstate_uns_data(conn, ctask);
2133 if (rc == 1)
2134 goto unsolicit_head_again;
2135 else if (rc)
2136 return rc;
2137 goto done;
2138 }
2139
2140 if (ctask->xmstate & XMSTATE_SOL_HDR) {
2141 struct iscsi_r2t_info *r2t;
2142
2143 ctask->xmstate &= ~XMSTATE_SOL_HDR;
2144 ctask->xmstate |= XMSTATE_SOL_DATA;
2145 if (!ctask->r2t)
2146 __kfifo_get(ctask->r2tqueue, (void*)&ctask->r2t,
2147 sizeof(void*));
2148 solicit_head_again:
2149 r2t = ctask->r2t;
2150 if (conn->hdrdgst_en)
2151 iscsi_hdr_digest(conn, &r2t->headbuf,
2152 (u8*)r2t->dtask->hdrext);
2153 if (iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count)) {
2154 ctask->xmstate &= ~XMSTATE_SOL_DATA;
2155 ctask->xmstate |= XMSTATE_SOL_HDR;
2156 return -EAGAIN;
2157 }
2158
2159 debug_scsi("sol dout [dsn %d itt 0x%x dlen %d sent %d]\n",
2160 r2t->solicit_datasn - 1, ctask->itt, r2t->data_count,
2161 r2t->sent);
2162 }
2163
2164 if (ctask->xmstate & XMSTATE_SOL_DATA) {
2165 rc = handle_xmstate_sol_data(conn, ctask);
2166 if (rc == 1)
2167 goto solicit_head_again;
2168 if (rc)
2169 return rc;
2170 }
2171
2172 done:
2173 /*
2174 * Last thing to check is whether we need to send write
2175 * padding. Note that we check for xmstate equality, not just the bit.
2176 */
2177 if (ctask->xmstate == XMSTATE_W_PAD)
2178 rc = handle_xmstate_w_pad(conn, ctask);
2179
2180 return rc;
2181 }
2182
2183 /**
2184 * iscsi_data_xmit - xmit any command into the scheduled connection
2185 * @conn: iscsi connection
2186 *
2187 * Notes:
2188 * The function can return -EAGAIN, in which case the caller must
2189 * reschedule it later or recover. A '0' return code means
2190 * successful xmit.
2191 **/
2192 static int
2193 iscsi_data_xmit(struct iscsi_conn *conn)
2194 {
2195 if (unlikely(conn->suspend_tx)) {
2196 debug_tcp("conn %d Tx suspended!\n", conn->id);
2197 return 0;
2198 }
2199
2200 /*
2201 * Transmit in the following order:
2202 *
2203 * 1) un-finished xmit (ctask or mtask)
2204 * 2) immediate control PDUs
2205 * 3) write data
2206 * 4) SCSI commands
2207 * 5) non-immediate control PDUs
2208 *
2209 * No need to lock around __kfifo_get as long as
2210 * there's one producer and one consumer.
2211 */
2212
2213 BUG_ON(conn->ctask && conn->mtask);
2214
2215 if (conn->ctask) {
2216 if (iscsi_ctask_xmit(conn, conn->ctask))
2217 goto again;
2218 /* done with this in-progress ctask */
2219 conn->ctask = NULL;
2220 }
2221 if (conn->mtask) {
2222 if (iscsi_mtask_xmit(conn, conn->mtask))
2223 goto again;
2224 /* done with this in-progress mtask */
2225 conn->mtask = NULL;
2226 }
2227
2228 /* process immediate first */
2229 if (unlikely(__kfifo_len(conn->immqueue))) {
2230 struct iscsi_session *session = conn->session;
2231 while (__kfifo_get(conn->immqueue, (void*)&conn->mtask,
2232 sizeof(void*))) {
2233 if (iscsi_mtask_xmit(conn, conn->mtask))
2234 goto again;
2235
2236 if (conn->mtask->hdr.itt ==
2237 cpu_to_be32(ISCSI_RESERVED_TAG)) {
2238 spin_lock_bh(&session->lock);
2239 __kfifo_put(session->mgmtpool.queue,
2240 (void*)&conn->mtask, sizeof(void*));
2241 spin_unlock_bh(&session->lock);
2242 }
2243 }
2244 /* done with this mtask */
2245 conn->mtask = NULL;
2246 }
2247
2248 /* process write queue */
2249 while (__kfifo_get(conn->writequeue, (void*)&conn->ctask,
2250 sizeof(void*))) {
2251 if (iscsi_ctask_xmit(conn, conn->ctask))
2252 goto again;
2253 }
2254
2255 /* process command queue */
2256 while (__kfifo_get(conn->xmitqueue, (void*)&conn->ctask,
2257 sizeof(void*))) {
2258 if (iscsi_ctask_xmit(conn, conn->ctask))
2259 goto again;
2260 }
2261 /* done with this ctask */
2262 conn->ctask = NULL;
2263
2264 /* process the rest control plane PDUs, if any */
2265 if (unlikely(__kfifo_len(conn->mgmtqueue))) {
2266 struct iscsi_session *session = conn->session;
2267
2268 while (__kfifo_get(conn->mgmtqueue, (void*)&conn->mtask,
2269 sizeof(void*))) {
2270 if (iscsi_mtask_xmit(conn, conn->mtask))
2271 goto again;
2272
2273 if (conn->mtask->hdr.itt ==
2274 cpu_to_be32(ISCSI_RESERVED_TAG)) {
2275 spin_lock_bh(&session->lock);
2276 __kfifo_put(session->mgmtpool.queue,
2277 (void*)&conn->mtask,
2278 sizeof(void*));
2279 spin_unlock_bh(&session->lock);
2280 }
2281 }
2282 /* done with this mtask */
2283 conn->mtask = NULL;
2284 }
2285
2286 return 0;
2287
2288 again:
2289 if (unlikely(conn->suspend_tx))
2290 return 0;
2291
2292 return -EAGAIN;
2293 }
2294
2295 static void
2296 iscsi_xmitworker(void *data)
2297 {
2298 struct iscsi_conn *conn = data;
2299
2300 /*
2301 * serialize Xmit worker on a per-connection basis.
2302 */
2303 down(&conn->xmitsema);
2304 if (iscsi_data_xmit(conn))
2305 schedule_work(&conn->xmitwork);
2306 up(&conn->xmitsema);
2307 }
2308
2309 #define FAILURE_BAD_HOST 1
2310 #define FAILURE_SESSION_FAILED 2
2311 #define FAILURE_SESSION_FREED 3
2312 #define FAILURE_WINDOW_CLOSED 4
2313 #define FAILURE_SESSION_TERMINATE 5
2314
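/*
 * queuecommand() entry point.  Verifies the session state and the
 * CmdSN window, takes a preallocated ctask from the session cmdpool,
 * initializes it for this scsi_cmnd and queues it on the connection's
 * xmitqueue.  If possible the command is transmitted inline, otherwise
 * the xmitworker is scheduled.
 */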
2315 static int
2316 iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
2317 {
2318 struct Scsi_Host *host;
2319 int reason = 0;
2320 struct iscsi_session *session;
2321 struct iscsi_conn *conn = NULL;
2322 struct iscsi_cmd_task *ctask = NULL;
2323
2324 sc->scsi_done = done;
2325 sc->result = 0;
2326
2327 host = sc->device->host;
2328 session = iscsi_hostdata(host->hostdata);
2329 BUG_ON(host != session->host);
2330
2331 spin_lock(&session->lock);
2332
2333 if (session->state != ISCSI_STATE_LOGGED_IN) {
2334 if (session->state == ISCSI_STATE_FAILED) {
2335 reason = FAILURE_SESSION_FAILED;
2336 goto reject;
2337 } else if (session->state == ISCSI_STATE_TERMINATE) {
2338 reason = FAILURE_SESSION_TERMINATE;
2339 goto fault;
2340 }
2341 reason = FAILURE_SESSION_FREED;
2342 goto fault;
2343 }
2344
2345 /*
2346 * Check for iSCSI window and take care of CmdSN wrap-around
2347 */
2348 if ((int)(session->max_cmdsn - session->cmdsn) < 0) {
2349 reason = FAILURE_WINDOW_CLOSED;
2350 goto reject;
2351 }
2352
2353 conn = session->leadconn;
2354
2355 __kfifo_get(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
2356 BUG_ON(ctask->sc);
2357
2358 sc->SCp.phase = session->age;
2359 sc->SCp.ptr = (char*)ctask;
2360 iscsi_cmd_init(conn, ctask, sc);
2361
2362 __kfifo_put(conn->xmitqueue, (void*)&ctask, sizeof(void*));
2363 debug_scsi(
2364 "ctask enq [%s cid %d sc %lx itt 0x%x len %d cmdsn %d win %d]\n",
2365 sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
2366 conn->id, (long)sc, ctask->itt, sc->request_bufflen,
2367 session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
2368 spin_unlock(&session->lock);
2369
2370 if (!in_interrupt() && !down_trylock(&conn->xmitsema)) {
2371 spin_unlock_irq(host->host_lock);
2372 if (iscsi_data_xmit(conn))
2373 schedule_work(&conn->xmitwork);
2374 up(&conn->xmitsema);
2375 spin_lock_irq(host->host_lock);
2376 } else
2377 schedule_work(&conn->xmitwork);
2378
2379 return 0;
2380
2381 reject:
2382 spin_unlock(&session->lock);
2383 debug_scsi("cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason);
2384 return SCSI_MLQUEUE_HOST_BUSY;
2385
2386 fault:
2387 spin_unlock(&session->lock);
2388 printk(KERN_ERR "iscsi_tcp: cmd 0x%x is not queued (%d)\n",
2389 sc->cmnd[0], reason);
2390 sc->sense_buffer[0] = 0x70;
2391 sc->sense_buffer[2] = NOT_READY;
2392 sc->sense_buffer[7] = 0x6;
2393 sc->sense_buffer[12] = 0x08;
2394 sc->sense_buffer[13] = 0x00;
2395 sc->result = (DID_NO_CONNECT << 16);
2396 sc->resid = sc->request_bufflen;
2397 sc->scsi_done(sc);
2398 return 0;
2399 }
2400
2401 static int
2402 iscsi_change_queue_depth(struct scsi_device *sdev, int depth)
2403 {
2404 if (depth > ISCSI_MAX_CMD_PER_LUN)
2405 depth = ISCSI_MAX_CMD_PER_LUN;
2406 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
2407 return sdev->queue_depth;
2408 }
2409
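/*
 * Allocate a pool of 'max' objects of 'item_size' bytes plus a kfifo
 * of pointers to them.  '*items' receives a flat array of the same
 * pointers for direct indexing.  On any failure everything allocated
 * so far is freed and -ENOMEM is returned.
 */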
2410 static int
2411 iscsi_pool_init(struct iscsi_queue *q, int max, void ***items, int item_size)
2412 {
2413 int i;
2414
2415 *items = kmalloc(max * sizeof(void*), GFP_KERNEL);
2416 if (*items == NULL)
2417 return -ENOMEM;
2418
2419 q->max = max;
2420 q->pool = kmalloc(max * sizeof(void*), GFP_KERNEL);
2421 if (q->pool == NULL) {
2422 kfree(*items);
2423 return -ENOMEM;
2424 }
2425
2426 q->queue = kfifo_init((void*)q->pool, max * sizeof(void*),
2427 GFP_KERNEL, NULL);
2428 if (q->queue == ERR_PTR(-ENOMEM)) {
2429 kfree(q->pool);
2430 kfree(*items);
2431 return -ENOMEM;
2432 }
2433
2434 for (i = 0; i < max; i++) {
2435 q->pool[i] = kmalloc(item_size, GFP_KERNEL);
2436 if (q->pool[i] == NULL) {
2437 int j;
2438
2439 for (j = 0; j < i; j++)
2440 kfree(q->pool[j]);
2441
2442 kfifo_free(q->queue);
2443 kfree(q->pool);
2444 kfree(*items);
2445 return -ENOMEM;
2446 }
2447 memset(q->pool[i], 0, item_size);
2448 (*items)[i] = q->pool[i];
2449 __kfifo_put(q->queue, (void*)&q->pool[i], sizeof(void*));
2450 }
2451 return 0;
2452 }
2453
2454 static void
2455 iscsi_pool_free(struct iscsi_queue *q, void **items)
2456 {
2457 int i;
2458
2459 for (i = 0; i < q->max; i++)
2460 kfree(items[i]);
2461 kfree(q->pool);
2462 kfree(items);
2463 }
2464
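/*
 * Allocate and initialize per-connection state: the xmit, write,
 * immediate and mgmt kfifo queues, the xmitwork work item, a
 * preallocated mtask for the Login/Text phase and the initial PDU
 * receive buffer (MaxRecvDataSegmentLength bytes).
 */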
2465 static iscsi_connh_t
2466 iscsi_conn_create(iscsi_sessionh_t sessionh, uint32_t conn_idx)
2467 {
2468 struct iscsi_session *session = iscsi_ptr(sessionh);
2469 struct iscsi_conn *conn = NULL;
2470
2471 conn = kmalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
2472 if (conn == NULL)
2473 goto conn_alloc_fail;
2474 memset(conn, 0, sizeof(struct iscsi_conn));
2475
2476 conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
2477 conn->in_progress = IN_PROGRESS_WAIT_HEADER;
2478 conn->id = conn_idx;
2479 conn->exp_statsn = 0;
2480 conn->tmabort_state = TMABORT_INITIAL;
2481
2482 /* initial operational parameters */
2483 conn->hdr_size = sizeof(struct iscsi_hdr);
2484 conn->data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH;
2485 conn->max_recv_dlength = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH;
2486
2487 spin_lock_init(&conn->lock);
2488
2489 /* initialize general xmit PDU commands queue */
2490 conn->xmitqueue = kfifo_alloc(session->cmds_max * sizeof(void*),
2491 GFP_KERNEL, NULL);
2492 if (conn->xmitqueue == ERR_PTR(-ENOMEM))
2493 goto xmitqueue_alloc_fail;
2494
2495 /* initialize write response PDU commands queue */
2496 conn->writequeue = kfifo_alloc(session->cmds_max * sizeof(void*),
2497 GFP_KERNEL, NULL);
2498 if (conn->writequeue == ERR_PTR(-ENOMEM))
2499 goto writequeue_alloc_fail;
2500
2501 /* initialize general immediate & non-immediate PDU commands queue */
2502 conn->immqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
2503 GFP_KERNEL, NULL);
2504 if (conn->immqueue == ERR_PTR(-ENOMEM))
2505 goto immqueue_alloc_fail;
2506
2507 conn->mgmtqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
2508 GFP_KERNEL, NULL);
2509 if (conn->mgmtqueue == ERR_PTR(-ENOMEM))
2510 goto mgmtqueue_alloc_fail;
2511
2512 INIT_WORK(&conn->xmitwork, iscsi_xmitworker, conn);
2513
2514 /* allocate login_mtask used for the login/text sequences */
2515 spin_lock_bh(&session->lock);
2516 if (!__kfifo_get(session->mgmtpool.queue,
2517 (void*)&conn->login_mtask,
2518 sizeof(void*))) {
2519 spin_unlock_bh(&session->lock);
2520 goto login_mtask_alloc_fail;
2521 }
2522 spin_unlock_bh(&session->lock);
2523
2524 /* allocate initial PDU receive placeholder */
2525 if (conn->data_size <= PAGE_SIZE)
2526 conn->data = kmalloc(conn->data_size, GFP_KERNEL);
2527 else
2528 conn->data = (void*)__get_free_pages(GFP_KERNEL,
2529 get_order(conn->data_size));
2530 if (!conn->data)
2531 goto max_recv_dlength_alloc_fail;
2532
2533 init_timer(&conn->tmabort_timer);
2534 init_MUTEX(&conn->xmitsema);
2535 init_waitqueue_head(&conn->ehwait);
2536
2537 return iscsi_handle(conn);
2538
2539 max_recv_dlength_alloc_fail:
2540 spin_lock_bh(&session->lock);
2541 __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
2542 sizeof(void*));
2543 spin_unlock_bh(&session->lock);
2544 login_mtask_alloc_fail:
2545 kfifo_free(conn->mgmtqueue);
2546 mgmtqueue_alloc_fail:
2547 kfifo_free(conn->immqueue);
2548 immqueue_alloc_fail:
2549 kfifo_free(conn->writequeue);
2550 writequeue_alloc_fail:
2551 kfifo_free(conn->xmitqueue);
2552 xmitqueue_alloc_fail:
2553 kfree(conn);
2554 conn_alloc_fail:
2555 return iscsi_handle(NULL);
2556 }
2557
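/*
 * Tear a connection down: suspend Tx, release a socket that was never
 * started, wait for in-progress commands to drain, free the digest
 * transforms, the receive buffer and the queues, and reset session
 * sequencing once no connections remain.
 */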
2558 static void
2559 iscsi_conn_destroy(iscsi_connh_t connh)
2560 {
2561 struct iscsi_conn *conn = iscsi_ptr(connh);
2562 struct iscsi_session *session = conn->session;
2563
2564 down(&conn->xmitsema);
2565 set_bit(SUSPEND_BIT, &conn->suspend_tx);
2566 if (conn->c_stage == ISCSI_CONN_INITIAL_STAGE && conn->sock) {
2567 struct sock *sk = conn->sock->sk;
2568
2569 /*
2570 * conn_start() has never been called!
2571 * need to clean up the socket.
2572 */
2573 write_lock_bh(&sk->sk_callback_lock);
2574 set_bit(SUSPEND_BIT, &conn->suspend_rx);
2575 write_unlock_bh(&sk->sk_callback_lock);
2576
2577 sock_hold(conn->sock->sk);
2578 iscsi_conn_restore_callbacks(conn);
2579 sock_put(conn->sock->sk);
2580 sock_release(conn->sock);
2581 conn->sock = NULL;
2582 }
2583
2584 spin_lock_bh(&session->lock);
2585 conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
2586 if (session->leadconn == conn) {
2587 /*
2588 * leading connection? then give up on recovery.
2589 */
2590 session->state = ISCSI_STATE_TERMINATE;
2591 wake_up(&conn->ehwait);
2592 }
2593 spin_unlock_bh(&session->lock);
2594
2595 up(&conn->xmitsema);
2596
2597 /*
2598 * Block until all in-progress commands for this connection
2599 * time out or fail.
2600 */
2601 for (;;) {
2602 spin_lock_bh(&conn->lock);
2603 if (!session->host->host_busy) { /* OK for ERL == 0 */
2604 spin_unlock_bh(&conn->lock);
2605 break;
2606 }
2607 spin_unlock_bh(&conn->lock);
2608 msleep_interruptible(500);
2609 printk("conn_destroy(): host_busy %d host_failed %d\n",
2610 session->host->host_busy, session->host->host_failed);
2611 /*
2612 * force eh_abort() to unblock
2613 */
2614 wake_up(&conn->ehwait);
2615 }
2616
2617 /* now free crypto */
2618 if (conn->hdrdgst_en || conn->datadgst_en) {
2619 if (conn->tx_tfm)
2620 crypto_free_tfm(conn->tx_tfm);
2621 if (conn->rx_tfm)
2622 crypto_free_tfm(conn->rx_tfm);
2623 if (conn->data_tx_tfm)
2624 crypto_free_tfm(conn->data_tx_tfm);
2625 if (conn->data_rx_tfm)
2626 crypto_free_tfm(conn->data_rx_tfm);
2627 }
2628
2629 /* free conn->data, size = MaxRecvDataSegmentLength */
2630 if (conn->data_size <= PAGE_SIZE)
2631 kfree(conn->data);
2632 else
2633 free_pages((unsigned long)conn->data,
2634 get_order(conn->data_size));
2635
2636 spin_lock_bh(&session->lock);
2637 __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
2638 sizeof(void*));
2639 list_del(&conn->item);
2640 if (list_empty(&session->connections))
2641 session->leadconn = NULL;
2642 if (session->leadconn && session->leadconn == conn)
2643 session->leadconn = container_of(session->connections.next,
2644 struct iscsi_conn, item);
2645
2646 if (session->leadconn == NULL)
2647 /* no connections exist; reset sequencing */
2648 session->cmdsn = session->max_cmdsn = session->exp_cmdsn = 1;
2649 spin_unlock_bh(&session->lock);
2650
2651 kfifo_free(conn->xmitqueue);
2652 kfifo_free(conn->writequeue);
2653 kfifo_free(conn->immqueue);
2654 kfifo_free(conn->mgmtqueue);
2655 kfree(conn);
2656 }
2657
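/*
 * Bind a connection to its session and to the TCP socket passed in
 * from user space as a file descriptor (transport_fd).  Unless the
 * connection is resuming from a suspend, the socket callbacks are
 * intercepted for receive processing; finally Tx/Rx are unblocked so
 * the Login Phase can proceed.
 */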
2658 static int
2659 iscsi_conn_bind(iscsi_sessionh_t sessionh, iscsi_connh_t connh,
2660 uint32_t transport_fd, int is_leading)
2661 {
2662 struct iscsi_session *session = iscsi_ptr(sessionh);
2663 struct iscsi_conn *tmp = ERR_PTR(-EEXIST), *conn = iscsi_ptr(connh);
2664 struct sock *sk;
2665 struct socket *sock;
2666 int err;
2667
2668 /* look up the socket behind transport_fd */
2669 sock = sockfd_lookup(transport_fd, &err);
2670 if (!sock) {
2671 printk(KERN_ERR "iscsi_tcp: sockfd_lookup failed %d\n", err);
2672 return -EEXIST;
2673 }
2674
2675 /* look for an existing connection */
2676 spin_lock_bh(&session->lock);
2677 list_for_each_entry(tmp, &session->connections, item) {
2678 if (tmp == conn) {
2679 if (conn->c_stage != ISCSI_CONN_STOPPED ||
2680 conn->stop_stage == STOP_CONN_TERM) {
2681 printk(KERN_ERR "iscsi_tcp: can't bind "
2682 "non-stopped connection (%d:%d)\n",
2683 conn->c_stage, conn->stop_stage);
2684 spin_unlock_bh(&session->lock);
2685 return -EIO;
2686 }
2687 break;
2688 }
2689 }
2690 if (tmp != conn) {
2691 /* bind new iSCSI connection to session */
2692 conn->session = session;
2693
2694 list_add(&conn->item, &session->connections);
2695 }
2696 spin_unlock_bh(&session->lock);
2697
2698 if (conn->stop_stage != STOP_CONN_SUSPEND) {
2699 /* bind iSCSI connection and socket */
2700 conn->sock = sock;
2701
2702 /* setup Socket parameters */
2703 sk = sock->sk;
2704 sk->sk_reuse = 1;
2705 sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
2706 sk->sk_allocation = GFP_ATOMIC;
2707
2708 /* FIXME: disable Nagle's algorithm */
2709
2710 /*
2711 * Intercept TCP callbacks for sendfile like receive
2712 * processing.
2713 */
2714 iscsi_conn_set_callbacks(conn);
2715
2716 /*
2717 * set receive state machine into initial state
2718 */
2719 conn->in_progress = IN_PROGRESS_WAIT_HEADER;
2720 }
2721
2722 if (is_leading)
2723 session->leadconn = conn;
2724
2725 /*
2726 * Unblock xmitworker() so the Login Phase can pass through.
2727 */
2728 clear_bit(SUSPEND_BIT, &conn->suspend_rx);
2729 clear_bit(SUSPEND_BIT, &conn->suspend_tx);
2730
2731 return 0;
2732 }
2733
2734 static int
2735 iscsi_conn_start(iscsi_connh_t connh)
2736 {
2737 struct iscsi_conn *conn = iscsi_ptr(connh);
2738 struct iscsi_session *session = conn->session;
2739 struct sock *sk;
2740
2741 /* FF phase warming up... */
2742
2743 if (session == NULL) {
2744 printk(KERN_ERR "iscsi_tcp: can't start unbound connection\n");
2745 return -EPERM;
2746 }
2747
2748 sk = conn->sock->sk;
2749
2750 write_lock_bh(&sk->sk_callback_lock);
2751 spin_lock_bh(&session->lock);
2752 conn->c_stage = ISCSI_CONN_STARTED;
2753 session->state = ISCSI_STATE_LOGGED_IN;
2754
2755 switch(conn->stop_stage) {
2756 case STOP_CONN_RECOVER:
2757 /*
2758 * unblock eh_abort() if it is blocked. re-try all
2759 * commands after successful recovery
2760 */
2761 session->conn_cnt++;
2762 conn->stop_stage = 0;
2763 conn->tmabort_state = TMABORT_INITIAL;
2764 session->age++;
2765 wake_up(&conn->ehwait);
2766 break;
2767 case STOP_CONN_TERM:
2768 session->conn_cnt++;
2769 conn->stop_stage = 0;
2770 break;
2771 case STOP_CONN_SUSPEND:
2772 conn->stop_stage = 0;
2773 clear_bit(SUSPEND_BIT, &conn->suspend_rx);
2774 clear_bit(SUSPEND_BIT, &conn->suspend_tx);
2775 break;
2776 default:
2777 break;
2778 }
2779 spin_unlock_bh(&session->lock);
2780 write_unlock_bh(&sk->sk_callback_lock);
2781
2782 return 0;
2783 }
2784
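/*
 * Stop a connection.  Rx/Tx are suspended and the connection is marked
 * stopped; for STOP_CONN_TERM and STOP_CONN_RECOVER the socket
 * callbacks are restored, the xmit, write, immediate and mgmt queues
 * are flushed back to their pools and the socket is released.  For
 * recovery the digest settings are also reset until re-negotiation.
 */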
2785 static void
2786 iscsi_conn_stop(iscsi_connh_t connh, int flag)
2787 {
2788 struct iscsi_conn *conn = iscsi_ptr(connh);
2789 struct iscsi_session *session = conn->session;
2790 struct sock *sk;
2791 unsigned long flags;
2792
2793 BUG_ON(!conn->sock);
2794 sk = conn->sock->sk;
2795 write_lock_bh(&sk->sk_callback_lock);
2796 set_bit(SUSPEND_BIT, &conn->suspend_rx);
2797 write_unlock_bh(&sk->sk_callback_lock);
2798
2799 down(&conn->xmitsema);
2800
2801 spin_lock_irqsave(session->host->host_lock, flags);
2802 spin_lock(&session->lock);
2803 conn->stop_stage = flag;
2804 conn->c_stage = ISCSI_CONN_STOPPED;
2805 set_bit(SUSPEND_BIT, &conn->suspend_tx);
2806
2807 if (flag != STOP_CONN_SUSPEND)
2808 session->conn_cnt--;
2809
2810 if (session->conn_cnt == 0 || session->leadconn == conn)
2811 session->state = ISCSI_STATE_FAILED;
2812
2813 spin_unlock(&session->lock);
2814 spin_unlock_irqrestore(session->host->host_lock, flags);
2815
2816 if (flag == STOP_CONN_TERM || flag == STOP_CONN_RECOVER) {
2817 struct iscsi_cmd_task *ctask;
2818 struct iscsi_mgmt_task *mtask;
2819
2820 /*
2821 * Socket must go now.
2822 */
2823 sock_hold(conn->sock->sk);
2824 iscsi_conn_restore_callbacks(conn);
2825 sock_put(conn->sock->sk);
2826
2827 /*
2828 * flush xmit queues.
2829 */
2830 spin_lock_bh(&session->lock);
2831 while (__kfifo_get(conn->writequeue, (void*)&ctask,
2832 sizeof(void*)) ||
2833 __kfifo_get(conn->xmitqueue, (void*)&ctask,
2834 sizeof(void*))) {
2835 struct iscsi_r2t_info *r2t;
2836
2837 /*
2838 * flush ctask's r2t queues
2839 */
2840 while (__kfifo_get(ctask->r2tqueue, (void*)&r2t,
2841 sizeof(void*)))
2842 __kfifo_put(ctask->r2tpool.queue, (void*)&r2t,
2843 sizeof(void*));
2844
2845 spin_unlock_bh(&session->lock);
2846 local_bh_disable();
2847 iscsi_ctask_cleanup(conn, ctask);
2848 local_bh_enable();
2849 spin_lock_bh(&session->lock);
2850 }
2851 conn->ctask = NULL;
2852 while (__kfifo_get(conn->immqueue, (void*)&mtask,
2853 sizeof(void*)) ||
2854 __kfifo_get(conn->mgmtqueue, (void*)&mtask,
2855 sizeof(void*))) {
2856 __kfifo_put(session->mgmtpool.queue,
2857 (void*)&mtask, sizeof(void*));
2858 }
2859 conn->mtask = NULL;
2860 spin_unlock_bh(&session->lock);
2861
2862 /*
2863 * release the socket only after we have stopped data_xmit()
2864 * activity and flushed all outstanding requests
2865 */
2866 sock_release(conn->sock);
2867 conn->sock = NULL;
2868
2869 /*
2870 * for connection-level recovery we should not calculate
2871 * the header digest. conn->hdr_size is used as an optimization
2872 * in hdr_extract() and will be re-negotiated at
2873 * set_param() time.
2874 */
2875 if (flag == STOP_CONN_RECOVER) {
2876 conn->hdr_size = sizeof(struct iscsi_hdr);
2877 conn->hdrdgst_en = 0;
2878 conn->datadgst_en = 0;
2879 }
2880 }
2881 up(&conn->xmitsema);
2882 }
2883
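/*
 * Build and queue a control PDU.  Login and Text reuse the
 * preallocated login_mtask; other opcodes take an mtask from the
 * session mgmt pool.  The ITT, CmdSN and ExpStatSN are filled in,
 * optional data is copied, and the PDU is queued on either the
 * immediate or the mgmt queue before the xmitworker is kicked.
 */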
2884 static int
2885 iscsi_conn_send_generic(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
2886 char *data, uint32_t data_size)
2887 {
2888 struct iscsi_session *session = conn->session;
2889 struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
2890 struct iscsi_mgmt_task *mtask;
2891
2892 spin_lock_bh(&session->lock);
2893 if (session->state == ISCSI_STATE_TERMINATE) {
2894 spin_unlock_bh(&session->lock);
2895 return -EPERM;
2896 }
2897 if (hdr->opcode == (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) ||
2898 hdr->opcode == (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
2899 /*
2900 * Login and Text are sent serially, in
2901 * request-followed-by-response sequence.
2902 * Same mtask can be used. Same ITT must be used.
2903 * Note that login_mtask is preallocated at conn_create().
2904 */
2905 mtask = conn->login_mtask;
2906 else {
2907 BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
2908 BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
2909
2910 if (!__kfifo_get(session->mgmtpool.queue,
2911 (void*)&mtask, sizeof(void*))) {
2912 spin_unlock_bh(&session->lock);
2913 return -ENOSPC;
2914 }
2915 }
2916
2917 /*
2918 * pre-format CmdSN and ExpStatSN for outgoing PDU.
2919 */
2920 if (hdr->itt != cpu_to_be32(ISCSI_RESERVED_TAG)) {
2921 hdr->itt = mtask->itt | (conn->id << CID_SHIFT) |
2922 (session->age << AGE_SHIFT);
2923 nop->cmdsn = cpu_to_be32(session->cmdsn);
2924 if (conn->c_stage == ISCSI_CONN_STARTED &&
2925 !(hdr->opcode & ISCSI_OP_IMMEDIATE))
2926 session->cmdsn++;
2927 } else
2928 /* do not advance CmdSN */
2929 nop->cmdsn = cpu_to_be32(session->cmdsn);
2930
2931 nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
2932
2933 memcpy(&mtask->hdr, hdr, sizeof(struct iscsi_hdr));
2934
2935 iscsi_buf_init_virt(&mtask->headbuf, (char*)&mtask->hdr,
2936 sizeof(struct iscsi_hdr));
2937
2938 spin_unlock_bh(&session->lock);
2939
2940 if (data_size) {
2941 memcpy(mtask->data, data, data_size);
2942 mtask->data_count = data_size;
2943 } else
2944 mtask->data_count = 0;
2945
2946 mtask->xmstate = XMSTATE_IMM_HDR;
2947
2948 if (mtask->data_count) {
2949 iscsi_buf_init_iov(&mtask->sendbuf, (char*)mtask->data,
2950 mtask->data_count);
2951 }
2952
2953 debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
2954 hdr->opcode, hdr->itt, data_size);
2955
2956 /*
2957 * since send_pdu() could be called at least from two contexts,
2958 * we need to serialize __kfifo_put, so we don't have to take an
2959 * additional lock on the fast data path
2960 */
2961 if (hdr->opcode & ISCSI_OP_IMMEDIATE)
2962 __kfifo_put(conn->immqueue, (void*)&mtask, sizeof(void*));
2963 else
2964 __kfifo_put(conn->mgmtqueue, (void*)&mtask, sizeof(void*));
2965
2966 schedule_work(&conn->xmitwork);
2967
2968 return 0;
2969 }
2970
2971 static int
2972 iscsi_eh_host_reset(struct scsi_cmnd *sc)
2973 {
2974 struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
2975 struct iscsi_conn *conn = ctask->conn;
2976 struct iscsi_session *session = conn->session;
2977
2978 spin_lock_bh(&session->lock);
2979 if (session->state == ISCSI_STATE_TERMINATE) {
2980 debug_scsi("failing host reset: session terminated "
2981 "[CID %d age %d]", conn->id, session->age);
2982 spin_unlock_bh(&session->lock);
2983 return FAILED;
2984 }
2985 spin_unlock_bh(&session->lock);
2986
2987 debug_scsi("failing connection CID %d due to SCSI host reset "
2988 "[itt 0x%x age %d]", conn->id, ctask->itt,
2989 session->age);
2990 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
2991
2992 return SUCCESS;
2993 }
2994
2995 static void
2996 iscsi_tmabort_timedout(unsigned long data)
2997 {
2998 struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)data;
2999 struct iscsi_conn *conn = ctask->conn;
3000 struct iscsi_session *session = conn->session;
3001
3002 spin_lock(&session->lock);
3003 if (conn->tmabort_state == TMABORT_INITIAL) {
3004 __kfifo_put(session->mgmtpool.queue,
3005 (void*)&ctask->mtask, sizeof(void*));
3006 conn->tmabort_state = TMABORT_TIMEDOUT;
3007 debug_scsi("tmabort timedout [sc %lx itt 0x%x]\n",
3008 (long)ctask->sc, ctask->itt);
3009 /* unblock eh_abort() */
3010 wake_up(&conn->ehwait);
3011 }
3012 spin_unlock(&session->lock);
3013 }
3014
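/*
 * SCSI EH abort handler.  If the session is still logged in and the
 * command is still outstanding, an immediate TMF ABORT TASK PDU is
 * sent and a 3 second abort timer armed; the handler then sleeps on
 * conn->ehwait until the abort is answered, times out, or the session
 * is re-opened or terminated, and returns SUCCESS or FAILED.
 */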
3015 static int
3016 iscsi_eh_abort(struct scsi_cmnd *sc)
3017 {
3018 int rc;
3019 struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
3020 struct iscsi_conn *conn = ctask->conn;
3021 struct iscsi_session *session = conn->session;
3022
3023 conn->eh_abort_cnt++;
3024 debug_scsi("aborting [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
3025
3026 /*
3027 * two cases for ERL=0 here:
3028 *
3029 * 1) connection-level failure;
3030 * 2) recovery due to a protocol error;
3031 */
3032 down(&conn->xmitsema);
3033 spin_lock_bh(&session->lock);
3034 if (session->state != ISCSI_STATE_LOGGED_IN) {
3035 if (session->state == ISCSI_STATE_TERMINATE) {
3036 spin_unlock_bh(&session->lock);
3037 up(&conn->xmitsema);
3038 goto failed;
3039 }
3040 spin_unlock_bh(&session->lock);
3041 } else {
3042 struct iscsi_tm *hdr = &conn->tmhdr;
3043
3044 /*
3045 * Still LOGGED_IN...
3046 */
3047
3048 if (!ctask->sc || sc->SCp.phase != session->age) {
3049 /*
3050 * 1) the ctask completed before the timeout, but the
3051 * session is still ok => Happy Retry.
3052 * 2) the session was re-opened while the ctask was timing out.
3053 */
3054 spin_unlock_bh(&session->lock);
3055 up(&conn->xmitsema);
3056 goto success;
3057 }
3058 conn->tmabort_state = TMABORT_INITIAL;
3059 spin_unlock_bh(&session->lock);
3060
3061 /*
3062 * The ctask timed out but the session is OK.
3063 * ERL=0 requires a task mgmt abort to be issued for each
3064 * failed command; requests must be serialized.
3065 */
3066 memset(hdr, 0, sizeof(struct iscsi_tm));
3067 hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
3068 hdr->flags = ISCSI_TM_FUNC_ABORT_TASK;
3069 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3070 memcpy(hdr->lun, ctask->hdr.lun, sizeof(hdr->lun));
3071 hdr->rtt = ctask->hdr.itt;
3072 hdr->refcmdsn = ctask->hdr.cmdsn;
3073
3074 rc = iscsi_conn_send_generic(conn, (struct iscsi_hdr *)hdr,
3075 NULL, 0);
3076 if (rc) {
3077 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
3078 debug_scsi("abort sent failure [itt 0x%x]", ctask->itt);
3079 } else {
3080 struct iscsi_r2t_info *r2t;
3081
3082 /*
3083 * TMF abort vs. TMF response race logic
3084 */
3085 spin_lock_bh(&session->lock);
3086 ctask->mtask = (struct iscsi_mgmt_task *)
3087 session->mgmt_cmds[(hdr->itt & ITT_MASK) -
3088 ISCSI_MGMT_ITT_OFFSET];
3089 /*
3090 * have to flush r2tqueue to avoid r2t leaks
3091 */
3092 while (__kfifo_get(ctask->r2tqueue, (void*)&r2t,
3093 sizeof(void*))) {
3094 __kfifo_put(ctask->r2tpool.queue, (void*)&r2t,
3095 sizeof(void*));
3096 }
3097 if (conn->tmabort_state == TMABORT_INITIAL) {
3098 conn->tmfcmd_pdus_cnt++;
3099 conn->tmabort_timer.expires = 3*HZ + jiffies;
3100 conn->tmabort_timer.function =
3101 iscsi_tmabort_timedout;
3102 conn->tmabort_timer.data = (unsigned long)ctask;
3103 add_timer(&conn->tmabort_timer);
3104 debug_scsi("abort sent [itt 0x%x]", ctask->itt);
3105 } else {
3106 if (!ctask->sc ||
3107 conn->tmabort_state == TMABORT_SUCCESS) {
3108 conn->tmabort_state = TMABORT_INITIAL;
3109 spin_unlock_bh(&session->lock);
3110 up(&conn->xmitsema);
3111 goto success;
3112 }
3113 conn->tmabort_state = TMABORT_INITIAL;
3114 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
3115 }
3116 spin_unlock_bh(&session->lock);
3117 }
3118 }
3119 up(&conn->xmitsema);
3120
3121
3122 /*
3123 * block eh thread until:
3124 *
3125 * 1) abort response;
3126 * 2) abort timeout;
3127 * 3) session re-opened;
3128 * 4) session terminated;
3129 */
3130 for (;;) {
3131 int p_state = session->state;
3132
3133 rc = wait_event_interruptible(conn->ehwait,
3134 (p_state == ISCSI_STATE_LOGGED_IN ?
3135 (session->state == ISCSI_STATE_TERMINATE ||
3136 conn->tmabort_state != TMABORT_INITIAL) :
3137 (session->state == ISCSI_STATE_TERMINATE ||
3138 session->state == ISCSI_STATE_LOGGED_IN)));
3139 if (rc) {
3140 /* shutdown.. */
3141 session->state = ISCSI_STATE_TERMINATE;
3142 goto failed;
3143 }
3144
3145 if (signal_pending(current))
3146 flush_signals(current);
3147
3148 if (session->state == ISCSI_STATE_TERMINATE)
3149 goto failed;
3150
3151 spin_lock_bh(&session->lock);
3152 if (sc->SCp.phase == session->age &&
3153 (conn->tmabort_state == TMABORT_TIMEDOUT ||
3154 conn->tmabort_state == TMABORT_FAILED)) {
3155 conn->tmabort_state = TMABORT_INITIAL;
3156 if (!ctask->sc) {
3157 /*
3158 * ctask completed before tmf abort response or
3159 * time out.
3160 * But session is still ok => Happy Retry.
3161 */
3162 spin_unlock_bh(&session->lock);
3163 break;
3164 }
3165 spin_unlock_bh(&session->lock);
3166 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
3167 continue;
3168 }
3169 spin_unlock_bh(&session->lock);
3170 break;
3171 }
3172
3173 success:
3174 debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
3175 rc = SUCCESS;
3176 goto exit;
3177
3178 failed:
3179 debug_scsi("abort failed [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
3180 rc = FAILED;
3181
3182 exit:
3183 del_timer_sync(&conn->tmabort_timer);
3184
3185 down(&conn->xmitsema);
3186 if (conn->sock) {
3187 struct sock *sk = conn->sock->sk;
3188
3189 write_lock_bh(&sk->sk_callback_lock);
3190 iscsi_ctask_cleanup(conn, ctask);
3191 write_unlock_bh(&sk->sk_callback_lock);
3192 }
3193 up(&conn->xmitsema);
3194 return rc;
3195 }
3196
3197 static int
3198 iscsi_r2tpool_alloc(struct iscsi_session *session)
3199 {
3200 int i;
3201 int cmd_i;
3202
3203 /*
3204 * initialize per-task: R2T pool and xmit queue
3205 */
3206 for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
3207 struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
3208
3209 /*
3210 * pre-allocate 4x as many r2ts to handle the race where the
3211 * target acks Data-Out faster than data_xmit() can
3212 * replenish the r2tqueue.
3213 */
3214
3215 /* R2T pool */
3216 if (iscsi_pool_init(&ctask->r2tpool, session->max_r2t * 4,
3217 (void***)&ctask->r2ts, sizeof(struct iscsi_r2t_info))) {
3218 goto r2t_alloc_fail;
3219 }
3220
3221 /* R2T xmit queue */
3222 ctask->r2tqueue = kfifo_alloc(
3223 session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
3224 if (ctask->r2tqueue == ERR_PTR(-ENOMEM)) {
3225 iscsi_pool_free(&ctask->r2tpool, (void**)ctask->r2ts);
3226 goto r2t_alloc_fail;
3227 }
3228
3229 /*
3230 * the number of Data-Out PDUs within an R2T sequence can be
3231 * quite large, so use a mempool
3233 */
3234 ctask->datapool = mempool_create(ISCSI_DTASK_DEFAULT_MAX,
3235 mempool_alloc_slab, mempool_free_slab, taskcache);
3236 if (ctask->datapool == NULL) {
3237 kfifo_free(ctask->r2tqueue);
3238 iscsi_pool_free(&ctask->r2tpool, (void**)ctask->r2ts);
3239 goto r2t_alloc_fail;
3240 }
3241 INIT_LIST_HEAD(&ctask->dataqueue);
3242 }
3243
3244 return 0;
3245
3246 r2t_alloc_fail:
3247 for (i = 0; i < cmd_i; i++) {
3248 mempool_destroy(session->cmds[i]->datapool);
3249 kfifo_free(session->cmds[i]->r2tqueue);
3250 iscsi_pool_free(&session->cmds[i]->r2tpool,
3251 (void**)session->cmds[i]->r2ts);
3252 }
3253 return -ENOMEM;
3254 }
3255
3256 static void
3257 iscsi_r2tpool_free(struct iscsi_session *session)
3258 {
3259 int i;
3260
3261 for (i = 0; i < session->cmds_max; i++) {
3262 mempool_destroy(session->cmds[i]->datapool);
3263 kfifo_free(session->cmds[i]->r2tqueue);
3264 iscsi_pool_free(&session->cmds[i]->r2tpool,
3265 (void**)session->cmds[i]->r2ts);
3266 }
3267 }
3268
3269 static struct scsi_host_template iscsi_sht = {
3270 .name = "iSCSI Initiator over TCP/IP, v."
3271 ISCSI_VERSION_STR,
3272 .queuecommand = iscsi_queuecommand,
3273 .change_queue_depth = iscsi_change_queue_depth,
3274 .can_queue = ISCSI_XMIT_CMDS_MAX - 1,
3275 .sg_tablesize = ISCSI_SG_TABLESIZE,
3276 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
3277 .eh_abort_handler = iscsi_eh_abort,
3278 .eh_host_reset_handler = iscsi_eh_host_reset,
3279 .use_clustering = DISABLE_CLUSTERING,
3280 .proc_name = "iscsi_tcp",
3281 .this_id = -1,
3282 };
3283
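/*
 * Allocate per-session state inside the Scsi_Host's hostdata: the SCSI
 * command task pool, the management task pool (each mgmt task gets its
 * own receive data buffer) and the per-task R2T pools.  ITTs are
 * pre-assigned from the pool indices.
 */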
3284 static iscsi_sessionh_t
3285 iscsi_session_create(uint32_t initial_cmdsn, struct Scsi_Host *host)
3286 {
3287 int cmd_i;
3288 struct iscsi_session *session;
3289
3290 session = iscsi_hostdata(host->hostdata);
3291 memset(session, 0, sizeof(struct iscsi_session));
3292
3293 session->host = host;
3294 session->id = host->host_no;
3295 session->state = ISCSI_STATE_LOGGED_IN;
3296 session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX;
3297 session->cmds_max = ISCSI_XMIT_CMDS_MAX;
3298 session->cmdsn = initial_cmdsn;
3299 session->exp_cmdsn = initial_cmdsn + 1;
3300 session->max_cmdsn = initial_cmdsn + 1;
3301 session->max_r2t = 1;
3302
3303 /* initialize SCSI PDU commands pool */
3304 if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
3305 (void***)&session->cmds, sizeof(struct iscsi_cmd_task)))
3306 goto cmdpool_alloc_fail;
3307
3308 /* pre-format cmds pool with ITT */
3309 for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++)
3310 session->cmds[cmd_i]->itt = cmd_i;
3311
3312 spin_lock_init(&session->lock);
3313 INIT_LIST_HEAD(&session->connections);
3314
3315 /* initialize immediate command pool */
3316 if (iscsi_pool_init(&session->mgmtpool, session->mgmtpool_max,
3317 (void***)&session->mgmt_cmds, sizeof(struct iscsi_mgmt_task)))
3318 goto mgmtpool_alloc_fail;
3319
3320
3321 /* pre-format immediate cmds pool with ITT */
3322 for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
3323 session->mgmt_cmds[cmd_i]->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i;
3324 session->mgmt_cmds[cmd_i]->data = kmalloc(
3325 DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, GFP_KERNEL);
3326 if (!session->mgmt_cmds[cmd_i]->data) {
3327 int j;
3328
3329 for (j = 0; j < cmd_i; j++)
3330 kfree(session->mgmt_cmds[j]->data);
3331 goto immdata_alloc_fail;
3332 }
3333 }
3334
3335 if (iscsi_r2tpool_alloc(session))
3336 goto r2tpool_alloc_fail;
3337
3338 return iscsi_handle(session);
3339
3340 r2tpool_alloc_fail:
3341 for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++)
3342 kfree(session->mgmt_cmds[cmd_i]->data);
3343 iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
3344 immdata_alloc_fail:
3345 mgmtpool_alloc_fail:
3346 iscsi_pool_free(&session->cmdpool, (void**)session->cmds);
3347 cmdpool_alloc_fail:
3348 return iscsi_handle(NULL);
3349 }
3350
3351 static void
3352 iscsi_session_destroy(iscsi_sessionh_t sessionh)
3353 {
3354 int cmd_i;
3355 struct iscsi_data_task *dtask, *n;
3356 struct iscsi_session *session = iscsi_ptr(sessionh);
3357
3358 for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
3359 struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
3360 list_for_each_entry_safe(dtask, n, &ctask->dataqueue, item) {
3361 list_del(&dtask->item);
3362 mempool_free(dtask, ctask->datapool);
3363 }
3364 }
3365
3366 for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++)
3367 kfree(session->mgmt_cmds[cmd_i]->data);
3368
3369 iscsi_r2tpool_free(session);
3370 iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
3371 iscsi_pool_free(&session->cmdpool, (void**)session->cmds);
3372 }
3373
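/*
 * Apply a negotiated parameter.  Changes are only accepted before the
 * connection has started or while it is in recovery.  A larger
 * MaxRecvDataSegmentLength reallocates the receive buffer, the digest
 * parameters allocate or free crc32c transforms, and MaxOutstandingR2T
 * rebuilds the per-task R2T pools.
 */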
3374 static int
3375 iscsi_conn_set_param(iscsi_connh_t connh, enum iscsi_param param,
3376 uint32_t value)
3377 {
3378 struct iscsi_conn *conn = iscsi_ptr(connh);
3379 struct iscsi_session *session = conn->session;
3380
3381 spin_lock_bh(&session->lock);
3382 if (conn->c_stage != ISCSI_CONN_INITIAL_STAGE &&
3383 conn->stop_stage != STOP_CONN_RECOVER) {
3384 printk(KERN_ERR "iscsi_tcp: can not change parameter [%d]\n",
3385 param);
3386 spin_unlock_bh(&session->lock);
3387 return 0;
3388 }
3389 spin_unlock_bh(&session->lock);
3390
3391 switch(param) {
3392 case ISCSI_PARAM_MAX_RECV_DLENGTH: {
3393 char *saveptr = conn->data;
3394 gfp_t flags = GFP_KERNEL;
3395
3396 if (conn->data_size >= value) {
3397 conn->max_recv_dlength = value;
3398 break;
3399 }
3400
3401 spin_lock_bh(&session->lock);
3402 if (conn->stop_stage == STOP_CONN_RECOVER)
3403 flags = GFP_ATOMIC;
3404 spin_unlock_bh(&session->lock);
3405
3406 if (value <= PAGE_SIZE)
3407 conn->data = kmalloc(value, flags);
3408 else
3409 conn->data = (void*)__get_free_pages(flags,
3410 get_order(value));
3411 if (conn->data == NULL) {
3412 conn->data = saveptr;
3413 return -ENOMEM;
3414 }
3415 if (conn->data_size <= PAGE_SIZE)
3416 kfree(saveptr);
3417 else
3418 free_pages((unsigned long)saveptr,
3419 get_order(conn->data_size));
3420 conn->max_recv_dlength = value;
3421 conn->data_size = value;
3422 }
3423 break;
3424 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
3425 conn->max_xmit_dlength = value;
3426 break;
3427 case ISCSI_PARAM_HDRDGST_EN:
3428 conn->hdrdgst_en = value;
3429 conn->hdr_size = sizeof(struct iscsi_hdr);
3430 if (conn->hdrdgst_en) {
3431 conn->hdr_size += sizeof(__u32);
3432 if (!conn->tx_tfm)
3433 conn->tx_tfm = crypto_alloc_tfm("crc32c", 0);
3434 if (!conn->tx_tfm)
3435 return -ENOMEM;
3436 if (!conn->rx_tfm)
3437 conn->rx_tfm = crypto_alloc_tfm("crc32c", 0);
3438 if (!conn->rx_tfm) {
3439 crypto_free_tfm(conn->tx_tfm);
3440 return -ENOMEM;
3441 }
3442 } else {
3443 if (conn->tx_tfm)
3444 crypto_free_tfm(conn->tx_tfm);
3445 if (conn->rx_tfm)
3446 crypto_free_tfm(conn->rx_tfm);
3447 }
3448 break;
3449 case ISCSI_PARAM_DATADGST_EN:
3450 conn->datadgst_en = value;
3451 if (conn->datadgst_en) {
3452 if (!conn->data_tx_tfm)
3453 conn->data_tx_tfm =
3454 crypto_alloc_tfm("crc32c", 0);
3455 if (!conn->data_tx_tfm)
3456 return -ENOMEM;
3457 if (!conn->data_rx_tfm)
3458 conn->data_rx_tfm =
3459 crypto_alloc_tfm("crc32c", 0);
3460 if (!conn->data_rx_tfm) {
3461 crypto_free_tfm(conn->data_tx_tfm);
3462 return -ENOMEM;
3463 }
3464 } else {
3465 if (conn->data_tx_tfm)
3466 crypto_free_tfm(conn->data_tx_tfm);
3467 if (conn->data_rx_tfm)
3468 crypto_free_tfm(conn->data_rx_tfm);
3469 }
3470 break;
3471 case ISCSI_PARAM_INITIAL_R2T_EN:
3472 session->initial_r2t_en = value;
3473 break;
3474 case ISCSI_PARAM_MAX_R2T:
3475 if (session->max_r2t == roundup_pow_of_two(value))
3476 break;
3477 iscsi_r2tpool_free(session);
3478 session->max_r2t = value;
3479 if (session->max_r2t & (session->max_r2t - 1))
3480 session->max_r2t = roundup_pow_of_two(session->max_r2t);
3481 if (iscsi_r2tpool_alloc(session))
3482 return -ENOMEM;
3483 break;
3484 case ISCSI_PARAM_IMM_DATA_EN:
3485 session->imm_data_en = value;
3486 break;
3487 case ISCSI_PARAM_FIRST_BURST:
3488 session->first_burst = value;
3489 break;
3490 case ISCSI_PARAM_MAX_BURST:
3491 session->max_burst = value;
3492 break;
3493 case ISCSI_PARAM_PDU_INORDER_EN:
3494 session->pdu_inorder_en = value;
3495 break;
3496 case ISCSI_PARAM_DATASEQ_INORDER_EN:
3497 session->dataseq_inorder_en = value;
3498 break;
3499 case ISCSI_PARAM_ERL:
3500 session->erl = value;
3501 break;
3502 case ISCSI_PARAM_IFMARKER_EN:
3503 BUG_ON(value);
3504 session->ifmarker_en = value;
3505 break;
3506 case ISCSI_PARAM_OFMARKER_EN:
3507 BUG_ON(value);
3508 session->ofmarker_en = value;
3509 break;
3510 default:
3511 break;
3512 }
3513
3514 return 0;
3515 }
3516
3517 static int
3518 iscsi_conn_get_param(iscsi_connh_t connh, enum iscsi_param param,
3519 uint32_t *value)
3520 {
3521 struct iscsi_conn *conn = iscsi_ptr(connh);
3522 struct iscsi_session *session = conn->session;
3523
3524 switch(param) {
3525 case ISCSI_PARAM_MAX_RECV_DLENGTH:
3526 *value = conn->max_recv_dlength;
3527 break;
3528 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
3529 *value = conn->max_xmit_dlength;
3530 break;
3531 case ISCSI_PARAM_HDRDGST_EN:
3532 *value = conn->hdrdgst_en;
3533 break;
3534 case ISCSI_PARAM_DATADGST_EN:
3535 *value = conn->datadgst_en;
3536 break;
3537 case ISCSI_PARAM_INITIAL_R2T_EN:
3538 *value = session->initial_r2t_en;
3539 break;
3540 case ISCSI_PARAM_MAX_R2T:
3541 *value = session->max_r2t;
3542 break;
3543 case ISCSI_PARAM_IMM_DATA_EN:
3544 *value = session->imm_data_en;
3545 break;
3546 case ISCSI_PARAM_FIRST_BURST:
3547 *value = session->first_burst;
3548 break;
3549 case ISCSI_PARAM_MAX_BURST:
3550 *value = session->max_burst;
3551 break;
3552 case ISCSI_PARAM_PDU_INORDER_EN:
3553 *value = session->pdu_inorder_en;
3554 break;
3555 case ISCSI_PARAM_DATASEQ_INORDER_EN:
3556 *value = session->dataseq_inorder_en;
3557 break;
3558 case ISCSI_PARAM_ERL:
3559 *value = session->erl;
3560 break;
3561 case ISCSI_PARAM_IFMARKER_EN:
3562 *value = session->ifmarker_en;
3563 break;
3564 case ISCSI_PARAM_OFMARKER_EN:
3565 *value = session->ofmarker_en;
3566 break;
3567 default:
3568 return ISCSI_ERR_PARAM_NOT_FOUND;
3569 }
3570
3571 return 0;
3572 }
3573
3574 static void
3575 iscsi_conn_get_stats(iscsi_connh_t connh, struct iscsi_stats *stats)
3576 {
3577 struct iscsi_conn *conn = iscsi_ptr(connh);
3578
3579 stats->txdata_octets = conn->txdata_octets;
3580 stats->rxdata_octets = conn->rxdata_octets;
3581 stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
3582 stats->dataout_pdus = conn->dataout_pdus_cnt;
3583 stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
3584 stats->datain_pdus = conn->datain_pdus_cnt;
3585 stats->r2t_pdus = conn->r2t_pdus_cnt;
3586 stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
3587 stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
3588 stats->custom_length = 3;
3589 strcpy(stats->custom[0].desc, "tx_sendpage_failures");
3590 stats->custom[0].value = conn->sendpage_failures_cnt;
3591 strcpy(stats->custom[1].desc, "rx_discontiguous_hdr");
3592 stats->custom[1].value = conn->discontiguous_hdr_cnt;
3593 strcpy(stats->custom[2].desc, "eh_abort_cnt");
3594 stats->custom[2].value = conn->eh_abort_cnt;
3595 }
3596
3597 static int
3598 iscsi_conn_send_pdu(iscsi_connh_t connh, struct iscsi_hdr *hdr, char *data,
3599 uint32_t data_size)
3600 {
3601 struct iscsi_conn *conn = iscsi_ptr(connh);
3602 int rc;
3603
3604 down(&conn->xmitsema);
3605 rc = iscsi_conn_send_generic(conn, hdr, data, data_size);
3606 up(&conn->xmitsema);
3607
3608 return rc;
3609 }
3610
3611 static struct iscsi_transport iscsi_tcp_transport = {
3612 .owner = THIS_MODULE,
3613 .name = "tcp",
3614 .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
3615 | CAP_DATADGST,
3616 .host_template = &iscsi_sht,
3617 .hostdata_size = sizeof(struct iscsi_session),
3618 .max_conn = 1,
3619 .max_cmd_len = ISCSI_TCP_MAX_CMD_LEN,
3620 .create_session = iscsi_session_create,
3621 .destroy_session = iscsi_session_destroy,
3622 .create_conn = iscsi_conn_create,
3623 .bind_conn = iscsi_conn_bind,
3624 .destroy_conn = iscsi_conn_destroy,
3625 .set_param = iscsi_conn_set_param,
3626 .get_param = iscsi_conn_get_param,
3627 .start_conn = iscsi_conn_start,
3628 .stop_conn = iscsi_conn_stop,
3629 .send_pdu = iscsi_conn_send_pdu,
3630 .get_stats = iscsi_conn_get_stats,
3631 };
3632
3633 static int __init
3634 iscsi_tcp_init(void)
3635 {
3636 int error;
3637
3638 if (iscsi_max_lun < 1) {
3639 printk(KERN_ERR "Invalid max_lun value of %u\n", iscsi_max_lun);
3640 return -EINVAL;
3641 }
3642 iscsi_tcp_transport.max_lun = iscsi_max_lun;
3643
3644 taskcache = kmem_cache_create("iscsi_taskcache",
3645 sizeof(struct iscsi_data_task), 0,
3646 SLAB_HWCACHE_ALIGN | SLAB_NO_REAP, NULL, NULL);
3647 if (!taskcache)
3648 return -ENOMEM;
3649
3650 error = iscsi_register_transport(&iscsi_tcp_transport);
3651 if (error)
3652 kmem_cache_destroy(taskcache);
3653
3654 return error;
3655 }
3656
3657 static void __exit
3658 iscsi_tcp_exit(void)
3659 {
3660 iscsi_unregister_transport(&iscsi_tcp_transport);
3661 kmem_cache_destroy(taskcache);
3662 }
3663
3664 module_init(iscsi_tcp_init);
3665 module_exit(iscsi_tcp_exit);