Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / fs / cifs / transport.c
1 /*
2 * fs/cifs/transport.c
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 * Jeremy Allison (jra@samba.org) 2006.
7 *
8 * This library is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU Lesser General Public License as published
10 * by the Free Software Foundation; either version 2.1 of the License, or
11 * (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
16 * the GNU Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public License
19 * along with this library; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23 #include <linux/fs.h>
24 #include <linux/list.h>
25 #include <linux/gfp.h>
26 #include <linux/wait.h>
27 #include <linux/net.h>
28 #include <linux/delay.h>
29 #include <linux/freezer.h>
30 #include <linux/tcp.h>
31 #include <linux/highmem.h>
32 #include <asm/uaccess.h>
33 #include <asm/processor.h>
34 #include <linux/mempool.h>
35 #include "cifspdu.h"
36 #include "cifsglob.h"
37 #include "cifsproto.h"
38 #include "cifs_debug.h"
39
40 void
41 cifs_wake_up_task(struct mid_q_entry *mid)
42 {
43 wake_up_process(mid->callback_data);
44 }
45
46 struct mid_q_entry *
47 AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
48 {
49 struct mid_q_entry *temp;
50
51 if (server == NULL) {
52 cERROR(1, "Null TCP session in AllocMidQEntry");
53 return NULL;
54 }
55
56 temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
57 if (temp == NULL)
58 return temp;
59 else {
60 memset(temp, 0, sizeof(struct mid_q_entry));
61 temp->mid = smb_buffer->Mid; /* always LE */
62 temp->pid = current->pid;
63 temp->command = cpu_to_le16(smb_buffer->Command);
64 cFYI(1, "For smb_command %d", smb_buffer->Command);
65 /* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
66 /* when mid allocated can be before when sent */
67 temp->when_alloc = jiffies;
68 temp->server = server;
69
70 /*
71 * The default is for the mid to be synchronous, so the
72 * default callback just wakes up the current task.
73 */
74 temp->callback = cifs_wake_up_task;
75 temp->callback_data = current;
76 }
77
78 atomic_inc(&midCount);
79 temp->mid_state = MID_REQUEST_ALLOCATED;
80 return temp;
81 }
82
/*
 * Tear down a mid_q_entry: release whichever response buffer it holds
 * and return the entry to the mempool. The caller must already have
 * unlinked the entry from any pending list (see cifs_delete_mid).
 */
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	/* the server's lock command is exempt from slow-response logging
	   below, since blocking lock requests legitimately wait a long time */
	__le16 command = midEntry->server->vals->lock_cmd;
	unsigned long now;
#endif
	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	/* free the large or small response buffer, matching how it was
	   allocated by the demultiplex thread */
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/* commands taking longer than one second are indications that
	   something is wrong, unless it is quite a slow link or server */
	if ((now - midEntry->when_alloc) > HZ) {
		if ((cifsFYI & CIFS_TIMER) && (midEntry->command != command)) {
			printk(KERN_DEBUG " CIFS slow rsp: cmd %d mid %llu",
			       midEntry->command, midEntry->mid);
			printk(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
			       now - midEntry->when_alloc,
			       now - midEntry->when_sent,
			       now - midEntry->when_received);
		}
	}
#endif
	mempool_free(midEntry, cifs_mid_poolp);
}
113
/*
 * Unlink a mid from its server's pending_mid_q (under GlobalMid_Lock)
 * and then free it.
 */
void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	list_del(&mid->qhead);
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}
123
124 /*
125 * smb_send_kvec - send an array of kvecs to the server
126 * @server: Server to send the data to
127 * @iov: Pointer to array of kvecs
128 * @n_vec: length of kvec array
129 * @sent: amount of data sent on socket is stored here
130 *
131 * Our basic "send data to server" function. Should be called with srv_mutex
132 * held. The caller is responsible for handling the results.
133 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct kvec *iov, size_t n_vec,
	      size_t *sent)
{
	int rc = 0;
	int i = 0;		/* reused below as the ENOSPC/EAGAIN retry counter */
	struct msghdr smb_msg;
	unsigned int remaining;
	size_t first_vec = 0;	/* index of first kvec not yet fully sent */
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	smb_msg.msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg.msg_namelen = sizeof(struct sockaddr);
	smb_msg.msg_control = NULL;
	smb_msg.msg_controllen = 0;
	/* "noblocksnd" mounts use non-blocking sends and rely on the
	   backoff/retry loop below instead of blocking inside TCP */
	if (server->noblocksnd)
		smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
	else
		smb_msg.msg_flags = MSG_NOSIGNAL;

	/* total byte count remaining across all kvecs */
	remaining = 0;
	for (i = 0; i < n_vec; i++)
		remaining += iov[i].iov_len;

	i = 0;
	while (remaining) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear. The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet. In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],
				    n_vec - first_vec, remaining);
		if (rc == -ENOSPC || rc == -EAGAIN) {
			/*
			 * Catch if a low level driver returns -ENOSPC. This
			 * WARN_ON will be removed by 3.10 if no one reports
			 * seeing this.
			 */
			WARN_ON_ONCE(rc == -ENOSPC);
			i++;
			/* exponential backoff, capped near 15s total wait */
			if (i >= 14 || (!server->noblocksnd && (i > 2))) {
				cERROR(1, "sends on sock %p stuck for 15 "
					  "seconds", ssocket);
				rc = -EAGAIN;
				break;
			}
			msleep(1 << i);
			continue;
		}

		if (rc < 0)
			break;

		/* send was at least partially successful */
		*sent += rc;

		if (rc == remaining) {
			remaining = 0;
			break;
		}

		if (rc > remaining) {
			/* should be impossible; log and bail out */
			cERROR(1, "sent %d requested %d", rc, remaining);
			break;
		}

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cERROR(1, "tcp sent no data");
			msleep(500);
			continue;
		}

		remaining -= rc;

		/* partial send: advance first_vec/iov_base past the bytes
		   that went out (the line below resets i) */
		for (i = first_vec; i < n_vec; i++) {
			if (iov[i].iov_len) {
				if (rc > iov[i].iov_len) {
					rc -= iov[i].iov_len;
					iov[i].iov_len = 0;
				} else {
					iov[i].iov_base += rc;
					iov[i].iov_len -= rc;
					first_vec = i;
					break;
				}
			}
		}

		i = 0; /* in case we get ENOSPC on the next send */
		rc = 0;
	}
	return rc;
}
246
247 /**
248 * rqst_page_to_kvec - Turn a slot in the smb_rqst page array into a kvec
249 * @rqst: pointer to smb_rqst
250 * @idx: index into the array of the page
251 * @iov: pointer to struct kvec that will hold the result
252 *
253 * Helper function to convert a slot in the rqst->rq_pages array into a kvec.
254 * The page will be kmapped and the address placed into iov_base. The length
255 * will then be adjusted according to the ptailoff.
256 */
257 void
258 cifs_rqst_page_to_kvec(struct smb_rqst *rqst, unsigned int idx,
259 struct kvec *iov)
260 {
261 /*
262 * FIXME: We could avoid this kmap altogether if we used
263 * kernel_sendpage instead of kernel_sendmsg. That will only
264 * work if signing is disabled though as sendpage inlines the
265 * page directly into the fraglist. If userspace modifies the
266 * page after we calculate the signature, then the server will
267 * reject it and may break the connection. kernel_sendmsg does
268 * an extra copy of the data and avoids that issue.
269 */
270 iov->iov_base = kmap(rqst->rq_pages[idx]);
271
272 /* if last page, don't send beyond this offset into page */
273 if (idx == (rqst->rq_npages - 1))
274 iov->iov_len = rqst->rq_tailsz;
275 else
276 iov->iov_len = rqst->rq_pagesz;
277 }
278
/*
 * Send a complete SMB request (header kvecs plus optional page array)
 * on the server socket. Must be called with srv_mutex held. If only
 * part of the frame went out, the session is flagged for reconnect so
 * the server discards the truncated SMB.
 */
static int
smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct kvec *iov = rqst->rq_iov;
	int n_vec = rqst->rq_nvec;
	unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
	unsigned int i;
	size_t total_len = 0, sent;
	struct socket *ssocket = server->ssocket;
	int val = 1;

	if (ssocket == NULL)
		return -ENOTSOCK;

	cFYI(1, "Sending smb: smb_len=%u", smb_buf_length);
	dump_smb(iov[0].iov_base, iov[0].iov_len);

	/* cork the socket so header and pages coalesce into few segments */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
			  (char *)&val, sizeof(val));

	rc = smb_send_kvec(server, iov, n_vec, &sent);
	if (rc < 0)
		goto uncork;

	total_len += sent;

	/* now walk the page array and send each page in it */
	for (i = 0; i < rqst->rq_npages; i++) {
		struct kvec p_iov;

		cifs_rqst_page_to_kvec(rqst, i, &p_iov);
		rc = smb_send_kvec(server, &p_iov, 1, &sent);
		/* undo the kmap done by cifs_rqst_page_to_kvec */
		kunmap(rqst->rq_pages[i]);
		if (rc < 0)
			break;

		total_len += sent;
	}

uncork:
	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
			  (char *)&val, sizeof(val));

	/* +4 accounts for the RFC1002 length field that precedes the SMB */
	if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
		cFYI(1, "partial send (wanted=%u sent=%zu): terminating "
			"session", smb_buf_length + 4, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
	}

	if (rc < 0 && rc != -EINTR)
		cERROR(1, "Error %d sending data on socket to server", rc);
	else
		rc = 0;

	return rc;
}
344
345 static int
346 smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
347 {
348 struct smb_rqst rqst = { .rq_iov = iov,
349 .rq_nvec = n_vec };
350
351 return smb_send_rqst(server, &rqst);
352 }
353
354 int
355 smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
356 unsigned int smb_buf_length)
357 {
358 struct kvec iov;
359
360 iov.iov_base = smb_buffer;
361 iov.iov_len = smb_buf_length + 4;
362
363 return smb_sendv(server, &iov, 1);
364 }
365
/*
 * Wait until a send credit is available on @server, then consume one.
 * CIFS_ASYNC_OP requests (e.g. oplock breaks) never wait; blocking-op
 * requests wait but are not counted against the in-flight total.
 * Returns 0 on success, -ENOENT if the tcp session is exiting, or the
 * error from a killed wait.
 */
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (timeout == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			/* drop req_lock while sleeping; loop rechecks after
			   wakeup since another waiter may have won the race */
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if (timeout != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}
413
414 static int
415 wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
416 const int optype)
417 {
418 return wait_for_free_credits(server, timeout,
419 server->ops->get_credits_field(server, optype));
420 }
421
422 static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
423 struct mid_q_entry **ppmidQ)
424 {
425 if (ses->server->tcpStatus == CifsExiting) {
426 return -ENOENT;
427 }
428
429 if (ses->server->tcpStatus == CifsNeedReconnect) {
430 cFYI(1, "tcp session dead - return to caller to retry");
431 return -EAGAIN;
432 }
433
434 if (ses->status != CifsGood) {
435 /* check if SMB session is bad because we are setting it up */
436 if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
437 (in_buf->Command != SMB_COM_NEGOTIATE))
438 return -EAGAIN;
439 /* else ok - we are setting up session */
440 }
441 *ppmidQ = AllocMidQEntry(in_buf, ses->server);
442 if (*ppmidQ == NULL)
443 return -ENOMEM;
444 spin_lock(&GlobalMid_Lock);
445 list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
446 spin_unlock(&GlobalMid_Lock);
447 return 0;
448 }
449
450 static int
451 wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
452 {
453 int error;
454
455 error = wait_event_freezekillable(server->response_q,
456 midQ->mid_state != MID_REQUEST_SUBMITTED);
457 if (error < 0)
458 return -ERESTARTSYS;
459
460 return 0;
461 }
462
463 struct mid_q_entry *
464 cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
465 {
466 int rc;
467 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
468 struct mid_q_entry *mid;
469
470 /* enable signing if server requires it */
471 if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
472 hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
473
474 mid = AllocMidQEntry(hdr, server);
475 if (mid == NULL)
476 return ERR_PTR(-ENOMEM);
477
478 rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
479 if (rc) {
480 DeleteMidQEntry(mid);
481 return ERR_PTR(rc);
482 }
483
484 return mid;
485 }
486
487 /*
488 * Send a SMB request and set the callback function in the mid to handle
489 * the result. Caller is responsible for dealing with timeouts.
490 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		void *cbdata, const int flags)
{
	int rc, timeout, optype;
	struct mid_q_entry *mid;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	/* consume a send credit (does not block for CIFS_ASYNC_OP) */
	rc = wait_for_free_request(server, timeout, optype);
	if (rc)
		return rc;

	/* srv_mutex serializes signing and sending on this socket */
	mutex_lock(&server->srv_mutex);
	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		/* return the unused credit and wake any waiter */
		add_credits(server, 1, optype);
		wake_up(&server->request_q);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);


	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, rqst);
	cifs_in_send_dec(server);
	cifs_save_when_sent(mid);
	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	/* send failed: unhook the mid and give the credit back */
	cifs_delete_mid(mid);
	add_credits(server, 1, optype);
	wake_up(&server->request_q);
	return rc;
}
540
541 /*
542 *
543 * Send an SMB Request. No response info (other than return code)
544 * needs to be parsed.
545 *
546 * flags indicate the type of request buffer and how long to wait
547 * and whether to log NT STATUS code (error) before mapping it to POSIX error
548 *
549 */
550 int
551 SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
552 char *in_buf, int flags)
553 {
554 int rc;
555 struct kvec iov[1];
556 int resp_buf_type;
557
558 iov[0].iov_base = in_buf;
559 iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
560 flags |= CIFS_NO_RESP;
561 rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
562 cFYI(DBG2, "SendRcvNoRsp flags %d rc %d", flags, rc);
563
564 return rc;
565 }
566
/*
 * Map the final state of a synchronous mid to an errno. On any state
 * other than MID_RESPONSE_RECEIVED the mid is freed here; on success
 * it is left alive so the caller can consume mid->resp_buf and free
 * the mid itself later.
 */
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cFYI(1, "%s: cmd=%d mid=%llu state=%d", __func__,
	     le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		/* success: caller still owns the mid and its resp_buf */
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		/* unexpected state: unhook it from the pending list here */
		list_del_init(&mid->qhead);
		cERROR(1, "%s: invalid mid state mid=%llu state=%d", __func__,
		       mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}
600
601 static inline int
602 send_cancel(struct TCP_Server_Info *server, void *buf, struct mid_q_entry *mid)
603 {
604 return server->ops->send_cancel ?
605 server->ops->send_cancel(server, buf, mid) : 0;
606 }
607
/*
 * Validate a received SMB response: verify its signature when signing
 * is active on the session, then map the SMB status to a POSIX errno.
 * @log_error: when true, log the server's status code before mapping.
 */
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	/* +4 for the RFC1002 length field preceding the SMB header */
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* verify the signature if this session has signing enabled */
	if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
		struct kvec iov;
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = &iov,
					 .rq_nvec = 1 };

		iov.iov_base = mid->resp_buf;
		iov.iov_len = len;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number + 1);
		if (rc)
			cERROR(1, "SMB signature verification returned error = "
				  "%d", rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}
636
637 struct mid_q_entry *
638 cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
639 {
640 int rc;
641 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
642 struct mid_q_entry *mid;
643
644 rc = allocate_mid(ses, hdr, &mid);
645 if (rc)
646 return ERR_PTR(rc);
647 rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
648 if (rc) {
649 cifs_delete_mid(mid);
650 return ERR_PTR(rc);
651 }
652 return mid;
653 }
654
/*
 * Synchronously send an SMB built from @iov and wait for the response.
 * On success, iov[0] is repointed at the response buffer and
 * *resp_buf_type records whether it is a large or small buffer (the
 * caller must release it unless CIFS_NO_RESP was set). The request
 * buffer iov[0].iov_base is always released here, on every path.
 */
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags)
{
	int rc = 0;
	int timeout, optype;
	struct mid_q_entry *midQ;
	char *buf = iov[0].iov_base;
	unsigned int credits = 1;
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = n_vec };

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	*resp_buf_type = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_small_buf_release(buf);
		cERROR(1, "Null session");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting) {
		cifs_small_buf_release(buf);
		return -ENOENT;
	}

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */

	rc = wait_for_free_request(ses->server, timeout, optype);
	if (rc) {
		cifs_small_buf_release(buf);
		return rc;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&ses->server->srv_mutex);

	midQ = ses->server->ops->setup_request(ses, &rqst);
	if (IS_ERR(midQ)) {
		mutex_unlock(&ses->server->srv_mutex);
		cifs_small_buf_release(buf);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, optype);
		return PTR_ERR(midQ);
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_sendv(ses->server, iov, n_vec);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_small_buf_release(buf);
		goto out;
	}

	/* async callers do not wait for the response here */
	if (timeout == CIFS_ASYNC_OP) {
		cifs_small_buf_release(buf);
		goto out;
	}

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		/* wait interrupted: cancel on the server; if the request is
		   still in flight, let the demultiplex thread free the mid
		   whenever the (now unwanted) response finally arrives */
		send_cancel(ses->server, buf, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			cifs_small_buf_release(buf);
			add_credits(ses->server, 1, optype);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	cifs_small_buf_release(buf);

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, optype);
		return rc;
	}

	if (!midQ->resp_buf || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cFYI(1, "Bad MID state?");
		goto out;
	}

	/* hand the response buffer back through iov[0] */
	buf = (char *)midQ->resp_buf;
	iov[0].iov_base = buf;
	iov[0].iov_len = get_rfc1002_length(buf) + 4;
	if (midQ->large_buf)
		*resp_buf_type = CIFS_LARGE_BUFFER;
	else
		*resp_buf_type = CIFS_SMALL_BUFFER;

	credits = ses->server->ops->get_credits(midQ);

	rc = ses->server->ops->check_receive(midQ, ses->server,
					     flags & CIFS_LOG_ERROR);

	/* mark it so buf will not be freed by cifs_delete_mid */
	if ((flags & CIFS_NO_RESP) == 0)
		midQ->resp_buf = NULL;
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, credits, optype);

	return rc;
}
781
/*
 * Synchronously send a single-buffer SMB request and copy the response
 * into @out_buf. *pbytes_returned receives the RFC1002 length of the
 * response. @timeout selects the wait behavior (e.g. CIFS_ASYNC_OP to
 * return without waiting for a reply).
 */
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int timeout)
{
	int rc = 0;
	struct mid_q_entry *midQ;

	if (ses == NULL) {
		cERROR(1, "Null smb session");
		return -EIO;
	}
	if (ses->server == NULL) {
		cERROR(1, "Null tcp session");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	/* reject frames larger than the negotiated maximum */
	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
			MAX_CIFS_HDR_SIZE - 4) {
		cERROR(1, "Illegal length, greater than maximum frame, %d",
		       be32_to_cpu(in_buf->smb_buf_length));
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, timeout, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		/* wait interrupted: cancel on the server; if still in flight,
		   hand cleanup to the demultiplex thread via the callback */
		send_cancel(ses->server, in_buf, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, 1, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cERROR(1, "Bad MID state?");
		goto out;
	}

	/* copy the full frame (length field + SMB) back to the caller */
	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, 1, 0);

	return rc;
}
887
888 /* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
889 blocking lock to return. */
890
891 static int
892 send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
893 struct smb_hdr *in_buf,
894 struct smb_hdr *out_buf)
895 {
896 int bytes_returned;
897 struct cifs_ses *ses = tcon->ses;
898 LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
899
900 /* We just modify the current in_buf to change
901 the type of lock from LOCKING_ANDX_SHARED_LOCK
902 or LOCKING_ANDX_EXCLUSIVE_LOCK to
903 LOCKING_ANDX_CANCEL_LOCK. */
904
905 pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
906 pSMB->Timeout = 0;
907 pSMB->hdr.Mid = get_next_mid(ses->server);
908
909 return SendReceive(xid, ses, in_buf, out_buf,
910 &bytes_returned, 0);
911 }
912
/*
 * Send a blocking lock request and wait (interruptibly) for the reply.
 * If interrupted by a signal, issue the protocol-appropriate cancel
 * (NT_CANCEL for POSIX/Transaction2 locks, LOCKINGX_CANCEL_LOCK for
 * Windows locks) and then wait for the response so the syscall can be
 * restarted. Note the credit handling differs from SendReceive():
 * blocking ops are not counted, so no add_credits() calls here.
 */
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;		/* set once we decide to restart the syscall */
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;

	if (tcon == NULL || tcon->ses == NULL) {
		cERROR(1, "Null smb session");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cERROR(1, "Null tcp session");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	/* reject frames larger than the negotiated maximum */
	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
			MAX_CIFS_HDR_SIZE - 4) {
		cERROR(1, "Illegal length, greater than maximum frame, %d",
		       be32_to_cpu(in_buf->smb_buf_length));
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
	    (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
	    ((ses->server->tcpStatus == CifsGood) ||
	     (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(ses->server, in_buf, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			/* still interrupted: hand the mid to the demultiplex
			   thread for cleanup when the response arrives */
			send_cancel(ses->server, in_buf, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cERROR(1, "Bad MID state?");
		goto out;
	}

	/* copy the full frame (length field + SMB) back to the caller */
	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	/* EACCES here means the cancel worked: restart the syscall */
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}